query_id: string (length 32)
query: string (length 9 to 4.01k)
positive_passages: list (length 1)
negative_passages: list (length 88 to 101)
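The fields above describe one record of a passage-retrieval dataset: a query string plus one positive and many negative code passages, each a dict with "docid", "score", and "text" keys. A minimal sketch of how such a record could be consumed, assuming the Hugging Face datasets library and a hypothetical dataset name:

from datasets import load_dataset

# "org/code-retrieval" is a placeholder name; substitute the actual dataset.
ds = load_dataset("org/code-retrieval", split="train")

for record in ds:
    query = record["query"]                     # natural-language description
    positive = record["positive_passages"][0]   # exactly one positive passage
    negatives = record["negative_passages"]     # 88-101 near-miss passages
    # Each passage is a dict with "docid", "score", and "text" keys.
    hardest = max(negatives, key=lambda p: p["score"])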
1ad8b2bda71c27dd30959fac0f57aaa8
Finds the shortest path through a weighted graph using a heuristic. Used for planning and pathfinding in low-dimensional spaces.
[ { "docid": "337086241106fd93d954e33e0b7111a4", "score": "0.6438782", "text": "def a_star(start_vertex, adjacent, cost, heuristic, goal, max_depth=None):\n if max_depth is not None and max_depth <= 0:\n raise ValueError(\"Argument max_depth must be greater than zero\")\n\n class InternalNode:\n '''\n value = User data\n parent = Parent's InternalNode (Or None if starting vertex)\n g = Total cost of getting to this node\n h = heuristic(vertex)\n f = g + h\n depth = Path length (in vertexes) to this node\n old = Has a shorter path to this node been found?\n '''\n # Try slots with performance tests, measure both time and memory\n # __slots__ = ('value', 'parent', 'old', 'h', 'g', 'f', 'depth')\n def __init__(self, vertex, parent):\n self.value = vertex\n self.parent = parent\n self.old = False\n if parent is None:\n self.depth = 1\n self.g = 0\n else:\n self.depth = parent.depth + 1\n edge_cost = cost(parent.value, vertex)\n self.g = parent.g + edge_cost\n self.h = heuristic(vertex)\n self.f = self.g + self.h\n\n def __lt__(self, other):\n return self.f < other.f\n\n start_node = InternalNode(start_vertex, None)\n\n frontier = [start_node]\n # visited contains all of frontier\n visited = {start_vertex: start_node}\n inconsistent_heuristic = False\n revisits = 0 # num-explore != len(visited) b/c revists overwrite previous entry in visited.\n\n while frontier:\n node = heapq.heappop(frontier)\n if node.old:\n continue\n\n if max_depth is not None and node.depth > max_depth:\n break\n\n vertex = node.value\n if goal(vertex):\n # Make the path\n path = []\n while node is not None:\n path.append(node)\n node = node.parent\n\n # Check for inadmissibile heuristics along the final path\n total_path_cost = path[0].g\n for vertex in path:\n remaining_path_cost = total_path_cost - vertex.g\n if vertex.h > remaining_path_cost:\n logger.warning(\"Detected inadmissible heuristic\")\n break\n\n calculate_EBF(len(visited) + revisits, len(path))\n\n return tuple(p.value for p in reversed(path))\n\n # Explore more of the graph\n for neighbor in adjacent(vertex):\n neighbor_node = InternalNode(neighbor, node)\n\n never_visited = neighbor not in visited\n shorter_path_found = False\n if not never_visited:\n previous_visit = visited[neighbor]\n if previous_visit.g > neighbor_node.g:\n shorter_path_found = True\n previous_visit.old = True\n revisits += 1\n # Detect Negative cost cycles.\n # TODO: Determine the time complexity of the following loop.\n cursor = neighbor_node.parent\n while cursor is not None:\n if cursor is previous_visit:\n raise CyclicGraphException(\"Negative Cost Cycle Detected\")\n cursor = cursor.parent\n\n if never_visited or shorter_path_found:\n # Visit this neighbor\n visited[neighbor] = neighbor_node\n heapq.heappush(frontier, neighbor_node)\n\n # Check for inconsistent heuristic (decreasing estimated total path cost)\n if node.f > neighbor_node.f:\n if not inconsistent_heuristic:\n inconsistent_heuristic = True\n logger.warning(\"Detected inconsistent heuristic\")\n\n raise NoPathException()", "title": "" } ]
[ { "docid": "5478766196043feca0d87a016cf5f766", "score": "0.7408174", "text": "def shortest_path(graph, distances, visited_set, use_heuristic=False):\n n_list = []\n h = 0\n for name, dist in distances.items():\n if name not in visited_set and dist != float('inf'):\n if use_heuristic:\n h = heuristic(graph, graph.node_dict[name])\n n_list.append((dist + h, name))\n if len(n_list):\n return min(n_list)[1]\n return None", "title": "" }, { "docid": "720f903b8371c1bf8325eb58c5632ae5", "score": "0.7376161", "text": "def get_shortest_path(weighted_graph, start, end):\r\n\r\n # We always need to visit the start\r\n nodes_to_visit = {start}\r\n #print(nodes_to_visit)\r\n visited_nodes = set()\r\n # Distance from start to start is 0\r\n distance_from_start = {start: 0}\r\n tentative_parents = {}\r\n\r\n while nodes_to_visit:\r\n # The next node should be the one with the smallest weight\r\n current = min(\r\n [(distance_from_start[node], node) for node in nodes_to_visit]\r\n )[1]\r\n #print(current)\r\n # The end was reached\r\n if current == end:\r\n break\r\n\r\n nodes_to_visit.discard(current)\r\n visited_nodes.add(current)\r\n\r\n edges = weighted_graph[current]#.get(current)\r\n #print(edges)\r\n unvisited_neighbours = set(edges).difference(visited_nodes)\r\n for neighbour in unvisited_neighbours:\r\n neighbour_distance = distance_from_start[current] + \\\r\n edges[neighbour]\r\n if neighbour_distance < distance_from_start.get(neighbour,\r\n float('inf')):\r\n distance_from_start[neighbour] = neighbour_distance\r\n tentative_parents[neighbour] = current\r\n nodes_to_visit.add(neighbour)\r\n\r\n return _deconstruct_path(tentative_parents, end)", "title": "" }, { "docid": "ca4e53a8e77dc3f5268e6e4e87c5a55a", "score": "0.7306786", "text": "def astar_path(G, source, target, heuristic=None, weight='weight', search_space_nodes=False, search_space_size=False):\r\n if G.is_multigraph():\r\n raise NetworkXError(\"astar_path() not implemented for Multi(Di)Graphs\")\r\n\r\n if heuristic is None:\r\n # The default heuristic is h=0 - same as Dijkstra's algorithm\r\n def heuristic(u, v):\r\n return 0\r\n\r\n push = heapq.heappush\r\n pop = heapq.heappop\r\n\r\n # The queue stores priority, node, cost to reach, and parent.\r\n # Uses Python heapq to keep in priority order.\r\n # Add a counter to the queue to prevent the underlying heap from\r\n # attempting to compare the nodes themselves. The hash breaks ties in the\r\n # priority and is guarenteed unique for all nodes in the graph.\r\n c = heapq.count()\r\n queue = [(0, next(c), source, 0, None)]\r\n\r\n # Maps enqueued nodes to distance of discovered paths and the\r\n # computed heuristics to target. 
We avoid computing the heuristics\r\n # more than once and inserting the node into the queue too many times.\r\n enqueued = {}\r\n # Maps explored nodes to parent closest to the source.\r\n explored = {}\r\n i = 0\r\n while queue:\r\n i = i + 1\r\n # Pop the smallest item from queue.\r\n _, __, curnode, dist, parent = pop(queue)\r\n\r\n if curnode == target:\r\n path = [curnode]\r\n node = parent\r\n while node is not None:\r\n path.append(node)\r\n node = explored[node]\r\n path.reverse()\r\n if search_space_nodes:\r\n return path, explored.keys()\r\n elif search_space_size:\r\n return path, len(explored.keys())\r\n elif h:\r\n return path, i #len(explored.keys())\r\n\r\n return path\r\n\r\n if curnode in explored:\r\n continue\r\n\r\n explored[curnode] = parent\r\n\r\n for neighbor, w in G[curnode].items():\r\n if neighbor in explored:\r\n continue\r\n ncost = dist + w.get(weight, 1)\r\n if neighbor in enqueued:\r\n qcost, h = enqueued[neighbor]\r\n # if qcost < ncost, a longer path to neighbor remains\r\n # enqueued. Removing it would need to filter the whole\r\n # queue, it's better just to leave it there and ignore\r\n # it when we visit the node a second time.\r\n if qcost <= ncost:\r\n continue\r\n else:\r\n h = heuristic(neighbor, target)\r\n enqueued[neighbor] = ncost, h\r\n push(queue, (ncost + h, next(c), neighbor, ncost, curnode))\r\n\r\n raise nx.NetworkXNoPath(\"Node %s not reachable from %s\" % (source, target))", "title": "" }, { "docid": "0813c601d9a353dd8c0684859f29d0bc", "score": "0.71155983", "text": "def astar_path_pathmax(G, source, target, heuristic=None, weight='weight', search_space_nodes=False, search_space_size=False):\r\n if G.is_multigraph():\r\n raise NetworkXError(\"astar_path() not implemented for Multi(Di)Graphs\")\r\n\r\n if heuristic is None:\r\n # The default heuristic is h=0 - same as Dijkstra's algorithm\r\n def heuristic(u, v):\r\n return 0\r\n\r\n push = heapq.heappush\r\n pop = heapq.heappop\r\n\r\n # The queue stores priority, node, cost to reach, and parent.\r\n # Uses Python heapq to keep in priority order.\r\n # Add a counter to the queue to prevent the underlying heap from\r\n # attempting to compare the nodes themselves. The hash breaks ties in the\r\n # priority and is guarenteed unique for all nodes in the graph.\r\n c = heapq.count()\r\n queue = [(0, next(c), source, 0, None)]\r\n\r\n # Maps enqueued nodes to distance of discovered paths and the\r\n # computed heuristics to target. 
We avoid computing the heuristics\r\n # more than once and inserting the node into the queue too many times.\r\n enqueued = {}\r\n # Maps explored nodes to parent closest to the source.\r\n explored = {}\r\n i = 0\r\n # For consistency, store the estimates\r\n estimates = {source:heuristic(source,target)}\r\n first = True\r\n while queue:\r\n i = i + 1\r\n # Pop the smallest item from queue.\r\n _, __, curnode, dist, parent = pop(queue)\r\n\r\n if curnode == target:\r\n path = [curnode]\r\n node = parent\r\n while node is not None:\r\n path.append(node)\r\n node = explored[node]\r\n path.reverse()\r\n if search_space_nodes:\r\n return path, explored.keys()\r\n elif search_space_size:\r\n return path, len(explored.keys())\r\n elif h:\r\n return path, i #len(explored.keys())\r\n\r\n return path\r\n\r\n if curnode in explored:\r\n continue\r\n\r\n explored[curnode] = parent\r\n\r\n for neighbor, w in G[curnode].items():\r\n if neighbor in explored:\r\n continue\r\n ncost = dist + w.get(weight, 1)\r\n if neighbor in enqueued:\r\n qcost, h = enqueued[neighbor]\r\n # if qcost < ncost, a longer path to neighbor remains\r\n # enqueued. Removing it would need to filter the whole\r\n # queue, it's better just to leave it there and ignore\r\n # it when we visit the node a second time.\r\n if qcost <= ncost:\r\n continue\r\n else:\r\n h = max(heuristic(neighbor, target), estimates[curnode]-w.get(weight,1))\r\n estimates[neighbor] = h\r\n enqueued[neighbor] = ncost, h\r\n push(queue, (ncost + h, next(c), neighbor, ncost, curnode))\r\n\r\n raise nx.NetworkXNoPath(\"Node %s not reachable from %s\" % (source, target))", "title": "" }, { "docid": "b3ab440b7abd0616576d9bf38d039210", "score": "0.7099636", "text": "def compute_shortest_path(self) -> None:\n self.update_distance_of_all_edges_to(math.inf)\n self.distance_to[self.starting_node] = 0\n\n self.queue.insert(self.starting_node, 0)\n\n while self.queue.any:\n (node, _) = self.queue.remove_min()\n for neighbor, weight in node.neighbors.items():\n self.relax(node, neighbor, weight)", "title": "" }, { "docid": "165c3191e44df0e16c827be8138e2799", "score": "0.7020808", "text": "def dijkstras_shortest_path(initial_position, destination, graph, adj):\n pass", "title": "" }, { "docid": "2c007d6a98c9d0ffe754c6e1fff49e66", "score": "0.7004128", "text": "def shortest_path(self, s, t, weight=lambda e: 1):\n dist = {s: 0}\n prev = {s: None}\n Q = {s}\n while Q:\n u = min(Q, key=dist.get)\n Q.remove(u)\n if u == t:\n break\n for v in self.neighbors(u):\n alt = dist[u] + weight((u, v))\n if v not in dist or alt < dist[v]:\n Q.add(v)\n dist[v] = alt\n prev[v] = u\n if t not in prev:\n return None\n path = []\n while t is not None:\n path.append(t)\n t = prev[t]\n return reversed(path)", "title": "" }, { "docid": "7ba1f9269864602e6c925c7812ae7fe5", "score": "0.69155586", "text": "def dijkstra_path(G, source, target, weight='weight'):\r\n (length,path)=single_source_dijkstra(G, source, target=target,\r\n weight=weight)\r\n try:\r\n #print \"Length to\", str(target) + \":\", str(path[target])\r\n return path[target]\r\n except KeyError:\r\n raise nx.NetworkXNoPath(\"node %s not reachable from %s\"%(source,target))", "title": "" }, { "docid": "d03d39bfe0c88cb08ba9b016432557e1", "score": "0.6905445", "text": "def single_source_dijkstra(G,source,target=None,cutoff=None,weight='weight'):\r\n if source==target:\r\n return (0, [source])\r\n dist = {} # dictionary of final distances\r\n paths = {source:[source]} # dictionary of paths\r\n seen = {source:0}\r\n fringe=[] # use 
heapq with (distance,label) tuples\r\n heapq.heappush(fringe,(0,source))\r\n while fringe:\r\n (d,v)=heapq.heappop(fringe)\r\n if v in dist:\r\n continue # already searched this node.\r\n dist[v] = d\r\n if v == target:\r\n break\r\n #for ignore,w,edgedata in G.edges_iter(v,data=True):\r\n #is about 30% slower than the following\r\n if G.is_multigraph():\r\n edata=[]\r\n for w,keydata in G[v].items():\r\n minweight=min((dd.get(weight,1)\r\n for k,dd in keydata.items()))\r\n edata.append((w,{weight:minweight}))\r\n else:\r\n edata=iter(G[v].items())\r\n\r\n for w,edgedata in edata:\r\n vw_dist = dist[v] + edgedata.get(weight,1)\r\n\r\n if w in dist:\r\n if vw_dist < dist[w]:\r\n raise ValueError('Contradictory paths found:',\r\n 'negative weights?')\r\n elif w not in seen or vw_dist < seen[w]:\r\n seen[w] = vw_dist\r\n heapq.heappush(fringe,(vw_dist,w))\r\n paths[w] = paths[v]+[w]\r\n return (dist,paths)", "title": "" }, { "docid": "2935ca57b952c5844593cf24873cabc0", "score": "0.6892118", "text": "def shortest_paths(self, u, by_weight=False, algorithm=None,\n weight_function=None, check_weight=True, cutoff=None):\n if weight_function is not None:\n by_weight = True\n elif by_weight:\n weight_function = lambda e:e[2]\n else:\n weight_function = lambda e:1\n\n if algorithm is None and not by_weight:\n algorithm = 'BFS'\n\n if by_weight and check_weight:\n self._check_weight_function(weight_function)\n\n if algorithm=='BFS':\n if by_weight:\n raise ValueError(\"The 'BFS' algorithm does not work on \" +\n \"weighted graphs.\")\n return self._backend.shortest_path_all_vertices(u, cutoff)\n\n elif algorithm=='Dijkstra_NetworkX':\n import networkx\n # If this is not present, an error might be raised by NetworkX\n if self.num_verts()==1 and self.vertices()[0]==u:\n return {u:[u]}\n if by_weight:\n if self.is_directed():\n G = networkx.DiGraph([(e[0], e[1], dict(weight=weight_function(e))) for e in self.edge_iterator()])\n else:\n G = networkx.Graph([(e[0], e[1], dict(weight=weight_function(e))) for e in self.edge_iterator()])\n else:\n # Needed to remove labels.\n if self.is_directed():\n G = networkx.DiGraph(self.edges(labels=False))\n else:\n G = networkx.Graph(self.edges(labels=False))\n G.add_nodes_from(self.vertices())\n return networkx.single_source_dijkstra_path(G, u)\n\n elif algorithm in ['Dijkstra_Boost','Bellman-Ford_Boost',None]:\n from sage.graphs.base.boost_graph import shortest_paths\n _,pred = shortest_paths(self, u, weight_function, algorithm)\n paths = {}\n for v in pred.keys():\n w = v\n path = [w]\n while w != u:\n w = pred[w]\n path.append(w)\n path.reverse()\n paths[v] = path\n return paths\n\n else:\n raise ValueError(\"Algorithm \" + algorithm + \" not yet \" +\n \"implemented. 
Please, contribute!\")", "title": "" }, { "docid": "62b6f182269daf06fd638618f023f634", "score": "0.688034", "text": "def astar_graph_search(problem, h):\n def f(n):\n return n.path_cost + h(n)\n return best_first_graph_search(problem, f)", "title": "" }, { "docid": "e93c91088fafa572a330d39a275caaa8", "score": "0.68642414", "text": "def single_source_dijkstra_path(G,source, cutoff=None, weight='weight'):\r\n (length,path)=single_source_dijkstra(G,source, weight = weight)\r\n return path", "title": "" }, { "docid": "643935d6e525c0092a1ac0a8fd56a286", "score": "0.6858446", "text": "def shortest_path(self):\n print(\"Attempting to find shortest path from point A:{} to point B:{}\".format(self.start.node, self.goal.node))\n\n current = self.Nodes[self.start.node]\n current.score = self.f_score(current)\n\n self.frontier.put((current.score, current.node)) # Adding start node to the frontier\n\n bestGoalReached = False\n\n while not self.frontier.empty():\n\n current_node = self.frontier.get() # Picking the node with the lowest f-score\n current = self.Nodes[current_node[1]] # Using index 1 since f_score is stored at index 0\n\n for neighbour in current.neighbours: # Loop through all connecting nodes and add them to frontier\n\n if neighbour not in self.explored:\n self.Nodes[neighbour] = Node(neighbour, self.M)\n neighbour = self.Nodes[neighbour]\n neighbour.score = self.f_score(neighbour, current)\n\n self.frontier.put((neighbour.score, neighbour.node))\n\n # Break condition\n if current.node == self.goal.node:\n bestone = self.frontier.get()\n if bestone[0] >= current.score:\n bestGoalReached = True\n break\n self.frontier.put(bestone)\n\n self.explored.add(current.node)\n\n if bestGoalReached:\n print(\"Shortest path found!\")\n path = self.retrace_path(self.goal.node)\n else:\n print(\"No path found!\")\n path = None\n\n return path", "title": "" }, { "docid": "84b2ca33a4dcba31823536327124dd9d", "score": "0.6823071", "text": "def shortest_path(graph, source, target):\n\n # instantiate queue and first node\n path = {}\n Q = []\n for node in graph.node_list():\n if node != source:\n # queue for minimizing distance\n Q.append((math.inf, node))\n # keep track of the current tentative distance and\n # neighbor of each node\n path[node] = (math.inf, None)\n\n # initialize source node\n Q.append((0, source))\n path[source] = (0, None)\n\n # iterate through queue based on next closest distance\n while len(Q) != 0:\n # sort queue and get next lowest neighbor\n Q.sort()\n dist, node = Q.pop(0)\n\n # get neighbors and distance to next node\n for neighbor, weight in graph.neighbors(node):\n # find this, if still in our queue\n queue_index = None\n for i,(w,n) in enumerate(Q):\n if n == neighbor:\n queue_index = i\n\n if queue_index is None:\n # neighbor not in queue, skip\n continue\n\n # check if it's shorter to proceed here via \n # the current node than our queue's estimate\n alt = dist + weight\n if alt < Q[queue_index][0]:\n # update variables\n path[neighbor] = (alt, node)\n Q[queue_index] = (alt, neighbor)\n\n # backtrack to get the shortest path and cumulative costs\n traj = []\n current = target\n while True:\n _, parent = path[current]\n traj.append(current)\n\n # stop condition\n if parent is None:\n break\n current = parent\n\n # reverse path\n traj.reverse()\n\n # return path, cost\n return traj, path[target][0]", "title": "" }, { "docid": "4520892affda9b68e448ff745355752c", "score": "0.6790623", "text": "def solve(W):\n # setup dijkstra\n h, w = len(W), len(W[0])\n D, P = dijsktra(W, 
(0,0))\n print('Minimum Sum path cost: {}'.format(D[(h-1, w-1)]))", "title": "" }, { "docid": "6d0f0d768d098e7ac4ac8ec5b5515bcd", "score": "0.67847425", "text": "def get_shortest_path(graph: nx.Graph, start: int, goal: int) -> list:\n paths = list()\n path_goal_min_val = float('inf')\n path_goal_min = None\n\n # Check for early termination if we're already at the goal.\n if start == goal:\n return [start]\n\n # Determine the intial distance from the start to the goal.\n goal_initial_distance = distance.euclidean(\n graph.intersections[start],\n graph.intersections[goal]\n )\n path = Path(\n cost=Cost(goal_initial_distance, 0, goal_initial_distance),\n intersections=[start],\n previous=start,\n frontier=start\n )\n heapq.heappush(paths, path)\n\n while len(paths) >= 1:\n nearest_frontier_path = heapq.heappop(paths)\n for neighbor_road in graph.roads[nearest_frontier_path.frontier]:\n\n # Ensure we don't go backwards.\n if neighbor_road == nearest_frontier_path.previous:\n continue\n else:\n new_path = update_path(\n graph=graph,\n path=nearest_frontier_path,\n new_frontier=neighbor_road,\n goal=goal\n )\n\n if neighbor_road == goal: # We've got to the goal....\n if new_path.cost.total < path_goal_min_val: # ...and it's cheaper than we've seen so far.\n path_goal_min_val = new_path.cost.total\n path_goal_min = new_path.intersections\n else: # ... and it's more expensive than we've seen so far.\n continue\n else:\n if path_goal_min is not None: # We have some kind of path to the goal...\n if new_path.cost.total >= path_goal_min_val: # ... and the current cost is too high.\n continue\n else: # ... it's not too high\n heapq.heappush(paths, new_path)\n else: # We havn't found the goal yet, keep going.\n heapq.heappush(paths, new_path)\n\n if path_goal_min is not None:\n return path_goal_min\n else:\n return -1", "title": "" }, { "docid": "1f0cac23f2cafcc2c9218e4908fddecf", "score": "0.6763409", "text": "def bidirectional_dijkstra(G, source, target, weight = 'weight'):\r\n if source is None or target is None:\r\n raise nx.NetworkXException(\r\n \"Bidirectional Dijkstra called with no source or target\")\r\n if source == target: return (0, [source])\r\n #Init: Forward Backward\r\n dists = [{}, {}]# dictionary of final distances\r\n paths = [{source:[source]}, {target:[target]}] # dictionary of paths\r\n fringe = [[], []] #heap of (distance, node) tuples for extracting next node to expand\r\n seen = [{source:0}, {target:0} ]#dictionary of distances to nodes seen\r\n #initialize fringe heap\r\n heapq.heappush(fringe[0], (0, source))\r\n heapq.heappush(fringe[1], (0, target))\r\n #neighs for extracting correct neighbor information\r\n if G.is_directed():\r\n neighs = [G.successors_iter, G.predecessors_iter]\r\n else:\r\n neighs = [G.neighbors_iter, G.neighbors_iter]\r\n #variables to hold shortest discovered path\r\n #finaldist = 1e30000\r\n finalpath = []\r\n dir = 1\r\n while fringe[0] and fringe[1]:\r\n # choose direction\r\n # dir == 0 is forward direction and dir == 1 is back\r\n dir = 1-dir\r\n # extract closest to expand\r\n (dist, v )= heapq.heappop(fringe[dir])\r\n if v in dists[dir]:\r\n # Shortest path to v has already been found\r\n continue\r\n # update distance\r\n dists[dir][v] = dist #equal to seen[dir][v]\r\n if v in dists[1-dir]:\r\n # if we have scanned v in both directions we are done\r\n # we have now discovered the shortest path\r\n return (finaldist,finalpath)\r\n\r\n for w in neighs[dir](v):\r\n if(dir==0): #forward\r\n if G.is_multigraph():\r\n 
minweight=min((dd.get(weight,1)\r\n for k,dd in G[v][w].items()))\r\n else:\r\n minweight=G[v][w].get(weight,1)\r\n vwLength = dists[dir][v] + minweight #G[v][w].get(weight,1)\r\n else: #back, must remember to change v,w->w,v\r\n if G.is_multigraph():\r\n minweight=min((dd.get(weight,1)\r\n for k,dd in G[w][v].items()))\r\n else:\r\n minweight=G[w][v].get(weight,1)\r\n vwLength = dists[dir][v] + minweight #G[w][v].get(weight,1)\r\n\r\n if w in dists[dir]:\r\n if vwLength < dists[dir][w]:\r\n raise ValueError(\"Contradictory paths found: negative weights?\")\r\n elif w not in seen[dir] or vwLength < seen[dir][w]:\r\n # relaxing\r\n seen[dir][w] = vwLength\r\n heapq.heappush(fringe[dir], (vwLength,w))\r\n paths[dir][w] = paths[dir][v]+[w]\r\n if w in seen[0] and w in seen[1]:\r\n #see if this path is better than than the already\r\n #discovered shortest path\r\n totaldist = seen[0][w] + seen[1][w]\r\n if finalpath == [] or finaldist > totaldist:\r\n finaldist = totaldist\r\n revpath = paths[1][w][:]\r\n revpath.reverse()\r\n finalpath = paths[0][w] + revpath[1:]\r\n raise nx.NetworkXNoPath(\"No path between %s and %s.\" % (source, target))", "title": "" }, { "docid": "6a74c5d9d643a1eec74ec24ecc402e7c", "score": "0.6726148", "text": "def vcg_cheapest_path(graph: nx.Graph, source, target):\n # first, find the shortest path between the source and the target\n main_path = nx.dijkstra_path(graph, source, target, \"weight\")\n # convert the main path to contain tuples that represents edges\n main_path = [tuple(main_path[i: i+2]) for i in range(len(main_path)-1)]\n # find it's cost\n main_cost = nx.dijkstra_path_length(graph, source, target, 'weight')\n\n # now calc every edge's cost\n for (s, t, w) in graph.edges.data('weight'):\n # the edge\n e = (s, t)\n # if the edge in the main path\n if e in main_path:\n # calc the shortest path without this edge\n # duplicate the graph\n temp_graph = nx.Graph.copy(graph)\n # remove the current edge\n temp_graph.remove_edges_from([(s, t), (t, s)])\n # calc the new shortest path\n current_cost = nx.dijkstra_path_length(temp_graph, source, target, 'weight')\n # the cost of the edge is: the main cost -(current cost + the edge's weight)\n cost = main_cost - (current_cost + w)\n print(\"(%s, %s) cost is %d\" % (s, t, cost))\n # else, the edge isn't in the main path\n else:\n # then its cost is 0\n print(\"(%s, %s) cost is 0\" % (s, t))", "title": "" }, { "docid": "56cedf60aac09e43f1779c53594105d3", "score": "0.6726049", "text": "def a_star_search(problem, heuristic):\n \n smallState = problem.start.extractSmallState()\n n0 = Node(smallState, problem.start, None, None, problem,True, heuristic)\n if problem.is_goal_state(n0.state):\n return ['None']\n frontier = utils.PriorityQueue()\n frontier.push(n0, n0.cost)\n explored = set()\n while not frontier.is_empty():\n node = frontier.pop()\n path = node.get_path()\n explored.add(node.state)\n if problem.is_goal_state(node.state):\n return path\n next_states = problem.get_successors(node.fullState, node.state)\n frontier_costs = []\n frontier_states = []\n for n in frontier.heap:\n frontier_states.append(n[2].state)\n frontier_costs.append(n[2].cost)\n for next_state in next_states:\n next_node = Node(next_state[0], next_state[1], next_state[2], node, problem,True, heuristic)\n #print next_state[0]\n if (next_node.state not in explored and next_node.state not in frontier_states) or \\\n (next_node.state in frontier_states and frontier_costs[\n frontier_states.index(next_node.state)] > next_node.cost):\n 
frontier.push(next_node, next_node.cost)\n print \"no more frontiers...return path!!\"\n return path #node.getPath()", "title": "" }, { "docid": "c291f9bdeb9f758c43fc0e06dc3fede8", "score": "0.6661014", "text": "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n startNode = (problem.getStartState(), [], 0) # State of form: (node, path, pathcost)\n\n fringe = util.PriorityQueue() # Priority queue for fringe = selects nodes with lowest priority (Based on heuristic)\n fringe.push(startNode, 0) # Start node with initial priority of 0\n\n closed = set() # Contains all expanded nodes\n\n while not fringe.isEmpty():\n curState = fringe.pop() # Select node with lowest priority\n\n node = curState[0] # Store current node\n path = curState[1] # Get path to current node\n pathcost = curState[2] # Get cost of path to current node\n\n if problem.isGoalState(node): # If current node is the goal\n return path # Return path to current node\n\n if node not in closed: # If current node has not been expanded\n closed.add(node) # Add current node to closed set, since we will expand it\n\n # 'children' is a list of the form: [(successor, action, stepCost), (), ...]\n children = problem.getSuccessors(node) # Get children of current node (expand current node)\n\n for child,action,cost in children: # Iterate through each child \n if child in closed: # If child has already been expanded, then ignore it\n continue\n else:\n hn = heuristic(child, problem) # Heuristic: estimated cost from 'child' to goal node\n gn = pathcost + cost # Path cost from start node to 'child'\n newState = (child, path + [action], gn) # newState = (current node, updated path, updated cost)\n fringe.push(newState, hn + gn) # Add newState to fringe with priority : f(n) = h(n) + g(n)\n\n raise Exception(\"Failure\") # Did not find goal", "title": "" }, { "docid": "7d8c4182777769140bf8ec32bbdfed85", "score": "0.66590106", "text": "def a_star(graph, h, start, goal):\n\n path = []\n path_cost = 0\n queue = PriorityQueue()\n queue.put((0, start))\n visited = set(start)\n\n branch = {}\n found = False\n \n while not queue.empty():\n item = queue.get()\n current_cost = item[0]\n current_node = item[1]\n \n if current_node == goal: \n print('Found a path.')\n found = True\n break\n else: \n for next_node in graph[current_node]:\n cost = graph.edges[current_node, next_node]['weight']\n new_cost = current_cost + cost + heuristic(next_node, goal)\n \n if next_node not in visited: \n visited.add(next_node) \n queue.put((new_cost, next_node))\n \n branch[next_node] = (new_cost, current_node)\n \n if found:\n # retrace steps\n n = goal\n path_cost = branch[n][0]\n path.append(goal)\n while branch[n][1] != start:\n path.append(branch[n][1])\n n = branch[n][1]\n path.append(branch[n][1])\n else:\n print('**********************')\n print('Failed to find a path!')\n print('**********************')\n \n return np.array(path[::-1]), path_cost", "title": "" }, { "docid": "2debe06281955bda2de328501b390e8e", "score": "0.6651053", "text": "def astar_graph_search(problem, h=None):\n h = memoize(h or problem.h, slot='h')\n return best_first_graph_search(problem, lambda n: n.path_cost + h(n))", "title": "" }, { "docid": "e88db77078ea8de978254b1e26e8430c", "score": "0.66370904", "text": "def dijkstra_shortest_path(grid_obs, source, dest):\n\n direction = [21, -1, -21, 1]\n outer_neighbors = [42, -2, -42, 2]\n vertexdict = dict()\n unvisited = []\n for i in range(len(grid_obs)):\n if grid_obs[i] != 'air': #<----------- Add things to avoid 
here\n vertexdict[i] = [1, 999, -999] #key = index, value = (cost, shortest dist from start, prev vert)\n unvisited.append(i) #add to unvisited list\n\n #set source vertex cost and shortest_dist_from_start to 0\n if source in vertexdict:\n vertexdict[source][0] = 0\n vertexdict[source][1] = 0\n else:\n return np.zeros(99)\n\n while len(unvisited) != 0:\n #find curVert - lowest shortest dist vertex\n lowestDist = float('inf')\n curVert = None\n for i in unvisited:\n if vertexdict[i][1] < lowestDist:\n curVert = i\n lowestDist = vertexdict[i][1]\n\n #examine neighbors of curVert\n for i in range(len(direction)):\n adjVert = curVert + direction[i]\n furtherVert = curVert + outer_neighbors[i]\n if adjVert in unvisited:\n #newcost = (cost of adjVert) + (shortest dist from curVert)\n newCost = vertexdict[adjVert][0] + vertexdict[curVert][1]\n if newCost < vertexdict[adjVert][1]:\n vertexdict[adjVert][1] = newCost\n vertexdict[adjVert][2] = curVert\n if furtherVert in unvisited:\n newCost = vertexdict[furtherVert][0] + vertexdict[curVert][1] + 1\n if newCost < vertexdict[furtherVert][1]:\n vertexdict[furtherVert][1] = newCost\n vertexdict[furtherVert][2] = curVert\n unvisited.remove(curVert)\n\n backtrack = dest\n path_list = []\n path_list.append(dest)\n while backtrack != source:\n path_list.insert(0, vertexdict[backtrack][2])\n backtrack = vertexdict[backtrack][2]\n return path_list", "title": "" }, { "docid": "c4eea4a222c33c2db69f256a4c88261e", "score": "0.66312426", "text": "def aStarSearch(problem, heuristic=nullHeuristic):\n startNode = (problem.getStartState(), [])\n frontier = util.PriorityQueue()\n \"Priority is g(n) + h(n), where g(n) is cost and h(n) the heuristic\"\n frontier.push(startNode, heuristic(startNode[0], problem))\n \"Expanded is a set for faster search. Insert only the state, to avoid visiting already visited nodes\"\n expanded = set()\n\n\n while not frontier.isEmpty():\n node = frontier.pop()\n if problem.isGoalState(node[0]):\n return node[1]\n if not (node[0] in expanded):\n expanded.add(node[0])\n children = problem.expand(node[0])\n for x in children:\n tempPath = list(node[1])\n tempPath.append(x[1])\n frontier.push((x[0], tempPath), problem.getCostOfActionSequence(tempPath) + heuristic(x[0], problem)) \n util.raiseNotDefined()", "title": "" }, { "docid": "4d3a7ba3799d7ad21c3eff9f23ad29b6", "score": "0.6625922", "text": "def find_path(self, graph, start, end, heuristic_fn):\n \n # It starts off very similiar to Dijkstra. However, we will need to lookup\n # nodes in the _open list before. There can be thousands of nodes in the _open\n # list and any unordered search is too expensive, so we trade some memory usage for\n # more consistent performance by maintaining a dictionary (O(1) lookup) between\n # vertices and their nodes. \n _open_lookup = {}\n _open = []\n closed = set()\n \n # We require a bit more information on each node than Dijkstra\n # and we do slightly more calculation, so the heuristic must\n # prune enough nodes to offset those costs. In practice this\n # is almost always the case if their are any large _open areas\n # (nodes with many connected nodes).\n \n # Rather than simply expanding nodes that are on the _open list\n # based on how close they are to the start, we will expand based\n # on how much distance we predict is between the start and end \n # node IF we go through that parent. 
That is a combination of \n # the distance from the start to the node (which is certain) and\n # the distance from the node to the end (which is guessed).\n \n # We use the counter to enforce consistent ordering between nodes\n # with the same total predicted distance.\n \n counter = 0 \n heur = heuristic_fn(graph, start, end)\n _open_lookup[start] = {'vertex': start,\n 'dist_start_to_here': 0,\n 'pred_dist_here_to_end': heur,\n 'pred_total_dist': heur,\n 'parent': None}\n heapq.heappush(_open, (heur, counter, start))\n counter += 1\n \n while len(_open) > 0:\n current = heapq.heappop(_open)\n current_vertex = current[2]\n current_dict = _open_lookup[current_vertex]\n del _open_lookup[current_vertex]\n closed.update(current_vertex)\n \n if current_vertex == end:\n return self.reverse_path(current_dict)\n \n neighbors = graph.graph[current_vertex]\n for neighbor in neighbors:\n if neighbor in closed:\n # If we already expanded it it's definitely not better\n # to go through this node, or we would have expanded this\n # node first.\n continue\n \n cost_start_to_neighbor = current_dict['dist_start_to_here'] \\\n + graph.get_edge_weight(current_vertex, neighbor)\n # avoid searching twice\n neighbor_from_lookup = _open_lookup.get(neighbor, None)\n if neighbor_from_lookup is not None:\n # If our heuristic is NOT consistent or the grid is NOT uniform,\n # it is possible that there is a better path to a neighbor of a \n # previously expanded node. See above, ctrl+f \"river example neighbors\"\n \n # Note that the heuristic distance from here to end will be the same for\n # both, so the only difference will be in start->here through neighbor\n # and through the old neighbor.\n \n old_dist_start_to_neighbor = neighbor_from_lookup['dist_start_to_here']\n \n if cost_start_to_neighbor < old_dist_start_to_neighbor:\n pred_dist_neighbor_to_end = neighbor_from_lookup['pred_dist_here_to_end']\n pred_total_dist_through_neighbor_to_end = cost_start_to_neighbor + pred_dist_neighbor_to_end\n # Note, we've already shown that neighbor (the vector) is already in the _open list,\n # but unfortunately we don't know where and we have to do a slow lookup to fix the \n # key its sorting by to the new predicted total distance.\n \n # In case we're using a fancy debugger we want to search in user-code so when \n # this lookup freezes we can see how much longer its going to take.\n found = None\n for i in range(0, len(_open)):\n if _open[i][2] == neighbor:\n found = i\n break\n assert(found is not None)\n # TODO: I'm not certain about the performance characteristics of doing this with heapq, nor if\n # TODO: it would be better to delete heapify and push or rather than replace\n\n _open[found] = (pred_total_dist_through_neighbor_to_end, counter, neighbor)\n counter += 1\n heapq.heapify(_open)\n _open_lookup[neighbor] = {'vertex': neighbor,\n 'dist_start_to_here': cost_start_to_neighbor, \n 'pred_dist_here_to_end': pred_dist_neighbor_to_end, \n 'pred_total_dist': pred_total_dist_through_neighbor_to_end, \n 'parent': current_dict}\n continue\n\n # We've found the first possible way to the path!\n pred_dist_neighbor_to_end = heuristic_fn(graph, neighbor, end)\n pred_total_dist_through_neighbor_to_end = cost_start_to_neighbor + pred_dist_neighbor_to_end\n heapq.heappush(_open, (pred_total_dist_through_neighbor_to_end, counter, neighbor))\n _open_lookup[neighbor] = {'vertex': neighbor,\n 'dist_start_to_here': cost_start_to_neighbor,\n 'pred_dist_here_to_end': pred_dist_neighbor_to_end,\n 'pred_total_dist': 
pred_total_dist_through_neighbor_to_end,\n 'parent': current_dict}\n \n return None", "title": "" }, { "docid": "79442350a3b6cf835ee6f223922bc889", "score": "0.6611269", "text": "def dijkstra(G, s):\n\n shortest_path = [INF for i in range(len(G))]\n shortest_path[s] = 0\n\n visited_nodes = set([s])\n\n while len(visited_nodes) < len(G):\n # choose an edge using dijkstra greedy score\n dgreedy_min = INF\n src = s\n dst = s\n need_break = True\n for i in visited_nodes:\n for edge in G[i]:\n if shortest_path[i] + edge[\"COST\"] < dgreedy_min and edge[\"DEST\"] not in visited_nodes:\n dgreedy_min = shortest_path[i] + edge[\"COST\"]\n src = i\n dst = edge[\"DEST\"]\n need_break = False\n\n if not need_break:\n # put shortest path cost for current node\n shortest_path[dst] = dgreedy_min\n visited_nodes.add(dst)\n else:\n break\n\n return shortest_path", "title": "" }, { "docid": "d530ff25290ab928756e4c9d8698cc46", "score": "0.6589445", "text": "def graph_a_star(graph, h, start, goal):\n\n path = []\n path_cost = 0\n queue = PriorityQueue()\n queue.put((0, start))\n visited = set(start)\n\n branch = {}\n found = False\n\n while not queue.empty():\n item = queue.get()\n current_node = item[1]\n if current_node == start:\n current_cost = 0.0\n else:\n current_cost = branch[current_node][0]\n\n if current_node == goal:\n print('Found a path.')\n found = True\n break\n else:\n for next_node in graph[current_node]:\n cost = graph.edges[current_node, next_node]['weight']\n branch_cost = current_cost + cost\n queue_cost = branch_cost + h(next_node, goal)\n\n if next_node not in visited:\n visited.add(next_node)\n branch[next_node] = (branch_cost, current_node)\n queue.put((queue_cost, next_node))\n\n\n if found:\n # retrace steps\n n = goal\n path_cost = branch[n][0]\n path.append(goal)\n while branch[n][1] != start:\n path.append(branch[n][1])\n n = branch[n][1]\n path.append(branch[n][1])\n else:\n print('**********************')\n print('Failed to find a path!')\n print('**********************')\n return path[::-1], path_cost", "title": "" }, { "docid": "3a208959306ede2e478a798128ef9323", "score": "0.6522947", "text": "def find_shortest_path(graph):\n comm = MPI.COMM_WORLD\n num_processes = comm.Get_size()\n rank = comm.Get_rank()\n\n if rank == 0:\n levels, nodes, distances, weights = parse_graph_repr(graph)\n start_time = MPI.Wtime()\n # How many nodes each process receives\n nodes_per_proc = math.ceil(nodes/(num_processes - 1))\n for level in range(2, levels + 2):\n node_index = 0\n destination = 1\n while(node_index < nodes):\n nodes_to_compute = range(node_index, node_index + nodes_per_proc)\n message = [distances[level -1], weights[level - 1], nodes_to_compute]\n comm.send(True, dest = destination, tag = SYNC_LEVEL)\n comm.send(message, dest = destination)\n destination += 1\n node_index += nodes_per_proc\n \n distances[level] = []\n for worker_process in range(1, destination):\n shortest_paths = comm.recv(source = worker_process)\n for key in shortest_paths:\n distances[level].append(shortest_paths[key])\n # Tells other processes to stop waiting for new tasks\n for node in range(1, num_processes):\n comm.send(False, dest = node, tag = SYNC_LEVEL)\n end_time = MPI.Wtime()\n print(end_time - start_time)\n else:\n # All process wait for requests from master process\n new_level = comm.recv(source = 0, tag = SYNC_LEVEL)\n while(new_level):\n # Receives from master process all data needed to find the shortest path\n # to node next_level_node\n message = comm.recv(source = 0)\n distances = 
message[0]\n weights = message[1]\n next_level_nodes = message[2]\n shortest_distances = {}\n for next_level_node in next_level_nodes:\n local_distances = []\n for node in range(0, len(weights[0])):\n dist = distances[node] + weights[node][next_level_node]\n local_distances.append(dist)\n shortest_distances[next_level_node] = min(local_distances)\n # Computes the shorstest path to next_level_node and sends it back\n comm.send(shortest_distances, dest = 0)\n new_level = comm.recv(source = 0, tag = SYNC_LEVEL)", "title": "" }, { "docid": "78fb25e3a1f97bf0630d184ad0799ded", "score": "0.6508275", "text": "def dijkstras_shortest_path(initial_position, destination, graph, adj, visited_nodes):\n initial_box = find_box(initial_position, graph)\n destination_box = find_box(destination, graph)\n distances = {initial_box: 0} # Table of distances to cells \n previous_cell = {initial_box: None} # Back links from cells to predecessors\n queue = [(0, initial_box)] # The heap/priority queue used\n\n # Initial distance for starting position\n distances[initial_position] = 0\n \n while queue:\n # Continue with next min unvisited node\n current_distance, current_box = heappop(queue)\n print (current_distance == distances[current_box])\n #print (\"cur_node: \" +str(current_box))\n \n # Early termination check: if the destination is found, return the path\n if current_box == destination_box:\n node = destination_box\n path = []\n \n prev_point = initial_position\n while node is not None:\n line_end = (0,0)\n line_start = (0,0)\n #print (str(initial_box) + \" \" + str(destination_box) + \" \" + str(previous_cell[node]))\n if destination_box == initial_box:\n #print(\"single box\")\n line_start = initial_position\n line_end = destination\n elif previous_cell[node] != None or previous_cell[node] == initial_box:\n \n if node == destination_box:\n #print(\"destination box\")\n line_start = destination\n line_end = next_point(destination, node, previous_cell[node])\n else:\n #print(\"the rest\")\n line_start = prev_point\n line_end = next_point(prev_point, node, previous_cell[node])\n else:\n #print(\"initial box\")\n line_start = prev_point\n line_end = initial_position\n \n visited_nodes.append(node)\n path.append((line_start,line_end))\n prev_point = line_end \n node = previous_cell[node]\n #print (\"djtra: \" + str(path))\n #print(\"path: \" + str(path))\n return (path[::-1], visited_nodes) \n\n # Calculate tentative distances to adjacent cells\n for adjacent_node, edge_cost in adj(graph, current_box):\n new_distance = current_distance + heuristic(destination_box, adjacent_node)\n\n if adjacent_node not in distances or new_distance < distances[adjacent_node]:\n # Assign new distance and update link to previous cell\n distances[adjacent_node] = new_distance\n previous_cell[adjacent_node] = current_box\n heappush(queue, (new_distance, adjacent_node))\n \n # Failed to find a path\n print(\"Failed to find a path from\", initial_position, \"to\", destination)\n return None", "title": "" }, { "docid": "caac5b76450ade4c94be9fcf520919ab", "score": "0.64940196", "text": "def run_dijkstra(argv_graph, argv_source, argv_target, argv_weight_edge):\n current_distance, current_path = networkx.single_source_dijkstra(\n G=argv_graph,\n source=argv_source,\n target=argv_target,\n weight=argv_weight_edge\n )\n return current_distance, current_path", "title": "" }, { "docid": "abfd88b2b936217a32b399c74a72fc42", "score": "0.64717966", "text": "def shortestPath(self):\n\t\tdist = [float('inf')] * (self.size+1)\n\t\tdist[1] = 
0\n\t\tfor v in range(1,self.size):\n\t\t\tfor u in self.graph[v]:\n\t\t\t\tif u == None:\n\t\t\t\t\tbreak\n\t\t\t\tif dist[u] > (dist[v] + 1):\n\t\t\t\t\tdist[u] = dist[v] + 1\n\t\treturn dist[self.size]", "title": "" }, { "docid": "782e9b78d30ccd5a4d2f16eb033647fd", "score": "0.64639467", "text": "def single_source_dijkstra(G, source, target=None, cutoff=None, weight='weight',start_time='00:00:00'):\n if source == target:\n return ({source: 0}, {source: [(source,None,None,None)]})\n if cutoff is not None:\n cutoff = get_sec(cutoff)\n \n\n\n paths = {source: [(source,None,None,None)]} # dictionary of paths\n return _dijkstra(G, source, paths=paths, cutoff=cutoff,\n target=target,time_start=start_time)", "title": "" }, { "docid": "6610334d1a66a457d334d9aac6dc20d4", "score": "0.6456011", "text": "def find_shortest_dijkstra_route(graph, journey):\n all_paths = dict(nx.all_pairs_dijkstra_path(graph))\n all_lengths = dict(nx.all_pairs_dijkstra_path_length(graph))\n\n if len(all_paths) != len(all_lengths):\n print(\"Path count is not equal to path length count, \"\n \"maybe some links are missing a weight?\")\n return False\n\n shortest_path = []\n \n for destination, path in all_paths[journey[0]].items():\n\n # If all nodes in our journey are in the current path being checked\n if all(node in path for node in journey):\n\n if (len(shortest_path) == 0) or (len(path) < len(shortest_path)):\n shortest_path = path\n\n\n total = 0\n for section in shortest_path:\n total += len(section) - 1\n\n print(\"\\nShortest dijkstra journey: {} connection(s)\".format(total))\n if len(shortest_path) < 1:\n print(\"No shortest dijkstra path found!\\n\")\n return False\n\n else:\n print(\"{} hop(s) {}\\n\".format(len(shortest_path) - 1, shortest_path))\n return shortest_path", "title": "" }, { "docid": "218e1c83a7768b1b645b39769b47c692", "score": "0.64454645", "text": "def isPath(self, v, w): #O(n^2)\n if (v not in self.graph.keys()) or (w not in self.graph.keys()): #Raise ValueError it start node or target node isn't in the graph\n raise ValueError(\"One of the nodes was not found in the graph\")\n toVisit = deque([])\n visited = []\n toVisit.append(v)\n while len(toVisit) != 0:\n workingNode = toVisit.popleft()\n if w in self.graph[workingNode]: #If the target node is in the list of nodes the working node is connected to\n visited.append(workingNode) #Add working node to visited\n visited.append(w) #Add target node to visited\n break \n elif workingNode not in visited:\n visited.append(workingNode)\n for edge in self.graph[workingNode]:\n toVisit.append(edge)\n if w in visited:\n fileName = str(datetime.datetime.today()) + \"Shortest_Path.txt\"\n f = open(fileName, \"w+\")\n for i in visited:\n f.write(str(i) + \" \")\n f.close()\n return True\n else:\n return False", "title": "" }, { "docid": "8193ce28132aa1bb72007c06c5ed4950", "score": "0.64427286", "text": "def shortest(visited, paths):\n shortest = float(\"inf\")\n index = -1\n for i, path in enumerate(paths):\n if not visited[i] and path < shortest:\n index = i\n shortest = path\n return index", "title": "" }, { "docid": "9c04022b8084799a37db3c7e60a32cc2", "score": "0.6425694", "text": "def searchShortestPath(self,graph,start,to,path=[]):\n path=path+[start] # creates a new list by same name as path instead of appending to original\n if start==to:\n return path\n shortest=None\n if start not in graph:\n return None\n else:\n for node in graph[start]: \n if node not in path:\n #print(path)# to take care of already covered nodes/cycles using this if so have 
an if which searches\n\n val=self.searchShortestPath(graph,node,to,path) \n if val:\n if((shortest==None)):\n shortest=val\n elif(len(val)<len(shortest)):\n shortest=val\n return shortest", "title": "" }, { "docid": "99f6f0fb6704387a03e56317400d920b", "score": "0.64230776", "text": "def heuristic(graph, curr_node):\n distances = [i[1] for i in graph.neighbors(curr_node)]\n try:\n return min(distances)\n except ValueError:\n return 0", "title": "" }, { "docid": "9dc90a8a9a6a280e1b445b9f59f25131", "score": "0.64035165", "text": "def dijkstra_predecessor_and_distance(G,source, cutoff=None, weight='weight'):\r\n push=heapq.heappush\r\n pop=heapq.heappop\r\n dist = {} # dictionary of final distances\r\n pred = {source:[]} # dictionary of predecessors\r\n seen = {source:0}\r\n fringe=[] # use heapq with (distance,label) tuples\r\n push(fringe,(0,source))\r\n while fringe:\r\n (d,v)=pop(fringe)\r\n if v in dist: continue # already searched this node.\r\n dist[v] = d\r\n if G.is_multigraph():\r\n edata=[]\r\n for w,keydata in G[v].items():\r\n minweight=min((dd.get(weight,1)\r\n for k,dd in keydata.items()))\r\n edata.append((w,{weight:minweight}))\r\n else:\r\n edata=iter(G[v].items())\r\n for w,edgedata in edata:\r\n vw_dist = dist[v] + edgedata.get(weight,1)\r\n if cutoff is not None:\r\n if vw_dist>cutoff:\r\n continue\r\n if w in dist:\r\n if vw_dist < dist[w]:\r\n raise ValueError('Contradictory paths found:',\r\n 'negative weights?')\r\n elif w not in seen or vw_dist < seen[w]:\r\n seen[w] = vw_dist\r\n push(fringe,(vw_dist,w))\r\n pred[w] = [v]\r\n elif vw_dist==seen[w]:\r\n pred[w].append(v)\r\n return (pred,dist)", "title": "" }, { "docid": "3a29bd57c3580ba7737f80dc53b9587a", "score": "0.63937813", "text": "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n # Node: pos, g, h\n startNode = (problem.getStartState(), 0, 0)\n myList = util.PriorityQueue()\n visit = set()\n myList.push((startNode, [], []), 0) #position, visited, directions, f\n\n \n while not myList.isEmpty(): #If L = empty, then FAIL\n node, visit, path = myList.pop() # else pick a node n from L.\n if problem.isGoalState(node[0]): #If n is a goal node, STOP\n return path #return n and the path to it from an initial node.\n else: #Otherwise, remove n from OPEN\n if node[0] not in visit:\n visit += [node[0]] # put in in CLOSE\n children = problem.getSuccessors(node[0])\n for child in children: #and for all children x of n,\n if child[:][0] not in visit: #if x is not in CLOSE,\n # add x to OPEN and keep path information\n tempG = node[1] + child[2]\n tempH = heuristic(child[0], problem)\n tempF = tempG + tempH\n myList.push(((child[0], tempG, tempH), visit, path + [child[:][1]]), tempF)\n \n print \"ERROR: PATH NOT FOUND\"\n return []", "title": "" }, { "docid": "7731233edd787c81c752d1695d1e40de", "score": "0.638614", "text": "def find_shortest_path(self):\n tree = Dijkstra(self.vertices, self.edges)\n matrix = tree.build_adj_matrix()\n short_path = [int(pnt) for pnt in tree.find_shortest_route(self.origin_id, self.destination_id, matrix)]\n all_path = self.path_to_xy(short_path, self.vertices)\n path_edges_xy = self.path_to_edge_xy(all_path)\n x_vertices_path = [x[0] for x in all_path]\n y_vertices_path = [x[1] for x in all_path]\n for path_line in path_edges_xy:\n self.main_subplot.plot([path_line[0][0], path_line[1][0]], [path_line[0][1], path_line[1][1]],\n color=\"#003D59\",\n linewidth=3)\n self.main_subplot.plot(x_vertices_path, y_vertices_path, \"o\", markersize=7, color=\"#FE6625\")\n 
self.fig_canvas = FigureCanvasTkAgg(self.mainFigure, self.root)\n self.fig_canvas.get_tk_widget().grid(row=3, column=1, rowspan=10, columnspan=10)", "title": "" }, { "docid": "d640c89d78af039d58cc09622243e81e", "score": "0.6367492", "text": "def find_shortest_path(G, D, P, s, f):\n dijkstra(G, D, P, s)\n path = [f]\n v = P[f]\n while v is not None:\n path.append(v)\n v = P[v]\n path.reverse()\n return path", "title": "" }, { "docid": "71fa290b7628d0a50b77530a41c34b31", "score": "0.63570195", "text": "def aStarSearch(problem, heuristic=nullHeuristic):\n # priority queue to keep track of where our agent can move\n frontier = util.PriorityQueue()\n # start node contains the start state, an empty list representing the actions its taken thusfar\n startNode = (problem.getStartState(), [])\n # push node onto the stack with a prio of 0 (arbitrary bc start)\n frontier.push(startNode, 0)\n\n # holds the states visited so we dont end up in an infinite loop\n expanded = []\n\n while not frontier.isEmpty():\n node = frontier.pop()\n # the current state of our node\n currentState = node[0]\n # the path we took to get to our current state\n actionPath = node[1]\n # if we are at the goal, then return path to the goal\n if problem.isGoalState(currentState):\n return actionPath\n # ensure that we are not double visiting a node\n if currentState not in expanded:\n # add the current state to our expanded so we dont go over it more than once\n expanded.append(currentState)\n # children is a list of nodes connected to our currentState in format (child, action, stepCost)\n children = problem.expand(currentState)\n for child in children:\n # throw node on stack in form new state, newPath, stepCost\n newPath = actionPath + [child[1]]\n childState = child[0]\n childNode = (childState, newPath)\n\n # g(n) -- the observed cost from startState to the child state\n cost = problem.getCostOfActionSequence(newPath)\n # g(n) + h(n) -- take g(n) and add h(n) which is the heuristic estimate of child state to goal\n cost += heuristic(childState, problem)\n\n frontier.push(childNode, cost)\n # if we get through the entire search frontier without reaching the goal, return None\n return None", "title": "" }, { "docid": "a89ca0b5e173339dfb98ccf8381be1e6", "score": "0.6356788", "text": "def FindPath(self, start, goal, path=[]):\n node, pathCost = start, 0\n frontier = util.Queue()\n visited = set()\n\n if start == goal:\n return path\n\n while node != goal:\n successors = [(edge.target, edge) for edge in self.GetEdges(node)]\n for successor, edge in successors:\n residual = edge.capacity - self.flow[edge]\n intPath = (edge, residual)\n if residual > 0 and not intPath in path and intPath not in visited:\n visited.add(intPath)\n frontier.push((successor, path + [(edge, residual)], pathCost + 1))\n\n if frontier.isEmpty():\n return None\n else:\n node, path, pathCost = frontier.pop()\n\n return path", "title": "" }, { "docid": "37a61ca057ce0058d4b2316bd48f0f99", "score": "0.6348699", "text": "def shortestPath(graph, start, end, path=[]):\n\n path = path + [start]\n if start == end:\n return path\n if start not in graph:\n return None\n shortest = None\n for node in graph[start]:\n if node not in path:\n newpath = shortestPath(graph, node, end, path)\n if newpath:\n if not shortest or len(newpath) < len(shortest):\n shortest = newpath\n return shortest", "title": "" }, { "docid": "e97c00d61d3e71c07a4d6bdcc1fd97bc", "score": "0.6343977", "text": "def test_get_shortest_paths_graph(self):\n shortest_path_graph = 
self.mapped_network.copy()\n shortest_path_graph.delete_vertices([2, 6, 7, 8])\n shortest_path_graph.simplify(combine_edges=max)\n shortest_path_graph.delete_vertices(\n shortest_path_graph.vs.select(_degree_eq=0))\n eid = shortest_path_graph.get_eid(\"0\", \"3\")\n shortest_path_graph.delete_edges(eid)\n weights = list(1 - np.array(shortest_path_graph.es['weight']))\n shortest_path_graph.es['weight'] = weights\n\n n = Network(\n self.interact_network,\n max_adj_p=0.05,\n max_l2fc=-1,\n min_l2fc=1,\n )\n n.set_up_network(self.protein_list)\n\n fn = FilteredNetwork(n)\n shortest_path_graph_to_test = fn.get_shortest_paths_graph()\n\n self.__check_for_graph_eq(shortest_path_graph,\n shortest_path_graph_to_test)", "title": "" }, { "docid": "715930dda65138632ba9b60a613c3ebe", "score": "0.6340343", "text": "def ida_star_search(vertex, goal, discovered, bound, heuristic_function):\n f = heuristic_function(vertex)\n if f > bound:\n return f, None\n if vertex.current == goal:\n return True, vertex\n min = math.inf\n new = expand_fringe(vertex)\n for n in new:\n if n not in discovered:\n discovered.append(n)\n t, solving_n = ida_star_search(n, goal, discovered, bound, heuristic_function)\n if isinstance(t, bool) and t:\n return True, solving_n\n if t < min:\n min = t\n discovered.remove(n)\n return min, None", "title": "" }, { "docid": "6af600252a08ccc7462cae8f82f48bdf", "score": "0.6336181", "text": "def aStar(start, goal, neighbor_func, distance_func, heuristic_func):\n pqueue = PriorityQueue()\n g_costs = {start : 1}\n parents = {start : start}\n \n pqueue.push(heuristic_func(start, goal), start)\n while not pqueue.isEmpty():\n next_cost, next_node = pqueue.pop()\n g_costs[next_node] = g_costs[parents[next_node]] \\\n + distance_func(next_node, parents[next_node])\n if next_node == goal: break\n children = neighbor_func(next_node)\n for child in children:\n updateChild(goal, distance_func, heuristic_func,\n child, next_node, parents, g_costs, pqueue)\n return getPathToGoal(start, goal, parents)", "title": "" }, { "docid": "37702ff4d47e7b0f1adf3984b61d1f1a", "score": "0.6333605", "text": "def astar_search(problem, h=None):\n h = memoize(h or problem.h, 'h')\n return best_first_graph_search(problem, lambda n: n.path_cost + h(n))", "title": "" }, { "docid": "1c44282a9e53a8eca15c9f998fedea5a", "score": "0.63241047", "text": "def AStar_search(problem, heuristic=nullHeuristic):\n\n frontier = util.PriorityQueue()\n startNode = Node(problem.getStartState(),None,0,None,0)\n frontier.push(startNode,heuristic(startNode.state,problem))\n explored_set = set()\n while True:\n Curr_node = frontier.pop()\n Curr_state = Curr_node.state\n if problem.isGoalState(Curr_state):\n path = []\n while Curr_node.depth != 0:\n path.insert(0,Curr_node.state)\n Curr_node = Curr_node.parent_node\n path.insert(0,startNode.state)\n return path\n elif Curr_state in explored_set:\n continue \n else:\n explored_set.add(Curr_state)\n new_frontiers = problem.getSuccessors(Curr_state)\n for transition in new_frontiers: \n if not transition[0] in explored_set:\n len(explored_set)\n new_node = Node(transition[0],transition[1],transition[2]+Curr_node.path_cost,Curr_node,Curr_node.depth+1)\n frontier.push(new_node,heuristic(new_node.state,problem)+new_node.path_cost)\n\n \n util.raiseNotDefined()", "title": "" }, { "docid": "8caac4d8e858cc5cf2fb463d3d135032", "score": "0.63137025", "text": "def dijkstra(self, start, target): #O(n^2)\n if (start not in self.graph.keys()) or (target not in self.graph.keys()): #Checks that the starting node 
and target node are in the graph\n raise ValueError(\"One of the nodes was not found in the graph\")\n shortestPath = {} #Node and the weight of the shortest path to that node from the start node\n fromNode = {} #Node and the node before that the shortest path comes from\n toVisit = self.graph.copy() #Copy of the graph to keep a list of nodes that need visiting\n for i in toVisit:\n shortestPath[i] = math.inf #Initialise every nodes shortest path to infinity\n shortestPath[start] = 0 #Set the start node's shortest path to 0\n while toVisit:\n minNode = None\n for node in toVisit: #Iterates over all nodes in toVisit and selects one with the shortest weight in shortest path\n if minNode == None: #First case where node == None\n minNode = node\n elif shortestPath[node] < shortestPath[minNode]:\n minNode = node\n for toNode, weight in toVisit[minNode].items(): #Looks at all the nodes that the current node can get to \n if (weight + shortestPath[minNode]) < shortestPath[toNode]: #If the weight to the node + the weight to get to the current node is less than its current path\n shortestPath[toNode] = (weight + shortestPath[minNode]) #Update the shortest path to the weight to the node + the weight to get to the current node\n fromNode[toNode] = minNode #Update which node you came from to get to that node\n toVisit.pop(minNode) #Remove the current node from the list to visit\n path = []\n workingNode = target\n while workingNode != start: #Iterate over every node's path node to get path\n path.insert(0, workingNode) #Inserts current node at the start of the list\n workingNode = fromNode[workingNode] #Set the next node to the one before\n path.insert(0, start)\n cost = shortestPath[target]\n return path, cost", "title": "" }, { "docid": "6eb8bc02bad0ec6638007d37f7327c2d", "score": "0.6310573", "text": "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n start_state = problem.getStartState()\n if problem.isGoalState(start_state):\n return []\n class Node:\n def __init__(self, state, direction = None, total_cost = 0, parent_node = None):\n self.state = state\n self.direction = direction\n self.total_cost = total_cost\n self.parent_node = parent_node\n self._successor = None\n def successor_nodes(self, visited_states):\n if self._successor != None:\n self._successor = [x for x in self._successor if x.state not in visited_states]\n else: \n self._successor = [Node(x[0], direction=x[1], total_cost=self.total_cost + x[2], parent_node=self) \\\n for x in problem.getSuccessors(self.state) if x[0] not in visited_states]\n return self._successor\n def __str__(self):\n return f\"({self.state}, {self.direction}, {self.total_cost}, {self.parent_node != None})\"\n def __repr__(self):\n return str(self)\n\n visited_states = set()\n best_goal_node = None\n # the priority queue unlike uniform cost search uses the estimated cost to reach the goal\n # by adding the real cost needed to reach the node plus a heuristic estimate\n opened_que = util.PriorityQueueWithFunction(lambda x: x.total_cost + heuristic(x.state, problem)) \n opened_que.push(Node(start_state)) # start node is added\n \n while not opened_que.isEmpty(): # the entire graph will be searched until the cost of remaining nodes are too high\n node = opened_que.pop() # the node with the best estimate is popped\n if node.state in visited_states: # nodes already visited are skiped\n continue\n visited_states.add(node.state) # current node is visited\n # if current node has a worst estimate than the best path for the goal, then the 
search loop stops\n if best_goal_node != None and node.total_cost + heuristic(node.state, problem) >= best_goal_node.total_cost:\n break\n successor_nodes = node.successor_nodes(visited_states)\n\n for s_node in successor_nodes:\n # the goal state with the best cost is chosen if the node is a goal state\n if problem.isGoalState(s_node.state) and (best_goal_node == None or s_node.total_cost < best_goal_node.total_cost):\n best_goal_node = s_node\n else:\n opened_que.push(s_node) # non goal nodes added to the priority queue\n\n # unwind the path generated by the best_goal_node\n movements = []\n curr_node = best_goal_node\n while curr_node.parent_node != None:\n movements.append(curr_node.direction)\n curr_node = curr_node.parent_node\n movements.reverse()\n return movements\n # util.raiseNotDefined()", "title": "" }, { "docid": "15b8f7000e4de51fd65445622ae13d10", "score": "0.6299237", "text": "def dijkstra(graph,src,dest,visited,distances,predecessors): \n # a few sanity checks\n if src not in graph:\n raise TypeError('the root of the shortest path tree cannot be found in the graph')\n if dest not in graph:\n raise TypeError('the target of the shortest path cannot be found in the graph') \n # ending condition\n if src == dest:\n # We build the shortest path and display it\n if dest not in distances:\n distances[dest] = 0\n path=[]\n pred=dest\n while pred != None:\n path.append(pred)\n pred=predecessors.get(pred,None)\n pathList = []\n pathList.append(str(path))\n pathList.append(distances[dest])\n return pathList\n\n else : \n # if it is the initial run, initializes the cost\n if not visited: \n distances[src]=0\n # visit the neighbors\n for neighbor in graph[src] :\n if neighbor not in visited:\n new_distance = distances[src] + graph[src][neighbor]\n if new_distance < distances.get(neighbor,float('inf')):\n distances[neighbor] = new_distance\n predecessors[neighbor] = src\n # mark as visited\n visited.append(src)\n # now that all neighbors have been visited: recurse \n # select the non visited node with lowest distance 'x'\n # run Dijskstra with src='x'\n unvisited={}\n for k in graph:\n if k not in visited:\n unvisited[k] = distances.get(k,float('inf')) \n x=min(unvisited, key=unvisited.get)\n return dijkstra(graph,x,dest,visited,distances,predecessors)", "title": "" }, { "docid": "e6f3e458797785ccc5ab427ec1939a4f", "score": "0.6289656", "text": "def get_best_path(roadmap, start, end, restricted_roads, to_neighbor = False):\r\n\r\n \r\n # Write Dijkstra implementation here\r\n\r\n # PROBLEM 4c: Handle the to_neighbor = True case here\r\n\r\n #def get_best_path(roadmap, start, end, restricted_roads, to_neighbor = False):\r\n\r\n #if either start or end is not a valid node, return None\r\n if not roadmap.has_node(start) or not roadmap.has_node(end):\r\n return None\r\n #if start and end are the same node, return ([], 0) # Empty path with 0 travel time\r\n if start == end:\r\n return ([], 0)\r\n\r\n #Label every node as unvisited\r\n unvisited = roadmap.get_all_nodes()\r\n distanceTo = {node: float('inf') for node in roadmap.get_all_nodes()}\r\n distanceTo[start] = 0\r\n # Mark all nodes as not having found a predecessor node on path\r\n #from start\r\n predecessor = {node: None for node in roadmap.get_all_nodes()}\r\n\r\n while unvisited:\r\n # Select the unvisited node with the smallest distance from \r\n # start, it's current node now.\r\n current = min(unvisited, key=lambda node: distanceTo[node])\r\n\r\n # Stop, if the smallest distance \r\n # among the unvisited nodes is infinity.\r\n 
if distanceTo[current] == float('inf'):\r\n break\r\n\r\n # Find unvisited neighbors for the current node \r\n # and calculate their distances from start through the\r\n # current node.\r\n\r\n #iterate thru roads starting from current node\r\n for neighbour_road in roadmap.get_roads_for_node(current):\r\n\r\n #add road's time to total time\r\n alternativePathDist = distanceTo[current] + neighbour_road.get_total_time() #hops as distance\r\n\r\n # Compare the newly calculated distance to the assigned. \r\n # Save the smaller distance and update predecssor.\r\n if alternativePathDist < distanceTo[neighbour_road.get_destination()]:\r\n if neighbour_road.get_type() in restricted_roads:\r\n distanceTo[neighbour_road.get_destination()] = float('inf')\r\n else:\r\n distanceTo[neighbour_road.get_destination()] = alternativePathDist\r\n predecessor[neighbour_road.get_destination()] = current\r\n\r\n # Remove the current node from the unvisited set.\r\n unvisited.remove(current)\r\n \r\n #Attempt to be build a path working backwards from end\r\n path = []\r\n current = end\r\n while predecessor[current] != None:\r\n path.insert(0, current)\r\n current = predecessor[current]\r\n if path != []:\r\n path.insert(0, current)\r\n else:\r\n return None\r\n\r\n best_time = distanceTo[end]\r\n #get the road between two nodes and add the time of that\r\n #no method explicitly for that\r\n #but there is get_roads_for_node\r\n\r\n #return a tuple\r\n return (path, best_time)", "title": "" }, { "docid": "ffedf68b0d5e82859ba894db78dd300b", "score": "0.62883484", "text": "def find_path(graph,\n start,\n end,\n cost = lambda pos: 1,\n passable = lambda pos: True,\n heuristic = helper.manhattan_dist):\n # tiles to check (tuples of (x, y), cost)\n todo = pqueue.PQueue()\n todo.update(start, 0)\n \n # tiles we've been to\n visited = set()\n \n # associated G and H costs for each tile (tuples of G, H)\n costs = { start: (0, heuristic(start, end)) }\n \n # parents for each tile\n parents = {}\n \n while todo and (end not in visited):\n todo.tie_breaker = lambda a,b: better_tile(a, b, start, end)\n \n cur, c = todo.pop_smallest()\n visited.add(cur)\n \n # check neighbours\n for n in graph.neighbours(cur):\n # skip it if we've already checked it, or if it isn't passable\n if ((n in visited) or\n (not passable(n))):\n continue\n \n if not (n in todo):\n # we haven't looked at this tile yet, so calculate its costs\n g = costs[cur][0] + cost(cur)\n h = heuristic(n, end)\n costs[n] = (g, h)\n parents[n] = cur\n todo.update(n, g + h)\n else:\n # if we've found a better path, update it\n g, h = costs[n]\n new_g = costs[cur][0] + cost(cur)\n if new_g < g:\n g = new_g\n todo.update(n, g + h)\n costs[n] = (g, h)\n parents[n] = cur\n \n # we didn't find a path\n if end not in visited:\n return []\n \n # build the path backward\n path = []\n while end != start:\n path.append(end)\n end = parents[end]\n path.append(start)\n path.reverse()\n \n return path", "title": "" }, { "docid": "14281caf2bbe26c2a06c1ec2adc2edf6", "score": "0.62715036", "text": "def shortestPath(self, origin, destination, edgeCost=lambda src,dst: 1, \\\n\t\t\t\theuristic=lambda src,dst: 0):\n\t\tqueue = []\n\t\tvisited = set()\n\t\tpathCosts = {origin:0}\n\t\tparents = {origin:None}\n\t\theappush(queue, (0, origin))\n\t\twhile queue:\n\t\t\tpriority, node = heappop(queue)\n\t\t\tif node == destination:\n\t\t\t\tbreak\n\t\t\tif node in visited:\n\t\t\t\tcontinue\n\t\t\tvisited.add(node)\n\t\t\tfor neighbor in self.edges[node]:\n\t\t\t\tif neighbor in 
visited:\n\t\t\t\t\tcontinue\n\t\t\t\tnewPathCost = pathCosts[node] + edgeCost(node, neighbor)\n\t\t\t\tif neighbor in pathCosts:\n\t\t\t\t\tif pathCosts[neighbor] <= newPathCost:\n\t\t\t\t\t\tcontinue\n\t\t\t\tparents[neighbor] = node\n\t\t\t\tpathCosts[neighbor] = newPathCost\n\t\t\t\theappush(queue, (heuristic(neighbor, destination) + \\\n\t\t\t\t\t\tnewPathCost, neighbor))\n\t\tif node != destination:\n\t\t\traise PathError()\n\t\tpath = []\n\t\twhile node != None:\n\t\t\tpath = [node] + path\n\t\t\tnode = parents[node]\n\t\treturn path", "title": "" }, { "docid": "e8fcf8fd04aa2c4d374ea9465c508052", "score": "0.62714535", "text": "def find_path(self):\n start = AstarNode(\n None,\n 0,\n self.h(\n self.start,\n self.goal),\n self.start,\n self.start_time)\n open_list = Q.PriorityQueue()\n open_hash = defaultdict()\n open_list.put(start)\n open_hash[(start.f, start.vertex.id)] = True\n closed_list = defaultdict()\n\n while not(open_list.empty()):\n\n # get the current best node\n best_node = open_list.get()\n open_hash.pop((best_node.f, best_node.vertex.id))\n # the following line expands the node and checks if the node is a\n # goal\n if self.is_goal(best_node):\n path = []\n node = best_node\n while node.parent is not None:\n path.append(node.vertex)\n node = node.parent\n path.append(node.vertex)\n return path[::-1]\n\n # expand the node if the node is not the goal and afterwards add to node\n # to closed_list\n self.expand_node(best_node, open_list, closed_list, open_hash)\n\n if open_list.empty():\n return None", "title": "" }, { "docid": "33f187b852733fd2f0941109c9cae1d9", "score": "0.6258173", "text": "def heuristic_neighboors(G, idx_first_node = 0):\n\n num_nodes = G.order()\n dist = 0\n\n min_dist, opt_list_of_nodes = heuristic_shortest_edge(G)\n list_of_nodes = list(opt_list_of_nodes) #copy\n\n for i in range(1, num_nodes):\n for j in [x for x in range(1, num_nodes) if x != i]:\n list_of_nodes = swap(list_of_nodes, i,j)\n path_found = verify_path(G, list_of_nodes)\n\n if path_found is False:\n break\n\n dist = list_of_nodes_to_dist(G, list_of_nodes, min_dist)\n\n if dist < min_dist :\n min_dist = dist\n opt_list_of_nodes = list(list_of_nodes) #copy\n\n list_of_nodes = swap(list_of_nodes, i,j)\n\n return(min_dist, opt_list_of_nodes)", "title": "" }, { "docid": "eeece756afb4771587d8b472616f4606", "score": "0.6241078", "text": "def a_star(start, goal, node_map, win):\n \"\"\"Works by minimising the heuristic score + cost of path from node to node \"\"\"\n start.g_score = 0\n open_set = [start] # records nodes with minimum A* score\n came_from = [] # Records nodes that are selected in the path\n\n while len(open_set) != 0:\n # Select node with lowest f_score\n f_score_list = list(map(lambda node: node.f_score, open_set))\n index = f_score_list.index(min(f_score_list))\n current = open_set[index]\n if current == goal:\n print(\"Success\")\n return reconstruct_path(current, node_map)\n del open_set[index]\n current.draw_node(win, (255, 255, 255))\n # pygame.time.wait(1)\n\n distance_current_neighbour = 8\n current_neighbours = get_neighbours(node_map, current)\n for neighbour in current_neighbours:\n tentative_gscore = current.g_score + distance_current_neighbour\n if tentative_gscore < neighbour.g_score:\n current_index = find_in_list_of_list(node_map, current)\n neighbour.came_from = current\n came_from.append(current_index)\n neighbour.g_score = tentative_gscore\n neighbour.f_score = neighbour.g_score + neighbour.h_value\n if neighbour not in open_set:\n 
open_set.append(neighbour)\n\n return 0", "title": "" }, { "docid": "837fa049b4a4147b8f7106452716d1d4", "score": "0.62349063", "text": "def solve(self):\n for i in np.arange(1, self.graph.objects_number):\n for char in self.graph.mapping:\n char_width = self.graph.alphabet[self.graph.mapping[char]].shape[1]\n path_weights = self.graph.vertex_weights[i - char_width, :] + \\\n self.graph.edge_weights[i - char_width, i, :, char]\n\n self.graph.vertex_weights[i, char] = np.min(path_weights)\n\n self.labels[i, char] = np.argmin(path_weights)\n\n result = ''\n i = self.graph.objects_number - 1\n while i > 0:\n label = np.argmin(self.graph.vertex_weights[i, :])\n result = self.graph.mapping[label] + result\n i = i - self.graph.alphabet[self.graph.mapping[label]].shape[1]\n\n min_path_weight = np.min(self.graph.vertex_weights[-1, :])\n print('Minimum path weight: {}'.format(min_path_weight))\n print('Recognized string: {}'.format(result))\n return result, min_path_weight", "title": "" }, { "docid": "94f9137f910dab03d7c4d807be21b237", "score": "0.6232196", "text": "def find_path(self, graph, start, end, heuristic_fn):\n \n # This algorithm is really just repeating unidirectional A* twice,\n # but unfortunately it's just different enough that it requires \n # even more work to try to make a single function that can be called \n # twice.\n \n \n # Note: The nodes in by_start will have heuristic distance to the end,\n # whereas the nodes in by_end will have heuristic distance to the start.\n # This means that the total predicted distance for the exact same node\n # might not match depending on which side we found it from. However, \n # it won't make a difference since as soon as we evaluate the same node\n # on both sides we've finished.\n #\n # This also means that we can use the same lookup table for both.\n \n open_by_start = []\n open_by_end = []\n open_lookup = {}\n \n closed = set()\n \n # used to avoid hashing the dict.\n counter_arr = [0]\n \n total_heur_distance = heuristic_fn(graph, start, end)\n heapq.heappush(open_by_start, (total_heur_distance, counter_arr[0], start))\n counter_arr[0] += 1\n open_lookup[start] = { 'vertex': start, \n 'parent': None, \n 'source': self.NodeSource.BY_START, \n 'dist_start_to_here': 0,\n 'pred_dist_here_to_end': total_heur_distance,\n 'pred_total_dist': total_heur_distance }\n \n heapq.heappush(open_by_end, (total_heur_distance, counter_arr, end))\n counter_arr[0] += 1\n open_lookup[end] = { 'vertex': end,\n 'parent': None,\n 'source': self.NodeSource.BY_END, \n 'dist_end_to_here': 0,\n 'pred_dist_here_to_start': total_heur_distance,\n 'pred_total_dist': total_heur_distance }\n \n # If the start runs out then the start is in a closed room,\n # if the end runs out then the end is in a closed room,\n # either way there is no path from start to end.\n while len(open_by_start) > 0 and len(open_by_end) > 0:\n result = self._evaluate_from_start(graph, start, end, heuristic_fn, open_by_start, open_by_end, open_lookup, closed, counter_arr)\n if result is not None:\n return result\n \n result = self._evaluate_from_end(graph, start, end, heuristic_fn, open_by_start, open_by_end, open_lookup, closed, counter_arr)\n if result is not None:\n return result\n \n return None", "title": "" }, { "docid": "8c886729ed4bd3f688f1a1f52f8be4ed", "score": "0.6219312", "text": "def a_star_search(problem):\n # sets the intial goal state and initial state\n goal_state = problem.goal_states[0]\n state = problem.init_state\n\n # initializes all of the variables and data structures needed 
for the search\n node = Node(None,state,None,0)\n if goal_state == node.state:\n return [state], 0, 0\n frontier = queue.PriorityQueue() # intializes the frontier of the priority queue\n frontier.put((0,node)) # priority queue will store the path cost and the node as a tuple\n explored = {}\n explored[node.state] = 0 # initializes explored dictionary\n path = False\n max_frontier_size = 0\n num_nodes_expanded = 0\n \n while True:\n if frontier.qsize() == 0: # loops until the frontier is empty\n return [], num_nodes_expanded, max_frontier_size # if frontier is empty --> no solution to the problem\n max_frontier_size = max(max_frontier_size,frontier.qsize())\n node = (frontier.get())[1] # gets the node with the smallest cost\n if node.state == goal_state:\n break\n actions = problem.get_actions(node.state) # gets all of the actions associated with the node\n for action in actions:\n num_nodes_expanded += 1\n child = problem.get_child_node(node,action) # gets child associated with the actions\n if child.state not in explored or explored[child.state] > child.path_cost: # if the child has not been explored yet or if it has and has a higher path cost than what is already in the dictionary\n f = child.path_cost + problem.manhattan_heuristic(child.state,goal_state) # determines the path cost with the heuristic\n frontier.put((f,child))\n explored[child.state] = child.path_cost # updates the data structures accordingly\n\n final_path = [] # determines the final path by working backwards --> same process as breadth and bidirectional searches\n final_path.insert(0,goal_state)\n parent = node.parent\n while(True):\n final_path.insert(0,parent.state)\n if(parent.state == state):\n break\n parent = parent.parent\n\n return final_path, num_nodes_expanded, max_frontier_size", "title": "" }, { "docid": "43512a3cc447caf54d9c297e504b75fb", "score": "0.6216858", "text": "def shortest_path(self, u, v, by_weight=False, algorithm=None,\n weight_function=None, check_weight=True,\n bidirectional=None): # TODO- multiple edges??\n if weight_function is not None:\n by_weight = True\n\n if algorithm is None:\n algorithm = 'Dijkstra_Bid' if by_weight else 'BFS_Bid'\n\n if algorithm in ['BFS', 'Dijkstra_NetworkX', 'Bellman-Ford_Boost']:\n return self.shortest_paths(u, by_weight, algorithm, weight_function, check_weight)[v]\n\n if weight_function is None and by_weight:\n weight_function = lambda e:e[2]\n\n if bidirectional is not None:\n deprecation(18938, \"Variable 'bidirectional' is deprecated and \" +\n \"replaced by 'algorithm'.\")\n\n if u == v: # to avoid a NetworkX bug\n return [u]\n\n\n if by_weight:\n if algorithm == 'BFS_Bid':\n raise ValueError(\"The 'BFS_Bid' algorithm does not \" +\n \"work on weighted graphs.\")\n if check_weight:\n self._check_weight_function(weight_function)\n else:\n weight_function = lambda e:1\n\n if algorithm==\"Dijkstra_Bid\":\n return self._backend.bidirectional_dijkstra(u, v, weight_function)\n elif algorithm==\"Dijkstra_Bid_NetworkX\":\n import networkx\n if self.is_directed():\n G = networkx.DiGraph([(e[0], e[1], dict(weight=weight_function(e))) for e in self.edge_iterator()])\n else:\n G = networkx.Graph([(e[0], e[1], dict(weight=weight_function(e))) for e in self.edge_iterator()])\n G.add_nodes_from(self.vertices())\n return networkx.bidirectional_dijkstra(G, u, v)[1]\n elif algorithm==\"BFS_Bid\":\n return self._backend.shortest_path(u,v)\n else:\n raise ValueError(\"Algorithm '\" + algorithm + \"' not yet implemented.\")", "title": "" }, { "docid": 
"b0ac22fa1e1fe27cfde57abea68dfa1d", "score": "0.6215874", "text": "def dijkstra_shortest_path(graph, s):\n # Create a new sub-area of the graph\n x = Graph(s)\n # Keep track of distances\n A = {s.name: 0}\n while x != graph:\n # Take a subsection of the graph we have not explored\n x_v = graph - x\n assert all(node not in x_v.nodes for node in x.nodes)\n assert all(node not in x.nodes for node in x_v.nodes)\n # Find edges that cross between known and unknown\n edges = graph.find_edges(x.nodes.keys(), x_v.nodes.keys())\n if not edges:\n for node in graph.nodes.values():\n for edge in node.edges:\n if edge.start in x.nodes and edge.finish in x_v.nodes:\n logger.error(edge)\n\n logger.info(\"Found %s edges to unexplored areas\", len(edges))\n # Calculate Dijkstra's greedy criterion\n greedy = [A[edge.start] + edge.length for edge in edges]\n # Pick out the minimum based on the greedy criterion\n vw = edges[greedy.index(min(greedy))]\n assert vw.finish not in A\n assert vw.start in x.nodes and vw.finish in x_v.nodes\n logger.info(\"Found path of length %s to %s from %s\",\n vw.length, vw.finish, vw.start)\n # Add this new node to our graph, storing the path length\n x.add_node(graph.nodes[vw.finish])\n A[vw.finish] = A[vw.start] + vw.length\n logger.info(\"Total path from %s to %s is %s\",\n s.name, vw.finish, A[vw.finish])\n # Return the final distance\n return A", "title": "" }, { "docid": "b15c9d1a26185055c5529945337e0703", "score": "0.62136203", "text": "def _dijkstra(G,source,time_start='00:00:00',pred=None, paths=None, cutoff=None,\n target=None):\n G_succ = G.succ if G.is_directed() else G.adj\n\n push = heappush\n pop = heappop\n dist = {} # dictionary of final distances\n #nb_walks = \n time_start = get_sec(time_start)\n seen = {source: time_start}\n prev = dict.fromkeys(list(G.nodes), (None, None, None,None)) # Pred node,pred_edge_id,pred_trip_id,pred_type\n c = count()\n fringe = [] # use heapq with (distance,label) tuples\n push(fringe, (time_start, next(c), source))\n while fringe:\n (d, _, v) = pop(fringe)\n if v in dist:\n continue # already searched this node.\n dist[v] = d\n if v == target:\n break\n current_time = dist[v]\n for u, e_group in G_succ[v].items():\n for id_,e in e_group.items():\n tmp = (v,id_,e['trip_id'],e['type'])\n cost = get_weight(e, current_time, prev[v]) \n if cost is None:\n continue\n vu_dist = dist[v] + cost\n if cutoff is not None:\n if vu_dist > cutoff:\n continue\n if u in dist:\n if vu_dist < dist[u]:\n raise ValueError('Contradictory paths found:',\n 'negative weights?')\n elif u not in seen or vu_dist < seen[u]:\n seen[u] = vu_dist\n prev[u] = tmp\n push(fringe, (vu_dist, next(c), u))\n if paths is not None:\n paths[u] = copy.deepcopy(paths[v]) + [tmp]\n if pred is not None:\n pred[u] = [v]\n elif vu_dist == seen[u]:\n if pred is not None:\n pred[u].append(v)\n\n if paths is not None:\n return (dist, paths)\n if pred is not None:\n return (dist, pred)\n return dist,None", "title": "" }, { "docid": "f92e2bc852992d879710c7c6d29aedda", "score": "0.6213613", "text": "def astar_search(problem, h=None):\n h = h or problem.h\n h = memoize(h, 'h')\n def f(n):\n return max(getattr(n, 'f', -infinity), n.path_cost + h(n))\n return best_first_graph_search(problem, f)", "title": "" }, { "docid": "4efcf71ccecc04b506c1a2983863a7d4", "score": "0.6210283", "text": "def astar(graph, start, goal, cost_heuristic=cost_heuristic_none, tie_heuristic=tie_heuristic_high_g):\n open_list = OpenList()\n\n # add start node to open list\n g = 0\n h = cost_heuristic(start)\n 
open_list.put(start, (g+h, tie_heuristic(g, h)))\n \n # set of nodes that have already been explored\n explored = set()\n\n # dict mapping children to parent\n predecessors = dict()\n\n # dict mapping nodes to cost from start\n costs = dict()\n costs[start] = 0\n\n path_found = False\n nodes_expanded = 0\n while not open_list.empty():\n node = open_list.get()\n nodes_expanded += 1\n\n # break if goal is found\n if node == goal:\n path_found = True\n break\n\n explored.add(node)\n\n # expand node\n for successor, cost in zip(*graph.get_successors(node)):\n # if we have already explored successor don't add to open list\n if successor in explored:\n continue\n\n g = costs[node] + cost\n h = cost_heuristic(successor)\n priority = (g+h, tie_heuristic(g, h))\n\n # if open_list already has successor,\n # and priority is lower than what is already there\n # update the priority, otherwise, skip\n if open_list.contains(successor):\n if priority < open_list.get_priority(successor):\n open_list.decrease_key(successor, priority)\n else:\n continue\n else:\n # if open_list doesn't have successor, add to open_list\n open_list.put(successor, priority)\n\n # update cost from start and predecessor\n costs[successor] = g\n predecessors[successor] = node\n\n if not path_found:\n return path_found, [], float('inf'), nodes_expanded\n\n # construct path\n path = []\n if path_found:\n node = goal\n path.append(goal)\n while node != start:\n node = predecessors[node]\n path.append(node)\n path = path[::-1] # reverse list\n\n return path_found, path, costs[goal], nodes_expanded", "title": "" }, { "docid": "bb2d8cdc189ff1e5c760f9568d38234a", "score": "0.62086135", "text": "def aStarSearch(problem, heuristic):\n \"*** YOUR CODE HERE ***\"\n from game import Directions\n from util import PriorityQueue\n \n fringe = PriorityQueue()\n direction_to_goal = PriorityQueue()\n position_visited = []\n current_position = problem.getStartState()\n final_directions = [] \n while problem.isGoalState(current_position) == False:\n if current_position not in position_visited:\n position_visited.append(current_position) \n for temporary_position, temporary_direction, temporary_cost in problem.getSuccessors(current_position): \n totalcost = heuristic(temporary_position, problem, \"goal\")\n fringe.push(temporary_position, totalcost)\n direction_to_goal.push(final_directions + [temporary_direction], totalcost)\n current_position = fringe.pop()\n final_directions = direction_to_goal.pop()\n return final_directions", "title": "" }, { "docid": "ee0b875e222cee2704cb80a4897864bc", "score": "0.6206935", "text": "def minimumCostPath(analyzer, destStation):\n return model.minimumCostPath(analyzer, destStation)", "title": "" }, { "docid": "92267f3a2801c0d009cf59aae6233281", "score": "0.6206013", "text": "def greedyBestFirstSearch(problem, heuristic):\n startState = problem.getStartState()\n\n queue = util.PriorityQueue()\n parent = {}\n distance = {}\n\n queue.push(startState, 0 + heuristic(startState, problem))\n distance[startState] = 0\n\n while not queue.isEmpty():\n\n parentState = queue.pop()\n\n if problem.isGoalState(parentState):\n goalState = parentState\n break\n\n successorList = problem.getSuccessors(parentState)\n for currentStateInfo in successorList:\n currentState = currentStateInfo[0]\n currentStateCost = currentStateInfo[2]\n\n if currentState not in distance.keys():\n distance[currentState] = distance[parentState] + currentStateCost\n parent[currentState] = [parentState]\n parent[currentState] += currentStateInfo\n 
queue.push(currentState,heuristic(currentState, problem))\n\n\n moves = []\n while True:\n if goalState is startState:\n break\n\n parentState = parent[goalState]\n\n goalState = parentState[0]\n moves.append(parentState[2])\n\n moves.reverse()\n # print(moves)\n return moves", "title": "" }, { "docid": "a6118e45a28f86e422f0c79011cec382", "score": "0.62059385", "text": "def test_weights_single_shortest_path(self): \n edges = self.context.frame.create(\n [(0,1,3), (0, 2, 2),\n (0, 3, 6), (0, 4, 4),\n (1, 3, 5), (1, 5, 5),\n (2, 4, 1), (3, 4, 2),\n (3, 5, 1), (4, 5, 4)],\n [\"src\", \"dst\", \"weights\"])\n vertices = self.context.frame.create([[0], [1], [2], [3], [4], [5]], [\"id\"])\n graph = self.context.graph.create(vertices, edges)\n\n #validate centrality values\n result_frame = graph.closeness_centrality(\"weights\", False)\n result = result_frame.to_pandas()\n expected_values = {0 : 0.238,\n 1: 0.176,\n 2: 0.333,\n 3: 0.667,\n 4: 0.25,\n 5: 0.0}\n self._validate_result(result, expected_values)", "title": "" }, { "docid": "29c7707fbbb5287f01dfb6b2725a4191", "score": "0.6202098", "text": "def AStar_search(problem, heuristic=nullHeuristic):\n\n node=Node(problem.start_node, None, 0, None, 0)\n frontier = util.PriorityQueue()\n frontier.push(node, 0)\n explored = set([])\n while True:\n if frontier.isEmpty():\n return False\n node = frontier.pop()\n if (problem.isGoalState(node.state)):\n path = []\n n=node\n while True:\n path = [n.state]+path\n if n.state == problem.start_node:\n break\n n = n.parent_node\n return path\n if node.state in explored:\n continue\n explored.add(node.state)\n successors = problem.getSuccessors(node.state)\n for successor in successors:\n if(successor[0] not in explored):\n cnode=Node(successor[0], successor[1], node.path_cost+successor[2], node, node.depth+1)\n frontier.push(cnode , cnode.path_cost+ heuristic(cnode.state, problem))", "title": "" }, { "docid": "b6829b8eb70b6dbac23cc1d19d6b9f44", "score": "0.61849886", "text": "def shortestPath(graph, start, end, _print = False):\n return DFS(graph, start, end, [], None, _print)", "title": "" }, { "docid": "06767fb488c5b946410f383b352bf7e0", "score": "0.6177424", "text": "def astar_tree_search(problem, h):\n def f(n):\n return n.path_cost + h(n)\n return best_first_tree_search(problem, f)", "title": "" }, { "docid": "67c53a6831bee8f2a94a0ef842031c15", "score": "0.61762226", "text": "def dijkstra(graph, s):\n\n def get_smallest_greedy(node):\n \"\"\"Given a starting node, return a 3-tuple containng the lowest\n greedy score, node, and the corresponding neighboring node among s's\n non-visited neighbors\n \"\"\"\n smallest_greedy = None\n for edge in (x for x in graph[node] if x[0] not in visited):\n neighbor, weight = edge\n greedy_score = A[node-1] + weight\n print(\"Edge {} has greedy score {}\".format(edge, greedy_score))\n if smallest_greedy is None or greedy_score < smallest_greedy[0]:\n smallest_greedy = (greedy_score, node, neighbor)\n return smallest_greedy\n\n def get_unvisited_neighbors(node):\n \"\"\"Given a node, return a list of the node's unvisited neighbors\"\"\"\n l = [\n edge[0]\n for edge in graph[node]\n if edge[0] not in visited\n ]\n print(\"In get_unvisited_neighbors: about to return {}\".format(l))\n return l\n\n A = [1000000 for _ in range(len(graph))] # Computed shortest distances\n A[s-1] = 0 # Distance from s to s is 0; subtract 1 for zero-based index\n B = [[] for _ in range(len(graph))]\n visited = set([s]) # Nodes processed so far\n heap = []\n\n # for neighbor in 
get_unvisited_neighbors(s):\n # smallest_greedy = get_smallest_greedy(neighbor)\n # if smallest_greedy:\n # h.heappush(heap, smallest_greedy)\n print(\"Value of B before while loop {}\".format(B))\n while heap:\n # Among all edges (v,w) with v member of x, w not a member of x,\n # pick one that minimizes a[v-1] + length from v to w\n # (Dijstra's greedy criterion)\n\n while True:\n print(\"Heap: {}\".format(heap))\n greedy_score, v, w = h.heappop(heap)\n if w in visited:\n # Recalculate greedy_score and put back in heap\n h.heappush(heap, get_smallest_greedy(v))\n else:\n break\n visited.add(w)\n A[w-1] = greedy_score\n for neighbor in get_unvisited_neighbors(w):\n smallest_greedy = get_smallest_greedy(neighbor)\n if smallest_greedy:\n h.heappush(heap, smallest_greedy)\n\n # print(\"Value of B before append: {}\".format(B))\n # print(\"Value of B[w-1]: {}\".format(B[w-1]))\n # print(\"Value of B[v-1]: {}\".format(B[v-1]))\n # print(\"Value of w: {}\".format(w))\n # print(\"Value of B[v-1].append(w): {}\".format(B[v-1].append(w)))\n B[w-1] = B[v-1] + [w]\n # print(\"Value of B after append: {}\".format(B))\n\n return A", "title": "" }, { "docid": "7f19970b39dfc01593c8c29db02bf92a", "score": "0.61660033", "text": "def shortest_hamilton(paths: List[List[int]], adj_mat: List[List[float]]) -> Tuple[List[int], float, int]:\n shortest_time = math.inf\n shortest_path = None\n cpt=1\n \n def path_length(path: List[int]) -> float:\n \"\"\"\n Sub-function which returns the length of a path\n\n Parameters\n ----------\n path : List[int]\n the path described as a list of integers (representing the rooms).\n\n Returns\n -------\n float\n length in seconds/centimeters.\n\n \"\"\"\n length = 0\n for i in range(len(path)-1):\n length += adj_mat[path[i]][path[i+1]]\n return length\n \n for path in paths:\n path_len = path_length(path)\n if path_len < shortest_time:\n shortest_time = path_len\n shortest_path = path\n cpt=1\n if path_len == shortest_time:\n cpt+=1\n \n return shortest_path, shortest_time, cpt", "title": "" }, { "docid": "e4efb5f0cb556b71e597af47c5c0d31a", "score": "0.6165086", "text": "def A_star(self, heuristic):\n # A list that store a object with the nodes and its heuristic weight\n to_visit_nodes = [\n {'weight': self.__head.depth, 'node': self.__head}\n ]\n # Node that solves the problem\n node = self.__head\n\n # Search in tree by node that solves the problem while there are nodes\n # to visit or the tree size is greater than maximum defined\n while len(to_visit_nodes) > 0 and self.size < self.MAX_SIZE:\n # Get the first node in the to_visit_nodes array\n current_node = to_visit_nodes.pop(0)['node']\n # Make copy of initial board\n board = copy.deepcopy(self.__initial_board)\n\n # Execute node movement in initial board\n for movement in current_node.value:\n board.move(movement)\n\n # Returs if the node commands achieves the goal\n if board.is_goal_achieved():\n node = current_node\n self.__initial_board = copy.deepcopy(board)\n break\n\n # If the node commands doesn't achieve the goal\n # add node available movements as it childrens\n # and add it to the begin of to_visit_nodes\n for movement in board.available_movements():\n if not self.__is_inverse(current_node, movement):\n new_node = self.insert_node(current_node, movement)\n heuristic_value = heuristic(board) + new_node.depth\n # Add object with node and it weight to to_visit_nodes list\n to_visit_nodes.insert(\n 0, {'weight': heuristic_value, 'node': new_node}\n )\n # Sort to_visit_nodes list to keep the minimum weight \n # as 
the first position list\n to_visit_nodes.sort(key=lambda x: x['weight'])\n\n return node.value", "title": "" }, { "docid": "5090acbc869fc4c387f90538b8f3edb3", "score": "0.6159175", "text": "def single_source_dijkstra_path_length(G, source, weight= 'weight', target_cutoffs=[], only_targets=False):\r\n #print \"Target cutoffs:\", target_cutoffs\r\n dist = {} # dictionary of final distances\r\n\tfinal_dist ={}\r\n target_cutoffs = set(target_cutoffs)\r\n\r\n if source in target_cutoffs:\r\n target_cutoffs.remove(source)\r\n seen = {source:0}\r\n fringe=[] # use heapq with (distance,label) tuples\r\n heapq.heappush(fringe,(0,source))\r\n while fringe:\r\n (d,v)=heapq.heappop(fringe)\r\n\r\n if v in dist:\r\n continue # already searched this node.\r\n\r\n dist[v] = d\r\n if v in target_cutoffs:\r\n target_cutoffs.remove(v)\r\n\t\t\tfinal_dist[v] = d\r\n if not target_cutoffs:\r\n #print dist\r\n return final_dist if only_targets else dist\r\n\r\n #for ignore,w,edgedata in G.edges_iter(v,data=True):\r\n #is about 30% slower than the following\r\n edata=iter(G[v].items())\r\n\r\n for w,edgedata in edata:\r\n vw_dist = dist[v] + edgedata.get(weight,1)\r\n\r\n if w not in seen or vw_dist < seen[w]:\r\n seen[w] = vw_dist\r\n heapq.heappush(fringe,(vw_dist,w))\r\n if target_cutoffs:\r\n raise ValueError(\"There are still target cutoffs:\", str(target_cutoffs))\r\n return dist", "title": "" }, { "docid": "865b40afa3cb91c1dbc352d15304310c", "score": "0.61520076", "text": "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n \n aVisitar = util.PriorityQueue()\n visitats = []\n direccions = []\n costTotal = 0\n prediccio = 0+heuristic(problem.getStartState(), problem)\n # ((3,4), [west, south], 54) ()\n # (on soc, camí absolut, cost mínim per arribar)\n # Node = (Coordenades, Path, Prediction)\n startNode = (problem.getStartState(), direccions, costTotal) #a cada node guardem TOTA la llista de direccions i tots els pares dels que venim, aixi quan trobem el node de desti nomes hem de retornar les direccions del mateix node.\n \n aVisitar.push(startNode, prediccio)\n \n while not aVisitar.isEmpty():\n \n nodeCoord, direccions, costRecorregut = aVisitar.pop()\n \n if problem.isGoalState(nodeCoord):\n return direccions\n \n if nodeCoord in visitats: continue\n \n visitats.append(nodeCoord)\n \n for fillCoord, direccio, cost in problem.getSuccessors(nodeCoord):\n if fillCoord not in visitats:\n newNode = (fillCoord, direccions+[direccio],costRecorregut+cost) #es guarden dins de cada node\n aVisitar.push(newNode, costRecorregut + cost + heuristic(fillCoord,problem))", "title": "" }, { "docid": "46d585b7188e237a79f068f17471e72a", "score": "0.6150554", "text": "def aStarSearch(problem, heuristic=nullHeuristic):\n\n # class to represent SearchNode\n class SearchNode:\n \"\"\"\n Creates node: <state, action, f(s), g(s), h(s), parent_node>\n \"\"\"\n def __init__(self, state, action=None, g=None, h=None,\n parent=None):\n self.state = state\n self.action = action\n self.parent = parent\n # heuristic value\n self.h = h\n # combined cost\n if parent:\n self.g = g + parent.g\n else:\n self.g = 0\n # evaluation function value\n self.f = self.g + self.h\n\n def extract_solution(self):\n \"\"\" Gets complete path from goal state to parent node \"\"\"\n action_path = []\n search_node = self\n while search_node:\n if search_node.action:\n action_path.append(search_node.action)\n search_node = search_node.parent\n return list(reversed(action_path))\n\n\n # make search node function\n def 
make_search_node(state, action=None, cost=None, parent=None):\n if hasattr(problem, 'heuristicInfo'):\n if parent:\n # same parent - avoid re-calculation\n # for reducing computations in logic\n if parent == problem.heuristicInfo[\"parent\"]:\n problem.heuristicInfo[\"sameParent\"] = True\n else:\n problem.heuristicInfo[\"sameParent\"] = False\n # adding parent info for reducing computations\n problem.heuristicInfo[\"parent\"] = parent\n # get heuristic value\n h_value = heuristic(state, problem)\n return SearchNode(state, action, cost, h_value, parent)\n\n # create open list\n open = util.PriorityQueue()\n node = make_search_node(problem.getStartState())\n open.push(node, node.f)\n closed = set()\n best_g = {} # maps states to numbers\n\n # run until open list is empty\n while not open.isEmpty():\n node = open.pop() # pop-min\n\n if node.state not in closed or node.g < best_g[node.state]:\n closed.add(node.state)\n best_g[node.state] = node.g\n\n # goal-test\n if problem.isGoalState(node.state):\n return node.extract_solution()\n\n # expand node\n successors = problem.getSuccessors(node.state)\n for succ in successors:\n child_node = make_search_node(succ[0],succ[1],succ[2], node)\n if child_node.h < float(\"inf\"):\n open.push(child_node, child_node.f)\n\n # no solution\n util.raiseNotDefined()", "title": "" }, { "docid": "3308af318d31d6a27abe770106d000eb", "score": "0.61470634", "text": "def shortestPath(graph, u, v, k):\n V = 4\n if k == 0 and u == v:\n return 0\n if k <= 0:\n return INF\n if k == 1 and graph[u][v] != INF:\n return graph[u][v]\n result = INF\n for i in range(0, V):\n if graph[u][i] != INF and u != i and v != i:\n rec_res = shortestPath(graph, i, v, k - 1)\n if rec_res != INF:\n result = min(result, graph[u][i] + rec_res)\n return result", "title": "" }, { "docid": "4b40051a811f8d270806fa0c6a2a2420", "score": "0.61315674", "text": "def astar_search_graph(problem, h=None):\n h = memoize(h or problem.h, 'h')\n iterations, all_node_colors, node, all_node_f = best_first_graph_search_for_vis(problem,\n lambda n: n.path_cost + h(n))\n return(iterations, all_node_colors, node, all_node_f)", "title": "" }, { "docid": "0f38a0f2989551d3760c1aaa8dbf0db6", "score": "0.6129433", "text": "def shortest_path(graph: Dict[str, List[str]], start_node: str, end_node: str\n ) -> List[str]:\n dest_src = shortest_path_bfs(graph, start_node, end_node)\n return build_path(dest_src, start_node, end_node)", "title": "" }, { "docid": "e267a7932a21d5c8e55c11d00843b9d5", "score": "0.6125924", "text": "def shortest_path(S, neighbours, D_cond):\n Q = PriorityQueue()\n Q.put((0, S))\n seen = set()\n d = defaultdict(lambda: np.inf)\n d[S] = 0\n\n while not Q.empty():\n vd, v = Q.get()\n if D_cond(v):\n return vd\n seen.add(v)\n\n for neigh, cost in neighbours(v):\n if neigh in seen: continue\n nd = vd + cost\n if nd < d[neigh]:\n d[neigh] = nd\n Q.put((nd, neigh))\n\n return -1", "title": "" }, { "docid": "f189fc4c18170af77b4c1bb7d2e4ff2c", "score": "0.6122015", "text": "def dijkstra(graph, src, dest, visited, distances, predecessors):\n # ending condition\n if src == dest:\n # We build the shortest path and display it\n path = []\n pred = dest\n while pred != None:\n path.append(pred)\n pred = predecessors.get(pred, None)\n # reverses the array, to display the path nicely\n path.pop()\n path.reverse()\n return path\n\n else:\n # if it is the initial run, initializes the cost\n if not visited:\n distances[src] = 0\n # visit the neighbors\n for neighbor in graph[src]:\n if neighbor not in visited:\n 
new_distance = distances[src] + graph[src][neighbor]\n if new_distance < distances.get(neighbor, float('inf')):\n distances[neighbor] = new_distance\n predecessors[neighbor] = src\n # mark as visited\n visited.append(src)\n # now that all neighbors have been visited: recurse\n # select the non visited node with lowest distance 'x'\n # run Dijskstra with src='x'\n unvisited = {}\n for k in graph:\n if k not in visited:\n unvisited[k] = distances.get(k, float('inf'))\n x = min(unvisited, key=unvisited.get)\n return dijkstra(graph, x, dest, visited, distances, predecessors)", "title": "" }, { "docid": "8eb34bdbddd13c3491d571312d1531ec", "score": "0.61158985", "text": "def get_shortest_paths(self, node_i, node_j):\n try:\n node_i = self.node_labels_map[node_i]\n except:\n pass\n try:\n node_j = self.node_labels_map[node_j]\n except:\n pass\n # paths to self\n if node_i==node_j:\n return (0,1)\n # path to different nodes\n steps = 1\n a = b = self.adjacency_matrix.toarray()\n while steps<a.shape[0]:\n if b[node_i,node_j] > 0:\n return (steps, int(b[node_i,node_j]))\n b = b@a\n steps+=1\n\n return (np.Inf, 0)", "title": "" }, { "docid": "9231aed3696f16fa5509e5d371266454", "score": "0.61128074", "text": "def ShortestPath(graphLosses: {int: {int}}, Armada: int) -> {int:int}:\n inTree, parent, distance = DijkstraTable(graphLosses, Armada)\n playersLeft = {k:v for k, v in distance.items()} # Players that not yet been choosen\n\n while True:\n # Determine next player arbitrarily\n nextPlayer = min(playersLeft, key = playersLeft.get)\n if playersLeft[nextPlayer] == float('inf'):\n break\n del playersLeft[nextPlayer]\n inTree[nextPlayer] = True\n \n for playerLosses in graphLosses[nextPlayer]:\n if (distance[nextPlayer] + 1) < distance[playerLosses]:\n distance[playerLosses] = distance[nextPlayer] + 1\n playersLeft[playerLosses] = distance[playerLosses]\n parent[playerLosses] = nextPlayer\n return parent, distance", "title": "" }, { "docid": "2cc3b1f0edfcda7db8d49ddd21c9e1b2", "score": "0.61109465", "text": "def dag_longest_path(G, weight='weight', default_weight=1):\n dist = {} # stores {v : (length, u)}\n for v in networkx.topological_sort(G):\n us = [(dist[u][0] + data.get(weight, default_weight), u)\n for u, data in G.pred[v].items()]\n # Use the best predecessor if there is one and its distance is non-negative, otherwise terminate.\n maxu = max(us) if us else (0, v)\n dist[v] = maxu if maxu[0] >= 0 else (0, v)\n u = None\n v = max(dist, key=dist.get)\n path = []\n while u != v:\n path.append(v)\n u = v\n v = dist[v][1]\n path.reverse()\n return path", "title": "" }, { "docid": "fecf9d48fb4f242cb4f7753fd7e01d35", "score": "0.6110839", "text": "def shortest_path( graph: Graph, startKey: str, endKey: str ) \\\r\n -> ( list, float ):\r\n remaining = PrioQ()\r\n for v in graph:\r\n if v.key == startKey:\r\n remaining.insert( v, 0 )\r\n else:\r\n remaining.insert( v )\r\n\r\n lowest = remaining.item()\r\n assert lowest.vtx.key == startKey\r\n\r\n while lowest.vtx.key != endKey:\r\n remaining.remove()\r\n if lowest.dist is None or remaining.is_empty(): # No way to get to end\r\n return [], -1\r\n thisDist = lowest.dist\r\n for u in lowest.vtx.get_connections():\r\n # Only do this if u is not final.\r\n u_ddata = remaining.get_ddata( u.key )\r\n if u_ddata is not None:\r\n newDist = thisDist + lowest.vtx.get_weight( u )\r\n if u_ddata.dist is None or newDist < u_ddata.dist:\r\n u_ddata.dist = newDist\r\n u_ddata.pred = lowest\r\n lowest = remaining.item()\r\n path = []\r\n if lowest.dist is None: # We 
found the end, but it never got connected.\r\n totalDistance = -1\r\n else:\r\n totalDistance = lowest.dist\r\n ddata = lowest\r\n while ddata is not None:\r\n path.insert( 0, ddata.vtx.key )\r\n ddata = ddata.pred\r\n\r\n return path, totalDistance", "title": "" }, { "docid": "fb326a74d25cacce10cdbada622bece2", "score": "0.61105686", "text": "def aStarSearch(problem, heuristic=nullHeuristic):\n #COMP90054 Task 1, Implement your A Star search algorithm here\n \"*** YOUR CODE HERE ***\"\n\n openList = util.PriorityQueue() # priority is f = g + h\n initialState = problem.getStartState()\n initialNode = (initialState, \"\", 0, [])\n openList.push(initialNode, initialNode[2] + heuristic(initialState, problem))\n closedList = set()\n bestG = dict() # maps each state to their corresponding best g\n\n while openList:\n node = openList.pop()\n state, action, cost, path = node\n\n # initiate an infinite g for unvisited states\n if state not in bestG:\n bestG[state] = INFINITY\n\n # when state is unvisited or re-open if there is a better g for state\n if state not in closedList or cost < bestG[state]:\n closedList.add(state)\n bestG[state] = cost # update state's best G\n\n # when goal is reached, break loop to return path\n if problem.isGoalState(state):\n path = path + [(state, action)]\n break\n\n # explore state's children\n succNodes = problem.expand(state)\n for succNode in succNodes:\n succState, succAction, succCost = succNode\n\n # if goal is reachable from succState, push to priority queue\n if heuristic(succState, problem) < INFINITY:\n newNode = (succState, succAction, cost + succCost, path + [(state, action)])\n openList.push(newNode, newNode[2] + heuristic(succState, problem))\n\n actions = [action[1] for action in path]\n del actions[0]\n\n return actions", "title": "" }, { "docid": "96ad03c4a95b9d23c924079d86cd714a", "score": "0.6107522", "text": "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n from util import PriorityQueue\n\n class Root:\n def __init__(self, position: tuple, path: list, cost: int) -> None:\n self.position = position\n self.path = path\n self.cost = cost\n\n def getPosition(self) -> tuple:\n return self.position\n\n def getPath(self) -> list:\n return self.path\n\n def getCost(self) -> int:\n return self.cost\n\n visited: set = set()\n rootsQueue: PriorityQueue = PriorityQueue()\n\n rootsQueue.push(Root(problem.getStartState(), [], 0),\n 0) # Push the initial root\n\n while not rootsQueue.isEmpty():\n currentNode: Root = rootsQueue.pop()\n\n if problem.isGoalState(currentNode.getPosition()):\n return currentNode.getPath().copy()\n\n if currentNode.getPosition() not in visited:\n visited.add(currentNode.getPosition())\n successors: list = problem.getSuccessors(currentNode.getPosition())\n for nextNode in successors:\n nextNodePostition: tuple = nextNode[0]\n if nextNodePostition not in visited:\n nextMove: str = nextNode[1]\n newPath: list = currentNode.getPath().copy()\n newPath.append(nextMove)\n newCost: int = problem.getCostOfActions(\n newPath) + heuristic(nextNodePostition, problem)\n if nextNodePostition not in visited:\n rootsQueue.push(\n Root(nextNodePostition, newPath, newCost), newCost)\n\n return []", "title": "" }, { "docid": "d31f81a77eae673cbcf70cfe62eaf386", "score": "0.61031836", "text": "def dijkstra_search(graph, initial_node, dest_node):\r\n pass", "title": "" }, { "docid": "1c2b4486fe21f653de48240e84e75c0d", "score": "0.60928303", "text": "def aStarSearch(problem, heuristic=nullHeuristic):\n # COMP90054 Task 1, 
Implement your A Star search algorithm here\n \"*** YOUR CODE HERE ***\"\n current = problem.getStartState()\n pq = util.PriorityQueue()\n pq.push((current, '', 0, []), 0)\n\n # we will reuse bestG as the closed set as well to save on memory\n bestG = dict()\n\n while not pq.isEmpty():\n state, action, cost, path = pq.pop()\n\n skip = state in bestG\n if skip:\n skip = cost >= bestG[state]\n\n if skip:\n continue\n\n bestG[state] = cost\n\n if problem.isGoalState(state):\n path = path + [(state, action)]\n break\n\n for succNode in problem.expand(state):\n succState, succAction, succCost = succNode\n newNode = (succState, succAction, cost +\n succCost, path + [(state, action)])\n h = heuristic(succState, problem)\n if h < float('inf'):\n pq.push(newNode, cost + succCost + h)\n\n actions = [action[1] for action in path]\n del actions[0]\n return actions", "title": "" }, { "docid": "161109332002688b96f3ae44a9efb837", "score": "0.6091422", "text": "def shortest_path(digr, s):\r\n nodes_explored = [s]\r\n nodes_unexplored = DFS(digr, s)[1:] # all accessible nodes from s\r\n dist = {s:0}\r\n node_heap = []\r\n\r\n for n in nodes_unexplored:\r\n min = compute_min_dist(digr, n, nodes_explored, dist)\r\n heapq.heappush(node_heap, (min, n))\r\n\r\n while len(node_heap) > 0:\r\n min_dist, nearest_node = heapq.heappop(node_heap)\r\n dist[nearest_node] = min_dist\r\n nodes_explored.append(nearest_node)\r\n nodes_unexplored.remove(nearest_node)\r\n\r\n # recompute keys for just popped node\r\n for v in digr.neighbors(nearest_node):\r\n if v in nodes_unexplored:\r\n for i in range(len(node_heap)):\r\n if node_heap[i][1] == v:\r\n node_heap[i] = (compute_min_dist(digr, v, nodes_explored, dist), v)\r\n heapq.heapify(node_heap)\r\n\r\n return dist", "title": "" }, { "docid": "2f94c299f446fd745834a866a2d4ee73", "score": "0.60835236", "text": "def dijkstra_search(graph, initial_node, dest_node):\n pass", "title": "" }, { "docid": "921eab54db35be87dd92d8e57f5bfa55", "score": "0.60811883", "text": "def dijkstra_search_fill(graph, start):\n frontier = PriorityQueue()\n frontier.put(start, 0, 0, 0)\n came_from = {}\n cost_so_far = {}\n came_from[start] = None\n cost_so_far[start] = 0\n\n while not frontier.empty():\n current, current_priority = frontier.get()\n\n for next in graph.neighbors(current):\n new_cost = cost_so_far[current] + graph.cost(current, next)\n if next not in cost_so_far or new_cost < cost_so_far[next]:\n cost_so_far[next] = new_cost\n priority = new_cost\n frontier.put(next, priority, 0, priority)\n came_from[next] = current\n\n return came_from, cost_so_far", "title": "" }, { "docid": "6f87af3030e787e478af19c93c7f84ca", "score": "0.60807145", "text": "def DFSJIT_KShortest(graph, start, target, K):\r\n def cost(u, v):\r\n return graph.edges[u,v][\"weight\"]\r\n\r\n def TS(u,v):\r\n return graph.edges[u, v][\"TS\"]\r\n\r\n lazyDijk = JITModifiedDijkstra(graph, start)\r\n\r\n def R(u,v,j,pre):\r\n if u == start:\r\n KPaths.append([(u,j),] + pre)\r\n\r\n for pred in graph.predecessors(u):\r\n ipred_upper = bisect_right(TS(pred, u), TS(u, v)[j]-cost(pred, u))-1\r\n ipred_lower = custom_bisect_left(TS(pred, u),pred,hi=ipred_upper+1, compare=lazyDijk.is_threshold_strictly_smaller_t_target)\r\n for i in range(ipred_lower,ipred_upper+1):\r\n R(pred, u, i, [(u,j),] + pre)\r\n\r\n if len(KPaths) == K: return #early stopping\r\n\r\n\r\n # gather candiates\r\n C = [(v,j) for v in graph.predecessors(target) for j in range(len(TS(v,target)))]\r\n\r\n # sort canidates\r\n C = sorted(C, key=lambda x: 
TS(x[0],target)[x[1]] + cost(x[0],target))\r\n # enumerate paths\r\n KPaths = []\r\n for x in C:\r\n if not lazyDijk.is_threshold_strictly_smaller_t_target(TS(x[0], target)[x[1]], x[0]):\r\n R(x[0], target, x[1], [])\r\n if len(KPaths) >= K: break\r\n\r\n return KPaths[:K], lazyDijk.Fin", "title": "" }, { "docid": "dce74db7d61ebb158a4555c73f94750f", "score": "0.6079356", "text": "def dijkstra(self, initial):\n visited = {initial: 0}\n paths = {}\n\n nodes = set(self.nodes)\n\n while nodes:\n min_node = None\n for node in nodes:\n if node in visited:\n if min_node is None or visited[node] < visited[min_node]:\n min_node = node\n\n if min_node is None:\n break\n\n nodes.remove(min_node)\n current_weight = visited[min_node]\n\n for edge in self.edges[min_node]:\n try:\n weight = current_weight + self.distances[(min_node, edge)]\n except KeyError:\n continue\n if edge not in visited or weight < visited[edge]:\n visited[edge] = weight\n paths[edge] = min_node\n\n return visited, paths", "title": "" }, { "docid": "be1c06f9aef5e133cf9c4513f74be225", "score": "0.60782087", "text": "def get_optimal_path(self, start: Tuple, goal: Tuple, distance_tolerance=.05) -> List[Tuple]:\n if self.display:\n print('Initializing Display')\n self.__init_display(start, goal)\n\n self.__build_tree(start)\n graph = self.__build_graph(self.vertices, self.edges)\n potential_goals = []\n for vertex in self.vertices: # find all vertices within 5m of goal\n if self.__euclidean_dist(vertex, goal) < distance_tolerance:\n potential_goals.append(vertex)\n\n path_queue = []\n for potential_goal in potential_goals: # find shortest path among potential goals\n path = networkx.shortest_path(graph, start, potential_goal, weight='weight')\n cost = networkx.path_weight(graph, path, weight='weight')\n heapq.heappush(path_queue, (cost, path))\n if len(path_queue) == 0:\n return []\n\n best_path = heapq.heappop(path_queue)[1]\n best_path = self.__add_heading_to_waypoints(best_path, start[2], goal[2])\n if self.display:\n self.__draw_optimal_path(best_path)\n return best_path", "title": "" }, { "docid": "83436034572aba2c986140541cc86cd6", "score": "0.60766315", "text": "def a_star(self, heuristic):\n priority_queue = queue.PriorityQueue()\n visited = set()\n parent_cell = {}\n cost_so_far = {} # Cost from start to each cell\n\n # Put in que as tuple (priority, cell)\n priority_queue.put((0, self.start_position))\n visited.add(self.start_position)\n cost_so_far[self.start_position] = 0\n\n while not priority_queue.empty():\n current_cell = priority_queue.get()\n # Get the cell only, don't care about priority\n current_cell = current_cell[1]\n if current_cell == self.end_position:\n path = self.build_path(parent_cell)\n return {\"Status\": \"Found Path\", \"Visited cells\": visited,\n \"No of visited cells\": len(visited), \"Path\": path, \"Path length\": len(path)}\n\n for next_cell in self.a_map.connected_cells(current_cell):\n new_cost = cost_so_far[current_cell] + 1\n if next_cell not in visited:\n cost_so_far[next_cell] = new_cost\n parent_cell[next_cell] = current_cell\n visited.add(next_cell)\n\n priority = new_cost + self.find_heuristic(next_cell, heuristic)\n priority_queue.put((priority, next_cell))\n\n return {\"Status\": \"Path Not Found!!!\", \"Visited cells\": visited,\n \"No of visited cells\": len(visited), \"Path\": [], \"Path length\": \"N/A\"}", "title": "" }, { "docid": "76e0282816b4404526a8218afd5b581d", "score": "0.607352", "text": "def shortest_path(self, start):\r\n dist = [sys.maxsize] * self.vertex_num\r\n 
dist[start] = 0\r\n pred = [None] * self.vertex_num\r\n q = []\r\n for v in range(self.vertex_num):\r\n heapq.heappush(q, (dist[v], v)) # heap must be ordered by distance, not by vertex id\r\n while q:\r\n u = heapq.heappop(q)\r\n u = self.graph[u[1]].get_vertex()\r\n adjacent = self.graph[u].get_adjacent()\r\n for v in adjacent:\r\n if dist[u] + v[1] < dist[v[0]]:\r\n dist[v[0]] = dist[u] + v[1]\r\n for i in range(len(q)):\r\n if q[i][1] == v[0]:\r\n q[i] = (dist[u] + v[1], v[0])\r\n break\r\n heapq.heapify(q) # restore the heap invariant after the in-place decrease-key\r\n pred[v[0]] = u\r\n # return the distance table and the predecessor table\r\n return (dist, pred)", "title": "" } ]
0de9fca772bfb65735c5d9615ad30b7f
store decoded instruction, prepare for next one
[ { "docid": "32f9f6ffb70ceefd08ea42257e2ae3c0", "score": "0.57765085", "text": "def _save_instruction(self, insn):\n self.instructions.append(insn)\n self.cycles = 0\n self.used_words = []\n self.first_address = None", "title": "" } ]
[ { "docid": "251672139dd55cedcb073d1046e8b9c9", "score": "0.66286063", "text": "def _decode(self):\n try:\n ip = self.getReg(self.REG_IP)\n return Instruction.decode(self)\n except InstructionError, e:\n raise ProcessorError(ip, \"error decoding instruction: %s\" % e)", "title": "" }, { "docid": "f452000cf52e579c7fee56e7fc5a4356", "score": "0.6112518", "text": "def decode():\n return opcodes[R[4][:4]].__name__ + \" \" +R[4][4:]", "title": "" }, { "docid": "7364df75648bfe6375069a0179cf0f43", "score": "0.6100631", "text": "def test_instruction_valid_decode(self):\n instruction_str = '4.args,8.hostname,4.port,4.1984;'\n\n instruction_opcode = 'args'\n instruction_args = ('hostname', 'port', '1984')\n\n instruction = Instruction.load(instruction_str)\n\n self.assertEqual(instruction_str, instruction.encode())\n self.assertEqual(instruction_opcode, instruction.opcode)\n self.assertEqual(instruction_args, instruction.args)", "title": "" }, { "docid": "c9cb0ba611666872747826cf6df9f298", "score": "0.60047764", "text": "def decode_on_the_fly(self):", "title": "" }, { "docid": "ac4c62e205c81929cf384f10cdc668a7", "score": "0.5974516", "text": "def _fetch_opcode(self):\n np.copyto(\n self._opcode,\n self._memory[self._program_counter].astype(self._opcode.dtype) << 8 \n | self._memory[self._program_counter+1]\n )\n #print(hex(self._opcode[0]))\n\n #Enough opcodes use these to be worth initialising here once\n vx = (self._opcode & 0x0F00) >> 8\n vy = (self._opcode & 0x00F0) >> 4\n np.copyto(self._vx, vx.astype(np.uint8))\n np.copyto(self._vy, vy.astype(np.uint8))", "title": "" }, { "docid": "8e74de0bec6689215d0ea5d2a60b892b", "score": "0.5794661", "text": "def get_instruction(self, address):", "title": "" }, { "docid": "5afedd75e120334c3463650ba55a5e6a", "score": "0.57900345", "text": "def convert_to_instruction_format(self):\n binary = self.binary_instruction_string\n self.instruction.update({\"Opcode\": int(binary[0:6], 2)})\n self.instruction.update({\"RS\": int(binary[6:11], 2)})\n self.instruction.update({\"RT\": int(binary[11:16], 2)})\n self.instruction.update({\"RD\": int(binary[16:21], 2)})\n self.instruction.update({\"Shift\": int(binary[21:26], 2)})\n self.instruction.update({\"FuncCode\": int(binary[26:32], 2)})\n self.instruction.update({\"Immed\": binary[16:32]})\n if self.instruction[\"Immed\"][0] == '1':\n self.instruction[\"Immed\"] = -1 * int(twos_comp(self.instruction[\"Immed\"]), 2)\n else:\n self.instruction[\"Immed\"] = int(self.instruction[\"Immed\"], 2)", "title": "" }, { "docid": "9f776b8e45373effc9579bc48460ae47", "score": "0.5778926", "text": "def decode_single_operand_instruction(address, data):\n\n instruction = int.from_bytes(data[:2], 'little') # decode instruction\n\n assert instruction <= 0xFFFF\n assert (instruction >> 10) == 0b000100, \\\n 'Passed in a non-single operand instruction to decode_single_operand_instruction'\n\n\n opcodes = {\n 0b000: Opcode.RRC,\n 0b001: Opcode.SWPB,\n 0b010: Opcode.RRA,\n 0b011: Opcode.SXT,\n 0b100: Opcode.PUSH,\n 0b101: Opcode.CALL,\n 0b110: Opcode.RETI,\n }\n def get_opcode(instruction):\n raw = (instruction >> 7) & 0b111\n assert raw in opcodes, 'Invalid Opcode: {}'.format(raw)\n return opcodes[raw]\n\n widths = {\n 0b0: OperandWidth.WORD,\n 0b1: OperandWidth.BYTE,\n }\n get_width = lambda x: widths[(x >> 6) & 0b1]\n\n registers = {\n 0b0000: Register.R0,\n 0b0001: Register.R1,\n 0b0010: Register.R2,\n 0b0011: Register.R3,\n 0b0100: Register.R4,\n 0b0101: Register.R5,\n 0b0110: Register.R6,\n 0b0111: Register.R7,\n 0b1000: 
Register.R8,\n 0b1001: Register.R9,\n 0b1010: Register.R10,\n 0b1011: Register.R11,\n 0b1100: Register.R12,\n 0b1101: Register.R13,\n 0b1110: Register.R14,\n 0b1111: Register.R15,\n }\n get_register = lambda x: registers[(x & 0b1111)]\n\n normal_addressing_modes = {\n 0b00: AddressingMode.DIRECT,\n 0b01: AddressingMode.INDEXED,\n 0b10: AddressingMode.INDIRECT,\n 0b11: AddressingMode.AUTOINCREMENT,\n }\n r0_addressing_modes = {\n 0b00: AddressingMode.DIRECT,\n 0b01: AddressingMode.SYMBOLIC,\n 0b10: AddressingMode.INDIRECT,\n 0b11: AddressingMode.IMMEDIATE,\n }\n r2_addressing_modes = {\n 0b00: AddressingMode.DIRECT,\n 0b01: AddressingMode.ABSOLUTE,\n 0b10: AddressingMode.CONSTANT4,\n 0b11: AddressingMode.CONSTANT8,\n }\n r3_addressing_modes = {\n 0b00: AddressingMode.CONSTANT0,\n 0b01: AddressingMode.CONSTANT1,\n 0b10: AddressingMode.CONSTANT2,\n 0b11: AddressingMode.CONSTANTNEG1,\n }\n def get_addressing_mode(instruction, dest_register):\n mode = (instruction >> 4) & 0b11\n if dest_register == Register.R0: # R0 special\n return r0_addressing_modes[mode]\n elif dest_register == Register.R2: # R2 special\n return r2_addressing_modes[mode]\n elif dest_register == Register.R3: # R3 special\n return r3_addressing_modes[mode]\n else: # normal\n return normal_addressing_modes[mode]\n\n def get_operand(data, addressing_mode):\n n_bytes = 2\n\n modes_with_operands = {AddressingMode.IMMEDIATE, \\\n AddressingMode.INDEXED, \\\n AddressingMode.SYMBOLIC, \\\n AddressingMode.ABSOLUTE}\n if addressing_mode in modes_with_operands: # operand is in the instruction stream, extract\n operand = int.from_bytes(data[2:2+n_bytes], 'little')\n operand = BitVecVal(operand, 8 * n_bytes)\n return operand, n_bytes\n else: # no operand\n return None, 0\n\n opcode = get_opcode(instruction)\n width = get_width(instruction)\n register = get_register(instruction)\n addressing_mode = get_addressing_mode(instruction, register)\n operand, operand_size = get_operand(data, addressing_mode)\n\n return SingleOperandInstruction(data[:2+operand_size], \\\n address, opcode, width, addressing_mode, register, operand), \\\n 2 + operand_size", "title": "" }, { "docid": "1d27c7f31d1deb74779f933898831cb4", "score": "0.5767527", "text": "def read_instr(self, instr):\n if instr:\n self.reg_pc += 1\n if instr[0] == \"add\":\n index = int(instr[2])\n if index >= 0 and index < len(self.data_mem):\n setattr(self, 'reg_'+instr[1], int(getattr(self, 'reg_'+instr[1])) + int(self.data_mem[index]))\n self.instr_counts[instr[0]] += 1\n elif instr[0] == \"sub\":\n index = int(instr[2])\n if index >= 0 and index < len(self.data_mem):\n setattr(self, 'reg_'+instr[1], int(getattr(self, 'reg_'+instr[1])) - int(self.data_mem[index]))\n self.instr_counts[instr[0]] += 1\n elif instr[0] == \"load\":\n index = int(instr[2])\n if index >= 0 and index < len(self.data_mem):\n setattr(self, 'reg_'+instr[1], int(self.data_mem[index]))\n self.instr_counts[instr[0]] += 1\n elif instr[0] == \"store\":\n if int(instr[2]) > 0 and int(instr[2]) < len(self.data_mem):\n self.data_mem[int(instr[2])] = getattr(self, 'reg_'+instr[1])\n self.instr_counts[instr[0]] += 1\n elif instr[0] == \"addi\":\n setattr(self, 'reg_'+instr[1], int(getattr(self, 'reg_'+instr[1])) + int(instr[2]))\n self.instr_counts[instr[0]] += 1\n elif instr[0] == \"seti\":\n setattr(self, 'reg_'+instr[1], int(instr[2]))\n self.instr_counts[instr[0]] += 1\n elif instr[0] == \"jump\":\n if (self.reg_pc - 1) != (int(instr[1])):\n self.reg_pc = int(instr[1])\n else:\n self.running = False\n 
self.instr_counts[instr[0]] += 1\n elif instr[0] == \"jz\":\n if getattr(self, 'reg_'+instr[1]) is 0:\n self.reg_pc = int(instr[2])\n self.instr_counts[instr[0]] += 1\n elif instr[0] == \"addptr\":\n index = int(getattr(self, 'reg_'+instr[2])) + int(instr[3])\n if index >= 0 and index < len(self.data_mem):\n setattr(self, 'reg_'+instr[1], int(getattr(self, 'reg_'+instr[1])) + int(self.data_mem[index]))\n self.instr_counts[instr[0]] += 1\n elif instr[0] == \"subptr\":\n index = int(getattr(self, 'reg_'+instr[2])) + int(instr[3])\n if index >= 0 and index < len(self.data_mem):\n setattr(self, 'reg_'+instr[1], int(getattr(self, 'reg_'+instr[1])) - int(self.data_mem[index]))\n self.instr_counts[instr[0]] += 1\n elif instr[0] == \"loadptr\":\n index = int(getattr(self, 'reg_'+instr[2])) + int(instr[3])\n if index >= 0 and index < len(self.data_mem):\n setattr(self, 'reg_'+instr[1], int(self.data_mem[index]))\n self.instr_counts[instr[0]] += 1\n elif instr[0] == \"storeptr\":\n index = int(getattr(self, 'reg_'+instr[2])) + int(instr[3])\n if index >= 0 and index < len(self.data_mem):\n self.data_mem[index] = getattr(self, 'reg_'+instr[1])\n self.instr_counts[instr[0]] += 1\n if self.running and self.reg_pc >= len(self.instruction_mem):\n self.running = False", "title": "" }, { "docid": "ef84c7738d26167669bb8ee7df5baf95", "score": "0.57642096", "text": "def decode(instr):\n\n\toperator = instruction.getOperator(instr)\n\toperand = instruction.getOperand(instr)\n\n\treturn operator, operand", "title": "" }, { "docid": "b147e26abedbc19b000d18babe32d8f4", "score": "0.5752699", "text": "def decode(cls, seq):\n inst = seq.next()\n\n opcode = inst >> 12\n try:\n opcls = cls.fromOpcode(opcode)\n except KeyError:\n raise InstructionError(\"invalid opcode\")\n opa = Operand.decode((inst >> 6) & 0o77, seq)\n opb = Operand.decode(inst & 0o77, seq)\n\n return opcls(opa, opb)", "title": "" }, { "docid": "a68a5979ae1f7eba5288aaf654a7e104", "score": "0.5738984", "text": "def step(self, display=True):\n\t\tif self.__libraryCall():\n\t\t\treturn\n\n\t\told_ip = self.__ip\n\t\tself.__ip, instr = disassemble(self.memory, old_ip)\n\n\t\tif display:\n\t\t\tself.__printInstruction(old_ip, instr)\n\n\t\t# converts arguments to values\n\t\tfor i in xrange(2, len(instr)):\n\t\t\tinstr[i] = self.__value(instr[i])\n\n\t\tmnem = instr[0]\n\t\tdest = None\n\t\tcycles = instructionCycles(mnem)\n\n\t\t# opcodes that return a value\n\t\tif mnem == 'mov':\n\t\t\tdest = instr[2]\n\t\telif mnem == '+':\n\t\t\tdest = instr[2] + instr[3]\n\t\telif mnem == '-':\n\t\t\tdest = instr[2] - instr[3]\n\t\telif mnem == '*':\n\t\t\tdest = instr[2] * instr[3]\n\t\telif mnem == '/':\n\t\t\t# for negative numbers / does not have same result as Java\n\t\t\t# work around\n\t\t\tdest = abs(instr[2]) / abs(instr[3]) * sign(instr[2]) * sign(instr[3])\n\t\telif mnem == '<':\n\t\t\tdest = int(instr[2] < instr[3])\n\t\telif mnem == '<=':\n\t\t\tdest = int(instr[2] <= instr[3])\n\t\telif mnem == '=':\n\t\t\tdest = int(instr[2] == instr[3])\n\t\telif mnem == 'not':\n\t\t\tdest = ~(instr[2])\n\t\telif mnem == 'loadl':\n\t\t\tdest = instr[2]\n\t\telif mnem == 'load':\n\t\t\tdest = self.memory.loadWord(instr[2] + instr[3])\n\t\telif mnem == 'loadb':\n\t\t\tdest = self.memory.loadByte(instr[2] + instr[3])\n\n\t\t# opcodes that do not return a value\n\t\telif mnem == 'jump':\n\t\t\tself.__ip = instr[2]\n\t\telif mnem == 'jumpf':\n\t\t\tif not instr[2]:\n\t\t\t\tself.__ip = instr[3]\n\t\t\telse:\n\t\t\t\tcycles = 1\n\t\telif mnem == 'jumpt':\n\t\t\tif 
instr[2]:\n\t\t\t\tself.__ip = instr[3]\n\t\t\telse:\n\t\t\t\tcycles = 1\n\t\telif mnem == 'store':\n\t\t\tself.memory.storeWord(instr[2], address=instr[3] + instr[4])\n\t\telif mnem == 'storeb':\n\t\t\tself.memory.storeWord(instr[2], address=instr[3] + instr[4])\n\t\telif mnem == 'call':\n\t\t\tself.__call(instr[2])\n\t\telif mnem == 'return':\n\t\t\tself.__return()\n\t\telse:\n\t\t\traise errors.OpcodeError('Unknown mnemonic `%s`' % mnem)\n\n\t\t# stores destination\n\t\tif dest is not None:\n\t\t\tself.registers[instr[1]] = operand.normalize(dest)\n\t\t\t\n\t\tself.cycleCount += cycles", "title": "" }, { "docid": "fcc878ed7cc986b1e65a4565e026c0a0", "score": "0.572752", "text": "def exec_next_op(self):\n\n (opcode, argsin, argsout) = self.parse_op()\n assert(opcode in self.ops)\n\n # Execute opcode and push results to memory\n output = self.ops[opcode](\n *[self.mem[a] for a in argsin])\n for i, ptr in enumerate(argsout):\n self.mem[ptr] = output[i]\n\n # Handle status flags and move PC\n # pc_dirty is set by instructions which modify the program counter\n if self.reg[\"pc_dirty\"]:\n self.reg[\"pc_dirty\"] = False\n else:\n self.reg[\"pc\"] += len(argsin) + len(argsout) + 1", "title": "" }, { "docid": "9743042b50c3a142b220726c4ca82e92", "score": "0.5682735", "text": "def step(self):\n try:\n ip = self.getReg(self.REG_IP)\n self._decode().execute(self)\n except ProcessorError:\n self.setReg(self.REG_IP, ip)\n raise\n self.cycle += 1", "title": "" }, { "docid": "8c6fd79caec3101a41d9ad6fb233c9cd", "score": "0.56598914", "text": "def decode(self):\n pass", "title": "" }, { "docid": "8c6fd79caec3101a41d9ad6fb233c9cd", "score": "0.56598914", "text": "def decode(self):\n pass", "title": "" }, { "docid": "5d16761a8cfb0a7868720a745a7d6b8a", "score": "0.56528", "text": "def step_instruction(self):\n if not self.halted:\n # must be here or we get circular dependency issues\n from ..core.util.find_module import find_opcode_cls, valid_opcodes\n\n for op_str in valid_opcodes:\n op_class = find_opcode_cls(op_str)\n\n # We don't know this opcode, there's no module for it\n if op_class is None:\n print('Opcode {} is not known: skipping and continuing'.format(op_str))\n assert False\n continue\n\n # 10 comes from 2 bytes for the op and max 2 longs which are each 4 bytes\n # note: this currently has the edge case that it will fail unintelligibly\n # if encountered at the end of memory\n pc_val = self.get_program_counter_value()\n op = op_class.disassemble_instruction(self.memory.memory[pc_val:pc_val+10])\n if op is not None:\n op.execute(self)\n # done exeucting after doing an operation\n return", "title": "" }, { "docid": "0ee8c162737f8854830d7e0772369cd5", "score": "0.56509286", "text": "def _gen_instruction(self, instruction):\n #instruction = \"%s. 
%s\" % (self.instruction_number, instruction)\n self.instruction_list.append(instruction)\n #self.instruction_number += 1 ", "title": "" }, { "docid": "5fb93c7ce98fbb2fefda52acb08989bd", "score": "0.56504047", "text": "def _execute_opcode(self):\n\n self._lookup_opcode()\n self._program_counter += 2", "title": "" }, { "docid": "af5838910e8dbdcb9f4a9ea27d7db405", "score": "0.5645701", "text": "def execute():\n opcodes[R[4][:4]](R[4][4:])", "title": "" }, { "docid": "e5f4ae3981b88e681f274e7c3b6d20cf", "score": "0.56123835", "text": "def first_pass(self, line):\n \n if \"(\" and \")\" in line:\n symbol = line.strip(\"()\")\n self.symbols[symbol] = '{0:016b}'.format(self.INSTRUCTION)", "title": "" }, { "docid": "6890393a69eba44ea904d4dfae6dbd41", "score": "0.5572968", "text": "def next_code():\r\n start = bitcount // 8\r\n s = encoded[start:start+4]\r\n try:\r\n code = unpack('>I', s)[0]\r\n except Exception:\r\n code = unpack('>I', s + b'\\x00'*(4-len(s)))[0]\r\n code <<= bitcount % 8\r\n code &= mask\r\n return code >> shr", "title": "" }, { "docid": "df7300149e9bb321b83bbcd043fb67db", "score": "0.55612886", "text": "def _stepFSM(self):\n lastByte = self.data[-1:]\n if lastByte == b'\\n' or lastByte == b'\\r':\n packet = self.data[:-1]\n self.data = b''\n if packet != b'\\r' and packet != b'\\n' and packet != b'':\n self.packetAssembledCallback(packet)", "title": "" }, { "docid": "ac6aa039c3029eac9b6a182148b14b29", "score": "0.5560611", "text": "def decode(self, encoded):\n pass", "title": "" }, { "docid": "ad85f0bf24d02cfb0ef0e9bac5379970", "score": "0.5553704", "text": "def instruction(self, instruction):\r\n if self.irlengths is None:\r\n raise ChainNotProperlyDetected()\r\n \r\n start = sum(self.irlengths[self.current_part+1:])\r\n end = start + self.irlengths[self.current_part]\r\n \r\n for i in range(len(self.current_instructions)):\r\n if i >= start and i < end:\r\n self.current_instructions[i] = instruction & 1\r\n instruction >>= 1\r\n else:\r\n self.current_instructions[i] = 1", "title": "" }, { "docid": "bebc630d51c52e13597da503ad1e8a19", "score": "0.5500053", "text": "def processing_instruction(self, target, data):\n return", "title": "" }, { "docid": "75eae21453de92124f30b6c8fe034133", "score": "0.54891396", "text": "def DeserializeInstructions(self, frame, packing_instructions, huff):\n ops = []\n bb = BitBucket()\n bb.StoreBits(frame.GetAllBits())\n flags = 0\n #print 'DeserializeInstructions'\n while flags == 0:\n frame_len = bb.GetBits16() * 8 # in bits\n #print 'frame_len: ', frame_len\n frame_type = bb.GetBits8()\n #print 'frame_type: ', frame_type\n flags = bb.GetBits8()\n #print 'flags: ', flags\n stream_id = bb.GetBits32()\n #print 'stream_id: ', stream_id\n group_id = bb.GetBits8()\n #print 'group_id: ', group_id\n while frame_len > 8:\n bits_remaining_at_start = bb.BitsRemaining()\n try:\n opcode_val_and_op_count = bb.GetBits8()\n opcode_val = opcode_val_and_op_count >> 4\n op_count = (opcode_val_and_op_count & 0x0f) + 1\n opcode_description = g_opcode_to_op[opcode_val]\n opcode = opcode_description[0]\n fields = opcode_description[1:]\n for i in xrange(op_count):\n op = {'opcode': opcode}\n for field_name in g_packing_order:\n if not field_name in fields:\n continue\n (params, _, unpack_fn) = packing_instructions[field_name]\n val = unpack_fn(bb, params, huff)\n #print val\n op[field_name] = val\n #print \"BitsRemaining: %d (%d)\" % (bb.BitsRemaining(), bb.BitsRemaining() % 8)\n #print \"Deser %d\" % (bb.NumBits() - bb.BitsRemaining())\n #print op\n ops.append(op)\n 
bits_consumed = (bits_remaining_at_start - bb.BitsRemaining())\n #if not bits_consumed % 8 == 0:\n # print \"somehow didn't consume whole bytes...\"\n # print \"Bits consumed: %d (%d)\" % (bits_consumed, bits_consumed % 8)\n # raise StandardError()\n frame_len -= bits_consumed\n except:\n break\n #print 'ops: ', ops\n return (group_id, ops)", "title": "" }, { "docid": "71c01df723d2bce5f01b5a4a52c8924b", "score": "0.548154", "text": "def reassemble(data):", "title": "" }, { "docid": "0a3e9cf5ae64586ef40b1e5917d6d49f", "score": "0.54729736", "text": "def _decode_state(self, state):\n pass", "title": "" }, { "docid": "676cd0af0b92cb17630050c8919feaa0", "score": "0.5439482", "text": "def assemble(instructions,encoder=\"\"):\n\tif not silent:\n\t\timm.log(\"Opcode results : \")\n\t\timm.log(\"---------------- \")\n\tcnt=1\n\tcmdInput=\"\"\n\tallopcodes=\"\"\n\tencodecmd=\"\"\n\tencodebad=\"\"\n\tcurpos=0\n\t\n\tinstructions = instructions.replace('\"',\"\").replace(\"'\",\"\")\n\n\tsplitter=re.compile('#')\n\tinstructions=splitter.split(instructions)\n\tfor instruct in instructions:\n\t\ttry:\n\t\t\tinstruct = instruct.strip()\n\t\t\tassembled=imm.assemble(instruct)\n\t\t\tstrAssembled=\"\"\n\t\t\tfor assemOpc in assembled:\n\t\t\t\tif (len(hex(ord(assemOpc)))) == 3:\n\t\t\t\t\tsubAssembled = \"\\\\x0\"+hex(ord(assemOpc)).replace('0x','')\n\t\t\t\t\tstrAssembled = strAssembled+subAssembled\n\t\t\t\telse:\n\t\t\t\t\tstrAssembled = strAssembled+hex(ord(assemOpc)).replace('0x', '\\\\x')\n\t\t\tif len(strAssembled) < 30:\n\t\t\t\tif not silent:\n\t\t\t\t\timm.log(\" %s = %s\" % (instruct,strAssembled))\n\t\t\t\tallopcodes=allopcodes+strAssembled\n\t\t\telse:\n\t\t\t\tif not silent:\n\t\t\t\t\timm.log(\" %s => Unable to assemble this instruction !\" % instruct,highlight=1)\n\t\texcept:\n\t\t\tif not silent:\n\t\t\t\timm.log(\" Could not assemble %s \" % instruct)\n\t\t\tpass\n\tif not silent:\n\t\timm.log(\" Full opcode : %s \" % allopcodes)\n\treturn allopcodes\n\t# if (encoder == \"ascii\"):\n\t\t# imm.log(\"Encoding to ASCII...\")\n\t\t# imm.log(\"\")\n\t\t# encodeargs=[]\n\t\t# encodeargs.append(\"doencode\")\n\t\t# encodeargs.append(encodecmd)\n\t\t# encodeargs.append(allopcodes.replace('\\\\x',''))\n\t\t# encodeargs.append(encodebad)\n\t\t# doencode(encodeargs)", "title": "" }, { "docid": "a14c7121af8c13e8dd409c0ab6fc3771", "score": "0.5431002", "text": "def _process_decoder_input(self, target_data, char_to_code, batch_size):\n ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])\n dec_input = tf.concat([tf.fill([batch_size, 1], char_to_code['<GO>']), ending], 1)\n\n return dec_input", "title": "" }, { "docid": "fedad3164d04542966acccdade89032f", "score": "0.5420946", "text": "def put_instruction(self, cmd_string):\n self.instr_queue.put(cmd_string)", "title": "" }, { "docid": "57cc961e7f600dbd812e87b9f1847aa9", "score": "0.5402171", "text": "def parse(self, code):\n stripped_code = re.sub(instruction_regex,\"\", code)\n parse_current = None\n for char in stripped_code:\n prev = parse_current\n for t in instruction_types:\n if char == t.symbol:\n parse_current = t(prev)\n if isinstance(parse_current, RBracket):\n traceback = parse_current\n while True:\n if traceback == None:\n raise ParseException(\"No corresponding left bracket\")\n if isinstance(traceback, LBracket) and traceback.partner == None:\n parse_current.partner = traceback\n traceback.partner = parse_current\n break\n traceback = traceback.prev\n if prev == None:\n self.first_instruction = parse_current\n 
self.current_instruction = self.first_instruction", "title": "" }, { "docid": "453a48a13ebc1a56f53a18f33471489c", "score": "0.5394362", "text": "def step(self):\n ins = self.prog[self.pc]\n if ins.op == \"addr\":\n self.reg[ins.C] = self.reg[ins.A] + self.reg[ins.B]\n elif ins.op == \"addi\":\n self.reg[ins.C] = self.reg[ins.A] + ins.B\n elif ins.op == \"mulr\":\n self.reg[ins.C] = self.reg[ins.A] * self.reg[ins.B]\n elif ins.op == \"muli\":\n self.reg[ins.C] = self.reg[ins.A] * ins.B\n elif ins.op == \"banr\":\n self.reg[ins.C] = self.reg[ins.A] & self.reg[ins.B]\n elif ins.op == \"bani\":\n self.reg[ins.C] = self.reg[ins.A] & ins.B\n elif ins.op == \"borr\":\n self.reg[ins.C] = self.reg[ins.A] | self.reg[ins.B]\n elif ins.op == \"bori\":\n self.reg[ins.C] = self.reg[ins.A] | ins.B\n elif ins.op == \"setr\":\n self.reg[ins.C] = self.reg[ins.A]\n elif ins.op == \"seti\":\n self.reg[ins.C] = ins.A\n elif ins.op == \"gtir\":\n self.reg[ins.C] = int(ins.A > self.reg[ins.B])\n elif ins.op == \"gtri\":\n self.reg[ins.C] = int(self.reg[ins.A] > ins.B)\n elif ins.op == \"gtrr\":\n self.reg[ins.C] = int(self.reg[ins.A] > self.reg[ins.B])\n elif ins.op == \"eqir\":\n self.reg[ins.C] = int(ins.A == self.reg[ins.B])\n elif ins.op == \"eqri\":\n self.reg[ins.C] = int(self.reg[ins.A] == ins.B)\n elif ins.op == \"eqrr\":\n self.reg[ins.C] = int(self.reg[ins.A] == self.reg[ins.B])\n else:\n raise ValueError(f\"Unrecognized opcode '{ins.op}'\")\n self.pc += 1", "title": "" }, { "docid": "9eff067e1616b31a6346f95061a0a93a", "score": "0.53932285", "text": "def test_instruction_valid_decode_with_protocol_chars(self):\n # arg includes ARG_SEP, ELEM_SEP, INST_TERM and a white space.\n arg_protocol_chars = 'p,.; t'\n instruction_str = '4.args,8.hostname,%s.%s;' %\\\n (len(arg_protocol_chars), arg_protocol_chars)\n\n instruction_opcode = 'args'\n instruction_args = ('hostname', arg_protocol_chars)\n\n instruction = Instruction.load(instruction_str)\n\n self.assertEqual(instruction_str, instruction.encode())\n self.assertEqual(instruction_opcode, instruction.opcode)\n self.assertEqual(instruction_args, instruction.args)", "title": "" }, { "docid": "6aa64327cf507bf3de7eb36e99bfa926", "score": "0.5386389", "text": "def decode_instruction(address, data):\n\n is_single_operand_instruction = lambda x: (x >> 10) == 0b000100\n is_jump_instruction = lambda x: (x >> 13) == 0b001\n is_double_operand_instruction = lambda x: \\\n not is_single_operand_instruction(x) and not is_jump_instruction(x)\n\n # turn a list of BitVecVals and ints into a list of ints\n unBVV = lambda l: [simplify(x).as_long() if is_bv(x) else x for x in data]\n\n data = unBVV(data)\n instruction = int.from_bytes(data[:2], 'little')\n\n if is_single_operand_instruction(instruction):\n return decode_single_operand_instruction(address, data)\n elif is_jump_instruction(instruction):\n return decode_jump_instruction(address, data)\n elif is_double_operand_instruction(instruction):\n return decode_double_operand_instruction(address, data)\n else:\n raise ValueError( \\\n '0x{:x} does not look like a valid MSP430 instruction!'.format( \\\n instruction))", "title": "" }, { "docid": "7d20a5481dc88077993311a5fcd33ff6", "score": "0.538072", "text": "def load_str(self, code):\n\n bytecode = parse(code)\n self.stack += bytecode\n self.sp = len(self.stack)\n if self.ip is None:\n self.ip = 2", "title": "" }, { "docid": "c0a2e5f0e088a0cbd8ffae582ae27402", "score": "0.5370151", "text": "def parse(self):\n\t\tfor code in self.codes:\n\t\t\tpart=code.split()\n\t\t\tif 
len(part)==1: \n\t\t\t\tif part[0]==\"return\":\n\t\t\t\t\tself.asm_codes+=[\"\\n//\"+code]\n\t\t\t\t\tself.asm_codes+=self.C_return()\n\t\t\t\telse:\n\t\t\t\t\t# arithmetic commands\n\t\t\t\t\tself.asm_codes+=[\"\\n//\"+code]\n\t\t\t\t\tself.asm_codes+=self.C_arith(part[0])\n\t\t\telif part[0]==\"push\": # push command\n\t\t\t\tself.asm_codes+=[\"\\n//\"+code]\n\t\t\t\tself.asm_codes+=self.C_push(part[1:])\n\t\t\telif part[0]==\"pop\": # pop command\n\t\t\t\tself.asm_codes+=[\"\\n//\"+code]\n\t\t\t\tself.asm_codes+=self.C_pop(part[1:])\n\t\t\telif part[0]==\"label\":\n\t\t\t\tself.asm_codes+=[\"\\n//\"+code]\n\t\t\t\tlabel_=self.cur_funcname+\"$\"+part[1]\n\t\t\t\tself.asm_codes+=[\"(\"+label_+\")\"]\n\t\t\telif part[0]==\"goto\":\n\t\t\t\tself.asm_codes+=[\"\\n//\"+code]\n\t\t\t\tlabel_=self.cur_funcname+\"$\"+part[1]\n\t\t\t\tself.asm_codes+=[\"@\"+label_,\"0;JMP\"]\n\t\t\telif part[0]==\"if-goto\":\n\t\t\t\tself.asm_codes+=[\"\\n//\"+code]\n\t\t\t\tlabel_=self.cur_funcname+\"$\"+part[1]\n\t\t\t\tself.asm_codes+=[\"@SP\",\"AM=M-1\",\"D=M\",\"@\"+label_,\"D;JNE\"]\n\t\t\telif part[0]==\"function\":\n\t\t\t\tself.asm_codes+=[\"\\n//\"+code]\n\t\t\t\tself.asm_codes+=self.C_function(part[1:])\n\t\t\telif part[0]==\"call\":\n\t\t\t\tself.asm_codes+=[\"\\n//\"+code]\n\t\t\t\tself.asm_codes+=self.C_call(part[1:])\n\t\t\telse:\n\t\t\t\traise ValueError(\"illegal vm codes found\")", "title": "" }, { "docid": "d0f9ae30eb2da169966256273537d66c", "score": "0.53606045", "text": "def myin(self): # Opcode 20\n a = self.memory[self.index + 1] \n # TODO: might it not be a register?\n register = a % 32768\n if self.lineindex >= len(self.line):\n self.line = input(\"=> \")\n if self.line == \"go\":\n self.line = mud.solve_puzzle()\n self.line += '\\n'\n self.lineindex = 0\n self.registers[register] = ord(self.line[self.lineindex])\n self.lineindex += 1\n self.index += 2", "title": "" }, { "docid": "091e8bcff020bb7b56dbdaf0019f8c2f", "score": "0.53573906", "text": "def feed(self, byte):", "title": "" }, { "docid": "af0e7a993a63c0a3627267d3cfa506aa", "score": "0.5352369", "text": "def test_argument_parsing_serialization(self):\n bytecode = b'%c\\x01\\x02' % dis.opmap['STORE_FAST']\n instructions = Instruction.from_code(bytecode)\n\n self.assertEqual(len(instructions), 1)\n\n instruction = instructions[0]\n self.assertEqual(Instruction(dis.opmap['STORE_FAST'], 0x0201),\n instruction)\n self.assertEqual(instruction.as_bytes, bytecode)", "title": "" }, { "docid": "6e5b3d1ebed8f7f316aa5737b04ea5b8", "score": "0.5352349", "text": "def decode(self, machinecode: int):\n pass", "title": "" }, { "docid": "87eb060801d4d4f38db0ef3cebc01434", "score": "0.53476846", "text": "def handle_instruction(self, inst):\n\t\tdst = None\n\t\t\n\t\tif len(inst.operands) >= 1 and ('rsp' == inst.operands[0] or 'rbp' == inst.operands[0]):\n\t\t\t# Ignoring these\n\t\t\treturn\n\n\t\tif inst.operator.startswith('mov') or inst.operator.startswith('cvt'):\n\t\t\tdst, src = inst.operands[0], inst.operands[1]\n\t\t\tself._mov_inst(dst, src, inst)\n\n\t\telif inst.operator == 'add' or inst.operator == 'adc' or inst.operator == 'adox' or inst.operator == 'adcx' \\\n\t\t\t\tor inst.operator == 'addss':\n\t\t\tdst, src = inst.operands[0], inst.operands[1]\n\t\t\tself._add_inst(dst, src, inst)\n\n\t\telif inst.operator == 'sub':\n\t\t\tdst, src = inst.operands[0], inst.operands[1]\n\t\t\tself._sub_inst(dst, src, inst)\n\n\t\telif inst.operator == 'mul' or inst.operator == 'imul':\n\t\t\tif len(inst.operands) == 1:\n\t\t\t\tmultiplier = 
inst.operands[0]\n\t\t\t\tself._mul_inst_1_operand(multiplier, inst)\n\t\t\telif len(inst.operands) == 2:\n\t\t\t\tdst, src = inst.operands[0], inst.operands[1]\n\t\t\t\tself._mul_inst_2_operands(dst, src, inst)\n\n\t\telif inst.operator == 'shl' or inst.operator == 'sal':\n\t\t\tdst = inst.operands[0]\n\t\t\tsrc = None\n\t\t\tif len(inst.operands) > 1:\n\t\t\t\tsrc = inst.operands[1]\n\t\t\tself._shl_inst(dst, inst, src)\n\n\t\telif inst.operator == 'shr' or inst.operator == 'sar':\n\t\t\tdst = inst.operands[0]\n\t\t\tsrc = None\n\t\t\tif len(inst.operands) > 1:\n\t\t\t\tsrc = inst.operands[1]\n\t\t\tself._shr_inst(dst, inst, src)\n\n\t\telif 'xor' in inst.operator:\n\t\t\tdst, src = inst.operands[0], inst.operands[1]\n\t\t\tself._xor_inst(dst, src, inst)\n\n\t\telif inst.operator == 'and':\n\t\t\tdst, src = inst.operands[0], inst.operands[1]\n\t\t\tself._add_inst(dst, src, inst)\n\n\t\telif inst.operator == 'or':\n\t\t\tdst, src = inst.operands[0], inst.operands[1]\n\t\t\tself._or_inst(dst, src, inst)\n\n\t\telif inst.operator == 'not':\n\t\t\tdst = inst.operands[0]\n\t\t\tself._not_inst(dst, inst)\n\n\t\telif inst.operator == 'call':\n\t\t\tdst_address = inst.operands[0]\n\t\t\tdst = self._call_inst(dst_address, inst)\n\n\n\t\tif not Utilities.is_int(dst) and not self._registers_manager.is_register(dst) and dst:\n\t\t\tself._asm_function.set_type(dst, self._asm_function.get_type(dst))\n\n\t\treturn dst", "title": "" }, { "docid": "0b08b8061412d29e674e6137e9f308dd", "score": "0.53371245", "text": "def __init__(self):\n self.r = [0] * 8\n self.r[7] = 0xF4 # stack pointer, starts at index 244 in memory\n self.memory = [0] * 256\n self.PC = 0\n self.FL = [0] * 8\n self.instructions = {}\n self.instructions[0b10000010] = self.LDI\n self.instructions[0b01000111] = self.PRN\n self.instructions[0b00000001] = self.HLT\n self.instructions[0b10100010] = self.MUL\n self.instructions[0b10100000] = self.ADD\n self.instructions[0b01000101] = self.PUSH\n self.instructions[0b01000110] = self.POP\n self.instructions[0b01010000] = self.CALL\n self.instructions[0b00010001] = self.RET\n self.instructions[0b10100111] = self.CMP\n self.instructions[0b01010100] = self.JMP\n self.instructions[0b01010110] = self.JNE\n self.instructions[0b01010101] = self.JEQ\n self.instructions[0b10101000] = self.AND\n self.instructions[0b10101010] = self.OR\n self.instructions[0b10101011] = self.XOR\n self.instructions[0b01101001] = self.NOT\n self.instructions[0b10101100] = self.SHL\n self.instructions[0b10101101] = self.SHR\n self.instructions[0b10100100] = self.MOD\n self.instructions[0b10100110] = self.ADDI # chose the binary opcode value myself, might conflict with unimplemented instructions", "title": "" }, { "docid": "e6a82e0a2088ec5fe4c365ef8bc35f99", "score": "0.532976", "text": "def from_string(cls, token: str) -> Instruction:", "title": "" }, { "docid": "0fdf6067c709a50c2f94857d5e659c60", "score": "0.53145546", "text": "def decode_double_operand_instruction(address, data):\n instruction = int.from_bytes(data[:2], 'little') # decode instruction\n\n assert instruction <= 0xFFFF\n\n opcodes = {\n 0b0100: Opcode.MOV,\n 0b0101: Opcode.ADD,\n 0b0110: Opcode.ADDC,\n 0b0111: Opcode.SUBC,\n 0b1000: Opcode.SUB,\n 0b1001: Opcode.CMP,\n 0b1010: Opcode.DADD,\n 0b1011: Opcode.BIT,\n 0b1100: Opcode.BIC,\n 0b1101: Opcode.BIS,\n 0b1110: Opcode.XOR,\n 0b1111: Opcode.AND,\n }\n def get_opcode(instruction):\n raw = (instruction >> 12) & 0b1111\n assert raw in opcodes, 'Invalid opcode for double-operand instruction: {} @ {} (full: 
{})'.format(raw, address, data)\n return opcodes[raw]\n\n registers = {\n 0b0000: Register.R0,\n 0b0001: Register.R1,\n 0b0010: Register.R2,\n 0b0011: Register.R3,\n 0b0100: Register.R4,\n 0b0101: Register.R5,\n 0b0110: Register.R6,\n 0b0111: Register.R7,\n 0b1000: Register.R8,\n 0b1001: Register.R9,\n 0b1010: Register.R10,\n 0b1011: Register.R11,\n 0b1100: Register.R12,\n 0b1101: Register.R13,\n 0b1110: Register.R14,\n 0b1111: Register.R15,\n }\n get_source_register = lambda x: registers[(instruction >> 8) & 0b1111]\n # TODO: check dest registers are movable into (Not R3?)\n get_dest_register = lambda x: registers[instruction & 0b1111]\n\n normal_source_addressing_modes = {\n 0b00: AddressingMode.DIRECT,\n 0b01: AddressingMode.INDEXED,\n 0b10: AddressingMode.INDIRECT,\n 0b11: AddressingMode.AUTOINCREMENT,\n }\n r0_source_addressing_modes = {\n 0b00: AddressingMode.DIRECT,\n 0b01: AddressingMode.SYMBOLIC,\n 0b10: AddressingMode.INDIRECT,\n 0b11: AddressingMode.IMMEDIATE,\n }\n r2_source_addressing_modes = {\n 0b00: AddressingMode.DIRECT,\n 0b01: AddressingMode.ABSOLUTE,\n 0b10: AddressingMode.CONSTANT4,\n 0b11: AddressingMode.CONSTANT8,\n }\n r3_source_addressing_modes = {\n 0b00: AddressingMode.CONSTANT0,\n 0b01: AddressingMode.CONSTANT1,\n 0b10: AddressingMode.CONSTANT2,\n 0b11: AddressingMode.CONSTANTNEG1,\n }\n def get_source_addressing_mode(instruction, register):\n raw = (instruction >> 4) & 0b11\n if register == Register.R0:\n return r0_source_addressing_modes[raw]\n elif register == Register.R2:\n return r2_source_addressing_modes[raw]\n elif register == Register.R3:\n return r3_source_addressing_modes[raw]\n else:\n return normal_source_addressing_modes[raw]\n\n normal_dest_addressing_modes = {\n 0b0: AddressingMode.DIRECT,\n 0b1: AddressingMode.INDEXED,\n }\n r0_dest_addressing_modes = {\n 0b0: AddressingMode.DIRECT,\n 0b1: AddressingMode.SYMBOLIC,\n }\n r2_dest_addressing_modes = {\n 0b0: AddressingMode.DIRECT,\n 0b1: AddressingMode.ABSOLUTE,\n }\n def get_dest_addressing_mode(instruction, register):\n raw = (instruction >> 7) & 0b1\n if register == Register.R0:\n return r0_dest_addressing_modes[raw]\n elif register == Register.R2:\n return r2_dest_addressing_modes[raw]\n else:\n return normal_dest_addressing_modes[raw]\n\n widths = {\n 0b0: OperandWidth.WORD,\n 0b1: OperandWidth.BYTE,\n }\n get_operand_width = lambda x: widths[(x >> 6) & 0b1]\n\n def get_operand(data, addressing_mode, current_offset):\n n_bytes = 2\n\n modes_with_operands = { \\\n AddressingMode.IMMEDIATE, \\\n AddressingMode.INDEXED, \\\n AddressingMode.ABSOLUTE, \\\n AddressingMode.SYMBOLIC \\\n }\n if addressing_mode in modes_with_operands: # operand is in the instruction stream, extract\n raw = data[current_offset : current_offset + n_bytes]\n operand = int.from_bytes(raw, 'little')\n operand = BitVecVal(operand, 8 * n_bytes)\n return operand, n_bytes\n else: # no operand\n return None, 0\n\n opcode = get_opcode(instruction)\n source_register = get_source_register(instruction)\n dest_register = get_dest_register(instruction)\n source_addressing_mode = get_source_addressing_mode(instruction, source_register)\n dest_addressing_mode = get_dest_addressing_mode(instruction, dest_register)\n width = get_operand_width(instruction)\n\n current_offset = 2 # after the instruction\n source_operand, source_size = \\\n get_operand(data, source_addressing_mode, current_offset)\n current_offset += source_size # advance instruction stream by the # of bytes we pulled off\n dest_operand, dest_size = \\\n get_operand(data, 
dest_addressing_mode, current_offset)\n current_offset += dest_size\n\n return DoubleOperandInstruction(data[:current_offset], address, opcode, \\\n width, source_addressing_mode, source_register, source_operand, \\\n dest_addressing_mode, dest_register, dest_operand), current_offset", "title": "" }, { "docid": "9f3b09e7fbec85ec0b6e0dd34c060da5", "score": "0.5314034", "text": "def processPacket(self, packet):", "title": "" }, { "docid": "da8eb7681670fd40545126f462d06bb6", "score": "0.53041184", "text": "def decode(self):\r\n # We know the first 2 bytes are the opcode. The second two are the\r\n # block number.\r\n (self.blocknumber,) = struct.unpack(\"!H\", self.buffer[2:4])\r\n log.debug(\"decoding DAT packet, block number %d\" % self.blocknumber)\r\n log.debug(\"should be %d bytes in the packet total\"\r\n % len(self.buffer))\r\n # Everything else is data.\r\n self.data = self.buffer[4:]\r\n log.debug(\"found %d bytes of data\"\r\n % len(self.data))\r\n return self", "title": "" }, { "docid": "c1ffa03ef3bbe2ce6e655ddf37a75556", "score": "0.5267594", "text": "def cycle():\n\n\tglobal program_counter, accumulator\n\t\n\t# FETCH\n\tinstr = fetch()\n\t##trace(\"fetch: pc:\" + str(program_counter) + \" instr:\" + str(instr))\n\tprogram_counter = truncate(program_counter+1)\n\t\n\t# DECODE\n\toperator, operand = decode(instr)\n\n\t# EXECUTE\n\taccumulator = execute(operator, operand, accumulator)", "title": "" }, { "docid": "8fe594f07f2801bb1f937294577ce11d", "score": "0.52645606", "text": "def internal_interpret(self, code):\n if isinstance(code, types.ListType):\n \"\"\"fragment a list and execute each instruction\"\"\"\n if code:\n for item in code:\n self.internal_interpret(item)\n elif '\\n' in code:\n \"\"\"fragment a string and execute each instruction\"\"\"\n for single in [one.strip() for one in code.split('\\n')]:\n self.internal_interpret(single)\n elif code:\n try:\n self.depth += 1\n \"\"\"execute a single instruction\"\"\"\n if self.verbose:\n print code\n\n code = code.strip()\n if code[0] == '#':\n return self\n\n # get first character and the remaining string\n if self.extended_input != '':\n # The space prevents accidental token appending.\n code = self.extended_input + ' ' + code\n\n first, rest = code[:1], code[1:]\n # eliminate inline comment after instruction\n rest = (rest.split('#')[0] if '#' in rest else rest).strip()\n if rest != '':\n t = rest[-1]\n if t in '|,':\n self.extended_input = first + rest\n return self\n\n self.extended_input = ''\n # execute interpreter instruction\n self.internal_execute(first, rest)\n self.iteration += 1\n except Exception as e:\n print e\n finally:\n self.depth -= 1\n return self", "title": "" }, { "docid": "57ba56e9a6b38f6fd1cb3871a539746e", "score": "0.52601606", "text": "def decoder11(**kwargs):\n return Decoder(DecoderBasicBlock, [1, 1, 1], **kwargs)", "title": "" }, { "docid": "6632845890e104c6c5f1e1de1dd81a33", "score": "0.52504754", "text": "def run():\n i = 1\n while running:\n #Fetch\n print(\"Cycle: \" + str(i) + \" ==> \" + \"FETCH\")\n printRegisters()\n fetch()\n\n #Decode\n print(\"Cycle: \" + str(i) + \" ==> \" + \"DECODE\")\n printRegisters()\n instuction = decode()\n\n #Execute\n print(\"Cycle: \" + str(i) + \" ==> \" + \"EXECUTE\")\n printRegisters()\n print(\"Decoded Instruction: \" + instuction )\n #inputCheck()\n execute()\n i += 1\n print(\"\\n\")", "title": "" }, { "docid": "89576cc92ce14c37cde4f1bc8354a77c", "score": "0.5238553", "text": "def __init__(self, remote_node: thymiodirect.connection.RemoteNode, src: 
str):\n\n self.remote_node = remote_node\n self.src = src\n\n self.instr = {\n \"dc\": {\n \"num_args\": -1\n },\n \"equ\": {\n \"num_args\": 1\n },\n \"stop\": {\n \"code\": [0x0000]\n },\n \"push.s\": {\n \"num_args\": 1\n },\n \"push\": {\n \"num_args\": 1\n },\n \"load\": {\n \"num_args\": 1\n },\n \"store\": {\n \"num_args\": 1\n },\n \"load.ind\": {\n \"num_args\": 2\n },\n \"store.ind\": {\n \"num_args\": 2\n },\n \"neg\": {\n \"code\": [0x7000]\n },\n \"abs\": {\n \"code\": [0x7001]\n },\n \"bitnot\": {\n \"code\": [0x7002]\n },\n \"not\": {\n # empty\n },\n \"sl\": {\n \"code\": [0x8000]\n },\n \"asr\": {\n \"code\": [0x8001]\n },\n \"add\": {\n \"code\": [0x8002]\n },\n \"sub\": {\n \"code\": [0x8003]\n },\n \"mult\": {\n \"code\": [0x8004]\n },\n \"div\": {\n \"code\": [0x8005]\n },\n \"mod\": {\n \"code\": [0x8006]\n },\n \"bitor\": {\n \"code\": [0x8007]\n },\n \"bitxor\": {\n \"code\": [0x8008]\n },\n \"bitand\": {\n \"code\": [0x8009]\n },\n \"eq\": {\n \"code\": [0x800a]\n },\n \"ne\": {\n \"code\": [0x800b]\n },\n \"gt\": {\n \"code\": [0x800c]\n },\n \"ge\": {\n \"code\": [0x800d]\n },\n \"lt\": {\n \"code\": [0x800e]\n },\n \"le\": {\n \"code\": [0x800f]\n },\n \"or\": {\n \"code\": [0x8010]\n },\n \"and\": {\n \"code\": [0x8011]\n },\n \"jump\": {\n \"num_args\": 1\n },\n \"jump.if.not\": {\n \"num_args\": 2\n },\n \"do.jump.when.not\": {\n \"num_args\": 2\n },\n \"dont.jump.when.not\": {\n \"num_args\": 2\n },\n \"emit\": {\n \"num_args\": 3\n },\n \"callnat\": {\n \"num_args\": 1\n },\n \"callsub\": {\n \"num_args\": 1\n },\n \"ret\": {\n \"code\": [0xe000]\n },\n }\n\n def resolve_symbol(a, defs: Dict[str, int], required: bool) -> int:\n\n def resolve_def(name: str) -> int:\n if not required:\n return 0\n if re.match(\"^(0x[0-9a-f]+|[0-9]+)$\", name, flags=re.I):\n return int(name, 0)\n if name not in defs:\n raise Exception(f'Unknown symbol \"{name}\"')\n return defs[name]\n\n if type(a) is str:\n # eval\n val = 0\n minus = False\n offset = 0\n while offset < len(a):\n r = re.match(r\"(\\+|-|[._a-z0-9]+)\", a[offset:], re.I)\n if r is None:\n raise Exception(\"Syntax error\")\n s = r.group()\n if s == \"+\":\n minus = False\n elif s == \"-\":\n minus = True\n else:\n val += -resolve_def(s) if minus else resolve_def(s)\n offset += len(s)\n return val\n\n return a\n\n def def_to_code(instr: str) -> Callable:\n def register(fun):\n self.instr[instr][\"to_code\"] = fun\n return fun\n return register\n\n @def_to_code(\"dc\")\n def to_code_dc(pc: int, args: List[Union[int, str]], label: str, defs: Dict[str, int], phase: int, line: int) -> List[int]:\n return [\n resolve_symbol(a, defs, phase == 1) & 0xffff\n for a in args\n ]\n\n @def_to_code(\"equ\")\n def to_code_equ(pc, args, label, defs, phase, line):\n if label is None:\n raise Exception(f'No label for pseudo-instruction \"equ\" (line {line})')\n if defs is not None:\n defs[label] = resolve_symbol(args[0], defs, phase == 1)\n label = None\n return []\n\n @def_to_code(\"push.s\")\n def to_code_push_s(pc, args, label, defs, phase, line):\n arg = resolve_symbol(args[0], defs, phase == 1)\n if arg >= 0x1000 or -arg > 0x1000:\n raise Exception(f\"Small integer overflow (line {line})\")\n return [0x1000 | arg & 0xfff]\n\n @def_to_code(\"push\")\n def to_code_push(pc, args, label, defs, phase, line):\n arg = resolve_symbol(args[0], defs, phase == 1)\n return [0x2000, arg & 0xffff]\n\n @def_to_code(\"load\")\n def to_code_load(pc, args, label, defs, phase, line):\n arg = resolve_symbol(args[0], defs, phase == 1)\n if arg < 0 
or arg >= 0x1000:\n raise Exception(f\"Data address out of range (line {line})\")\n return [0x3000 | arg & 0xfff]\n\n @def_to_code(\"store\")\n def to_code_store(pc, args, label, defs, phase, line):\n arg = resolve_symbol(args[0], defs, phase == 1)\n if arg < 0 or arg >= 0x1000:\n raise Exception(f\"Data address out of range (line {line})\")\n return [0x4000 | arg & 0xfff]\n\n @def_to_code(\"load.ind\")\n def to_code_load_ind(pc, args, label, defs, phase, line):\n arg = resolve_symbol(args[0], defs, phase == 1)\n if arg < 0 or arg >= 0x1000:\n raise Exception(f\"Data address out of range (line {line})\")\n size_arg = resolve_symbol(args[1], defs, phase == 1)\n return [0x5000 | arg & 0xfff, size_arg & 0xffff]\n\n @def_to_code(\"store.ind\")\n def to_code_store_ind(pc, args, label, defs, phase, line):\n arg = resolve_symbol(args[0], defs, phase == 1)\n if arg < 0 or arg >= 0x1000:\n raise Exception(f\"Data address out of range (line {line})\")\n size_arg = resolve_symbol(args[1], defs, phase == 1)\n return [0x6000 | arg & 0xfff, size_arg & 0xffff]\n\n @def_to_code(\"not\")\n def to_code_not(pc, args, label, defs, phase, line):\n raise Exception(f'Unary \"not\" not implemented in the VM (line {line})')\n\n @def_to_code(\"jump\")\n def to_code_jump(pc, args, label, defs, phase, line):\n arg = resolve_symbol(args[0], defs, phase == 1)\n return [0x9000 | (arg - pc) & 0xfff]\n\n @def_to_code(\"jump.if.not\")\n def to_code_jump_if_not(pc, args, label, defs, phase, line):\n test_instr = self.instr[args[0]] if args[0] in self.instr else None\n if (test_instr is None\n or \"code\" not in test_instr\n or len(test_instr[\"code\"]) != 1\n or (test_instr[\"code\"][0] & 0xf000) != 0x8000):\n raise Exception(f'Unknown op \"{args[0]}\" for jump.if.not (line {line})')\n arg = resolve_symbol(args[1], defs, phase == 1)\n return [0xa000 | (test_instr[\"code\"][0] & 0xff), (arg - pc) & 0xffff]\n\n @def_to_code(\"do.jump.when.not\")\n def to_code_do_jump_when_not(pc, args, label, defs, phase, line):\n test_instr = self.instr[args[0]] if args[0] in self.instr else None\n if (test_instr is None\n or \"code\" not in test_instr\n or len(test_instr[\"code\"]) != 1\n or (test_instr[\"code\"][0] & 0xf000) != 0x8000):\n raise Exception(f'Unknown op \"{args[0]}\" for do.jump.when.not (line {line})')\n arg = resolve_symbol(args[1], defs, phase == 1)\n return [0xa100 | (test_instr[\"code\"][0] & 0xff), (arg - pc) & 0xffff]\n\n @def_to_code(\"dont.jump.when.not\")\n def to_code_dont_jump_when_not(pc, args, label, defs, phase, line):\n test_instr = self.instr[args[0]] if args[0] in self.instr else None\n if (test_instr is None\n or \"code\" not in test_instr\n or len(test_instr[\"code\"]) != 1\n or (test_instr[\"code\"][0] & 0xf000) != 0x8000):\n raise Exception(f'Unknown op \"{args[0]}\" for dont.jump.when.not (line {line})')\n arg = resolve_symbol(args[1], defs, phase == 1)\n return [0xa300 | (test_instr[\"code\"][0] & 0xff), (arg - pc) & 0xffff]\n\n @def_to_code(\"emit\")\n def to_code_emit(pc, args, label, defs, phase, line):\n id = resolve_symbol(args[0], defs, phase == 1)\n if id < 0 or id >= 0x1000:\n raise Exception(f\"Event id out of range (line {line})\")\n addr = resolve_symbol(args[1], defs, phase == 1)\n size = resolve_symbol(args[2], defs, phase == 1)\n return [0xb000 | id & 0xfff, addr & 0xffff, size & 0xffff]\n\n @def_to_code(\"callnat\")\n def to_code_callnat(pc, args, label, defs, phase, line):\n arg = resolve_symbol(args[0], defs, phase == 1)\n if arg < 0 or arg >= 0x1000:\n raise Exception(f\"Native call 
id out of range (line {line})\")\n return [0xc000 | arg & 0xfff]\n\n @def_to_code(\"callsub\")\n def to_code_callsub(pc, args, label, defs, phase, line):\n arg = resolve_symbol(args[0], defs, phase == 1)\n if arg < 0 or arg >= 0x1000:\n raise Exception(f\"Subroutine address out of range (line {line})\")\n return [0xd000 | arg & 0xfff]", "title": "" }, { "docid": "a7f698d38fd07a4e6a13868f4406259b", "score": "0.52328694", "text": "def init_decoder(self):", "title": "" }, { "docid": "39350d846544f71c2d9b453d0afad8b8", "score": "0.52155054", "text": "def emit_decoder_code(self):\n self.decode_preamble()\n for f in self.fields:\n self.decode_emit_one_field(f)\n self.decode_epilogue()", "title": "" }, { "docid": "8bba94b144184779a9dfe18a870acc6c", "score": "0.5214159", "text": "def decode(self):\n NotImplementedError", "title": "" }, { "docid": "796e127e7720bd4aa98b2db4a40dc53b", "score": "0.52110445", "text": "def decode_op(self,pos):\n i=pos\n v=self._memory[CODE][pos]\n try:\n s=OPS[v]\n except IndexError:\n s='NOP'\n if (s in ('PUSH','JMF','JMB','ST','LD','LDM','STM','LDP','STP','JZ','JNZ')) and (i<(MAX_MEM-1)):\n i+=1\n v=self._memory[CODE][i]\n s=s+' '+str(v)\n i+=1\n return (s,i)", "title": "" }, { "docid": "3357da89d2be133e3f9a27de2b64b703", "score": "0.5196791", "text": "def _step_op_decoder(self, step, memory_state,\n controller_state=None, controller_hiddens=None):\n\n last_read_vectors = memory_state[6] # read values from memory\n pre_output, interface, nn_state = None, None, None\n\n if self.dual_controller:\n controller=self.controller2\n else:\n controller=self.controller\n alphas = None\n # compute outputs from controller\n if controller.has_recurrent_nn:\n if not self.use_emb_decoder:\n if self.pointer_mode==1:\n step2 = tf.reshape(step, [-1, self.input_encoder_size])\n elif self.sampled_loss_dim > 0:\n step2 = tf.one_hot(tf.argmax(step, axis=-1), depth=self.output_size)\n else:\n step2 = tf.reshape(step, [-1, self.output_size])\n else:\n step2 = step\n # attention\n\n if self.attend_dim>0:\n values = utility.pack_into_tensor(controller_hiddens,axis=1)\n value_size = self.hidden_controller_dim\n if self.use_mem:\n value_size = self.word_size\n # values = controller_hiddens.gather(tf.range(self.sequence_length))\n encoder_outputs = \\\n tf.reshape(values, [self.batch_size, -1, value_size]) # bs x Lin x h\n v = tf.reshape(tf.matmul(tf.reshape(encoder_outputs, [-1, value_size]), self.U_a),\n [self.batch_size, -1, self.attend_dim])\n\n\n if self.use_mem:\n v+= tf.reshape(\n tf.matmul(tf.reshape(last_read_vectors, [-1, self.read_heads_decode*self.word_size]), self.V_a),\n [self.batch_size, 1, self.attend_dim])\n\n if self.nlayer>1:\n try:\n ns=controller_state[-1][-1]\n print('multilayer state include c and h')\n except:\n ns = controller_state[-1]\n print('multilayer state include only h')\n else:\n ns = controller_state[-1]\n print('single layer')\n print(ns)\n v += tf.reshape(\n tf.matmul(tf.reshape(ns, [-1, self.hidden_controller_dim]), self.W_a),\n [self.batch_size, 1, self.attend_dim]) # bs.Lin x h_att\n print('state include only h')\n\n v = tf.reshape(tf.tanh(v), [-1, self.attend_dim])\n eijs = tf.matmul(v, tf.expand_dims(self.v_a, 1)) # bs.Lin x 1\n eijs = tf.reshape(eijs, [self.batch_size, -1]) # bs x Lin\n alphas = tf.nn.softmax(eijs)\n # exps = tf.exp(eijs)\n # alphas = exps /(tf.reshape(tf.reduce_sum(exps, 1), [-1, 1])+1e-2) # bs x Lin\n\n if self.pointer_mode!=1 and not self.use_mem:\n att = tf.reduce_sum(encoder_outputs * tf.expand_dims(alphas, 2), 1) # bs x h x 1\n att 
= tf.reshape(att, [self.batch_size, self.hidden_controller_dim]) # bs x h\n step2 = tf.concat([step2, att], axis=-1) # bs x (decoder_input_size + h)\n\n pre_output, interface, nn_state = controller.process_input(step2, last_read_vectors, controller_state)\n\n else:\n pre_output, interface = controller.process_input(step, last_read_vectors)\n\n # memory_matrix isthe copy of memory for reading process later\n # do the write first\n if self.write_protect:\n usage_vector, write_weighting, memory_matrix, link_matrix, precedence_vector \\\n =memory_state[1], memory_state[4], memory_state[0], memory_state[3], memory_state[2]\n\n else:\n usage_vector, write_weighting, memory_matrix, link_matrix, precedence_vector = self.memory.write(\n memory_state[0], memory_state[1], memory_state[5],\n memory_state[4], memory_state[2], memory_state[3],\n interface['write_key'],\n interface['write_strength'],\n interface['free_gates'],\n interface['allocation_gate'],\n interface['write_gate'],\n interface['write_vector'],\n interface['erase_vector']\n )\n\n # then do the read, read after write because the write weight is needed to produce temporal linklage to guide the reading\n read_weightings, read_vectors = self.memory.read(\n memory_matrix,\n memory_state[5],\n interface['read_keys'],\n interface['read_strengths'],\n link_matrix,\n interface['read_modes'],\n )\n if self.pointer_mode!=1:\n fout = controller.final_output(pre_output, read_vectors) # bs x output_size\n else:\n # if self.use_mem and self.attend_dim==0:\n # # pointer with mem\n # values = utility.pack_into_tensor(controller_hiddens, axis=1)# bs x Lin x mem_size\n # # values = controller_hiddens.gather(tf.range(0,self.sequence_length)) #write_weights of encoder\n # encoder_outputs = \\\n # tf.reshape(values, [self.batch_size, -1, self.words_num]) # bs x Lin x mem_size\n # fout = controller.final_output_pointer(read_weightings, encoder_outputs)\n # elif self.attend_dim > 0:\n # # pointer without mem\n fout = controller.final_output_pointer(None, alphas)\n fout = self.pointer_weight(fout, controller_state, read_vectors, step2, alphas)\n\n\n return [\n # report new memory state to be updated outside the condition branch\n memory_matrix, # 0\n\n # neccesary for next step to compute memory stuffs\n usage_vector, # 1\n precedence_vector, # 2\n link_matrix, # 3\n write_weighting, # 4\n read_weightings, # 5\n read_vectors, # 6\n\n # the final output of dnc\n fout, # 7\n\n # the values public info to outside\n interface['read_modes'], # 8\n interface['allocation_gate'], # 9\n interface['write_gate'], # 10\n\n # report new state of RNN if exists, neccesary for next step to compute inner controller stuff\n nn_state if nn_state is not None else tf.zeros(1), # 11\n ]", "title": "" }, { "docid": "c0781861c6957a08f42721a886a47dd4", "score": "0.5191288", "text": "def decode(self, input_line):\n raise NotImplementedError(\"Abstract method\")", "title": "" }, { "docid": "42b16c7bdbd4c07de35e78dffdfc65d1", "score": "0.5190531", "text": "def _process_raw(self, data):\n self.packet += data\n\n # Trim data off the front until we find a SOF character\n while self.packet[0] != self.SOF:\n self.packet.pop(0)\n\n # Process the packet, loading data into self.incoming\n while self.packet:\n cmd = Command(self.packet[1])\n\n # If the packet contains an EOF character, then read in the\n # full packet and store it in the incoming buffer\n try:\n p, self.packet = self.packet[2:].split(bytearray([self.EOF]), 1)\n except ValueError:\n break\n else:\n self.incoming.append(\n (cmd, 
self._interpret(cmd, self._decode(p))))", "title": "" }, { "docid": "6a125ba21cbfb1795b92a04266e2fab2", "score": "0.5188027", "text": "def test_instruction_valid_encode(self):\n instruction_str = '4.args,8.hostname,4.port,4.1984;'\n instruction_opcode = 'args'\n instruction_args = ('hostname', 'port', 1984)\n\n instruction = Instruction('args', 'hostname', 'port', 1984)\n\n self.assertEqual(instruction_str, instruction.encode())\n self.assertEqual(instruction_opcode, instruction.opcode)\n self.assertEqual(instruction_args, instruction.args)", "title": "" }, { "docid": "54c5b8b3d435b821eda9f069c246bef4", "score": "0.5187366", "text": "def _decode_control(self):\n subop = self.val(24, 4)\n if subop == 0:\n if self.val(23, 1):\n return _Jcc(Asm('JR', ArgAddrIReg(self, 6)), self)\n return _Jcc(Asm('JR'), self)\n if subop == 1:\n return AsmOp('RETI', self)\n if subop == 2:\n return AsmOp('RESP', (ArgReg16(self, 17), ArgReg16(self, 20), ), self)\n if subop == 8:\n return _Jcc(Asm('J', ArgAddrJ(self, 6, 18)), self)\n if subop == 9:\n return _Jcc(Asm('CALL', ArgAddrJ(self, 6, 18)), self)\n if subop == 0xa:\n asm = AsmOp('JMPI', ArgAddrJ(self, 6, 18), self)\n post_mod = ArgAddrIJ(self)\n if post_mod.post_mod:\n asm.args.append(post_mod)\n return asm\n if (subop & 0xc) == 0x4:\n args = (ArgAddrJ(self, 6, 20), ArgRegLoop(self), )\n return AsmOp('LOOP', args, self)\n if subop == 0xb:\n argsy= (ArgRegFull(self, 6), ArgRegFull(self, 0), )\n mvy = Asm('MVY', argsy)\n argsx = [ArgRegFull(self, 18), ArgRegFull(self, 12), ]\n return AsmOp('MVX', argsx, mvy, self)\n if subop == 0xd:\n return AsmOp('HALT', self)\n\n return Todo(\"OpControl %s\" % hex(subop))", "title": "" }, { "docid": "a4399980997d5dd65f28b1a8b2ed9ccc", "score": "0.5174804", "text": "def fetch():\n R[4] = InstrSet[bin2dec(R[5])]\n R[5] = dec2bin(bin2dec(R[5]) + 1)", "title": "" }, { "docid": "b62e886e746a7561c31a3787e019d8d1", "score": "0.51655346", "text": "def decode(self, tgt, enc_output):\n output = tgt\n for i in range(self.num_stacks):\n output = self.decoder_stack[i](output, enc_output)\n\n return output", "title": "" }, { "docid": "9ff1bc23d1492766afe10bfa89443603", "score": "0.5150475", "text": "def decode(self):\r\n raise NotImplementedError, \"Abstract method\"", "title": "" }, { "docid": "5bd4c9e2d576b0084ba4960a62948a88", "score": "0.51474684", "text": "def decode(self, outputs):\n raise NotImplementedError()", "title": "" }, { "docid": "00f107f71a80b82b71b0192ae1eb6e45", "score": "0.51318973", "text": "def rebuild(self, raw_packet):\n [dst, src, self.type] = struct.unpack('!6s6sH', raw_packet[:14])\n self.data = raw_packet[14:]\n self.src = binascii.hexlify(src)\n self.dst = binascii.hexlify(dst)", "title": "" }, { "docid": "2cfecfd7f3f67d504c2755dd490f6c2f", "score": "0.51309407", "text": "def et16_misc(self, word):\n opcode = get_field(word, 5, 11)\n op1 = get_field(word, 8, 11)\n if op1 == 0xf:\n self.pc = self.et16_misc_ifthen(word) \n elif op1 == 0xe:\n log(\"{:#x} bkpt {:#x} instruction encountered\".format(\n self.pc, get_field(word, 0, 7)))\n # we could call self.set_break here if the go command needs it\n # then we pretend we set it.\n self.pc += 2\n elif (op1 == 0xc) or (op1 == 0xd): #pop\n rlist = (get_field(word, 8,8) << 15) | get_field(word, 0, 7)\n count, regs = self.reg_list(rlist)\n if count == 0:\n log(\"No registers specified in pop, ignoring\")\n self.pc += 2\n return self.pc \n offset = count*4\n next_item = 4\n base_val = self.read_reg(SP)\n write_back_val = base_val + offset\n \n return_address = self.pc 
+2 # usually, unless we pop it out of regs\n for reg in regs:\n val = self.read_memory_int(base_val, 4, signed = True)\n self.write_reg(reg, val)\n if reg == 15:\n return_address = val\n log(\"popped = {:08x} from {:08x} into {:s}\".format(val, \n base_val,\n get_reg(reg)))\n base_val += next_item \n \n self.write_reg(SP, write_back_val) \n #self.write_reg(PC, return_address)\n self.pc = return_address\n return self.pc\n \n elif (op1 == 1) or (op1 == 3) or (op1 == 9) or (op1 == 0xb): # cb\n if self.in_it_block(): raise Code_Error(bna)\n nonzero = get_field(word, 11, 11)\n rn = get_field(word, 0, 2)\n valrn = self.read_reg(rn)\n rniszero = (valrn == 0)\n if nonzero ^ rniszero:\n imm32 = (get_field(word, 9, 9) << 7) | (get_field(word, 3, 7) << 1)\n dest = self.pc + 2 + imm32\n else:\n dest = self.pc + 2\n log_result(dest)\n self.pc = dest\n return self.pc \n elif op1 == 2:\n self.pc = self.et16_misc_extend(word)\n elif (op1 == 4) or (op1 == 5): # push\n rlist = (get_field(word, 8,8) << 14) | get_field(word, 0, 7)\n count, regs = self.reg_list(rlist)\n if count == 0:\n log(\"No registers specified in push, ignoring\")\n self.pc += 2\n return self.pc \n offset = count*4\n next_item = 4\n base_val = self.read_reg(SP) - offset\n write_back_val = base_val\n \n for reg in regs:\n val = self.read_reg(reg)\n self.write_memory(base_val, 4, val)\n log(\"pushed = {:08x} from {:s} to {:08x}\".format(val, \n get_reg(reg),\n base_val))\n base_val += next_item \n \n self.write_reg(SP, write_back_val)\n self.pc += 2\n elif op1 == 0xa:\n self.pc = self.et16_misc_rev(word)\n elif (opcode & 0x7c) == 4: # actually sub sp, #imm\n self.pc = self.et16_add_sp_imm(word) \n elif (opcode & 0x7c) == 0: # add sp, #imm\n self.pc = self.et16_add_sp_imm(word)\n elif opcode == 0x33:\n self.pc = self.et16_misc_cps(word) \n return self.pc", "title": "" }, { "docid": "8098eb3d30c5e8266d8c89092b0e8dc4", "score": "0.51303726", "text": "def interpret():\r\n pass", "title": "" }, { "docid": "574672202311acd7962f0acce7057289", "score": "0.51271194", "text": "def intcode_runner(data,input_list):\n #Local helper functiton\n def get(mode,value):\n \"\"\"\n Gets the data based on:\n * mode 0: position d[x]\n * mode 1: intermediate x\n \"\"\"\n if mode == 0:\n return data[value]\n elif mode == 1:\n return value\n ptr = 0\n output_list = []\n while range(10):\n # Unpack the instruction and load/save registers\n # Append zeroes for default args\n arg = \"0000\" + str(data[ptr])\n inst = int(arg[-2]+arg[-1])\n param = [int(d) for d in arg[:-2]]\n param.reverse()\n\n if inst == 1: # Add\n a,b,ret = data[ptr+1:ptr+4]\n A,B,_ = param[:3]\n data[ret] = get(A,a) + get(B,b)\n ptr += 4\n\n elif inst == 2: # Multiply\n a,b,ret = data[ptr+1:ptr+4]\n A,B,_ = param[:3]\n data[ret] = get(A,a) * get(B,b)\n ptr += 4\n\n elif inst == 3: # Get input\n ret = data[ptr+1]\n data[ret] = input_list.pop(0)\n ptr += 2\n\n elif inst == 4: # Save output\n ret = data[ptr+1]\n RET = param[0]\n output_list.append(get(RET,ret))\n ptr += 2\n\n elif inst == 5: # Jump if true\n comp,ret = data[ptr+1:ptr+3]\n COMP,RET = param[:2]\n if get(COMP,comp) != 0:\n ptr = get(RET,ret)\n else:\n ptr += 3\n\n elif inst == 6: # Jump if false\n comp,ret = data[ptr+1:ptr+3]\n COMP,RET = param[:2]\n if get(COMP,comp) == 0:\n ptr = get(RET,ret)\n else:\n ptr += 3\n\n elif inst == 7: # less than\n a,b,ret = data[ptr+1:ptr+4]\n A,B,_ = param[:3]\n if get(A,a) < get(B,b):\n data[ret] = 1\n else:\n data[ret] = 0\n ptr += 4\n\n elif inst == 8: # equals\n a,b,ret = data[ptr+1:ptr+4]\n A,B,_ = 
param[:3]\n if get(A,a) == get(B,b):\n data[ret] = 1\n else:\n data[ret] = 0\n ptr += 4\n\n elif inst == 99: # Halt\n break\n pass\n\n else: # Undefined inst, terminate\n break\n return output_list", "title": "" }, { "docid": "44644f01cf5665d68c7656ff329549da", "score": "0.51239216", "text": "def disassemble(bytecode):\n reader = JavaFileReader(io.BytesIO(bytecode))\n offset = 0\n instructions = []\n while offset < len(bytecode):\n opcode = reader.read_u8()\n offset += 1\n args = []\n for arg_type in op_to_arg_types[opcode]:\n if arg_type == \"i8\":\n arg = reader.read_i8()\n offset += 1\n elif arg_type == \"idx8\":\n arg = reader.read_u8()\n offset += 1\n elif arg_type == \"idx16\":\n arg = reader.read_u16()\n offset += 2\n else:\n raise NotImplementedError(arg_type)\n args.append(arg)\n instruction = Instruction(opcode, args)\n logger.debug(\"Loaded %s\", instruction)\n instructions.append(instruction)\n return instructions", "title": "" }, { "docid": "45dd716cb4419e802c871ca8d99e5b50", "score": "0.5119497", "text": "def __init__(self, code):\n self.code = code\n self.current_instruction = None\n self.first_instruction = None\n self.current_cell = Node()\n self.parse(code)", "title": "" }, { "docid": "447f08178b4686e39a8caf0679725286", "score": "0.5115138", "text": "def translate(self, instruction):\n try:\n trans_instrs = self._translate(instruction)\n except NotImplementedError:\n unkn_instr = self._builder.gen_unkn()\n unkn_instr.address = instruction.address << 8 | (0x0 & 0xff)\n trans_instrs = [unkn_instr]\n\n self._log_not_supported_instruction(instruction)\n except:\n self._log_translation_exception(instruction)\n\n raise\n\n # Some sanity check....\n for instr in trans_instrs:\n try:\n check_operands_size(instr, self._arch_info.architecture_size)\n except:\n logger.error(\n \"Invalid operand size: %s (%s)\",\n instr,\n instruction\n )\n\n raise\n\n return trans_instrs", "title": "" }, { "docid": "3ef39b5590f8dfd72ef33f07c9639e8d", "score": "0.5102498", "text": "def _step_op_decoder(self, time, step, memory_state,\n controller_state=None, controller_hiddens=None):\n\n last_read_vectors = memory_state[6] # read values from memory\n pre_output, interface, nn_state = None, None, None\n\n if self.dual_controller:\n controller=self.controller2\n else:\n controller=self.controller\n alphas = None\n # compute outputs from controller\n if controller.has_recurrent_nn:\n if not self.use_emb_decoder:\n step2 = tf.reshape(step, [-1, self.output_size])\n else:\n step2 = step\n # attention\n\n if self.attend_dim>0:\n values = utility.pack_into_tensor(controller_hiddens,axis=1)\n value_size = self.hidden_controller_dim\n if self.use_mem:\n value_size = self.word_size\n # values = controller_hiddens.gather(tf.range(self.sequence_length))\n encoder_outputs = \\\n tf.reshape(values, [self.batch_size, -1, value_size]) # bs x Lin x h\n v = tf.reshape(tf.matmul(tf.reshape(encoder_outputs, [-1, value_size]), self.U_a),\n [self.batch_size, -1, self.attend_dim])\n\n\n if self.use_mem:\n v+= tf.reshape(\n tf.matmul(tf.reshape(last_read_vectors, [-1, self.read_heads_decode*self.word_size]), self.V_a),\n [self.batch_size, 1, self.attend_dim])\n\n if self.nlayer>1:\n try:\n ns=controller_state[-1][-1]\n print('multilayer state include c and h')\n except:\n ns = controller_state[-1]\n print('multilayer state include only h')\n else:\n ns = controller_state[-1]\n print('single layer')\n print(ns)\n v += tf.reshape(\n tf.matmul(tf.reshape(ns, [-1, self.hidden_controller_dim]), self.W_a),\n [self.batch_size, 1, 
self.attend_dim]) # bs.Lin x h_att\n print('state include only h')\n\n v = tf.reshape(tf.tanh(v), [-1, self.attend_dim])\n eijs = tf.matmul(v, tf.expand_dims(self.v_a, 1)) # bs.Lin x 1\n eijs = tf.reshape(eijs, [self.batch_size, -1]) # bs x Lin\n alphas = tf.nn.softmax(eijs)\n\n if not self.use_mem:\n att = tf.reduce_sum(encoder_outputs * tf.expand_dims(alphas, 2), 1) # bs x h x 1\n att = tf.reshape(att, [self.batch_size, self.hidden_controller_dim]) # bs x h\n step2 = tf.concat([step2, att], axis=-1) # bs x (decoder_input_size + h)\n\n pre_output, interface, nn_state = controller.process_input(step2, last_read_vectors, controller_state)\n\n else:\n pre_output, interface = controller.process_input(step, last_read_vectors)\n\n # memory_matrix is the copy of memory for the reading process later\n # do the write first\n if self.write_protect:\n usage_vector, write_weighting, memory_matrix, link_matrix, precedence_vector \\\n =memory_state[1], memory_state[4], memory_state[0], memory_state[3], memory_state[2]\n\n else:\n usage_vector, write_weighting, memory_matrix, link_matrix, precedence_vector = self.memory.write(\n memory_state[0], memory_state[1], memory_state[5],\n memory_state[4], memory_state[2], memory_state[3],\n interface['write_key'],\n interface['write_strength'],\n interface['free_gates'],\n interface['allocation_gate'],\n interface['write_gate'],\n interface['write_vector'],\n interface['erase_vector']\n )\n\n # then do the read; it comes after the write because the write weighting is needed to produce the temporal linkage that guides the reading\n\n read_weightings, read_vectors = self.memory.read(\n memory_matrix,\n memory_state[5],\n interface['read_keys'],\n interface['read_strengths'],\n link_matrix,\n interface['read_modes'],\n )\n\n fout = controller.final_output(pre_output, read_vectors) # bs x output_size\n\n\n\n return [\n # report new memory state to be updated outside the condition branch\n memory_matrix, # 0\n\n # necessary for the next step to compute the memory state\n usage_vector, # 1\n precedence_vector, # 2\n link_matrix, # 3\n write_weighting, # 4\n read_weightings, # 5\n read_vectors, # 6\n\n # the final output of dnc\n fout, # 7\n\n # values made public to the outside\n interface['read_modes'], # 8\n interface['allocation_gate'], # 9\n interface['write_gate'], # 10\n\n # report new state of RNN if it exists, necessary for the next step to compute the inner controller state\n nn_state if nn_state is not None else tf.zeros(1), # 11\n ]", "title": "" }, { "docid": "66a0813c61198a87d6d1d0531760bbec", "score": "0.50910425", "text": "def disassemble(memory, address):\n\topcode = memory.loadByte(address)\n\tdecoded = _opcodes[opcode]\n\tif decoded is None:\n\t\traise errors.OpcodeError('Invalid opcode 0x%02X' % opcode)\n\n\t# mnemonic\n\tinstruction = [decoded[0]]\n\n\t# destination\n\tif decoded[1] is not None:\n\t\tinstruction.append(decoded[1]())\n\telse:\n\t\tinstruction.append(None)\n\n\t# arguments\n\tfor arg in decoded[2:]:\n\t\tinstruction.append(arg())\n\n\t# loads address/values from memory\n\taddress += 1\n\tif instruction[1] is not None:\n\t\taddress = instruction[1].load(memory, address)\n\n\tfor index in xrange(2, len(instruction)):\n\t\taddress = instruction[index].load(memory, address)\n\n\t\t# for labels, also read names\n\t\tif isinstance(instruction[index], operand.Label):\n\t\t\tinstruction[index].label = str(\n\t\t\t\tmemory.locationToLabel(instruction[index].address))\n\n\treturn address, instruction", "title": "" }, { "docid": "91786223318fc48b14685d16bf8c290b", "score": 
"0.5090864", "text": "def run(self):\n while self.running:\n print self.instruction_mem[self.reg_pc]\n self.read_instr(self.instruction_mem[self.reg_pc])", "title": "" }, { "docid": "d13f53d6b1d688cc634e0b16b216f924", "score": "0.5090612", "text": "def execute(self):\r\n if self.I == \"1\" and self.OPR == \"111\":\r\n self.R = True\r\n elif self.I == \"0\" and self.OPR == \"111\":\r\n if self.MBR == self.Opcodes[\"HLT\"]:\r\n self._HLT()\r\n elif self.MBR == self.Opcodes[\"CLA\"]:\r\n self._CLA()\r\n elif self.MBR == self.Opcodes[\"CLE\"]:\r\n self._CLE()\r\n elif self.MBR == self.Opcodes[\"CMA\"]:\r\n self._CMA()\r\n elif self.MBR == self.Opcodes[\"CME\"]:\r\n self._CME()\r\n elif self.MBR == self.Opcodes[\"CIR\"]:\r\n self._CIR()\r\n elif self.MBR == self.Opcodes[\"CIL\"]:\r\n self._CIL()\r\n elif self.MBR == self.Opcodes[\"INC\"]:\r\n self._INC()\r\n elif self.MBR == self.Opcodes[\"SPA\"]:\r\n self._SPA()\r\n elif self.MBR == self.Opcodes[\"SNA\"]:\r\n self._SNA()\r\n elif self.MBR == self.Opcodes[\"SZA\"]:\r\n self._SZA()\r\n elif self.MBR == self.Opcodes[\"SZE\"]:\r\n self._SZE()\r\n else:\r\n if self.OPR == self.Opcodes[\"AND\"]:\r\n self._AND()\r\n elif self.OPR == self.Opcodes[\"ADD\"]:\r\n self._ADD()\r\n elif self.OPR == self.Opcodes[\"LDA\"]:\r\n self._LDA()\r\n elif self.OPR == self.Opcodes[\"STA\"]:\r\n self._STA()\r\n elif self.OPR == self.Opcodes[\"BUN\"]:\r\n self._BUN()\r\n elif self.OPR == self.Opcodes[\"BSA\"]:\r\n self._BSA()\r\n elif self.OPR == self.Opcodes[\"ISZ\"]:\r\n self._ISZ()\r\n else:\r\n self.F = False", "title": "" }, { "docid": "5eb0613e455c0d8039db59d5db55e837", "score": "0.50816274", "text": "def process_a_instruction(self):\n self.cmd_type = self.A_CMD\n token_type, self.sym = self.lexer.next_token()", "title": "" }, { "docid": "7fa0112aa9c30c95af5fd0367e7d9eef", "score": "0.50718343", "text": "def disas(self, className, methodName):\n cl = self.dex.getClass(className)\n m = self.dex.getMethodData(className + \"->\" + methodName)\n\n mCodeItem = m.getCodeItem()\n\n self.smaliInfos[\"registerCount\"] = mCodeItem.getRegisterCount()\n self.smaliInfos[\"argumentCount\"] = mCodeItem.getInputArgumentCount()\n\n logger.info(\"Disassembling %s->%s\" % (className, methodName))\n logger.info(self.smaliInfos)\n\n for ins in mCodeItem.getInstructions():\n self.smali[ins.getOffset()] = ins\n\n logger.debug(\"[%d (+%d)] %s %s\" % (ins.getOffset(), ins.getSize(), ins.getMnemonic(), \",\".join(\n map(lambda x: \"[%d,%d]\" % (x.getType(), x.getValue()), ins.getParameters()))))\n\n if ins.getMnemonic() == \"packed-switch\":\n switchTable = {}\n for sd in ins.getSwitchData().getElements():\n switchTable[sd.getKey()] = sd.getTarget()\n self.smaliInfos[\"switchTable_%d\" % ins.getParameters()[1].getValue()] = switchTable\n\n self.checkForInstructionsSupport()", "title": "" }, { "docid": "9d6c247ce5ac3e25b369b6fd18145150", "score": "0.50716174", "text": "def next_op(self):\n\n opcode = self.peek()\n self.ip += 1\n return opcode", "title": "" }, { "docid": "424c1cc1c85588324ae9b620935f32e8", "score": "0.5061991", "text": "def decode(data):\n raise NotImplementedError", "title": "" }, { "docid": "2339f80e168eae4eba5db95614597691", "score": "0.50598484", "text": "def first_pass(self):\n first_pass_output = [] #array of assembly_line objects\n f = open(self.FILE, \"r\")\n temp_file = open(self.FILE +'temp.txt', 'w')\n\n self.current_address = self.start_address = self.get_start_address()\n # current_address = self.start_address\n\n for line in f:\n # handle comments\n if line[0] 
== '.':\n continue\n\n # get parts of the instruction\n parts = TwoPassAssembler.get_parts(line)\n\n # check if mnemonic exist\n if not parts['mnemonic']:\n raise SyntaxError('Mnemonics must be provided ' + line)\n # if parts['mnemonic'] == 'CSECT':\n # # a new control section is defined\n # s = sp.Second_Pass(first_pass_output, self.inst_table, self.symbol_table,self.symbol_table_en, self.global_variables)\n # s.second_pass()\n # first_pass_output = []\n # return self.first_pass()\n # self.current_address = '0'\n # self.symbol_table[parts['label']] = self.current_address\n if parts['mnemonic'] == 'EXTDEF':\n for var in parts['operands']:\n self.external_defs[var] = var\n continue\n elif parts['mnemonic'] == 'EXTREF':\n for var in parts['operands']:\n self.external_refs[var] = var\n # check if a label exist save it to the symbol table\n if parts['label']:\n if parts['label'] in self.symbol_table:\n raise ValueError('Duplicate Label: ', parts['label'] + parts['mnemonic'])\n\n if parts['mnemonic'] == 'EQU':\n self.current_label = parts['label']\n else:\n self.symbol_table[parts['label']] = self.current_address\n\n if \"operands\" not in parts:\n parts[\"operands\"] = [\"\"]\n\n # checking if the operand is a literal\n if re.search(\"^=([a-zA-Z]\\\"[a-zA-Z0-9]+\\\")\", parts['operands'][0]):\n self.literal_table[parts['operands'][0]] = 0\n\n temp_line = \"{} {} {} {}\\n\".format(self.current_address,\n parts['label'] if parts['label'] else '',\n parts['mnemonic'],\n ','.join(parts['operands']))\n\n temp_file.write(temp_line)\n\n assemb_line = Assembly_Line(self.current_address, parts['label'], parts['mnemonic'],\n parts['operands'])\n first_pass_output.append(assemb_line)\n\n current_inst_size = self.get_size(parts['mnemonic'], parts['operands'])\n current_address_int = int(self.current_address, 16)\n current_address_int += current_inst_size\n self.current_address = format(current_address_int, '04x')\n if parts['mnemonic'] == 'CSECT':\n self.current_address = '0'\n # self.current_address += self.get_size(parts['mnemonic'], parts['operands'])\n\n symbol_table = open(self.FILE + \".symbols\", 'w')\n for symbol, address in self.symbol_table.items():\n # print(symbol, address)\n type = 'A' if symbol in self.symbol_table_en and not self.symbol_table_en[symbol] else 'R'\n symbol_table.write(\"{}: {} {}\\n\".format(symbol, address, type))\n\n symbol_table.close()\n f.close()\n temp_file.close()\n return first_pass_output", "title": "" }, { "docid": "a74654a600b3cf6d95a7a12bf916ae85", "score": "0.5034656", "text": "def build_decoder(shift):\n ### TODO.\n return build_coder(-shift)", "title": "" }, { "docid": "48b48215582d8c11fbdd6e1318ddb899", "score": "0.50341165", "text": "def angr_insn_exec(state):\n ops = []\n for i in range(8):\n op = state.mem[state.inspect.instruction+i]\n if op.byte.resolved.uninitialized:\n break\n ops.append(op.byte.resolved.args[0])\n\n op_bytes = b\"\".join([bytes([x]) for x in ops])\n for i in md.disasm(op_bytes, state.inspect.instruction):\n print(\"0x%x:\\t%s\\t%s\" %(i.address, i.mnemonic, i.op_str))\n break", "title": "" }, { "docid": "6664a414f78c1fea59888fdf593324f5", "score": "0.5025582", "text": "def decode_one_step_scan(params, hps, keep_rate, state, inputs):\n decodes = decode_one_step(params, hps, keep_rate, state, inputs)\n state = {'c' : decodes['c'], 'f' : decodes['f'], 'g' : decodes['g'],\n 'ii' : decodes['ii']}\n return state, decodes", "title": "" }, { "docid": "7b92a748cfaa7469316c6cfcab6440aa", "score": "0.5018523", "text": "def process(self):\n 
self.init_cmd_params()\n self.lexer.next_command()\n token_type, value = self.lexer.cur_tkn\n if token_type == Lexer.OP and value == '@':\n self.process_a_instruction()\n elif token_type == Lexer.OP and value == '(':\n self.process_label()\n else:\n self.process_c_instruction(token_type, value)", "title": "" }, { "docid": "f69dfe82e5665c0f64b0f736fd6dd262", "score": "0.50113046", "text": "def decode(self, payload: bytes) -> None:\n offset = 0\n offset = self.decode_pizzas(payload, offset)\n offset = self.decode_snakes(payload, offset)", "title": "" }, { "docid": "34ab41fea11211966894dada04a25e88", "score": "0.50093794", "text": "def execute(self, instruction):\n # First fetch the instruction\n i = Instruction(self, instruction)\n # Then execute it\n i.execute()", "title": "" }, { "docid": "8e9d87c083728ad11da1b679838b41cc", "score": "0.500896", "text": "def disassemble(code):\n out = io.StringIO()\n dis(code, file=out)\n return out.getvalue()", "title": "" }, { "docid": "270453383dc2b3ff68bb87ab6efb08d0", "score": "0.5006663", "text": "def generate_code(self): \n\n for line in self._parser.get_line():\n # after the below step code_fragment in ['push argument', 'pop static', ...]\n if line.find('function') == -1 and line.find('call') == -1 and line.find('return') == -1:\n code_fragment = str.split(line, ' ', 1)\n \n # Memory Access Instruction\n if len(code_fragment) == 2:\n #assembly_code = CodeWriter.VM_STACK_COMMANDS[code_fragment[0]]\n assembly_code = CodeWriter.VM_STACK_COMMANDS[code_fragment]\n if code_fragment[0] in ['pop static', 'push static']:\n assembly_code = str.replace(assembly_code, 'X',\n self._parser.get_filename() + '.' + code_fragment[1])\n else:\n assembly_code = str.replace(assembly_code, 'X', code_fragment[1])\n self._file_object.write(assembly_code)\n # Arithmetic Instruction\n elif len(code_fragment) == 1:\n if code_fragment[0] in ['add', 'sub', 'or', 'not', 'and', 'neg']:\n assembly_code = CodeWriter.VM_STACK_COMMANDS[code_fragment[0]]\n self._file_object.write(assembly_code)\n elif code_fragment[0] in ['eq', 'gt', 'lt']:\n assembly_code = CodeWriter.VM_STACK_COMMANDS[code_fragment[0]]\n assembly_code = str.replace(assembly_code, '_J', '_' + str(self._jump_sequence))\n self._file_object.write(assembly_code)\n self._jump_sequence += 1\n else:\n \n code_fragment = str.split(line, ' ')\n assembly_code = CodeWriter.VM_STACK_COMMANDS[code_fragment[0]]\n if code_fragment[0] not in ['return', 'call'] :\n assembly_code = str.replace(assembly_code, 'X', code_fragment[1])\n elif code_fragment[0] == 'call':\n assembly_code = str.replace(assembly_code, '_X', '_' + str(self._jump_sequence))\n assembly_code = str.replace(assembly_code, '_FUNC', code_fragment[1])\n assembly_code = str.replace(assembly_code, 'N_ARG', code_fragment[2])\n self._jump_sequence += 1\n self._file_object.write(assembly_code)", "title": "" }, { "docid": "11a2303fd861ca89a854d62d8de762c4", "score": "0.5001202", "text": "def test_ori_assemble():\n\n # ORI.W #$FFFF,$1234\n data = bytearray.fromhex('0078FFFF1234')\n\n result = Ori.disassemble_instruction(data)\n\n assm = result.assemble()\n\n assert data == assm", "title": "" }, { "docid": "0533f5b754d8bb08d2ab3e5a8c63849a", "score": "0.5000867", "text": "def load_operand(self, raw_data, idx):\n if \"E\" in raw_data or \"e\" in raw_data:\n raw_data = raw_data.upper().split(\"E\")\n self.operands[idx] = raw_data[0]\n self.e_operands[idx] = raw_data[1]\n else:\n self.operands[idx] = raw_data\n self.e_operands[idx] = None", "title": "" }, { "docid": 
"adcc46c8e4793fa9489ad6009f316a8a", "score": "0.49945256", "text": "def decode(self, data):\n return None", "title": "" }, { "docid": "f8fcee8c7e5795fed1308baecea6fe6e", "score": "0.4992828", "text": "def exe_instruction(self):\n if self._cursor < 0 or self._cursor >= len(self._instructions):\n raise ValueError(\"Trying to execute an invalid line\")\n self._program_execution.append(self._cursor)\n current_inst = self._instructions[self._cursor]\n if current_inst[0] == \"jmp\":\n self._jump(current_inst[1])\n elif current_inst[0] == \"acc\":\n self._accumulate(current_inst[1])\n elif current_inst[0] == \"nop\":\n self._nop()\n else:\n raise ValueError(f\"Instruction {current_inst[0]} not known\")", "title": "" }, { "docid": "f47e8a83f7ec8817d86b2a6db0095d6f", "score": "0.49860635", "text": "def run_program(code):\n i = 0\n while True:\n instruction = code[i]\n if instruction == 99:\n break\n elif instruction == 1:\n code[code[i+3]] = code[code[i+1]] + code[code[i+2]]\n elif instruction == 2:\n code[code[i+3]] = code[code[i+1]] * code[code[i+2]]\n else:\n raise RuntimeError(\"Unknown instruction code\")\n i += 4\n return code", "title": "" }, { "docid": "732ae2976e18ad282644f298b520ecfb", "score": "0.496383", "text": "def getNext(self) -> ghidra.program.model.listing.Instruction:\n ...", "title": "" }, { "docid": "cb5fcd718714b8b48a2939d155df3273", "score": "0.49621803", "text": "def decompile(self):\n l=[]\n i=0\n while i<MAX_MEM:\n (s,i)=self.decode_op(i)\n l.append(s)\n # v=self._memory[CODE][i]\n # try:\n # s=OPS[v]\n # except IndexError:\n # s='NOP'\n # if (s in ('PUSH','JMF','JMB','ST','LD','LDM','STM','LDP','STP','JZ','JNZ')) and (i<(MAX_MEM-1)):\n # i+=1\n # v=self._memory[CODE][i]\n # s=s+' '+str(v)\n # l.append(s)\n # i+=1\n return l", "title": "" }, { "docid": "3aa5e8cbc5c27fbc6d61d8569e785282", "score": "0.49525633", "text": "def skip(self):\n self._decode()", "title": "" }, { "docid": "086f555dc31a24d3a8ef186cf0e97f1b", "score": "0.49498215", "text": "def stepi(self):\n if not self.loaded:\n log(\"Load program first\")\n return\n # This is the only place that emulate is called\n # Keep it that way or move the following into emulate.\n\n instr = self.read_memory_int((self.pc & address_mask), 4)\n return self.emulate(instr)", "title": "" }, { "docid": "0dda5b67614a1470a2bc089a37a15c89", "score": "0.49492738", "text": "def step_program(self) -> None:\n mem = self.mem\n ptr = self.ptr\n #print(f\"to-step {self}\")\n opcode = mem[ptr]\n if opcode == 1:\n op1 = mem[ptr+1]\n op2 = mem[ptr+2]\n trg = mem[ptr+3]\n mem[trg] = mem[op1] + mem[op2]\n self.ptr += 4\n elif opcode == 2:\n op1 = mem[ptr+1]\n op2 = mem[ptr+2]\n trg = mem[ptr+3]\n mem[trg] = mem[op1] * mem[op2]\n self.ptr += 4\n elif opcode == 99:\n self.state = self.HALTED\n else:\n raise(RuntimeError(f\"illegal opcode {opcode}\")) \n self.ctr += 1\n print(f\"stepped! {self}\")", "title": "" } ]
45a4339872939e50f1b9f5e7efc859c3
Return the metadata of the requested table
[ { "docid": "587eaf079abab28e435c2b2b70d0eb05", "score": "0.67164326", "text": "def table_info(request, name=None, get_table_size=False):\n\n try:\n if name is None:\n return JsonResponse({\n 'success': False,\n 'err': \"Invalid table name\"\n })\n\n t = mdl.Table.objects.filter(table_name=name)[0]\n\n if not t:\n return JsonResponse({'success': False, 'err': \"Table not found\"})\n\n t_props = dict()\n data = list()\n if len(t.tablefield_set.all()) > 0:\n data = [model_to_dict(tf) for tf in t.tablefield_set.all(\n ).order_by('-is_primary_key')]\n t_props[\"fields\"] = data\n this_tbl = model_to_dict(t)\n this_tbl[\"props\"] = t_props\n\n if get_table_size:\n schemas = getattr(settings, \"META_DB_SCHEMAS\", None)\n if not schemas:\n raise Exception('Schemas not configured')\n \n with get_schema_instance(schemas) as s:\n this_tbl[\"props\"][\"table_size\"] = s.get_table_info(schemas[0], name).get_table_size(s)\n\n\n return JsonResponse(this_tbl, safe=False)\n except Exception as e:\n return JsonResponse({\n 'success': False,\n 'err': str(e),\n 'errStack': traceback.format_exc()\n })", "title": "" } ]
[ { "docid": "3fb576fc72d1a4763e97ad9a7ca78b31", "score": "0.7701821", "text": "def _metadata_table(self):\n return _make_metadata_table(self)", "title": "" }, { "docid": "609a06854c7b48a6deecaa33488245e4", "score": "0.76902467", "text": "def get_table_metadata(self):\n return {\n 'num_items': self.table.item_count,\n 'primary_key_name': self.table.key_schema[0],\n 'status': self.table.table_status,\n 'bytes_size': self.table.table_size_bytes,\n 'global_secondary_indices': self.table.global_secondary_indexes\n }", "title": "" }, { "docid": "250283f2e2304e194f142db736060a20", "score": "0.7509559", "text": "def _metadata_table(self):\n\n return _make_metadata_table(self)", "title": "" }, { "docid": "ac606b0895784f3b7113cf0daffad326", "score": "0.739347", "text": "def metadata(self) -> TGResultSetMetaData:", "title": "" }, { "docid": "d8990c63eec002040613d5bef6f19baf", "score": "0.73146164", "text": "def query_tableinfo(self, table_name):\n if table_name in self.tables:\n self.query('pragma table_info({table_name:s});'.format(\n table_name=table_name))\n res = {'name': [], 'type': [], 'index': []}\n for row in self.results:\n res['index'].append(row[0])\n res['name'].append(row[1])\n res['type'].append(row[2])\n return res\n else:\n return None", "title": "" }, { "docid": "eddf6ccac08c357d7836a439deca44ac", "score": "0.7172757", "text": "def describe_table(self, table, fetch_through_pandas=True, fail_silently=False):\n\n sql_query = f\"\"\"SELECT name FROM sqlite_master WHERE type='{table}';\"\"\"\n return self.query(sql_query, fetch_through_pandas=fetch_through_pandas, fail_silently=fail_silently)", "title": "" }, { "docid": "64327d0e819a76a3572a6e819538ffbe", "score": "0.7112003", "text": "def metadataOfAttribute(self, table, attributeName):\n\t\ts=\"\"\"PRAGMA table_info(\"\"\"+table+\"\"\")\"\"\"\n\t\tcaracts=self.cursor.execute(s)\n\t\tfor caract in self.cursor:\n\t\t\tif caract[1]==attributeName:\n\t\t\t\treturn caract\n\t\treturn None", "title": "" }, { "docid": "11c9b1a8e253f7e3c31c5aa5238a6bfe", "score": "0.7060124", "text": "def describe_table(self, table, schema=\"public\", fetch_through_pandas=True, fail_silently=False):\n\n sql_query = f\"\"\"SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='{schema}' AND table_name='{table}'\"\"\"\n return self.query(sql_query, fetch_through_pandas=fetch_through_pandas, fail_silently=fail_silently)", "title": "" }, { "docid": "312763160c578ad95577a06e2a8726d5", "score": "0.70215464", "text": "def metadata(self):\n return sqlalchemy.MetaData()", "title": "" }, { "docid": "4a807efeb5040b45146551030e777449", "score": "0.6961712", "text": "def test_get_table_metadata(self):\n with tempfile.NamedTemporaryFile() as db_file:\n with closing(sqlite3.connect(db_file.name)) as connection:\n with closing(connection.cursor()) as cursor:\n cursor.execute(\n 'CREATE TABLE messages (id INTEGER, message TEXT)')\n\n database = Database(db_file.name)\n table = database['messages']\n schema = {column.name: type(column.type)\n for column in table.columns}\n self.assertDictEqual(\n schema,\n {'id': INTEGER, 'message': TEXT})", "title": "" }, { "docid": "bad5d6645ffac5078d4550713fa68df0", "score": "0.6899399", "text": "def get_table_info(session, topic_name=None, table_name=None, prefix=None):\n if topic_name is not None:\n try:\n mymeta = _table_info_topic_cache[(topic_name,prefix)]\n except KeyError:\n mymeta = session.query(\n models.RosSqlMetadata).filter_by(topic_name=topic_name,\n prefix=prefix,\n ).one()\n _table_info_topic_cache[(topic_name,prefix)] 
= mymeta\n _table_info_table_cache[(mymeta.table_name,prefix)] = mymeta\n if table_name is not None:\n assert mymeta.table_name == table_name\n else:\n assert table_name is not None\n try:\n mymeta = _table_info_table_cache[(table_name,prefix)]\n except KeyError:\n mymeta = session.query(\n models.RosSqlMetadata).filter_by(table_name=table_name,\n prefix=prefix,\n ).one()\n _table_info_table_cache[(table_name,prefix)] = mymeta\n _table_info_topic_cache[(mymeta.topic_name,prefix)] = mymeta\n\n assert mymeta.ros_sql_schema_version == models.SCHEMA_VERSION\n\n try:\n myts = _timestamp_info_cache[(mymeta.table_name,prefix)]\n except KeyError:\n myts = session.query(\n models.RosSqlMetadataTimestamps).filter_by( main_id=mymeta.id ).all()\n _timestamp_info_cache[(mymeta.table_name,prefix)] = myts\n\n MsgClass = util.get_msg_class(mymeta.msg_class_name)\n timestamp_columns = []\n for tsrow in myts:\n timestamp_columns.append((tsrow.column_base_name, tsrow.is_duration))\n try:\n mybackrefs = _backref_info_cache[(mymeta.table_name,prefix)]\n except KeyError:\n mybackrefs = session.query(models.RosSqlMetadataBackrefs).filter_by( main_id=mymeta.id ).all()\n _backref_info_cache[(mymeta.table_name,prefix)] = mybackrefs\n backref_info_list = []\n for backref in mybackrefs:\n backref_info_list.append( {'parent_field':backref.parent_field,\n 'child_table':backref.child_table,\n 'child_field':backref.child_field,\n })\n return {'class':MsgClass,\n 'top':mymeta.is_top,\n 'pk_name':mymeta.pk_name,\n 'table_name':mymeta.table_name,\n 'topic_name':mymeta.topic_name,\n 'prefix':mymeta.prefix,\n 'timestamp_columns':timestamp_columns,\n 'backref_info_list':backref_info_list,\n 'parent_id_name':mymeta.parent_id_name,\n }", "title": "" }, { "docid": "eb92ede1b559d8cb625437429fa83e15", "score": "0.68642867", "text": "def getTableInfo( dbConn, tableName ):\r\n query = \"pragma table_info(%s)\" % tableName\r\n c = dbConn.execute( query )\r\n r = c.fetchall()\r\n return r", "title": "" }, { "docid": "243b641190592a752eb31a8d0b181339", "score": "0.67788893", "text": "def describe(self, table):\n if table not in self.get_metadata().tables:\n print(\"Table not found: %s\" % table)\n return\n tbl = self.get_metadata().tables[table]\n\n def nullstr(nullable):\n return 'NULL' if nullable else 'NOT NULL'\n\n def namestr(c):\n return ('*%s' if c.primary_key else '%s') % c.name\n\n with pager() as out:\n items = ((namestr(c), c.type, nullstr(c.nullable))\n for c in tbl.columns)\n out.write(b'Columns' + b'\\n')\n asciitable.draw(\n FakedResult(sorted(items), 'Name Type Nullable'.split()),\n out, paginate=True,\n max_fieldsize=5000)\n out.write(b'\\n')\n out.write(b'Primary Key (*)\\n')\n out.write(b'---------------\\n')\n pk = ', '.join(c.name for c in tbl.columns if c.primary_key)\n out.write(b' ')\n if not pk:\n out.write(b'(None Found!)')\n else:\n out.write(pk.encode('utf8'))\n out.write(b'\\n\\n')\n out.write(b'Foreign Keys\\n')\n out.write(b'------------\\n')\n fks = self.get_metadata().foreign_keys(table)\n fk = None\n for fk in fks:\n out.write((' %s\\n' % str(fk)).encode('utf8'))\n if fk is None:\n out.write(b' (None Found)')\n out.write(('\\n\\nReferences to %s\\n' % table).encode('utf8'))\n out.write(b'--------------' + b'-' * len(table) + b'\\n')\n fks = self.get_metadata().fields_referencing(table)\n fk = None\n for fk in fks:\n out.write(b' ' + str(fk).encode('utf8') + b'\\n')\n if fk is None:\n out.write(b' (None found)\\n')\n out.write(b'\\n\\nIndexes\\n')\n\n def items():\n for idx in 
self.get_metadata().indexes(table):\n yield (idx.name, ', '.join(c.name for c in idx.columns),\n idx.unique)\n asciitable.draw(\n FakedResult(sorted(items()), 'Name Columns Unique'.split()),\n out, paginate=True, max_fieldsize=5000)", "title": "" }, { "docid": "91400717af77bba1241d879179dfb0b8", "score": "0.66180456", "text": "def get_table(self):\n return self.table", "title": "" }, { "docid": "8fecb3619cdde57c4046b0459bc1f41d", "score": "0.661317", "text": "def describe_table(self, table):\n try:\n sql = 'DESCRIBE %s;' % table\n return self.execute_command(sql)\n except Exception, ex:\n logging.error(ex)\n traceback.print_exc()", "title": "" }, { "docid": "27669e0f295af6118cfa9a313bd0f8e4", "score": "0.6568474", "text": "def get_metadata(self, table_name):\n # see if already downloaded\n\n\n if not table_name.startswith(\"NM_\"):\n path = \"api/v01/dataset/def.sdmx.json?\"\n query_params = {\"search\": \"*\"+table_name+\"*\"}\n else:\n path = \"api/v01/\" + table_name + \".def.sdmx.json?\"\n query_params = {}\n \n data = self.__fetch_json(path, query_params)\n\n # return empty if no useful metadata returned (likely table doesnt exist)\n if not data[\"structure\"][\"keyfamilies\"]:\n return\n\n # this is the nomis internal table name\n table = data[\"structure\"][\"keyfamilies\"][\"keyfamily\"][0][\"id\"]\n\n rawfields = data[\"structure\"][\"keyfamilies\"][\"keyfamily\"][0][\"components\"][\"dimension\"]\n fields = {}\n for rawfield in rawfields:\n field = rawfield[\"conceptref\"]\n\n fields[field] = {}\n\n # ignore when too many categories (i.e. geograpical ones)\n if field.upper() == \"CURRENTLY_RESIDING_IN\" or field.upper() == \"PLACE_OF_WORK\":\n continue\n\n # further query to get categories\n path = \"api/v01/dataset/\"+table+\"/\"+field+\".def.sdmx.json?\"\n #print(path)\n\n try:\n fdata = self.__fetch_json(path, {})\n except timeout:\n print(\"HTTP timeout requesting metadata for \" + table_name)\n return {}\n except (HTTPError, URLError):\n print(\"HTTP error requesting metadata for \" + table_name)\n return {}\n else:\n values = fdata[\"structure\"][\"codelists\"][\"codelist\"][0][\"code\"]\n #print(field+\":\")\n for value in values:\n # KEYs are stored as strings for json compatibility\n fields[field][value[\"value\"]] = value[\"description\"][\"value\"]\n\n # Fetch the geographies available for this table\n geogs = {}\n path = \"api/v01/dataset/\"+table+\"/geography/TYPE.def.sdmx.json?\"\n try:\n fdata = self.__fetch_json(path, {})\n except timeout:\n print(\"HTTP timeout requesting geography metadata for \" + table_name)\n except (HTTPError, URLError):\n print(\"HTTP error requesting geography metadata for \" + table_name)\n else:\n if fdata[\"structure\"][\"codelists\"]:\n values = fdata[\"structure\"][\"codelists\"][\"codelist\"][0][\"code\"]\n #print(values)\n for value in values:\n geogs[str(value[\"value\"])] = value[\"description\"][\"value\"]\n\n result = {\"nomis_table\": table,\n \"description\": data[\"structure\"][\"keyfamilies\"][\"keyfamily\"][0][\"name\"][\"value\"],\n \"fields\": fields,\n \"geographies\": geogs}\n\n # save a copy\n self.write_metadata(table_name, result)\n\n return result", "title": "" }, { "docid": "45c960ec428f0055f592255f23a026f1", "score": "0.65054077", "text": "def get_metadata():\r\n dbname = os.path.join(BASE_DIR, 'piSPEC.sqlite3')\r\n engine = create_engine('sqlite:///{}'.format(dbname), echo=False)\r\n metadata = MetaData(bind=engine)\r\n return metadata", "title": "" }, { "docid": "4f35d87ab635b4237c8fbef274312228", "score": 
"0.6465774", "text": "def get_table(self, table):\n # we query for ALL data within the table we specified\n query = \"SELECT * From [dbo].[\"+table+\"]\"\n\n # now send the query and connection parameters to 'read_sql' function\n data = read_sql(query, self.cnxn)\n\n # update the user\n print(\"{} table imported from {}. Type variable_name.head() to view, \"\n \"for example \\\"data.head()\\\".\".format(table, self.database))\n return data", "title": "" }, { "docid": "ce9525eab5f28786e98842b8a8084786", "score": "0.6456331", "text": "def _TData_result(self, table_name):\n sql_statement = f\"SELECT * FROM {table_name}\"\n\n columns = self._db.execute(\n f'pragma table_info({table_name})').fetchall()\n cloumns = [c[1] for c in columns]\n\n # execute shortcut\n result = self._db.execute(sql_statement).fetchall()\n df_dict = self._data_classify(result, cloumns)\n\n return df_dict", "title": "" }, { "docid": "5b11811f8c723b49988481b8b30740ca", "score": "0.6425894", "text": "def get_metadata():", "title": "" }, { "docid": "eb6189d514ddaabeef0dd60dda324de3", "score": "0.6409158", "text": "def get_table_info(self, table):\n desc = table.describe()\n status = desc['Table']['TableStatus']\n throughput = desc['Table']['ProvisionedThroughput']\n num_decreases = throughput['NumberOfDecreasesToday']\n read = throughput['ReadCapacityUnits']\n write = throughput['WriteCapacityUnits']\n return read, write, num_decreases, status", "title": "" }, { "docid": "8a66cfa8b89e4718c03575ef08d4e534", "score": "0.6388957", "text": "def get_metadata(self):\n metadata = pd.DataFrame(index=self._frame.columns)\n metadata.index.name = 'Columns'\n metadata['Role'] = self.role\n metadata['Type'] = self.type\n metadata['dtype'] = [] if len(metadata) == 0 else self._frame.dtypes.values\n return metadata", "title": "" }, { "docid": "4c6501d43d4ea8e12b237926bb362e0d", "score": "0.63580996", "text": "def get_metadata(self):\n resp = self.query(BaseQuery(metadata=dict()))\n # pylint: disable=no-member\n return DotAccessDict(resp.result).types", "title": "" }, { "docid": "da3666792a308e83674711e4ef292bd1", "score": "0.6341356", "text": "def describe_table(self, table, fetch_through_pandas=True, fail_silently=False):\n\n sql_query = f\"DESCRIBE {table}\"\n return self.query(sql_query, fetch_through_pandas=fetch_through_pandas, fail_silently=fail_silently)", "title": "" }, { "docid": "4eb5ceb495e0eb5c33d9067a2212b090", "score": "0.6296463", "text": "def describe_table(self, table_name):\n table = self._find_table(table_name)\n\n return DiscoDynamoDB._convert_table_to_dict(table)", "title": "" }, { "docid": "dd6f013e61718510a61607e1ddd44205", "score": "0.6282561", "text": "def get_table(table):\n g.tinydb = TinyDB(current_app.config['DATABASE'])\n person_table = g.tinydb.table(table)\n return person_table", "title": "" }, { "docid": "84db2fb3e53beee6fcbc6f24004ea7b0", "score": "0.627955", "text": "def get_metadata(engine):\n metadata = sqlalchemy.MetaData(bind=engine)\n metadata.reflect(bind=engine)\n return metadata", "title": "" }, { "docid": "4b47f38f3d4d4da1d37b8ac044b3075f", "score": "0.627714", "text": "def metadata() -> None:", "title": "" }, { "docid": "202d96be9bc4a69d68cdbb8065ba6612", "score": "0.62668556", "text": "def metadata_columns(self):\n return []", "title": "" }, { "docid": "bcbdd33b2cee96dceba830841d725d2e", "score": "0.62625545", "text": "def load_metadata(self, table_name):\n filename = self.cache_dir / (table_name + \"_metadata.json\")\n # if file not there, get from nomisweb\n if not 
os.path.isfile(str(filename)):\n if self.verbose: print(filename, \"not found, downloading...\")\n return self.get_metadata(table_name)\n else:\n if self.verbose: print(filename, \"found, using cached metadata...\")\n with open(str(filename)) as metafile:\n meta = json.load(metafile)\n\n return meta", "title": "" }, { "docid": "f3abe625d9d06b84ebc474eb97658f92", "score": "0.6247486", "text": "def table(self):\n return self._table", "title": "" }, { "docid": "b0b4548c00de23b4172ba63f4e91ce07", "score": "0.62358904", "text": "def get_metadata(self):\n if not self.connected:\n return model.Database()\n return self.metadata_accessor.get_metadata(\n self.engine, do_reflection=self.do_reflection)", "title": "" }, { "docid": "d7a16a164715df382b0a1fa4675d8c99", "score": "0.62244403", "text": "def getTableInfo(self, rowObject):\n try:\n return self.schema[rowObject.rowTableName]\n except KeyError:\n raise DBError(\"class %s was not registered with %s\" % (\n rowObject.__class__, self))", "title": "" }, { "docid": "8a5ded6e33a74f60e3b56eff81dbdf82", "score": "0.62216496", "text": "def metadata(self):\n pass", "title": "" }, { "docid": "7dc4f65bf388a762b45fb7488b5d9ba9", "score": "0.6202552", "text": "def getMetadata(url: str = URL_METADATA) -> pd.DataFrame:\n t = requests.get(url.format(ID=ID))\n soup = BeautifulSoup(t.text, \"html.parser\")\n\n # get the second table, containing the metadata\n metadata_table = soup.findAll(\n \"table\", {\"class\": \"table table-striped table-bordered table-condensed\"}\n )[-1]\n metadata_table = pd.read_html(str(metadata_table))[0]\n\n metadata_table = metadata_table.set_index([\"Field\"])\n\n # Check for validity\n if metadata_table.shape != (19, 1) or \"Last updated\" not in metadata_table.index:\n raise ValueError(\"Metadata has changed format.\")\n\n return metadata_table", "title": "" }, { "docid": "a0102f653530de87a0e764b126b64a1b", "score": "0.61975294", "text": "def get_metadata(self):\n return self.metadata", "title": "" }, { "docid": "6ae4ff619386eee68b71478e4f997011", "score": "0.61936593", "text": "def getTable(self):\n return self.__table", "title": "" }, { "docid": "4b7eecb77ff3a47ea3ab2556600204b9", "score": "0.61738837", "text": "def get_columns(self, table):\n if self.database:\n retval = self.data[table.lower()]\n return retval # [self.name] + retval\n else:\n return [\"*\"]", "title": "" }, { "docid": "e972c0ab33a9889ef8f30ca0f135d182", "score": "0.61654013", "text": "def metadata(self):", "title": "" }, { "docid": "ee53aba40c6d158c0b4a7a6d5bc8640d", "score": "0.6150339", "text": "def get_column_metadata(record_info_in: sdk.RecordInfo) -> dict:\n from snakeplane.helper_classes import AnchorMetadata\n\n metadata = AnchorMetadata()\n\n for field in record_info_in:\n metadata.add_column(\n field.name,\n field.type,\n size=field.size,\n source=field.source,\n scale=field.scale,\n description=field.description,\n )\n\n return metadata", "title": "" }, { "docid": "e75d51decd971b1f38ea2859dbad78c3", "score": "0.6080888", "text": "def get_metadata(self, key=None):\n with connect(self.dbpath) as conn:\n if key is None:\n return conn.metadata\n if key in conn.metadata.keys():\n return conn.metadata[key]\n return None", "title": "" }, { "docid": "5431266e77ef72077cd4556e4fd2f2d6", "score": "0.60769486", "text": "def get(self, **kwargs):\n\n table_list = self._dbeng.get_tables()\n df = pd.DataFrame()\n columns = kwargs.pop('columns', ['default'])\n fields = self.schema.get_display_fields(columns)\n tables = []\n\n for table in table_list:\n try:\n 
table_inst = self._get_table_sqobj(table)\n except ModuleNotFoundError:\n # ignore unknown tables\n continue\n\n info = {'table': table}\n info.update(table_inst.get_table_info(\n columns=['namespace', 'hostname', 'timestamp'],\n **kwargs))\n tables.append(info)\n\n df = pd.DataFrame.from_dict(tables)\n if df.empty:\n return df\n\n df = df.sort_values(by=['table']).reset_index(drop=True)\n cols = df.columns\n total = pd.DataFrame([['TOTAL', df['firstTime'].min(),\n df['lastTime'].max(),\n df['intervals'].max(),\n df['allRows'].sum(),\n df['namespaceCnt'].max(),\n df['deviceCnt'].max()]],\n columns=cols)\n df = pd.concat([df, total]).dropna().reset_index(drop=True)\n return df[fields]", "title": "" }, { "docid": "73fc56301fa90d02e702c758fbbf06ef", "score": "0.60726047", "text": "def tabletypeinfo(self):\n\t\tcur = self.begin()\n\t\tcur.gettabletypeinfo()\n\t\tself.commit(cur)\n\t\tself.display()", "title": "" }, { "docid": "b728b917ae0b4bc3cb3c656f602c0512", "score": "0.6069551", "text": "def table(self) -> str:\n return pulumi.get(self, \"table\")", "title": "" }, { "docid": "2042f783f44af96386eab85c94d51ce2", "score": "0.603869", "text": "def InitializeMetadata(self):\n self.Table('__metadata__', restricted=True, columns=Metadata.COLUMNS,\n keys=1, timeout=0)", "title": "" }, { "docid": "548d720e7049b1747330445181585ee4", "score": "0.60274416", "text": "def model_meta(self):\n try:\n column_value = getattr(self, self.PartitionableMeta.partition_column)\n except AttributeError:\n raise PartitionColumnError(\n model=self.__class__.__name__,\n current=self.PartitionableMeta.partition_column,\n allowed=self._meta.get_field_names()\n )\n\n pk = self._meta.primary_key\n\n return {\n 'table': self._meta.db_table,\n 'pk': list(pk.field_names) if isinstance(pk, CompositeKey) else pk.name,\n 'dialect': self._meta.database.__class__.__name__.lower().replace('database', ''),\n 'column_value': column_value,\n }", "title": "" }, { "docid": "c8144613ed22be8720b3edd9e4e161a9", "score": "0.601982", "text": "def db_table_structure(self, table: str) -> List[Tuple[int, str, str]]:\n res = self.__run_single_query(f\"PRAGMA table_info('{table}')\")\n return [(int(x[0]), str(x[1]), str(x[2])) for x in res]", "title": "" }, { "docid": "d938306b829603f77ccce0d944cb1c1c", "score": "0.6018642", "text": "def get_metadata(self):\n return self._metadata", "title": "" }, { "docid": "1dc3a89bbab7c806e9d09f629e01a7ad", "score": "0.6018245", "text": "def metadata(self) -> Mapping[str, str]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "1dc3a89bbab7c806e9d09f629e01a7ad", "score": "0.6018245", "text": "def metadata(self) -> Mapping[str, str]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "24e0cf849e5d87bb3017e6deb2cdb9a5", "score": "0.6011606", "text": "def getMetaData(self):\n return self._meta", "title": "" }, { "docid": "93518980734e9ff8539a7a6c66fcd752", "score": "0.6006502", "text": "def get_table(self, tablename):\n import pandas as pd\n api_format = ('https://{username}.carto.com/api/v2/sql?q='\n 'SELECT%20*%20FROM%20{tablename}&format=csv')\n request = api_format.format(\n username=self.cdb_username,\n tablename=tablename)\n return pd.read_csv(request)", "title": "" }, { "docid": "09dae97179b0c0a5311772323eb7f3a3", "score": "0.5991706", "text": "def column_metadata(self):\n return self._column_metadata", "title": "" }, { "docid": "77b90b16954bdd564707fcb38f313e83", "score": "0.59859216", "text": "def metaData(self):\n return {}", "title": "" }, { "docid": 
"77b90b16954bdd564707fcb38f313e83", "score": "0.59859216", "text": "def metaData(self):\n return {}", "title": "" }, { "docid": "06256fb09cdaf0815dfc006db6ae2f8b", "score": "0.59858465", "text": "def describe_table(self, name):\n return self.layer1.describe_table(name)", "title": "" }, { "docid": "653b893e27bac985e3cef35833675b4e", "score": "0.5979829", "text": "def GetMetadata(self):\n basic_data = super(Snowflake, self).GetMetadata()\n basic_data['warehouse'] = self.warehouse\n basic_data['database'] = self.database\n basic_data['schema'] = self.schema\n basic_data.update(self.client_interface.GetMetadata())\n return basic_data", "title": "" }, { "docid": "fc9fb90614e34854e76c26246b3c16d7", "score": "0.5971195", "text": "def metadata(self):\n if self._surface_dataset:\n return self._surface_dataset.metadata\n else:\n return {}", "title": "" }, { "docid": "63bd2a1f1016939ec64a70ad375897bd", "score": "0.5947179", "text": "def get_create_table_info(stream):\n pipe = Pipeline()\n\n pipe.append(InfoCreateTable())\n\n return pipe(stream)", "title": "" }, { "docid": "3d21f42c8e4ec748dd7ce93619a2c09f", "score": "0.5941311", "text": "def _get_dataset_metadata(self):\n raise NotImplementedError", "title": "" }, { "docid": "d5d4f0c5290a4960d5bfe6a034f26fe4", "score": "0.59393543", "text": "def SQL_statement(self, metadata):\n name = self.name\n column_schema = self.db_schema_attributes # Mandatory\n pk_schema = self.db_schema_pk\n unique_schema = self.db_schema_unique\n return parse_table(name, metadata, column_schema_list=column_schema, pk_schema=pk_schema,\n unique_schema=unique_schema)", "title": "" }, { "docid": "3878f7a16d73c0dda803475d445dc179", "score": "0.5937628", "text": "def __extract(self, tableName,tableTool):\n\n for keyName in tableIncludes[tableName]:\n try: keyval = tableTool.getcol(keyName)\n except RuntimeError: keyval = \"Undefined\"; pass\n self.metaDict[tableName+':'+keyName]= keyval\n tableTool.close()\n return", "title": "" }, { "docid": "dce9faffa98e20a39af867bf3ae0b5e3", "score": "0.5932356", "text": "def table(self, table):\n query = self.query()\n\n return query.from_(table)", "title": "" }, { "docid": "938e6c9314dd9cc13446cf1fa31fd411", "score": "0.5923885", "text": "def schema(self, table_name, raw=False):\n query = 'SELECT * FROM {0} LIMIT 0;'.format(table_name)\n output = self.query(query, verbose=True)\n fields = output.get('fields')\n if raw:\n return {key: fields[key]['type'] for key in fields}\n else:\n columns = ['Column name', 'Column type']\n rows = [(key, fields[key]['type']) for key in fields]\n self._print_table(rows, columns=columns, padding=[10, 5])\n return None", "title": "" }, { "docid": "b06810d8c09ac0aa1fc53ae4bea9e99f", "score": "0.5919636", "text": "def metadata(self) -> Optional[Any]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "2a9598496ad5c62ccf6138380634ce71", "score": "0.5902331", "text": "def _table_om(self, table):\n\n return ObjectMap({\n 'id': prepId(table['name']),\n 'title': table['name'],\n # The following properties will be updated on monitoring.\n # 'compaction': 'NONE',\n # 'enabled': 'true',\n # 'number_of_col_families': '',\n # 'col_family_block_size': ''\n })", "title": "" }, { "docid": "801021b776b04da66dda1cbc70f648f1", "score": "0.5895381", "text": "def metadata(self):\n return self._metadata", "title": "" }, { "docid": "801021b776b04da66dda1cbc70f648f1", "score": "0.5895381", "text": "def metadata(self):\n return self._metadata", "title": "" }, { "docid": "801021b776b04da66dda1cbc70f648f1", "score": 
"0.5895381", "text": "def metadata(self):\n return self._metadata", "title": "" }, { "docid": "801021b776b04da66dda1cbc70f648f1", "score": "0.5895381", "text": "def metadata(self):\n return self._metadata", "title": "" }, { "docid": "801021b776b04da66dda1cbc70f648f1", "score": "0.5895381", "text": "def metadata(self):\n return self._metadata", "title": "" }, { "docid": "801021b776b04da66dda1cbc70f648f1", "score": "0.5895381", "text": "def metadata(self):\n return self._metadata", "title": "" }, { "docid": "801021b776b04da66dda1cbc70f648f1", "score": "0.5895381", "text": "def metadata(self):\n return self._metadata", "title": "" }, { "docid": "801021b776b04da66dda1cbc70f648f1", "score": "0.5895381", "text": "def metadata(self):\n return self._metadata", "title": "" }, { "docid": "801021b776b04da66dda1cbc70f648f1", "score": "0.5895381", "text": "def metadata(self):\n return self._metadata", "title": "" }, { "docid": "1a2d80fdc2506215521989c0df36154e", "score": "0.5891515", "text": "def getTableDescriptor(self, table):\n pass", "title": "" }, { "docid": "93f263b9c93911746edf4c6d917c9f8a", "score": "0.58888984", "text": "def metadata(self) -> Union[DSMetada, None]:\n return self.metadata_json", "title": "" }, { "docid": "2570f39ffc0d87a12dd79837d8a2adfa", "score": "0.58847237", "text": "def funcGetMetadataCopy(self):\n\n\t\treturn copy.deepcopy(self._dictTableMetadata)", "title": "" }, { "docid": "9f28bb99b7f658cb37c553bd6b42139b", "score": "0.58736724", "text": "def test_metadata_column(mock_schema, mock_database):\n table_name = \"foo\"\n column_definition = [\n (\n \"foo\",\n DataType.STRING,\n ),\n (\"metadata\", DataType.INT),\n ]\n\n columns = [\n Column(\n name=name,\n dataType=type,\n )\n for name, type in column_definition\n ]\n\n table = Table(\n id=UUID(\"1f8c1222-09a0-11ed-871b-ca4e864bb16a\"),\n name=table_name,\n columns=columns,\n serviceType=DatabaseServiceType.BigQuery,\n )\n\n orm_table = ometa_to_sqa_orm(table, None)\n\n assert not orm_table.__table_args__.get(\"quote\")\n assert [\n name.lower() for name, _ in column_definition\n ] == orm_table.__table__.columns.keys()\n assert orm_table.__tablename__ == table_name\n assert orm_table.__table_args__[\"schema\"] == \"schema\"\n for name, _ in column_definition:\n assert hasattr(orm_table, name)", "title": "" }, { "docid": "f895baa107fe0e546f9c66feb46f420b", "score": "0.5869618", "text": "def metadata(self) -> Metadata:\n return self._metadata", "title": "" }, { "docid": "79392dfa541385cec87d5a1d34bf84dc", "score": "0.5868383", "text": "def get_sql_table( session, metadata, topic_name, prefix=None ):\n if prefix is None:\n mymeta = session.query(models.RosSqlMetadata).\\\n filter_by(topic_name=topic_name).one()\n else:\n mymeta = session.query(models.RosSqlMetadata).\\\n filter_by(topic_name=topic_name,prefix=prefix).one()\n return metadata.tables[mymeta.table_name]", "title": "" }, { "docid": "7137fe724c024c2f62f280434d3220d4", "score": "0.5868368", "text": "def metadata(self):\n meta = {}\n meta['name'] = self.name\n meta['id'] = self.id\n meta['family'] = self.family\n meta['hkl'] = self.parameters['hkl']\n if 'shiftindex' in self.parameters:\n meta['shiftindex'] = self.parameters['shiftindex']\n meta['cutboxvector'] = self.parameters['cutboxvector']\n \n return meta", "title": "" }, { "docid": "cfc99d940dd82bfd58b21703ce697d2b", "score": "0.58683497", "text": "def get_table(self, name):\n response = self.layer1.describe_table(name)\n return Table(self, response)", "title": "" }, { "docid": 
"039ac92a0dd925607d58b82ff7af3dde", "score": "0.5868139", "text": "def read_table(self, table_name):\n table_object = self.get_table_object(table_name)\n table = self.session.query(table_object)\n return table", "title": "" }, { "docid": "dae809111af92084c77822690fce294d", "score": "0.58650374", "text": "def get_metadata_from_FullModel(data):\n # fileheader metadata keys should be aligned with filename ones\n\n UUID = data.INSTANCE_ID.unique().tolist()[0]\n metadata = data.get_object_data(UUID).to_dict()\n metadata.pop(\"Type\", None) # Remove Type form metadata\n\n return metadata", "title": "" }, { "docid": "8eb6f131396ba32d56ddb93ab8ba67a1", "score": "0.58589625", "text": "def get_contract_metadata(self, contract_ticker):\n\n con = sqa.create_engine('mysql+mysqldb://root:@localhost/predictit_db').connect()\n sql_statement = \"select * from all_contracts_metadata where contract_ticker='{}' limit 1\".format(contract_ticker)\n df = pd.read_sql(sql_statement, con)\n\n return df", "title": "" }, { "docid": "8729205cdfec1a704698f6dbd27ea6e3", "score": "0.58554304", "text": "def get_metadata(self):\n path = \"/api/v3/metadata\"\n res = self.request_get(path)\n return res", "title": "" }, { "docid": "ffc49ff7bb41356e4c94a330d93f4d77", "score": "0.5849034", "text": "def get_table(table_name: str):\n try:\n response = crud.table.get(table_name=table_name)\n return response\n except Exception as e:\n raise e", "title": "" }, { "docid": "0a5a9b1a4bb38e79e8d7e19e39ecc24a", "score": "0.5847551", "text": "def table(self):\n return self._response_table", "title": "" }, { "docid": "bf9b2e0d5a719ef0a6ce5d40d8849ea5", "score": "0.58460444", "text": "def get_table_entry_meta_data(cls, table_id, key, meta_data_id):\n return \"\"", "title": "" }, { "docid": "d966f19632b7091b83209e84b889bc8b", "score": "0.58452356", "text": "def MetaData(self):\r\n\t\treturn self._get_attribute('metaData')", "title": "" }, { "docid": "06dfbe906f471ef648034d8c2839c182", "score": "0.58439654", "text": "def metadata(self) -> Dict[str, Dict[str, Any]]:\n pass", "title": "" }, { "docid": "ed9e2875166e89aa0660c071b0ea6cab", "score": "0.58438057", "text": "def getMetadata(data, sourceMetadata=None):\n metadata = {}\n label = None\n\n if sourceMetadata and \"label_column\" in sourceMetadata:\n label = sourceMetadata[\"label_column\"]\n\n for count, (column, values) in enumerate(data.iteritems()):\n dataType = values.dtype\n if column == label:\n dataType = \"label\"\n data[column] = data[column].astype(str)\n if column == \"id\":\n dataType = \"id\"\n if column == \"iid\":\n dataType = \"iid\"\n elif dataType == \"object\":\n dataType = \"categorical\"\n elif \"int\" in str(dataType) or \"float\" in str(dataType):\n dataType = \"numeric\"\n desc = {\n \"fullname\": column,\n \"unit\": None,\n \"short\": chars[count],\n \"dataType\": dataType,\n }\n metadata[column] = desc\n\n if sourceMetadata and \"columns\" in sourceMetadata:\n for col, val in sourceMetadata[\"columns\"].items():\n for k, v in val.items():\n metadata[col][k] = v\n\n return metadata, data", "title": "" }, { "docid": "98eefc6f7bec16e67d54aa5e3ea4f4b0", "score": "0.5842558", "text": "def _get_columnsconfig_metadata(self, colconf_name):\n\n headers = {\"User-Agent\": self._session.headers[\"User-Agent\"],\n \"Content-type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"text/plain\"}\n\n response = self._request(\"POST\", self.COLUMNS_CONFIG_URL,\n data=(\"colConfigId={}\".format(colconf_name)), headers=headers)\n\n column_dict = response[0].json()\n\n 
meta_fields = [\"Column Name\", \"Column Label\", \"Data Type\", \"Units\", \"Description\", \"Examples/Valid Values\"]\n names = []\n labels = []\n data_types = []\n field_units = []\n descriptions = []\n examples = []\n\n for colname in column_dict:\n # skipping the _selected column (gets rmoved in return table)\n if colname == \"_selected_\":\n continue\n\n field = column_dict[colname]\n\n # skipping any columns that are removed\n if field.get(\"remove\", False):\n continue\n\n names.append(colname)\n labels.append(field.get(\"text\", colname))\n\n # datatype is a little more complicated\n d_type = utils.parse_type(field.get(\"type\", \"\"))[0]\n if not d_type:\n d_type = utils.parse_type(field.get(\"vot.datatype\", \"\"))[0]\n data_types.append(d_type)\n\n # units\n units = field.get(\"unit\", \"\")\n if not units:\n units = field.get(\"vot.unit\", \"\")\n field_units.append(units)\n\n descriptions.append(field.get(\"vot.description\", \"\"))\n examples.append(field.get(\"example\", \"\"))\n\n meta_table = Table(names=meta_fields, data=[names, labels, data_types, field_units, descriptions, examples])\n\n # Removing any empty columns\n for colname in meta_table.colnames:\n if (meta_table[colname] == \"\").all():\n meta_table.remove_column(colname)\n\n return meta_table", "title": "" }, { "docid": "b2ea586507242fe5f568d5c8eba2f0aa", "score": "0.5840134", "text": "def table_finder(self):\n result = {}\n self.cur.execute('SHOW TABLES')\n results = self.cur.fetchall()\n for each in results:\n info = JOIN.describe(self, each[0])\n result[each[0]] = info\n return result", "title": "" }, { "docid": "28909e23dab05cdb0e0c9e0ca60c7e89", "score": "0.5839304", "text": "def getMetadata(self):\n descList = []\n for userName,allDB in self._all.item():\n projectName = self.getProjectName(userName)\n descList = [doc.desc for doc in self._all.getDocuments(projectName=projectName)]\n\n return pandas.DataFrame(descList)", "title": "" }, { "docid": "b7f598465f50bf8983f67bff2e8c8488", "score": "0.5835373", "text": "def _get_table(self, obj):\n if isinstance(obj, Marble):\n return obj\n else:\n return obj.table", "title": "" }, { "docid": "d534b5870b34dfbc1e4d0578c27946c8", "score": "0.5830471", "text": "def read_table(self):\n query = \" SELECT \" + \",\".join(self.columns_read)\n query += \" FROM \" + self.table + \" \"\n query += self.clauses\n # to load products from a single category\n if self.table == 'Product':\n query += str(self.catg)\n self.cursor.execute(query)\n return self.cursor.fetchall()", "title": "" }, { "docid": "d3b0425478d9d0b232dfcfd2c7c55c81", "score": "0.58268297", "text": "def metadata(self) -> dict:\n return self._metadata", "title": "" }, { "docid": "d3b0425478d9d0b232dfcfd2c7c55c81", "score": "0.58268297", "text": "def metadata(self) -> dict:\n return self._metadata", "title": "" } ]
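Several of the passages above reach for SQLAlchemy's reflection API instead of raw SQL. A compact sketch of that route, assuming an already-created `engine`; the function name is illustrative.

```python
import sqlalchemy


def reflect_table_metadata(engine, table_name):
    """Reflect one table and return a column-name -> type-string mapping."""
    metadata = sqlalchemy.MetaData()
    metadata.reflect(bind=engine, only=[table_name])  # load just this table's schema
    table = metadata.tables[table_name]
    return {column.name: str(column.type) for column in table.columns}
```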
4a18047a82e15e9da2c85135e680c633
Get the best ARIMA model.
[ { "docid": "0282a9f33bdc11ac5c8b5843ba0d9963", "score": "0.7164151", "text": "def get_best_model(self):\n return self.auto_est.get_best_model()", "title": "" } ]
[ { "docid": "d8842cd0d2022b7bbda5c575f8abd437", "score": "0.6984959", "text": "def ARIMA_model(training_data,exogenous_value,order):\n\n arima_model = sm.tsa.statespace.SARIMAX(endog=training_data.values, exog=exogenous_value, order=order).fit()\n \n return arima_model", "title": "" }, { "docid": "18f5cda0714bfd5a4df1d0af4b1c5c4e", "score": "0.6802671", "text": "def find_best_arma_repr(logger, base_process):\n ps, ds, qs = list(range(5)), [0], list(range(5))\n best_model, bic = None, np.inf\n logger.info(\"Start search for ARMA parameters:\")\n for p, d, q in itertools.product(ps, ds, qs):\n model = ARIMA(base_process, order=(p, d, q))\n try:\n model_fit = model.fit(disp=0)\n if model_fit.bic < bic:\n best_model = (p, d, q)\n bic = model_fit.bic\n # print(p,d,q, \" BIC = {}\".format(model_fit.bic))\n logger.info(\"{},{},{} BIC = {}\".format(p,d,q, model_fit.bic))\n except Exception as e:\n logger.info(\"{},{},{} rejected:\".format(p,d,q))\n logger.error(e)\n continue\n\n logger.info(\"End search for ARMA parameters.\")\n return best_model", "title": "" }, { "docid": "bd0b7583eadc42976f9b2206a37d9977", "score": "0.6718653", "text": "def set_best_model(self,metric='mape'):\n self.best_model = self.order_all_forecasts_best_to_worst(metric)[0]", "title": "" }, { "docid": "3e559b30627af4ec09b983038be2bf40", "score": "0.66974324", "text": "def createModel(self):\n model = ARIMA(self.timeSeries, order=self.order, freq=infer_freq(self.timeSeries.index))\n return model", "title": "" }, { "docid": "3732655543317e34814f04c8e4ced694", "score": "0.6643961", "text": "def best_model(self):\n return self.model_finder.best_model()", "title": "" }, { "docid": "1cd049847667e05bd7449afe0f371b73", "score": "0.6561318", "text": "def select_best_model(experiment, run_id, metric=\"root_mean_squared_error\"):\n automl_run = AutoMLRun(experiment, run_id = run_id)\n best_run, fitted_model = automl_run.get_output(metric=metric)\n print(fitted_model.steps)\n return fitted_model", "title": "" }, { "docid": "61a9bf5fd4ce870226d4b43c60f62fa3", "score": "0.6478377", "text": "def get_best_model(self):\n return self.best_model.model", "title": "" }, { "docid": "8586e0482ed13163b59765ba80104eae", "score": "0.64696604", "text": "def load_best_model(self):\n model_id = max(self.history, key=lambda x: x['accuracy'])['model_id']\n return load_model(os.path.join(self.path, str(model_id) + '.h5'))", "title": "" }, { "docid": "dde5eeafc51eb7dcc3cb011531690396", "score": "0.6461207", "text": "def load_best_model(self):\n return self.load_model_by_id(self.get_best_model_id())", "title": "" }, { "docid": "2ee1f1990d85d3d6587fed9409b5213b", "score": "0.64280236", "text": "def fit_initial_model(self, y):\n model = self.fit_case(y, self.components.without_arma())\n if len(self.components.seasonal_periods) > 0:\n # Try non-seasonal model without ARMA\n model_candidate = self.fit_case(y, self.components.without_seasonal_periods().without_arma())\n if model_candidate.aic < model.aic:\n model = model_candidate\n return model", "title": "" }, { "docid": "5323685c5c2935cdb157e20321ee0bcd", "score": "0.6416233", "text": "def find_bestmodel(self):\n\n \n self.read_fit()\n fit = self.fit\n \n # define parameters\n mis = self.misfit_mean\n s = self.penalty_structure/np.median(self.penalty_structure)\n a = self.penalty_anisotropy/np.median(self.penalty_anisotropy)\n \n # define function to minimise\n f = a*s*np.abs(a-s)/(a+s)\n \n # define the parameters relating to the best model\n self.params_bestmodel = 
fit[f==min(f[mis<min(mis)*self.misfit_threshold])][0]\n self.params_fittingmodels = fit[mis<min(mis)*self.misfit_threshold]", "title": "" }, { "docid": "fb7066b1163c2388226669cef7209f62", "score": "0.63604534", "text": "def auto_arima(ts, p_max=1, q_max=1, d_max=1, verbose=True): \n # ARIMA parameters p, d and q parameters can take any value between 0 and par_max\n p_range = range(0, p_max + 1)\n q_range = range(0, q_max + 1)\n d_range = range(0, min(d_max + 1, 3))\n\n # Generate all possible triplets of parameters\n # p = q = range(0, par_max + 1)\n pdq = [(x[0], x[1], x[2]) for x in list(itertools.product(p_range, d_range, q_range))]\n \n model_results = []\n best_model = {}\n min_aic = 1000000\n for params in pdq:\n try:\n mod = sm.tsa.ARIMA(ts, order=params)\n results = mod.fit()\n \n if verbose:\n print('ARIMA{0} AIC:{1:.2f} BIC:{2:.2f}'.format(params, results.aic, results.bic))\n model_results.append({'aic':results.aic,\n 'bic':results.bic,\n 'params':params,\n 'model':results})\n if min_aic > results.aic:\n best_model={'aic':results.aic,\n 'bic':results.bic,\n 'params':params,\n 'model':results}\n min_aic = results.aic\n except Exception as ex:\n print(ex)\n if verbose:\n print('Best model params:{0} AIC:{1:.2f} BIC:{2:.2f}'.format(best_model['params'],\n best_model['aic'], best_model['bic'])) \n\n return best_model, model_results", "title": "" }, { "docid": "e1e7b363abce89a581dc67ac3a6a16b3", "score": "0.63149923", "text": "def get_best(self):\n raise NotImplementedError(\"get_best not implemented\")", "title": "" }, { "docid": "2ea309e4f509e7cc91583310850242be", "score": "0.6312902", "text": "def seek_garch_model(TS):\n best_aic = np.inf \n best_order = None\n best_mdl = None\n\n pq_rng = range(5) # [0,1,2,3,4]\n d_rng = range(2) # [0,1]\n for i in pq_rng:\n for d in d_rng:\n for j in pq_rng:\n try:\n tmp_mdl = smt.ARIMA(TS, order=(i,d,j)).fit(\n method='mle', trend='nc'\n )\n tmp_aic = tmp_mdl.aic\n if tmp_aic < best_aic:\n best_aic = tmp_aic\n best_order = (i, d, j)\n best_mdl = tmp_mdl\n except: continue\n print('aic: {:6.5f} | order: {}'.format(best_aic, best_order)) \n return best_aic, best_order, best_mdl", "title": "" }, { "docid": "48b88dbb9b24ee896107cedd4a6ec19f", "score": "0.63127595", "text": "def arima_fit(data, order, filename=None):\n\n\tmodel = ARIMA(data, order=order)\n\tmodel_fit = model.fit()\n\t\n\tif filename is not None:\n\t\tmodel_fit.save(filename)\n\n\treturn model_fit", "title": "" }, { "docid": "343ad646168942a93b81517375a1d5e8", "score": "0.629863", "text": "def chooseBestModel(results):\n bestResult = None\n\n ### YOUR CODE HERE\n bestResult = max(results, key=lambda x: x['dev'])\n ### END YOUR CODE\n\n return bestResult", "title": "" }, { "docid": "7d0ba345f0c6fd848360ce6162621522", "score": "0.6165708", "text": "def getBestModel(models, scores, bestScore='max'):\n if bestScore == 'max':\n return models[np.argmax(scores)]\n elif bestScore == 'min':\n return models[np.argmin(scores)]\n else:\n print('Please choose \"max\" or \"min\" for bestScore parameter')", "title": "" }, { "docid": "faa2dcf9ec78f37e0d6507878770883a", "score": "0.6153771", "text": "def best_model(self) -> GPModel:\n evaluated_gp_models = [model for model in self.selected_models if model.evaluated]\n sorted_gp_models = sorted(evaluated_gp_models, key=lambda m: m.score, reverse=True)\n\n # Set model dict if needed\n for gp_model in sorted_gp_models:\n if gp_model.model_input_dict != self.model_dict:\n gp_model.model_input_dict = self.model_dict\n\n return sorted_gp_models[0]", 
"title": "" }, { "docid": "82cc04a6aaf32f6e813507c95d727f8b", "score": "0.6121148", "text": "def use_best_model(self, model):\n model.load_state_dict(t.load(self.filename))\n model.eval()\n return model, self.best_loss, self.best_accuracy, self.best_epoch", "title": "" }, { "docid": "3f1d59a55b292d67d1763d3c7f9a5fe5", "score": "0.60938436", "text": "def _get_best_model(self, curs):\n\n select_stmt = \"\"\"select model_idx, pickled_model\n from models\n where model_idx =\n (select model_idx\n from model_scores\n order by score desc\n limit 1)\"\"\"\n curs.execute(select_stmt)\n row = curs.fetchone()\n\n return row[0], loads(str(row[1]))", "title": "" }, { "docid": "c29b412e1bdde4d9339d3e8233764d02", "score": "0.60641575", "text": "def best_model(self):\n\n best_model = None\n for model in self._models:\n if model.name == self._best_model_name:\n best_model = model\n best_model.set_params(self._models_best_params[best_model.name])\n return best_model", "title": "" }, { "docid": "f572d63ab880c9fd217a1e8512c51deb", "score": "0.604909", "text": "def best_model(self):\n\n db_conn = sqlite3.connect(self.db_file)\n _, model = self._get_best_model(db_conn.cursor())\n db_conn.close()\n return model", "title": "" }, { "docid": "f5bdeca97144b7cebd400c682a69b9de", "score": "0.6045213", "text": "def find_best_fit(data, seasonal=False, trace=True):\n\tbest_params = dict()\n\n\tstepwise_fit = auto_arima(data, seasonal=seasonal, trace=trace, start_p=0, start_q=0, max_p=5, max_q=5)\n\tbest_params['order'] = stepwise_fit.get_params()['order']\n\tbest_params['seasonal_order'] = stepwise_fit.get_params()['seasonal_order']\n\n\treturn best_params", "title": "" }, { "docid": "06e40c236ed14a47332daaa11941a822", "score": "0.6043732", "text": "def get_arima_obj(self, sm_model, sm_results):\n p = sm_results._results.k_ar\n if 'k_diff' in sm_results._results.__dict__.keys():\n d = sm_results._results.k_diff\n else:\n d = 0\n q = sm_results._results.k_ma\n if sm_model.method == 'css-mle':\n pred_method = \"conditionalLeastSquares-exactLeastSquares\"\n elif sm_model.method == 'css':\n pred_method = \"conditionalLeastSquares\"\n elif sm_model.method == 'mle':\n pred_method = \"exactLeastSquares\"\n else:\n pred_method = None\n\n ar_content = ' '.join([str(i) for i in sm_results._results.arparams] if int(p) > 0 else [])\n ar_params_array = ArrayType(content = ar_content, n = p, type_ = 'real')\n \n ma_content = ' '.join([str(coeff) for coeff in sm_results._results.maparams] if int(q) > 0 else [])\n ma_coeff_array = ArrayType(content = ma_content, n = q, type_ = 'real')\n ny_maCoef_obj = MACoefficients(Array = ma_coeff_array)\n\n nyoka_arima_obj = ARIMA(constantTerm = sm_results.params[0],\n predictionMethod = pred_method,\n Extension = self.get_arima_extension_list(sm_model),\n NonseasonalComponent = NonseasonalComponent(p = p, d = d, q = q, AR = AR(Array = ar_params_array), MA = MA(MACoefficients = ny_maCoef_obj)))\n return nyoka_arima_obj", "title": "" }, { "docid": "64fb9bf9891e240a32fde724dba3c806", "score": "0.601159", "text": "def load_best_model():\n with open('best_model.pickle', 'rb') as f:\n clf = pickle.load(f)\n return clf", "title": "" }, { "docid": "c8a6b5a471aebf91aaa20a43f6a16dd4", "score": "0.5946205", "text": "def get_model_best(self) -> str:\n models = self.get_model_names()\n if not models:\n raise ValueError(\"Trainer has no fit models that can predict.\")\n model_performances = self.get_models_attribute_dict(attribute=\"val_score\")\n performances_list = [(m, model_performances[m]) for m in models 
if model_performances[m] is not None]\n\n if not performances_list:\n raise ValueError(\"No fitted models have validation scores computed.\")\n\n return max(performances_list, key=lambda i: i[1])[0]", "title": "" }, { "docid": "c57d80a18ad372a37c0554c9a2ec6032", "score": "0.59442794", "text": "def get_best(self):\n summary = self.get_summary()\n return summary.get(\"best\", None)", "title": "" }, { "docid": "ac3dabf20d06c97802d1fb887f650fcc", "score": "0.5938479", "text": "def test_get_best_model(self):\n # aic order is null, alt1, alt2\n # in this case, no substantial diff, so should return smaller nfp, ie null\n hyp = _make_hyp(112, 110, 111, 10, 11, 12)\n got = hyp.get_best_model(threshold=0.05)\n self.assertIs(got, hyp.null)\n # here alt2 is winner\n hyp = _make_hyp(110, 111, 104, 10, 11, 12)\n got = hyp.get_best_model(threshold=0.05)\n self.assertIs(got, hyp[\"alt2\"])\n # but if we set threshold more permissive, it will return null\n got = hyp.get_best_model(threshold=0.03)\n self.assertIs(got, hyp.null)", "title": "" }, { "docid": "50965bc5f4c9284c085b974dc32e8074", "score": "0.5933562", "text": "def get_sarimax_obj(self, sm_model, sm_results):\n #NonSeasonal\n p = sm_results._results.specification.k_ar\n if 'k_diff' in sm_results._results.specification.__dict__.keys():\n d = sm_results._results.specification.k_diff\n else:\n d = 0\n q = sm_results._results.specification.k_ma\n \n ns_ar_content = ' '.join([str(i) for i in sm_results._results._params_ar] if int(p) > 0 else [])\n ns_ar_params_array = ArrayType(content = ns_ar_content, n = p, type_ = 'real')\n\n ns_ma_content = ' '.join([str(coeff) for coeff in sm_results._results._params_ma] if int(q) > 0 else [])\n ns_ma_coeff_array = ArrayType(content = ns_ma_content, n = q, type_ = 'real')\n ny_ns_maCoef_obj = MACoefficients(Array = ns_ma_coeff_array)\n\n #Seasonal\n P = sm_results._results.specification.seasonal_order[0]\n D = sm_results._results.specification.seasonal_order[1]\n Q = sm_results._results.specification.seasonal_order[2]\n S = sm_results._results.specification.seasonal_periods\n\n seasonal_ar_content = ' '.join([str(i) for i in sm_results._results._params_seasonal_ar] if int(P) > 0 else [])\n seasonal_ar_params_array = ArrayType(content = seasonal_ar_content, n = P, type_ = 'real')\n\n seasonal_ma_content = ' '.join([str(coeff) for coeff in sm_results._results._params_seasonal_ma] if int(Q) > 0 else [])\n seasonal_ma_coeff_array = ArrayType(content = seasonal_ma_content, n = Q, type_ = 'real')\n ny_seasonal_maCoef_obj = MACoefficients(Array = seasonal_ma_coeff_array)\n\n nyoka_sarimax_obj = ARIMA(#predictionMethod = None,\n Extension = self.get_sarimax_extension_list(sm_results),\n NonseasonalComponent = NonseasonalComponent(p = p, d = d, q = q, AR = AR(Array = ns_ar_params_array), MA = MA(MACoefficients = ny_ns_maCoef_obj)),\n SeasonalComponent = SeasonalComponent(P = P, D = D, Q = Q, period = S, AR = AR(Array = seasonal_ar_params_array), MA = MA(MACoefficients = ny_seasonal_maCoef_obj)))\n return nyoka_sarimax_obj", "title": "" }, { "docid": "7e44bec5e06da2b2e23a69d5091369f1", "score": "0.59301406", "text": "def choose_best_model(scores, parameters, max_accuracy, tolerance):\n\tfor score in scores:\n\t\tif max_accuracy - score <= tolerance:\n\t\t\ts = score\n\t\t\tbreak\n\treturn parameters[scores.index(s)]", "title": "" }, { "docid": "46bd00d102a8a5ce4a31af1f31c8cb5e", "score": "0.59240323", "text": "def forecast_var_from_best(returns):\n from pyetf.algos import seek_garch_model\n from arch import arch_model\n 
res_tup = seek_garch_model(returns)\n order = res_tup[1]\n p_ = order[0]\n o_ = order[1]\n q_ = order[2]\n return arch_model(returns, p=p_, o=o_, q=q_, dist='StudentsT')", "title": "" }, { "docid": "60f61b63cdff3ea775489b1ebc7a0f96", "score": "0.5922123", "text": "def get_best_model(self):\n # Next line relies on aggregator inner field where model dicts are stored\n best_tensor_dict = self.server.aggregator.best_tensor_dict\n self.task_runner_stub.rebuild_model(best_tensor_dict, validation=True, device='cpu')\n return self.task_runner_stub.model", "title": "" }, { "docid": "be5b463a5895e7e9554a7c0823c73eb2", "score": "0.5887998", "text": "def load_arima(filename): \n\tARIMA.__getnewargs__ = __getnewargs__\n\tmodel = ARIMAResults.load(filename)", "title": "" }, { "docid": "894926c0f7cdff1a44409af51688180b", "score": "0.58866745", "text": "def get_best_model_and_metrics(self,X,y,metric_to_optimize):\n all_y_predictions = self.predict_all_models(X)\n all_model_metrics = self.score_all_models(all_y_predictions, y)\n best_model_and_metrics = max(all_model_metrics.iteritems(), key=lambda model_metrics : model_metrics[1]._asdict()[metric_to_optimize])\n return best_model_and_metrics", "title": "" }, { "docid": "e69059889bd4f2b9e94c247f38413117", "score": "0.5877223", "text": "def get_best_model_id(self):\n\n if self.optimize_mode is OptimizeMode.Maximize:\n return max(self.history, key=lambda x: x[\"metric_value\"])[\"model_id\"]\n return min(self.history, key=lambda x: x[\"metric_value\"])[\"model_id\"]", "title": "" }, { "docid": "12a2686f81ce751c81fbac5f624c972d", "score": "0.58557403", "text": "def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n \n scores, models = [], []\n\n for num_states in range(self.min_n_components, self.max_n_components + 1):\n try:\n hmm_model, log_likelihood = self.__train_score(num_states)\n scores.append(self.score_bic(num_states, log_likelihood))\n models.append(hmm_model)\n except Exception as e:\n pass\n # Avoid any possible errors\n assert len(scores) == len(models)\n\n if len(scores) > 1:\n best_model = models[np.argmin(scores)]\n return best_model\n return None", "title": "" }, { "docid": "cbacbbab025cbb5475b1593e14ae0d5d", "score": "0.5836302", "text": "def best_model_predict(self, X):\n return np.argmax(self.best_model_predict_proba(X), axis=1)", "title": "" }, { "docid": "f6d538862d7b41af9958e359d5fbb468", "score": "0.5812949", "text": "def best_guess(self):\n # if no data, return None\n if self.count() == 0:\n return None\n \n # if there is data, get the best forecast\n preference_list = ['actual', 'mn_ahead', 'hr_ahead', 'dy_ahead']\n for forecast_type in preference_list:\n\n # see if there's data for this forecast\n forecast_qset = self.filter(forecast_code=FORECAST_CODES[forecast_type])\n if forecast_qset.count() > 0:\n return forecast_qset.latest()", "title": "" }, { "docid": "e97ff477e96640d4aab46fc84d7f510d", "score": "0.5801108", "text": "def getPredictionFromBestModel(bestdelta, bestlags, fout, cut, start_test, dataSets, parameters):\n \n (X_train, y_train, X_test, y_test)=dataPrep(bestdelta, bestlags, fout, cut, start_test, dataSets, parameters)\n model = performClassification(X_train, y_train, X_test, y_test, 'RF', parameters, fout, False)\n \n #with open(parameters[0], 'rb') as fin:\n # model = cPickle.load(fin) \n \n return model.predict(X_test), model.score(X_test, y_test)", "title": "" }, { "docid": "daf31da289e53da2dbb26e82d22eb409", "score": "0.57901007", "text": "def get_best_epa(self):\n\n pool_list = 
self.to_list()\n        pool_list.sort(key=lambda x: x.fitness, reverse=True)\n        return pool_list[0].epa",
        "title": ""
    },
    {
        "docid": "3deb3cf03ed121a83768932403a8e9c6",
        "score": "0.5781125",
        "text": "def __init__(self,data,max_ar,max_ma,verbose=True,find_online_ar=True,criterion='mae'):\n\n        if type(max_ar) == int:\n            # Python quirk\n            max_ar+=1\n            ar_=range(1,max_ar)\n        else:\n            ar_=max_ar\n        \n        if type(max_ma) == int:\n            max_ma+=1\n            ma_=range(0,max_ma)\n        else:\n            ma_=max_ma\n\n        table_aic=pd.DataFrame(index=ar_,columns=ma_)\n        table_aic.index.name='AIC AR\\MA'\n        table_bic=table_aic.copy()\n        table_bic.index.name='BIC AR\\MA'\n        table_mae=table_aic.copy()\n        table_mae.index.name='MAE AR\\MA'\n\n        for ar in ar_:\n            for ma in ma_:\n                if ma > ar:\n                    continue\n\n                arma = sm.tsa.SARIMAX(endog=data, order=(ar,0,ma))\n#                arma = ARMA(endog=data, order=(ar,ma))\n                try:\n                    results=arma.fit()\n                except:\n                    if verbose:\n                        print('not solve for model ',ar,ma)\n                    continue\n                table_aic.loc[ar][ma]=results.aic\n                table_bic.loc[ar][ma]=results.bic\n                table_mae.loc[ar][ma]=np.mean(np.abs(results.resid))\n                if verbose:\n                    print(ar,ma)\n                del arma, results\n\n        ar_aic=table_aic[table_aic==np.nanmin(table_aic.values)].dropna(axis=1,how='all').dropna(how='all').index.item()\n        ma_aic=table_aic[table_aic==np.nanmin(table_aic.values)].dropna(axis=1,how='all').dropna(how='all').columns.item()\n\n        ar_bic=table_bic[table_bic==np.nanmin(table_bic.values)].dropna(axis=1,how='all').dropna(how='all').index.item()\n        ma_bic=table_bic[table_bic==np.nanmin(table_bic.values)].dropna(axis=1,how='all').dropna(how='all').columns.item()\n\n        ar_mae=table_mae[table_mae==np.nanmin(table_mae.values)].dropna(axis=1,how='all').dropna(how='all').index.item()\n        ma_mae=table_mae[table_mae==np.nanmin(table_mae.values)].dropna(axis=1,how='all').dropna(how='all').columns.item()\n\n        if verbose:\n            print('\\r\\n')\n            print(table_aic)\n            print('the best model for aic (AR/MA) is:',ar_aic,ma_aic)\n\n            print('\\r\\n')\n            print(table_bic)\n            print('the best model for bic (AR/MA) is:',ar_bic,ma_bic)\n\n\n            print('\\r\\n')\n            print(table_mae)\n            print('the best model for mae (AR/MA) is:',ar_mae,ma_mae)\n\n        # assign the best model\n        if criterion == 'mae':\n            self.best_model=(ar_mae,ma_mae)\n        if criterion == 'aic':\n            self.best_model=(ar_aic,ma_aic)            \n        if criterion == 'bic':\n            self.best_model=(ar_bic,ma_bic)            \n            \n        print('WE CHOOSE THE BEST MODEL IS:',self.best_model[0],self.best_model[1])\n\n        # Slava's heuristic method\n        if find_online_ar:\n            err=np.nanmin(np.nanmin(table_mae.values))\n            for i in range(20):\n                ar= AR(data)\n                res=ar.fit(ar_mae+i)\n                if np.mean(np.abs(res.resid)) < err:\n                    break\n            self.best_model_ar=i+ar_mae\n            print('WE CHOOSE THE BEST ONLINE AR MODEL IS:',self.best_model_ar)",
        "title": ""
    },
    {
        "docid": "41bd91f6d69d322695cc3e679fa1e1fb",
        "score": "0.5759042",
        "text": "def evaluate_arima_model(X, arima_order):\n    warnings.filterwarnings(\"ignore\")\n    # prepare training dataset\n    train_size = int(len(X) * 0.66)\n    train, test = X[0:train_size], X[train_size:]\n    history = [x for x in train]\n    # make predictions\n    predictions = list()\n    for t in range(len(test)):\n        model = ARIMA(history, order=arima_order)\n        model_fit = model.fit(disp=0)\n        yhat = model_fit.forecast()[0]\n        predictions.append(yhat)\n        history.append(test[t])\n    # calculate out of sample error\n    error = mean_squared_error(test, predictions)\n    return error",
        "title": ""
    },
    {
        "docid": "70646551e4aa8b8eac9666d6bfe440a7",
        "score": "0.5750371",
        "text": "def fit(self, X, y=None, time_col=TIME_COL, value_col=VALUE_COL, 
**fit_params):\n X = X.sort_values(by=time_col)\n # fits null model\n super().fit(X, y=y, time_col=time_col, value_col=value_col, **fit_params)\n\n self.fit_df = X\n # fits AutoArima model\n self.model = AutoARIMA(\n start_p=self.start_p,\n d=self.d,\n start_q=self.start_q,\n max_p=self.max_p,\n max_d=self.max_d,\n max_q=self.max_q,\n start_P=self.start_P,\n D=self.D,\n start_Q=self.start_Q,\n max_P=self.max_P,\n max_D=self.max_D,\n max_Q=self.max_Q,\n max_order=self.max_order,\n m=self.m,\n seasonal=self.seasonal,\n stationary=self.stationary,\n information_criterion=self.information_criterion,\n alpha=self.alpha,\n test=self.test,\n seasonal_test=self.seasonal_test,\n stepwise=self.stepwise,\n n_jobs=self.n_jobs,\n start_params=self.start_params,\n trend=self.trend,\n method=self.method,\n maxiter=self.maxiter,\n offset_test_args=self.offset_test_args,\n seasonal_test_args=self.seasonal_test_args,\n suppress_warnings=self.suppress_warnings,\n error_action=self.error_action,\n trace=self.trace,\n random=self.random,\n random_state=self.random_state,\n n_fits=self.n_fits,\n out_of_sample_size=self.out_of_sample_size,\n scoring=self.scoring,\n scoring_args=self.scoring_args,\n with_intercept=self.with_intercept,\n return_conf_int=self.return_conf_int,\n dynamic=self.dynamic,\n regressor_cols=self.regressor_cols\n )\n\n # fits auto-arima\n if self.regressor_cols is None:\n reg_df = None\n else:\n reg_df = X[self.regressor_cols]\n self.model.fit(y=X[[value_col]], X=reg_df)\n\n return self", "title": "" }, { "docid": "2044c63add6304ec5fd90870560c6bbb", "score": "0.57298243", "text": "def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n best_score, best_model = float(\"inf\"), None\n n_observations = self.X.shape[0]\n n_features = self.X.shape[1]\n \n for n_components in range(self.min_n_components, self.max_n_components + 1):\n model = None\n logL = None\n try:\n model = GaussianHMM(n_components=n_components, covariance_type='diag', n_iter=1000, random_state=self.random_state, verbose=False).fit(self.X, self.lengths)\n logL = model.score(self.X, self.lengths)\n n_parameters = n_components * (n_components - 1) + 2 * n_features * n_components\n aicValue = -2 * logL + 2 * n_parameters\n if aicValue < best_score:\n best_score, best_model = aicValue, model\n except Exception:\n break\n\n if best_model is not None:\n return best_model\n else:\n return self.base_model(self.n_constant)", "title": "" }, { "docid": "8b5a9be674149aaea7120733c2fd2ee0", "score": "0.57130283", "text": "def Select_model(model):\n\t# Get a list of all successfully built models from a.outputs\n\tModels = [x for x in model.outputs if x['failure'] is None]\n\t# Rank the models by DOPE score\n\tkey = 'DOPE score'\n\tif version_info[:2] == (2,3):\n\t # Python 2.3's sort doesn't have a 'key' argument\n\t Models.sort(lambda a,b: cmp(a[key], b[key]))\n\telse:\n\t Models.sort(key=lambda a: a[key])\n\t# Get top model\n\tbest_model = Models[0]\n\tbest_score = best_model[key]\n\tbest_name = best_model[\"name\"]\n\treturn(best_name, best_score)", "title": "" }, { "docid": "18b36d7d281cf1d6bfdd82fb8a358d64", "score": "0.5691423", "text": "def find_best_model(dict_models_scores):\n # sort by mean scores (dict values)\n # then select the last model (highest mean scores)\n best_model = {\n k: v for k, v in sorted(\n dict_models_scores.items(), key=lambda item: item[1]\n )\n }\n best_model = list(best_model.keys())[-1]\n model_name = str(best_model)[:15]\n\n print(f\"Best model: {model_name}\")\n\n return 
best_model", "title": "" }, { "docid": "e24bdb02a79156e408fda6350cd2891f", "score": "0.5689549", "text": "def evaluate_models(dataset, p_values, d_values, q_values):\n warnings.filterwarnings(\"ignore\")\n dataset = dataset.astype('float32')\n best_score, best_cfg = float(\"inf\"), None\n for p in p_values:\n for d in d_values:\n for q in q_values:\n order = (p,d,q)\n try:\n mse = evaluate_arima_model(dataset, order)\n if mse < best_score:\n best_score, best_cfg = mse, order\n #print('ARIMA%s MSE=%.3f' % (order,mse))\n except:\n continue\n print('Best ARIMA%s MSE=%.3f' % (best_cfg, best_score))", "title": "" }, { "docid": "3fb85b23af84bd9a488870ee4950f14f", "score": "0.5681769", "text": "def best(self):\n return self and self[0][0] or None", "title": "" }, { "docid": "6e3d11ed0c8f1d59ba6fdb5f7662f866", "score": "0.5671174", "text": "def best(self):\n return self.__best", "title": "" }, { "docid": "c39f1a8effedf21a76984e50dedd7e71", "score": "0.56626356", "text": "def best(self):\n raise NotImplementedError()", "title": "" }, { "docid": "1df5f7d13f8d91266632a11adc2c16ee", "score": "0.5656058", "text": "def best(self):\r\n\r\n # print(x for x in self)\r\n results = sorted([x for x in self],\r\n key=lambda x: -x['evaluation']['test']['f1'])\r\n\r\n if results:\r\n return results[0]\r\n return None", "title": "" }, { "docid": "7f6d0fb6edad8fbd903936af9130d119", "score": "0.5645036", "text": "def get_high_model(self):\n return self.high_model", "title": "" }, { "docid": "43805141e018287dc51c2f3401635d43", "score": "0.5640113", "text": "def get_best_epa(self):\n\n sorted_list = copy.deepcopy(self.initial_population)\n sorted_list.sort(key=lambda x: x.epa, reverse=False)\n return sorted_list[0].epa", "title": "" }, { "docid": "265811dc384b77355cc06668be4514ac", "score": "0.5636188", "text": "def best(cls):\n return cls", "title": "" }, { "docid": "dde48caec2bdd18752a65d3fe2b92d18", "score": "0.5635722", "text": "def get_best(self):\n\n return self.all_time_best['x']", "title": "" }, { "docid": "d436c043f4cce5a77785bd4a3332d4ae", "score": "0.56301963", "text": "def get_best_learner(self):\n\t\tk, mape_knn = self.bestKNN()\n\n\t\tn, mape_bag = self.bestBagLearner()\n\n\t\tmss, mape_tree = self.bestTree()\n\n\t\tn_trees, mape_forest = self.bestForest()\n\n\t\tbest = \"knn\"\n\t\tbest_mape = mape_knn\n\n\t\tif mape_bag < best_mape:\n\t\t\tbest = \"bag\"\n\t\t\tbest_mape = mape_bag\n\n\t\tif mape_tree < best_mape:\n\t\t\tbest = \"tree\"\n\t\t\tbest_mape = mape_tree\n\n\t\tif mape_forest < best_mape:\n\t\t\tbest = \"forest\"\n\t\t\tbest_mape = mape_tree\n\n\t\tif best == \"knn\":\n\t\t\tself.best_model = KNNLearner(k = k)\n\t\t\tself.best = {\"model\":best, \"k\":k, \"mape on validation\": best_mape}\n\n\t\telif best == \"bag\":\n\t\t\tself.best_model = BagLearner(learner=KNNLearner, kwargs={\"k\":k})\n\t\t\tself.best = {\"model\":best, \"n_learners\":n, \"k\":k, \"mape on validation\": best_mape}\n\n\t\telif best == \"tree\":\n\t\t\tself.best_model = DecisionTreeRegressor(min_samples_split=mss, random_state=817)\n\t\t\tself.best = {\"model\":best, \"min_samples_split\":mss, \"mape on validation\": best_mape}\n\n\t\telse:\n\t\t\tself.best_model = RandomForestRegressor(n_estimators=n_trees, random_state=817)\n\t\t\tself.best = {\"model\":best, \"n_trees\":n_trees, \"min_samples_split\":mss, \"mape on validation\": best_mape}\n\n\t\tself.best_model.fit(self.trainX_normed, self.trainY)", "title": "" }, { "docid": "068eeffaf39b5ac2422df82e31a6d2f3", "score": "0.56292367", "text": "def get_best_model(cls, 
log_dir, mode='train'):\n        results_dict = cls.load_results_dict(log_dir, mode)\n\n        # models are ordered by validation accuracy; choose first one.\n        best_model_name = next(iter(results_dict))\n        log_dir_best_model = os.path.join(log_dir, best_model_name)\n        ckpt_filename = cls.get_ckpt_from_log_dir_model(log_dir_best_model)\n\n        return best_model_name, ckpt_filename",
        "title": ""
    },
    {
        "docid": "ac36e9d6dd8ce71ce257ff04fd442f4a",
        "score": "0.55999243",
        "text": "def getBestFit(self, par):\n        \n        idx = self.getParameterIndex(par)\n        \n        return self.best_fits[idx]",
        "title": ""
    },
    {
        "docid": "b3551484bc88b415670d4d60e9ebdca7",
        "score": "0.55981547",
        "text": "def best_model(self, train, valid=None, return_promising=False):\n        raise NotImplementedError('implement me')",
        "title": ""
    },
    {
        "docid": "dadd2d153ad7d9db8226cb576d49bed9",
        "score": "0.5581197",
        "text": "def load_best_model(history, path_to_cp):\n    #which one is the best epoch?\n    best_epoch = str(history['val_acc'].index(max(history['val_acc'])) +1).zfill(3)\n    print('best epoch: ', best_epoch)\n    cp_path = ''.join(string for string in [path_to_cp, '/weights-improvement-', best_epoch, '*.hdf5'])\n    cp_path = glob.glob(cp_path)[0]\n    model = load_model(cp_path)\n    return model, cp_path",
        "title": ""
    },
    {
        "docid": "156235d9d6c0559b5fb616201cef9635",
        "score": "0.55654925",
        "text": "def best_result(results):\n    return results[0]",
        "title": ""
    },
    {
        "docid": "2aeea559cb7f8cb54faa1348760a3ab8",
        "score": "0.55565846",
        "text": "def get_best_exp_from_BO(bo):\n    Y_pred = bo.model.predict(bo.X)\n    return bo.X[np.argmin(Y_pred[0])]",
        "title": ""
    },
    {
        "docid": "936037143fe83abb222c708d01b3f435",
        "score": "0.5553913",
        "text": "def return_best_pipeline(self):\n        best_pipeline_name = self.best_pipeline[\"model_name\"]\n        # return grid search object with pipeline\n        return self.modeling_results[best_pipeline_name][\"gs_obj\"]",
        "title": ""
    },
    {
        "docid": "1dad49855703ae55b5211dfcda1f8c83",
        "score": "0.5550752",
        "text": "def modelling_ARIMA(df, name):\n    data_close = df[f'CLOSE_{name}']\n    b, a = signal.butter(3, 1/10)\n    filtrd_data_close = signal.filtfilt(b, a, data_close)\n    df2 = pd.DataFrame({\"X\":data_close.to_numpy(),\"Xf\": filtrd_data_close},index=df.index)\n    dr = df2.index\n    realidad = df2.loc[dr[:22808]]\n    futuro = df2.loc[dr[22808:]]\n    predictions_ARIMA = dict()\n\n    for col in realidad.columns:\n        train = realidad[col]\n        test = futuro[col]\n\n        # Train the ARIMA model\n        model_ARIMA = ARIMA(train, order=(0,0,1))\n        print(f\"Training with the data from series {col}\")\n        model_fit_ARIMA = model_ARIMA.fit(maxlag=4)\n        \n        # Predict the ARIMA values\n        predictions_ARIMA[f'{col}_prediction'] = model_fit_ARIMA.predict(start=len(train),\n                                                    end=len(train)+len(test)-1, dynamic=False)\n        \n    pred_ARIMA = pd.DataFrame(predictions_ARIMA)\n    pred_ARIMA.index = futuro.index\n\n    ARIMA_predictions = pd.DataFrame({\n        \"GT\":futuro.X,\n        \"X\":pred_ARIMA.X_prediction,\n        \"Xf\":pred_ARIMA.Xf_prediction,\n        \"diff_X\": futuro.X - pred_ARIMA.X_prediction,\n        \"diff_Xf\":futuro.X - 
pred_ARIMA.Xf_prediction},index=futuro.index)\n\n return ARIMA_predictions", "title": "" }, { "docid": "749a9787e902204834fb96ebb6facd35", "score": "0.55499136", "text": "def load_model(name):\n tmp = np.load(os.path.join('model_weights','{0}_best_model.npz'.format(name)))\n return tmp['res'].item()", "title": "" }, { "docid": "0c2d106df8904cc4d85cf7672c9f8ba9", "score": "0.55480444", "text": "def fit(self, y, X=None, **fit_args):\n sarimax_kwargs = {} if not self.kwargs else self.kwargs\n\n self.model_ = auto_arima(\n y,\n X=X,\n start_p=self.start_p,\n d=self.d,\n start_q=self.start_q,\n max_p=self.max_p,\n max_d=self.max_d,\n max_q=self.max_q,\n start_P=self.start_P,\n D=self.D,\n start_Q=self.start_Q,\n max_P=self.max_P,\n max_D=self.max_D,\n max_Q=self.max_Q,\n max_order=self.max_order,\n m=self.m,\n seasonal=self.seasonal,\n stationary=self.stationary,\n information_criterion=self.information_criterion,\n alpha=self.alpha,\n test=self.test,\n seasonal_test=self.seasonal_test,\n stepwise=self.stepwise,\n n_jobs=self.n_jobs,\n start_params=self.start_params,\n trend=self.trend,\n method=self.method,\n maxiter=self.maxiter,\n offset_test_args=self.offset_test_args,\n seasonal_test_args=self.seasonal_test_args,\n suppress_warnings=self.suppress_warnings,\n error_action=self.error_action,\n trace=self.trace,\n random=self.random,\n random_state=self.random_state,\n n_fits=self.n_fits,\n return_valid_fits=False, # only return ONE\n out_of_sample_size=self.out_of_sample_size,\n scoring=self.scoring,\n scoring_args=self.scoring_args,\n with_intercept=self.with_intercept,\n sarimax_kwargs=sarimax_kwargs,\n **fit_args)\n\n return self", "title": "" }, { "docid": "13cadfa1899917b467bebe751817680f", "score": "0.55405414", "text": "def load_best_model(model_name):\n # Loading BaseModel\n name = model_name\n basemodel = BaseModel(model_name[:-1])\n model = basemodel.load_model(False)\n model = basemodel.adding_toplayer(model, name)\n return model", "title": "" }, { "docid": "6fafbaa534d60f0ff54bb4449fd443f3", "score": "0.5537722", "text": "def get_optim(self):\n # just return the first model, since all replicas are the same\n return self.call_async(0, '_async_get_optim').gen()", "title": "" }, { "docid": "be3b8965f37497276f508ad2920fde76", "score": "0.55295825", "text": "def model(self):\n return self.optimizer.models[-1]", "title": "" }, { "docid": "3e1d1f1d0e7fdcfd28bab17a17c8298b", "score": "0.5528059", "text": "def get_best_model(self, scores_target_list):\n max_roc_auc = 0\n max_pr_auc = 0\n best_model = scores_target_list[0][2]\n best_roc = 0\n for y_score, y_true, model in scores_target_list:\n auroc = average_precision_score(y_true, y_score)\n if auroc >= best_roc:\n best_roc = auroc\n best_model = model\n\n return best_model", "title": "" }, { "docid": "863716c1afc2f0798929d22f8b679878", "score": "0.5516804", "text": "def _estimate_model(self):\n ###Lars Algorithm\n if self.solver == \"Lars\":\n self.underlying = linear_model.LassoLars(fit_intercept=self.intercept, normalize=False)\n if self.cv_folds is 'IC': #For AIC/BIC. criterion kwarg should be provided. 
\n model = linear_model.LassoLarsIC(fit_intercept=self.intercept, normalize=False, **self.kwargs)\n elif self.cv_folds is not None:\n model = linear_model.LassoLarsCV(fit_intercept=self.intercept, cv=self.cv_folds, normalize=False, **self.kwargs)\n else:\n model = linear_model.Lasso(fit_intercept=self.intercept, **self.kwargs)\n ###Coordinate Descent Algorithm\n elif self.solver == \"Coordinate Descent\":\n self.underlying = linear_model.Lasso(fit_intercept=self.intercept)\n if self.cv_folds is not None: \n model = linear_model.LassoCV(fit_intercept=self.intercept, cv=self.cv_folds, **self.kwargs)\n else:\n model = linear_model.Lasso(fit_intercept=self.intercept, **self.kwargs)\n else:\n raise NotImplementedError('Solver not implemented. Choices are Lars or Coordinate Descent.')\n #self.model.fit(np.asanyarray(self.x_train.values,order='F'), self.y_train)\n model.fit(self.x_train, self.y_train)\n return model", "title": "" }, { "docid": "e578aa08d8b37de0f651ca428ec2a693", "score": "0.5512007", "text": "def load_bestfit_model():\n # the model file\n model = os.path.join(os.environ['RSDFIT'], 'data', 'models', 'model_nseries.npy')\n\n # the directory of box 1\n d = os.path.join(os.environ['RSDFIT_FITS'], 'periodic', 'ChallengeBoxN', 'box1_xlos')\n d = os.path.join(d, 'poles', 'nlopt_gausscov_base_kmax04')\n\n # the bestfit values\n fits = load_fits()\n print(\"taking the mean of %d fits...\" %len(fits))\n\n driver = FittingDriver.from_directory(d, model_file=model)\n theta = numpy.array([fits[name].mean() for name in driver.theory.free_names])\n driver.theory.set_free_parameters(theta)\n\n return driver.theory.model", "title": "" }, { "docid": "4d48a438d9c02b32ea2651ce7d8cab73", "score": "0.5511122", "text": "def ar_model_fitting(training, column_name, max_ar=5):\n ar_models = []\n for lag in range(1, max_ar + 1):\n\n ar_fit = fit_arma_model(lag, training)\n\n betas = [ar_fit.params[k] for k in range(1, lag + 1)]\n const = ar_fit.params[0]\n ar_models.append((lag, betas, const, column_name))\n\n logger.success(f\"Finished ar_model_fitting for {column_name}.\")\n return ar_models", "title": "" }, { "docid": "5ee73d33b45e3ce0a3890cdd6bc08ded", "score": "0.5495614", "text": "def get_best_model_name(exp_path, autoencoder=False):\n\n # relevant path\n if(autoencoder==False):\n models_path = os.path.join(exp_path, \"models\")\n logs_path = os.path.join(exp_path, \"training_logs.json\")#\n else:\n models_path = os.path.join(exp_path, \"models\")\n models_path = os.path.join(models_path, \"autoencoder\")\n logs_path = os.path.join(exp_path, \"autoencoder_logs.json\")\n\n # loading autoencoder_logs\n if(not os.path.exists(logs_path)):\n print(\"ERROR! 
Training logs do not exist...\")\n        exit()\n    with open(logs_path) as file:\n        logs = json.load(file)\n\n    if(autoencoder == False):\n        valid_loss = logs[\"loss\"][\"valid\"]\n    else:\n        valid_loss = logs[\"valid_loss\"]\n\n    # obtaining the model with the smallest validation loss\n    models = os.listdir(models_path)\n    min_epoch = 0\n    min_loss = 1e8\n    model_name = \"\"\n    for model in models:\n        if(\"model_\" not in model and \"autoencoder_\" not in model):\n            continue\n        if(model.split(\"_\")[-1] == \"trained\"):\n            epoch = -1\n        else:\n            epoch = int(model.split(\"_\")[-1])\n        loss = valid_loss[epoch]\n        if(loss < min_loss):\n            min_loss = loss\n            min_epoch = epoch\n            model_name = model\n\n    print(f\"Loading model: {model_name} with validation loss {min_loss}\")\n\n    return model_name",
        "title": ""
    },
    {
        "docid": "8c9630363d94574d077ffe74cf242306",
        "score": "0.5472011",
        "text": "def best(self):\n        if self.iList:\n            return self.iList[self.kBest]",
        "title": ""
    },
    {
        "docid": "3c863ad8a8b146709f19ddf61a92ec96",
        "score": "0.54641503",
        "text": "def getBestParam(Acc_var):\n    max_counts = ([list(Acc_var[\"n_estimator\"]).count(x) for x in\n                   list(set(Acc_var[\"n_estimator\"]))])\n    max_est = ([x for x in list(set(Acc_var[\"n_estimator\"])) if\n                list(Acc_var[\"n_estimator\"]).count(x) == max(max_counts)])\n    if len(max_est) == 1:\n        best_nEst = max_est[0]\n        acc = max(Acc_var[Acc_var[\"n_estimator\"] == best_nEst][\"Accuracy\"])\n    else:\n        acc = 0\n        for est in max_est:\n            if acc < max(Acc_var[Acc_var[\"n_estimator\"] == est][\"Accuracy\"]):\n                acc = max(Acc_var[Acc_var[\"n_estimator\"] == est][\"Accuracy\"])\n                best_nEst = est\n    maxf = list(Acc_var[(Acc_var[\"n_estimator\"] == best_nEst) &\n                        (Acc_var[\"Accuracy\"] == acc)][\"max_features\"])[0]\n    maxd = list(Acc_var[(Acc_var[\"n_estimator\"] == best_nEst) &\n                        (Acc_var[\"Accuracy\"] == acc)][\"max_depth\"])[0]\n    crit = list(Acc_var[(Acc_var[\"n_estimator\"] == best_nEst) &\n                        (Acc_var[\"Accuracy\"] == acc)][\"criterion\"])[0]\n    return [best_nEst, maxf, maxd, crit]",
        "title": ""
    },
    {
        "docid": "5ab80a8d7200005c9062f974397664bb",
        "score": "0.5455693",
        "text": "def get_model(self, index):\n\n        stateDim, actionDim = spaceInfo.getSpaceInfo(self.mdp)\n        modelConfig = self.config['regressors'][index]\n\n        fitActions = False\n        if 'fitActions' in modelConfig:\n            fitActions = modelConfig['fitActions']\n\n        if modelConfig['modelName'] == 'ExtraTree':\n            model = ExtraTreesRegressor\n            params = {'n_estimators': modelConfig['nEstimators'],\n                      'criterion': self.config[\"regressors\"][index]['supervisedAlgorithm']\n                      ['criterion'],\n                      'min_samples_split': modelConfig['minSamplesSplit'],\n                      'min_samples_leaf': modelConfig['minSamplesLeaf']}\n        elif modelConfig['modelName'] == 'ExtraTreeEnsemble':\n            model = ExtraTreeEnsemble\n            params = {'nEstimators': modelConfig['nEstimators'],\n                      'criterion': self.config[\"regressors\"][index]['supervisedAlgorithm']\n                      ['criterion'],\n                      'minSamplesSplit': modelConfig['minSamplesSplit'],\n                      'minSamplesLeaf': modelConfig['minSamplesLeaf']}\n        elif modelConfig['modelName'] == 'MLP':\n            model = MLP\n            params = {'nInput': stateDim,\n                      'nOutput': 1,\n                      'hiddenNeurons': modelConfig['nHiddenNeurons'],\n                      'nLayers': modelConfig['nLayers'],\n                      'optimizer': modelConfig['optimizer'],\n                      'activation': modelConfig['activation']}\n            if fitActions:\n                params[\"nInput\"] = stateDim + actionDim\n        elif modelConfig['modelName'] == 'MLPEnsemble':\n            model = MLPEnsemble\n            params = {'nInput': stateDim,\n                      'nOutput': 1,\n                      'hiddenNeurons': modelConfig['nHiddenNeurons'],\n                      'nLayers': modelConfig['nLayers'],\n                      'optimizer': 
modelConfig['optimizer'],\n 'activation': modelConfig['activation']}\n if fitActions:\n params[\"nInput\"] = stateDim + actionDim\n elif modelConfig['modelName'] == 'Linear':\n model = LinearRegression\n params = {}\n elif modelConfig['modelName'] == 'LinearEnsemble':\n model = LinearEnsemble\n params = {}\n else:\n raise ValueError('Unknown estimator type.')\n\n if fitActions:\n return model(**params)\n else:\n if isinstance(self.mdp.action_space, spaces.Box):\n warnings.warn(\"Action Regressor cannot be used for continuous \"\n \"action environment. Single regressor will be \"\n \"used.\")\n return model(**params)\n return ActionRegressor(model,\n self.mdp.action_space.values, decimals=5,\n **params)", "title": "" }, { "docid": "64feca32e435acca855266024fd83353", "score": "0.5453234", "text": "def findbest(result):\r\n # calculate rmse\r\n colname = list(result.columns)[1:]\r\n result_rmse = dict.fromkeys(colname)\r\n\r\n for i in colname:\r\n result_rmse[i] = int(sqrt(mean_squared_error(result['y'][-6:],result[i][-6:])))\r\n result_rmse = dict(sorted(result_rmse.items(), key = lambda x: x[1]))\r\n\r\n try:\r\n for i in list(models.keys()):\r\n if i == None:\r\n del result_rmse[i]\r\n except:\r\n pass\r\n\r\n if list(result_rmse.keys())[0] == 'setA_rwalk' and list(result_rmse.keys())[1] == 'setA_rwalk_seasonal':\r\n result_select = result[['y',list(result_rmse.keys())[1]]]\r\n else:\r\n result_select = result[['y',list(result_rmse.keys())[0]]]\r\n \r\n #plot the best predictions for validation set\r\n plt.figure(figsize=(11,7))\r\n plt.plot(result_select['y'], label='True', marker='.', color='b')\r\n plt.plot(result_select[list(result_select.keys())[1]][-6:], label=list(result_select.keys())[1], marker='.', color='r')\r\n plt.legend(loc='best')\r\n plt.show()\r\n return result_rmse,result_select", "title": "" }, { "docid": "88e7b75ef4391ad748ce5745218d63c8", "score": "0.5452956", "text": "def get_forecast_data(x,exogenous_forecast,training_data,exogenous_train):\n\n arima_model = ARIMA_model(training_data,exogenous_train,(1,0,0))\n forecast_data = arima_model.forecast( exog=exogenous_forecast[x])[0]\n return forecast_data", "title": "" }, { "docid": "7d17075ffc62357ae214b9dbfbb9e748", "score": "0.54504037", "text": "def best(self):\n return self.bestN(1)", "title": "" }, { "docid": "5e4769a6abae02413baca5a4547c7784", "score": "0.54414386", "text": "def best_x(self):\n self._best_x = self.optimiser.X[np.argmin(self.optimiser.model.predict(self.optimiser.X, with_noise=False)[0])]\n return self._best_x", "title": "" }, { "docid": "5185e99222db2f79170b7fa8ca91630b", "score": "0.54313934", "text": "def getBestScore(models, scores, bestScore='max'):\n if bestScore == 'max':\n return np.max(scores)\n elif bestScore == 'min':\n return np.min(scores)\n else:\n print('Please choose \"max\" or \"min\" for bestScore parameter')", "title": "" }, { "docid": "aa369f2205debc8608d8918d96a4d4d5", "score": "0.5397251", "text": "def best(self):\n return self.population[0]", "title": "" }, { "docid": "8a41c8122b56325be81d3841e8148676", "score": "0.538141", "text": "def get_best_value(self, metric):\n try:\n if \"loss\" in metric:\n return min(self.aggregate[metric][\"mean\"])\n elif \"acc\" in metric:\n return max(self.aggregate[metric][\"mean\"])\n else:\n raise NotImplementedError\n except KeyError:\n raise KeyError(\n \"Metric {0:s} not available for testproblem {1:s} of this setting\"\n .format(metric, self.aggregate[\"testproblem\"]))", "title": "" }, { "docid": "80272739cf5a5e486cb5e70e3f3952e7", 
"score": "0.53802466", "text": "def search_best(saved_model):\n # Get the augmentor\n augmenter = get_augmenter()\n augmenter._setup()\n\n # Load in the dataset\n dataset_loader = get_dataset(raw=True)\n\n # Get train, validation split\n _, valid = dataset_loader.get_dataset()\n\n # Load a saved model\n fcn = get_ensemble_predictors(saved_model)\n\n # Loop through\n results = []\n masks = []\n\n for i, (img, mask, _id) in enumerate(zip(valid['x'], valid['y'], valid['id'])):\n\n # Apply normalization\n img, mask = augmenter.apply_normalization(img, mask)\n\n # Run through network\n pred = fcn(img.squeeze()).squeeze()\n\n # Collect all results\n results.append(pred)\n masks.append(dataset_loader._df['masks_org'][_id].squeeze() == 255)\n\n # Convert to arrays\n results = np.array(results)\n masks = np.array(masks).astype(np.int32)\n\n # Check threshold\n thresholds = np.linspace(0, 1, 50).tolist()\n ious = [iou_metric_batch(masks, np.int32(results > threshold)) for threshold in thresholds]\n\n best_iou = max(ious)\n best_t = thresholds[ious.index(best_iou)]\n print('Best threshold: %1.2f with iout of %1.2f' % (best_t, best_iou))\n\n return best_t", "title": "" }, { "docid": "be1717c3f43e2e02cbcfd2219acf3357", "score": "0.5365599", "text": "def trainGetMAEandmlModel(self):\n\n queryset = self.model.objects.all()\n df = self.querySetToPDdataframe(queryset)\n\n y_var = list(FotocasaHouse.objects.values_list('price', flat=True))\n X_var = df\n X_train, X_test, y_train, y_test = train_test_split(X_var, y_var, test_size=0.2, random_state=0)\n mlmodel = XGBRegressor(n_estimators=500)\n mlmodel.fit(X_train, y_train, verbose=False, early_stopping_rounds=5,\n eval_set=[(X_test, y_test)])\n\n predictions = mlmodel.predict(X_test)\n\n mae = mean_absolute_error(predictions, y_test)\n\n return mae, mlmodel", "title": "" }, { "docid": "9e8081c81d0251544482953f0ef3cd5e", "score": "0.5359651", "text": "def _update_best_model(self):\n raise NotImplementedError()", "title": "" }, { "docid": "20abbe37ef9527053fc1e66641491de7", "score": "0.5349148", "text": "def get_model(self):\n # just return the first model, since all replicas are the same\n return self.call_async(0, '_async_get_model').gen()", "title": "" }, { "docid": "bc4d8180a34b4d8e9e66a39e646c48ff", "score": "0.5346548", "text": "def get_best_model(client=None,\n project_key=None,\n analysis_id=None,\n ml_task_id=None,\n metric=None):\n prj = client.get_project(project_key)\n analysis = prj.get_analysis(analysis_id)\n ml_task = analysis.get_ml_task(ml_task_id)\n trained_models = ml_task.get_trained_models_ids()\n trained_models_snippets = [ml_task.get_trained_model_snippet(m) for m in trained_models]\n # Assumes that for your metric, \"higher is better\"\n best_model_snippet = max(trained_models_snippets, key=lambda x:x[metric])\n best_model_id = best_model_snippet[\"fullModelId\"]\n return ml_task, best_model_id", "title": "" }, { "docid": "84e9f8f479ac4dea73fd4c8c63aafab6", "score": "0.53372157", "text": "def get_best(self, label, phase, metric):\n pass", "title": "" }, { "docid": "82b5231ab24e37a13c310688078e933f", "score": "0.53363526", "text": "def extract_best_pipeline_from_the_best_models(best_pipelines): \n best_model_pipeline = None\n score_name = best_pipelines[0].performance_metric_name\n # Value to decide if the score should be maximized or minimized\n comp_value = 1\n # If the score name ends with _error or _loss, then it should be\n # minimized. 
See https://scikit-learn.org/stable/modules/model_evaluation.html\n if score_name.endswith('_error') or score_name.endswith('_loss'):\n comp_value = -1\n\n best_score = -1*comp_value*np.inf\n\n for model_idx in range(len(best_pipelines)):\n #The best model is selected accordingly to the respective score\n if comp_value*best_pipelines[model_idx].performance_metric_value > comp_value*best_score:\n best_model_pipeline = deepcopy(best_pipelines[model_idx])\n best_score = best_model_pipeline.performance_metric_value\n return best_model_pipeline", "title": "" }, { "docid": "545f37c528a5aa3049c69332a7ceab9e", "score": "0.53340983", "text": "def __init__(self,\n p=2,\n q=2,\n seasonal=True,\n P=1,\n Q=1,\n m=7,\n metric='mse',\n metric_mode=None,\n logs_dir=\"/tmp/auto_arima_logs\",\n cpus_per_trial=1,\n name=\"auto_arima\",\n remote_dir=None,\n load_dir=None,\n **arima_config\n ):\n if load_dir:\n self.best_model = ARIMAModel()\n self.best_model.restore(load_dir)\n try:\n from bigdl.orca.automl.auto_estimator import AutoEstimator\n self.search_space = {\n \"p\": p,\n \"q\": q,\n \"seasonal\": seasonal,\n \"P\": P,\n \"Q\": Q,\n \"m\": m,\n }\n self.metric = metric\n self.metric_mode = metric_mode\n model_builder = ARIMABuilder()\n self.auto_est = AutoEstimator(model_builder=model_builder,\n logs_dir=logs_dir,\n resources_per_trial={\n \"cpu\": cpus_per_trial},\n remote_dir=remote_dir,\n name=name)\n except ImportError:\n warnings.warn(\"You need to install `bigdl-orca[automl]` to use `fit` function.\")", "title": "" }, { "docid": "066db604967f9b7ad760319acaa731ee", "score": "0.53304225", "text": "def _get_model_for_prediction(self, model: Optional[Union[str, AbstractTimeSeriesModel]] = None) -> str:\n if model is None:\n if self.model_best is None:\n best_model_name: str = self.get_model_best()\n self.model_best = best_model_name\n logger.info(\n f\"Model not specified in predict, will default to the model with the \"\n f\"best validation score: {self.model_best}\",\n )\n return self.model_best\n else:\n if isinstance(model, AbstractTimeSeriesModel):\n return model.name\n else:\n return model", "title": "" }, { "docid": "2cd4503d467507b44ced18f18e7439ab", "score": "0.5328805", "text": "def predict_sarimax_model(x_train,y_train,x_test,y_test,order,seasonal_order,feats_to_use=None,round_predictions=False,plot_results=True):\n \n #ipdb.set_trace()\n \n predictions = pd.Series()\n y_train_history = y_train.copy()\n \n if feats_to_use is None:\n feats_to_use = x_train.columns\n \n x_train_history = x_train[feats_to_use].copy()\n for t in pd.DataFrame(y_test).iterrows():\n model = SARIMAX(endog=y_train_history,exog=x_train_history[feats_to_use],order=order,seasonal_order=seasonal_order,\n enforce_stationarity=False,enforce_invertibility=False)\n \n model_fit = model.fit(disp=0)\n output = model_fit.forecast(exog=pd.DataFrame(x_test[feats_to_use].loc[t[0],:]).T)\n \n if output.iloc[0]< y_train.min() :\n yhat = y_train.min()\n elif output.iloc[0]> y_train.max() :\n yhat =y_train.max()\n else :\n if round_predictions:\n yhat = round(output.iloc[0],0)\n else:\n yhat = output.iloc[0]\n \n \n if round_predictions:\n predictions.loc[t[0]] = round(yhat,0)\n else:\n predictions.loc[t[0]] = yhat\n \n y_train_history.loc[t[0]] = t[1].values[0]\n x_train_history.loc[t[0],:] = x_test[feats_to_use].loc[t[0],:]\n #x_train_history = pd.concat([x_train_history[feats_to_use],\n # pd.DataFrame(x_test[feats_to_use].loc[t[0],:]).T],axis=0)\n #print(\"Period: \",t[0],'predicted=%f, expected=%f' % (yhat, t[1].values[0]))\n 
#print(\"Period: \",t[0],'predicted=%f, expected=%f' % (yhat, t[1].values[0]))\n \n try:\n \n if plot_results:\n mse_error = np.sqrt(mean_squared_error(y_test, predictions))\n mae_error = mean_absolute_error(y_test, predictions)\n mape_error = mean_absolute_percentage_error(y_test.values, predictions.values)\n \n print('Test RMSE: %.3f' % mse_error)\n print('Test MAE: %.3f' % mae_error)\n print('Test MAPE: %.3f' % mape_error)\n # plot\n predictions = pd.DataFrame(data=y_test.values,index=y_test.index,columns=['actual']).merge(pd.DataFrame(data=predictions.values,\n index=predictions.index,columns=['predictions']),\n left_index=True,right_index=True)\n predictions.plot()\n plt.show()\n print(model_fit.summary())\n \n \n except Exception as e:\n print(e)\n \n return model_fit,predictions", "title": "" }, { "docid": "4caaed738264d9af73ada2bd55870059", "score": "0.5322474", "text": "def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n selected_model = None\n #Model selection: The lower the AIC/BIC value the better the model\n selected_score = float('inf')\n\n for num_states in range(self.min_n_components, self.max_n_components + 1):\n try:\n\n model = self.base_model(num_states)\n logL = model.score(self.X, self.lengths)\n\n #calculate the score\n\n #p is the number of parameters\n #parameters = n * n + 2 * n * d - 1 where d is number of features\n p = num_states * num_states + 2 * num_states * len(self.X[0]) - 1\n #N is the number of data points\n num_of_data = len(self.X)\n logN = np.log(num_of_data)\n score = -2 * logL + p * logN\n\n if score < selected_score:\n selected_model = model\n selected_score = score\n\n except Exception:\n pass\n\n return selected_model", "title": "" }, { "docid": "f8b7d1e85d3d80aad57ede051cdc4555", "score": "0.532233", "text": "def best_value(self):\n return self._best_value", "title": "" }, { "docid": "0e2af80c4e77d914d171fdba0b52c0ea", "score": "0.5317436", "text": "def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n # initialize variables with score being the largest possible number\n best_score= float(\"inf\")\n best_model= None\n\n # get other attributes we need for BIC calculation\n num_features= len(self.X[0])\n N= np.sum(self.lengths)\n logN= np.log(N)\n\n for component_num in range(self.min_n_components, self.max_n_components + 1):\n try:\n\n # get model and log likelihood\n model= self.base_model(component_num) #GaussianHMM\n logL= model.score(self.X, self.lengths)\n\n # calculate parameters\n p= (component_num**2) + 2 * num_features * component_num - 1\n\n # calculate BIC\n score= -2 * logL + p * logN\n\n # update score and model that generated score\n if score < best_score:\n best_score= score\n best_model= model\n\n except:\n #print(\"failure on {} @ {}\".format(self.this_word, component_num))\n pass\n\n return best_model", "title": "" }, { "docid": "67709c49e0b95817ceb4a404aff35183", "score": "0.53158575", "text": "def best_param(model, data, pdq, pdqs):\n ans = []\n for comb in tqdm(pdq):\n for combs in tqdm(pdqs):\n try:\n mod = model(data,\n order=comb,\n seasonal_order=combs,\n enforce_stationarity=False,\n enforce_invertibility=False,\n freq='D')\n\n output = mod.fit()\n ans.append([comb, combs, output.aic])\n #print(ans.head(2))\n except:\n continue\n\n ans_df = pd.DataFrame(ans, columns=['pdq', 'pdqs', 'aic'])\n ans_df['pdq'] = ans_df['pdq'].astype(float)\n ans_df['pdqs'] = ans_df['pdqs'].astype(float)\n ans_df['aic'] = ans_df['aic'].astype(float)\n return 
ans_df.loc[ans_df.aic.idxmin()]",
        "title": ""
    },
    {
        "docid": "cd76f241ce430fb806c84e87971776bc",
        "score": "0.5314306",
        "text": "def get_best_value(self):\n        return self.get_fitness(self.get_best_agent())",
        "title": ""
    }
]
ac4a70a179548769a54b4e8bcda2a33e
max Information-based Nonparametric Exploration.
[ { "docid": "e66a409de6378bd34be549f1e3ec9e89", "score": "0.0", "text": "def mic_test(self, inputs):\n # init\n mic_matrix = np.full(\n (self.num_features, self.num_features), np.nan)\n sign_matrix = np.zeros(\n (self.num_features, self.num_features))\n\n # mic & sign matrix\n for i in range(self.num_features):\n for j in range(self.num_features):\n if i != j:\n mine = MINE(alpha=0.6, c=15)\n mine.compute_score(inputs[:, i], inputs[:, j])\n mic = mine.mic()\n\n mic_matrix[i, j] = mic\n\n if mic > self.mic_thresold:\n sign_matrix[i, j] = 1\n\n return mic_matrix, sign_matrix", "title": "" } ]
[ { "docid": "b1786dfbdc9d9be4ed12f80d58b1acee", "score": "0.6003945", "text": "def _argmax(self, params, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "ceef1e74a0096d55b64ef192415892ef", "score": "0.598692", "text": "def max(input, dim):\n pass", "title": "" }, { "docid": "d0938d34608e81f2d82788c37c9e3532", "score": "0.593062", "text": "def get_posterior_statistics(self, N=None):\n N = 2\n if N is None:\n i = self.posterior_sample[:, -1].argmax()\n pars = self.posterior_sample[i, :]\n else:\n mask = self.posterior_sample[:, self.index_component]==N\n self.mask = mask\n i = self.posterior_sample[mask, -1].argmax()\n pars = self.posterior_sample[mask][i, :]\n\n print 'maximum likelihood '\n print pars[:5]\n print pars[pars != 0]\n\n sort_periods = False\n if sort_periods:\n # sort the periods (this works quite well with 2 planets...)\n periods = np.exp(self.Tall)\n amplitudes = self.Aall\n eccentricities = self.Eall\n sorted_periods = apply_argsort(periods, periods, axis=1)\n sorted_amplitudes = apply_argsort(periods, amplitudes, axis=1)\n sorted_eccentricities = apply_argsort(periods, eccentricities, axis=1)\n\n P1, P2 = sorted_periods.T\n K1, K2 = sorted_amplitudes.T\n e1, e2 = sorted_eccentricities.T\n assert P1.shape == P2.shape\n\n if N == 2:\n periods = np.exp(res.Tall[mask,:2])\n amplitudes = self.Aall[mask, :2]\n eccentricities = self.Eall[mask, :2]\n\n sorted_periods = apply_argsort(periods, periods, axis=1)\n sorted_amplitudes = apply_argsort(periods, amplitudes, axis=1)\n sorted_eccentricities = apply_argsort(periods, eccentricities, axis=1)\n\n P1, P2 = sorted_periods.T\n K1, K2 = sorted_amplitudes.T\n e1, e2 = sorted_eccentricities.T\n else:\n pass\n\n print \n print 'medians:'\n print\n\n a = '$%7.5f\\,^{+\\,%7.5f}_{-\\,%7.5f}$' % percentile68_ranges(P1)\n b = ' & $%4.3f$' % P1.std()\n print '%-40s' % a, b\n\n a, b = '$%3.2f\\,^{+\\,%3.2f}_{-\\,%3.2f}$' % percentile68_ranges(K1), ' & $%4.3f$' % K1.std()\n print '%-40s' % a, b\n \n a, b = '$%4.3f\\,^{+\\,%4.3f}_{-\\,%4.3f}$' % percentile68_ranges(e1), ' & $%4.3f$' % e1.std()\n print '%-40s' % a, b\n\n a, b = '$%7.5f\\,^{+\\,%7.5f}_{-\\,%7.5f}$' % percentile68_ranges(P2), ' & $%4.3f$' % P2.std()\n print '%-40s' % a, b\n\n a, b = '$%3.2f\\,^{+\\,%3.2f}_{-\\,%3.2f}$' % percentile68_ranges(K2), ' & $%4.3f$' % K2.std()\n print '%-40s' % a, b\n\n a, b = '$%4.3f\\,^{+\\,%4.3f}_{-\\,%4.3f}$' % percentile68_ranges(e2), ' & $%4.3f$' % e2.std()\n print '%-40s' % a, b\n\n\n\n ############################################################\n\n mjup2mearth = 317.828\n star_mass = 0.913\n\n\n m_mj = 4.919e-3 * star_mass**(2./3) * P1**(1./3) * K1 * np.sqrt(1-e1**2)\n m_me = m_mj * mjup2mearth\n # a = ((system.star_mass + m_me*mearth2msun)/(m_me*mearth2msun)) * sqrt(1.-ecc**2) * K * (P*mean_sidereal_day/(2*np.pi)) / au2m\n\n print 'b - $%4.2f\\,^{+\\,%4.2f}_{-\\,%4.2f}$ [MEarth]' % percentile68_ranges(m_me)\n # print '%8s %11.4f +- %7.4f [AU]' % ('a', a.n, a.s)\n\n\n\n m_mj = 4.919e-3 * star_mass**(2./3) * P2**(1./3) * K2 * np.sqrt(1-e2**2)\n m_me = m_mj * mjup2mearth\n # a = ((system.star_mass + m_me*mearth2msun)/(m_me*mearth2msun)) * sqrt(1.-ecc**2) * K * (P*mean_sidereal_day/(2*np.pi)) / au2m\n\n print 'c - $%4.2f\\,^{+\\,%4.2f}_{-\\,%4.2f}$ [MEarth]' % percentile68_ranges(m_me)\n # print '%8s %11.4f +- %7.4f [AU]' % ('a', a.n, a.s)", "title": "" }, { "docid": "4ab9269ad289e50c71bb3ff1d4c2423f", "score": "0.59154165", "text": "def graphene_mobilityFE(n, sigmaxx):\n\n params = np.polyfit(n*1.602e-19, sigmaxx, 1)\n return 
params[0]", "title": "" }, { "docid": "c9bf4751ebee4340e5e3a5a655ff7042", "score": "0.5724603", "text": "def MaximalParameterValue(self, *args):\n return _Prs3d.Prs3d_Drawer_MaximalParameterValue(self, *args)", "title": "" }, { "docid": "ed16a48409887c5a7bd97b9c8abe6b51", "score": "0.56957066", "text": "def get_maximum(self):\n maxi = np.argmax(self.gp.Y)\n return self.gp.X[maxi, :], self.gp.Y[maxi]", "title": "" }, { "docid": "2eb0a30d971c3e02a7d968d2e33dc584", "score": "0.56721926", "text": "def __init__(self):\n super(Softmax, self).__init__()", "title": "" }, { "docid": "7265cd5e35d3f8b9437f1f486be20f76", "score": "0.5668487", "text": "def probs_over_ops(self):\n return F.gumbel_softmax(self.path_alpha, self.temperature)", "title": "" }, { "docid": "0eb3482d9a3118ee1d7439737689026e", "score": "0.565801", "text": "def maximiser(self):\n if self.check_max:\n self._max = all(self.f > v.f for v in self.nn)\n self.check_max = False\n\n return self._max", "title": "" }, { "docid": "8d3bd561c602b2f3a8fdb9eaa230c583", "score": "0.56527305", "text": "def soil_moist_max(awc, root_depth):\r\n values = awc.ravel() * root_depth.ravel()\r\n values[values < 3] = 3\r\n values[values > 20] = 20\r\n record = ParameterRecord(\r\n \"soil_moist_max\",\r\n values,\r\n dimensions=[\r\n [\"nhru\", values.size],\r\n ],\r\n datatype=2,\r\n )\r\n return record", "title": "" }, { "docid": "b540473abb1f293873b7564be423bad7", "score": "0.55750936", "text": "def test_noisy_max(self):\n rand_data = src.examples.get_random_data(self.DATA_SIZE)\n range_set = range(-self.DOMAIN_SIZE, self.DOMAIN_SIZE)\n\n # Pr[quality_median(result) < quality_median(np.median(randD))-2/eps*(log(domain_size)+t)] < exp(-t)\n error_parameter = 10\n difference = (2 / self.eps * (math.log(self.DOMAIN_SIZE) + error_parameter))\n # TODO change the print to something more accurate. 
here and in the next method\n print \"The maximum 'allowed' difference between the \" \\\n \"mechanism result and the true median is: %.2f\" % difference\n\n result = self.__test_mechanism(src.basicdp.noisy_max, rand_data, range_set, True)\n\n print \"The Noisy-Max Mechanism returned: %.2f\" % result\n print \"Result quality: %d\\n\" % src.qualities.quality_median(rand_data, result)\n\n # print and plot the results\n # self.__plot_test_results(result, rand_data, range_set)\n\n # pass if both mechanism returns a relatively high value result\n self.assertGreaterEqual(src.qualities.quality_median(rand_data, result),\n src.qualities.quality_median(rand_data,\n np.median(rand_data)) - difference)", "title": "" }, { "docid": "b44fdc4805d9e831851dff2d3ffbb6b5", "score": "0.55531096", "text": "def get_exploration_policy(self):", "title": "" }, { "docid": "ea7e6eefed37ea9c96db12a1c1938186", "score": "0.5538254", "text": "def compute_maximal_regular_subalgebra(algebra):", "title": "" }, { "docid": "0616a2000a98e20d34a4b9e053ae5ea1", "score": "0.55329233", "text": "def test_elementwise_max(self):\n\n utils.compare_tracing_methods(\n SimpleMaxModule(), torch.randn(4), torch.randn(4), fusible_ops={\"aten::max\"}\n )", "title": "" }, { "docid": "5deb8555e79daae8d541f758ca381a33", "score": "0.5521617", "text": "def max(*args):\n return _itkNumericTraitsPython.itkNumericTraitsVLVD_max(*args)", "title": "" }, { "docid": "72285aee8f9912181d5d2affdd7b31f3", "score": "0.55032694", "text": "def maxFitness(obj, this):\n c = obj.getCoefficients()\n # print(this)\n # print(c)\n val = 0\n\n for i in range(len(c)):\n val += (c[i] * this[i])\n\n mx = -abs(val) + obj.getConstant()\n return mx", "title": "" }, { "docid": "3ecb24a737d2b6cb006b71aced7a6253", "score": "0.5480469", "text": "def maximise(self, *args, **kwargs):\n\n for _ in range(3):\n x = self._maximise(*args, **kwargs)\n\n return x", "title": "" }, { "docid": "492eb049778ca0964e0be78a4fabc7d1", "score": "0.5479945", "text": "def logits(self):", "title": "" }, { "docid": "da3986bd138ed75f734b4d7309e2fe61", "score": "0.5470811", "text": "def second_local_max( profile ):\n inds = argrelextrema(profile, np.greater)[0]\n \n maxInd = inds[0]\n \n if 0:\n plt.plot(np.r_[0:profile.size], profile, 'r+')\n plt.plot( [maxInd, maxInd], [0,0.02] , 'b' )\n plt.show()\n \n return maxInd", "title": "" }, { "docid": "9e86a1b6740210ecc83c3a4e7dfdbd8d", "score": "0.5469874", "text": "def test_find_max_eval(self):\n\n filt = FilterMatrix()\n\n # Values to calculate theoretical and empirical pdfs\n eigen_observations = np.array([0.1, 0.2, 0.2, 0.3, 0.3, 0.3, 0.4, 0.4, 0.5])\n tn_relation = 2\n kde_bwidth = 0.4\n\n # Optimizing and getting the maximum random eigenvalue and the optimal variation\n maximum_eigen, var = filt._find_max_eval(eigen_observations, tn_relation, kde_bwidth)\n\n # Testing the maximum random eigenvalue and the optimal variation\n self.assertAlmostEqual(maximum_eigen, 2.41011, delta=1e-5)\n self.assertAlmostEqual(var, 0.82702, delta=1e-5)", "title": "" }, { "docid": "7b0004f2be2b437be98b8b59256f5b0b", "score": "0.54655266", "text": "def dfs_maximizing(state) :\n raise NotImplementedError", "title": "" }, { "docid": "9110a06c3278c59d7b1c76744f86936a", "score": "0.5449637", "text": "def mlp(self):\n raise NotImplementedError", "title": "" }, { "docid": "a7e2600241a891ca2e37aa9db02f111e", "score": "0.5442058", "text": "def max_params(self):\n return self.dims_in[0] * self.dims_in[1] * self.n_channels * self.depth_multiplier", "title": "" }, { "docid": 
"6c7a35872b5e0973371ceeb044f2727c", "score": "0.5440587", "text": "def nits(self):", "title": "" }, { "docid": "6c7a35872b5e0973371ceeb044f2727c", "score": "0.5440587", "text": "def nits(self):", "title": "" }, { "docid": "a82bf2415f716b24e48a4dadd3e9c96b", "score": "0.5430711", "text": "def param_sim_asc_max():\n\n para_out=[]\n parameters={}\n\n ###Discovery panel\n low=2\n high=20\n\n asc_nb_af=high\n asc_nb_eu=high\n asc_nb_as=high\n\n\n daf=random.uniform(0.05,0.10)\n\n\n ####Demographic model\n #population size in Africa\n NAF=float(round(10**5))\n parameters['NAF']=NAF\n\n #Ancestral population size, before population growth in AF\n #choose ancestral Ne based on being some value smaller than NAF between 1 or 0.1 x.\n #population growth\n Nrat_High=0.0 #Allow only growth for now\n NANC=float(round(10**Nrat_High*NAF))\n parameters['NANC']=NANC\n\n NCEU=float(round(10**5.0))\n parameters['NCEU']=NCEU\n\n NCHB=float(round(10**5.0))\n parameters['NCHB']=NCHB\n\n # Population size of WAJ\n NWA = float(round(10 **6.7))\n parameters['NWA'] = NWA\n\n # Population size of EAJ\n NEA = float(round(10 **6.7))\n parameters['NEA'] = NEA\n\n # Population size of AJ before growth\n if (NWA < NEA):\n NAg_High = math.log10(NWA)\n else:\n NAg_High = NEA\n NAg_Low = 2.0\n NAg = float(round(10 ** random.uniform(NAg_Low, NAg_High)))\n parameters['NAg'] = NAg\n\n #Population size of Jews\n NJ=float(round(10**6.0))\n parameters['NJ']=NJ\n\n #Population size of Middle Easterns\n NM=float(round(10**6.0))\n parameters['NM']=NM\n\n #migration rate from Europe to AJ\n m_High=1\n m_Low=0\n m = random.uniform(m_Low, m_High)\n parameters['m']=m\n\n #Time of the instantaneous growth in Africa, before the split between Africans and non Africans\n Tgrowth_Low=1\n Tgrowth_High=4100\n Tgrowth_Af=float(randint(Tgrowth_Low,Tgrowth_High))\n parameters['Tgrowth_Af']=Tgrowth_Af\n\n #Time of split between YRI and CEU/CHB\n Taf_High=4100\t\t\t\t #102,500 years using 25 years per generation\n Taf=float(Taf_High)\n parameters['Taf']=Taf\n\n #Time of split between Europe and Middle East\n TEM_High=1200\n TEM=float(TEM_High)\n parameters['TEM']=TEM\n\n #Time of split between CEU and CHB\n Teu_as_High=int(Taf)-1\n Teu_as=float(Teu_as_High)\n parameters['Teu_as']=Teu_as\n\n #Time of split between Jews and AJ\n TA_High=36\n TA=float(TA_High)\n parameters['TA']=TA\n\n #Time of split between Jews and Middle East\n TMJ_High=int(TEM)-1\n TMJ=float(TMJ_High)\n parameters['TMJ']=TMJ\n\n # Time of split between Eastern and Western AJ\n TAEW_High = (TA) - 2\n TAEW = float(TAEW_High)\n parameters['TAEW'] = TAEW\n\n #Time of migration\n Tm_High=int(TA)-1\n Tm_Low=16\n Tm=float(randint(Tm_Low,Tm_High))\n parameters['Tm']=Tm\n\n # Time of growth in AJ\n TAg_High = int(TAEW) - 1\n TAg_Low = 1\n TAg = float(randint(TAg_Low, TAg_High))\n parameters['TAg'] = TAg\n\n para_out.extend([asc_nb_af])\n para_out.extend([asc_nb_eu])\n para_out.extend([asc_nb_as])\n para_out.extend([daf])\n para_out.extend([math.log10(NAF)])\n para_out.extend([math.log10(NANC)])\n para_out.extend([math.log10(NCEU)])\n para_out.extend([math.log10(NCHB)])\n para_out.extend([math.log10(NWA)])\n para_out.extend([math.log10(NEA)])\n para_out.extend([math.log10(NAg)])\n para_out.extend([math.log10(NJ)])\n para_out.extend([math.log10(NM)])\n para_out.extend([m])\n para_out.extend([Tgrowth_Af])\n para_out.extend([Taf])\n para_out.extend([TEM])\n para_out.extend([Teu_as])\n para_out.extend([TA])\n para_out.extend([TMJ])\n para_out.extend([TAEW])\n para_out.extend([Tm])\n 
para_out.extend([TAg])\n\n\n case, modified_Tgrowth_Af = choose_case(parameters)\n\n parameters['Tgrowth_Af'] = modified_Tgrowth_Af\n\n return [parameters, para_out, case, daf]", "title": "" }, { "docid": "732fa3d043db691b3f75e3fb23bf4807", "score": "0.54172605", "text": "def maxs(self):\n return self.xyz.max(axis=0)", "title": "" }, { "docid": "e5a729397d0382125d76385f1087742c", "score": "0.5415086", "text": "def find_max_independent_set(graph, params):\n\n max_ind_set = []\n\n # QHACK #\n qaoa = qml.qaoa\n cost_h, mixer_h = qaoa.max_independent_set(graph, constrained=True)\n\n def qaoa_layer(gamma, alpha):\n qaoa.cost_layer(gamma, cost_h)\n qaoa.mixer_layer(alpha, mixer_h)\n\n wires = range(NODES)\n\n def circuit(params, **kwargs):\n qml.layer(qaoa_layer, N_LAYERS, params[0], params[1])\n \n dev = qml.device(\"default.qubit\", wires=wires)\n # dev = qml.device(\"qulacs.simulator\", wires=wires)\n cost_function = qml.ExpvalCost(circuit, cost_h, dev)\n\n optimizer = qml.GradientDescentOptimizer()\n steps = 3\n # params = [[0.5 for _ in range(N_LAYERS)] for _ in range(2)]\n\n for _ in range(steps):\n params = optimizer.step(cost_function, params)\n \n @qml.qnode(dev)\n def probability_circuit(gamma, alpha):\n circuit([gamma, alpha])\n return qml.probs(wires=wires)\n\n probs = probability_circuit(params[0], params[1])\n probs_list = []\n for p in probs:\n probs_list.append(float(p))\n # print(probs_list)\n # probs[0] = 0.0\n\n # print(probs)\n # plt.style.use(\"seaborn\") \n # fig = plt.figure()\n # ax1 = fig.add_subplot(111)\n # ax1.set_xticks(np.arange(len(probs)))\n # ax1.bar(range(2 ** len(wires)), probs)\n # plt.show()\n\n max_prob = max(probs_list)\n max_index = probs_list.index(max_prob)\n bit_str = \"{0:b}\".format(max_index).zfill(NODES)\n # print(bit_str)\n\n for idx, val in enumerate(bit_str):\n if val == \"1\":\n max_ind_set.append(idx)\n # QHACK #\n\n return max_ind_set", "title": "" }, { "docid": "1f7fb247d3f1488492ec3a529d596115", "score": "0.54109615", "text": "def get_max_independent_set_operator(num_nodes):\n pauli_list = []\n for i in range(num_nodes):\n x_p = np.zeros(num_nodes, dtype=np.bool)\n z_p = np.zeros(num_nodes, dtype=np.bool)\n z_p[i] = True\n pauli_list.append([0.5, Pauli(z_p, x_p)])\n shift = -num_nodes/2\n return WeightedPauliOperator(paulis=pauli_list), shift", "title": "" }, { "docid": "562e5451403e6b4a89d9469d0d4abae3", "score": "0.53967315", "text": "def max(*args):\n return _itkNumericTraitsPython.itkNumericTraitsLD_max(*args)", "title": "" }, { "docid": "c42b00e24626048d4446c2f03d2a5788", "score": "0.5390317", "text": "def Maxi (situation, comp, player, depth, module, alpha, beta):\r\n if module.isFinished(situation) or depth == 0 :\r\n return module.evalFunction(situation, player)\r\n else:\r\n situations = module.nextSituations({\"\":0}, situation, comp)\r\n for sit in situations:\r\n tmp = Mini(sit[0], comp, player, depth-1, module, alpha, beta)\r\n if tmp >= beta :\r\n return beta\r\n if tmp > alpha:\r\n alpha = tmp\r\n \r\n return alpha", "title": "" }, { "docid": "f88e1ea9270707198deace14ece94dab", "score": "0.53901976", "text": "def get_max_element(self):\n return int(2 ** (self.n / self.density) * (1 + random.expovariate(1 / (GEOMETRIC_MEAN - 1))))", "title": "" }, { "docid": "82174643c755e8186fcc5fe868106b64", "score": "0.5388567", "text": "def __maxMutualInformation(self, train_x, train_y):\n _, factorsNum = np.shape(train_x)\n mutualInformations = list()\n for i in range(factorsNum):\n cur_column = train_x[:,i]\n factorValues, factorCounts = 
np.unique(cur_column, return_counts=True)\n mutualInformation = 0\n for idx, val in enumerate(factorValues):\n pos = np.where(cur_column == val)[0]\n prob = (factorCounts[idx]/np.sum(factorCounts))\n mutualInformation += prob * DecisionTreeNode(train_x[pos], train_y[pos]).entropy\n mutualInformations.append(mutualInformation)\n return mutualInformations.index(min(mutualInformations))", "title": "" }, { "docid": "53fbd0d330c94429bf03af03d3450b77", "score": "0.538681", "text": "def max_eta(il):\n result = epoch\n for i in il:\n print(\"max_eta(): result = \", result)\n result = max(il[i].eta, result)\n return result", "title": "" }, { "docid": "c74b5b4d62eff6cbb6f089f96618d9e9", "score": "0.5382892", "text": "def get_Imax_isoseismal(self):\r\n\t\timport numpy as np\r\n\t\tfrom ..macro.isoseismal import read_isoseismals\r\n\r\n\t\tpg_data = read_isoseismals(self.ID, filled=True)\r\n\t\tif pg_data:\r\n\t\t\treturn np.max(pg_data.values['Intensity'])\r\n\t\telse:\r\n\t\t\treturn 0", "title": "" }, { "docid": "3fb07e9a9fe7c04c0fcc1b1c8a3ccf23", "score": "0.53816575", "text": "def local_max(image: HObject) -> HObject:\n with HalconOperator(468) as proc:\n proc.set_input_object(1, image)\n proc.execute()\n local_maxima = HObject(proc.get_output_object_key(1))\n return local_maxima # type: ignore", "title": "" }, { "docid": "4ab0bc41df2bc7d50a308135b37d91bb", "score": "0.53793573", "text": "def max(*args):\n return _itkNumericTraitsPython.itkNumericTraitsVLVF_max(*args)", "title": "" }, { "docid": "1ba63c69a9b6fb8448a190b2382c80ef", "score": "0.5373187", "text": "def measure_vmax(self, source: Sources):\n pass", "title": "" }, { "docid": "09b0edf4ba4c6738e6d0135db619bbf0", "score": "0.536929", "text": "def max(*args):\n return _itkNumericTraitsPython.itkNumericTraitsD_max(*args)", "title": "" }, { "docid": "c382944647402174e16c34cf76adb486", "score": "0.5363205", "text": "def _hardmax(prob):\n\t# prob += np.random.uniform(0, 1e-5, prob.shape) # Break symmetry.\n\t# return np.equal(prob, np.max(prob, axis=1, keepdims=True)).astype(np.float32)\n\t# y = np.zeros(prob.shape, dtype=prob.dtype)\n\tidx = np.argmax(prob, axis=1)\n\t# print(idx)\n\ty = np.eye(prob.shape[1], dtype=prob.dtype)[idx]\n\t# print(y)\n\tassert y.sum() == prob.shape[0]\n\treturn y", "title": "" }, { "docid": "a784bdb076e96567024081b60bb68d29", "score": "0.5323657", "text": "def test_image_maximum(context):\n vpw.prep(\"up_data\", vpw.pack(Param.NUM_WIDTH, 75000))\n\n io = vpw.idle(10) # wait for longer then the pipelined depth of module\n assert io[\"dn_data\"] == _model_rescale(75000, 0), \"Model max is different from modules max.\"", "title": "" }, { "docid": "68e9161a1fd5d5114c75d18ab55bbdf7", "score": "0.5321821", "text": "def max(*args):\n return _itkNumericTraitsPython.itkNumericTraitsSI_max(*args)", "title": "" }, { "docid": "6b14bc4e8e6288e7ed6d43cbf0162215", "score": "0.52991474", "text": "def maxmgval(self):\n f={'mview_d':'vsip_mmaxmgval_d(self.vsip,None)',\n 'vview_d':'vsip_vmaxmgval_d(self.vsip,None)',\n 'mview_f':'vsip_mmaxmgval_f(self.vsip,None)',\n 'vview_f':'vsip_vmaxmgval_f(self.vsip,None)'}\n if self.type in f:\n return eval(f[self.type])\n else:\n print('Type <:'+self.type+':> not supported by maxmgval')\n return", "title": "" }, { "docid": "be195c5f00a4b6dd279154ba1a73d1b4", "score": "0.5295196", "text": "def compute(self):\n assert len(self.parents) == 1, \"Error in SoftMax node parents number in computing graph\"\n x = self.parents[0].value.A1\n \n #Prevent excessive index\n MAX_VALUE = 1e2\n max_x = 
np.max(x)\n if max_x > MAX_VALUE:\n x = x / max_x * MAX_VALUE\n s = np.sum(np.exp(x))\n x = np.exp(x) / s\n self.value = np.mat(x)", "title": "" }, { "docid": "2bc3e36560df85d0467e12708b734608", "score": "0.5293803", "text": "def get_max_marginal_cost(self):\n pass", "title": "" }, { "docid": "b0c3865ca60596b4b661a64236878294", "score": "0.5288151", "text": "def learn(self, x , N_iter_max=1000, lamda_start=1,lamda_end=0.0001 ,koef_min=0.1, koef_max=0.7, rn_max=100, _print=False, type_learn=1): \r\n\r\n \r\n self.R=len(x[0])\r\n self.N_iter_max=N_iter_max\r\n self.koef=koef_max\r\n self.koef0=koef_min\r\n \r\n self.lamda_start=lamda_start\r\n self.lamda_end=lamda_end\r\n \r\n\r\n \r\n \r\n self.a=np.zeros(self.N,dtype=float)+lamda_start\r\n self.lamda_end=lamda_end\r\n \r\n if self.w_key:\r\n self.w=np.random.random( (self.N,self.R) )*koef_max + koef_min\r\n self.w_key=False\r\n \r\n if _print:\r\n print(\"Коэфициенты до обучения:\")\r\n print(self.w)\r\n print(\"Максимальное число итераций:\" + str(N_iter_max))\r\n \r\n \r\n if type_learn==1:\r\n n=0\r\n M=len(x)\r\n lis=[]\r\n for i in range(M):\r\n lis.append(i)\r\n while self.a.max()>self.lamda_end and n<N_iter_max:\r\n random.shuffle(lis)\r\n for i in lis:\r\n j=self._get_min_index(x,i)\r\n self._correct(x,i,j)\r\n n+=1\r\n\r\n\r\n if type_learn==2:\r\n self.Pmin=( self.N-1)/ self.N\r\n p=np.zeros(self.N, dtype=float)+self.Pmin\r\n\r\n \r\n M=len(x)\r\n lis=[]\r\n for i in range(M):\r\n lis.append(i)\r\n \r\n n_winer=np.zeros(self.N_nodes)\r\n n=0\r\n\r\n while self.a.max()>self.lamda_end and n<N_iter_max:\r\n\r\n random.shuffle(lis)\r\n for i in lis:\r\n j=self._get_min_index(x,i,p)\r\n if j==-1:\r\n p=p+1/self.N\r\n continue\r\n n_winer[j]+=1\r\n p=p+1/self.N\r\n p[j]=p[j]-self.Pmin-1/self.N\r\n \r\n\r\n self._correct(x,i,j)\r\n \r\n \r\n n+=1\r\n \r\n if _print:\r\n print(n)\r\n print(n_winer)\r\n\r\n\r\n if type_learn==3:\r\n self.rn_max=rn_max \r\n self._placed_node()\r\n M=len(x)\r\n lis=[]\r\n for i in range(M):\r\n lis.append(i)\r\n\r\n n=0\r\n\r\n while n<N_iter_max:\r\n \r\n random.shuffle(lis)\r\n for j in lis:\r\n i_win=self._get_min_index(x,j)\r\n\r\n rn=self._get_rn(i_win) #i,rho\r\n\r\n\r\n \r\n a1=self._ak(n)*(x[j]-self.w)\r\n a2=np.exp(-rn/rn.max()*self._lk(n)).reshape((self.N,1))\r\n\r\n self.w=self.w+a2*a1\r\n \r\n n+=1\r\n if _print:\r\n print(\"Обученные коэфициенты:\")\r\n print(self.w)", "title": "" }, { "docid": "0c17e1af4a2a39420f5b1fc3314d699d", "score": "0.52880096", "text": "def get_f_max(self):\n return self.model.predict(self.model.X)[0].max()", "title": "" }, { "docid": "0754a9ab759bd3585da4795e5fdc5b12", "score": "0.52767944", "text": "def get_ensemble_max(self):\n #edit 08/08/2019: new function, virtually the same as get_ensemble_min, but for maximum value of each ensemble\n\t\t\n self.ensemble_max=np.zeros((np.size(self.data),np.shape(self.data)[0]))\t\t\n for i in range(np.size(self.data)):\n if i<2:\n self.ensemble_max[i,:]=np.nan\n else: \n puff_numbers=self.get_ensembles(ensemble_size=i)\t\n self.ensemble_max[i,:]=np.matlib.repmat(self.data,np.shape(self.data)[0],1)[0,puff_numbers].max(axis=1)", "title": "" }, { "docid": "0a54eb249cea0ecec88319fee2b3fe3f", "score": "0.5273987", "text": "def maximisation(self):\n s1 = self.tik_.sum(axis=0)\n self.pi_k_ = s1 / self.X_.shape[0]\n self.pi_k_[-1] = 1 - self.pi_k_[:-1].sum()\n s1 = self.pi_k_ * self.X_.shape[0]\n for j in range(self.X_.shape[1]):\n for lvl in np.unique(self.X_[:, j]):\n ind = self.X_[:, j] == lvl\n self.prob_[lvl, :] = self.tik_[ind, 
:].sum(axis=0) / s1\n self.reabse_probs()\n\n # Laplacian smoothing\n # for idx in range(self.prob_.shape[1]):\n # self.prob_[self.prob_[:, idx] < (1 / s1[idx]), idx] = 1 / s1[idx]\n # self.reabse_probs()\n return self", "title": "" }, { "docid": "f8565c6114c6de0de969d3486918a359", "score": "0.5267663", "text": "def highIG(lst,Y):\n temp = []\n for i in lst:\n x = InformationGain(count(Y),count(PosNeg(i,Y)[0]),count(PosNeg(i,Y)[1]))\n temp.append(x)\n index, value = max(enumerate(temp), key=operator.itemgetter(1))\n return index", "title": "" }, { "docid": "e37cbb4bf276800c1f953b2e156dee4a", "score": "0.52656305", "text": "def evaluate(self, opt, grads, infos, objVar):", "title": "" }, { "docid": "b7eb3354229fcdcc4854fadcfeb3ba23", "score": "0.5262458", "text": "def max(*args):\n return _itkNumericTraitsPython.itkNumericTraitsVLVUS_max(*args)", "title": "" }, { "docid": "9484ba7af0e61bc05f1dc9dabb2ef7d7", "score": "0.52598196", "text": "def max(self, **kwargs):\n raise NotImplementedError(\"Implement a custom max method\")", "title": "" }, { "docid": "84c348cbf40f089554e90e54378d109a", "score": "0.52573943", "text": "def maxval(self):\n f={'mview_d':'vsip_mmaxval_d(self.vsip,None)',\n 'vview_d':'vsip_vmaxval_d(self.vsip,None)',\n 'mview_f':'vsip_mmaxval_f(self.vsip,None)',\n 'vview_f':'vsip_vmaxval_f(self.vsip,None)',\n 'mview_i':'vsip_mmaxval_i(self.vsip,None)',\n 'vview_i':'vsip_vmaxval_i(self.vsip,None)',\n 'mview_si':'vsip_mmaxval_si(self.vsip,None)',\n 'vview_si':'vsip_vmaxval_si(self.vsip,None)',\n 'vview_vi':'vsip_vmaxval_vi(self.vsip,None)'}\n if self.type in f:\n return eval(f[self.type])\n else:\n print('Type <:'+self.type+':> not supported by maxval')\n return", "title": "" }, { "docid": "1f88456bc8b54662fa4f7fba594347f2", "score": "0.5250428", "text": "def max(self):\n return self.getNdArray().max()", "title": "" }, { "docid": "6837dabc62ff7898c5cd6fe3008232d9", "score": "0.52492416", "text": "def softmax_in_NN_2(max_iteration):\n params_1 = {'mean': [1, 1], 'covariance_matrix': 0.5 * np.eye(2)}\n params_2 = {'mean': [-1, -1], 'covariance_matrix': 0.5 * np.eye(2)}\n params = [params_1, params_2]\n X, y = two_clusters_gaussian(params, 1000)\n activation_fn_type = 'relu'\n activation_fn = lambda x: np.maximum(np.zeros(x.shape), x)\n width = 5\n hidden_layers = 1\n input_dim = 2\n output_dim = 2\n architecture = {'width': width,\n 'hidden_layers': hidden_layers,\n 'input_dim': input_dim,\n 'output_dim': output_dim,\n 'activation_fn_type': 'relu',\n 'activation_fn_params': 'rate=1',\n 'activation_fn': activation_fn}\n rand_state = 0\n random = np.random.RandomState(rand_state)\n params = {'step_size': 1e-3,\n 'max_iteration': max_iteration,\n 'random_restarts': 1}\n nlm = NLM(architecture)\n y = get_dummies(y).values\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, random_state=random)\n nlm.fit_MLE(X_train.T, y_train.T, params)\n classifier = Classifier(nlm.weights, nlm.forward)\n y_pred_test = classifier.predict(X_test)\n accuracy = accuracy_score(y_true=y_test, y_pred=y_pred_test) # ok for the transformations, accuracy not checked\n print(accuracy)", "title": "" }, { "docid": "217837e44744e18cc1553ab97754462d", "score": "0.5238953", "text": "def main():\n x = defaultdict(lambda : defaultdict(lambda :defaultdict(int)))\n\n feature, acc, mse = analyze(\"maxtemp\")\n x[\"knn\"][\"maxtemp\"][\"acc\"] = acc * 100\n x[\"knn\"][\"maxtemp\"][\"mse\"] = mse\n\n feature, acc, mse = analyze(\"mintemp\")\n x[\"knn\"][\"mintemp\"][\"acc\"] = acc * 100\n 
x[\"knn\"][\"mintemp\"][\"mse\"] = mse\n\n feature, acc, mse = analyze(\"wind\")\n x[\"knn\"][\"wind\"][\"acc\"] = acc * 100\n x[\"knn\"][\"wind\"][\"mse\"] = mse \n\n feature, acc, mse = analyze(\"percipitation\")\n x[\"knn\"][\"percipitation\"][\"acc\"] = acc * 100\n x[\"knn\"][\"percipitation\"][\"mse\"] = mse\n\n return x", "title": "" }, { "docid": "e515c8f7954ca9a5e4c489dc52a14b0d", "score": "0.52322704", "text": "def best_ind(self):\n random.seed(64)\n NGEN = 10\n MU = 10\n pop = self.toolbox.population(n=MU)\n hof = tools.ParetoFront()\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", numpy.mean, axis=0)\n stats.register(\"std\", numpy.std, axis=0)\n stats.register(\"min\", numpy.min, axis=0)\n stats.register(\"max\", numpy.max, axis=0)\n # algorithms.eaMuPlusLambda(pop, self.toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, stats, halloffame=hof)\n algorithms.eaSimple(pop, self.toolbox, cxpb=0.6, mutpb=0.05, ngen=NGEN, halloffame=hof)\n print(\"The best individual is :\", hof[-1])\n # print(\"The best fitness is :\", eval_ind(self, hof[-1]))\n return hof[-1]", "title": "" }, { "docid": "0a13a6d1e56e95e53c6a4493f66f3221", "score": "0.52155906", "text": "def _maximum_entropy( self ):\n max_ent = -1\n for i, region in enumerate( self.phase_space ):\n if region[1] > max_ent:\n self.max_entropy = region[1]\n self.max_region = i\n if self.verbose:\n print \"Maximum entropy found: \", self.max_entropy", "title": "" }, { "docid": "1f1552263aa4781f3e6f992a75e4c347", "score": "0.5214875", "text": "def _genmaximum(self, parray, **kwargs):\n\n output = np.empty((0, 5))\n for array in parray:\n output = np.append(output, np.amax(array, axis=0).reshape((1, 5,)),\n axis=0)\n\n return output", "title": "" }, { "docid": "961725e5a7f985ae57ff7adb1abc0b84", "score": "0.52103394", "text": "def __call__(self, x):\n return self.be.fprop_softmax(x, self.axis)", "title": "" }, { "docid": "04ae4a73d85141028495a73f92e90cd6", "score": "0.5201827", "text": "def opt_func(exponents):\n# print('exponents=', exponents)\n hydrogen = Hydrogen(exponents)\n hydrogen.variational()\n return hydrogen.eigvals[0]", "title": "" }, { "docid": "9098ffafae8051e083adc12a5ba30b3c", "score": "0.51942647", "text": "def arg_max(state, prob):\n\n psi = state.full()\n q = np.linspace(-18, 18, 10001)\n leng = np.linspace(-18, 18, 10001)\n n = len(psi)\n A = osc_eigen(n, leng)\n xpsi_dm = np.dot(A.T, np.dot(psi, A))\n\n # xpsi = np.dot(psi.T,A)\n\n xpsi = np.diagonal(xpsi_dm)\n\n FT_list = []\n for p_value in leng:\n p_slice = np.exp(1j * leng * p_value) \n FT_list.append(p_slice)\n \n FT = np.stack(FT_list) \n \n ppsi = np.matmul(FT, xpsi)\n N = np.dot(ppsi, np.conjugate(ppsi.T))\n ppsi = 1/np.sqrt(N) * ppsi\n\n den_p = np.multiply(ppsi, np.conjugate(ppsi))\n\n mask_one = np.ones_like(q) # mask on the q space which is 1 everywhere\n mask_zero = np.zeros_like(q) \n delta_condition = np.sqrt(np.pi) / 6.0\n int_mask_1 = np.where((q % np.sqrt(np.pi) < delta_condition),\n mask_one, mask_zero)\n int_mask = np.where((q % np.sqrt(np.pi) > np.sqrt(np.pi) - delta_condition),\n mask_one, int_mask_1)\n \n den = np.multiply(ppsi, np.conjugate(ppsi))\n \n argmax_center = -100.\n cor_p_max = -100.\n\n for ag in np.linspace(-np.sqrt(np.pi)/2.0, np.sqrt(np.pi)/2.0, 1000):\n \"\"\"\n Compute the argmax\n \"\"\"\n\n r_mask_p = np.roll(int_mask, int(ag / dq))\n\n cor_p = np.dot(r_mask_p, den.T)\n\n if cor_p > cor_p_max:\n\n argmax_center = ag\n cor_p_max = cor_p\n\n delta_space = np.linspace(0, np.sqrt(np.pi)/2.0, 1000)\n \n if 
abs(argmax_center) > np.sqrt(np.pi)/6.0:\n\n print(\"Fail! Argmax too large:\", argmax_center)\n err_title = \"\\nloc{}_error{}_meas{}_kerr{}_prob{:.9f} argmax:{}\".format(location_str,\n error_str,\n measurement_result,\n kerr_scale,\n prob,\n argmax_center)\n\n with open(\"unaccepted_states.txt\", \"a+\") as f_un:\n f_un.write(err_title)\n sys.exit(0)\n \n else:\n\n epsilon_noisy = []\n \n for delta_condition in delta_space:\n\n int_mask_1 = np.where((q % np.sqrt(np.pi) < delta_condition),\n mask_one, mask_zero)\n int_mask = np.where((q % np.sqrt(np.pi) > np.sqrt(np.pi) - delta_condition),\n mask_one, int_mask_1)\n \n # equivalent to shift back using argmax\n r_mask_p = np.roll(int_mask, int(argmax_center / dq))\n\n # r_mask_p = int_mask\n\n cor_p = np.dot(r_mask_p, den_p.T)\n\n epsilon_noisy.append(cor_p)\n\n file_save_title = \"sqrt_over_2_minus_delta_one_minus_epsilon_loc{}_error{}_meas{}_{}_{}pulse_noise{}_round{}_kerr{}_prob{:.9f}\".format(location_str,\n error_str,\n measurement_result,\n if_non_linear,\n pulse_shape,\n if_noise,\n rounds,\n kerr_scale,\n prob)\n\n with open(save_path + file_save_title + \".txt\", \"w+\") as f:\n \n for i in range(len(delta_space)):\n f.write(\"{} {}\\n\".format(np.sqrt(np.pi) / 2.0 - delta_space[i], epsilon_noisy[i].real))", "title": "" }, { "docid": "4a155741f546f2d977158278539aadc5", "score": "0.51888984", "text": "def test_noinfo2(self) :\n\t\tss = Chou2006()\n\t\tss.exptype = \"noinfo\"\n\t\tss.equations = [1]\n\t\tar = ALRSolver(ss)\n\t\tar.solve(l1penalty=0.0,maxiter=10000,tol=10e-7)\n\t\tassert len(ar.all_exp_art) == 1\n\t\tassert len(ar.all_exp_art[0]['eqns']) == 1\n\t\tart = ar.all_exp_art[0]['eqns'][0]\t\t\n\t\tparams = art.params[-1]\n\t\tassert params is None", "title": "" }, { "docid": "a2674163b2aacd2773537e156d8f8348", "score": "0.51864356", "text": "def max(self):\n return 1", "title": "" }, { "docid": "17bce258141920592c0cce123f103da2", "score": "0.51808935", "text": "def max(self):\n return self._calculate_stats(np.max)", "title": "" }, { "docid": "396ebd01e92ceafb5e1e46d38210eaac", "score": "0.5180729", "text": "def maxmgvalindx(self):\n f={'mview_d':'vsip_mmaxmgval_d(self.vsip,idx)',\n 'vview_d':'vsip_vmaxmgval_d(self.vsip,idx)',\n 'mview_f':'vsip_mmaxmgval_f(self.vsip,idx)',\n 'vview_f':'vsip_vmaxmgval_f(self.vsip,idx)'}\n if self.type in f:\n if 'mview' in self.type:\n idx=vsip_scalar_mi()\n eval(f[self.type])\n return (int(idx.r),int(idx.c))\n else:\n idx=vindexptr()\n eval(f[self.type])\n retval=int(vindexptrToInt(idx))\n vindexfree(idx)\n return retval\n else:\n print('Type <:'+self.type+':> not supported by maxmgvalindx')\n return", "title": "" }, { "docid": "380cba45db1ed8aaf173abe49ca24a3a", "score": "0.51790226", "text": "def get_solution(self):\n index = -1\n max_f = -math.inf\n\n for i in range(self.N):\n if self.train_v[i] > 1.2 and self.train_f[i] > max_f:\n max_f = self.train_f[i]\n index = i\n return self.train_x[index]", "title": "" }, { "docid": "73a670ea833f40d264b224fed90ff49a", "score": "0.5178954", "text": "def max(*args):\n return _itkNumericTraitsPython.itkNumericTraitsSL_max(*args)", "title": "" }, { "docid": "c3a189ccc6a0dea98f4029d04f0c21d1", "score": "0.5175437", "text": "def argmax_mae_acc(output, target):\n with torch.no_grad():\n batch_size = target.size(0)\n \n true_predict_count=0\n for i in range(1,101):\n# predicted_group=torch.argmax(F.softmax(output[:,3*i-3:3*i],1),1)\n predicted_group=torch.argmax(output[:,3*i-3:3*i],1)\n# print(predicted_group)\n# os._exit(0)\n 
true_predict_count+=torch.sum(torch.eq(predicted_group,target[:,4*i-1].long()))\n \n for j in range(batch_size):\n predicted_classes=[]\n if predicted_group[j]==0:\n predicted_g1=1\n predicted_g2=0\n predicted_g3=0\n elif predicted_group[j]==1:\n predicted_g1=0\n predicted_g2=1\n predicted_g3=0\n else:\n predicted_g1=0\n predicted_g2=0\n predicted_g3=1\n for k in range(0,i):\n predicted_classes.append(predicted_g1)\n predicted_classes.append(predicted_g2)\n for l in range(0,101-i):\n predicted_classes.append(predicted_g3)\n if j==0:\n tmp_batch_predicted_classes=torch.tensor(predicted_classes).view(1,-1)\n else:\n tmp_batch_predicted_classes=torch.cat((tmp_batch_predicted_classes, torch.tensor(predicted_classes).view(1,-1)),0)\n if i==1:\n batch_predicted_classes=tmp_batch_predicted_classes.unsqueeze(2)\n else:\n batch_predicted_classes=torch.cat((batch_predicted_classes, tmp_batch_predicted_classes.unsqueeze(2)),2) \n predicted_classes_count=torch.sum(batch_predicted_classes,2)\n# predicted_classes=torch.argmax(predicted_classes_count,1)\n predicted_ages=torch.argmax(predicted_classes_count,1)\n\n mae=torch.sum(torch.abs(predicted_ages-target[:,400].long())).float()/batch_size\n \n acc=true_predict_count.float().mul_(100.0/(batch_size*100))\n \n return mae, acc, predicted_ages", "title": "" }, { "docid": "f468016c94bf5e326a698c56aa01c720", "score": "0.5175234", "text": "def mnlogit():\n obj = Namespace()\n obj.params = [\n [0.00100163, -0.05864195, -0.06147822, -0.04769671, -0.05222987,\n -0.09522432],\n [0., 0.03186139, 0.12048999, 0.83211915, 0.92330292,\n 1.5680646],\n [-0.0218185, -0.01988066, -0.00808564, -0.00487463, -0.01400173,\n -0.00562079],\n [0., 0.03306875, 0., 0.02362861, 0.05486435,\n 0.14656966],\n [0., 0.04448213, 0.03252651, 0.07661761, 0.07265266,\n 0.0967758],\n [0.90993803, -0.50081247, -2.08285102, -5.26132955, -4.86783179,\n -9.31537963]]\n obj.conf_int = [\n [[-0.0646223, 0.06662556],\n [np.nan, np.nan],\n [-0.03405931, -0.00957768],\n [np.nan, np.nan],\n [np.nan, np.nan],\n [0.26697895, 1.55289711]],\n\n [[-0.1337913, 0.01650741],\n [-0.14477255, 0.20849532],\n [-0.03500303, -0.00475829],\n [-0.11406121, 0.18019871],\n [0.00479741, 0.08416684],\n [-1.84626136, 0.84463642]],\n\n [[-0.17237962, 0.04942317],\n [-0.15146029, 0.39244026],\n [-0.02947379, 0.01330252],\n [np.nan, np.nan],\n [-0.02501483, 0.09006785],\n [-3.90379391, -0.26190812]],\n\n [[-0.12938296, 0.03398954],\n [0.62612955, 1.03810876],\n [-0.02046322, 0.01071395],\n [-0.13738534, 0.18464256],\n [0.03017236, 0.12306286],\n [-6.91227465, -3.61038444]],\n\n [[-0.12469773, 0.02023799],\n [0.742564, 1.10404183],\n [-0.02791975, -0.00008371],\n [-0.08491561, 0.19464431],\n [0.0332926, 0.11201273],\n [-6.29331126, -3.44235233]],\n\n [[-0.17165567, -0.01879296],\n [1.33994079, 1.79618841],\n [-0.02027503, 0.00903345],\n [-0.00267819, 0.29581751],\n [0.05343135, 0.14012026],\n [-11.10419107, -7.52656819]]]\n\n obj.bse = [\n [0.03348221, 0.03834221, 0.05658338, 0.04167742, 0.03697408,\n 0.03899631],\n [np.nan, 0.09012101, 0.13875269, 0.10509867, 0.09221543,\n 0.11639184],\n [0.00624543, 0.00771564, 0.01091253, 0.00795351, 0.00710116,\n 0.00747679],\n [np.nan, 0.07506769, np.nan, 0.08215148, 0.07131762,\n 0.07614826],\n [np.nan, 0.02024768, 0.02935837, 0.02369699, 0.02008204,\n 0.02211492],\n [0.32804638, 0.68646613, 0.92906957, 0.84233441, 0.72729881,\n 0.91267567]]\n\n obj.nnz_params = 32\n obj.aic = 3019.4391360294126\n obj.bic = 3174.6431733460686\n return obj", "title": "" }, { "docid": 
"145490bffa53266fb216f9f1fc3ae925", "score": "0.5171364", "text": "def find_optimal_pauli(self):\n i_max, j_max, f_max = 0, 0, 0\n theta_max = 0\n for i in range(self.dim):\n for j in range(self.dim):\n theta, f = self.optimize_theta(i, j)\n if f > f_max:\n i_max, j_max, f_max = i, j, f\n theta_max = theta\n return i_max, j_max, theta_max, f_max", "title": "" }, { "docid": "8ee0dc925a8771384237b8158b151397", "score": "0.5170123", "text": "def best_model(n_iter, model_type, D):\n\n # Train data\n datax = D['data'].as_matrix()\n K = 4\n\n # List to save results\n likelihood = []\n Cen = {0 : [], 1 : [], 2 : [], 3 : []}\n Sigma = {0 : [], 1 : [], 2 : [], 3 : []}\n pi = {0 : [], 1 : [], 2 : [], 3 : []}\n model = []\n\n # Iterate over n_iter times\n for k in tqdm(range(int(n_iter))):\n classifier = GM(K, covariance_type = model_type)\n # Fit the classifier\n classifier.fit(datax)\n A = classifier.q.argmax(axis = 0).astype(int)\n C = classifier.mu.reshape(K, 2)\n model.append(classifier)\n for i,j in enumerate(np.arctan2(C[:, 0], C[:, 1]).argsort()):\n Cen[i].append(C[j])\n Sigma[i].append(classifier.sigma[j])\n pi[i].append(classifier.pi[j])\n\n likelihood.append(classifier.L[-1])\n\n print (\"Moyenne de vraissemblance : {} - et écart-type : {}\"\\\n .format(np.mean(likelihood),np.std(likelihood)))\n for k in Cen.keys():\n Cen[k] = np.array(Cen[k])\n print (\"----- Centroide 1 -----\")\n print (\"moyenne : {}\\nécart-type : {}\"\\\n .format(np.mean(Cen[k], axis = 0), np.std(Cen[k], axis = 0)))\n\n # Better result = better likelihood\n best = np.array(likelihood).argmax()\n\n # Dictionary of best parameters\n best_params = {\n \"mu\" : np.array([Cen[i][best] for i in Cen.keys()]),\n \"sigma\": np.array([Sigma[i][best] for i in Sigma.keys()]),\n \"pi\" : np.array([pi[i][best] for i in pi.keys()]),\n \"likelihood_train\": likelihood[best],\n \"likelihood_test\": model[best]._log_likelihood_incomplete(D['test'].as_matrix())\n }\n\n return best_params", "title": "" }, { "docid": "da973fe155a51399d31d04e888f446e3", "score": "0.5166291", "text": "def MaximumNumberOfPoints(self):\n raise NotImplementedError()", "title": "" }, { "docid": "8a68f17d269a1ba8416f1ed7a3fab225", "score": "0.5164417", "text": "def find_numax():\n # Not used\n print('Find nu_max')\n\n # Load data\n freq, power = loadnpz(cps).T\n\n ac_minheights = ac_minheight\n\n # Run Gaussian filter\n nmps = scipy.ndimage.gaussian_filter1d(power, 1.5*nmsigma)\n\n # Initial guess of nu_max\n included_peak, included_height = find_peaks(freq, nmps,\n ac_minheights,\n ac_comparorder)\n max_peak = np.argmax(included_height)\n print('nu_max is guessed to be %s' % included_peak[max_peak])\n nu_max_filt = (50 <= freq) & (2000 >= freq)\n freqcut = freq[nu_max_filt]\n nmpscut = nmps[nu_max_filt]\n popt = gaussian_fit(freqcut, nmpscut)\n nu_max = popt[1]\n print('nu_max = %.4f, popt = %s' % (nu_max, popt))\n\n A = np.amax(nmps[nu_max_filt]) / np.amax(power[nu_max_filt])\n\n # Plot the smoothened power spectrum and the value of nu_max\n plt.figure()\n # fix_margins\n plt.plot(freq[::100], A * power[::100], 'k', linewidth=0.2)\n plt.plot(freq[::100], nmps[::100], 'k')\n plt.plot(freqcut, nmpscut, 'g')\n plt.plot(freqcut, gauss(freqcut, * popt), 'b')\n plt.plot(nu_max, gauss(nu_max, * popt), 'ro')\n # plt.title(r'The extremely smoothened power spectrum of %s' % starname)\n plt.xlabel(r'Frequency [$\\mu$Hz]')\n plt.ylabel(r'Power [ppm$^2$]')\n plt.xlim([np.amin(freq), 2000])\n plt.ylim([0, 0.75])\n plt.savefig('%s_nm%s_%s_%s.pdf' % (starname, nmsigma, 
minfreq,\n maxfreq), bbox_inches='tight')\n #plt.show()\n\n return nu_max", "title": "" }, { "docid": "09fa647a87bb8a340cf3039f57d97355", "score": "0.5163646", "text": "def softmax_in_NN_3(max_iteration):\n params_1 = {'mean': [1, 1], 'covariance_matrix': 0.5 * np.eye(2)}\n params_2 = {'mean': [-1, -1], 'covariance_matrix': 0.5 * np.eye(2)}\n params_3 = {'mean': [8, 9], 'covariance_matrix': 0.5 * np.eye(2)}\n params = [params_1, params_2, params_3]\n X, y = two_clusters_gaussian(params, 1000)\n activation_fn_type = 'relu'\n activation_fn = lambda x: np.maximum(np.zeros(x.shape), x)\n width = 5\n hidden_layers = 1\n input_dim = 2\n output_dim = 3\n architecture = {'width': width,\n 'hidden_layers': hidden_layers,\n 'input_dim': input_dim,\n 'output_dim': output_dim,\n 'activation_fn_type': 'relu',\n 'activation_fn_params': 'rate=1',\n 'activation_fn': activation_fn}\n rand_state = 0\n random = np.random.RandomState(rand_state)\n params = {'step_size': 1e-3,\n 'max_iteration': max_iteration,\n 'random_restarts': 1}\n nlm = NLM(architecture)\n y = get_dummies(y).values\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, random_state=random)\n nlm.fit_MLE(X_train.T, y_train.T, params)\n classifier = Classifier(nlm.weights, nlm.forward)\n y_pred_test = classifier.predict(X_test)\n accuracy = accuracy_score(y_true=y_test, y_pred=y_pred_test)\n return accuracy", "title": "" }, { "docid": "50c845b452aa8dff142a68628224b5f5", "score": "0.5153114", "text": "def _softmaxFun2Expr(self,fun,typeName):\n assert False, 'abstract method called'", "title": "" }, { "docid": "6a721f86f41e2d586525b4a5eda1d4db", "score": "0.5150752", "text": "def max(*args):\n return _itkNumericTraitsPython.itkNumericTraitsSS_max(*args)", "title": "" }, { "docid": "693a7a5c55ab2e597cf5e4d12b01e3cd", "score": "0.5150678", "text": "def collect_rpl_max(model, loader, gamma, total_number, tsne_fea, idx_to_class=None):\n\n with torch.no_grad():\n confidence_dict = {}\n corr_number = 0\n for _, i in enumerate(idx_to_class):\n confidence_dict[idx_to_class[i]] = []\n for i, data in enumerate(loader, 0):\n # get the inputs & combine positive and negatives together\n img = data['image']\n img = img.cuda()\n label_idx = data['label']\n outputs = model.forward(img)\n\n logits, dist_to_rp, outputs = compute_rpl_logits(model, outputs, gamma)\n max_distances, max_indices = torch.max(logits, 1)\n corr_number += torch.sum(max_indices == label_idx.cuda()).item()\n probs = torch.softmax(logits, dim=1)\n max_probs, max_indices = torch.max(probs, 1)\n\n\n for j in range(0, img.shape[0]):\n\n if len(tsne_fea[data['label'][j].item()]) < 100:\n \n tsne_fea[data['label'][j].item()] += [outputs[j].tolist()]\n correct_leaf = idx_to_class[label_idx[j].item()]\n predicted_leaf_idx = max_indices[j].item()\n dist = max_distances[j].item()\n prob = max_probs[j].item()\n confidence_dict[correct_leaf].append({'prediction': predicted_leaf_idx, 'label': data['label'][j].item(), 'dist': dist, 'prob': prob})\n acc = corr_number / total_number\n print('Closed ACC:', acc)\n return confidence_dict", "title": "" }, { "docid": "9a45aedfa284ecba6d19ba649d687312", "score": "0.5139686", "text": "def get_Imax_official(self, Imin_or_max='max', min_fiability=80,\r\n\t\t\t\taggregate_by=\"commune\", agg_method=\"mean\", verbose=False):\r\n\t\tkwargs = locals().copy()\r\n\t\tkwargs.pop('self')\r\n\t\treturn self.get_Imax_traditional(data_type='official', **kwargs)", "title": "" }, { "docid": "9c7125016384f7c47856e59d1c043b88", "score": "0.5136824", 
"text": "def max_util_of_info(c, A, b, bounds, y):\n from scipy.optimize import linprog\n\n b[-1] = y\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", LinAlgWarning)\n warnings.simplefilter(\"ignore\", OptimizeWarning)\n solution = linprog(c, A, b, bounds=bounds)\n\n maximum_utility_of_information = -solution.fun\n return maximum_utility_of_information", "title": "" }, { "docid": "9622b14eb3090df952767befe2d93fe8", "score": "0.5135849", "text": "def solve(self,max_t=10):\r\n #traffic intensity\r\n self.an.traffic_intensity = self.lam/self.mu\r\n self.an.rho = self.an.traffic_intensity\r\n\r\n #create parameter vector\r\n n_vec = range(0,self.K+1)\r\n rho_vec = [self.an.rho] * (self.K+1)\r\n K_vec = [self.K] * (self.K+1)\r\n\r\n #define Pn function\r\n Pn = lambda n,rho,K: (1.0 - rho) * rho**n / (1.0 - rho**(K+1.0))\r\n\r\n if self.an.rho != 1.0:\r\n self.an.Pn = np.column_stack([n_vec,map(Pn,n_vec,rho_vec,K_vec)])\r\n else:\r\n self.an.Pn = np.column_stack([n_vec,[1.0/(self.K + 1.0)] * (self.K + 1)])\r\n\r\n #Calculate core results\r\n self.an.PK = self.an.Pn[self.K,1]\r\n self.an.lam_eff = self.lam*(1.0-self.an.PK)\r\n self.an.rho_eff = self.an.lam_eff * self.st\r\n if self.an.rho != 1.0:\r\n self.an.Lq = self.an.rho / (1.0 - self.an.rho) - \\\r\n self.an.rho * (self.K * self.an.rho ** self.K + 1) / \\\r\n (1.0 - self.an.rho ** (self.K + 1))\r\n self.an.P0 = (1.0 - self.an.rho) / (1 - self.an.rho**(self.K + 1))\r\n else:\r\n self.an.Lq = (self.K*(self.K-1.0))/(2*(self.K+1))\r\n self.an.P0 = 1.0/(self.K+1)\r\n \r\n self.an.L = self.an.Lq + self.an.rho_eff\r\n self.an.W = self.an.L/self.an.lam_eff\r\n self.an.Wq = self.an.W - self.st\r\n self.an.TA = self.lam*self.an.PK\r\n\r\n self.calc_CDF(max_t)", "title": "" }, { "docid": "56d99bccd6b9254581675111d61e9800", "score": "0.5134898", "text": "def max(*args):\n return _itkNumericTraitsPython.itkNumericTraitsSC_max(*args)", "title": "" }, { "docid": "f7a051f4fb7cf13767695ef9751e14f5", "score": "0.5134044", "text": "def test_max_iter(self):\n hessian_free_newton(self.oracle, self.x0, max_iter=15)", "title": "" }, { "docid": "438ab2dcbb08065ff1582af092f62cfa", "score": "0.51300496", "text": "def maxprob(self, prob):\n raise NotImplementedError", "title": "" }, { "docid": "f001e1cfd34473996617f5904348367e", "score": "0.5124069", "text": "def max(*args):\n return _itkNumericTraitsPython.itkNumericTraitsVLVUL_max(*args)", "title": "" }, { "docid": "365c6603a1be29a0bedde80df8f7273a", "score": "0.51218003", "text": "def nondimensionalise(self):\n pass", "title": "" }, { "docid": "e2eb810ab938a4fb8d7b5ad96b2ee184", "score": "0.5120383", "text": "def max(*args):\n return _itkNumericTraitsPython.itkNumericTraitsF_max(*args)", "title": "" }, { "docid": "8391df804ebf882b200828b171daec4e", "score": "0.5119842", "text": "def _p_max(M_j: int) -> float:\n return (1 / np.sqrt(2 * pi * M_j)) * (2 ** -M_j)", "title": "" }, { "docid": "67dd6eb445bd737a8387376c4b84cb5e", "score": "0.5119147", "text": "def _handler_max_image_view(self, event):\n self._mgr.LoadPerspective(\n self._perspectives['max_image'])", "title": "" }, { "docid": "67dd6eb445bd737a8387376c4b84cb5e", "score": "0.5119147", "text": "def _handler_max_image_view(self, event):\n self._mgr.LoadPerspective(\n self._perspectives['max_image'])", "title": "" }, { "docid": "587e0a9598da172ba0a27c246b21201a", "score": "0.5118634", "text": "def softmax(self, x):\n ### You must implement softmax by youself, otherwise you will not get credits for this part.\n\n\t\t### YOUR CODE HERE\n 
exp_x = np.exp(x)\n\n probs = exp_x / np.sum(exp_x)\n\n return probs\n\t\t### END YOUR CODE", "title": "" }, { "docid": "824fd092da8e93f29b893a2eebe7e63e", "score": "0.5117919", "text": "def max_element(x):\n return F.adaptive_max_pool1d(\n x.local_value(), 1, return_indices=False)", "title": "" }, { "docid": "9ea2675997c3d6cb804f45d59a70923d", "score": "0.51150846", "text": "def mip(inputs, depth, data_format='channels_last', name='mip'):\n return xip(inputs, depth, 'max', data_format, name)", "title": "" }, { "docid": "b959c9c6f3b14a4a3ec1c10d5cab527d", "score": "0.51125056", "text": "def itkNumericTraitsVLVD_max(*args):\n return _itkNumericTraitsPython.itkNumericTraitsVLVD_max(*args)", "title": "" }, { "docid": "33b70bc93bffb0dff4216169266031cc", "score": "0.51025605", "text": "def softmax(src, index, num_nodes=None):\n\n num_nodes = maybe_num_nodes(index, num_nodes)\n\n out = src - scatter_max(src, index, dim=0, dim_size=num_nodes)[0][index]\n out = out.exp()\n out = out / (\n scatter_add(out, index, dim=0, dim_size=num_nodes)[index] + 1e-16)\n\n return out", "title": "" }, { "docid": "b6b30f757d9b5e478568646c84af3113", "score": "0.50997114", "text": "def sel_unit_max(all_sel_dict, verbose=False):\n\n if verbose:\n print(\"\\n**** sel_unit_max() ****\")\n # is it necessary to copy this - i think so, script failed when I took copy statements out?\n copy_sel_dict = copy.copy(all_sel_dict)\n\n # focussed_dict_print(copy_sel_dict, 'copy_sel_dict')\n\n max_sel_dict = dict()\n\n # # loop through unit dict of sel measure vals for each class\n for measure, class_dict in copy_sel_dict.items():\n\n # # remove np.NaNs from dict\n clean_dict = {k: class_dict[k] for k in class_dict if not np.isnan(class_dict[k])}\n\n # # for each sel measure get list of sel values and classes\n measure_c_name = f\"{measure}_c\"\n classes = list(clean_dict.keys())\n values = list(clean_dict.values())\n\n # print(\"\\ncheck (leave this in, it fails here sometimes\\n\"\n # f\"measure_c_name: {measure_c_name}\\n\"\n # f\"classes:{classes}\\n\"\n # f\"values:{values}\"\n # )\n\n # # for each sel measure get max value and class\n if len(values) > 0:\n max_val = max(values)\n max_class = classes[values.index(max_val)]\n # print(measure, measure_c_name)\n\n # # copy max class and value to max_class_dict\n max_sel_dict[measure] = max_val\n max_sel_dict[measure_c_name] = max_class\n\n # # remove unnecessary variables rom the dict\n max_sel_dict['max_info_count'] = copy_sel_dict['max_info_count'][max_sel_dict[\"max_informed_c\"]]\n max_sel_dict['max_info_thr'] = copy_sel_dict['max_info_thr'][max_sel_dict[\"max_informed_c\"]]\n max_sel_dict['max_info_sens'] = copy_sel_dict['max_info_sens'][max_sel_dict[\"max_informed_c\"]]\n max_sel_dict['max_info_spec'] = copy_sel_dict['max_info_spec'][max_sel_dict[\"max_informed_c\"]]\n max_sel_dict['max_info_prec'] = copy_sel_dict['max_info_prec'][max_sel_dict[\"max_informed_c\"]]\n max_sel_dict['zhou_selects'] = copy_sel_dict['zhou_selects'][max_sel_dict[\"zhou_prec_c\"]]\n max_sel_dict['zhou_thr'] = copy_sel_dict['zhou_thr'][max_sel_dict[\"zhou_prec_c\"]]\n\n focussed_dict_print(copy_sel_dict)\n print(copy_sel_dict['b_sel'].keys())\n print(copy_sel_dict['b_sel'])\n print(copy_sel_dict['b_sel'].values())\n print(list(copy_sel_dict['b_sel'].values()))\n print(len(list(copy_sel_dict['b_sel'].values())))\n # print()\n\n\n if not len(list(copy_sel_dict['b_sel'].values())): # if there are no values\n 
print(f\"len(list(copy_sel_dict['b_sel'].values())):{len(list(copy_sel_dict['b_sel'].values()))}\")\n max_sel_dict['b_sel'] = float('NaN')\n max_sel_dict['b_sel_zero'] = float('NaN')\n max_sel_dict['b_sel_pfive'] = float('NaN')\n else:\n max_sel_dict['b_sel_off'] = copy_sel_dict['b_sel_off'][max_sel_dict[\"b_sel_c\"]]\n max_sel_dict['b_sel_zero'] = copy_sel_dict['b_sel_zero'][max_sel_dict[\"b_sel_c\"]]\n max_sel_dict['b_sel_pfive'] = copy_sel_dict['b_sel_pfive'][max_sel_dict[\"b_sel_c\"]]\n\n\n # # max corr_coef shold be the absolute max (e.g., including negative) where p < .05.\n # get all values into df\n coef_array = [] # [corr_coef, abs(corr_coef), p, class]\n for coef_k, coef_v in copy_sel_dict['corr_coef'].items():\n abs_coef = abs(coef_v)\n p = copy_sel_dict['corr_p'][coef_k]\n coef_array.append([coef_v, abs_coef, p, coef_k])\n coef_df = pd.DataFrame(data=coef_array, columns=['coef', 'abs', 'p', 'class'])\n\n # # filter and sort df\n coef_df = coef_df.loc[coef_df['p'] < 0.05]\n\n if not len(coef_df): # if there are not items with that p_value\n max_sel_dict['corr_coef'] = float('NaN')\n max_sel_dict['corr_coef_c'] = float('NaN')\n max_sel_dict['corr_p'] = float('NaN')\n else:\n coef_df = coef_df.sort_values(by=['abs'], ascending=False).reset_index()\n max_sel_dict['corr_coef'] = coef_df['coef'].iloc[0]\n max_sel_dict['corr_coef_c'] = coef_df['class'].iloc[0]\n max_sel_dict['corr_p'] = coef_df['p'].iloc[0]\n\n delete_list = ['max_info_count_c', 'max_info_thr_c', 'max_info_sens_c', 'max_info_spec_c', 'max_info_prec_c',\n 'zhou_selects_c', 'zhou_thr_c',\n 'b_sel_off_c', 'b_sel_zero_c', 'b_sel_pfive_c',\n 'corr_p_c']\n\n for delete_item in delete_list:\n if delete_item in max_sel_dict:\n del max_sel_dict[delete_item]\n\n # # round values\n for k, v in max_sel_dict.items():\n if type(v) is float:\n max_sel_dict[k] = round(v, 3)\n\n # print(\"\\n\\n\\n\\nmax sel dict\", max_sel_dict)\n # focussed_dict_print(max_sel_dict, 'max_sel_dict')\n\n return max_sel_dict", "title": "" }, { "docid": "a7ad01f2056a664fe2cdc21a5693024b", "score": "0.5098632", "text": "def fit(self):", "title": "" }, { "docid": "a7ad01f2056a664fe2cdc21a5693024b", "score": "0.5098632", "text": "def fit(self):", "title": "" }, { "docid": "ef7f33ba41e73d7cfc9da83c79f363bc", "score": "0.5096847", "text": "def init_optimization(self):\r\n\r\n start, maxes = self.max_sampler.get_maxes_start_values(\r\n self.m, self.X, self.y, self.X_var, self.y_var,\r\n self.fidelity_choice)\r\n self.maxes = np.transpose(maxes)\r\n return start", "title": "" }, { "docid": "4b1b0f258d9fcff4743e7dabfce4a046", "score": "0.5095604", "text": "def maxproba(proba):\n lenp = len(proba)\n m=0\n for i in range(0,lenp):\n if proba[i]>m:\n m=proba[i]\n im=i\n return im,m", "title": "" } ]
e27c418436ca52894e43d72ed2eed4f1
Computes the accuracy over the k top predictions for the specified values of k
[ { "docid": "1fb344a00114e8ceae403851687e0b81", "score": "0.7847017", "text": "def accuracy(output, target, topk=(1,)):\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" } ]
[ { "docid": "092b204d6f194e96b85acc310eb7d8eb", "score": "0.8249487", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n #res.append(correct_k.mul_(100.0 / batch_size))\n res.append(correct_k.mul_(1.0 / batch_size))\n \n return res", "title": "" }, { "docid": "86c3b1047f1177dd81269c801b061d39", "score": "0.82482845", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k)\n return res", "title": "" }, { "docid": "3e45591970a4a14023d96807e5dbe49c", "score": "0.82409656", "text": "def accuracy(output, target, topk=(1,)):\n#{{{\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "81532cbf10b8f1f4fa90ec839825174c", "score": "0.8184756", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n \n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n \n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n \n return res", "title": "" }, { "docid": "4d997f31d4783b58eed537f5b8344837", "score": "0.8179338", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n \n return res", "title": "" }, { "docid": "025509b66cfdac5604cf093bde86cbad", "score": "0.8178757", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(1.0 / batch_size))\n return res", "title": "" }, { "docid": "2cf3469d889b1b62fa1dcbf11778a4d9", "score": "0.81783867", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size).item())\n return res", "title": "" }, { "docid": "465f76dd648c36aee36472e1a5bb1a8f", "score": "0.8177302", "text": "def accuracy(output, target, topk=(1,)):\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t() # transposition\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n\r\n res = []\r\n for k in topk:\r\n correct_k = 
correct[:k].view(-1).float().sum(0)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n return res", "title": "" }, { "docid": "f21ccd8f6fcf4d2a1655b3b2a904e117", "score": "0.81726015", "text": "def accuracy(output, target, topk=(1,)):\n\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "cffc0aa760d42e8dd80618946e22d64d", "score": "0.8170473", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "97817cda1ef553a076ce850516a14d24", "score": "0.8169737", "text": "def accuracy(output, target, topk=(1,)): #one element tuple requires a comma, hence the default value of topk\n \"\"\" In other words, it returns the top k accuracy\"\"\"\n maxk = max(topk) #really just converts it to an int, I suppose\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True) #note that for each datum, the output is actually 10 float values.\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "95eaf79f5561e4f222a41d35de98e8dd", "score": "0.81622064", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "1823ba9df85f1387dcff9a3293d8cd1b", "score": "0.81620187", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, 
True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "1823ba9df85f1387dcff9a3293d8cd1b", "score": "0.81620187", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "1823ba9df85f1387dcff9a3293d8cd1b", "score": "0.81620187", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "1823ba9df85f1387dcff9a3293d8cd1b", "score": "0.81620187", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "23979d1ebf7923aabde8f9a559d6fc67", "score": "0.8160757", "text": "def accuracy(output, target, topk=(1,)):\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n\r\n res = []\r\n for k in topk:\r\n correct_k = correct[:k].view(-1).float().sum(0)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n return res", "title": "" }, { "docid": "c8ea95ea1897dd2f26732b9a54b25ad7", "score": "0.8151118", "text": "def accuracy(output,target,topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _,pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1,-1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n\n return res", "title": "" }, { "docid": "b8e6f627e8c87a42dad4878316b7c362", "score": "0.8146829", "text": "def topk_accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "bcace9c9cc787bf3cd8c6c88e7fa41ab", "score": "0.81375223", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "a283859b9680fe4622040c5cb8bfebbc", "score": "0.8134602", "text": "def accuracy(output, target, topk=(1, 5)):\n maxk = 
max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "d40979438d11221ff99dd0e48baef8e6", "score": "0.8127359", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n\n return res", "title": "" }, { "docid": "015278c5f6d40432bd7d4048952c3236", "score": "0.8124759", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "669441adfcfc03623f859258d627eae4", "score": "0.81228584", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t().type_as(target)\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "61c43a08b66caef58afff0cf9016be03", "score": "0.81115425", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n\n correct = pred.eq(target.view(1, -1).expand_as(pred).long())\n\n res = []\n correct_k = correct.sum()\n accuracy_value = correct_k * 100.0 / batch_size\n #res.append(correct_k * 100.0 / batch_size)\n\n return accuracy_value", "title": "" }, { "docid": "13fd89ae77a2fe157e0815db7b6036ee", "score": "0.81060624", "text": "def topk_accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n wrong_k = batch_size - correct_k\n res.append(wrong_k.mul_(100.0 / batch_size))\n\n if len(res) == 1:\n return res[0]\n else:\n return res\n # return res, pred[:1].squeeze(0)", "title": "" }, { "docid": "5f804c4d6c910f4bf56f5680e6edbcb8", "score": "0.8089445", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n wrong_k = batch_size - correct_k\n res.append(wrong_k.mul_(100.0 / batch_size))\n\n return res", "title": "" }, { "docid": "8a571f83bf1c8e9991d8c67f3ffdb893", "score": "0.80889446", "text": "def top_k_op(predictions, targets, k=1):\n with tf.name_scope('Top_' + str(k)):\n targets = tf.cast(targets, tf.int32)\n correct_pred = tf.nn.in_top_k(predictions, tf.argmax(targets, 1), k)\n acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n return acc", "title": "" }, { "docid": "3a7a92e501f4a2dae99c5c8dbe846821", "score": "0.80713624", "text": "def accuracy_topk(output, target, topk=(1, )):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n # one-hot case\n if target.ndimension() > 1:\n target = target.max(1)[1]\n\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = dict()\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0)\n res[\"acc{}\".format(k)] = correct_k.mul_(1.0 / batch_size).item()\n return res", "title": "" }, { "docid": "c97f6216adec1784d8deae7aeb753e16", "score": "0.8049245", "text": "def accuracy_at_k(\n outputs: torch.Tensor, targets: torch.Tensor, top_k: Sequence[int] = (1, 5)\n) -> Sequence[int]:\n\n with torch.no_grad():\n maxk = max(top_k)\n batch_size = targets.size(0)\n\n _, pred = outputs.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(targets.view(1, -1).expand_as(pred))\n\n res = []\n for k in top_k:\n correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "1752b4d96bc7d4b034eb79a096995e9f", "score": "0.80271596", "text": "def my_accuracy(output_, target, topk=(1,)):\n\tbatch_size = target.size(0)\n\tmaxk = max(topk)\n\t_, pred = output_.topk(maxk, 1, True, True)\n\tpred = pred.t()\n\tcorrect = pred.eq(target.contiguous().view(1, -1).expand_as(pred))\n\n\tres = []\n\tfor k in topk:\n\t\tcorrect_k = correct[:k].view(-1).float().sum(0)\n\t\tres.append(correct_k.mul_(100.0 / batch_size))\n\treturn res", "title": "" }, { "docid": "5dc26c88482f7055e9019e3ad41d504b", "score": "0.8021873", "text": "def accuracy(self, output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n # print(correct_k)\n\n res.append(correct_k.mul_(100.0 / batch_size)[0])\n return res", "title": "" }, { "docid": "1732ed4db06d24adcd3fb8d15cee39f9", "score": "0.80001116", "text": "def accuracy_topk(output, target, k=1):\n batch_size = target.size(0)\n\n _, pred = torch.topk(output, k=k, dim=1, largest=True, sorted=True)\n\n res_total = 0\n for curr_k in range(k):\n curr_ind = pred[:,curr_k]\n num_eq = torch.eq(curr_ind, target).sum()\n acc = num_eq/len(output)\n res_total += acc\n return res_total*100", "title": "" }, { "docid": "e7bb90e50db6baf1b2f578b09473bf56", "score": "0.7961552", "text": "def accuracy(output, target, topk=(1,)):\n '''\n This function comes from \n https://github.com/bearpaw/pytorch-classification/blob/master/utils/eval.py\n '''\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = 
correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "02515138da082075f88adc7c301783b3", "score": "0.79499626", "text": "def _accuracy(output, target, top_k=(1,)):\n with torch.no_grad():\n max_k = max(top_k)\n batch_size = target.size(0)\n\n _, pred = output.topk(max_k, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in top_k:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n pass", "title": "" }, { "docid": "e690fba21eca6f71ebb4f08453c4ecf3", "score": "0.79286027", "text": "def accuracy(self, output, target, topk=(1,)):\n\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "8f3ad3dc225b0b868bb7d31dbd14bfec", "score": "0.79236424", "text": "def accuracy(output, target, topk=(1,)):\r\n count = [0]*args.num_class\r\n acc = [0]*args.num_class\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t() #zhuanzhi\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n for idx in range(batch_size):\r\n count[target[idx]]+=1\r\n if target[idx] == pred[0][idx]:\r\n acc[target[idx]]+=1\r\n res = []\r\n classaccuracys = []\r\n for k in topk:\r\n correct_k = correct[:k].view(-1).float().sum(0)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n \r\n for i in range(args.num_class):\r\n if count[i]!=0:\r\n classaccuracy = (acc[i]*1.0/count[i])*100.0\r\n else:\r\n classaccuracy = 0\r\n classaccuracys.append(classaccuracy)\r\n return res", "title": "" }, { "docid": "2781ac5daa21c3a689685dd59601855c", "score": "0.7921736", "text": "def accuracy(output, target, top_k=(1,)):\n with torch.no_grad():\n max_k = max(top_k)\n batch_size = target.size(0)\n _, pred = output.topk(max_k, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n res = []\n for k in top_k:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "2cc3bffb77fc16caefab13fff967a057", "score": "0.7906755", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n return [correct[:k].reshape(-1).float().sum(0) * 100.0 / batch_size for k in topk]", "title": "" }, { "docid": "9882d294cb6e61e9e827d39627cbf3a3", "score": "0.7902296", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n # torch.topk : input, k, dim=None, largest=True, sorted=True => returns top k element\n # returns values list & indices list\n _, pred = output.topk(maxk, 1, True, True) \n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred)) # torch.eq: Computes element-wise equality\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) # input, dim,\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "03ebb27143d79a585dc2263b833d5b10", "score": "0.7890316", "text": "def 
accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n return [correct[:k].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]", "title": "" }, { "docid": "3a77c0010b5e7cb6ee1fa0c412ba7f68", "score": "0.7882956", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n return [correct[:k].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]", "title": "" }, { "docid": "ffedaec5592e697bdd730af1e1c0991e", "score": "0.78782773", "text": "def accuracy(output, target, topk=(1,)):\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n max5out, pred = output.topk(maxk, 1, True, True)\n pred2 = pred.t()\n correct = pred2.eq(target.view(1, -1).expand_as(pred2))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res, pred, max5out", "title": "" }, { "docid": "5cf66d24dd4c24c5fe2553f78767c661", "score": "0.78773075", "text": "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n # one-hot case\n if target.ndimension() > 1:\n target = target.max(1)[1]\n\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(1.0 / batch_size))\n\n return res", "title": "" }, { "docid": "27c7493090579c04c6d3f32db33ccc14", "score": "0.78758967", "text": "def accuracy(output: torch.Tensor, target: torch.Tensor, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].float().sum()\n res.append(correct_k.mul_(1.0 / batch_size))\n return res", "title": "" }, { "docid": "9fbd8a5a98a3a59afb831268228e4835", "score": "0.78661317", "text": "def accuracy(output, target, topk=(1,)):\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n \n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n \n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "8c77a9fae53f20c9293f37add3e44f07", "score": "0.7862789", "text": "def accuracy(output, target, topk=(1,)):\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].flatten().float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "ad34362d34b65784257d0e6e6a818eb6", "score": "0.78623915", "text": "def accuracy(output, target, topk=(1,)):\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(1.0 / batch_size))\n return res", "title": "" }, { "docid": "41551d2f87c04f7f56686d298a853899", "score": "0.78618157", "text": "def accuracy(output, target, topk=(1,)):\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "62b11c9f12552616ec4be35acc35f49e", "score": "0.7861433", "text": "def accuracy(output, target, topk=(1,)):\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "3ba6aefadd44d98edcf6e8274c5b4c18", "score": "0.7857391", "text": "def accuracy(output, target, topk=(1,5)):\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "title": "" }, { "docid": "6ae02464683c4d1aa0fed043c293a971", "score": "0.7852251", "text": "def accuracy(output, target, topk=(1,)):\r\n with torch.no_grad():\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n\r\n res = []\r\n for k in topk:\r\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n return res", "title": "" } ]
264d245dad9dd026a02a3524d8c95b3e
Kill the previously started Quixote server.
[ { "docid": "ae941954f9eabbecaef43c4c42270d0b", "score": "0.0", "text": "def kill_server():\n global _server_url\n if _server_url != None:\n try:\n fp = urllib.urlopen('%sexit' % (_server_url,))\n except:\n pass\n\n _server_url = None", "title": "" } ]
[ { "docid": "271d9573d0475ee33c60157648dadab6", "score": "0.7437033", "text": "def kill():\n return Server.kill()", "title": "" }, { "docid": "1857b1c801e91961f9d85a091702b042", "score": "0.7063344", "text": "def killServer():\n if TikaServerProcess:\n try:\n os.killpg(os.getpgid(TikaServerProcess.pid), signal.SIGTERM)\n except:\n log.error(\"Failed to kill the current server session\")\n time.sleep(1)\n # patch to support subprocess killing for windows\n if Windows:\n if sys.version.startswith(\"2\"):\n # Python 2.x\n PROCESS_TERMINATE = 1\n handle = ctypes.windll.kernel32.OpenProcess(\n PROCESS_TERMINATE, False, TikaServerProcess.pid\n )\n ctypes.windll.kernel32.TerminateProcess(handle, -1)\n ctypes.windll.kernel32.CloseHandle(handle)\n time.sleep(1)\n elif sys.version.startswith(\"3\"):\n # Python 3.x\n os.kill(TikaServerProcess.pid, signal.SIGTERM)\n time.sleep(1)\n else:\n try:\n os.killpg(os.getpgid(TikaServerProcess.pid), signal.SIGTERM)\n except:\n log.error(\"Failed to kill the current server session\")\n time.sleep(1)\n else:\n log.error(\"Server not running, or was already running before\")", "title": "" }, { "docid": "a96d8fbd56d15697499e2195e5b730ee", "score": "0.70583385", "text": "def kill(self):\n del os.environ['PUBSUB_EMULATOR_HOST']\n print 'Inside Kill', self.process\n os.killpg(self.process.pid, signal.SIGTERM)", "title": "" }, { "docid": "48de9704f274ad88c813437be8fbee84", "score": "0.68917686", "text": "def terminate():\n _process_method(\"node\", \"kangas\", \"terminate\")\n _process_method(\"kangas\", \"server\", \"terminate\")\n _process_method(\"python\", \"kangas\", \"terminate\")", "title": "" }, { "docid": "53353a8b5f97156024455f7eb7144b58", "score": "0.68824625", "text": "def kill_server(self):\n self.stop_server()\n self.server = None\n self.driver = None", "title": "" }, { "docid": "27e6a2aa9e541f61ab61e562959b10c3", "score": "0.68409723", "text": "def kill(server=\"server\", debug=True):\n with settings(warn_only=True):\n execute_command(\"STOP MONGO\", \"fab mongo.stop\", debug=debug)\n result = local(\n 'ps -ax | fgrep \"python {0}.py\" | fgrep -v fgrep'.format(server), capture=True).split(\"\\n\")\n for line in result:\n if line is not '':\n pid = line.split(\" \")[0]\n local(\"kill -9 {0}\".format(pid))\n # local(\"fab queue.stop\")", "title": "" }, { "docid": "2ab23d705de4d88604b4a0b3400ad268", "score": "0.68369114", "text": "def kill(self,pid):\n del self.clients[pid]\n del self.pool[pid]\n print 'Terminated worker subprocess:', pid", "title": "" }, { "docid": "be7f025c15afa7a102cab02878642d72", "score": "0.6832966", "text": "def kill(self):\n self.proc.kill()\n self.proc.wait()\n if self.host != 'localhost':\n from xpedite.dependencies import binPath\n ssh = binPath('ssh')\n subprocess.Popen([ssh, '-T', '-o', 'StrictHostKeyChecking=no', self.host, 'kill', str(self.pid)]).wait()\n self.std.close()", "title": "" }, { "docid": "e1fcfcb17c86b8890c8fdc88dc9d7694", "score": "0.676611", "text": "def tear_server(self):\n self.drv.quit()\n self.server_process.kill()\n self.server_process.wait()", "title": "" }, { "docid": "aefe7cfbcda7b90c2faf2cb59ed8f0c9", "score": "0.6702326", "text": "def quit(self):\n self.server.trash()", "title": "" }, { "docid": "5453ed858863eb4cd0eef79155354b88", "score": "0.6641691", "text": "def stop(self):\n self.zeromq_server.stop()\n self.mqtt_server.remove_node(network_utils.get_own_ip())\n self.mqtt_server.stop()\n self.api_server.stop()", "title": "" }, { "docid": "05bc02b062fea5547bbf8e07817ad44d", "score": "0.6624865", 
"text": "def kill(self) -> None:\n os.kill(self.pid, signal.SIGTERM)", "title": "" }, { "docid": "8bed534513ca300626236a01f0e9f868", "score": "0.6572219", "text": "def kill(self):\n sleep(POLL_TIME)\n self.shutdown()\n self.server_close()", "title": "" }, { "docid": "f2832647130204b30553aa3ea29d728a", "score": "0.6538976", "text": "def Kill(self):\n \n if sys.platform == \"win32\":\n win32process.TerminateProcess(self._GetChildPID(), 1)\n else:\n os.kill(self._GetChildPID(), signal.SIGKILL)", "title": "" }, { "docid": "2dc57f735997d71f4554653d1b60936e", "score": "0.6488159", "text": "def onQuit(self):\n self.client.leaveServer()\n self.master.destroy()", "title": "" }, { "docid": "6cdfdfb38d5f0f0919a5a7b0456e771d", "score": "0.6468022", "text": "def stop_server(self):\n # kill myServer\n # 参考https://www.cnblogs.com/CoreXin/p/5566607.html\n os.popen(stopAppium)", "title": "" }, { "docid": "d303f325aacc4dee80af40ca36e28556", "score": "0.64474255", "text": "def kill(self):\n self.socket.close()", "title": "" }, { "docid": "b5ddd571a93814ab5c6bf3b28f440a8d", "score": "0.6434538", "text": "def tearDown(self):\n self.server_process.terminate()\n self.server_process = None", "title": "" }, { "docid": "0e39d8b429b73dc527f8426fbc63fcfc", "score": "0.6421385", "text": "def kill(self):\n pass", "title": "" }, { "docid": "0e39d8b429b73dc527f8426fbc63fcfc", "score": "0.6421385", "text": "def kill(self):\n pass", "title": "" }, { "docid": "0e39d8b429b73dc527f8426fbc63fcfc", "score": "0.6421385", "text": "def kill(self):\n pass", "title": "" }, { "docid": "d5be17b4193fdf74dfbe5e3f8ed4986a", "score": "0.63840187", "text": "def kill(self):", "title": "" }, { "docid": "21ae64c64f6c78ebd2575d8d4592a0b1", "score": "0.63811344", "text": "def kill(self):\n if self.proc is None:\n raise ValueError(\"Not started\")\n self.qkill.put(\"kill\")\n self.proc.join()\n self.proc.close()\n time.sleep(0.1)\n self.qstdout.close()\n self.qstderr.close()\n self.qkill.close()\n self.proc = None", "title": "" }, { "docid": "d0ec6d21365305057eb1146eb7b31e43", "score": "0.63784415", "text": "def phone_kill(self):\n self.py.run(\"set.close()\")\n self.py.exit()\n delattr(self, \"py\")", "title": "" }, { "docid": "bd9dfa6770be20e41ea3fa58f1122443", "score": "0.6353981", "text": "def kill(self):\n self._killall(force=True)", "title": "" }, { "docid": "db599a990df68a46b9c711f6350448b0", "score": "0.63239676", "text": "def stop_command(db=None, host=None, murder=False):\n pid = get_server_pid(db, host)\n if not pid: return\n\n sig = signal.SIGTERM if murder else signal.SIGINT\n\n os.kill(pid, sig)", "title": "" }, { "docid": "556892376de7ced2c2c0ff4ce96a0a89", "score": "0.6297316", "text": "def kill(self) -> None:\n if self._process is not None:\n self._process.kill()", "title": "" }, { "docid": "ca00ddf00d4f384d563a3b253b42c43a", "score": "0.62915933", "text": "def kill(self) -> None:\n self.process.kill()\n self.process.wait()\n self.process = None", "title": "" }, { "docid": "aff78b89573cc9694e375d73c33bd217", "score": "0.6279561", "text": "async def kill(self, ctx):\n await ctx.send(MSG_SHUT_DOWN)\n await self.bot.logout()", "title": "" }, { "docid": "2753ea0c7d28054512d14c6cc3aec278", "score": "0.62769693", "text": "def stop_server(self):\n inst = self.get_connection()\n inst.stop()", "title": "" }, { "docid": "372806f5b46ca312b81bf19e23b4fefd", "score": "0.6254214", "text": "def stop_daemon(self):\n\n import subprocess, time, os, signal\n\n self.COMMAND = \"/usr/bin/pkill -9 \" + self.coinName\n os.system(self.COMMAND)", 
"title": "" }, { "docid": "80dd98762098719ec94217ae3ee08295", "score": "0.624367", "text": "def shutdown():\n vpython.no_notebook.stop_server()", "title": "" }, { "docid": "9524bda02182397ffa0f5c137034590b", "score": "0.6240712", "text": "def kill_server(hosts):\n kill_cmds = [\n \"pkill '(daos_server|daos_io_server)' --signal INT\",\n \"sleep 5\",\n \"pkill '(daos_server|daos_io_server)' --signal KILL\",\n ]\n # Intentionally ignoring the exit status of the command\n pcmd(hosts, \"; \".join(kill_cmds), False, None, None)", "title": "" }, { "docid": "65d63a49be1bf0a220b5e2f27279c81c", "score": "0.6229341", "text": "def teardown(self):\n self.kill()\n super(TestServer, self).teardown()", "title": "" }, { "docid": "d6b640cfc3f3227d6a59e21b1a5a5ba1", "score": "0.6220931", "text": "def stopServer():\n # for client in ArgosController.clients:\n # client.closeClient()\n if ArgosController.client is not None:\n ArgosController.client.closeClient()\n ArgosController.running = False\n ArgosController.TCPServer.close()", "title": "" }, { "docid": "de1236110ee06e6c61e325c83ec9af9c", "score": "0.6213605", "text": "def stop(self):\n LOG.info(_(\"Stopping WSGI server.\"))\n self._server.kill()\n if self._tcp_server is not None:\n LOG.info(_(\"Stopping raw TCP server.\"))\n self._tcp_server.kill()", "title": "" }, { "docid": "c8386b4b017154e32e83f3b128a6f317", "score": "0.6211687", "text": "def stop(self):\n self.app.kill()", "title": "" }, { "docid": "c522a8385d22474549ed5ca84ab6baba", "score": "0.6211478", "text": "def stop(self):\n log.debug(\"cleaning up server\")\n sys.stdout.flush()\n BaseServer.stop(self)\n if not charles.WINDOWS: \n if charles_settings.SOCKFAM == socket.AF_UNIX: # prune socket\n try:\n os.remove(charles_settings.ADDRESS)\n except EnvironmentError, exc:\n log.error(\"error removing socket:\", exc.strerror)\n # pidfile removed in __init__.py:main_loop\n # restarter stopped in restarter.py:_atexit", "title": "" }, { "docid": "a1dff1c03441091d9ac98c30cba6c047", "score": "0.62107444", "text": "def stop(self):\n self.server.shutdown()\n self.server.server_close()", "title": "" }, { "docid": "06321ba3141d1ac950bfa96aa1e9753b", "score": "0.61956245", "text": "def stop(self):\n\n log_info(\"Killing LiteServ: http://{}:{}\".format(self.host, self.port))\n\n self.logfile.flush()\n self.logfile.close()\n self.process.kill()\n self.process.wait()\n\n self._verify_not_running()", "title": "" }, { "docid": "cd725254e22099430e2a272ec30b5cae", "score": "0.6194456", "text": "def kill_build(self):\r\n\r\n self.communicate({\"$type\": \"terminate\"})", "title": "" }, { "docid": "d2d342072d3b821614c99aa80d7f07e9", "score": "0.619332", "text": "def kill(self):\n try:\n if self.process:\n os.kill(self.process.pid, 9)\n except AttributeError:\n # kill may not be available under windows environment\n pass", "title": "" }, { "docid": "a903d27149d83b000bee8b6442430e01", "score": "0.6185369", "text": "def __delete_heroku_server(self):\n heroku_executable_path, heroku_user_identifier = self.__get_heroku_client()\n heroku_app_name = self.__get_app_name()\n print(\"Heroku: Deleting server: {}\".format(heroku_app_name))\n subprocess.check_output(\n shlex.split(\n \"{} destroy {} --confirm {}\".format(\n heroku_executable_path, heroku_app_name, heroku_app_name\n )\n )\n )\n time.sleep(HEROKU_WAIT_TIME)", "title": "" }, { "docid": "8c0004170f016fa5c3ea2dfdb0bab501", "score": "0.6183501", "text": "def shutdown(self):\n logger.info(\"Shutdown method starting.\")\n try:\n if os.path.isfile(\"music_server.pid\"):\n 
logger.info(\"removing pid file.\")\n os.remove(\"music_server.pid\")\n #the only way to actually kill the player subprocess.\n self.q.put(\"STOP\")\n time.sleep(2)\n self._loop_process.kill()\n except:\n pass", "title": "" }, { "docid": "d6c225d5213af56518efa5a92f45af11", "score": "0.61725885", "text": "def request_exit(self):\n os.kill(self.pid, signal.SIGTERM)", "title": "" }, { "docid": "5f527084cecd9c3479d9dcf8379275a4", "score": "0.6168476", "text": "def stop(self):\n\n # XXX FIXME - don't rely on timing\n # self.__qmp_socket.send(\"quit\\n\".encode())\n # time.sleep(1)\n\n self.kill_qemu()\n for n in self.net:\n print(\"Deleting bridge \", n.uid)\n n.delete_bridge()\n\n for serial_object in self.__serials:\n try:\n serial_object[\"expect\"].close()\n except OSError:\n pass\n serial_object[\"expect\"] = None", "title": "" }, { "docid": "987d189a26dbbe7e610b5d001194825b", "score": "0.6158234", "text": "def kill(self):\n\n # \"removed\" container is occasionally killed in ContainerSandbox.\n # Stay silent about this scenario.\n if self._container_id:\n self._dclient.kill(self._container_id)", "title": "" }, { "docid": "a84d85120441b523ce2c96ffc2cab511", "score": "0.6158076", "text": "async def terminate(self):\n kokoro = self.kokoro\n if kokoro is not None:\n kokoro.terminate()\n \n websocket = self.websocket\n if websocket is None:\n return\n self.websocket = None\n await websocket.close(4000)", "title": "" }, { "docid": "85c05daa150ba3a991d08802b62b33bc", "score": "0.6134346", "text": "def attempt_kill():\n if not bot_running():\n print \"MarkUsBot is not running.\"\n return\n\n pid = bot_pid()\n print \"Attempting to kill the MarkUsBot (PID: %s)\" % pid\n os.kill(pid, signal.SIGKILL)\n os.remove(PID_FILE)\n print \"MarkUsBot should be shut down now.\"", "title": "" }, { "docid": "7d06018eeb638365d4aa086ee253791a", "score": "0.6130186", "text": "def terminate(self):\n self._player_process.kill() # mpg123 doesn't respond to SIGTERMs for some reason unfortunately.\n self._player_process.wait()", "title": "" }, { "docid": "87e68620ac493a5619f98c6cffb913bb", "score": "0.6120489", "text": "def goodbye() -> None:\n stop_app()\n logging.info('Killed server. Bye bye!')", "title": "" }, { "docid": "1ac6854161c04fc0b195a36540e1be80", "score": "0.61117405", "text": "def kill(self):\n try:\n self.container.kill()\n except docker.errors.APIError:\n self._print(\n \"Error when trying to send kill signal to docker container.\")\n LOG.exception(\"Killing container\")", "title": "" }, { "docid": "3f8608fae15398ad3ad461b09091d5e1", "score": "0.60998493", "text": "def shutdown(self) -> None:\n if self.created: # only delete the server if it's created by us\n self.__delete_heroku_server()", "title": "" }, { "docid": "1d7c16f739f74ce4150bcd9b7d376127", "score": "0.6094854", "text": "def server_stop(self, server):", "title": "" }, { "docid": "3b91a57eb69a98d4083da4336799c144", "score": "0.60822904", "text": "def quit(self) -> None:\n print(\"Quitting SCServer... \", end=\"\")\n try:\n self.msg(MasterControlCommand.QUIT, bundle=False)\n except OSCCommunicationError:\n pass # sending failed. 
scscynth maybe dead already.\n finally:\n super().quit()\n self._server_running = False\n if self._is_local:\n self._has_booted = False\n self.process.kill()\n print(\"Done.\")", "title": "" }, { "docid": "379fd1c3540d5901e2a649db618187ab", "score": "0.60790634", "text": "def kill():\n\t\tfor proc in process_iter():\n\t\t\t# check whether the process name matches\n\t\t\tif \"pdflatex\" in proc.name():\n\t\t\t\tproc.kill()", "title": "" }, { "docid": "143b4cd4d548788d3d4f803bde166ea3", "score": "0.607681", "text": "def _kill(self):\n if self._killed:\n return\n print self.__class__.__name__ + \".kill()\"\n try:\n if self.commandChecker != None and self.commandChecker.isAlive():\n self.commandChecker.interrupt()\n except Exception as e:\n pass\n try:\n if self.fileChecker != None and self.fileChecker.isAlive():\n self.fileChecker.interrupt()\n except Exception as e:\n pass\n try:\n self.deleteOpenItems(True)\n except Exception as e:\n pass\n if OS.isWindows() and not self.currentNetUseLetter == \"\" and not self.currentNetUseIsPersistent:\n try:\n self.execCommand(\"net use \" + self.currentNetUseLetter + \": /DELETE\")\n except Exception as e:\n pass\n if self.isMounted() and not self.currentMountIsPersistent:\n try:\n self._removeMount()\n except Exception as e:\n pass\n try:\n self.cleanCash()\n except Exception as e:\n pass\n self._killed = True", "title": "" }, { "docid": "8112349be8b4ef0946d2388cbb75ff9e", "score": "0.6071985", "text": "def terminate():\n eb_terminate(env.eb_env_name)", "title": "" }, { "docid": "31a7e9b1f185d364242e32d1abc2a414", "score": "0.6066651", "text": "def terminate(self):\n self.process.terminate()", "title": "" }, { "docid": "3db126c9329a2c86ba6337329dac803a", "score": "0.60561097", "text": "def stop(self):\n self.log.info(\"Shutting down TCPServerProxyZMQ\")\n self.kill_switch = True\n self.join(1000)", "title": "" }, { "docid": "51c2273d81eaa0d92df54a9bec751b6e", "score": "0.60407436", "text": "def stop():\n try:\n local(\"killall npm\")\n except:\n pass\n\n try:\n local(\"killall python\")\n except:\n pass", "title": "" }, { "docid": "166c99e7b2cae2789cde4e98822801d0", "score": "0.60404587", "text": "def kill(self):\n self.killed = True", "title": "" }, { "docid": "5f91dc09305703e9b0d92e450a29ea38", "score": "0.6038677", "text": "def terminate(self) -> None:\n if self._process is not None:\n # self._process.terminate()\n self._process.send_signal(signal.SIGINT)\n time.sleep(3)", "title": "" }, { "docid": "04f386ea10b0d63772de70b7fe2af7bb", "score": "0.60302675", "text": "def kill_redis(self, check_alive: bool = True):\n self._kill_process_type(\n ray_constants.PROCESS_TYPE_REDIS_SERVER, check_alive=check_alive\n )", "title": "" }, { "docid": "7744512a2ba828d9420df0f24983a02b", "score": "0.6012819", "text": "def stop():\n tidyup()\n shutdown_server()\n return \"Parando Servidor\"", "title": "" }, { "docid": "3378f7625cb01f1851754f3119af17ef", "score": "0.60025334", "text": "def stop(self):\n self.log.info(\"Shutting down TCPServerZMQ\")\n \n for worker in self.workers:\n worker.stop()\n \n self.kill_switch = True\n self.join(5000)\n self.log.info(\"TCPServerZMQ shutdown finished\")", "title": "" }, { "docid": 
"168a40c38a93a30cb3b76a59f26cbb25", "score": "0.5997902", "text": "def kill_server(self,srvr):\n indx = self.servers.index(srvr)\n del self.servers[indx]", "title": "" }, { "docid": "2972efb302f0877d890a0d9bef45e1b2", "score": "0.5989623", "text": "def kill(self, *toKill):\n #TDB", "title": "" }, { "docid": "0d8c073b8301a65e481885806fa9766b", "score": "0.5985634", "text": "def kill(self, exc: bool = True) -> None:\n if not self.is_active():\n if not exc:\n return\n raise SystemError(\"RPC is not active.\")\n\n try:\n print(\"Terminating local RPC client...\")\n except ValueError:\n pass\n for child in self.process.children(recursive=True):\n try:\n child.kill()\n except psutil.NoSuchProcess:\n pass\n self.process.kill()\n self.process.wait()\n chain._network_disconnected()", "title": "" }, { "docid": "d1480999d64400d54f690dba888812bb", "score": "0.5985283", "text": "def kill(self):\n return self.impl.kill()", "title": "" }, { "docid": "e7da5f99776f5f22076ab111c6653f5b", "score": "0.5984219", "text": "def terminated(self):\n self.process.kill()", "title": "" }, { "docid": "393672d7603d64412e7f1b86ca4d2928", "score": "0.5981826", "text": "def kill(pid):\n # XXX 无法杀孙子进程\n kill_command = \"kill -9 `ps --no-heading --ppid %s|awk '{print $1}'` %s\" % (pid, pid)\n os.system(kill_command)", "title": "" }, { "docid": "ca71b0427a07e2c92f1f302d3b2ad613", "score": "0.59800565", "text": "def stop(self):\n self._kill()", "title": "" }, { "docid": "fcaf97b783f01fbe9bf1c9516b356ac4", "score": "0.596931", "text": "def terminate(self):\n self.hub.terminate()", "title": "" }, { "docid": "0e5f75d2a947bd4b7299a33e552bb981", "score": "0.5968081", "text": "def kill(self):\n self.alive = False", "title": "" }, { "docid": "d8d9248917a0c19c135808fc5886ab15", "score": "0.5966517", "text": "def kill(self):\n if not self.is_complete():\n self._process.kill()\n self._process.wait()\n self.e_time = time.time()\n self.stdout, self.stderr = self._process.communicate()\n self.status = _S_KILL", "title": "" }, { "docid": "01ef61f9b3c7c3de18aa8eedea8050d1", "score": "0.5950759", "text": "def kill_instance(self):\n if self.instance:\n print(\"force-killing {0} instance PID:[{1}]\".format(self.type_str, self.instance.pid))\n self.instance.kill()\n\n self.instance = None\n self.pid = None\n self.ppid = None\n else:\n logging.info(\"I'm already dead, jim!\" + str(repr(self)))", "title": "" }, { "docid": "876032b130a7f0ce924b02eb4d09791a", "score": "0.59449106", "text": "def kill(self):\n self.is_killed = True\n self._is_killed.wait()", "title": "" }, { "docid": "f72c3a5e5eec0e04949366e55abe196d", "score": "0.5944829", "text": "def terminate(self):\n self._mi_api.terminate()", "title": "" }, { "docid": "d97922a8fc0aeb2f165e86092e3cec60", "score": "0.59366035", "text": "def exit(update, context):\n if \"container\" in context.chat_data:\n if context.chat_data[\"mode\"] == 1:\n repl.kill(context.chat_data[\"container\"])\n if context.chat_data[\"mode\"] == 2:\n batch.kill(context.chat_data[\"container\"])\n update.message.reply_text(\"Container terminated\")\n else:\n update.message.reply_text(\"Error: Interpreter not started or already terminated\")", "title": "" }, { "docid": "6622da8cc1a97aa1a56ccc8025590fb3", "score": "0.59357333", "text": "def stop():\n\n tidyUp()\n shutdown_server()\n return \"Stopping server\"", "title": "" }, { "docid": "ae26ff10982fa8d7466b0d625561141e", "score": "0.5935445", "text": "def __del__(self):\n if self.process.isalive:\n try:\n return self.command('QUIT', '')\n except:\n pass", "title": "" }, { 
"docid": "559507c49f17ed0fbdd4ae9ad961852b", "score": "0.59309345", "text": "def kill(self):\n self._stop_thread = True\n self._rtl433.kill()", "title": "" }, { "docid": "997a1922d8cd324aa5f9351edb49705c", "score": "0.5929235", "text": "def terminate_all_instances(self) -> None:", "title": "" }, { "docid": "1948f870ab856242b605a1d5f34eaf8d", "score": "0.59270746", "text": "def terminate(*_):\n os.kill(os.getpid(), signal.SIGTERM)", "title": "" }, { "docid": "71ecea9439ce1759a4debc20c98a0a1a", "score": "0.5918046", "text": "def command_kill(self, **kw):\n return False", "title": "" }, { "docid": "8a30d378d4253bb7ece04fc218a1af44", "score": "0.591538", "text": "def _clean_process(ppid, workers):\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n while _PythonMultiprocessing.is_process_alive(ppid):\n time.sleep(0.1)\n\n _PythonMultiprocessing._terminate_processes(workers)\n del workers\n os.kill(os.getpid(), signal.SIGTERM)", "title": "" }, { "docid": "7f34294c40cb83c2ab1d3539b60fb020", "score": "0.5911659", "text": "def exit(self):\n self.client.stopListening()\n self.parent.destroy()", "title": "" }, { "docid": "59ce756c5f587ba804804359c3410e54", "score": "0.5895893", "text": "def stop(self):\n self.server.stop(grace=None)\n self.thread_pool.shutdown(wait=False)", "title": "" }, { "docid": "98d7c24a5813a97bc62fb7f7a69c1eda", "score": "0.5895399", "text": "def kill(self, sig):\n\n # Same as os.kill, but the pid is given for you.\n if self.isalive():\n os.kill(self.pid, sig)", "title": "" }, { "docid": "db1837102ca910836c32d7c31ec4fdac", "score": "0.58953756", "text": "def _pg_kill(self):\n return self._pg_freezer(\"kill\")", "title": "" }, { "docid": "dcfc526af4710981d2fb77ed6ff0e538", "score": "0.5889495", "text": "def kill_planner(self):\n\t\tself.env.Destroy()\n\t\tRaveDestroy() # destroy the runtime", "title": "" }, { "docid": "edbf26f8d63d35279350af814748090c", "score": "0.58757603", "text": "def shutdown_server():\n SERVER_UNDER_TEST.stop()", "title": "" }, { "docid": "fb4ffe98160d705aa4848a498b123c74", "score": "0.5861021", "text": "def stop_server():\n import CAServer\n print(\"stopping server\")\n for obj in syringe_pump_driver,syringe_pump2_driver,\\\n syringe_pump_combined_driver:\n CAServer.unregister_object(obj,\"NIH:\"+obj.name)", "title": "" }, { "docid": "70028efe1e5d00cd8b4d75dfcb2dd599", "score": "0.5857742", "text": "def close_server(self):", "title": "" }, { "docid": "9282446b7ef681b76b113eccba042799", "score": "0.5854586", "text": "def kill(nick):\n print(\"kill %s\" % nick)", "title": "" }, { "docid": "8f24966ca18fca99323b91e45569cb37", "score": "0.58531934", "text": "def terminate(self):\n if self._terminating:\n return\n self._exit_start = self._env.now()\n self._terminating = True\n self._killall()", "title": "" }, { "docid": "325aca7d2de065e0e5ebefa6ac58f6cc", "score": "0.5853142", "text": "def stop(self):\n if self._server:\n self._server.close()\n\n self._server = None", "title": "" }, { "docid": "11f1abfe2de9275b6e04004757699b5c", "score": "0.585238", "text": "def kill(self):\n self._stop = True", "title": "" } ]
870b64780dfa182edf12e4df39653c90
Smooth the data with von Mises functions
[ { "docid": "fdf5009c7e87693ef95b73de8a36878e", "score": "0.632402", "text": "def vonmises_smoothing(self, **kwargs):\n return self.component_fitting(mode='vonmises', **kwargs)", "title": "" } ]
[ { "docid": "5babe5be0badb185409d0cd539883ea3", "score": "0.5696802", "text": "def smooth(values, dt, tau):\n result = np.empty(len(values))\n result[0] = values[0]\n weights = np.exp(-dt / tau)\n for i in range(1, len(values)):\n result[i] = weights[i] * result[i - 1] + (1 - weights[i]) * values[i]\n\n return result", "title": "" }, { "docid": "1a5f87c10c1625aec67468704be88e93", "score": "0.55947316", "text": "def test_vector_value_smooth(self):\n \"\"\" smooth is an alternate method \"\"\"\n ml_dfp = DataFingerprint()\n ml_dfp.numeric_encoding = 'smooth'\n res = ml_dfp.vector_value(1.19313)\n print(res)\n expected = [0., 0.80687, 0.19313, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]\n assert_array_almost_equal(res, expected)", "title": "" }, { "docid": "036c060cd1a977bebb57d5a8379916a4", "score": "0.55933636", "text": "def fz(self):\n\t\ttime = np.linspace(0, 5, 1000)\n\t\tdef sm1(x, t):\n\t\t\tif x[0] < 0:\n\t\t\t\treturn np.array([x[1], 0]) \n\t\t\telse:\n\t\t\t\treturn np.array([x[1], self.eta*self.Ez0(x[0])])\n\t\ttraj = odeint(sm1,[0,1.],time)\n\t\tif traj[-1,0] < 0:\n\t\t\tind = np.where(traj[:,0] > 0)[0][-1]\n\t\t\tt = [time[ind+1],time[ind]]\n\t\t\tz = [traj[ind+1,0],traj[ind,0]] #must be increasing for interp\n\t\t\treturn 1/(np.interp(0,z,t)*self.T)\n\t\telse:\n\t\t\treturn -1", "title": "" }, { "docid": "3dda4cff2b586ddd6343327a81be056b", "score": "0.5578475", "text": "def make_v_func():\n period_factor = rnd.uniform(0.3, 0.5)\n amplitude_factor = rnd.uniform(0.1, 0.3)\n\n def v_func(elapsed):\n w = period_factor*elapsed\n return amplitude_factor*math.cos(w*2*math.pi)\n return v_func", "title": "" }, { "docid": "23c0492eafd84368da69935fbf2d4b30", "score": "0.5561881", "text": "def __call__(self, state):\n\n result = self.u(\n self.dataset[\"v_inv\"].interp(\n state,\n assume_sorted=True,\n kwargs={\"fill_value\": \"extrapolate\"},\n )\n )\n\n result.name = \"v\"\n result.attrs = self.dataset[\"v\"].attrs\n\n return result", "title": "" }, { "docid": "4597e17cdfca822f47edc32efa19b100", "score": "0.5530115", "text": "def discrete_vel(v,timescale):\n # I commented out time average smoothing because it confuses the analysis for what information transfer\n # actually means. It might be better to use Savitzky-Golay filtering or something like that. 
However,\n # those are also fit by looking over many data points.\n #vsmooth = moving_mean_smooth(v,timescale)[::timescale]\n vsmooth = v[::timescale]\n\n if v.ndim>1:\n change = np.sign(np.diff(vsmooth,axis=0))\n else:\n change = np.sign(np.diff(vsmooth))\n \n return change", "title": "" }, { "docid": "3db2de4f1fb016c8c6997db672cc642c", "score": "0.5475839", "text": "def m_step(self):\n self.mu.data = self.mu_numerator / self.mu_denominator\n # Laplace smoothing\n self.var.data = (self.var_numerator + self.var_threshold) / (self.var_denominator + self.C * self.var_threshold)\n\n self.init_accumulators()", "title": "" }, { "docid": "2687776ba602a41adfef28cf520f851c", "score": "0.5471605", "text": "def make_v_func():\n period_factor = 1\n amplitude_factor = 0.2\n\n def v_func(elapsed):\n w = period_factor * elapsed.to_sec()\n return amplitude_factor * math.cos(w * 2 * math.pi)\n return v_func", "title": "" }, { "docid": "df461e2a0d3f9ebf6236d9e28a9a4d6c", "score": "0.5461545", "text": "def insVol(NoOfSteps, NoOfRates, IV):\r\n V = np.zeros([NoOfRates, NoOfSteps]) \r\n \r\n tenor_steps = NoOfSteps / NoOfRates # Steps in between tenor points\r\n \r\n for i in range(NoOfSteps): # Loop per time step\r\n for j in range(NoOfRates): # Loop per forward rate\r\n if i == 0:\r\n V[j,:] = IV[j] # Set every time step equal to the initialized IV\r\n if i >= tenor_steps*j:\r\n V[0:j,i:] = np.nan # If the forward rate is 'dead' reset the value to NaN\r\n\r\n return V # Return the instantaneous volatility matrix\r", "title": "" }, { "docid": "5e30376bf4cee12fd43f1615cdc236ae", "score": "0.5440621", "text": "def get_velocity_smoothed(self):\n vx = 0\n vy = 0\n N = len(self.prev_velocities)\n if N == 0:\n return self.get_velocity()\n for velocity in self.prev_velocities:\n vx += velocity[0]\n vy += velocity[1]\n return (vx / N, vy / N)", "title": "" }, { "docid": "d81eb890b90c13725c495614f85a5ca9", "score": "0.5406132", "text": "def make_v_func():\n period_factor = random.uniform(0.3, 0.5)\n amplitude_factor = random.uniform(0.1, 0.2)\n\n def v_func(elapsed):\n w = period_factor * elapsed.to_sec()\n return amplitude_factor * math.cos(w * 2 * math.pi)\n return v_func", "title": "" }, { "docid": "20c6dadeb16f2394b56b76b241498210", "score": "0.5395294", "text": "def mass_streamfunction(data, v_field='vcomp', a=Rad_earth, g=grav):\n if 'lon' in data[v_field].dims:\n vbar = data[v_field].mean('lon')\n c = 2*np.pi*a*np.cos(vbar.lat*np.pi/180) / g\n # take a diff of half levels, and assign to pfull coordinates\n dp = xr.DataArray(data.phalf.diff('phalf').values*100, coords=[('pfull', data.pfull)])\n return c*(vbar*dp).cumsum(dim='pfull')", "title": "" }, { "docid": "2e143a9eba42e3a636ba0e9efe4cd6a4", "score": "0.5379439", "text": "def calcspectrav(xi,yi,zi,Ni=6,usevmod=True,vmod='iasp91',phsarv=['s','S']):\n\n # want them to be at least 2-d\n xi,yi,zi=np.atleast_1d(xi),np.atleast_1d(yi),np.atleast_1d(zi)\n if xi.ndim==1:\n xi=np.atleast_2d(xi).transpose()\n if yi.ndim==1:\n yi=np.atleast_2d(yi).transpose()\n if zi.ndim==1:\n zi=np.atleast_2d(zi).transpose()\n\n if usevmod:\n from obspy.taup import TauPyModel\n try:\n # check if it exists\n model = TauPyModel(model=vmod)\n except:\n # read from an npz file\n #fdir=os.path.join(os.environ['DATA'],'VELMODELS',vmod)\n #fname=os.path.join(fdir,vmod+'.npz')\n fname=vmod\n model = TauPyModel(model=fname)\n\n # for each station\n dhor = np.power(xi,2)+np.power(yi,2)\n dhor = np.power(dhor,0.5)\n\n # initialize\n ttrav = np.zeros(xi.shape)\n\n for m in 
range(0,xi.shape[1]):\n dhtry=np.linspace(np.min(dhor[:,m]),np.max(dhor[:,m]),Ni)\n ztry=np.linspace(np.min(zi),np.max(zi),Ni)\n dhtry,ztry=np.unique(dhtry),np.unique(ztry)\n ttravi=np.zeros([len(dhtry),len(ztry)],dtype=float)\n \n for kd in range(0,len(dhtry)):\n for kz in range(0,len(ztry)):\n arrivals=model.get_travel_times(\n distance_in_degree=dhtry[kd]/111.,\n source_depth_in_km=ztry[kz],\n phase_list=phsarv)\n ttravi[kd,kz]=np.min([arr.time for arr in arrivals])\n\n # interpolate to these points\n dhoru,zu=np.unique(dhor[:,m]),np.unique(zi[:,m])\n if len(ztry)>1 and len(dhtry)>1:\n f=scipy.interpolate.RectBivariateSpline(dhtry,ztry,ttravi)\n ttravj=f(dhoru,zu)\n # f=scipy.interpolate.interp2d(ztry,dhtry,ttravi,kind='cubic')\n # ttravj=f(zu,dhoru)\n elif len(ztry)>1:\n f=scipy.interpolate.interp1d(ztry,ttravi.flatten(),kind='cubic')\n ttravj=f(zu).reshape([len(dhoru),len(zu)])\n elif len(dhtry)>1:\n f=scipy.interpolate.interp1d(dhtry,ttravi.flatten(),kind='cubic')\n ttravj=f(dhoru).reshape([len(dhoru),len(zu)])\n else:\n ttravj=ttravi.flatten().reshape([len(dhoru),len(zu)])\n\n # pick the relevant points\n ih = general.closest(dhoru,dhor[:,m])\n iz = general.closest(zu,zi[:,m])\n\n ttravs=[ttravj[ih[n],iz[n]] for n in range(0,len(ih))]\n ttrav[:,m]=ttravs\n\n else:\n\n # total distance and travel time\n spd = 3.\n ttrav = np.power(xi,2)+np.power(yi,2)+np.power(zi,2)\n ttrav = np.power(ttrav,0.5)\n ttrav = ttrav/spd\n\n return ttrav", "title": "" }, { "docid": "4c1eac3f87ab532d9047d47596112f7a", "score": "0.53450763", "text": "def Z_smoothed(self, i):\n \n return np.exp(self.a + self.b * np.log(i))", "title": "" }, { "docid": "4f2edd2dc9a6c86723f2e65ff32c26e2", "score": "0.5336506", "text": "def _VCCurveData(self):\n aData = self._accelerationData\n if aData.size == 0:\n return np.empty(0, dtype=np.float), self._accelerationData\n\n \"\"\"\n Theory behind the calculation:\n \n Let x(t) be a real-valued time-domain signal, and X(2πf) = F{x(t)}(2πf)\n be the Fourier Transform of that signal. By Parseval's Theorem,\n\n ∫x(t)^2 dt = ∫|X(2πf)|^2 df\n\n (see https://en.wikipedia.org/wiki/Parseval%27s_theorem#Notation_used_in_physics)\n\n Rewriting the right side of that equation in the discrete form becomes\n\n ∫x(t)^2 dt ≈ ∑ |X[k]|^2 • ∆f\n \n where ∆f = fs/N = (1/∆t) / N = 1/T.\n Limiting the right side to a range of discrete frequencies (k_0, k_1):\n\n ∫x(t)^2 dt ≈ [∑; k=k_0 -> k≤k_1] |X[k]|^2 • ∆f\n\n The VC curve calculation is the RMS over the time-domain. If T is the\n duration of the time-domain signal, then:\n\n √((1/T) ∫x(t)^2 dt)\n ≈ √((1/T) [∑; k=k_0 -> k≤k_1] |X[k]|^2 • ∆f)\n = ∆f • √([∑; k=k_0 -> k≤k_1] |X[k]|^2)\n\n If the time-series data is acceleration, then the signal needs to first\n be integrated into velocity. 
This can be done in the frequency domain\n by replacing |X(2πf)|^2 with (1/2πf)^2 |X(2πf)|^2.\n \"\"\"\n f, a_psd = self._PSDData\n f, v_psd = psd.differentiate(f, a_psd, n=-1)\n f_oct, v_psd_oct = psd.to_octave(\n f,\n v_psd,\n fstart=self._vc_init_freq,\n octave_bins=self._vc_bins_per_octave,\n mode=\"sum\",\n )\n v_vc = np.sqrt(f[1] * v_psd_oct) # the PSD must already scale by ∆f?\n\n return f_oct, v_vc", "title": "" }, { "docid": "9b2a4695fe6e26c8d38c8a0f8aff44b7", "score": "0.5331286", "text": "def dispersion(values: Vector) -> float:\n return central_moment(values, 2)", "title": "" }, { "docid": "402d101e9376f6550db6658cc05da221", "score": "0.5310179", "text": "def rts_smoother(mu, Cov, A, V, removethis=None):\n\n N = len(mu)\n #n = N-1\n\n # Start from the last time instance and smoothen backwards\n x = mu[-1,:]\n Covx = Cov[-1,:,:]\n\n for n in reversed(range(N-1)):#(An, Vn) in zip(reversed(A), reversed(V)):\n\n #n = n - 1\n #if n <= 0:\n # break\n\n # The predicted value of n\n x_p = np.dot(A[n], mu[n,:])\n Cov_p = np.dot(np.dot(A[n], Cov[n,:,:]), A[n].T) + V[n]\n\n # Temporary variable\n S = np.linalg.solve(Cov_p, np.dot(A[n], Cov[n,:,:]))\n\n # Smoothed value of n\n x = mu[n,:] + np.dot(S.T, x-x_p)\n Covx = Cov[n,:,:] + np.dot(np.dot(S.T, Covx-Cov_p), S)\n\n # Force symmetric covariance (for numeric inaccuracy)\n Covx = 0.5*Covx + 0.5*Covx.T\n\n # Store results\n mu[n,:] = x\n Cov[n,:] = Covx\n\n\n return (mu, Cov)", "title": "" }, { "docid": "d40f6500690fae015171c77a98487dda", "score": "0.5308868", "text": "def v(self, t):\n # should be t0<t<t1. \n return self.v0 + (self.v1-self.v0)*(t-self.t0) / (self.t1 - self.t0)", "title": "" }, { "docid": "8e9ce806a1a2ee3d0e97bee2de5168dd", "score": "0.5302649", "text": "def test_values(self):\n expected_umat = np.array(\n [\n [1.0, 1.0, 1.0, 0.0, 0.0],\n [1.25352113, 1.19354839, 1.0, 0.08333333, 0.0],\n [1.48780488, 1.50000000, 1.0, 1.00000000, 1.0],\n [2.0, 2.0, 1.0, 1.0, 1.0],\n ]\n )\n umat = self.plugin._smart_smooth(self.umat, self.umat, self.weights)\n self.assertArrayAlmostEqual(umat, expected_umat)", "title": "" }, { "docid": "29c3a91939ed1090a1623412ae8b7cca", "score": "0.5288916", "text": "def Yt_mvar_diffuse_smooth_vec():\n y = np.zeros((3, 2, 1))\n y[0] = np.array([1, 2]).reshape(-1, 1)\n y[1] = np.array([2.4, 3.2]).reshape(-1, 1)\n y[2] = np.array([3, 5]).reshape(-1, 1)\n return y", "title": "" }, { "docid": "120bac00e2c69f2837b7f01c1cc50564", "score": "0.5281519", "text": "def Yt_mvar_diffuse_smooth():\n y = np.zeros((4, 2, 1))\n y[0] = np.array([1, 2]).reshape(-1, 1)\n y[1] = np.array([np.nan, np.nan]).reshape(-1, 1)\n y[2] = np.array([np.nan, 3.5]).reshape(-1, 1)\n y[3] = np.array([3, 5]).reshape(-1, 1)\n return y", "title": "" }, { "docid": "4b32e496dbec2b651413c59e4d827d29", "score": "0.5274948", "text": "def get_motor_vals(self,t):\r\n t_adj = t-self.time_ref\r\n v = np.array([self.m0_func(t_adj), # front-right-upper\r\n self.m1_func(t_adj), # front-left-upper\r\n self.m3_func(t_adj), # back-right-upper\r\n self.m2_func(t_adj), # back-left-upper\r\n self.m5_func(t_adj), # front-right-lower\r\n self.m4_func(t_adj), # front-left-lower\r\n self.m6_func(t_adj), # back-right-lower\r\n self.m7_func(t_adj)]) # back-left-lower\r\n return v", "title": "" }, { "docid": "e2081ace6ff610b1ba7a363122aa2edf", "score": "0.5274095", "text": "def V2E(V):\n# for v in m/s returns energy in meV\n return 5.227e-6*V*V", "title": "" }, { "docid": "d3a0e70831deaf44fd68b1164b4c8e5d", "score": "0.52700233", "text": "def transform(self, data):\r\n 
# coef_2017 = data.get_synthetic_miniOD(None)[self.preselect].to_numpy().mean(axis=0)\r\n if self.mean:\r\n res, self.mean_coef = data.get_miniOD(hours=[], log=self.log, mean=self.mean)\r\n self.mean_coef = self.mean_coef[self.preselect].to_numpy()\r\n else:\r\n res = data.get_miniOD(hours=[], log=self.log, mean=self.mean)\r\n data = res[self.preselect].to_numpy() / utils.maxi(self.station_coef,0.001)\r\n res = np.zeros((data.shape[0], self.dim))\r\n # self.station_coef_2015 = np.zeros(coef_2015.shape)\r\n # self.station_coef_2017 = np.zeros(coef_2017.shape)\r\n for i in range(self.dim):\r\n a = self.labels == i\r\n # a_2015 = a[:len(self.preselect_2015)]\r\n # self.station_coef_2015[a_2015] = coef_2015[a_2015] / coef_2015[a_2015].sum()\r\n # self.station_coef_2017[a] = coef_2017[a] / coef_2017[a].sum()\r\n res[:, i] = data[:, a].mean(axis=1)\r\n # print(coef_2017)\r\n return res", "title": "" }, { "docid": "68801c01c2b870a53a3d59267d3fdf93", "score": "0.52684104", "text": "def _spd_transform(values, probs, variances):\n # Scaled Poisson distribution from Bohm and Zech, NIMA 748 (2014) 1-6\n scale = znp.maximum(\n values * tf.math.reciprocal_no_nan(variances), znp.ones_like(values)\n )\n probs = probs * scale\n values = values * scale\n return probs, values", "title": "" }, { "docid": "0e501111e0afa797b61a73ff3dde154b", "score": "0.5265829", "text": "def smooth(self) -> torch.Tensor:\n mu_smooth = self._smooth_cache.mu_tk\n cov_smooth = self._smooth_cache.cov_tk\n\n for t in reversed(range(self._smooth_cache.T - 1)):\n mu_p = self._smooth_cache.mu_tk_minus[t + 1]\n cov_p = self._smooth_cache.cov_tk_minus[t + 1]\n\n if self._is_continuous:\n # continuous: retrieve cached gain directly\n Kt = self._smooth_cache.G_tk[t + 1]\n else:\n # discrete: retrieve cached Jacobian\n A_t = self._smooth_cache.G_tk[t]\n Kt = cov_smooth[t] @ A_t.transpose(-1, -2) @ torch.inverse(cov_p)\n\n mu_smooth[t] = mu_smooth[t] + (\n Kt @ (mu_smooth[t + 1] - mu_p).unsqueeze(-1)\n ).squeeze(-1)\n cov_smooth[t] = cov_smooth[t] + Kt @ (\n cov_smooth[t + 1] - cov_p\n ) @ Kt.transpose(-1, -2)\n\n mu_smooth_tensor = torch.stack(mu_smooth)\n cov_smooth_tensor = torch.stack(cov_smooth)\n cov_smooth_tensor = reg_psd(\n 0.5 * (cov_smooth_tensor + cov_smooth_tensor.transpose(-1, -2)),\n reg=self._reg,\n ) # DEBUG - see other comment\n\n return self.gaussian_parameters_to_vector(mu_smooth_tensor, cov_smooth_tensor)", "title": "" }, { "docid": "d58b093daafdf9770fed91bfd1b7e08d", "score": "0.52529305", "text": "def verlet(x0, v0, m, acc, T, dt):\r\n n = m.size\r\n time = np.arange(0, T + dt, dt)\r\n pos = np.zeros((n, 3, len(time)))\r\n vel = np.zeros((n, 3, len(time)))\r\n pos[:, :, 0] = x0\r\n vel[:, :, 0] = v0\r\n for i in range(len(time) - 1):\r\n # Verlet step\r\n pos[:, :, i + 1] = pos[:, :, i] + (vel[:, :, i] + 0.5 * acc(pos[:, :, i], m) * dt) * dt\r\n vel[:, :, i + 1] = vel[:, :, i] + 0.5 * (acc(pos[:, :, i], m) + acc(pos[:, :, i + 1], m)) * dt\r\n return time, pos, vel", "title": "" }, { "docid": "37bfcb2c7b40d085d6f0270fddee2511", "score": "0.52521545", "text": "def _lin_final(self):\r\n self.vec['u'].array[:] /= self.vec['u0'].array[:]\r\n self.vec['f'].array[:] /= self.vec['f0'].array[:]\r\n self.vec['du'].array[:] /= self.vec['u0'].array[:]\r\n self.vec['df'].array[:] /= self.vec['f0'].array[:]\r\n if self.mode == 'rev':\r\n self.scatter('lin')", "title": "" }, { "docid": "a359e9215f1b9617534571aef6e8adca", "score": "0.5250503", "text": "def smooth(x, smoothie):\r\n size_x = np.size(x)\r\n if smoothie > 0:\r\n if 
(len(x) > 1 and len(x) < size_x):\r\n # out_add = append(append([x[0,:]]*smoothie,x,axis=0),\r\n # [x[(len(x)-1),:]]*smoothie,axis=0)\r\n # out_add = (np.append([x[0, :]]*int(smoothie), x, axis=0))\r\n out_add = np.vstack(([x[0, :]] * int(smoothie), x,\r\n [x[(len(x) - 1), :]] * int(smoothie)))\r\n help = np.transpose(out_add)\r\n # out = signal.lfilter(np.ones(smoothie) / smoothie, 1, help)\r\n out = signal.lfilter(\r\n np.hstack((np.ones(smoothie) / (2 * smoothie), 0,\r\n np.ones(smoothie) / (2 * smoothie))), 1, help)\r\n out = np.transpose(out)\r\n # out = out[smoothie:len(out), :]\r\n out = out[2 * smoothie:len(out), :]\r\n # out = filter(ones(1,smoothie)/smoothie,1,out_add)\r\n # out[1:smoothie,:] = []\r\n else:\r\n # out_add = np.append(np.append([x[0]] * smoothie, x),\r\n # [x[size_x - 1]] * smoothie)\r\n out_add = np.hstack(([x[0]] * int(smoothie), x,\r\n [x[(len(x) - 1)]] * int(smoothie)))\r\n out = signal.lfilter(np.hstack((\r\n np.ones(smoothie) / (2 * smoothie), 0,\r\n np.ones(smoothie) / (2 * smoothie))), 1, out_add)\r\n out = out[2 * smoothie:len(out)]\r\n out[0:smoothie] = out[smoothie]\r\n out[len(out) - smoothie:len(out)] = out[len(out) - smoothie - 1]\r\n # for i in xrange(smoothie, len(x) + smoothie):\r\n # sum = 0\r\n # for k in xrange(-smoothie, smoothie):\r\n # sum = sum + out_add[i + k]\r\n # suma[i - smoothie] = float(sum) / (2 * smoothie)\r\n # out = suma\r\n # out[0:smoothie] = out[smoothie]\r\n # out[size_x - 1 - smoothie:size_x] = \\\r\n # out[size_x - 1 - smoothie]\r\n else:\r\n out = x\r\n return out", "title": "" }, { "docid": "3327cef189bf70a34329d34aba5766f6", "score": "0.524582", "text": "def smooth(self, y=None):\n y = self._parse_y(y)\n\n # if not implement `filter`, implement `filter`\n try :\n self.x_pred[0]\n except :\n self.forward(y)\n\n T = y.shape[0]\n self.x_smooth = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype)\n self.V_smooth = self.xp.zeros((T, self.n_dim_sys, self.n_dim_sys),\n dtype = self.dtype)\n A = self.xp.zeros((self.n_dim_sys, self.n_dim_sys), dtype = self.dtype)\n\n self.x_smooth[-1] = self.x_filt[-1]\n self.V_smooth[-1] = self.V_filt[-1]\n\n # t in [0, T-2] (notice t range is reversed from 1~T)\n for t in reversed(range(T - 1)) :\n # visualize calculating times\n print(\"\\r smooth calculating... 
t={}\".format(T - t)\n + \"/\" + str(T), end=\"\")\n\n # extract parameters for time t\n F = _last_dims(self.F, t, 2)\n\n # calculate fixed interval smoothing gain\n A = self.xp.dot(self.V_filt[t], self.xp.dot(F.T, self.xp.linalg.pinv(self.V_pred[t + 1])))\n \n # fixed interval smoothing\n self.x_smooth[t] = self.x_filt[t] \\\n + self.xp.dot(A, self.x_smooth[t + 1] - self.x_pred[t + 1])\n self.V_smooth[t] = self.V_filt[t] \\\n + self.xp.dot(A, self.xp.dot(self.V_smooth[t + 1] - self.V_pred[t + 1], A.T))", "title": "" }, { "docid": "535f7746bbe243a112ff38f9acec80fa", "score": "0.5245585", "text": "def gradient_TV(v,u,lamb):\n# gradx and grady are not used here: to minimise the functional E2 \n# by gradient descent we chose to take the same boundary \n# conditions as for the quadratic resolution\n (sy,sx)=v.shape\n Kx=np.zeros((sy,sx))\n Ky=np.zeros((sy,sx))\n Kx[0,0]=1\n Kx[0,1]=-1\n Ky[0,0]=1\n Ky[1,0]=-1\n Dx=appfiltre(u,Kx)\n Dy=appfiltre(u,Ky)\n ng=(Dx**2+Dy**2)**0.5+1e-5\n div=appfiltre(Dx/ng,Kx)+appfiltre(Dy/ng,Ky)\n return 2*(u-v)-lamb*div", "title": "" }, { "docid": "f55a90d0ed62ef1803cb5ebe162da745", "score": "0.52455384", "text": "def get_model_vectors(v, By, Bz, tilt, f107, epsilon_multiplier = 1.):\n\n\n ca = np.arctan2(By, Bz)\n epsilon = v**(4/3.) * np.sqrt(By**2 + Bz**2)**(2/3.) * (np.sin(ca/2)**(8))**(1/3.) / 1000 * epsilon_multiplier # Newell coupling \n tau = v**(4/3.) * np.sqrt(By**2 + Bz**2)**(2/3.) * (np.cos(ca/2)**(8))**(1/3.) / 1000 # Newell coupling - inverse \n\n # make a dict of the 19 external parameters, where the keys are postfixes in the column names of coeffs:\n external_params = {'const' : 1 , \n 'sinca' : 1 * np.sin(ca),\n 'cosca' : 1 * np.cos(ca),\n 'epsilon' : epsilon ,\n 'epsilon_sinca' : epsilon * np.sin(ca),\n 'epsilon_cosca' : epsilon * np.cos(ca),\n 'tilt' : tilt ,\n 'tilt_sinca' : tilt * np.sin(ca),\n 'tilt_cosca' : tilt * np.cos(ca),\n 'tilt_epsilon' : tilt * epsilon ,\n 'tilt_epsilon_sinca': tilt * epsilon * np.sin(ca),\n 'tilt_epsilon_cosca': tilt * epsilon * np.cos(ca),\n 'tau' : tau ,\n 'tau_sinca' : tau * np.sin(ca),\n 'tau_cosca' : tau * np.cos(ca),\n 'tilt_tau' : tilt * tau ,\n 'tilt_tau_sinca' : tilt * tau * np.sin(ca),\n 'tilt_tau_cosca' : tilt * tau * np.cos(ca),\n 'f107' : f107 }\n\n # The SH coefficients are the sums in the expansion in terms of external parameters, scaled by the ext. 
params.:\n tor_c = reduce(lambda x, y: x+y, [coeffs['tor_c_' + param] * external_params[param] for param in external_params.keys()]).dropna()\n tor_s = reduce(lambda x, y: x+y, [coeffs['tor_s_' + param] * external_params[param] for param in external_params.keys()]).fillna(0)\n pol_c = reduce(lambda x, y: x+y, [coeffs['pol_c_' + param] * external_params[param] for param in external_params.keys()]).dropna()\n pol_s = reduce(lambda x, y: x+y, [coeffs['pol_s_' + param] * external_params[param] for param in external_params.keys()]).fillna(0)\n pol_s = pol_s.ix[pol_c.index] # equal number of sin and cos terms, but sin coeffs will be 0 where m = 0\n tor_s = tor_s.ix[tor_c.index] # \n\n\n return tor_c[:, np.newaxis], tor_s[:, np.newaxis], pol_c[:, np.newaxis], pol_s[:, np.newaxis], pol_c.index.values, tor_c.index.values", "title": "" }, { "docid": "29593bf1a22ecc920f32b51c3c9b0bbe", "score": "0.52386594", "text": "def ax(v, vx, b, m):\n return - (b * v * vx) / m", "title": "" }, { "docid": "6c4ace15f97faabdc371ecf0defcbdfd", "score": "0.5230752", "text": "def smooth(v):\n\tfor _ in range(2):\n\t\tfor i in xrange(0, len(v)-2, 2):\n\t\t\t\tv[i+1] = (v[i] + v[i+1] + v[i+2]) / 3\n\t\tfor i in xrange(1, len(v)-3, 2):\n\t\t\t\tv[i+1] = (v[i] + v[i+1] + v[i+2]) / 3\n\treturn v", "title": "" }, { "docid": "75a3322a7e59477d743f06f840747d36", "score": "0.5230007", "text": "def make_v_func():\n period_factor = random.uniform(0.3, 0.5)\n amplitude_factor = random.uniform(0.2, 0.9)\n def v_func(elapsed):\n return math.cos(period_factor * elapsed.to_sec() * math.pi * 2) * amplitude_factor\n return v_func", "title": "" }, { "docid": "923d9e692e74eb4eef93618e476b6455", "score": "0.5223346", "text": "def smooth_ust(u, z):\n z0 = 1e-3\n kappa = 0.4\n nu_air = 1.56e-5\n for i in range(20):\n ust = kappa * u / np.log(z / z0)\n z0 = 0.132 * nu_air / ust\n return ust", "title": "" }, { "docid": "02c72059b4cb2dccfa017a5c9ed8f0bf", "score": "0.5223317", "text": "def smooth(self, lag = 10):\n self.enkf.smooth(lag)", "title": "" }, { "docid": "1b2d0144421a9422747005175b528504", "score": "0.52084297", "text": "def _compute_smooth_during_construction(self, xi):\n if self._variance_in_window:\n beta = self._covariance_in_window / self._variance_in_window\n alpha = self._mean_y_in_window - beta * self._mean_x_in_window\n value_of_smooth_here = beta * (xi) + alpha\n else:\n value_of_smooth_here = 0.0\n return value_of_smooth_here", "title": "" }, { "docid": "fc12ed52c1cc6ab7c3e99922f525b13f", "score": "0.5207431", "text": "def fcn2min(params, x, data):\n\tamp = params['amp']\n\tshift = params['shift']\n\tomega = params['omega']\n\tdecay = params['decay']\n\tmodel = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)\n\treturn model - data", "title": "" }, { "docid": "eb91b3c9e77187a4a1f853328141b5d0", "score": "0.51989156", "text": "def smoothed_value(self) -> float:\n raise NotImplementedError", "title": "" }, { "docid": "397f44da29ed2cb55ab0177681cab762", "score": "0.5186666", "text": "def smoothed(self, iterations=1):\r\n copy = self.Clone(shallow=True)\r\n copy.Smooth(iterations)\r\n return copy", "title": "" }, { "docid": "76060d70bf215a1988d3ab520a25e9c8", "score": "0.5184236", "text": "def fcn2min(params, x, data):\n amp = params['amp']\n shift = params['shift']\n omega = params['omega']\n decay = params['decay']\n model = amp * np.sin(x*omega + shift) * np.exp(-x*x*decay)\n return model - data", "title": "" }, { "docid": "bab7bdee275a768411d4aed23d2576c3", "score": "0.51826066", "text": "def norme_VT(I):\n 
(sy,sx)=I.shape\n Kx=np.zeros((sy,sx))\n Ky=np.zeros((sy,sx))\n Kx[0,0]=1\n Kx[0,1]=-1\n Ky[0,0]=1\n Ky[1,0]=-1\n Dx=appfiltre(I,Kx)\n Dy=appfiltre(I,Ky)\n ng=(Dx**2+Dy**2)**0.5\n return ng.sum()", "title": "" }, { "docid": "999f4b0ae16ff88052f13c6a30dd9877", "score": "0.5182226", "text": "def lorentz(v, v0, I, w):\n # Adding a height scaling factor so that peak intensities are lowered as\n # they are more broad. If I is the intensity with a default w of 0.5 Hz:\n scaling_factor = 0.5 / w # i.e. a 1 Hz wide peak will be half as high\n return scaling_factor * I * ((0.5 * w) ** 2 / ((0.5 * w) ** 2 + (v - v0) ** 2))", "title": "" }, { "docid": "f645201d0a8977901e2515de5a732d31", "score": "0.51763344", "text": "def smoothed(x, y):\n\tpos = position()\n\tvect = D2Point(x, y) - pos\n\tvect.length = vect.length/SMOOTH\n\treturn pos + vect", "title": "" }, { "docid": "1d3d8e3d08535b3d6e67b60fb5c750e1", "score": "0.51738346", "text": "def backtransform(VCV):\r\n p = len(VCV[:,0])\r\n sigma2 = np.zeros([p])\r\n rho = np.zeros((p)*(p)).reshape(p,p) \r\n \r\n sigma2[0] = VCV[0,0]\r\n sigma2[1] = VCV[1,1] - VCV[0,1] * np.reciprocal(VCV[0,0]) * VCV[0,1]\r\n rho[0,1] = VCV[0,1] * np.reciprocal(VCV[0,0])\r\n i = 2\r\n for i in np.arange(2,p):\r\n sigma2[i] = np.float64(VCV[i,i] - np.dot(np.dot(VCV[i,0:i], np.linalg.inv(VCV[0:i,0:i])), VCV[i,0:i])) \r\n rho[0:i,i] = np.dot(np.linalg.inv(VCV[0:i,0:i]), VCV[i,0:i])\r\n \r\n return [rho,sigma2]", "title": "" }, { "docid": "7f9e0b56ef10d8a51bf38b62b5fb48c4", "score": "0.5173249", "text": "def latent_heat_vapourisation(tair):\n return (2.501 - 0.00237 * tair) * 1E06", "title": "" }, { "docid": "3cd9aab67137787fa8ac2c4c0e79103d", "score": "0.51690006", "text": "def transform(self,M):\n for index, v in enumerate(self.verts):\n self.verts[index] = [M[0]*v[0]+M[1]*v[1]+M[2],\\\n M[3]*v[0]+M[4]*v[1]+M[5]]", "title": "" }, { "docid": "19dc1756e4be1744f674fa598d2ff2c5", "score": "0.5165006", "text": "def test_kernel_smooth(self):\n expected_output = np.array(\n [\n [0.8125, 0.3750, 0.0625, 0.0, 0.0],\n [1.1250, 0.7500, 0.3125, 0.0625, 0.0],\n [1.8125, 1.3125, 0.7500, 0.3125, 0.0625],\n [2.5000, 1.8125, 1.1250, 0.6250, 0.1875],\n ]\n )\n\n output = self.plugin.smooth(self.umat, 2, method=\"kernel\")\n self.assertArrayAlmostEqual(output, expected_output)", "title": "" }, { "docid": "a892344bfbf29360f4ff1f9783b12973", "score": "0.5163979", "text": "def mirror_model(data):\n mirrored = data.copy(deep=True)\n mirrored[\"Ep\"] = -mirrored[\"Ep\"]\n mirrored[\"x0\"] = data[\"x0\"] + np.pi\n data = data.append(mirrored, ignore_index=True)\n data[\"x0\"] = data[\"x0\"] % (2*np.pi)\n data.sort_values(by=\"Ep\", inplace=True)\n return data", "title": "" }, { "docid": "5e81d7daea9bb0b50fbc8339cd9c7f2e", "score": "0.5163549", "text": "def minimise_TV_gradient(v,lamb,pas,nbpas):\n u=np.zeros(v.shape)\n Energ=np.zeros(nbpas)\n for k in range(nbpas):\n Energ[k]=lamb*norme_VT(u)+norm2(u-v)**2\n u=u-pas*gradient_TV(v,u,lamb)\n return (u,Energ)", "title": "" }, { "docid": "6a6971eac8e5587ea4245deb7e82fe79", "score": "0.51595795", "text": "def slerp(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "9f58707baca2258e3d4d2987d28dc8be", "score": "0.5145417", "text": "def von_mises_stress(self, **kwargs):\n return self._get_result_data_function_of_operator(\n \"S_eqv\", self, self._data_sources, **kwargs\n )", "title": "" }, { "docid": "79492d1e927eade73a296a0b3d144691", "score": "0.5142926", "text": "def smooth(data,vmax,mask,std=1):\n data_smth = np.copy(data)\n 
data_smth[np.isinf(data_smth)] = np.nan\n data_smth[data_smth>vmax] = vmax\n data_smth = convolve(data_smth,Gaussian2DKernel(x_stddev=std,y_stddev=std))\n data_smth[data_smth==0] = np.nanmean(data_smth)\n data_smth[np.isnan(mask)] = np.nan\n return data_smth", "title": "" }, { "docid": "35994c55f7a75f490a17e3058a7567bc", "score": "0.51372904", "text": "def smooth(x):\n x = min(abs(x), 1.0)\n return 1.0 - 3.0*(x**2) + 2.0*(x**3)", "title": "" }, { "docid": "fcbd18a1e87a47be74c4444ab62edb58", "score": "0.5133665", "text": "def smooth(perc):\n # from perc: bin1 0:2, bin2 3:6, bin3 7:10, bin4 11:14, bin5 15:18, bin6 19:22\n smoother = np.zeros(shape=(len(perc), len(perc[0]))) # shape=(22, 3)\n for j in range(len(perc[0])): # 3\n yax = perc[:, j]\n # print(yax) # columns from perc: j=0 --> -1sigma, j=1 --> median, j=2 --> +1sigma\n for i in range(3):\n smoother[i][j] = (yax[0] + yax[1] + yax[2]) / 3\n smoother[i+19][j] = (yax[-1] + yax[-2] + yax[-3]) / 3\n for i in range(4):\n smoother[i+3][j] = (yax[3] + yax[4] + yax[5] + yax[6]) / 4\n smoother[i+7][j] = (yax[7] + yax[8] + yax[9] + yax[10]) / 4\n smoother[i+11][j] = (yax[11] + yax[12] + yax[13] + yax[14]) / 4\n smoother[i+15][j] = (yax[15] + yax[16] + yax[17] + yax[18]) / 4\n\n # print(smoother)\n return smoother", "title": "" }, { "docid": "5a41e35a113a143ee68f45e41af3cc6c", "score": "0.51329136", "text": "def propagate(self, u):\n rhs = lambda t,s: DubinsCarDeriv(s,u)\n ans = solve_ivp(rhs, (0, self.dt), self.x)\n self.x = ans.y[:,-1]\n self.x += self.dyn_noise_coef @ np.random.standard_normal(self.state_dim)", "title": "" }, { "docid": "3367f986be14abdd86571ccbcd6d11cb", "score": "0.51274294", "text": "def compute_V(self,x):\n q,sys_params = x\n z = self.featurize(q,sys_params)\n vp,ep,up,_,_ = self.gnlayers(z) # (bs*n,k), (bs*n*n,k), (bs,k)\n energy = self.linear(up) # (bs,1)\n return energy.squeeze(-1)", "title": "" }, { "docid": "4aeb026b277b3d0044a55a476055996f", "score": "0.5125817", "text": "def run_velocyto(adata: anndata.AnnData) -> anndata.AnnData:\n vlm = converter(adata)\n\n # U_norm: log2(U_sz + pcount)\n # vlm.U_sz: norm_factor * U\n # S_norm: log2(S_sz + pcount)\n # vlm.S_sz norm_factor * S\n # vlm.Ux: smoothed unspliced\n # vlm.Sx: smoothed spliced\n # vlm.Ux_sz: smoothed unspliced -- old code\n # vlm.Sx_sz: smoothed spliced -- old code\n\n vlm.normalize() # add U_norm, U_sz, S_norm, S_sz\n vlm.perform_PCA()\n vlm.knn_imputation() # Ux, Sx, Ux_sz, Sx_sz\n vlm.pcs = adata.X # pcs: cell x npcs ndarray\n\n # vlm.Sx = vlm.S_sz\n # vlm.Ux = vlm.U_sz\n # vlm.Sx_sz = vlm.S_sz\n # vlm.Ux_sz = vlm.U_sz\n\n # gamma fit\n vlm.fit_gammas() # limit_gamma = False, fit_offset = True, use_imputed_data = False, use_size_norm = False\n\n # estimate velocity\n vlm.predict_U()\n vlm.calculate_velocity()\n\n # predict future state after dt\n vlm.calculate_shift() # assumption = 'constant_velocity'\n vlm.extrapolate_cell_at_t() # delta_t = 1.\n\n return vlm", "title": "" }, { "docid": "6445de255eb936d46cbe1e33e37c425f", "score": "0.5116838", "text": "def inv_transform(self,X): \r\n return self.fit_transform(X)@self.Vt_", "title": "" }, { "docid": "fc458e145457ca738c1ff2d175370b9e", "score": "0.51059663", "text": "def test_box_smooth(self):\n expected_output = np.array(\n [\n [0.84, 0.60, 0.36, 0.12, 0.04],\n [1.20, 0.92, 0.60, 0.28, 0.12],\n [1.56, 1.24, 0.84, 0.44, 0.20],\n [1.92, 1.56, 1.08, 0.60, 0.28],\n ]\n )\n\n output = self.plugin.smooth(self.umat, 2)\n self.assertArrayAlmostEqual(output, expected_output)", "title": "" }, { "docid": 
"03e04c9c01755a776ee8e6c097c5784f", "score": "0.5099542", "text": "def HH2000_Film(self):\n self.vs_over_v0 = -0.064*self.meltfrac\n self.vp_over_v0 = -0.029*self.meltfrac", "title": "" }, { "docid": "17fd33289dd74a69037ab391f899b409", "score": "0.50987995", "text": "def v_solver(dt,veff,gtot,p):\r\n\r\n if time_it:\r\n t1 = time.time()\r\n\r\n v = numpy.zeros(numpy.shape(veff),float)\r\n\r\n if numpy.shape(gtot)!=numpy.shape(veff):\r\n raise Exception,'gtot and veff must be of the same shape'\r\n\r\n v[0] = p.vr\r\n\r\n cm = p.Cm\r\n vth = p.vth\r\n vr = p.vr\r\n a = []\r\n\r\n # use weave\r\n\r\n code = \"\"\"\r\n for(int i=1;i<Nv[0];i++) {\r\n v(i) = (v(i-1)-veff(i-1))*exp(-((float)dt)*gtot(i-1)/cm)+veff(i-1);\r\n if (v(i)>vth) {\r\n v(i) = vr;\r\n a.append(i);\r\n }\r\n }\r\n \"\"\"\r\n\r\n #scipy.weave.inline(code,['v', 'gtot', 'dt', 'veff', 'cm', 'vr', 'vth','a'],\r\n # type_converters=scipy.weave.converters.blitz)\r\n weave.inline(code, ['v', 'gtot', 'dt', 'veff', 'cm', 'vr', 'vth', 'a'],\r\n type_converters=weave.converters.blitz)\r\n \r\n\r\n if time_it:\r\n print 'Elapsed ',time.time()-t1,' seconds.'\r\n \r\n return (v,a)", "title": "" }, { "docid": "4e4f78cbb41d2a8e31148ee4d5c156ae", "score": "0.509833", "text": "def arima_ons(data, options):\r\n\r\n #MATLAB:\r\n #mk = options.mk;\r\n #lrate = options.lrate;\r\n #w = options.init_w;\r\n #epsilon = options.epsilon;\r\n mk = options.mk\r\n lrate = np.array([[options.lrate]])\r\n w = options.init_w\r\n epsilon = options.epsilon\r\n\r\n #MATLAB:\r\n #list = [];\r\n #SE = 0;\r\n #A_trans = eye(mk) * epsilon;\r\n list = np.array([])\r\n SE = 0\r\n A_trans = np.eye(mk) * epsilon\r\n\r\n # MATLAB:\r\n # for i = mk+1:size(data,2)\r\n for i in range(mk, len(data)): #from 10 till 9999\r\n\r\n #MATLAB: diff = w*data(i-mk:i-1)'-data(i);\r\n diff = diff_calc(w, data, mk, i)\r\n\r\n #MATLAB: grad = 2*data(i-mk:i-1)*diff;\r\n grad = grad_calc(data, i, mk, diff)\r\n\r\n # MATLAB: A_trans = A_trans - A_trans * grad' * grad * A_trans/(1 + grad *\\\r\n # A_trans * grad');\r\n A_trans = A_trans_calc(A_trans, grad)\r\n\r\n # MATLAB: w = w - lrate * grad * A_trans ;\r\n w = w_calc_arima_ons(w, lrate, grad, A_trans) #weight modified by gradient descent\r\n\r\n # MATLAB:\r\n #SE = SE + diff ^ 2;\r\n SE = SE + diff ** 2\r\n\r\n # MATLAB:\r\n #if mod(i,options.t_tick)==0\r\n # list = [list; sqrt(SE/i)];\r\n #end\r\n if (i%options.t_tick) == 0:\r\n list = np.append(list, np.sqrt(SE / i))\r\n\r\n #make column from row\r\n list = list.reshape(list.size, -1)\r\n\r\n # test for i == 10\r\n if i == 10:\r\n test_arima_ons(i, mk, lrate, data, A_trans)\r\n\r\n return list, w", "title": "" }, { "docid": "db193efed583e7fa6770ba470f4d7b9f", "score": "0.50965494", "text": "def svd(self, X): # [5pts]\n raise NotImplementedError", "title": "" }, { "docid": "5453355449c7eb2c757cbb9231f7e1b4", "score": "0.5094336", "text": "def interp(U):\n V = np.zeros([4, P, Q, R, T])\n M1 = U.M1.copy()\n M2 = U.M2.copy()\n M3 = U.M3.copy()\n F = U.F.copy()\n V[0, :, :, :, :] = (M1[:-1, :, :, :] + M1[1:, :, :, :])/2\n V[1, :, :, :, :] = (M2[:, :-1, :, :] + M2[:, 1:, :, :])/2\n V[2, :, :, :, :] = (M3[:, :, :-1, :] + M3[:, :, 1:, :])/2\n V[3, :, :, :, :] = ( F[:, :, :, :-1] + F[:, :, :, 1:])/2\n return V", "title": "" }, { "docid": "2a0bd244e0b0a0f9306635bceac6e540", "score": "0.50781286", "text": "def E2V(E):\n# for energy in mev returns velocity in m/s\n return sqrt(E/5.227e-6)", "title": "" }, { "docid": "0eb5a0fff66b85f2c86637006f3f0012", "score": "0.507755", "text": "def 
_energy_with_gradient(self, velocity):\n sim = self.sim\n v = self.pixel2meters * velocity\n vt = self.pixel2meters * self.velocity #last time step velocity\n u = self.pixel2meters * self.expected_speed\n p2m = self.pixel2meters\n l1 = self.lambda1\n l2 = self.lambda2\n sd = self.sigma_d\n sw = self.sigma_w\n b = self.beta\n z = self.pixel2meters * self.goal_position\n p = self.pixel2meters * self.position\n\n # using back propogation in the following\n # E_s (speed) = lambda1 * (u - |v|)**2\n normv = np.linalg.norm(v) \n E_s = l1 * (u - normv) ** 2\n\n gnormv = - 2 * l1 * (u - normv)\n gvs = gnormv * v / normv\n gvs2pixel = gvs * p2m\n\n # E_d (direction) = - (p dot v) / (|p| * |v|)\n pdotv = np.dot((z - p), v)\n normv = np.linalg.norm(v)\n normpnormv = np.linalg.norm((z-p)) * normv\n E_d = -l2 * pdotv / normpnormv\n\n gpdotv = -l2 / normpnormv\n gnormpnormv = l2 * pdotv / normpnormv ** 2\n gnormv = gnormpnormv * np.linalg.norm(z-p)\n gvd = gnormv * v / normv\n gvd += gpdotv * (z - p)\n gvd2pixel = gvd * p2m\n\n # E_i = sigma(i)(wr(i) * exp(- d**2 / (2**sd**2)))\n # q = v - vj; k = pi - pj; cos(phi) = -kdotvt / (|k|*|vt|)\n # i is this pedestrian\n # d = k - kdotq * q / |q|**2\n # wr = exp(-k ** 2 / (2 * sw**2)) * ( (1+cos(phi)) / 2)**beta\n ID = self.ID\n index = np.argwhere(sim.ped_ID == ID)[0][0]\n ped_count = len(sim.ped_list)\n\n gvi2pixel = np.array([0., 0.])\n E_i = 0.\n\n if ped_count != 1:\n # if there is more than one pedestrian, calculate social energy\n k = np.delete(sim.ped_relative_position[index], index, axis = 0) * p2m # relative position \n q = np.tile(v, (ped_count - 1, 1)) - np.delete(sim.all_ped_velocity, index, axis = 0) * p2m\n\n kdotq = np.sum(k * q, axis = 1) \n normq = np.linalg.norm(q, axis = 1) \n t = - kdotq / normq ** 2 #kdotq / |q|**2\n mask = t>0\n maskt = mask * t\n d = k + q * maskt[:, np.newaxis]\n normd = np.linalg.norm(d, axis = 1)\n E_v = np.exp( - normd**2 / (2 * sd**2))\n wd = np.exp(- np.linalg.norm(k, axis = 1)**2 / (2 * sw**2))\n cos = - np.dot(vt, np.swapaxes(k, 0, 1)) / (np.linalg.norm(vt) * np.linalg.norm(k, axis = 1))\n wphi = ((1 + cos) / 2)**b\n E_i = np.sum(wphi * wd * E_v)\n\n\n gE_v = wphi * wd\n gnormd = gE_v * E_v * (- normd / sd**2)\n gd = (gnormd / normd)[:, np.newaxis] * d\n gmaskt = np.sum(q * gd, axis = 1)\n gq = gd * maskt[:, np.newaxis]\n gt = gmaskt * mask\n gnormq = 2 *gt * kdotq / normq**3\n gq += (gnormq / normq)[:, np.newaxis] * q\n gkdotq = - gt / normq**2\n gq += gkdotq[:, np.newaxis] * k\n gvi = np.sum(gq, axis = 0)\n gvi2pixel = gvi * p2m\n\n if 2 in self.debug_mode:\n print \"##########current pedestrian index: \", index\n print \"wd: \", wd\n print \"wphi: \", wphi\n print \"k: \", k\n print \"q: \", q\n print \"d: \", d\n print \"E: \", E_v\n print \"Speed energy S: \", E_s\n print \"direction energy D: \", E_d\n print \"social energy I: \", E_i\n print \"total energy E: \", E_i + E_s + E_d\n print \"\"\n print \"gI: \", gvi2pixel\n print \"gS: \", gvs2pixel\n print \"gD: \", gvd2pixel\n else:\n if 2 in self.debug_mode:\n print \"##########current pedestrian index: \", index\n print \"Speed energy S: \", E_s\n print \"direction energy D: \", E_d\n print \"social energy I: 0.\"\n print \"total energy E: \", E_s + E_d\n\n # sum energy and energy gradient together\n energy = E_s + E_d + E_i \n energy_gradient = gvs2pixel + gvd2pixel + gvi2pixel\n return (energy, energy_gradient)", "title": "" }, { "docid": "36ed956adbff3b065f94e2349cfef59f", "score": "0.5076376", "text": "def smooth(self, \n x=0,y=0, \n 
window='kaiser',\n debug = False): #smoothes via adjacent averaging\n # n is the seed of the odd numbers: n is how many nearest neighbors \n # in each direction\n # make sure n is integer and n < grid dimension\n # account for interpolation using grid factor\n nx = x\n ny = y\n # create the window function\n if window == 'kaiser':\n # beta, a real number, is a form parameter of the kaiser window\n # beta = 5 makes this look approximately gaussian in weighting \n # beta = 5 similar to Hamming window, according to numpy\n # over window (about 0 at end of window)\n beta=5.0\n wx = np.kaiser(2*nx+1, beta)\n wy = np.kaiser(2*ny+1, beta)\n # for a 2D array, y is the first index listed\n w = np.zeros((len(wy),len(wx)))\n for i in range(len(wy)):\n for j in range(len(wx)):\n w[i,j] = wy[i]*wx[j]\n # create a padded array of zi\n # numpy 1.7.x required for this to work\n temp_z = np.pad(self.zi, ((ny,ny), \n (nx,nx)), \n mode='edge')\n from scipy.signal import convolve\n out = convolve(temp_z, w/w.sum(), mode='valid')\n if debug:\n plt.figure()\n sp1 = plt.subplot(131)\n plt.contourf(self.zi, 100)\n plt.subplot(132, sharex=sp1, sharey=sp1)\n plt.contourf(w,100)\n plt.subplot(133)\n plt.contourf(out,100)\n self.z=out\n # reset zmax\n self.zmax = self.z.max()\n self.zmin = self.z.min()", "title": "" }, { "docid": "73a33b29ee98d59f02f7bda320cafd58", "score": "0.50693244", "text": "def sensor_model(self, observation, state):\n # Write your code here!\n\n fx = np.nonzero(observation)[0][0]\n fy = np.nonzero(observation)[1][0]\n tx = np.nonzero(state)[0][0]\n ty = np.nonzero(state)[1][0]\n\n\n prob = np.zeros((39, 39))\n switcher = {\n 0: 0.001 / 152,\n 1: 0.003 / 144,\n 2: 0.005 / 136,\n 3: 0.006 / 128,\n 4: 0.007 / 120,\n 5: 0.009 / 112,\n 6: 0.01 / 104,\n 7: 0.01 / 96,\n 8: 0.012 / 88,\n 9: 0.012 / 80,\n 10: 0.013 / 72,\n 11: 0.013 / 64,\n 12: 0.013 / 56,\n 13: 0.013 / 48,\n 14: 0.015 / 40,\n 15: 0.021 / 32,\n 16: 0.041 / 24,\n 17: 0.102 / 16,\n }\n for i in range(18):\n num = switcher.get(i)\n prob[i, i:39 - i] = prob[38 - i, i:39 - i] = [num] * (39 - 2 * i)\n prob[i:39 - i, i] = prob[i:39 - i, 38 - i] = [num] * (39 - 2 * i)\n prob[18, 18] = prob[20, 20] = prob[18, 20] = prob[20, 18] = 0.018\n prob[18, 19] = prob[20, 19] = prob[19, 20] = prob[19, 18] = 0.072\n prob[19, 19] = 0.334\n\n normalize = prob[19 - tx : 39- tx, 19 - ty : 39- ty]\n return normalize[fx,fy] / np.sum(normalize)", "title": "" }, { "docid": "363c73d807439c1cf62bc43ed9870a19", "score": "0.5057619", "text": "def _p_et_ ( p ) :\n return cpp.Gaudi.Math.Kinematics.transverseEnergy ( p.momentum() , p.momCovMatrix() )", "title": "" }, { "docid": "7294bad695abff834a81c12514c3ad6a", "score": "0.505471", "text": "def __smooth_data(self, U, nb_pts):\n N = nb_pts\n S = U.copy()\n S.fill(np.nan)\n mav = deque(maxlen=N)\n # initialize the mav (moving average) \n for e in U[:N]: mav.append(e)\n # move!\n index, count = N//2, 0\n while count < S.shape[0] - N :\n S[index] = np.mean(mav)\n mav.append(U[N+count])\n count += 1\n index += 1\n \n return S", "title": "" }, { "docid": "79a64b1cf554f8baef42a8486f2fa0e6", "score": "0.5053552", "text": "def _smooth_vector(x, window_len=5, window=\"hanning\"):\n\n if window_len < 3:\n return x\n\n s = np.r_[x[window_len - 1: 0: -1], x, x[-2: -window_len - 1: -1]]\n # print(len(s))\n if window == \"flat\": # moving average\n w = np.ones(window_len, \"d\")\n else:\n w = eval(\"np.\" + window + \"(window_len)\")\n\n y = np.convolve(w / w.sum(), s, mode=\"valid\")\n return y", "title": "" }, { "docid": 
"f46d1da3d2f1d66e98a4e57e8bbe3026", "score": "0.5053472", "text": "def bspline1dTimeSeries(self, cv, v0, a0, vn, an, n=100):\r\n\r\n ### pが偶数(p=4)の時に特化して実装  (mの値, A, cのサイズあたりをpに合わせて宣言するようにすれば対応できる)\r\n p = 4\r\n ### pが奇数の時はknotsベクトルの表現方法を変える. この時mの値も変わる (textbook: p.195参照)\r\n count = len(cv) - 1 # = n in textbook\r\n\r\n u = np.zeros(2*p+count+2)\r\n for i in range(p+1):\r\n u[i] = cv[0, 0]\r\n for i in range(count):\r\n u[i+p+1] = (cv[i+1, 0] + cv[i, 0]) / 2.0\r\n for i in range(p+1):\r\n u[i+p+1+count] = cv[-1, 0]\r\n\r\n # 係数行列を生成\r\n t = cv[:, 0]\r\n m = count + p\r\n A = np.zeros([3*2+count-1, 3*2+count-1])\r\n c = np.zeros([3*2+count-1])\r\n\r\n # Aに代入\r\n for i in range(m+1):\r\n A[0, i] = self.coxDeBoor(t[0], u, i, p)\r\n A[1, i] = self.coxDeBoorDerivative(t[0], u, i, p, der=1)\r\n A[2, i] = self.coxDeBoorDerivative(t[0], u, i, p, der=2)\r\n\r\n #u=umaxの時は特別な対処が必要 (textbook p.470参照)\r\n eps = 0.001\r\n A[-3, i] = self.coxDeBoor(t[-1]-eps, u, i, p)\r\n A[-2, i] = self.coxDeBoorDerivative(t[-1]-eps, u, i, p, der=1)\r\n A[-1, i] = self.coxDeBoorDerivative(t[-1]-eps, u, i, p, der=2)\r\n for i in range(count-1):\r\n for j in range(m+1):\r\n A[i+3, j] = self.coxDeBoor(t[i+1], u, j, p)\r\n\r\n # cに代入\r\n c[0] = cv[0, 1]\r\n c[1] = v0[0]\r\n c[2] = a0[0]\r\n c[-3] = cv[-1, 1]\r\n c[-2] = vn[0]\r\n c[-1] = an[0]\r\n for i in range(count-1):\r\n c[i+3] = cv[i+1, 1]\r\n\r\n # 係数を計算\r\n pcoeff = np.linalg.solve(A, c)\r\n\r\n # スプライン軌道を計算\r\n y = []\r\n dy = []\r\n ddy = []\r\n tsample = np.linspace(0, t[-1], n+1)\r\n for i in range(len(tsample)):\r\n tmp = 0\r\n tmp_der = 0\r\n tmp_der2 = 0\r\n for j in range(m+1):\r\n if i == len(tsample)-1:\r\n eps = 0.001\r\n tmp = tmp + pcoeff[j] * self.coxDeBoor(tsample[i]-eps, u, j, p)\r\n tmp_der = tmp_der + pcoeff[j] * self.coxDeBoorDerivative(tsample[i]-eps, u, j, p, der=1)\r\n tmp_der2 = tmp_der2 + pcoeff[j] * self.coxDeBoorDerivative(tsample[i]-eps, u, j, p, der=2)\r\n else:\r\n tmp = tmp + pcoeff[j] * self.coxDeBoor(tsample[i], u, j, p)\r\n tmp_der = tmp_der + pcoeff[j] * self.coxDeBoorDerivative(tsample[i], u, j, p, der=1)\r\n tmp_der2 = tmp_der2 + pcoeff[j] * self.coxDeBoorDerivative(tsample[i], u, j, p, der=2)\r\n y.append(tmp)\r\n dy.append(tmp_der)\r\n ddy.append(tmp_der2)\r\n\r\n return y, dy, ddy, tsample, pcoeff", "title": "" }, { "docid": "5c76f941ad5b8586961e75296c76c8a8", "score": "0.5053246", "text": "def univariate_kalman_smoother(y,Z,T,a,P,K,F,v): \n\n N = np.zeros((a.shape[0],a.shape[0],y.shape[0]+1))\n L = np.zeros((a.shape[0],a.shape[0],y.shape[0]+1))\n V = np.zeros((a.shape[0],a.shape[0],y.shape[0]+1))\n alpha = np.zeros((T.shape[0],y.shape[0]+1)) \n r = np.zeros((T.shape[0],y.shape[0]+1)) \n\n for t in reversed(range(y.shape[0])):\n if t != 0:\n L[:,:,t] = T - np.dot(K[:,t],Z)\n r[:,t-1] = np.dot(Z.T,v[t])/(F[:,:,t]).ravel()[0]\n N[:,:,t-1] = np.dot(Z.T,Z)/(F[:,:,t]).ravel()[0] + np.dot(np.dot(L[:,:,t].T,N[:,:,t]),L[:,:,t])\n alpha[:,t] = a[:,t] + np.dot(P[:,:,t],r[:,t-1])\n V[:,:,t] = P[:,:,t] - np.dot(np.dot(P[:,:,t],N[:,:,t-1]),P[:,:,t])\n else:\n alpha[:,t] = a[:,t]\n V[:,:,t] = P[:,:,t] \n return alpha, V", "title": "" }, { "docid": "0fb755074832eaa41ddaa83771720dc6", "score": "0.5052921", "text": "def vanderinv(v):\n\n N = v.shape[0]\n f = np.eye(N)\n x = v[:,1]\n n = N-1\n for k in range(0,n):\n for i in range(n,k,-1):\n f[i,:] = (f[i,:] - f[i-1])/(x[i] - x[i-k-1])\n for k in range(n-1,-1,-1):\n for i in range(k,n):\n f[i,:] = f[i,:] - f[i+1]*x[k]\n return f", "title": "" }, { "docid": "760ac442e5f6d59461c101aba371aeb8", 
"score": "0.50511706", "text": "def smoothing_test(self):\n data = [[0.0, 0.0], [1, 0.1], [2, 0.2], [3, 0.3], [4, 0.4]]\n tsSrc = TimeSeries.from_twodim_list(data)\n tsSrc.normalize(\"second\")\n\n # Initialize a correct result.\n # The numbers look a little bit odd, based on the binary translation problem\n data = [[1.5, 0.0],[2.5, 0.12000000000000002],[3.5, 0.24080000000000004],[4.5, 0.36099200000000004]]\n tsDst = TimeSeries.from_twodim_list(data)\n\n # Initialize the method\n hm = HoltMethod(0.2, 0.3, valuesToForecast=0)\n res = tsSrc.apply(hm)\n\n if not res == tsDst: raise AssertionError", "title": "" }, { "docid": "c4f8f4bcaa3c24cd6b5c784d4b97f3ef", "score": "0.50458753", "text": "def smooth_transition(x, f1, f2, x0, K):\n return f1 + 0.5 * (1 + tanh(K * (x - x0))) * (f2 - f1)", "title": "" }, { "docid": "c5f1a9685758b9f85475521defb2e4e0", "score": "0.5045794", "text": "def motion_model(x, u):\n F = np.eye(3)\n\n B = np.matrix([[DT * math.cos(x[2, 0]) * ratio, DT * math.cos(x[2, 0]) * ratio],\n [DT * math.sin(x[2, 0]) * ratio, DT * math.sin(x[2, 0]) * ratio],\n [DT * ratio, -DT * ratio],\n ])\n\n x = F * x + B * u\n\n return x", "title": "" }, { "docid": "95d6c49d83495ae4f5adb50b9f23862c", "score": "0.5042326", "text": "def deprojectv( v_obs, inclination ):\n\n\treturn v_obs / math.sin( inclination * DEGREE_TO_RADIAN )", "title": "" }, { "docid": "48747e6f21ce23399b1efb8931498558", "score": "0.5039111", "text": "def tie_Pavlovetal2019(Is, Ir, energy, z1, z2, pix_size, delta, beta, bg_val, scale):\n\n lambda_energy = kevToLambda(energy)\n waveNumber = (2 * pi) / lambda_energy\n mu = 2 * waveNumber * beta\n\n magnificationFactor = (z1 + z2) / z1\n pix_size=pix_size*magnificationFactor\n #pix_size = pix_size * magnificationFactor\n\n sigmaSource = 150.e-6\n\n gamma = delta / beta\n\n is_divided_by_Ir = np.true_divide(Is, Ir)\n\n numerator = 1 - is_divided_by_Ir\n\n # average_image = np.mean(numerator)\n # Correction on the average image. Now the average of the new array is ~0\n # numerator = numerator - average_image\n\n saveEdf(numerator, 'ImageNew.edf')\n\n padCol = 1600\n padRow = 1600\n width, height = numerator.shape\n numerator = np.pad(numerator, ((padRow, padRow), (padCol, padCol)), 'reflect')\n\n fftNumerator = fftshift(fft2(numerator))\n\n Nx, Ny = fftNumerator.shape\n print('Nx:'+str(Nx)+' Ny:'+str(Ny))\n u, v = np.meshgrid(np.arange(0, Nx), np.arange(0, Ny))\n u = (u - (Nx / 2))\n v = (v - (Ny / 2))\n\n u_m= u / (Nx * pix_size)\n v_m = v / (Ny * pix_size)\n uv_sqr= np.transpose(u_m ** 2 + v_m ** 2) # ie (u2+v2)\n # without taking care of source size\n # denominator = 1 + pi * gamma * z2 * lambda_energy * k_sqr\n\n # Beltran et al method to deblur with source\n denominator = 1 + pi * (gamma * z2 - waveNumber * sigmaSource * sigmaSource) * lambda_energy * uv_sqr\n\n# denominator *= magnificationFactor\n tmp = fftNumerator / denominator\n\n # Low pass filter\n sigma_x = ((1/ (Nx * pix_size*1.6)) * scale) ** 2\n sigma_y = ((1/ (Ny * pix_size*1.6)) * scale) ** 2\n f = (1. - np.exp(-(u_m ** 2 / (2. * sigma_x) + v_m ** 2 / (2. 
* sigma_y)))) # ie f(x,y)\n lff = np.transpose(f) # ie LFF\n\n # Application of the Low pass filter\n tmp = lff * tmp\n\n # inverse Fourier transform\n tmpThickness = ifft2(ifftshift(tmp)) # F-1\n img_thickness = np.real(tmpThickness)\n # Division by mu\n img_thickness = img_thickness / mu\n # multiply to express the result in microns\n img_thickness = img_thickness * 1e6\n # unpadding\n img_thickness = img_thickness[padRow:padRow + width, padCol:padCol + height]\n img_thickness += bg_val\n\n return img_thickness", "title": "" }, { "docid": "03cfbc9a5410927b15033b923a85e77e", "score": "0.50325376", "text": "def VETOAct(PPM): #done\n #def mass\n mass = 1.4 #kg\n #Dim Vars\n n = 296\n IsoAct = list(range(len(Iso[1])))\n for i in range(len(Iso[1])):\n IsoAct[i] = (Lam[i]*PPM[i]*Abs[i])/(Ms[i]*1e6)*mass*n\n return IsoAct", "title": "" }, { "docid": "9b4df7b0d61a8a994efd6eb3802ba3bc", "score": "0.5030979", "text": "def viterbi(self, observations):\n\n # delta[t][i]= max_{q_1, \\\hdots, q_{t-1}} P[q_1, \\\hdots, q_t=i,O_1, \\\hdots, O_t | \\\lambda]\n # delta[t+1][i]= [max_i \\\delta_t(i) * a_{i,j}] * b_j(O_{t+1})\n delta=[]\n # psi[t][i]= the state corresponding to delta[t][i]\n psi= []\n\n delta= [{}]\n psi= [{}]\n\n states= self.states()\n for s in states:\n psi[0][s]= s # placeholder; I don't think this value is ever used\n delta[0][s]= self.initial_probability[s]*self.get_observation_probability(observations[0], s)\n\n\n # for the remaining observations (step 2)\n for t in range(1, len(observations)):\n observation= observations[t]\n delta_t= {}\n psi_t= {}\n\n for s in states:\n new_max= -1 # these are probabilities, so -1 will never occur\n s_max= None\n for old_s in states:\n tmp= delta[t-1][old_s]*self.state_transition[old_s][s]\n if new_max < tmp:\n new_max= tmp\n s_max= old_s\n\n if s_max is None:\n raise Exception(\"can't find maximum for delta[%s][%s]\" % (t,s))\n\n new_max*= self.get_observation_probability(observation, s)\n delta_t[s]= new_max\n psi_t[s]= s_max\n\n delta.append(delta_t)\n psi.append(psi_t)\n \n # termination (step 3)\n observation_probability= -1\n delta_T= delta[len(observations)-1]\n hidden_state= None\n for s in states:\n tmp= delta_T[s]\n\n if tmp > observation_probability:\n observation_probability= tmp\n hidden_state= s\n\n if hidden_state is None:\n raise Exception(\"can't find last hidden_state\")\n\n hidden_states= [hidden_state]\n # walk backwards through all observations except the last\n for t in range(len(observations)-2,-1,-1): \n first_state= hidden_states[0]\n hidden_states.insert(0, psi[t+1][first_state])\n\n return (observation_probability, hidden_states)", "title": "" }, { "docid": "56c06fb18f37a0e0bf732f7d5bf9d8cd", "score": "0.50303566", "text": "def Update(self,u):\n for iv in range(u.nv):\n for idim in range(u.ndim):\n # Divergence update\n u.var[iv] -= u.dtds*(u.flux[iv,idim]-np.roll(u.flux[iv,idim],1,axis=idim))", "title": "" }, { "docid": "0a7fba4ede1d1e7f5c4417705a6d8fb2", "score": "0.5028", "text": "def update_values(self):\n points = self.discretization.all_points\n self.values = self.lyapunov_function(points).eval().squeeze()", "title": "" }, { "docid": "30654a656fa58db1a8fe4bd8115b164d", "score": "0.5024316", "text": "def DONonLin(x, V, Y, Mass, c, fun_rhsUser, fun_jacUser, fun_BilinUser, integr_param, mask_index_x):\n\n #Orthogonalise the stochastic basis\n V, R\t= qrM(V, Mass)\n\n #Integration parameters\n T = integr_param['T']\n dt = integr_param['dt']\n ndtsub = integr_param['ndtsub']\n \n #Constant\n dtsub\t= dt / ndtsub\n\n #Generate 
covariance matrix\n mV, nV\t\t= len(V), len(V[0])\n mY, stoch_size\t= len(Y), len(Y[0])\n YY\t\t = np.zeros((mY * mY, stoch_size))\n\n #Determine the covariance matrix and bilinearform\n for i in range(mY):\n for j in range(mY):\n YY[i * mY + j]\t= Y[i] * Y[j]\n\n #Generate dictionary for the expectance for Y and <Y, Y> = YY\n Exps\t\t= {}\n Exps['YY']\t= np.sum(YY, axis = 1) / stoch_size\t\n VV\t\t = fun_BilinUser(V, V)\n Jac\t\t = fun_jacUser(x)\n \n time_all = np.zeros(int(T/dt)+1)\n x_all = np.zeros((len(time_all), len(x)))\n V_all = np.zeros((len(time_all), mV, nV))\n Y_all = np.zeros((len(time_all), mY, stoch_size))\n norm_x = np.zeros(len(time_all))\n norm_V = np.zeros((len(time_all), mY))\n\n for time_i in range(len(time_all)):\n #Time loop, propagate mean, V and Y\n print('-----------------------------------------------')\n print('Time step '+str(time_i+1)+' of '+str(len(time_all)))\n time_all[time_i] = time_i * dt\n \t\n #Determine the reduced stochastic ODE elements\n VJacV\t= np.matmul(V.transpose(), np.matmul(Jac, V))\n VVV\t = np.matmul(V.transpose(), VV)\n Vc\t = np.matmul(V.transpose(), c)\n\n #Reshape dimensions for Vc (if 1 dimensional)\n if len(np.shape(Vc)) == 1: \n Vc = Vc.reshape((len(Vc), 1))\n \n #Determine and update Y, YY\n print('\\nDetermine stochastic coefficients...')\n Y, YY, Exps\t= SODEsolve(Y, YY, Exps, VJacV, VVV, Vc, dtsub, ndtsub)\n print('Stochastic coefficients finished \\n')\n\n #Determine and update V, VV\n print('Determine stochastic basis...')\n V, VV, R\t= SolveStochBasis(Mass, Jac, fun_BilinUser, V, VV, dt, Exps, c)\n print('Stochastic basis finished \\n')\n\n #Adapt Y and the covariance matrix based on the change in V\n Y\t\t= np.matmul(R, Y)\n Exps['YY']\t= np.reshape(np.matmul(np.matmul(R, np.reshape(Exps['YY'], (nV, nV))), R.transpose()), nV * nV)\n \n #Determine the eigenvalues\n w, q = np.linalg.eig(np.array(np.reshape(Exps['YY'], (nV, nV)), dtype = np.float))\n w = np.sort(w)[::-1]\n norm_V[time_i] = w\n \n #Propogate the mean (x): Mass dx/dt = F(x,0) + <V,V>Ekron(Y,Y)\n function_1\t= lambda xh: fun_rhsUser(xh, np.matmul(VV, Exps['YY']))\n function_2\t= lambda xh: fun_jacUser(xh)\n function_3\t= lambda xh: rhs_JacBE(function_1, function_2, Mass, xh, x, dt)\n\n #Determine the new mean and update the Jacobian\t of mean state\n print('Newton iteration started')\n x, res, test1, flg, test2, Jac\t= NewtonRaphson(function_3, x, mask_index_x)\n norm_x[time_i]\t\t\t = np.linalg.norm(x, 2)\n print('Newton iteration finished \\n')\n\n #Save all the time series (mean, V and Y)\n x_all[time_i]\t= x\n V_all[time_i]\t= V\n Y_all[time_i]\t= Y\n \n\n return time_all, x_all, V_all, Y_all, norm_x, norm_V", "title": "" }, { "docid": "4166d558c1b6f3edb7454763d7d57220", "score": "0.50242984", "text": "def forward(self, x, y):\n\n return 1. 
- self.vsi(x=x, y=y)", "title": "" }, { "docid": "72ad8502c01734b9a96b9a90a5aefe48", "score": "0.5021422", "text": "def lensingprobability(zs,Mapp):\n\n # cosmological parameters\n Omega_m = 0.3\n Omega_L = 1-Omega_m\n w = -1\n h = 0.72 # *100 km/s/Mpc = Hubble's constant\n c = 299792.458 # km/s\n\n # integration over velocity dispersion\n nbin_v = 30\n minv = 10**1.6\n maxv = 10**2.6\n vel = np.linspace(minv,maxv,nbin_v)\n\n # this array will contain the function to integrate over v\n integrand = []\n\n # for the integration over redshift zl\n # quantities independant from v\n nbin = 30\n # in order to avoid nans (dls = 0), zl must never be equal to zs\n zl = np.arange(nbin)*np.float(zs)/nbin\n\n hl = 100*np.sqrt(Omega_m*(1+zl)**3+Omega_L*(1+zl)**(-3*(1+w))) \n # hl in km/s/(Mpc/h)\n volume = (1+zl)**2*c/hl # in Mpc/h\n\n ds = distance(zs)\n dls = []\n for z in zl:\n dls.append(distance(zs,z))\n dls = np.array(dls)\n\n for v in vel:\n vdisp = velocitydispersion(v) # km/s/(Mpc/h)**3\n\n # biased lensing cross-section\n # can lensingcrosssection take an array for zl???\n # It seems that yes\n sigma_l = []\n for i in range(len(zl)):\n dist = distance(zs)\n # converting magnitude to absolute magnitude\n Mabs = Mapp-5*(np.log10(dist*1e6)-1)\n # a is in square radian\n a = lensingcrosssection(v,zl[i],zs,Mabs)\n # sigma_l is in Mpc/h\n sigma_l.append(a*dist**2)\n sigma_l = np.array(sigma_l)\n\n # result of the integration over the redshift\n integrand_z = volume*vdisp*sigma_l \n integral = np.trapz(integrand_z,zl)\n\n integrand.append(integral)\n\n integral_vel = np.trapz(integrand,vel)\n\n return integral_vel", "title": "" }, { "docid": "3d44bf34a857077cb1b344ad2dbb28c4", "score": "0.5021098", "text": "def update_V(self,c_states, v_states, e_fermi, zero_elfield=True): \n self._counter += 1\n self._Ef[self._counter%2] = e_fermi\n \n max_iteration = 5000\n\n V_conv_threshold = 2.e-4\n \n Ef_conv_threshold = 1.e-6 # if fermi energy does not change from one iteration to another, converged\n\n free_electrons_density = get_electron_density(c_states, e_fermi, self._conddosmass, self.npoints, self._conddegen, smearing=self.smearing, beta_eV=self.beta_eV)\n free_holes_density = get_hole_density(v_states, e_fermi, self._valdosmass, self.npoints, self._valdegen, smearing=self.smearing, beta_eV=self.beta_eV)\n \n total_charge_density = self._doping - free_electrons_density + free_holes_density\n \n #updating the time spent solving Poisson\n start_t = time.time()\n if is_periodic: \n new_V = -1.*spw.periodic_recursive_poisson(self._xgrid,total_charge_density,self._alpha,max_iteration)[0] # minus 1 because function returns electrostatic potential, not energy\n else:\n new_V = -1.*spw.nonperiodic_recursive_poisson(self._xgrid,total_charge_density,self._alpha,max_iteration)[0]\n end_t = time.time()\n\n self._time_Poisson += end_t-start_t\n \n new_V -= n.mean(new_V)\n \n if self._counter == 1:\n self._max_ind = n.argmax(new_V)\n \n if zero_elfield:\n # in V/ang\n self._slope = (new_V[-1] - new_V[0])/(self._xgrid[-1] - self._xgrid[0])\n new_V -= self._slope*self._xgrid\n else:\n self._slope = 0.\n \n #we want ot avoid oscillations in converging algorithm\n #one has to stock new_V for comparison purposes\n \n \n self._indicator[self._counter%2] = new_V[self._max_ind]-self._V[self._max_ind] #need to keep track of oscillations when converging\n\n \n if self._indicator[0]*self._indicator[1] < 0:\n #oscillation, take the middle ground\n if not reduce_stdout_output: \n print(\"OSCILLATION\")\n self._subcounter = 0\n 
self.max_step_size *= 0.1\n if self.max_step_size <= 0.1*V_conv_threshold:\n self.max_step_size = 0.1*V_conv_threshold\n \n else:\n self._subcounter += 1\n \n if self._subcounter == 20:\n self.max_step_size *= 1.4\n self._subcounter = 0\n \n step = new_V - self._V\n current_max_step_size = n.max(n.abs(step))\n \n #convergence check\n self._over = False\n if current_max_step_size < V_conv_threshold:\n start_t = time.time()\n if is_periodic:\n check_V = -1.*spw.periodic_recursive_poisson(self._xgrid,total_charge_density,self._alpha,max_iteration)[0] # minus 1 because function returns electrostatic potential, not energy\n\n else:\n check_V = -1.*spw.nonperiodic_recursive_poisson(self._xgrid,total_charge_density,self._alpha,max_iteration)[0]\n end_t = time.time()\n self._time_Poisson += end_t-start_t\n check_val = n.max(n.abs(check_V-self._V))\n self._indicator[self._counter%2] = check_V[self._max_ind]-self._V[self._max_ind]\n if check_val > 5*V_conv_threshold:\n current_max_step_size = check_val\n step = check_V - self._V\n #self.max_step_size *= 0.5\n else:\n \n self._over = True\n\n \n if not reduce_stdout_output: \n print('convergence param:', current_max_step_size) \n \n if current_max_step_size != 0 and self._over == False:\n #self._V += step * min(self.max_step_size, current_max_step_size) #/ (current_max_step_size)\n self._V += step * self.max_step_size\n self._old_V = self._V.copy()\n elif current_max_step_size == 0 and self._over == False:\n self._V = new_V\n self._old_V = self._V.copy()\n \n elif self._over == True:\n self._V = self._old_V\n if not reduce_stdout_output:\n print(\"Final convergence parameter: \", check_val)\n self._finalV_check = check_val\n \n if n.abs(self._Ef[0]-self._Ef[1]) <= Ef_conv_threshold and current_max_step_size < 10*V_conv_threshold:\n self._E_count += 1\n \n if self._E_count == 4:\n if not reduce_stdout_output:\n print(\"Convergence of Fermi energy: \", n.abs(self._Ef[0]-self._Ef[1]))\n current_max_step_size = 0.1*V_conv_threshold # froced convergence if that happens 4 times in a row\n if is_periodic:\n check_V = -1.*spw.periodic_recursive_poisson(self._xgrid,total_charge_density,self._alpha,max_iteration)[0] # minus 1 because function returns electrostatic potential, not energy\n\n else:\n check_V = -1.*spw.nonperiodic_recursive_poisson(self._xgrid,total_charge_density,self._alpha,max_iteration)[0]\n check_val = n.max(n.abs(check_V-self._V))\n if not reduce_stdout_output: \n print(\"Final convergence parameter: \", check_val)\n self._finalV_check = check_val\n else:\n self._E_count = 0 \n \n self._finalE_check = n.abs(self._Ef[0]-self._Ef[1])\n return current_max_step_size < V_conv_threshold", "title": "" }, { "docid": "c3349a1e075142832d1e81a1df06fe10", "score": "0.5017713", "text": "def linear(G, V):\n return G", "title": "" }, { "docid": "b18ff0af2f158b1a180ac396f8b6c68a", "score": "0.50148976", "text": "def get_V_electrodes(self,x): # accepts 'x' as a list\n\n\t\tV=np.zeros(x.size)\n\t\t# add initial field using conformal mapping\n\t\tfor i, XX in enumerate(x):\n\t\t\tV[i]+=self.get_V_inf(XX)\n\t\tlayer=self.interface_of_electrodes+1\n\t\t#z=np.sum([o.z for o in self.interfaces[:layer]])\n\t\t# we are above electrodes\n\t\tgetPfield=self.get_P_field_pos\n\t\tif len(self.interfaces) > layer:\n\t\t\treverse_exp_arg_fac=-np.pi*2*self.layers[layer].t_eff\n\t\t\treverse_exp_mul_fac=np.exp(reverse_exp_arg_fac*2)\n\t\t\treverse_cur_exp=np.exp(reverse_exp_arg_fac)\n\t\telse:\n\t\t\treverse_cur_exp=0\n\t\tfor n in 
range(self.max_fourier_n):\n\t\t\tN=2*n+1\n\t\t\tP=getPfield(n)\n\t\t\tA2np1=self.getA(n)\n\t\t\tsin=np.sin(N*np.pi*x)\n\t\t\tV+=-1/np.pi/N*A2np1*sin*(P-1)\n\t\t\tR=0\n\t\t\tif len(self.interfaces) > layer:\n\t\t\t\tR=self.interfaces[layer].get_r_pos_dir_eff(n) #pos dir\n\t\t\tif not R==0:\n\t\t\t\tV+=-1/np.pi/N*A2np1*sin*reverse_cur_exp*P*R\n\t\t\t\treverse_cur_exp*=reverse_exp_mul_fac\n\t\t\tif abs(P-1)/N<self.accuracy_limit and abs(P*reverse_cur_exp/N)<self.accuracy_limit :#np.abs(P-1)/N<self.accuracy_limit:\n\t\t\t\tbreak\n\t\treturn V", "title": "" }, { "docid": "467a7c9201771c07c89215ce4eb44532", "score": "0.5014489", "text": "def smooth(data,N):\r\n \r\n cumsum = np.cumsum(np.insert(data, 0, 0)) \r\n return (cumsum[N:] - cumsum[:-N]) / N", "title": "" }, { "docid": "ae9f8b0fa6b64e46fefb3fe1021bccf5", "score": "0.50143534", "text": "def _sigma_pair_smooth(self, T):\n self.x_smooth = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype)\n self.V_smooth = self.xp.zeros((T, self.n_dim_sys, self.n_dim_sys),\n dtype = self.dtype)\n\n # pairwise covariance\n self.V_pair = self.xp.zeros((T, self.n_dim_sys, self.n_dim_sys),\n dtype = self.dtype)\n A = self.xp.zeros((self.n_dim_sys, self.n_dim_sys), dtype = self.dtype)\n\n self.x_smooth[-1] = self.x_filt[-1]\n self.V_smooth[-1] = self.V_filt[-1]\n\n # t in [0, T-2]\n for t in reversed(range(T - 1)) :\n # visualize calculating time\n print(\"\\r self.expectation step calculating... t={}\".format(T - t)\n + \"/\" + str(T), end=\"\")\n\n # extract parameters at time t\n F = _last_dims(self.F, t, 2)\n\n # calculate fixed interval smoothing gain\n A = self.xp.dot(self.V_filt[t], self.xp.dot(F.T, self.xp.linalg.pinv(self.V_pred[t + 1])))\n \n # fixed interval smoothing\n self.x_smooth[t] = self.x_filt[t] \\\n + self.xp.dot(A, self.x_smooth[t + 1] - self.x_pred[t + 1])\n self.V_smooth[t] = self.V_filt[t] \\\n + self.xp.dot(A, self.xp.dot(self.V_smooth[t + 1] - self.V_pred[t + 1], A.T))\n\n # calculate pairwise covariance\n self.V_pair[t + 1] = self.xp.dot(self.V_smooth[t + 1], A.T) # self.V_smooth[t]", "title": "" }, { "docid": "59150dadbdb1dfd3bfae54c3f1255776", "score": "0.5014165", "text": "def MetrSampF_egv(N, k, smpx, q2, sae, burn=100, Fini=None):\n y = smpx[1:N, 0].reshape(N-1, 1) # a column vector\n xnm = smpx[0:N-1, :] # a column vector\n\n ### conditional posterior moments\n iCov = _N.dot(xnm.T, xnm)/q2 # inv conditional posterior cov.\n Cov = _N.linalg.inv(iCov) \n M = _N.dot(Cov, _N.dot(xnm.T, y))/q2 # conditional posterior mean\n # print M\n # print iCov\n\n # initial value of F\n if Fini == None:\n# F = generateValidAR(k) # returns a column vector\n F = sae.draw() # returns a column vector\n else:\n Fini = Fini.reshape((k, 1))\n F = Fini\n\n FM = F - M\n\n # The Fn's being generated are not uniform in AR space\n # This non-uniformity acts as a prior?\n aO = -0.5*_N.dot(FM.T, _N.dot(iCov, FM)) # arguments to exp\n\n\n rands = _N.random.rand(burn)\n\n for n in range(burn):\n# Fn = generateValidAR(k)\n Fn = sae.draw()\n FnM = Fn - M\n\n aC = -0.5*_N.dot(FnM.T, _N.dot(iCov, FnM))\n r = _N.exp(aO - aC) # or compare aO - aC with 0\n if rands[n] < min(r, 1):\n F = Fn\n aO = aC\n\n# lrands = _N.log(_N.random.rand(burn))\n\n# for n in range(burn):\n# Fn = generateValidAR(k)\n# FnM = Fn - M\n\n# aC = -0.5*_N.dot(FnM.T, _N.dot(iCov, FnM))\n# # r = _N.exp(aO - aC) # or compare aO - aC with 0\n# lr = aC - aO # or compare aO - aC with 0\n# # print \"--- %(aC).3e %(aO).3e %(diff).3e\" % {\"aC\" : aC, \"aO\" : aO, \"diff\" : (aO-aC)}\n# if lrands[n] 
< min(lr, 0):\n# F = Fn\n# aO = aC\n\n\n return F[:,0]", "title": "" }, { "docid": "bf476e16435d6283a013d04788186256", "score": "0.50036", "text": "def backwards_smooth(self):\n\n if len(self.associated_keypoints) == 0:\n return [], []\n\n n_states = 1 + self.associated_keypoints[-1].frame_idx - self.initial_frame_idx\n if n_states == 0:\n return [], []\n\n # The output array\n smoothed_states = [None,] * n_states\n smoothed_covars = [None,] * n_states\n\n # Start with final state/covariance\n smoothed_states[-1] = self.states[n_states-1]\n smoothed_covars[-1] = self.covariances[n_states-1]\n\n # Working backwards...\n for idx in xrange(n_states-1, 0, -1):\n smoothed_state_kp1 = smoothed_states[idx]\n smoothed_covar_kp1 = smoothed_covars[idx]\n\n a_posteriori_state = self.states[idx-1]\n a_posteriori_covariance = self.covariances[idx-1]\n\n a_priori_state = self._a_priori_states[idx]\n a_priori_covariance = self._a_priori_covariances[idx]\n\n C = a_posteriori_covariance.dot(state_evolution_mat.T).dot(np.linalg.inv(a_priori_covariance))\n smoothed_state = a_posteriori_state + C.dot(smoothed_state_kp1 - a_priori_state)\n smoothed_covar = a_posteriori_covariance + C.dot(smoothed_covar_kp1 - a_priori_covariance).dot(C.T)\n\n smoothed_states[idx-1] = smoothed_state\n smoothed_covars[idx-1] = smoothed_covar\n\n return smoothed_states, smoothed_covars", "title": "" }, { "docid": "e13862af92d174ff300d792f0dfd38fc", "score": "0.5000253", "text": "def age_to_v(t, coeffs):\n b, a = coeffs\n logv = (np.log(t) - a)/b\n return np.exp(logv)", "title": "" }, { "docid": "a6e3f0df182d3c86e9be30f2dae21227", "score": "0.49992162", "text": "def test_values(self):\n first_row_v = np.array(\n [\n 2.451711,\n 2.451711,\n 2.451711,\n 2.341303,\n 2.341303,\n 2.341303,\n 2.028805,\n 2.028805,\n 2.028805,\n 1.694845,\n 1.694845,\n 1.694845,\n 1.503583,\n 1.503583,\n ]\n )\n vmat = self.plugin._smooth_advection_fields(self.vmat, self.weights)\n self.assertArrayAlmostEqual(vmat[0], first_row_v)", "title": "" }, { "docid": "70dc6d68916b43675d9054ec5d525009", "score": "0.4997256", "text": "def forward_transform(self, u):\n\n assert np.shape(u)[0]==self.n+1\n\n v = np.matmul(self.P[:,0] * self.weights, u)\n\n v[-1] = v[-1]/(2.0 + (self.alpha + self.beta + 1.0)/self.n)\n\n return v", "title": "" }, { "docid": "2abad61d661331b7deab8e1c4a255798", "score": "0.49966547", "text": "def invert(self) -> 'PreisachModel':\r\n invModel = PreisachModel(self.n, self.alpha0)\r\n\r\n # Construct set of first order reversal curves (FODs) for identification of the inverse everett map\r\n # number of FODs correspond to the number of Hystereon elements n in Preisach plane\r\n FODs = np.zeros((self.n * self.n // 2 + self.n // 2 + 1, 2), dtype=np.float64)\r\n Mk = np.zeros(FODs.shape[0], dtype=np.float64)\r\n mk = np.zeros(FODs.shape[0], dtype=np.float64)\r\n invEverettVals = np.zeros(FODs.shape[0], dtype=np.float64)\r\n cnt = 0\r\n print('Inverting Model...')\r\n for valAlpha in np.linspace(-self.alpha0, self.alpha0, self.n - 1):\r\n for valBeta in np.linspace(-self.beta0, valAlpha, int((valAlpha - (-self.alpha0)) // self.width)):\r\n FODs[cnt, 0] = valAlpha\r\n FODs[cnt, 1] = valBeta\r\n # Reset and excite non inverted model with the FODs to get the grid Points of the inverse model\r\n # by dominant output extrema of the non inverted model\r\n self.setNegSatState()\r\n invEverettVals[cnt] = (1 / 2) * (valAlpha - valBeta)\r\n Mk[cnt] = self(valAlpha)\r\n mk[cnt] = self(valBeta)\r\n cnt += 1\r\n\r\n points = np.zeros((len(Mk), 2), 
dtype=np.float64)\r\n points[:, 1] = np.concatenate([Mk])\r\n points[:, 0] = np.concatenate([mk])\r\n Z = np.concatenate([invEverettVals])\r\n # Fit interpolator function on irregular grid using linear interpolation\r\n invEverettInterp = LinearNDInterpolator(points, Z, fill_value=0)\r\n\r\n # Set interpolator as everett function of the inverse model\r\n invModel.setEverettFunction(invEverettInterp)\r\n\r\n # return inverse model\r\n print('Model inversion successful !!!')\r\n return invModel", "title": "" }, { "docid": "df6ec85e55945866d6676c57bf7294c1", "score": "0.49933127", "text": "def update_V(Y, M, U, beta, omega, tau, lambda_v, r):\n kappa_y = ((Y - 0.5).t())[:,:,None].repeat(1,1,U.shape[1]) #kappa = shape Ls x N x K\n kappa_m = 0.5*(M + r)[:,:,None].repeat(1,1,beta.shape[1]) #kappa_m = shape Ls x L x K\n sigma_V = torch.empty(Y.shape[1], U.shape[1], U.shape[1]).to(device) #sigma_V = shape Ls x K x K\n u1 = U[:, :, None] #u1 = shape N x K x 1\n u2 = U[:, None, :] #u2 = shape N x 1 x K\n uut = torch.matmul(u1,u2) #uut = shape N x K x K\n omega_t = omega.t() #omega_t = shape Ls x N\n beta1 = beta[:, :, None] #beta1 = shape L x K x 1\n beta2 = beta[:, None, :] #beta2 = shape L x 1 x K\n betabetat = torch.matmul(beta1, beta2) #betabetat = shape L x K x K\n for i in range(Y.shape[1]):\n omega_t_i = (omega_t[i])[:, None, None].repeat(1, U.shape[1], U.shape[1]) #omega_i = shape N x K x K\n tau_i = (tau[i])[:, None, None].repeat(1, beta.shape[1], beta.shape[1]) #tau_i = shape L x K x K\n sigma_V[i] = (torch.sum(omega_t_i*uut, axis=0) + torch.sum(tau_i*betabetat, axis=0) + lambda_v*torch.eye(U.shape[1]).to(device)).inverse() #sigma_u[i] = shape K x K\n\n term1 = torch.sum(kappa_y*U, axis=1).squeeze() #term1 = shape Ls x K\n term2 = torch.sum(kappa_m*beta, axis=1).squeeze() #term2 = shape Ls x K\n V = torch.matmul(sigma_V, (term1 + term2)[:,:,None]).squeeze()\n \n return V", "title": "" } ]
e7b7cfb0d91468ec87cc607502d3d264
Parse specification in the specification YAML file.
[ { "docid": "1ddd55b5916bf21eab1a67ee8e2b71de", "score": "0.7328811", "text": "def read_specification(self):\n try:\n self._cfg_yaml = load(self._cfg_file)\n except YAMLError as err:\n raise PresentationError(msg=\"An error occurred while parsing the \"\n \"specification file.\",\n details=str(err))\n\n self._parse_env()\n self._parse_configuration()\n self._parse_input()\n self._parse_output()\n self._parse_static()\n self._parse_elements()\n\n logging.debug(\"Specification: \\n{}\".\n format(pformat(self._specification)))", "title": "" } ]
[ { "docid": "427c032eef2b4c90af30cbfcb501e46f", "score": "0.65131885", "text": "def fileToSpec(file):\n\n stream = open(file, 'r') if file is not None else sys.stdin\n return Specification(yaml.load(stream))", "title": "" }, { "docid": "77bdebc986d9400600856c6fb64388c2", "score": "0.6417013", "text": "def _parse_input(self):\n\n logging.info(\"Parsing specification file: input ...\")\n\n idx = self._get_type_index(\"input\")\n if idx is None:\n raise PresentationError(\"No data to process.\")\n\n try:\n for key, value in self._cfg_yaml[idx][\"general\"].items():\n self._specification[\"input\"][key] = value\n self._specification[\"input\"][\"builds\"] = dict()\n\n for job, builds in self._cfg_yaml[idx][\"builds\"].items():\n if builds:\n if isinstance(builds, dict):\n build_end = builds.get(\"end\", None)\n try:\n build_end = int(build_end)\n except ValueError:\n # defined as a range <start, build_type>\n build_end = self._get_build_number(job, build_end)\n builds = [x for x in range(builds[\"start\"], build_end+1)\n if x not in builds.get(\"skip\", list())]\n self._specification[\"input\"][\"builds\"][job] = list()\n for build in builds:\n self._specification[\"input\"][\"builds\"][job]. \\\n append({\"build\": build, \"status\": None})\n\n else:\n logging.warning(\"No build is defined for the job '{}'. \"\n \"Trying to continue without it.\".\n format(job))\n except KeyError:\n raise PresentationError(\"No data to process.\")\n\n logging.info(\"Done.\")", "title": "" }, { "docid": "9a026e4c2a5d2ea6e4e8efaf29a14b1d", "score": "0.6326856", "text": "def parse_spec_file(spec_fname):\n\n if not os.path.isfile(spec_fname):\n print(\"Decoy spec file {} does not exist\".format(spec_fname))\n sys.exit()\n \n open_in = open(spec_fname, 'r')\n read_in = open_in.readlines()\n open_in.close()\n\n ### DEFAULTS FROM MYSINGER - these are used if property not specified\n range_dict = {\"CHG_RANGES\": [ 0, 0, 0, 0, 0, 1, 2],\n \"HBD_RANGES\": [ 0, 0, 1, 1, 2, 2, 3],\n \"HBA_RANGES\": [ 0, 1, 2, 2, 3, 3, 4],\n \"RB_RANGES\": [ 1, 2, 2, 3, 3, 4, 5],\n \"MWT_RANGES\": [ 20, 35, 50, 65, 80, 100, 125],\n \"LOGP_RANGES\": [0.4, 0.8, 1.2, 1.8, 2.4, 3.0, 3.6]}\n\n for line in read_in:\n splitline = line.strip().split()\n if len(splitline) == 3:\n prop_type = splitline[0]\n if prop_type.lower()[0] == \"c\":\n chg_low = int(splitline[1])\n chg_high = int(splitline[2])\n\n chg_range = make_seven(chg_low, chg_high, \"CHG\")\n range_dict[\"CHG_RANGES\"] = chg_range\n\n elif prop_type.lower() == \"hbd\":\n hbd_low = int(splitline[1])\n hbd_high = int(splitline[2])\n\n hbd_range = make_seven(hbd_low, hbd_high, \"HBD\")\n range_dict[\"HBD_RANGES\"] = hbd_range\n\n elif prop_type.lower() == \"hba\":\n hba_low = int(splitline[1])\n hba_high = int(splitline[2])\n\n hba_range = make_seven(hba_low, hba_high, \"HBA\")\n range_dict[\"HBA_RANGES\"] = hba_range\n\n elif prop_type.lower() == \"rb\":\n rb_low = int(splitline[1])\n rb_high = int(splitline[2])\n\n rb_range = make_seven(rb_low, rb_high, \"RB\")\n range_dict[\"RB_RANGES\"] = rb_range\n\n elif prop_type.lower()[0] == \"m\":\n mwt_low = float(splitline[1])\n mwt_high = float(splitline[2])\n\n mwt_range = make_seven(mwt_low, mwt_high, \"MWT\")\n range_dict[\"MWT_RANGES\"] = mwt_range\n\n elif prop_type.lower()[0] == \"l\":\n logp_low = float(splitline[1])\n logp_high = float(splitline[2])\n\n logp_range = make_seven(logp_low, logp_high, \"LOGP\")\n range_dict[\"LOGP_RANGES\"] = logp_range\n else:\n print(\"WARNING: Unrecognized line in {}: {}\".format(spec_fname, 
line.strip()))\n\n print(range_dict)\n return(range_dict)", "title": "" }, { "docid": "28e5f05220e115b3d36741802debb28a", "score": "0.63003016", "text": "def _parse_configuration(self):\n\n logging.info(\"Parsing specification file: configuration ...\")\n\n idx = self._get_type_index(\"configuration\")\n if idx is None:\n logging.warning(\"No configuration information in the specification \"\n \"file.\")\n return None\n\n try:\n self._specification[\"configuration\"] = self._cfg_yaml[idx]\n\n except KeyError:\n raise PresentationError(\"No configuration defined.\")\n\n # Data sets: Replace ranges by lists\n for set_name, data_set in self.configuration[\"data-sets\"].items():\n if not isinstance(data_set, dict):\n continue\n for job, builds in data_set.items():\n if builds:\n if isinstance(builds, dict):\n build_end = builds.get(\"end\", None)\n try:\n build_end = int(build_end)\n except ValueError:\n # defined as a range <start, build_type>\n build_end = self._get_build_number(job, build_end)\n builds = [x for x in range(builds[\"start\"], build_end+1)\n if x not in builds.get(\"skip\", list())]\n self.configuration[\"data-sets\"][set_name][job] = builds\n elif isinstance(builds, list):\n for idx, item in enumerate(builds):\n try:\n builds[idx] = int(item)\n except ValueError:\n # defined as a range <build_type>\n builds[idx] = self._get_build_number(job, item)\n\n # Data sets: add sub-sets to sets (only one level):\n for set_name, data_set in self.configuration[\"data-sets\"].items():\n if isinstance(data_set, list):\n new_set = dict()\n for item in data_set:\n try:\n for key, val in self.configuration[\"data-sets\"][item].\\\n items():\n new_set[key] = val\n except KeyError:\n raise PresentationError(\n \"Data set {0} is not defined in \"\n \"the configuration section.\".format(item))\n self.configuration[\"data-sets\"][set_name] = new_set\n\n # Mapping table:\n mapping = None\n mapping_file_name = self._specification[\"configuration\"].\\\n get(\"mapping-file\", None)\n if mapping_file_name:\n logging.debug(\"Mapping file: '{0}'\".format(mapping_file_name))\n try:\n with open(mapping_file_name, 'r') as mfile:\n mapping = load(mfile)\n logging.debug(\"Loaded mapping table:\\n{0}\".format(mapping))\n except (YAMLError, IOError) as err:\n raise PresentationError(\n msg=\"An error occurred while parsing the mapping file \"\n \"'{0}'.\".format(mapping_file_name),\n details=repr(err))\n # Make sure everything is lowercase\n if mapping:\n self._specification[\"configuration\"][\"mapping\"] = \\\n {key.lower(): val.lower() for key, val in mapping.iteritems()}\n else:\n self._specification[\"configuration\"][\"mapping\"] = dict()\n\n # Ignore list:\n ignore = None\n ignore_list_name = self._specification[\"configuration\"].\\\n get(\"ignore-list\", None)\n if ignore_list_name:\n logging.debug(\"Ignore list file: '{0}'\".format(ignore_list_name))\n try:\n with open(ignore_list_name, 'r') as ifile:\n ignore = load(ifile)\n logging.debug(\"Loaded ignore list:\\n{0}\".format(ignore))\n except (YAMLError, IOError) as err:\n raise PresentationError(\n msg=\"An error occurred while parsing the ignore list file \"\n \"'{0}'.\".format(ignore_list_name),\n details=repr(err))\n # Make sure everything is lowercase\n if ignore:\n self._specification[\"configuration\"][\"ignore\"] = \\\n [item.lower() for item in ignore]\n else:\n self._specification[\"configuration\"][\"ignore\"] = list()\n\n logging.info(\"Done.\")", "title": "" }, { "docid": "36df6172ede354fa61bc934ad80c9dc3", "score": "0.6186652", "text": 
"def _parse_elements(self):\n\n logging.info(\"Parsing specification file: elements ...\")\n\n count = 1\n for element in self._cfg_yaml:\n try:\n element[\"output-file\"] = self._replace_tags(\n element[\"output-file\"],\n self._specification[\"environment\"][\"paths\"])\n except KeyError:\n pass\n\n try:\n element[\"input-file\"] = self._replace_tags(\n element[\"input-file\"],\n self._specification[\"environment\"][\"paths\"])\n except KeyError:\n pass\n\n # add data sets to the elements:\n if isinstance(element.get(\"data\", None), str):\n data_set = element[\"data\"]\n try:\n element[\"data\"] = self.configuration[\"data-sets\"][data_set]\n except KeyError:\n raise PresentationError(\"Data set {0} is not defined in \"\n \"the configuration section.\".\n format(data_set))\n\n if element[\"type\"] == \"table\":\n logging.info(\" {:3d} Processing a table ...\".format(count))\n try:\n element[\"template\"] = self._replace_tags(\n element[\"template\"],\n self._specification[\"environment\"][\"paths\"])\n except KeyError:\n pass\n\n # add data sets\n try:\n for item in (\"reference\", \"compare\"):\n if element.get(item, None):\n data_set = element[item].get(\"data\", None)\n if isinstance(data_set, str):\n element[item][\"data\"] = \\\n self.configuration[\"data-sets\"][data_set]\n\n if element.get(\"history\", None):\n for i in range(len(element[\"history\"])):\n data_set = element[\"history\"][i].get(\"data\", None)\n if isinstance(data_set, str):\n element[\"history\"][i][\"data\"] = \\\n self.configuration[\"data-sets\"][data_set]\n\n except KeyError:\n raise PresentationError(\"Wrong data set used in {0}.\".\n format(element.get(\"title\", \"\")))\n\n self._specification[\"tables\"].append(element)\n count += 1\n\n elif element[\"type\"] == \"plot\":\n logging.info(\" {:3d} Processing a plot ...\".format(count))\n\n # Add layout to the plots:\n layout = element[\"layout\"].get(\"layout\", None)\n if layout is not None:\n element[\"layout\"].pop(\"layout\")\n try:\n for key, val in (self.configuration[\"plot-layouts\"]\n [layout].items()):\n element[\"layout\"][key] = val\n except KeyError:\n raise PresentationError(\"Layout {0} is not defined in \"\n \"the configuration section.\".\n format(layout))\n self._specification[\"plots\"].append(element)\n count += 1\n\n elif element[\"type\"] == \"file\":\n logging.info(\" {:3d} Processing a file ...\".format(count))\n try:\n element[\"dir-tables\"] = self._replace_tags(\n element[\"dir-tables\"],\n self._specification[\"environment\"][\"paths\"])\n except KeyError:\n pass\n self._specification[\"files\"].append(element)\n count += 1\n\n elif element[\"type\"] == \"cpta\":\n logging.info(\" {:3d} Processing Continuous Performance \"\n \"Trending and Analysis ...\".format(count))\n\n for plot in element[\"plots\"]:\n # Add layout to the plots:\n layout = plot.get(\"layout\", None)\n if layout is not None:\n try:\n plot[\"layout\"] = \\\n self.configuration[\"plot-layouts\"][layout]\n except KeyError:\n raise PresentationError(\n \"Layout {0} is not defined in the \"\n \"configuration section.\".format(layout))\n # Add data sets:\n if isinstance(plot.get(\"data\", None), str):\n data_set = plot[\"data\"]\n try:\n plot[\"data\"] = \\\n self.configuration[\"data-sets\"][data_set]\n except KeyError:\n raise PresentationError(\n \"Data set {0} is not defined in \"\n \"the configuration section.\".\n format(data_set))\n self._specification[\"cpta\"] = element\n count += 1\n\n logging.info(\"Done.\")", "title": "" }, { "docid": 
"0a7de6246f97717a75532b255f341b4b", "score": "0.6006543", "text": "def validate_spec_file(args):\n org_spec = yaml.load(open(args['--spec-file']).read())\n spec_str = ['master_account_id', 'default_policy', 'default_ou' ]\n for s in spec_str:\n if not s in org_spec:\n msg = \"Invalid spec-file: missing required param '%s'.\" % s\n raise RuntimeError(msg)\n if not isinstance(org_spec[s], str):\n msg = \"Invalid spec-file: '%s' must be type 'str'.\" % s\n raise RuntimeError(msg)\n spec_list = ['organizational_unit_spec', 'policy_spec']\n for a in spec_list:\n if not a in org_spec:\n msg = \"Invalid spec-file: missing required param '%s'.\" % a\n raise RuntimeError(msg)\n if not isinstance(org_spec[a], list):\n msg = \"Invalid spec-file: '%s' must be type 'list'.\" % a\n raise RuntimeError(msg)\n\n # Validate this policy_spec is properly formed.\n err_prefix = \"Malformed policy in spec-file:\"\n for p_spec in org_spec['policy_spec']:\n if not isinstance(p_spec, dict):\n msg = \"%s not a dictionary: '%s'\" % (err_prefix, str(p_spec))\n raise RuntimeError(msg)\n if not 'Name' in p_spec:\n msg = \"%s missing 'Name' key: '%s'\" % (err_prefix, str(p_spec))\n raise RuntimeError(msg)\n # dont manage default policy\n if p_spec['Name'] == org_spec['default_policy']:\n org_spec['policy_spec'].remove(p_spec)\n break\n if not ensure_absent(p_spec):\n required_keys = ['Description', 'Effect', 'Actions']\n for key in required_keys:\n if not key in p_spec:\n msg = (\"%s '%s': missing required param '%s'\" %\n (err_prefix, p_spec['Name'], key))\n raise RuntimeError(msg)\n if not isinstance(p_spec['Actions'], list):\n msg = (\"%s '%s': 'Actions' must be type list.\" %\n (err_prefix, p_spec['Name']))\n raise RuntimeError(msg)\n\n # recursive function to validate ou_spec are properly formed.\n def validate_ou_spec(ou_spec_list):\n global account_map, ou_list\n err_prefix = \"Malformed OU in spec-file:\"\n for ou_spec in ou_spec_list:\n if not isinstance(ou_spec, dict):\n msg = err_prefix + \"not a dictionary: '%s'\" % str(ou_spec)\n raise RuntimeError(msg)\n if not 'Name' in ou_spec:\n msg = err_prefix + \"missing 'Name' key near: '%s'\" % str(ou_spec)\n raise RuntimeError(msg)\n ou_list.append(ou_spec['Name'])\n # check for children OUs. 
recurse before running other tests\n if 'Child_OU' in ou_spec and isinstance(ou_spec['Child_OU'], list):\n validate_ou_spec(ou_spec['Child_OU'])\n # check for optional keys\n optional_keys = ['Accounts', 'Policies', 'Child_OU']\n for key in optional_keys:\n if key in ou_spec and ou_spec[key]:\n if ensure_absent(ou_spec):\n msg = (\"%s OU '%s' is 'absent', but '%s' is populated.\" %\n (err_prefix, ou_spec['Name'], key))\n raise RuntimeError(msg)\n if not isinstance(ou_spec[key], list):\n msg = (\"%s OU '%s': value of '%s' must be a list.\" %\n (err_prefix, ou_spec['Name'], key))\n raise RuntimeError(msg)\n\n # build mapping of accounts to ou_names\n # make sure accounts are unique across org\n if key == 'Accounts' and key in ou_spec and ou_spec['Accounts']:\n for account in ou_spec['Accounts']:\n if account in account_map:\n msg = (\"%s account %s set in multiple OU: %s, %s\" %\n ( err_prefix, account,\n account_map[account], ou_spec['Name']))\n raise RuntimeError(msg)\n else:\n account_map[account] = ou_spec['Name']\n\n # initialize lists of managed resources\n global account_map, ou_list\n account_map = {}\n ou_list = []\n policy_list = map(lambda p: p['Name'], org_spec['policy_spec'])\n # pretend we manage default_policy\n policy_list.append(org_spec['default_policy'])\n\n # call recursive function to validate OUs.\n # side effect: populate account_map, ou_list.\n validate_ou_spec(org_spec['organizational_unit_spec'])\n org_spec['managed'] = dict(\n accounts = account_map.keys(),\n ou = ou_list,\n policies = policy_list)\n return org_spec", "title": "" }, { "docid": "ecdc94daebfc47f5339babd2b09fa57e", "score": "0.5927044", "text": "def _parse_spec_line(line):\n fields = line.split(\"\\t\")\n\n if len(fields) < 9:\n raise Exception(\"Invalid SPEC line encountered: \" + line)\n\n title = fields[1]\n sequences = fields[3].split(\",\")\n prec_mz = float(fields[4])\n # charge is stored as float just in case\n charge = float(fields[5])\n taxids = fields[6].split(\",\")\n ptm_strings = fields[7].split(\";\")\n similarity_score = float(fields[8])\n\n json_properties = fields[9] if len(fields) >= 10 else \"{}\"\n\n if title == \"PXD000443;PRIDE_Exp_Complete_Ac_31019.xml;spectrum=377050\":\n pass\n\n # make sure sequences and ptms have the same length\n if len(sequences) != len(ptm_strings):\n raise Exception(\"Invalid SPEC line encountered: different number of sequences and PTMs defined: \" +\n line)\n\n psms = ClusteringParser._create_psms(sequences, ptm_strings)\n\n return objects.Spectrum(title, prec_mz, charge, taxids, psms, similarity_score, json_properties)", "title": "" }, { "docid": "0c05012d3390f40c2450b37aa259b11e", "score": "0.58171475", "text": "def load_format_specifications(self, spec_files):\n self.ddef = {} # ddef == \"data definitions\", loaded from format specification files\n fs_var = \"fs\" # variable that must be defined in format specification files\n if not spec_files:\n self.load_specifications_from_h5_file(fs_var)\n for file_name in spec_files:\n path = self.get_spec_file_path(file_name)\n # with file(path) as f:\n with open(path) as f:\n file_contents = f.read()\n try:\n # use ast.literal_eval rather than eval to prevent potential security problems\n # vals = eval(file_contents)\n vals = ast.literal_eval(file_contents)\n except Exception as e:\n print ((\"** Invalid format in specification file '%s' (should \"\n \"be mostly JSON)\") % file_name)\n print (\"Error is: %s\" % e)\n error_exit()\n patch_json_vals(vals, {\"float('NaN')\": float('NaN')})\n if fs_var not in 
vals:\n raise Exception(\"Variable '%s' not defined in specification file '%s'\" %\n (fs_var, file_name))\n # get definitions that are in variable fs_var\n ddefin = vals[fs_var]\n # validate that components of definition are present\n errors = self.validate_fs(ddefin)\n if errors:\n print ((\"Specification file '%s', has\"\n \" the following errors:\\n%s\" % (file_name, errors)))\n error_exit()\n # save map from file name to name spaces stored in that file\n name_spaces = list(ddefin) #py3, was ddefin.keys() # e.g. \"core\". Usually only one namespace, but could be more\n self.fsname2ns[file_name] = name_spaces\n # find_links.add_item(self.fsname2ns, file_name, name_spaces)\n # also save map from name space to file name\n for ns in name_spaces:\n ddefin[ns]['file_name'] = file_name\n # Merge definition in with others\n # todo: check to be sure name spaces not defined more than once\n self.ddef.update(ddefin)\n if not self.ddef:\n raise Exception(\"No file format specifications were provided. At least one\"\n \" is required.\")\n if self.default_ns not in self.ddef.keys():\n raise Exception((\"Default name space ('%s') was not defined in any format \"\n \"specification file\") % self.default_ns)", "title": "" }, { "docid": "0244b244ecc037e48b3748b223f47613", "score": "0.5809865", "text": "def test_from_specfile(self):\n validator = SchemaValidator.from_specfile(self.specfile, \"iris\")\n self.assertKwargsEqual(validator.get_parser_args(),\n self.ideal_iris_parser_args)\n validator = SchemaValidator.from_specfile(self.specfile,\n \"person_activity\")\n self.assertKwargsEqual(validator.get_parser_args(),\n self.ideal_activity_parser_args)", "title": "" }, { "docid": "487c2b8589bf4e7f10f85860babd8661", "score": "0.57950497", "text": "def load_metadata_specfile(filename, program_options=None):\n print_verbose(\"Load metadata spec file: %s\"%(filename), program_options)\n\n specfile_data = []\n\n ifp = open(filename, \"r\")\n\n for line in ifp:\n line = line.strip()\n\n if len(line)==0:\n continue\n\n if line[0] == \"#\":\n continue\n\n # Get the field type.\n line_type, line_str = line.split(\" \", 1)\n line_type = line_type.upper()\n\n # Prune whitespaces\n line_type = line_type.strip()\n\n if line_type not in ['R', 'D', 'N', 'DO']:\n raise ValueError, \"Bad value in column 0. 
Allowable values are 'R', 'D', 'N', 'DO' but I got a '%s'\"%(line[0])\n\n line_contents = [x.strip() for x in line_str.strip().split(',')]\n\n print_debug(\"%s: %s\"%(line_type, line_contents), program_options)\n\n entry = { 'type': line_type }\n if line_type == 'R':\n entry['property_name'] = line_contents[0]\n entry['allowable_value'] = line_contents[1]\n elif line_type == 'D':\n entry['property_name'] = line_contents[0]\n entry['dependency_name'] = line_contents[1]\n entry['dependency_value'] = line_contents[2]\n entry['allowable_value'] = line_contents[3]\n elif line_type == 'N':\n entry['property_name'] = line_contents[0]\n elif line_type == 'DO':\n entry['property_name'] = line_contents[0]\n else:\n raise ValueError, \"Unknown line_type in spec file.\"\n\n specfile_data.append(entry)\n\n ifp.close()\n\n print_verbose(\"Load metadata spec file: Complete\", program_options)\n\n return specfile_data", "title": "" }, { "docid": "7916e6f2ff1a65af03b1ab2e0d529af4", "score": "0.5730861", "text": "def test_artifacts_yaml_can_parse():\n load_artifacts()", "title": "" }, { "docid": "fe6d75bf3c2998d90f23604cbcebd0ac", "score": "0.5720248", "text": "def parse(cls, contents):\n return yaml.load(\n contents\n )", "title": "" }, { "docid": "e338663eef95946e6da26627f6bc6079", "score": "0.56948483", "text": "def _parse_(self):\n self.data = {}\n self.file_txt = '\\n'.join((i for i in self.file_txt.split(\"\\n\") if i))\n\n self.get_sects()\n\n for sect in self.sect_txt:\n self.data[sect] = {}\n for line in self.sect_txt[sect]:\n splitter = line.split(\"=\")\n vals = '='.join(splitter[1:]).strip().split()\n if len(vals) == 1:\n self.data[sect][splitter[0].strip()] = type_check.eval_type(vals[0])\n else:\n self.data[sect][splitter[0].strip()] = [type_check.eval_type(i)\n for i in vals]", "title": "" }, { "docid": "5e5a16f63079fe6e5b411857a3e22dc7", "score": "0.56627905", "text": "def parse_model_yaml_file(path, f):\n return parse_model_group_list(path, yaml_load(f))", "title": "" }, { "docid": "e2528fdb1d3bf7f2e0cba428a91fa986", "score": "0.566061", "text": "def _read_spec(self, workflow, start_node):\r\n type = start_node.nodeName.lower()\r\n name = start_node.getAttribute('name').lower()\r\n assert type in _spec_tags\r\n\r\n if type == 'concurrence':\r\n return self._read_concurrence(workflow, start_node)\r\n elif type == 'if':\r\n return self._read_if(workflow, start_node)\r\n elif type == 'sequence':\r\n return self._read_sequence(workflow, start_node)\r\n elif type == 'task':\r\n spec = specs.Simple(workflow, name)\r\n return spec, spec\r\n else:\r\n print(\"Unknown type:\", type)\r\n assert False # Unknown tag.\r", "title": "" }, { "docid": "797828563ae99b8f917ee35f58400f57", "score": "0.56447786", "text": "def parse_spec(self):\n if self.main_win.specfile is None:\n return\n if not self.main_win.is_exp_exists():\n # do not parse on initial assignment\n return\n try:\n last_scan = int(self.main_win.scan.split('-')[-1])\n detector_name, roi = get_det_from_spec(self.main_win.specfile, last_scan)\n self.roi.setText(str(roi))\n self.roi.setStyleSheet('color: blue')\n\n if detector_name is not None:\n self.detector.setText(str(detector_name)[:-1])\n self.detector.setStyleSheet('color: blue')\n except Exception as e:\n print(str(e))\n msg_window ('error parsing spec')", "title": "" }, { "docid": "80f224d82b597402f73bc55bdf414d97", "score": "0.55994797", "text": "def _parse_env(self):\n\n logging.info(\"Parsing specification file: environment ...\")\n\n idx = self._get_type_index(\"environment\")\n if 
idx is None:\n return None\n\n try:\n self._specification[\"environment\"][\"configuration\"] = \\\n self._cfg_yaml[idx][\"configuration\"]\n except KeyError:\n self._specification[\"environment\"][\"configuration\"] = None\n\n try:\n self._specification[\"environment\"][\"paths\"] = \\\n self._replace_tags(self._cfg_yaml[idx][\"paths\"])\n except KeyError:\n self._specification[\"environment\"][\"paths\"] = None\n\n try:\n self._specification[\"environment\"][\"urls\"] = \\\n self._cfg_yaml[idx][\"urls\"]\n except KeyError:\n self._specification[\"environment\"][\"urls\"] = None\n\n try:\n self._specification[\"environment\"][\"make-dirs\"] = \\\n self._cfg_yaml[idx][\"make-dirs\"]\n except KeyError:\n self._specification[\"environment\"][\"make-dirs\"] = None\n\n try:\n self._specification[\"environment\"][\"remove-dirs\"] = \\\n self._cfg_yaml[idx][\"remove-dirs\"]\n except KeyError:\n self._specification[\"environment\"][\"remove-dirs\"] = None\n\n try:\n self._specification[\"environment\"][\"build-dirs\"] = \\\n self._cfg_yaml[idx][\"build-dirs\"]\n except KeyError:\n self._specification[\"environment\"][\"build-dirs\"] = None\n\n try:\n self._specification[\"environment\"][\"testbeds\"] = \\\n self._cfg_yaml[idx][\"testbeds\"]\n except KeyError:\n self._specification[\"environment\"][\"testbeds\"] = None\n\n logging.info(\"Done.\")", "title": "" }, { "docid": "0bfb263f68c4a9462b7195b9fdd83d09", "score": "0.5591919", "text": "def _ReadSpecificationFile(self, path):\n specification_store = specification.FormatSpecificationStore()\n\n with io.open(\n path, 'rt', encoding=self._SPECIFICATION_FILE_ENCODING) as file_object:\n for line in file_object.readlines():\n line = line.strip()\n if not line or line.startswith('#'):\n continue\n\n try:\n identifier, offset, pattern = line.split()\n except ValueError:\n logger.error(f'[skipping] invalid line: {line:s}')\n continue\n\n try:\n offset = int(offset, 10)\n except ValueError:\n logger.error(f'[skipping] invalid offset in line: {line:s}')\n continue\n\n try:\n # TODO: find another way to do this that doesn't use an undocumented\n # API.\n pattern = codecs.escape_decode(pattern)[0]\n # ValueError is raised when the patterns contains invalid escaped\n # characters, such as \"\\xg1\".\n except ValueError:\n logger.error(f'[skipping] invalid pattern in line: {line:s}')\n continue\n\n format_specification = specification.FormatSpecification(identifier)\n format_specification.AddNewSignature(pattern, offset=offset)\n specification_store.AddSpecification(format_specification)\n\n return specification_store", "title": "" }, { "docid": "3c32e5b75d8bd84bb73d4290d6f19b15", "score": "0.55415946", "text": "def parse(self):\n self.x = yaml.load(open(self.interface, 'r'))", "title": "" }, { "docid": "f61eaa55e699062443359d0b55de7615", "score": "0.55018", "text": "def ParseYaml(filename):\n print('Loading yaml file to validate with schema')\n entity_instance_block = ''\n found_entities = 0\n all_content = {}\n with open(filename) as file:\n for line in file:\n if _ENTITY_INSTANCE_PATTERN.match(line):\n # wait until entity instance block reaches _ENTITIES_PER_BATCH\n if found_entities > _ENTITIES_PER_BATCH:\n _ValidateBlock(entity_instance_block, all_content)\n entity_instance_block = ''\n found_entities = 0\n found_entities += 1\n else:\n if _IGNORE_PATTERN.match(line):\n continue\n entity_instance_block = entity_instance_block + line\n\n # handle the singleton case\n if found_entities>0:\n _ValidateBlock(entity_instance_block, all_content)\n\n return 
all_content", "title": "" }, { "docid": "d054fb58a7531b09e3a96e20a334bb84", "score": "0.5492737", "text": "def parse_spec(spec):\n m = _spec_pat.match(spec)\n if m is None:\n return None\n name = m.group(1)\n return ParsedPipSpec(name=name)", "title": "" }, { "docid": "2b78e9531952691f38a3a6b016eb026c", "score": "0.54837406", "text": "def test_validator_with_specfile_spec(self):\n # This is necessary because the validator might have to write\n # specifications to the dictionary.\n validator = SchemaValidator(specification=self.basespecs['iris'],\n specfile=self.specfile)\n self.assertFalse(validator.is_multifile)\n validated_parser_args = validator.get_parser_args()\n self.assertKwargsEqual(validated_parser_args,\n self.ideal_iris_parser_args)", "title": "" }, { "docid": "490838ab5cab7a50c6bf7d90af189cdc", "score": "0.545403", "text": "def _parseYAML(self, yaml_file):\n with open(yaml_file, 'r') as stream:\n cfg = yaml.safe_load(stream)\n return cfg", "title": "" }, { "docid": "bda5b7a1e7a26d5d2c1f1225162624a0", "score": "0.5421811", "text": "def test_parse_file(self):\n reader = Reader(self.site, self.config)\n\n # test input - meta[\"foo\"] = \"bar\" and raw = \"baz\"\n raw = \"\"\"\n foo: bar\n ---\n baz\"\"\"\n\n reader.parse_file(raw)\n entity = self.site.entities[0]\n\n self.assertEqual(entity.meta[\"foo\"], \"bar\")\n self.assertEqual(entity.raw, \"baz\")", "title": "" }, { "docid": "fc743d424f3d43ae0e9000764506aa16", "score": "0.5411125", "text": "def load_design(filename):\n try:\n # Load data\n statements = document.parseFile(filename)\n \n except ParseException as e:\n print()\n print(\"Parsing error in template:\", filename)\n print(e)\n sys.exit(1)\n \n # Build data\n spec = Spec()\n for stat in statements:\n #print list(stat)\n if stat[0] == struct:\n spec.add_structure(*stat[1:])\n elif stat[0] == seq:\n spec.add_sequence(*stat[1:])\n elif stat[0] == app:\n spec.add_apply(*stat[1:])\n return spec", "title": "" }, { "docid": "0213178d6e27ca19ebbbf06910478b8e", "score": "0.54088724", "text": "def parse(self, fileContents):\n pass", "title": "" }, { "docid": "c81090d14f90c3ee819ed2ce7417646c", "score": "0.5405577", "text": "def parse_file(self):\n with open(sys.argv[1], \"rb\") as file_:\n\n # Parse the file header\n self._parse_file_header(file_)\n\n # Parse the sections\n while True:\n # get type and length info\n section_type_nr, section_length = self._parse_tld(file_)\n\n # Check if we have reached the end of the file\n if not section_type_nr or not section_length:\n break\n\n # Get table name and ticks per microsecond\n section_length, table_name = \\\n self._parse_section_header(file_,\n section_type_nr, section_length)\n\n # Read in the remainder of the section\n binary_content = file_.read(section_length)\n\n # String Table Section\n if section_type_nr == 1:\n self._parse_string_table(binary_content, section_length)\n\n # Symbol Table Section\n elif section_type_nr == 2:\n self._parse_symbol_table(binary_content, section_length,\n table_name)\n # Event Definition Section\n elif section_type_nr == 3:\n self._parse_event_definition_section(binary_content,\n section_length,\n table_name)\n # Track Definition Section\n elif section_type_nr == 4:\n self._parse_track_definition_section(binary_content,\n section_length,\n table_name)\n # Event Section\n elif section_type_nr == 5:\n self._parse_event_section(binary_content,\n section_length,\n table_name)\n # If the given number is not in the range 1-5\n else:\n print(\"Invalid Section number {}\".format(\n section_type_nr))", 
"title": "" }, { "docid": "1c3d936a54a2a4e87d03693794b4cd3e", "score": "0.5396018", "text": "def parse(yaml_fn):\n with open(yaml_fn) as fh:\n doc = yaml.safe_load(fh)\n tf_rules = []\n topic_rules = []\n\n if 'topic' in doc:\n topic_rules = RenameRule.__make_rules__(doc['topic'])\n if 'tf' in doc:\n tf_rules = RenameRule.__make_rules__(doc['tf'])\n return topic_rules, tf_rules", "title": "" }, { "docid": "7980ca34bef860d703ccf918df102c16", "score": "0.5382169", "text": "def parse(self):\n\n if self.filename is not None:\n try:\n f = open(self.filename)\n except IOError:\n print \"I/O error\"\n except:\n import sys\n print \"Unexpected error:\", sys.exc_info()[0]\n raise\n\n lines = f.readlines() # Returns list of lines.\n f.close()\n\n elif self.config is not None:\n lines = self.config.splitlines()\n\n else:\n print \"Error: You haven't specify any config file\"\n\n lines = self.__clearLines(lines)\n marks = self.__getMarks(lines)\n\n for m in marks:\n block = self.__addBlock(m[0], lines[m[1]:m[2]])", "title": "" }, { "docid": "c065120933fc8fa0f41f3ea870f94b02", "score": "0.53653294", "text": "def __parse(self): \n parser = argparse.ArgumentParser(prog=release.appname, description=release.description)\n\n parser.add_argument(\"-c\", \"--c\", nargs=1, required=True, help=\"configuration file of the analysis\") \n namespace = parser.parse_args()\n configFile = namespace.c\n\n try:\n self.__parseConfigFile(configFile[0])\n except Exception, e:\n self._logger.fatal(\"An error occured while parsing the configuration file: {0}\".format(e))", "title": "" }, { "docid": "7cd0d603f20b03e542d230eadbf80401", "score": "0.53592193", "text": "def _parse_output(self):\n\n logging.info(\"Parsing specification file: output ...\")\n\n idx = self._get_type_index(\"output\")\n if idx is None:\n raise PresentationError(\"No output defined.\")\n\n try:\n self._specification[\"output\"] = self._cfg_yaml[idx]\n except (KeyError, IndexError):\n raise PresentationError(\"No output defined.\")\n\n logging.info(\"Done.\")", "title": "" }, { "docid": "5e407f985ca8da3e3b1e49127757009b", "score": "0.5328679", "text": "def parse(spec: str) -> 'TagMatcher':\n m = re.match(r\"(\\w+|\\*)/@([a-zA-Z_\\-]+)={(\\w+)}\", spec)\n if m:\n return TagMatcher(m.group(1), m.group(2), m.group(3))\n else:\n raise click.BadParameter(\"Invalid tag spec: %s\" % spec)", "title": "" }, { "docid": "d01a0e83b22091986a27a5de62ed3d2b", "score": "0.53154314", "text": "def init():\n data = {}\n\n data['blocks'] = []\n \n with open(\"specification.yml\", \"w\") as yaml_file:\n yaml_file.write(yaml.dump(data))", "title": "" }, { "docid": "cc5d4fcc2569d537bbe5178611b57870", "score": "0.53123456", "text": "def parse_ic(fname):\n with open(fname, \"r\") as stream:\n try:\n dat = yaml.load(stream, Loader=yaml.FullLoader)\n u0 = float(\n dat[\"realms\"][0][\"initial_conditions\"][0][\"value\"][\"velocity\"][0]\n )\n rho0 = float(\n dat[\"realms\"][0][\"material_properties\"][\"specifications\"][0][\"value\"]\n )\n mu = float(\n dat[\"realms\"][0][\"material_properties\"][\"specifications\"][1][\"value\"]\n )\n\n return u0, rho0, mu\n\n except yaml.YAMLError as exc:\n print(exc)", "title": "" }, { "docid": "08cad11f967bac1aaa6cb6f58abe753a", "score": "0.53030276", "text": "def testSetWithParse(self):\n json_spec = ('{\"sort_by_sequence\": true, \"for_log\": true, '\n '\"warning_ranges\": {\"ranges\": [[0, 10]]}, \"name\": null, '\n '\"normal_ranges\": {\"ranges\": [[1, 3]], \"values\": '\n '[3, 20, 444]}}')\n\n expected_warning_ranges = [[0, 10]]\n 
expected_normal_ranges = [[1, 3], set({3, 444, 20})]\n\n parsed_spec = base_check.ParseCheckSpecs(json_spec)\n\n self.assertEqual(parsed_spec['warning_ranges'], expected_warning_ranges)\n self.assertEqual(parsed_spec['normal_ranges'], expected_normal_ranges)", "title": "" }, { "docid": "4479f62df79682d7daa6db1a2cb5fa7c", "score": "0.5292875", "text": "def parse_ic(fname):\n with open(fname, \"r\") as stream:\n try:\n dat = yaml.load(stream)\n u0 = float(\n dat[\"realms\"][0][\"initial_conditions\"][0][\"value\"][\"velocity\"][0]\n )\n rho0 = float(\n dat[\"realms\"][0][\"material_properties\"][\"specifications\"][0][\"value\"]\n )\n mu = float(\n dat[\"realms\"][0][\"material_properties\"][\"specifications\"][1][\"value\"]\n )\n\n return u0, rho0, mu\n\n except yaml.YAMLError as exc:\n print(exc)", "title": "" }, { "docid": "d7c48a2293e6d4cb8089ff5585eed295", "score": "0.5283069", "text": "def parse(self, input_file):", "title": "" }, { "docid": "d9db05239a0f8eee17603e4ebf332c2e", "score": "0.52635175", "text": "def parse(self, requires_cfg=True):\n self._parse_default()\n self._parse_config(requires_cfg)\n self._parse_env()", "title": "" }, { "docid": "469160ff908b24a4a077bff8e6ee291f", "score": "0.5252595", "text": "def parse(self):\n scope = self.scope_default\n for i, s in enumerate(self.src_iter):\n if not s.strip():\n continue\n\n t = self.parse_stage(s, i)\n if t is not False:\n scope = t\n if scope not in self.stages:\n self.stages[scope] = []\n continue\n\n t = self.parse_command(s, i)\n if t is not False:\n continue\n\n t = self.parse_element(s, i)\n if t is not False:\n self.stages[scope].append(t)\n continue\n\n raise TReaderWrongException(f\"unexpected expression: something wrong\")", "title": "" }, { "docid": "404a42d10333d0ca061ffd72db2c4457", "score": "0.5250197", "text": "def parse_compound_yaml_file(path, f):\n\n return parse_compound_list(path, yaml_load(f))", "title": "" }, { "docid": "159a00b0c1a904f15d449f6a66ea6d76", "score": "0.52455324", "text": "def parse_spec(self):\n if self.main_win.specfile is None:\n return\n if not self.main_win.is_exp_exists():\n # do not parse on initial assignment\n return\n try:\n last_scan = int(self.main_win.scan.split('-')[-1])\n delta, gamma, theta, phi, chi, scanmot, scanmot_del, detdist, detector_name, energy = parse_spec(self.main_win.specfile, last_scan)\n if energy is not None:\n self.energy.setText(str(energy))\n self.energy.setStyleSheet('color: blue')\n if delta is not None:\n self.delta.setText(str(delta))\n self.delta.setStyleSheet('color: blue')\n if gamma is not None:\n self.gamma.setText(str(gamma))\n self.gamma.setStyleSheet('color: blue')\n if theta is not None:\n self.theta.setText(str(theta))\n self.theta.setStyleSheet('color: blue')\n if chi is not None:\n self.chi.setText(str(chi))\n self.chi.setStyleSheet('color: blue')\n if phi is not None:\n self.phi.setText(str(phi))\n self.phi.setStyleSheet('color: blue')\n if detdist is not None:\n self.detdist.setText(str(detdist))\n self.detdist.setStyleSheet('color: blue')\n if scanmot is not None:\n self.scanmot.setText(str(scanmot))\n self.scanmot.setStyleSheet('color: blue')\n if scanmot_del is not None:\n self.scanmot_del.setText(str(scanmot_del))\n self.scanmot_del.setStyleSheet('color: blue')\n if detector_name is not None:\n self.detector.setText(str(detector_name)[:-1])\n self.detector.setStyleSheet('color: blue')\n except Exception as e:\n print(str(e))\n msg_window ('error parsing spec')", "title": "" }, { "docid": "628c79ba16fb399af59a09033c4dc153", "score": 
"0.52388555", "text": "def parse(self, filename):\n pass", "title": "" }, { "docid": "ef751d4a7b1d3ec54255ea51560d4a23", "score": "0.52325964", "text": "def parse(self, filepath, content):\n try:\n parsed = yaml.load(content, Loader=yaml.FullLoader)\n except yaml.YAMLError as exc:\n msg = \"No YAML object could be decoded from file: {}\\n{}\"\n raise SettingsBackendError(msg.format(filepath, exc))\n return parsed", "title": "" }, { "docid": "fa23c769faf49998d7c4d88af1261951", "score": "0.5232148", "text": "def parse(self):\n self._clean_up_lines() # Remove comments, blank lines\n self._split_block_kw() # Extract blocks from the lines\n self._parse_keywords() # Parse the key, value pair", "title": "" }, { "docid": "11f2117e03e08d212edd0d1f3d3d2528", "score": "0.522824", "text": "def load_specification(cls, path: Text) -> Dict[Text, Any]:\n metadata_path = os.path.join(path, \"domain.json\")\n\n return json.loads(rasa.shared.utils.io.read_file(metadata_path))", "title": "" }, { "docid": "8a787f2c62c41668a6f772f10aa0514a", "score": "0.5226857", "text": "def validate_yaml_semantics(\n yaml_config: Union[Dict, str, Path],\n path_prefix: Union[None, str, Path] = None\n):\n if not path_prefix:\n if isinstance(yaml_config, (str, Path)):\n path_prefix = os.path.dirname(str(yaml_config))\n else:\n path_prefix = \"\"\n\n yaml_config = load_yaml(yaml_config)\n\n def _check_file(_filename: str, _field: str):\n if not os.path.isfile(_filename):\n raise AssertionError(f\"File '{_filename}' provided as '{_field}' \"\n \"does not exist.\")\n\n # Handles both a single parameter file, and a parameter file that has been\n # split into multiple subset files.\n for parameter_subset_file in (\n list(np.array(yaml_config[PARAMETER_FILE]).flat)):\n _check_file(\n os.path.join(path_prefix, parameter_subset_file),\n parameter_subset_file\n )\n\n for problem_config in yaml_config[PROBLEMS]:\n for field in [SBML_FILES, CONDITION_FILES, MEASUREMENT_FILES,\n VISUALIZATION_FILES, OBSERVABLE_FILES]:\n if field in problem_config:\n for filename in problem_config[field]:\n _check_file(os.path.join(path_prefix, filename), field)", "title": "" }, { "docid": "572e4fe9fc59b66310ff05365e59755a", "score": "0.52156025", "text": "def test_parser_examples(script_runner, specification):\n ret = script_runner.run('spag_cli', '-p', specification, '-v', '-d', '-t')\n assert ret.returncode == 0\n assert ret.stderr == ''\n assert ret.stdout == ''", "title": "" }, { "docid": "99d38a4f1eb73763e2a7af49ad55b817", "score": "0.5210972", "text": "def test_parse_test_definition():\n test_definitions = yaml.safe_load(\n \"\"\"\n - name: sample_test\n working_dir: sample_dir\n frequency: nightly\n team: sample\n cluster:\n byod:\n type: gpu\n cluster_compute: compute.yaml\n run:\n timeout: 100\n script: python script.py\n variations:\n - __suffix__: aws\n - __suffix__: gce\n cluster:\n cluster_compute: compute_gce.yaml\n \"\"\"\n )\n # Check that parsing returns two tests, one for each variation (aws and gce). 
Check\n # that both tests are valid, and their fields are populated correctly\n tests = parse_test_definition(test_definitions)\n aws_test = tests[0]\n gce_test = tests[1]\n schema = load_schema_file()\n assert not validate_test(aws_test, schema)\n assert not validate_test(gce_test, schema)\n assert aws_test[\"name\"] == \"sample_test.aws\"\n assert gce_test[\"cluster\"][\"cluster_compute\"] == \"compute_gce.yaml\"\n assert gce_test[\"cluster\"][\"byod\"][\"type\"] == \"gpu\"\n invalid_test_definition = test_definitions[0]\n # Intentionally make the test definition invalid by create an empty 'variations'\n # field. Check that the parser throws exception at runtime\n invalid_test_definition[\"variations\"] = []\n with pytest.raises(ReleaseTestConfigError):\n parse_test_definition([invalid_test_definition])\n # Intentionally make the test definition invalid by making one 'variation' entry\n # missing the __suffix__ entry. Check that the parser throws exception at runtime\n invalid_test_definition[\"variations\"] = [{\"__suffix__\": \"aws\"}, {}]\n with pytest.raises(ReleaseTestConfigError):\n parse_test_definition([invalid_test_definition])", "title": "" }, { "docid": "952c860e1b6bfd2ce1588d051b12b1a9", "score": "0.5198008", "text": "def _parse_metadata(raw_yaml):\n meta = yaml.load(raw_yaml)\n version = meta.get('version', '0.0.0')\n try:\n version = tuple(int(x) for x in version.split('.'))\n except:\n version = (0, 0, 0)\n meta['version'] = version\n return meta", "title": "" }, { "docid": "28b990755949fe8a3333f371de29111b", "score": "0.5191729", "text": "def spexfile_parse(parser_name, contents, out_dict=None):\n if parser_name == \"project\":\n return project_parser(parser_name, contents, out_dict)\n elif parser_name == \"gw\":\n return gw_parser(parser_name, contents, out_dict)\n elif parser_name == \"ks\":\n return ks_parser(parser_name, contents, out_dict)\n elif parser_name == \"dielec\":\n return dielec_parser(parser_name, contents, out_dict)\n elif parser_name == \"plussoc\":\n return plussoc_parser(parser_name, contents, out_dict)\n else:\n return {}", "title": "" }, { "docid": "53af65819cef6e529881d86bff449ab7", "score": "0.5183898", "text": "def parse_yaml(text):\n\n try:\n return yaml.safe_load(text) or {}\n except error.YAMLError as e:\n raise exc.DSLParsingException(\n \"Definition could not be parsed: %s\\n\" % e\n )", "title": "" }, { "docid": "b416f707bffbc60ddd139f022729db62", "score": "0.5177216", "text": "def _read_yaml_dic(self):\n with codecs.open(self.yamlDicPath, 'r', 'utf-8') as f:\n text = f.read()\n rawLexemes = re.split(u'\\s+-lexeme', text)\n for l in rawLexemes:\n lex = self.LEX_REGEX.search(l)\n paradigmTypes = self.PARADIGM_REGEX.findall(l) # todo test this change\n gr = self.POS_REGEX.search(l)\n if not lex or not paradigmTypes or not gr:\n continue\n lex, gr = lex.group(1), gr.group(1)\n for pType in paradigmTypes:\n lexStems = self.STEM_REGEX.findall(l)\n lexStems = [tuple([j for j in i.replace(u'/', u'|').split(u'|') if j]) for i in lexStems]\n pType = self.renamer.get(pType, pType)\n yield lex, pType, gr, lexStems", "title": "" }, { "docid": "84bf12e0f53205996d4f25bfcc2c63af", "score": "0.5167031", "text": "def parse_config(config_file):\n with open(config_file, 'r') as f:\n parsed = yaml.safe_load(f.read())\n Config.old_index_pattern = parsed['old_index_pattern']\n Config.new_index_pattern = parsed['new_index_pattern']\n Config.wildcard_fields = parsed['wildcard_fields']\n Config.ignore_fields = parsed['ignore_fields']\n Config.field_mapping = 
parsed['field_mapping']\n Config.valid_ecs_fields = parsed.get('valid_fields')", "title": "" }, { "docid": "958693bd1937892308eaf7fb9df698ac", "score": "0.51576334", "text": "def parse(filename):\n ## 0. Get text from file\n file = open(filename, 'r')\n text = file.read()\n\n ## 1. Get ranges\n inds = []\n for p_i in range(len(parts)):\n ## Get indice\n ind = text.find(parts[p_i]+':')\n ## Append indices\n inds.append(ind)\n inds.append(ind+len(parts[p_i])+1)\n inds.append(len(text))\n inds = inds[1:]\n\n ## 2. Get the structure split\n note = {}\n for i in range(len(parts)):\n aux_text = text[inds[2*i]:inds[2*i+1]].strip()\n if listtypte[i] is False:\n note[parts[i]] = aux_text\n else:\n aux_list = aux_text.split(listtypte[i])\n aux_list = [aux.strip() for aux in aux_list]\n note[parts[i]] = aux_list\n return note", "title": "" }, { "docid": "ef79f6871452e26ee0c449d79fc14e21", "score": "0.51555187", "text": "def _parse_file(f):\n a = iter(open(f))\n units = _get_units(a)\n _gotosection(a, 'ENTITIES')\n lines = []\n circles = []\n arcs = []\n entities = [lines, circles, arcs]\n gc, val = _parsepair(a)\n while 1:\n if val == 'LINE':\n ed, f, val = _process_entity(a)\n lines.append(ed)\n #print 'line\\n'\n elif val == 'CIRCLE':\n ed, f, val = _process_entity(a)\n circles.append(ed)\n #print 'circle\\n'\n elif val == 'ARC':\n ed, f, val = _process_entity(a)\n arcs.append(ed)\n #print 'arc\\n'\n else:\n ed, f, val = _process_entity(a)\n if not f:\n return (units, entities)", "title": "" }, { "docid": "76e40fd9e700d1ce230c4ebb8b80cae4", "score": "0.5143586", "text": "def validate_specs():\n validate_specs_from_path(get_specs_path())", "title": "" }, { "docid": "3933b013e80e2b8dbe153b7ec3ae45c0", "score": "0.511893", "text": "def parseConfig(self, filename):\n with open(filename) as f:\n # skip header line\n f.readline()\n ymlContent = f.read()\n # remove !!opencv-matrix\n regex = re.compile(r\"!!.*\")\n conf = yaml.load(regex.sub(r'', ymlContent))\n\n self.camMatrix = self.__parseNumpyMatrix(conf[\"camera_matrix\"])\n self.distCoeffs = self.__parseNumpyMatrix(conf[\"distortion_coefficients\"])\n self.imageWidth = conf[\"image_width\"]\n self.imageHeight = conf[\"image_height\"]", "title": "" }, { "docid": "048870907b80aabd5956ce1fc0f6b464", "score": "0.51066685", "text": "def parse_specs(cls, raw_specs: Iterable[str], *, build_root: Optional[str] = None) -> Specs:\n build_root = build_root or get_buildroot()\n spec_parser = CmdLineSpecParser(build_root)\n\n address_specs: OrderedSet[AddressSpec] = OrderedSet()\n filesystem_specs: OrderedSet[FilesystemSpec] = OrderedSet()\n for spec_str in raw_specs:\n parsed_spec = spec_parser.parse_spec(spec_str)\n if isinstance(parsed_spec, AddressSpec):\n address_specs.add(parsed_spec)\n else:\n filesystem_specs.add(parsed_spec)\n\n return Specs(\n AddressSpecs(address_specs, filter_by_global_options=True),\n FilesystemSpecs(filesystem_specs),\n )", "title": "" }, { "docid": "5b658b414173ee946f6ac1bdd64f0847", "score": "0.5102064", "text": "def parse_spec(spec_file, keep_config=False, macros=None):\n if not keep_config:\n rpm.reloadConfig()\n if macros is not None:\n for name, value in macros.items():\n rpm.addMacro(name, value)\n\n with NamedTemporaryFile(mode=\"w+\") as tmplog:\n # rpm will print errors to stdout if logfile is not set\n rpm.setLogFile(tmplog)\n\n try:\n spec = rpm.spec(spec_file)\n except ValueError as exc:\n # re-raise errors with rpm output appended to message\n raise ValueError(str(exc) + open(tmplog.name, 'r').read())\n return spec", 
"title": "" }, { "docid": "9af7be6f16e020d94602623169107f7d", "score": "0.50860137", "text": "def parse(self, file_path, doc=None):\n logger.info(\"Parsing YAML ontology file %s\" % file_path)\n self._doc = doc or self.get_doc(file_path)\n validate(self._doc,\n context=\"<%s>\" % os.path.basename(file_path))\n\n # TODO version and author\n self._file_path = file_path\n self._namespace = self._doc[NAMESPACE_KEY].lower()\n self._ontology_doc = self._doc[ONTOLOGY_KEY]\n self._parse_ontology()\n logger.info(f\"You can now use `from osp.core.namespaces import \"\n f\"{self._namespace}`.\")", "title": "" }, { "docid": "8fc36cf53b1c0d12566295346fbfcaf0", "score": "0.50812715", "text": "def parseInput(self, filename):\n file = open(filename, \"r\")\n self.parseLines(file.readlines())", "title": "" }, { "docid": "6cc9675a9dc5e860e781cf9788fe678e", "score": "0.5068972", "text": "def validate_specfile(specfile: pathlib.Path) -> None:\n # Ensure the file exists.\n if not specfile.exists():\n raise exceptions.CLIError(f\"the specfile '{specfile}' cannot be found\")\n\n # Ensure the extension is valid.\n ext = specfile.suffix.lower()\n if ext not in VALID_EXTENSIONS:\n raise exceptions.CLIError(\n f\"specification format not supported: '{specfile}' \"\n f\"(valid extensions are {', '.join(VALID_EXTENSIONS)}, \"\n \"case insensitive)\"\n )", "title": "" }, { "docid": "4cddf6a1ac9b4357ca9c2117aaa452b3", "score": "0.5058263", "text": "def lifetime_parse(spec):\n\n\tparser = _init_parser()\n\ttry:\n\t\treturn parser.parseString(spec).asDict()\n\texcept ParseException, ex:\n\t\traise ParsingError(ex)", "title": "" }, { "docid": "84a2c1cc2cc9abf23c7237734294d946", "score": "0.504453", "text": "async def parse(self, ctx: Context):\n await ctx.trigger_typing()\n check = lambda x: x.author == ctx.author and x.channel == ctx.channel\n message = _(\n \"Paste your YAML here. It will be validated, and if there is \"\n \"an exception, it will be returned to you.\"\n )\n\n await ctx.send(message)\n\n try:\n content = await self.bot.wait_for(\"message\", timeout=500, check=check)\n except asyncio.TimeoutError:\n with contextlib.suppress(discord.NotFound):\n await message.delete()\n\n content = content.content\n valid = validator(cleanup_code(content))\n\n if not valid:\n return await ctx.send(_(\"This YAML is invalid.\"))\n\n try:\n parser = RaffleManager(valid)\n parser.parser(ctx)\n except RaffleError as e:\n exc = _(\"An exception occured whilst parsing your data.\")\n return await ctx.send(cross(exc) + format_traceback(e))\n\n await ctx.send(tick(_(\"This YAML is good to go! 
No errors were found.\")))\n\n await self.clean_guild_raffles(ctx)", "title": "" }, { "docid": "579b8987d84360bef5930e3ae3c28727", "score": "0.504084", "text": "def validate_swagger(swagger_spec):\n spec_dict, _ = read_from_filename(swagger_spec)\n validate_spec(spec_dict)", "title": "" }, { "docid": "30cdbc8fac14fb7ac7d0496f6be21ec9", "score": "0.5040609", "text": "def parse(self, infile):\n raise NotImplementedError()", "title": "" }, { "docid": "d5647e955278c23ccb4d9ff9c6945f6a", "score": "0.50379026", "text": "def _parse(self) -> None:\n with open(self.filepath) as fh_props:\n if self.extension in ['.ini', '.cfg']:\n all_lines = ''.join(fh_props.readlines())\n cfg = ConfigParser()\n cfg.read_string(all_lines)\n for section in cfg.sections():\n self.properties.update(dict(cfg.items(section)))\n elif self.extension == '.properties':\n all_lines = list(map(str.strip, filter(None, fh_props.readlines())))\n self.properties.update({\n p[0].strip(): p[1].strip() for p in [\n p.split('=', 1) for p in list(\n filter(lambda l: re.match('[a-zA-Z0-9][._\\\\-a-zA-Z0-9]* *=.*', l), all_lines)\n )\n ]\n })\n elif self.extension in ['.yml', '.yaml']:\n self.properties.update(flatten_dict(yaml.safe_load(fh_props)))\n else:\n raise NotImplementedError(f'Extension {self.extension} is not supported')\n log.info('Successfully loaded %d properties from:\\n\\t=>%s', len(self.properties), self.filepath)", "title": "" }, { "docid": "7ce2a605290e6c799f867d60cd373448", "score": "0.50365776", "text": "def parse(klass, path):\n rule = re.compile(r'^([\\w\\-]+)\\s+->\\s+([\\w\\-\\s\\|]+)#?(.*)$')\n pcfg = klass()\n try:\n with open(path, 'rb') as cfg:\n for line in cfg.readlines():\n line = line.strip()\n # Ignore commented or empty lines\n if not line or line.startswith('#'): continue\n\n match = rule.match(line)\n if not match:\n raise GrammarError(\"Problem parsing: %s\" % line)\n else:\n lhs = match.groups()[0]\n rhs = match.groups()[1]\n\n for term in rhs.split('|'):\n prod = Production(lhs, term.strip())\n pcfg[prod] = prod\n\n return pcfg\n except IOError as e:\n raise GrammarError(\"Could not open grammar:\\n%s\" % str(e))", "title": "" }, { "docid": "c462fcc71a37c78a4768397855262923", "score": "0.50291103", "text": "def __ValidJsonOrYaml(self, file_name, file_contents):\n if endpoints.FilenameMatchesExtension(file_name,\n ['.json', '.yaml', '.yml']):\n config_dict = endpoints.LoadJsonOrYaml(file_contents)\n if config_dict:\n return config_dict\n else:\n raise calliope_exceptions.BadFileException(\n 'Could not read JSON or YAML from config file '\n '[{0}].'.format(file_name))\n else:\n return False", "title": "" }, { "docid": "42fdcf30dcf59d0fcc9711172468766e", "score": "0.50259256", "text": "def parse_yaml(filename: Path):\n with open(filename, 'r') as f:\n try:\n return yaml.safe_load(f)\n except (yaml.parser.ParserError, yaml.scanner.ScannerError) as err:\n nicestr = \"Bad syntax in '{0}' line {2} column {3}: {1}\"\n nicemsg = nicestr.format(filename, err.problem, err.problem_mark.line, err.problem_mark.column)\n raise ParseError(nicemsg, filename, err.problem_mark.line, err.problem_mark.column)", "title": "" }, { "docid": "2224f980096bc6b774e03901fb176071", "score": "0.5010925", "text": "def parse_chipsec_cfg(file_data: bytes) -> None:\n # We could parse an XML file... 
but the files are always well formed\n platform: Optional[str] = None\n for raw_line in file_data.splitlines():\n line = raw_line.decode()\n\n # Parse <configuration platform=\"...\">\n if (\n \"<configuration\" in line\n and line != \"<configuration>\"\n and line != '<configuration platform=\"[PLATFORM_CODE]\" req_pch=\"BOOLEAN\">'\n ):\n if platform is not None:\n print(f\"Warning: duplicate '<configuration' tag in {line!r}\")\n if matches := re.match(r'^<configuration +platform=\"([0-9A-Za-z_]+)\"', line):\n platform = matches.group(1)\n else:\n print(f\"Warning: unable to parse platform from {line!r}\")\n\n # Parse <info family=\"core\" detection_value=\"...\">\n # Ignore Intel Quark platform\n if platform != \"QRK\" and \"<info\" in line and \"detection_value\" in line:\n if matches := re.match(\n r'^ *<info family=\"(atom|core|quark|xeon)\" detection_value=\"([0-9A-Fa-fx, -]+)\"', line.replace(\"'\", '\"')\n ):\n family, detection_value = matches.groups()\n for values_str in detection_value.split(\",\"):\n values_str = values_str.strip().lower()\n has_wildcard = False\n if matches := re.match(r\"^0x([0-9a-f]+)-0x([0-9a-f]+)$\", values_str):\n # Interval\n values_start = int(matches.group(1), 16)\n values_end = int(matches.group(2), 16)\n if (values_start & ~0xF) != (values_end & ~0xF):\n print(f\"Warning: interval too large {values_str} in {line!r}\")\n\n # Detect wide range as \"unknown stepping\"\n value_without_stepping = values_start & ~0xF\n if values_start == value_without_stepping and values_end == value_without_stepping | 0xF:\n has_wildcard = True\n values = [value_without_stepping]\n else:\n values = list(range(values_start, values_end + 1))\n elif matches := re.match(r\"^0x([0-9a-f]+)$\", values_str):\n # Raw value\n values = [int(matches.group(1), 16)]\n else:\n print(f\"Warning: unknown detection_value format {values_str!r} in {line!r}\")\n values = []\n for cpuid_value in values:\n cpuinfo = X86CPUInfo(\"GenuineIntel\", None, cpuid_value)\n cpuid_desc = f\"Intel {cpuinfo.x86_family}, {cpuinfo.x86_model:#x}, {cpuinfo.x86_stepping}\"\n cpu_models = CPU_MODELS.get(cpuinfo.vendor_id, {}).get(cpuinfo.x86_family)\n if cpu_models is None:\n print(f\"Warning({cpuid_value:#x}): unknown Intel family {cpuinfo.x86_family}\")\n cpuid_data = None\n elif has_wildcard:\n cpuid_desc = f\"Intel {cpuinfo.x86_family}, {cpuinfo.x86_model:#x}, any stepping\"\n assert cpuinfo.x86_stepping == 0 # This was the value set\n cpuid_data = cpu_models.get((cpuinfo.x86_model, -1))\n else:\n cpuid_data = cpu_models.get((cpuinfo.x86_model, cpuinfo.x86_stepping))\n\n if (cpuid_value, platform) in KNOWN_DUBIOUS_CPUID:\n # Skip detection for known dubious assiociations\n pass\n elif cpuid_data is None:\n print(f\"Update({cpuid_value:#x}): {cpuid_desc} is {platform}\")\n else:\n if cpuid_data.acronym is None:\n # No abbreviation\n print(\n f\"Update({cpuid_value:#x}): abbrev {platform} for {cpuid_desc} [{cpuid_data.main_desc}]\" # noqa\n )\n elif platform and platform not in cpuid_data.acronym:\n # Missing platform\n print(\n f\"Update({cpuid_value:#x}): missing {platform} in {cpuid_data.acronym} for {cpuid_desc} [{cpuid_data.main_desc}]\" # noqa\n )\n else:\n print(f\"Warning: unable to parse info from {line!r}\")\n\n if \" msr=\" in line.lower():\n if matches := re.match(r'^ *<register name=\"([0-9A-Za-z_]+)\" +type=\"msr\" msr=\"(0x[0-9A-F]+)\"', line):\n msr_name, msr_index_hex = matches.groups()\n msr_index = int(msr_index_hex, 0)\n\n normalized_msr_name = msr_name\n if 
normalized_msr_name.startswith(\"MSR_\"):\n normalized_msr_name = normalized_msr_name[4:]\n if normalized_msr_name == \"PRMRR_PHYBASE\" and msr_index == 0x1F4:\n normalized_msr_name = \"PRMRR_PHYS_BASE\"\n normalized_msr_name = KNOWN_NORMALIZED_MSR_NAMES.get(normalized_msr_name, normalized_msr_name)\n\n known_names = MSRS.msrs.get(msr_index)\n if not known_names:\n if 0xC0002014 <= msr_index <= 0xC0002169 and msr_name.startswith(\n (\"MCA_CONFIG_\", \"MCA_IPID_\", \"MCA_SYND_\", \"MCA_DESTAT_\", \"MCA_DEADDR_\", \"MCA_MISC1_\")\n ):\n # Ignore AMD Renoir MCA registers\n pass\n else:\n print(f\"Update(MSR 0x{msr_index:X}): new {msr_name}\")\n elif (\n normalized_msr_name not in known_names.values()\n and (\"IA32_\" + normalized_msr_name) not in known_names.values()\n ):\n # List known names\n desc_names = \" \".join(\n f\"{name}({prefix})\" if prefix else name for prefix, name in sorted(known_names.items())\n )\n print(f\"Update(MSR 0x{msr_index:X}): {msr_name} (known {desc_names})\")\n else:\n print(f\"Warning: unable to parse MSR definition from {line!r}\")", "title": "" }, { "docid": "bfeef78bcdb427098a5deb1029462215", "score": "0.50098467", "text": "def cfg_from_file(filename=None, dict=None, reset_model_spec=True):\n\n with open(filename, \"r\") as f:\n yaml_cfg = edict(yaml.load(f))\n if not reset_model_spec:\n output_dir = \"/\".join(filename.split(\"/\")[:-1])\n __C.RL_MODEL_SPEC = os.path.join(\n output_dir, yaml_cfg[\"RL_MODEL_SPEC\"].split(\"/\")[-1]\n )\n if dict is None:\n _merge_a_into_b(yaml_cfg, __C)\n else:\n _merge_a_into_b(yaml_cfg, dict)\n process_cfg(reset_model_spec=reset_model_spec)", "title": "" }, { "docid": "c63ae7f78eb4db2b771b32cd184b0c64", "score": "0.5007348", "text": "def _parse_line(self, line):\n tokens = line.strip().split(Config.DELIMITER)\n tag, value = tuple(tokens)\n for item in self._items:\n if tag == item.tag():\n item.from_file_string(value)\n break", "title": "" }, { "docid": "db380fa7612a2b7643fcf91feebd86dc", "score": "0.4999825", "text": "def _ReadArtifactDefinition(self, yaml_definition):\n if not yaml_definition:\n raise errors.FormatError(u'Missing YAML definition.')\n\n name = yaml_definition.get('name', None)\n if not name:\n raise errors.FormatError(u'Invalid artifact definition missing name.')\n\n # The description is assumed to be mandatory.\n description = yaml_definition.get('doc', None)\n if not description:\n raise errors.FormatError(\n u'Invalid artifact definition missing description.')\n\n artifact_definition = artifact.ArtifactDefinition(\n name, description=description)\n\n for collector in yaml_definition.get('collectors', []):\n type_indicator = collector.get('collector_type', None)\n if not type_indicator:\n raise errors.FormatError(\n u'Invalid artifact definition collector missing type.')\n\n arguments = collector.get('args', None)\n collector_definition = artifact_definition.AppendCollector(\n type_indicator, arguments)\n\n if collector_definition:\n collector_definition.conditions = collector.get(\n 'conditions', [])\n collector_definition.returned_types = collector.get(\n 'returned_types', [])\n self._ReadSupportedOS(yaml_definition, collector_definition, name)\n\n # TODO: check conditions.\n artifact_definition.conditions = yaml_definition.get('conditions', [])\n artifact_definition.provides = yaml_definition.get('provides', [])\n self._ReadLabels(yaml_definition, artifact_definition, name)\n self._ReadSupportedOS(yaml_definition, artifact_definition, name)\n artifact_definition.urls = yaml_definition.get('urls', 
[])\n\n return artifact_definition", "title": "" }, { "docid": "e70390e60a122bed31fa4bdea695eaa5", "score": "0.49979648", "text": "def _parse_requirements(requirements_file, is_constraint):\n with open(requirements_file) as f:\n lines = f.read().splitlines()\n\n lines = map(str.strip, lines)\n lines = map(_strip_inline_comment, lines)\n lines = _join_continued_lines(lines)\n lines = filterfalse(_is_comment, lines)\n lines = filterfalse(_is_empty, lines)\n\n for line in lines:\n if _is_requirements_file(line):\n req_file = line.split(maxsplit=1)[1]\n # If `req_file` is an absolute path, `os.path.join` returns `req_file`:\n # https://docs.python.org/3/library/os.path.html#os.path.join\n abs_path = os.path.join(os.path.dirname(requirements_file), req_file)\n yield from _parse_requirements(abs_path, is_constraint=False)\n elif _is_constraints_file(line):\n req_file = line.split(maxsplit=1)[1]\n abs_path = os.path.join(os.path.dirname(requirements_file), req_file)\n yield from _parse_requirements(abs_path, is_constraint=True)\n else:\n yield _Requirement(line, is_constraint)", "title": "" }, { "docid": "348b42378340e496ee5a0411979e22e6", "score": "0.49903843", "text": "def parse():\n # TODO: Verificar a necessidade de se ter parâmetros curtos\n opts_curta = \"\"\n opts_longa = Parametros.getopts()\n args = argv[1:]\n opts = gnu_getopt(args, opts_curta, opts_longa)\n parametros = Parametros()\n parametros.parse(opts)\n if len(parametros.config) > 0 and path.exists(parametros.config):\n parametros.__dict__ = yaml.load(open(parametros.config, \"r\").read())\n parametros.parse(opts)\n return parametros", "title": "" }, { "docid": "7577f15c45b0d271bfabc5f0a24e69ad", "score": "0.49887967", "text": "def read_yaml(self):\n\n write_to_log(\"---read_yaml---\\n\")\n\n with open(self.yaml_fn) as file:\n info = yaml.load(file, Loader=yaml.FullLoader)\n nudging_info = info['nudging']\n self.info = nudging_info\n self.nudge_step = nudging_info['step_nu_tr']\n self.start_date = nudging_info['start_date']\n self.rnday = nudging_info['rnday']\n self.end_date = self.start_date + datetime.timedelta(self.rnday)\n self.datetime = pd.date_range(self.start_date, self.end_date,\n freq=self.nudge_step)[:-1] # minus 1 step_nu_tr to keep the end_date within the last day.\n self.time = pd.to_datetime(self.datetime.values) - \\\n pd.to_datetime(self.start_date)\n self.time_seconds = self.time.total_seconds().astype(int)\n self.hgrid_fn = nudging_info['hgrid_input_file']\n self.vgrid_fn = 
nudging_info['vgrid_input_file']\n self.default_value = nudging_info['default']\n self.vgrid_version = nudging_info['vgrid_version']\n self.mesh = read_mesh(self.hgrid_fn, self.vgrid_fn, self.vgrid_version)\n self.node_x = self.mesh.nodes[:, 0]\n self.node_y = self.mesh.nodes[:, 1]\n self.node_z = self.mesh.nodes[:, 2]\n self.nnode = self.mesh.n_nodes()\n self.nvrt = self.mesh.n_vert_levels\n self._mesh_gpd = None\n self._z = None\n if self.crs is None:\n if 'crs' in nudging_info.keys():\n self.crs = nudging_info['crs']\n else:\n # this is required because 3dfield from roms or hycom only provides lat, lon.\n self.crs = 'EPSG:26910'\n print(\"crs not specified, and assigned to the default crs for UTM10N\")\n\n if self.output_suffix is None:\n if 'output_suffix' in nudging_info:\n self.output_suffix = nudging_info['output_suffix']\n else:\n self.output_suffix = None", "title": "" }, { "docid": "b0b980ef22885b106f17e4c0b8242a26", "score": "0.49875692", "text": "def _parse_static(self):\n\n logging.info(\"Parsing specification file: static content ...\")\n\n idx = self._get_type_index(\"static\")\n if idx is None:\n logging.warning(\"No static content specified.\")\n\n for key, value in self._cfg_yaml[idx].items():\n if isinstance(value, str):\n try:\n self._cfg_yaml[idx][key] = self._replace_tags(\n value, self._specification[\"environment\"][\"paths\"])\n except KeyError:\n pass\n\n self._specification[\"static\"] = self._cfg_yaml[idx]\n\n logging.info(\"Done.\")", "title": "" }, { "docid": "52163dbc4e15d2a5000571dc0c1830d4", "score": "0.496998", "text": "def parse_yaml_into_processed_dict(cls, yaml_file, env_yaml=None, product_cpes=None):\n file_basename = os.path.basename(yaml_file)\n entity_id = derive_id_from_file_name(file_basename)\n if file_basename == cls.GENERIC_FILENAME:\n entity_id = os.path.basename(os.path.dirname(yaml_file))\n\n if env_yaml:\n env_yaml[cls.ID_LABEL] = entity_id\n yaml_data = open_and_macro_expand(yaml_file, env_yaml)\n\n try:\n processed_data = cls.process_input_dict(yaml_data, env_yaml, product_cpes)\n except ValueError as exc:\n msg = (\n \"Error processing {yaml_file}: {exc}\"\n .format(yaml_file=yaml_file, exc=str(exc)))\n raise ValueError(msg)\n\n if yaml_data:\n msg = (\n \"Unparsed YAML data in '{yaml_file}': {keys}\"\n .format(yaml_file=yaml_file, keys=list(yaml_data.keys())))\n raise RuntimeError(msg)\n\n if not processed_data.get(\"definition_location\", \"\"):\n processed_data[\"definition_location\"] = yaml_file\n\n processed_data[\"id_\"] = entity_id\n\n return processed_data", "title": "" }, { "docid": "7210d422112fceca25fc8afac03702d6", "score": "0.49582508", "text": "def build_from_yamlfile(yamlfile):\n d = yaml.safe_load(open(yamlfile))\n return MktimeFilterDict(d['aliases'], d['selections'])", "title": "" }, { "docid": "73f690b736aa73fa201a0b7300fa4b27", "score": "0.49452204", "text": "def _parse_yaml_file(path):\n\n with open(path, \"r\") as stream:\n try:\n return yaml.safe_load(stream)\n except yaml.YAMLError as ex:\n print(ex)", "title": "" }, { "docid": "8307805f614314831c27411648665e0d", "score": "0.49375498", "text": "def validate_circle_yml(filepath):\n # obviously the file must be named 'circle.yml'\n if op.basename(filepath) != 'circle.yml':\n raise InvalidNameError(u\"Filename must be 'circle.yml'\")\n\n allowed_sections = {'checkout', 'database', 'dependencies', 'deployment',\n 'experimental', 'general', 'machine', 'notify', 'test'}\n fd = open(filepath, 'r') # let it raise an IOError if no file exists\n circle_yml = yaml.load(fd) 
# will throw a ScannerError if not valid YaML\n fd.close()\n circle_sections = circle_yml.keys()\n\n # check for valid sections\n unrecognized_sections = _errant_items(circle_sections, allowed_sections)\n if len(unrecognized_sections) > 0:\n # we have an unrecognized section\n raise UnrecognizedSectionError(u\"The following sections are unrecognized: {}\".format(\", \".join(unrecognized_sections)))\n\n # check each section\n for section in circle_sections:\n if section == 'machine':\n conditions = {'pre', 'post'} # override not allowed\n languages = {'ghc', 'java', 'node', 'php', 'python', 'ruby', 'xcode'}\n system = {'environment', 'hosts', 'services', 'timezone'}\n allowed = conditions.union(languages).union(system)\n try:\n subsections = circle_yml[section].keys()\n except AttributeError:\n raise InvalidSectionError(u\"Invalid subsection format in '{}'\".format(section))\n\n # check for valid subsections\n invalid_sections = _errant_items(subsections, allowed)\n if len(invalid_sections) > 0:\n # we have an invalid section\n raise UnrecognizedSectionError(u\"Subsections not allowed in '{}': {}\".format(section, \", \".join(invalid_sections)))\n\n # check each subsection\n for subsection in subsections:\n item = circle_yml[section][subsection]\n if subsection in ('environment', 'hosts'): # dict requirement\n if not isinstance(item, dict):\n raise InvalidSectionError(u\"Invalid subitem format in '{}.{}'\".format(section, subsection))\n elif subsection == 'timezone': # string requirement\n if not isinstance(item, basestring):\n raise InvalidSectionError(u\"'{}.{}' subsection must be a single string\".format(subsection))\n elif subsection == 'services': # list requirement\n if not isinstance(item, list):\n raise InvalidSectionError(u\"'{}.{}' subsection must be a list\".format(subsection))\n elif subsection in languages:\n if not isinstance(item, dict) or len(item.keys()) != 1 or not item.get('version'):\n raise InvalidSectionError(u\"'{}.{}' subsection only supports 'version'\".format(section, subsection))\n elif section == 'checkout':\n allowed = {'post'}\n try:\n subsections = circle_yml[section].keys()\n except AttributeError:\n raise InvalidSectionError(u\"Invalid subsection format in '{}'\".format(section))\n\n # check for valid subsections\n invalid_sections = _errant_items(subsections, allowed)\n if len(invalid_sections) > 0:\n # we have an invalid section\n raise UnrecognizedSectionError(u\"Subsections not allowed in '{}': {}\".format(section, \", \".join(invalid_sections)))\n\n # check the only subsection\n item = circle_yml[section]['post']\n if not isinstance(item, list):\n raise InvalidSectionError(u\"'{}' section must be a list\".format('post'))\n elif section == 'dependencies':\n conditions = {'pre', 'override', 'post'}\n misc = {'bundler', 'cache_directories'}\n allowed = conditions.union(misc)\n try:\n subsections = circle_yml[section].keys()\n except AttributeError:\n raise InvalidSectionError(u\"Invalid subsection format in '{}'\".format(section))\n\n # check for valid subsections\n invalid_sections = _errant_items(subsections, allowed)\n if len(invalid_sections) > 0:\n # we have an invalid section\n raise UnrecognizedSectionError(u\"Subsections not allowed in '{}': {}\".format(section, \", \".join(invalid_sections)))\n\n # check each subsection\n for subsection in subsections:\n item = circle_yml[section][subsection]\n if subsection in conditions or subsection == 'cache_directories':\n if not isinstance(item, list):\n raise InvalidSectionError(u\"'{}' section must be a 
list\".format(subsection))\n else:\n if not isinstance(item, dict):\n raise InvalidSectionError(u\"'{}.{}' subsection only supports 'without'\".format(section, subsection))\n allowed_subitems = {'without'}\n subitems = circle_yml[section][subsection].keys()\n\n # check for valid subitems\n invalid_subitems = _errant_items(subitems, allowed_subitems)\n if len(invalid_subitems) > 0:\n # we have an invalid section\n raise UnrecognizedSectionError(u\"Subitems not allowed in '{}.{}': {}\".format(section, subsection, \", \".join(invalid_subitems)))\n\n subsubitem = circle_yml[section][subsection]['without']\n if not isinstance(subsubitem, list):\n raise InvalidSectionError(u\"'{}.{}.{}' subitem must be a list\".format(section, subsection, 'without'))\n elif section == 'database':\n allowed = {'pre', 'override', 'post'}\n try:\n subsections = circle_yml[section].keys()\n except AttributeError:\n raise InvalidSectionError(u\"Invalid subsection format in '{}'\".format(section))\n\n # check for valid subsections\n invalid_sections = _errant_items(subsections, allowed)\n if len(invalid_sections) > 0:\n # we have an invalid section\n raise UnrecognizedSectionError(u\"Subsections not allowed in '{}': {}\".format(section, \", \".join(invalid_sections)))\n\n # check each subsection\n for subsection in subsections:\n item = circle_yml[section][subsection]\n if not isinstance(item, list):\n raise InvalidSectionError(u\"'{}' section must be a list\".format(subsection))\n elif section == 'test':\n conditions = {'pre', 'override', 'post'}\n misc = {'minitest_globs'}\n allowed = conditions.union(misc)\n try:\n subsections = circle_yml[section].keys()\n except AttributeError:\n raise InvalidSectionError(u\"Invalid subsection format in '{}'\".format(section))\n\n # check for valid subsections\n invalid_sections = _errant_items(subsections, allowed)\n if len(invalid_sections) > 0:\n # we have an invalid section\n raise UnrecognizedSectionError(u\"Subsections not allowed in '{}': {}\".format(section, \", \".join(invalid_sections)))\n\n # check each subsection\n for subsection in subsections:\n item = circle_yml[section][subsection]\n if not isinstance(item, list):\n raise InvalidSectionError(u\"'{}' section must be a list\".format(subsection))\n elif section == 'deployment':\n # all subsection names are allowed except pre, override, post\n disallowed = {'pre', 'override', 'post'}\n try:\n subsections = circle_yml[section].keys()\n except AttributeError:\n raise InvalidSectionError(u\"Invalid subsection format in '{}'\".format(section))\n\n # check for valid subsections\n invalid_sections = disallowed.intersection(set(subsections))\n if len(invalid_sections) > 0:\n # we have an invalid section\n raise UnrecognizedSectionError(u\"Subsections not allowed in '{}': {}\".format(section, \", \".join(invalid_sections)))\n\n # check each subsection\n for subsection in subsections:\n allowed = {'branch', 'commands', 'heroku', 'owner', 'tag'}\n try:\n subitems = circle_yml[section][subsection].keys()\n except AttributeError:\n raise InvalidSectionError(u\"Invalid subitem format in '{}.{}'\".format(section, subsection))\n\n # check for valid subitems\n invalid_subitems = _errant_items(subitems, allowed)\n if len(invalid_subitems) > 0:\n # we have an invalid subitem\n raise UnrecognizedSectionError(u\"Subitems not allowed in '{}.{}': {}\".format(section, subsection, \", \".join(invalid_subitems)))\n\n required = {'branch', 'tag'}\n found = required.intersection(set(subitems))\n if len(found) == 0:\n raise 
InvalidSectionError(u\"'branch' or 'tag' required in '{}.{}'\".format(section, subsection))\n branch = circle_yml[section][subsection][found.pop()]\n if not isinstance(branch, basestring) and not isinstance(branch, list):\n raise InvalidSectionError(u\"'branch' value not a list or string in '{}.{}'\".format(section, subsection))\n commands = circle_yml[section][subsection].get('commands')\n if commands and not isinstance(commands, list):\n raise InvalidSectionError(u\"'{}.{}.{}' subitem must be a list\".format(section, subsection, 'commands'))\n heroku = circle_yml[section][subsection].get('heroku')\n if heroku and not isinstance(heroku, dict):\n raise InvalidSectionError(u\"Invalid subitem format in '{}.{}.{}'\".format(section, subsection, 'heroku'))\n owner = circle_yml[section][subsection].get('owner')\n if owner and not isinstance(owner, basestring):\n raise InvalidSectionError(u\"'{}.{}.{}' subitem must be a string\".format(section, subsection, 'owner'))\n elif section == 'notify':\n allowed = {'webhooks'}\n try:\n subsections = circle_yml[section].keys()\n except AttributeError:\n raise InvalidSectionError(u\"Invalid subsection format in '{}'\".format(section))\n\n # check for valid subsections\n invalid_sections = _errant_items(subsections, allowed)\n if len(invalid_sections) > 0:\n # we have an invalid section\n raise UnrecognizedSectionError(u\"Subsections not allowed in '{}': {}\".format(section, \", \".join(invalid_sections)))\n\n webhooks = circle_yml[section]['webhooks']\n if not isinstance(webhooks, list):\n raise InvalidSectionError(u\"'{}.{}' subsection must be a list\".format(section, 'webhooks'))\n for url_address in webhooks:\n if not isinstance(url_address, dict) or not url_address.get('url'):\n raise InvalidSectionError(u\"'{}.{}' subsection must be a list of 'url: <url>' items\".format(section, 'webhooks'))\n elif section == 'general':\n allowed = {'artifacts', 'branches', 'build_dir'}\n try:\n subsections = circle_yml[section].keys()\n except AttributeError:\n raise InvalidSectionError(u\"Invalid subsection format in '{}'\".format(section))\n\n # check for valid subsections\n invalid_sections = _errant_items(subsections, allowed)\n if len(invalid_sections) > 0:\n # we have an invalid section\n raise UnrecognizedSectionError(u\"Subsections not allowed in '{}': {}\".format(section, \", \".join(invalid_sections)))\n\n for subsection in subsections:\n if subsection == 'artifacts':\n item = circle_yml[section][subsection]\n if not isinstance(item, list):\n raise InvalidSectionError(u\"'{}.{}' subsection must be a list\".format(section, subsection))\n elif subsection == 'branches':\n allowed_subitems = {'ignore', 'only'}\n try:\n subitems = circle_yml[section][subsection].keys()\n except AttributeError:\n raise InvalidSectionError(u\"Invalid subitem format in '{}.{}'\".format(section, subsection))\n\n # check for valid subitems\n invalid_subitems = _errant_items(subitems, allowed_subitems)\n if len(invalid_subitems) > 0:\n # we have an invalid subitem\n raise UnrecognizedSectionError(u\"Subitems not allowed in '{}.{}': {}\".format(section, \", \".join(invalid_sections)))\n\n for subitem in subitems:\n item = circle_yml[section][subsection][subitem]\n if not isinstance(item, list):\n raise InvalidSectionError(u\"'{}.{}.{}' subitem must be a list\".format(section, subsection, subitem))\n elif subsection == 'build_dir':\n item = circle_yml[section][subsection]\n if not isinstance(item, basestring):\n raise InvalidSectionError(u\"'{}.{}' subsection must be a 
string\".format(section, subsection))\n elif section == 'experimental':\n allowed = {'notify'} # currently only notify\n try:\n subsections = circle_yml[section].keys()\n except AttributeError:\n raise InvalidSectionError(u\"Invalid subsection format in '{}'\".format(section))\n\n # check for valid subsections\n invalid_sections = _errant_items(subsections, allowed)\n if len(invalid_sections) > 0:\n # we have an invalid section\n raise UnrecognizedSectionError(u\"Subsections not allowed in '{}': {}\".format(section, \", \".join(invalid_sections)))\n\n allowed_subitems = {'branches'}\n try:\n subitems = circle_yml[section]['notify'].keys()\n except AttributeError:\n raise InvalidSectionError(u\"Invalid subitem format in '{}.{}'\".format(section, 'notify'))\n\n # check for valid subitems\n invalid_subitems = _errant_items(subitems, allowed_subitems)\n if len(invalid_subitems) > 0:\n # we have an invalid section\n raise UnrecognizedSectionError(u\"Subitems not allowed in '{}.{}': {}\".format(section, 'notify', \", \".join(invalid_subitems)))\n\n for subitem in subitems:\n allowed_subsubitems = {'ignore', 'only'}\n try:\n subsubitems = circle_yml[section]['notify'][subitem].keys()\n except AttributeError:\n raise InvalidSectionError(u\"Invalid subitem format in '{}.{}.{}'\".format(section, 'notify', subitem))\n\n # check for valid subsubitems\n invalid_subsubitems = _errant_items(subsubitems, allowed_subsubitems)\n if len(invalid_subsubitems) > 0:\n # we have an invalid subsubitem\n raise UnrecognizedSectionError(u\"Subitems not allowed in '{}.{}.{}': {}\".format(section, 'notify', subitem, \", \".join(invalid_subsubitems)))\n\n for subsubitem in subsubitems:\n item = circle_yml[section]['notify'][subitem][subsubitem]\n if not isinstance(item, list):\n raise InvalidSectionError(u\"'{}.{}.{}.{}' subitem must be a list\".format(section, 'notify', subitem, subsubitem))\n return True", "title": "" }, { "docid": "45b90c5bc7babc94760815d8dadb9a74", "score": "0.492861", "text": "def _dump_spec(spec):\n with open(\"spec.yaml\", \"w\") as f:\n yaml.dump(spec, f, Dumper=MyDumper, default_flow_style=False)", "title": "" }, { "docid": "4fec6a82d275f1a377f2627cbd4f0db9", "score": "0.49273401", "text": "def validate_options(self):\n all_options = {\n 'spec_dir' : {\n 'description': ('Default location for format specification files'\n ' (defining the format using the specification language). This'\n ' used for loading specification files listed in the \"spec_files\" parameter.'),\n 'default': os.path.dirname(os.path.realpath(__file__))},\n # 'default': './',\n 'link_type': {\n 'description': 'Type of links when linking one dataset to another',\n 'values': { # following tuples have description, then 'Default' if default value\n 'soft': 'hdf5 soft links',\n 'string': 'make string dataset containing path to link'},\n 'default': 'soft' },\n 'identify_custom_nodes': {\n 'description': ('Add and attribute to custom nodes (groups and datasets not '\n 'described by a schema) to indicate they are custom.'),\n 'values': {\n True: 'yes, identify them by including the custom_node_identifier',\n False: 'no, do not include the custom node identifier'},\n 'default': True },\n 'custom_node_identifier': {\n 'description': ('Attribute name and value to assign to custom nodes '\n 'to identify them as custom. 
If None, or if \"identify_custom_nodes\" is'\n 'not True, then custom nodes are not identified.'),\n 'default': ['schema_id', 'Custom']}, # for NWB will be \"neurodata_type\", \"Custom\"\n 'identify_extension_nodes': {\n 'description': ('Add and attribute to nodes (groups and datasets) defined in '\n 'an extension (schema not in the default namespace) to indicate they are '\n 'defined by the extension.'),\n 'values': {\n True: 'yes, identify them by including the extension_node_identifier',\n False: 'no, do not include the extension node identifier'},\n 'default': True },\n 'extension_node_identifier': {\n 'description': ('Attribute name to assign to nodes (groups and datasets) '\n 'defined by extensions to indicate they are defined by extensions. '\n 'The attribute value will be the identifier of the extension'),\n 'default': 'schema_id'},\n 'identify_normal_nodes': {\n 'description': ('Include attribute and value identifying normal nodes'\n ' (not custom and not defined by extension)'),\n 'values': {\n True: 'yes, include attribute identifying normal nodes',\n False: 'no, do not include id'},\n 'default': False },\n 'normal_node_identifier': {\n 'description': ('Attribute to use for storing value identifying normal '\n '(not custom and not extension) nodes.'),\n 'default': 'schema_id', },\n 'auto_compress': {\n 'description': ('Automatically compress datasets.'),\n 'values': {\n True: 'yes, compress',\n False: 'no, do not compress'},\n 'default': True },\n 'storage_method': {\n 'description': ('Method used to store data. This allows for storing'\n ' data using different storage methods.'),\n 'values': {\n 'hdf5': 'Data stored in hdf5 file using h5py',\n 'commands': ('Data not stored using this code. Commands to store data'\n ' are saved in self.h5commands for processing by'\n ' a calling program, e.g. MatLab.'),},\n 'default': 'hdf5', },\n 'mode': {\n 'description': ('Mode used to access file. Currently only \"w\" or \"w-\" works'\n ' with matlab front-end (that uses storage_method \"commands\").'),\n 'values': {\n 'r': 'Readonly, file must exist. (currently only used for validation).',\n 'r+': 'read/write, file must exist.',\n 'w': 'Create file, replacing if exists.',\n 'w-': 'Create file, fail if exists.',\n 'a': 'Read/write if exists, create otherwise',\n 'no_file': ('Do not read or open a file. This used for generating '\n 'documentation from specification files'), \n },\n 'default': 'w', },\n 'copy_append': {\n 'description': ('Indicates whether to make a copy of the file before '\n 'appending to it if using mode \"r+\" or \"a\" to append to an existing '\n 'file.'),\n 'values': {\n True: 'Make a copy of file before appending to it.',\n False: 'Do not make a copy. Append directly to the exiting file.'},\n 'default': True },\n 'keep_original': {\n 'description': ('If True, and mode is \"r+\", \"w\", or \"a\" (i.e. a mode that can '\n 'change or replace a file), keep a backup copy of any original. The '\n 'backup will be saved with the name \"<filename>.prev\".'),\n 'values': {\n True: 'Keep backup copy',\n False: 'Do not keep backup copy'},\n 'default': True },\n 'use_default_size': {\n 'description': ('When creating a dataset (via call to \"set_dataset\") and '\n 'if the dtype parameter is not specified, should the default data type '\n 'size be used? If the data sizes are already optimized before calling '\n 'set_dataset, then this option could be set to False to avoid having to '\n 'always specify the dtype explicitly in the calls to set_dataset. 
If '\n 'automatic conversion to the default sizes are desired if dtype is not '\n 'specified, then this should be set True'),\n 'values': {\n True: 'Yes, do automatic conversion to default data type and size',\n False: 'No. Do not automatically convert data types'},\n 'default': True},\n 'save_specs': {\n 'description': ('If True, and if a dataset with id \"<specification_file>\" '\n 'is defined in one of the name spaces, the contents of the format '\n 'specifications files will be saved into the HDF5 file in the '\n '\"<specification_file>\" dataset(s). The name of each dataset will be '\n 'the name of the specification file, and attribute namespaces will be '\n 'set to the namespaces defined in that specification file. '\n 'If the this is False, or if a dataset with id \"<specification_file>\" '\n 'is not found, then the format specification files are not saved '\n 'in the hdf5 file.'),\n 'values': {\n True: 'Save specification files in HDF5 file',\n False: 'Do not save specification files in HDF5 file.' },\n 'default': True},\n 'specs_location': {\n 'description': ('Group within hdf5 file that will contain format specification '\n 'files. This used when loading specification files from a created '\n 'hdf5 file (when reading). If empty, or if format specifications are provided '\n 'in parameter \"spec_files\" then format specifications will not be read '\n 'from the hdf5 file.'),\n 'default': '/general/specifications' # used in NWB format\n },\n 'verbosity': {\n 'description': ('Controls how much is displayed in validation report.'),\n 'values': {\n 'none': 'Display nothing. (Useful for unit tests).',\n 'summary': 'Display summary of validation report.',\n 'all': 'Display everything.'},\n 'default': 'all' },\n }\n errors = []\n # for opt, value in self.options.iteritems():\n for opt in self.options:\n value = self.options[opt]\n if opt not in all_options:\n errors.append(\"Invalid option specified (%s)\" % opt)\n elif 'values' in all_options[opt] and value not in all_options[opt]['values']:\n errors.append((\"Invalid value specified for option (%s), should be\"\n \" one of:\\n%s\") % (opt, all_options[opt]['values'].keys()))\n elif opt == 'custom_node_identifier':\n # validate 'custom_node_identifier' separately\n if not (isinstance(value, (list, tuple)) and len(value) == 2 \n and isinstance(value[0], str) and isinstance(value[1], str)):\n errors.append((\"Invalid value for option 'custom_node_identifer', \"\n \"must be [attribute_id, value], is: %s\") % value)\n if errors:\n print (\"\\n\".join(errors))\n print (\"valid options are:\")\n pp.pprint(all_options)\n error_exit()\n # Add default values for options that were not specified\n for opt in all_options:\n if opt not in self.options:\n self.options[opt] = all_options[opt]['default']\n # print \"After adding defaults, options are:\"\n # pp.pprint(self.options)\n # sys.exit(0)", "title": "" }, { "docid": "1bc1d72fec9985a617e0f768af2ffb9c", "score": "0.49263665", "text": "def _parse_specline(self, specline):\n # Parse into parts\n media_type, renderer = parse_specline(specline)\n\n if media_type == '':\n # no media type specified, use the default\n media_type = self.default_media_type\n if renderer == '':\n # no renderer specified, use the default\n renderer = self.defaults.renderers_by_media_type[media_type]\n\n # Validate media type.\n if media_type_re.match(media_type) is None:\n msg = (\"Malformed media type '%s' in specline '%s'. 
It must match \"\n \"%s.\")\n msg %= (media_type, specline, media_type_re.pattern)\n raise SyntaxError(msg)\n\n # Hydrate and validate renderer.\n make_renderer = self._get_renderer_factory(media_type, renderer)\n\n # Return.\n return (make_renderer, media_type)", "title": "" }, { "docid": "63dec53eb2f99b9e30f5ac64679f8307", "score": "0.49248078", "text": "def parse(self, filename): \n file = open(filename, 'r')\n self.filename = filename\n return self.__genStructure(file)", "title": "" }, { "docid": "1a33d830a8faf7f598f1e958ffed2ec9", "score": "0.49076137", "text": "def init_from_yaml(self, yaml_data):\n\n self.__yaml_data = yaml_data\n self._parse_common()\n self._parse_parameters()\n self._parse_windows()", "title": "" }, { "docid": "1ec58be07ac9147c2dbaeebc329830b9", "score": "0.49060765", "text": "def create_from_yaml(self):\n with open(path.join(path.dirname(__file__),\n self._spec_filename)) as yaml_file:\n self.body = yaml.safe_load(yaml_file)\n\n self.body[\"metadata\"][\"name\"] = self._name\n\n if (self._nodeSelector_hostname is not None):\n if (\"nodeSelector\" not in self.body[\"spec\"]):\n self.body[\"spec\"][\"nodeSelector\"] = {}\n self.body[\"spec\"][\"nodeSelector\"][\"kubernetes.io/hostname\"] = \\\n self._nodeSelector_hostname\n self._log.debug(\"Creating POD, body:\\n%s\" % self.body)\n\n try:\n self.k8s_CoreV1Api.create_namespaced_pod(body = self.body,\n namespace = self._namespace)\n except client.rest.ApiException as e:\n self._log.error(\"Couldn't create POD %s!\\n%s\\n\" % (self._name,\n e))", "title": "" }, { "docid": "35e77d29ba0250632d92b05340f79a20", "score": "0.4904299", "text": "def readYAML(self):\n #create path to YAMLs\n dir_path = os.path.dirname(os.path.realpath(__file__))\n pathL = os.path.join(dir_path, '../launch/Camera/camera_left_parameters.yaml')\n pathR = os.path.join(dir_path, '../launch/Camera/camera_right_parameters.yaml')\n\n #load YAML files\n with open(pathL, 'r') as f:\n camLParams = yaml.load(f) \n with open(pathR, 'r') as f:\n camRParams = yaml.load(f) \n\n #extract attributes in proper shape\n self.LK = np.reshape(np.array(camLParams[\"camera_matrix\"][\"data\"]), (3,3)) \n self.LProjMat = np.reshape(np.array(camLParams[\"projection_matrix\"][\"data\"]), (3,4)) \n self.RK = np.reshape(np.array(camRParams[\"camera_matrix\"][\"data\"]), (3,3)) \n self.RProjMat = np.reshape(np.array(camRParams[\"projection_matrix\"][\"data\"]), (3,4))", "title": "" }, { "docid": "17f629b4dece5394e96568d4d512a0d2", "score": "0.489974", "text": "def from_yaml_tree(self, node, tag, ctx):", "title": "" }, { "docid": "036c54dc5f12420701f38494552ce340", "score": "0.48948917", "text": "def parse(self):\r\n #=== Stage=1 started Fri Apr 18 21:09:55 2008\r\n match_stage = re.compile(r\"===\\s+(?:Stage=)?(.+)\\s+(started|finished)\\s+(.+)\")\r\n\r\n # === Stage=1 == ncp_psw \r\n match_component_start = re.compile(r\"===\\s+(?:Stage=)?(.+?)\\s+==\\s+(.+)\")\r\n match_component_finished = re.compile(r\"\\+\\+\\s+Finished\\s+at\")\r\n # === Stage=1 == ncp_psw \r\n match_component_cmdline = re.compile(r\"--\\s+(.+)\")\r\n match_component_chdir = re.compile(r\"Chdir\\s+(.+)|cd\\s+(.*?)\\s+.*\")\r\n component_name = None\r\n cmdline = None\r\n chdir = None\r\n content = StringIO.StringIO()\r\n \r\n # parsing the content\r\n for line in self.__file:\r\n line = line.strip()\r\n _logger.debug(line)\r\n if component_name == None:\r\n _logger.debug(\"Searching stage\")\r\n m_match = match_stage.match(line)\r\n _logger.debug(m_match)\r\n if m_match != None:\r\n 
_logger.debug(\"Found stage %s, %s\" % (m_match.group(2), m_match.group(3)))\r\n if m_match.group(2) == \"started\":\r\n self.start_stage(m_match.group(1), m_match.group(3))\r\n else: \r\n component_name = None \r\n cmdline = None\r\n chdir = None\r\n content = StringIO.StringIO()\r\n self.end_stage(m_match.group(1), m_match.group(3))\r\n else:\r\n _logger.debug(\"Searching for component\")\r\n m_match = match_component_start.match(line)\r\n if m_match != None:\r\n _logger.debug(\"Found component: %s\" % m_match.group(2))\r\n component_name = m_match.group(2)\r\n else:\r\n _logger.debug(\"Searching for component end\")\r\n m_match = match_component_finished.match(line)\r\n if m_match != None:\r\n self.task(component_name, cmdline, chdir, content.getvalue())\r\n component_name = None\r\n cmdline = None\r\n chdir = None\r\n content = StringIO.StringIO()\r\n if cmdline == None:\r\n _logger.debug(\"Searching for component command line\")\r\n m_match = match_component_cmdline.match(line)\r\n if m_match != None:\r\n _logger.debug(\"Found command line: %s\" % m_match.group(1))\r\n cmdline = m_match.group(1)\r\n else:\r\n _logger.debug(\"Searching for component dir\")\r\n if chdir == None:\r\n m_match = match_component_chdir.match(line)\r\n if m_match != None:\r\n chdir = m_match.group(1)\r\n if chdir == None:\r\n chdir = m_match.group(2)\r\n _logger.debug(\"Found dir: %s\" % chdir)\r\n continue\r\n if not line.startswith(\"++ \") and not line.startswith(\"+++ \"):\r\n _logger.debug(\"Adding content\")\r\n content.write(line + \"\\n\")", "title": "" }, { "docid": "6a83e47ca86e234dd1b42a254183f504", "score": "0.48884976", "text": "def parse_spec(specfile, scan):\n # Scan numbers start at one but the list is 0 indexed\n try:\n ss = spec.SPECFile(specfile)[scan - 1]\n except Exception as ex:\n print(str(ex))\n print ('Could not parse ' + specfile )\n return None,None,None,None,None,None,None,None,None,None\n\n # Stuff from the header\n try:\n detector_name = str(ss.getheader_element('UIMDET'))\n except:\n detector_name = None\n try:\n command = ss.command.split()\n scanmot = command[1]\n scanmot_del = (float(command[3]) - float(command[2])) / int(command[4])\n except:\n scanmot = None\n scanmot_del = None\n\n # Motor stuff from the header\n try:\n delta = ss.init_motor_pos['INIT_MOPO_Delta']\n except:\n delta = None\n try:\n gamma = ss.init_motor_pos['INIT_MOPO_Gamma']\n except:\n gamma = None\n try:\n theta = ss.init_motor_pos['INIT_MOPO_Theta']\n except:\n theta = None\n try:\n phi = ss.init_motor_pos['INIT_MOPO_Phi']\n except:\n phi = None\n try:\n chi = ss.init_motor_pos['INIT_MOPO_Chi']\n except:\n chi = None\n try:\n detdist = ss.init_motor_pos['INIT_MOPO_camdist']\n except:\n detdist = None\n try:\n energy = ss.init_motor_pos['INIT_MOPO_Energy']\n except:\n energy = None\n\n # returning the scan motor name as well. Sometimes we scan things\n # other than theta. 
So we need to expand the capability of the display\n # code.\n return delta, gamma, theta, phi, chi, scanmot, scanmot_del, detdist, detector_name, energy", "title": "" }, { "docid": "477423fe961716ab75cbbed122dcab98", "score": "0.48851117", "text": "def load_schema(self) -> dict:\n with open(self.path, encoding=\"utf-8\") as file:\n content = file.read()\n return json.loads(content) if \".json\" in self.path else yaml.load(content, Loader=yaml.FullLoader)", "title": "" }, { "docid": "caeeed4b8dfdc4b1c96671dd7139953f", "score": "0.48833665", "text": "def init_parse(f_name: str) -> Generator[Tuple[str, np.ndarray], None, None]:\n assert isinstance(f_name, str)\n with open(f_name) as f:\n for line in f.readlines():\n line = line.strip(\"\\n\")\n if not line or line[0] == \"#\":\n continue\n try:\n tag, val = line.split(\":\")\n val = np.array(re.split(\" ,|,\", val)).astype(float)\n except ValueError:\n raise SyntaxError(\"Invalid Syntax\")\n assert isinstance(tag, str)\n assert isinstance(val, np.ndarray)\n\n if tag not in {\"boundary\", \"obstacle\", \"start\", \"goal\"}:\n raise SyntaxError(\"Invalid keyword\")\n if tag in {\"boundary\", \"obstacle\"}:\n if len(val) != 6:\n raise ValueError(\"Invalid Size\")\n else:\n if len(val) != 3:\n raise ValueError(\"Invalid Size\")\n\n yield tag, val", "title": "" }, { "docid": "1550f8952a52e5a7f469c9724164da94", "score": "0.48815838", "text": "def __init__(self, file, sections=None):\n\n #Platefiles may not have spaces in their filenames\n if ' ' in os.path.basename(file):\n raise InvalidPlate('Filenames may not have spaces\\n')\n \n #Init the parser\n ConfigParser.RawConfigParser.__init__(self)\n #self.optionxform=str\n self.plate_filename=file\n \n if sections:\n self._load_from_data(sections)\n else:\n \n #Load the file\n with open(self.plate_filename,'r') as configFile:\n try:\n self.readfp(configFile)\n except ConfigParser.ParsingError as e:\n raise InvalidPlate(str(e)+'\\n')\n if self.has_option('Plate','std_offset'):\n self.set('Plate','offset',self.get('Plate','std_offset'))\n self.remove_option('Plate','std_offset')\n\n #If errors abort\n #errs=self._vet()\n errs=[]\n if errs:\n raise InvalidPlate('\\n'.join(errs))", "title": "" }, { "docid": "dab8d76d1e2a7b52ba884e06237a7b0e", "score": "0.48759758", "text": "def setup_parser(self) -> Iterable[ParserDef]:\n # Parser estimation part of `gdcov` file\n\n # ----+----1----+----2----+----3----+----4----+----5----+----6----+----\n # 3 PARAMETERS\n # 1 USN3.STA.X 376018350 1.112162030692846e+06 6.422311946865588e-04\n # 2 USN3.STA.Y 376018350 -4.842853530993107e+06 1.555844558128825e-03\n # 3 USN3.STA.Z 376018350 3.985496029611300e+06 1.247592374291492e-03\n # 2 1 -5.741554474985751e-01\n # 3 1 5.007734002791966e-01\n # 3 2 -8.214416655688096e-01\n data_parser = ParserDef(\n end_marker=lambda line, _ln, _n: False,\n end_callback=lambda line: self._parse_correlation,\n label=lambda line, _ln: not re.match(\"^\\d+ \\d+\", line),\n skip_line=lambda line: \"PARAMETERS\" in line,\n parser_def={\n # ----+----1----+----2----+----3----+----4----+----5----+----6----+--\n # 1 USN3.STA.X 376018350 1.112162030692846e+06 6.422311946865588e-04\n True: {\n \"parser\": self._parse_estimate,\n \"delimiter\": \" \",\n \"fields\": [\"_\", \"name\", \"time_past_j2000\", \"estimate\", \"sigma\"],\n },\n # ----+----1----+----2----+---\n # 2 1 -5.741554474985751e-01\n False: {\n \"parser\": self._parse_correlation,\n \"delimiter\": \" \",\n \"fields\": [\"row\", \"column\", \"correlation\"],\n },\n },\n )\n\n return 
itertools.repeat(data_parser)", "title": "" }, { "docid": "be3e4d360b39d4f712b2e6c556d178d6", "score": "0.48758778", "text": "def check_yaml_style(yaml_spec):\n with open(yaml_spec, \"r\", encoding=\"utf-8\") as file_stream:\n try:\n yaml.safe_load(file_stream)\n # pylint: disable=broad-except\n except Exception as exception:\n print(str(exception))", "title": "" }, { "docid": "fcbd9dd37b37563d3a5aa2a3b8e94eb6", "score": "0.48604593", "text": "def init_from_metadata(self):\n metadata = load_yaml(self.metadata_fp)\n self.name = metadata[\"name\"]\n self.type = metadata[\"type\"]\n self.title = metadata[\"title\"]\n self.summary = metadata[\"summary\"]\n self.requirements = []\n if \"requirements\" in metadata:\n for r in metadata[\"requirements\"]:\n req = Requirement()\n req.init_from_dict(r)\n self.requirements.append(req)\n if \"docker_image\" in metadata:\n self.docker_image = metadata[\"docker_image\"]\n self.maintainers = metadata[\"maintainers\"]\n self.set_paths()", "title": "" }, { "docid": "ed286e2cafc9c9ad0792bccc9ddc2819", "score": "0.48550245", "text": "def _deserialize_task_spec(self, workflow, start_node, read_specs):\r\n # Extract attributes from the node.\r\n nodetype = start_node.nodeName.lower()\r\n name = start_node.getAttribute('name').lower()\r\n context = start_node.getAttribute('context').lower()\r\n mutex = start_node.getAttribute('mutex').lower()\r\n cancel = start_node.getAttribute('cancel').lower()\r\n success = start_node.getAttribute('success').lower()\r\n times = start_node.getAttribute('times').lower()\r\n times_field = start_node.getAttribute('times-field').lower()\r\n threshold = start_node.getAttribute('threshold').lower()\r\n threshold_field = start_node.getAttribute('threshold-field').lower()\r\n file = start_node.getAttribute('file').lower()\r\n file_field = start_node.getAttribute('file-field').lower()\r\n kwargs = {'lock': [],\r\n 'data': {},\r\n 'defines': {},\r\n 'pre_assign': [],\r\n 'post_assign': []}\r\n if nodetype not in _spec_map:\r\n _exc('Invalid task type \"%s\"' % nodetype)\r\n if nodetype == 'start-task':\r\n name = 'start'\r\n if name == '':\r\n _exc('Invalid task name \"%s\"' % name)\r\n if name in read_specs:\r\n _exc('Duplicate task name \"%s\"' % name)\r\n if cancel != '' and cancel != u'0':\r\n kwargs['cancel'] = True\r\n if success != '' and success != u'0':\r\n kwargs['success'] = True\r\n if times != '':\r\n kwargs['times'] = int(times)\r\n if times_field != '':\r\n kwargs['times'] = operators.Attrib(times_field)\r\n if threshold != '':\r\n kwargs['threshold'] = int(threshold)\r\n if threshold_field != '':\r\n kwargs['threshold'] = operators.Attrib(threshold_field)\r\n if file != '':\r\n kwargs['file'] = file\r\n if file_field != '':\r\n kwargs['file'] = operators.Attrib(file_field)\r\n if nodetype == 'choose':\r\n kwargs['choice'] = []\r\n if nodetype == 'trigger':\r\n context = [context]\r\n if mutex != '':\r\n context = mutex\r\n\r\n # Walk through the children of the node.\r\n successors = []\r\n for node in start_node.childNodes:\r\n if node.nodeType != minidom.Node.ELEMENT_NODE:\r\n continue\r\n if node.nodeName == 'description':\r\n kwargs['description'] = node.firstChild.nodeValue\r\n elif node.nodeName == 'successor' \\\r\n or node.nodeName == 'default-successor':\r\n if node.firstChild is None:\r\n _exc('Empty %s tag' % node.nodeName)\r\n successors.append((None, node.firstChild.nodeValue))\r\n elif node.nodeName == 'conditional-successor':\r\n successors.append(self._deserialize_condition(workflow, node))\r\n elif 
node.nodeName == 'define':\r\n key, value = self._deserialize_data(workflow, node)\r\n kwargs['defines'][key] = value\r\n # \"property\" tag exists for backward compatibility.\r\n elif node.nodeName == 'data' or node.nodeName == 'property':\r\n key, value = self._deserialize_data(workflow, node)\r\n kwargs['data'][key] = value\r\n elif node.nodeName == 'pre-assign':\r\n kwargs['pre_assign'].append(self._deserialize_assign(workflow, node))\r\n elif node.nodeName == 'post-assign':\r\n kwargs['post_assign'].append(self._deserialize_assign(workflow, node))\r\n elif node.nodeName == 'in':\r\n kwargs['in_assign'] = self._deserialize_assign_list(workflow, node)\r\n elif node.nodeName == 'out':\r\n kwargs['out_assign'] = self._deserialize_assign_list(workflow, node)\r\n elif node.nodeName == 'cancel':\r\n if node.firstChild is None:\r\n _exc('Empty %s tag' % node.nodeName)\r\n if context == '':\r\n context = []\r\n elif type(context) != type([]):\r\n context = [context]\r\n context.append(node.firstChild.nodeValue)\r\n elif node.nodeName == 'lock':\r\n if node.firstChild is None:\r\n _exc('Empty %s tag' % node.nodeName)\r\n kwargs['lock'].append(node.firstChild.nodeValue)\r\n elif node.nodeName == 'pick':\r\n if node.firstChild is None:\r\n _exc('Empty %s tag' % node.nodeName)\r\n kwargs['choice'].append(node.firstChild.nodeValue)\r\n else:\r\n _exc('Unknown node: %s' % node.nodeName)\r\n\r\n # Create a new instance of the task spec.\r\n module = _spec_map[nodetype]\r\n if nodetype == 'start-task':\r\n spec = module(workflow, **kwargs)\r\n elif nodetype == 'multi-instance' or nodetype == 'thread-split':\r\n if times == '' and times_field == '':\r\n _exc('Missing \"times\" or \"times-field\" in \"%s\"' % name)\r\n elif times != '' and times_field != '':\r\n _exc('Both, \"times\" and \"times-field\" in \"%s\"' % name)\r\n spec = module(workflow, name, **kwargs)\r\n elif context == '':\r\n spec = module(workflow, name, **kwargs)\r\n else:\r\n spec = module(workflow, name, context, **kwargs)\r\n\r\n read_specs[name] = spec, successors", "title": "" } ]
cb0ddeca3dc7638e06e9b469917ee008
Returns the string representation of the model
[ { "docid": "0f283634439620e9ecd215d87804ec00", "score": "0.0", "text": "def to_str(self):\n return pprint.pformat(self.to_dict())", "title": "" } ]
[ { "docid": "aa510b1d67cd504f00d31d7600881756", "score": "0.8602374", "text": "def _repr_model_(self):\n return str(self)", "title": "" }, { "docid": "0fbfd148e937420716433d412dc3b6bf", "score": "0.85093755", "text": "def __str__(self):\n\n return str(self.model)", "title": "" }, { "docid": "62ca1478cc86da1839a211294cee0b1d", "score": "0.8237104", "text": "def get_model_string(self):\n raise NotImplementedError()", "title": "" }, { "docid": "7dc885389a3126dc5e84610b3dcbac00", "score": "0.8050309", "text": "def __str__(self):\n return self.model", "title": "" }, { "docid": "aaab87bfb8340777556763630c38cb8c", "score": "0.80128866", "text": "def serialize_model(self):\n return dill.dumps(self.model)", "title": "" }, { "docid": "dd1cd9a847e8c59a90304da497bd517e", "score": "0.79348034", "text": "def __str__(self):\n # easy enough\n return self._modelName", "title": "" }, { "docid": "7a3f822b3d6ef7ae767af2a7b64504b9", "score": "0.78766525", "text": "def __repr__(self):\n s = '' \n s += 'Model Name: ' + self._name + '\\n'\n s += 'Machine Learning Model: ' + str(type(self._ml)) + '\\n'\n s += 'Optimization Model: ' + str(type(self._mdl)) + '\\n'\n # s += json.dumps(self._exps, indent=4, sort_keys=True)\n s += str(self._exps)\n return s", "title": "" }, { "docid": "51b42a4dd9cae98ba60a80d1750f2a97", "score": "0.77898884", "text": "def __str__(self):\n return '%s%s' % (self.name, ' - %s' % self.model if self.model else '')", "title": "" }, { "docid": "8c5c25f83013c5dee28ff9fedfd2b390", "score": "0.7786461", "text": "def __str__(self):\n return self.serialize()", "title": "" }, { "docid": "11a5a8f245bd7658683861a7f12d980f", "score": "0.7745792", "text": "def __repr__(self):\n cls = self.__class__\n return '{0}({1})'.format(cls.__name__, pprint.pformat(self.__model__))", "title": "" }, { "docid": "48503da44088a3644ca275528f5f27e0", "score": "0.7627315", "text": "def __str__(self):\n\n return self.to_str()", "title": "" }, { "docid": "b9005c9aa5a23b6f5a218792dcaff4ce", "score": "0.76055235", "text": "def __str__(self):\n return self.to_string()", "title": "" }, { "docid": "3e500952efc1caa82e6f025b06d1224d", "score": "0.757999", "text": "def __str__(self):\n return BaseModel.__str_fmt.format(self.__class__.__name__,\n self.id, self.__dict__)\n # return \"[{}] ({}) {}\".format(*args)", "title": "" }, { "docid": "7db7907f0e0c34d4c066293052870f73", "score": "0.7514394", "text": "def __repr__(self):\n s = \"\\nModel name: \" + str(self.name) + \"\\n\"\n s += \" n. of words: \" + str(len(self.words)) + \"\\n\"\n s += \" n. of word lengths: \" + str(len(self.wordlengths)) + \"\\n\"\n s += \" n. of sentence lengths: \" + str(len(self.sentencelengths)) + \"\\n\"\n s += \" n. 
of stems: \" + str(len(self.stems)) + \"\n\"\n # you will likely want another line for your custom text-feature!\n return s", "title": "" }, { "docid": "1fc4954becc2117216f0e1c5df935b06", "score": "0.75038797", "text": "def __str__(self):\n return self.__s", "title": "" }, { "docid": "d4da4f6aed41e6543a59f3cf5cb3d06d", "score": "0.75035286", "text": "def __repr__(self):\n # Any better repr ideas?\n return f'<{self.__class__.__name__} of {self.model.name!r}>'", "title": "" }, { "docid": "874c5576c99ef409975d1f18a7c5b78e", "score": "0.74850684", "text": "def __str__(self):\n return str(self.__dict__)", "title": "" }, { "docid": "08f97b75eccc822dd26187d9c66692e9", "score": "0.7440529", "text": "def __str__(self):\n return self.toString(verbose)", "title": "" }, { "docid": "a0213283ff2325f744efad5c67240737", "score": "0.74325883", "text": "def __str__(self):\n return object.__str__(self)", "title": "" }, { "docid": "d428b6b305a3b3c1b513a5c6239822da", "score": "0.74239755", "text": "def __str__(self):\n return obj_to_str(self, ['args', 'simulation', 'dynamic_compartments', 'dynamic_model',\n 'init_populations', 'local_species_population', 'model',\n 'checkpointing_sim_obj', 'simulation_submodels'])", "title": "" }, { "docid": "934d896068dc8b34f156d6a5153f72aa", "score": "0.74052083", "text": "def __str__(self):\n return self._obj", "title": "" }, { "docid": "c9218259eff802474facfcd4cc76b8e3", "score": "0.73983157", "text": "def __str__(self):\n ret_string = (\n f\"{self.name} isotherm model.\\n\"\n f\"RMSE = {self.rmse:.4g}\\n\"\n \"Model parameters:\\n\"\n )\n for param, val in self.params.items():\n ret_string += f\"\\t{param} = {val:.4g}\\n\"\n ret_string += (\n \"Model applicable range:\\n\" +\n f\"\\tPressure range: {self.pressure_range[0]:.3g} - {self.pressure_range[1]:.3g}\\n\"\n f\"\\tLoading range: {self.loading_range[0]:.3g} - {self.loading_range[1]:.3g}\\n\"\n )\n\n return ret_string", "title": "" }, { "docid": "44382bc17a3eb265c100b74213125abd", "score": "0.73930556", "text": "def __str__(self):\n return self.representation_string", "title": "" }, { "docid": "64efbb37fe5045e209c1d2dc7eb142d6", "score": "0.7379557", "text": "def to_string(self):\r\n return self.__str__()", "title": "" }, { "docid": "011c22541b6815f0ab74b57643ac94ad", "score": "0.73496455", "text": "def __str__(self):\n\n return self.__repr__()", "title": "" }, { "docid": "92ab322d0d619f8b57b7814f131ed7d2", "score": "0.7330686", "text": "def serialize(self):\n return str(self)", "title": "" }, { "docid": "db098c5d60b46aa787a417bd4268e2e8", "score": "0.7327901", "text": "def __str__(self):\n return 'This is an NGramModel object'", "title": "" }, { "docid": "5c4e6216289b435a09b864f20716c98e", "score": "0.73114914", "text": "def __str__(self):\n d = self.to_json()\n return str(d)", "title": "" }, { "docid": "cc56604d6bfa131dee74369a2ec636f8", "score": "0.7287187", "text": "def __str__(self):\n return self.__repr__()", "title": "" }, { "docid": "161115709611109ea799a0abb7b6e7b0", "score": "0.7280258", "text": "def toString(self):\n return self.__str__()", "title": "" }, { "docid": "8b129c57c783d01bd2eb13e7d9cc23e9", "score": "0.726504", "text": "def __str__(self) -> str:\n return self._repr(verbose=False)", "title": "" }, { "docid": "34b0cfe72f84a912a3e53f56da7f8b87", "score": "0.72506684", "text": "def __str__(self) -> str:\n return self.build_str()", "title": "" }, { "docid": "2d3aa200270f6fe17fb0e30be251dc65", "score": "0.7237736", "text": "def __repr__ (self):\n return self.to_str()", "title": "" }, { "docid": "f77cc89554afde82d98bdfee13a8bb9a", "score": "0.7231796", "text": "def __repr__(self) -> str:\n\n r = \"Model Dimensions: {}x{} elements\\n\".format(self.x, self.y)\n r += \"Model Seed: {}\\n\".format(self.seed)\n r += \"Number Of Models Generated: {}\\n\".format(self.mod_n)\n r += \"Model Scale: 1:{}\\n\".format(self.scale)\n r += \"Number Of Hidden Layers: {}\\n\".format(self.hl_n)\n r += \"Size Of Initial Hidden Layer: {}\\n\".format(self.hl_s)\n if self.thresh < 1:\n r += \"Rounding Threshold: {}\\n\".format(self.thresh)\n else:\n r += \"Percentage Of Elements Removed: {}%\\n\".format(self.thresh)\n r += \"Activation Functions:\\n\"\n for i in self.af:\n r += \"{}\\n\".format(i)\n\n return r", "title": "" }, { "docid": "5291f3810d9c809c0225c193f801ea9b", "score": "0.72291917", "text": "def __str__(self):\n\t\t\n\t\treturn self.__getValueWithType(str)", "title": "" }, { "docid": "5d9b0cd3a43129f2b1439c997c05d70a", "score": "0.7228918", "text": "def __str__(self):\n\n return super().__str__()", "title": "" }, { "docid": "21d3e8b081f61967c619abd9f0c4c9d4", "score": "0.7225833", "text": "def __repr__(self):\n identity = inspect(self).identity\n if identity is not None:\n identity = ', '.join(map(str, identity))\n model_name = type(self).__name__\n if hasattr(self, '__repr_args__'):\n args = ((arg, getattr(self, arg)) for arg in self.__repr_args__)\n argstring = ', '.join(f'{arg}={val!r}' for arg, val in args)\n return f'<{model_name} [{identity}]: {argstring}>'\n return f'<{model_name} [{identity}]>'", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.7223068", "text": "def __repr__(self):\n return self.to_str()", "title": "" } ]
8cf8965b43b8078af4ecb4b9011ddce9
Sets the com_adobe_granite_dropwizard_metrics of this ComAdobeGraniteApicontrollerFilterResolverHookFactoryProperties.
[ { "docid": "b9ae26c2fabe116fd5b071ee4d5f46af", "score": "0.6842898", "text": "def com_adobe_granite_dropwizard_metrics(self, com_adobe_granite_dropwizard_metrics: ConfigNodePropertyString):\n\n self._com_adobe_granite_dropwizard_metrics = com_adobe_granite_dropwizard_metrics", "title": "" } ]
[ { "docid": "5237d670163db01b323ffb3fb5e1c006", "score": "0.64473355", "text": "def set_metrics(self):\n pass", "title": "" }, { "docid": "e0ce85d34ef93a98035a9f9937453ade", "score": "0.60261935", "text": "def com_adobe_granite_dropwizard_metrics(self) -> ConfigNodePropertyString:\n return self._com_adobe_granite_dropwizard_metrics", "title": "" }, { "docid": "869be8edc977bd0d90e95961841e393c", "score": "0.5651551", "text": "def metrics(self, metrics):\n\n self._metrics = metrics", "title": "" }, { "docid": "276f8a045d2e5c36f240f7ec52a06e3f", "score": "0.51721", "text": "def setup(self, names, objects, period, nsamples):\r\n self.collector.setupMetrics(names, objects, period, nsamples)", "title": "" }, { "docid": "f73c990d91a6d5db13be9511bfc25133", "score": "0.51572543", "text": "def configure_metrics(self, metrics_client):\n from .diagnostics.metrics import MetricsBaseplateObserver\n self.register(MetricsBaseplateObserver(metrics_client))", "title": "" }, { "docid": "219c0d0dc43ac3d4f2a8ddf288f07b33", "score": "0.5138985", "text": "def generate_metrics(self):", "title": "" }, { "docid": "56e88e66c537741a433016541ce75089", "score": "0.5136162", "text": "def collect_metrics(self):\n pass", "title": "" }, { "docid": "d0df8015eeacf1b9fe0130589403993e", "score": "0.500692", "text": "def metrics(self, *args, **kwargs):\n return {}", "title": "" }, { "docid": "f5dedfff270ee9994763ea27a92f39b9", "score": "0.49656093", "text": "def metrics(self):\n if self._metrics is None:\n self._metrics = FnApiMetrics(\n self._monitoring_infos_by_stage, user_metrics_only=True)\n return self._metrics", "title": "" }, { "docid": "d4243512b6e38e76419f72ef22f60dd7", "score": "0.49640882", "text": "def metrics(self):\n raise NotImplementedError()", "title": "" }, { "docid": "52b85749f2944f46d03760fb8ad2dade", "score": "0.49601188", "text": "def configure(self, cfg, influxdb, metrics):\n self.config.extend(autoscaler_config, cfg, influxdb, metrics)\n super().configure()", "title": "" }, { "docid": "56fb513de8d124d46ac4a35590c17afb", "score": "0.49549416", "text": "def setup_metrics(app):\n app.before_request(before_request)\n app.after_request(after_request)\n\n @app.route('/metrics')\n def metrics():\n # update k8s metrics each time this url is called.\n global PROMETHEUS_METRICS\n PROMETHEUS_METRICS = get_k8s_metrics()\n return Response(prometheus_client.generate_latest(), mimetype='text/plain; version=0.0.4; charset=utf-8')", "title": "" }, { "docid": "e570e65810833a2e266dcc3dd2edfe34", "score": "0.4950466", "text": "def metrics(self, metrics=None):\n\n if metrics is None:\n metrics = []\n\n self._space[\"metrics\"] = metrics", "title": "" }, { "docid": "c3b6d45ef0bef20c806fcf7610549410", "score": "0.49346313", "text": "def initialize_metrics(self,metric: Metrics):\n\n self._avr_budg_impr = metric.budget / metric.n_impression_left\n\n self._last_factor_buget = 1\n self._last_factor_utility = 1\n self._last_effi = 1", "title": "" }, { "docid": "077e6837357d4fe566b58573fd175624", "score": "0.4920903", "text": "def collect_metrics():\n def _register(action):\n handler = Handler.get(action)\n handler.add_predicate(partial(_restricted_hook, 'collect-metrics'))\n return action\n return _register", "title": "" }, { "docid": "d5c3bb9fd1bc7643fff7c7231034bf92", "score": "0.49083126", "text": "def collect_metrics(self):\n raise NotImplementedError", "title": "" }, { "docid": 
"7f83063850e0834dacb8ba254dfa4b17", "score": "0.48915565", "text": "def metrics(self, metrics):\n if self.local_vars_configuration.client_side_validation and metrics is None: # noqa: E501\n raise ValueError(\"Invalid value for `metrics`, must not be `None`\") # noqa: E501\n\n self._metrics = metrics", "title": "" }, { "docid": "30f3da0851eeb657ecb12a478ef94177", "score": "0.48898855", "text": "def _metrics(self):\n m = Metrics()\n m.daemon = True\n m.start()", "title": "" }, { "docid": "c58c6d0fe4cee4726148e74bffcd48b9", "score": "0.48782676", "text": "def metrics(self):\n return self._config['metrics']", "title": "" }, { "docid": "e8b0b4d7d9fcd3cdfea2109f0f4b49af", "score": "0.48525465", "text": "def set_metric(self):\n self.metric = metrics.MAE()\n return self.metric", "title": "" }, { "docid": "6d0427e58417e5a223c8f05452a7808d", "score": "0.48417953", "text": "def setMetricUnits(self):\n return self.interface('state.SetMetricUnits')", "title": "" }, { "docid": "0d9dc0294efc349401edf232aaac0104", "score": "0.48397136", "text": "def setup_metrics(app):\n app.before_request(start_timer)\n\n app.after_request(record_request_data)\n app.after_request(stop_timer)\n\n @app.route('/metrics')\n def metrics():\n \"\"\" Endpoint for exposing latest metrics to Prometheus\"\"\"\n return Response(prometheus_client.generate_latest(registry), mimetype='text/plain; charset=utf-8')", "title": "" }, { "docid": "3bdab0dff37a9946199f529d3b636102", "score": "0.48122355", "text": "def get_global_metrics(self) -> RuleMetrics:", "title": "" }, { "docid": "d7707c2769531d16d027a3f95c601ed6", "score": "0.4775516", "text": "def add_metrics_for(self, *args, plot=None, phases=None):\n assert plot is not None\n from .presets import Config\n self.update_config(*Config.gen_plot(*args, plot=plot, phases=phases))", "title": "" }, { "docid": "8aa7eb2cf0edbf44a21eaac34a912eb7", "score": "0.4774931", "text": "def update_gauges(self):\n metrics = self.erddap_metrics.get_metrics()\n for metric in metrics:\n if metric.metric_value is None:\n # should never be returned by ErddapMetrics... 
but just to be safe\n continue\n if metric.name not in self.gauges.keys():\n # note: declaring a Gauge here adds it to the list of metrics tracked by Prometheus\n self.gauges[metric.name] = Gauge(metric.name, metric.help, metric.label_names)\n self.gauges[metric.name].labels(*metric.label_values).set(metric.metric_value)", "title": "" }, { "docid": "ed4b8b34161a70e78ccfc191b2d9f5e2", "score": "0.47408426", "text": "def metrics_collector_spec(self, metrics_collector_spec):\n\n self._metrics_collector_spec = metrics_collector_spec", "title": "" }, { "docid": "018634f1ab8f5dea3a9e53876195b041", "score": "0.4714286", "text": "def __init__(__self__, *,\n engagement_metrics: str):\n pulumi.set(__self__, \"engagement_metrics\", engagement_metrics)", "title": "" }, { "docid": "f493cc4a7383b04a8527af7cf7dda346", "score": "0.47039953", "text": "def get_metrics(self):\n return {}", "title": "" }, { "docid": "894db54fe15c7b231a7484405e5b32fe", "score": "0.46966302", "text": "def monitoring_metrics(self):\n if self._monitoring_metrics is None:\n self._monitoring_metrics = FnApiMetrics(\n self._monitoring_infos_by_stage, user_metrics_only=False)\n return self._monitoring_metrics", "title": "" }, { "docid": "ccc4d6118c89def4cb5eb26a3ff936f2", "score": "0.4677977", "text": "def _set_metrics(self, eval_names):\n metrics = [self._default_metric]\n\n metrics = check_metrics(metrics)\n # Set metric container for each sets\n self._metric_container_dict = {}\n for name in eval_names:\n self._metric_container_dict.update(\n {name: UnsupMetricContainer(metrics, prefix=f\"{name}_\")}\n )\n\n self._metrics = []\n self._metrics_names = []\n for _, metric_container in self._metric_container_dict.items():\n self._metrics.extend(metric_container.metrics)\n self._metrics_names.extend(metric_container.names)\n\n # Early stopping metric is the last eval metric\n self.early_stopping_metric = (\n self._metrics_names[-1] if len(self._metrics_names) > 0 else None\n )", "title": "" }, { "docid": "dc92452c4474467f27e069487be3bf43", "score": "0.46773568", "text": "def __init__(self):\n self._ctx = ctx\n self._metrics = {}\n self._metric_cfg = get_metric()", "title": "" }, { "docid": "a0d3bdbe459c137701e36d5fa8858278", "score": "0.46675006", "text": "def metrics(self):\n self._metrics = [\n Metric(m, self._config) for m in self.repository.get_metrics(self.project.name, self.id)\n ]\n\n return self._metrics", "title": "" }, { "docid": "cc325932d200f1e9700cd5ff374f67b8", "score": "0.4662486", "text": "def init(self):\n AbstractMetricElement.init(self)\n self._metrics = self._get_metrics()", "title": "" }, { "docid": "217575f1bbaa1f08d161b449270418be", "score": "0.46614853", "text": "def update_metrics(self, **kwargs):\n\n root_metrics = self._metrics\n\n metrics = root_metrics\n\n now = int(time.time())\n\n if 'first_update' not in metrics:\n metrics['first_update'] = now\n\n metrics['last_update'] = now\n\n metrics = root_metrics\n\n try:\n metrics = metrics['transports']\n\n metrics['total'] = (\n metrics.get('total', 0)\n + kwargs.get('transport_call_count', 0))\n metrics['failures'] = (\n metrics.get('failures', 0)\n + kwargs.get('transport_failure_count', 0))\n\n except KeyError:\n pass\n\n metrics = root_metrics\n\n try:\n metrics = metrics['data_files']\n\n metrics['total'] = (\n metrics.get('total', 0)\n + kwargs.get('datafile_call_count', 0))\n metrics['failures'] = (\n metrics.get('failures', 0)\n + kwargs.get('datafile_failure_count', 0))\n\n # TODO: some data is still not coming from snmpsim v2carch core\n\n except 
KeyError:\n pass", "title": "" }, { "docid": "9a2f6a424e3374e8f9fdae74b9af6813", "score": "0.46613377", "text": "def set_metric(self, metric):\n self.metric = metric", "title": "" }, { "docid": "b2905e15d0573bef17b8ffe47366e812", "score": "0.46489632", "text": "def com_adobe_granite_apicontroller(self, com_adobe_granite_apicontroller: ConfigNodePropertyString):\n\n self._com_adobe_granite_apicontroller = com_adobe_granite_apicontroller", "title": "" }, { "docid": "25262cf693c54e8aa8cbe8e2fbefad97", "score": "0.4634837", "text": "def get_metrics(self):\n\t\treturn self.connect('/metrics_providers')", "title": "" }, { "docid": "24546c717b1eeed21d65a57bda8209b3", "score": "0.4632256", "text": "def get_metrics(self) -> {}:\n return {'metrics': self._metrics, 'groups': self._metrics_groups}", "title": "" }, { "docid": "fbaf5a85bc7b66ebd0a8e6ee01657bb2", "score": "0.46294197", "text": "def init_loggers(self) -> None:\n metric = self.metric\n if metric==\"acc\":\n self.add_meters('train', metrics.make_meter_acc())\n self.add_meters('val', metrics.make_meter_acc())\n self.add_meters('test', metrics.make_meter_acc())\n self.add_meters('hyperparams', {'learning_rate': metrics.ValueMeter()})\n elif metric==\"f1\":\n self.add_meters('train', metrics.make_meter_f1())\n self.add_meters('val', metrics.make_meter_f1())\n self.add_meters('test', metrics.make_meter_f1())\n self.add_meters('hyperparams', {'learning_rate': metrics.ValueMeter()})\n elif metric==\"loss\":\n self.add_meters('train', metrics.make_meter_loss())\n self.add_meters('val', metrics.make_meter_loss())\n self.add_meters('test', metrics.make_meter_loss())\n self.add_meters('hyperparams', {'learning_rate': metrics.ValueMeter()})\n else:\n raise NotImplementedError(f\"{metric} metric not implemented\")", "title": "" }, { "docid": "0763a863482c7309b8edd2460dff8929", "score": "0.46191826", "text": "def get_metrics(self):\n return {k: sum(v) / len(v) for k, v in self.metrics.items()}", "title": "" }, { "docid": "924e91be591d34176889b744e0708874", "score": "0.46191338", "text": "def get_metric(self):\n return {\n xfmrname: metric / self.counter if self.counter else 0\n for xfmrname, metric in self.tlri_metric.to_dict()\n .get(\"metric\", {})\n .items()\n }", "title": "" }, { "docid": "65c81a2453d9e6a698bcdaa99b9be850", "score": "0.46147254", "text": "def metric(self):\n assert False", "title": "" }, { "docid": "c4cf564a89e5c41a88f146bc3005ae80", "score": "0.46112913", "text": "def init_measurement():\n for rule in get_monitor_rules():\n # add a wrapper for every endpoint\n if rule.endpoint in user_app.view_functions:\n user_app.view_functions[rule.endpoint] = track_performance(user_app.view_functions[rule.endpoint],\n endpoint=rule.endpoint)\n\n # filter dashboard rules\n rules = user_app.url_map.iter_rules()\n rules = [r for r in rules if not r.rule.startswith('/' + config.link)\n and not r.rule.startswith('/static-' + config.link)]\n for rule in rules:\n user_app.view_functions[rule.endpoint] = track_last_accessed(user_app.view_functions[rule.endpoint],\n endpoint=rule.endpoint)", "title": "" }, { "docid": "ff6e633b89275e97a3d8e1914bff2ce9", "score": "0.46029603", "text": "def add_metrics(self):\n\n for metric in self.METRICS:\n self.sanity_patterns = sn.assert_found(re.escape(metric.column), self.stdout)\n self.perf_patterns[metric.label] = reduce(self.stdout, metric.column, metric.function)\n self.reference[metric.label] = (0, None, None, metric.unit) # oddly we don't have to supply the \"*\" scope key??", "title": "" }, { "docid": "fedd5086765d0a577dc3818a5c765d3d", "score": "0.4600462", "text": "def init(self):\n\n AbstractMetricElement.init(self)\n self._metrics = self._get_metrics()", "title": "" }, { "docid": "7a5f3c0575a07d5f19ddd5308c6d175e", "score": "0.45998874", "text": "def metrics_func(self):\n return self._metrics_func", "title": "" }, { "docid": "f53174507dd6644c76bc664b51f65639", "score": "0.45990664", "text": "def read_internal_metrics_callback(self):\n values = [\n self.collectd.Values(\n plugin=PLUGIN_NAME,\n type_instance=\"batch_queue_size\",\n type=\"gauge\",\n values=[self.met_buffer.size()],\n ),\n self.collectd.Values(\n plugin=PLUGIN_NAME,\n type_instance=\"received_metrics\",\n # this is actually counter, but we are using gauge to keep value as it is\n type=\"gauge\",\n values=[self.received_metric_count],\n ),\n self.collectd.Values(\n plugin=PLUGIN_NAME,\n type_instance=\"sent_batches\",\n # this is actually counter, but we are using gauge to keep value as it is\n type=\"gauge\",\n values=[self.met_sender.sent_batch_count],\n ),\n self.collectd.Values(\n plugin=PLUGIN_NAME,\n type_instance=\"sent_metrics\",\n # this is actually counter, but we are using gauge to keep value as it is\n type=\"gauge\",\n values=[self.met_sender.sent_metric_count],\n ),\n self.collectd.Values(\n plugin=PLUGIN_NAME,\n type_instance=\"dropped_batches\",\n # this is actually counter, but we are using gauge to keep value as it is\n type=\"gauge\",\n values=[self.met_buffer.dropped_batch_count],\n ),\n self.collectd.Values(\n plugin=PLUGIN_NAME,\n type_instance=\"dropped_metrics\",\n # this is actually counter, but we are using gauge to keep value as it is\n type=\"gauge\",\n values=[self.met_buffer.dropped_metric_count],\n ),\n ]\n\n for value in values:\n value.dispatch()", "title": "" }, { "docid": "758ae6ba73b4c551ee23045aa26e3caf", "score": "0.45908156", "text": "def metrics(self, train: bool = False) -> Metrics:\n pass", "title": "" }, { "docid": "33218901119116f40c200d7cc6eef637", "score": "0.45877516", "text": "def get_metrics(self):\n return self.metrics", "title": "" }, { "docid": "e68923a2f3ade8ff171ab8082e6b35fa", "score": "0.4585864", "text": "def metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DiagnosticSettingMetricArgs']]]]:\n return pulumi.get(self, \"metrics\")", "title": "" }, { "docid": "fbad2c0b2a628f51d1ce243906e994d0", "score": "0.45778903", "text": "def metrics(args):\n defaultApiHelper = DefaultApiHelper(verify_ssl=False)\n defaultApiHelper.metrics()", "title": "" }, { "docid": "02c323c2989af2ba7cde5a2815cc72c9", "score": "0.45523402", "text": "def serve_metrics(self, request: Request) -> Response:\n return self.metrics.expose_metrics(request)", "title": "" }, { "docid": "7bfabdb44ba399346b1d5004d57f4b4c", "score": "0.45464268", "text": "def add_metric(self, values_obj):\n metric = values_obj.type\n if values_obj.type_instance:\n metric += \".\" + values_obj.type_instance\n ti = self.get_time(values_obj)\n metric_time = self.metrics.setdefault(ti, mdict())\n 
metric_plugin_instance = metric_time.setdefault(\n values_obj.plugin_instance, mdict())\n metric_plugin_instance[metric] = values_obj.values", "title": "" }, { "docid": "1ab4a32c7d60755b215e43f3e88eabfb", "score": "0.45294708", "text": "def metrics(self):\n return self._metrics", "title": "" }, { "docid": "ef13ff6fa8bcfa14cde784da96e4f3df", "score": "0.45263782", "text": "def build_metrics(self):\n self._metrics = {name:make_scorer(globals()[x.get('score')], **x.get('params',{})) for name,x in self._EXP_CONF.get('model_metrics').items()}", "title": "" }, { "docid": "59346a8911f731866aa3027c5f06826a", "score": "0.45245254", "text": "def __set_properties(self, jmx_dict):\n for bean in jmx_dict:\n for key in bean.keys():\n if key == 'name':\n if bean[key] == 'Hadoop:service=ResourceManager,name=MetricsSystem,sub=Stats':\n self.num_active_sources = int(bean['NumActiveSources'])\n logging.debug('%s: num_active_sources = %i', self.__class__.__name__, self.num_active_sources)\n self.num_all_sources = int(bean['NumAllSources'])\n logging.debug('%s: num_all_sources = %i', self.__class__.__name__, self.num_all_sources)\n self.num_active_sinks = int(bean['NumActiveSinks'])\n logging.debug('%s: num_active_sinks = %i', self.__class__.__name__, self.num_active_sinks)\n\n if bean[key] == 'Hadoop:service=ResourceManager,name=ClusterMetrics':\n self.num_active_nodemanagers = int(bean['NumActiveNMs'])\n logging.debug('%s: num_active_nodemanagers = %i', self.__class__.__name__, self.num_active_nodemanagers)\n self.num_decommissioned_nodemanagers = int(bean['NumDecommissionedNMs'])\n logging.debug('%s: num_decommissioned_nodemanagers = %i', self.__class__.__name__, self.num_decommissioned_nodemanagers)\n self.num_lost_nodemanagers = int(bean['NumLostNMs'])\n logging.debug('%s: num_lost_nodemanagers = %i', self.__class__.__name__, self.num_lost_nodemanagers)\n self.num_unhealthy_nodemanagers = int(bean['NumUnhealthyNMs'])\n logging.debug('%s: num_unhealthy_nodemanagers = %i', self.__class__.__name__, self.num_unhealthy_nodemanagers)\n self.num_rebooted_nodemanagers = int(bean['NumRebootedNMs'])\n logging.debug('%s: num_rebooted_nodemanagers = %i', self.__class__.__name__, self.num_rebooted_nodemanagers)", "title": "" }, { "docid": "dd3744da1e7ab3757bdde401ed83a410", "score": "0.45128217", "text": "def init_metrics(self):\n self.threads = []\n self.registry = CollectorRegistry()\n self.data = read_configuration()\n for metric in self.data[\"config\"]:\n if \"labels\" in metric:\n labels = metric[\"labels\"]\n else:\n labels = []\n if metric[\"type\"].lower() == \"counter\":\n instrument = Counter(\n metric[\"name\"],\n metric[\"description\"],\n labels,\n registry=self.registry\n )\n elif metric[\"type\"].lower() == \"gauge\":\n instrument = Gauge(\n metric[\"name\"],\n metric[\"description\"],\n labels,\n registry=self.registry\n )\n elif metric[\"type\"].lower() == \"summary\":\n instrument = Summary(\n metric[\"name\"],\n metric[\"description\"],\n labels,\n registry=self.registry\n )\n elif metric[\"type\"].lower() == \"histogram\":\n # TODO add support to overwrite buckets\n instrument = Histogram(\n metric[\"name\"],\n metric[\"description\"],\n labels,\n registry=self.registry\n )\n else:\n logging.warning(\n \"Unknown metric type {type} for metric {name}, ignoring.\".format(**metric)\n )\n\n t = threading.Thread(\n target=self.update_metrics,\n args=(instrument, metric)\n )\n t.start()\n self.threads.append(t)", "title": "" }, { "docid": "29bdad29476edc61c2631056d3f209c4", "score": "0.45121473", "text": 
"def register_prometheus(app: Union[Blueprint, Flask], debug: bool) -> None:\n if debug:\n # Running in debug mode.\n PrometheusMetrics(app)\n else:\n # Running with Gunicorn.\n GunicornPrometheusMetrics(app)", "title": "" }, { "docid": "f6642758d0d5fbac06a03f68c75be0d1", "score": "0.4505821", "text": "def __init__(self):\n self.context = None\n self.metrics = []", "title": "" }, { "docid": "17a4194778b0a79ee18750c76f8510e4", "score": "0.44996423", "text": "def metrics_to_scalars(self, metrics):\n if self._dont_reduce:\n metrics.update({\n 'n_steps': len(self.train_dataloader) if self.train_dataloader is not None else None,\n 'n_epochs': self.max_epochs,\n 'epoch': self.current_epoch\n })\n return metrics\n else:\n return super().metrics_to_scalars(metrics)", "title": "" }, { "docid": "fb92120d3309663d0dc583d4c17b9ba3", "score": "0.44943973", "text": "def get_metrics(self):\n return self._metrics", "title": "" }, { "docid": "59d28a15180eee5d446416bc40c2e879", "score": "0.44936338", "text": "def set_custom_eval_metric(self, eval_metric):", "title": "" }, { "docid": "10b5da79cd49554fd79eb34342dfd6ba", "score": "0.4466696", "text": "def __init__(self):\n super().__init__()\n self.metric = 'MAE'", "title": "" }, { "docid": "4124c6d352919fd27c88607f92d16ff5", "score": "0.445843", "text": "def applySettings():\n\n global _mouseTracking\n global _controlTracking\n global _textTracking\n global _edgeMargin\n global _pointerFollowsZoomer\n global _pointerFollowsFocus\n\n __setupMagnifier(settings.magZoomerType)\n __setupZoomer()\n \n _mouseTracking = settings.magMouseTrackingMode\n _controlTracking = settings.magControlTrackingMode\n _textTracking = settings.magTextTrackingMode\n _edgeMargin = settings.magEdgeMargin\n _pointerFollowsZoomer = settings.magPointerFollowsZoomer\n _pointerFollowsFocus = settings.magPointerFollowsFocus\n\n #print \"MAGNIFIER PROPERTIES:\", _magnifier\n #__dumpPropertyBag(_magnifier)\n #print \"ZOOMER PROPERTIES:\", _zoomer\n #__dumpPropertyBag(_zoomer)", "title": "" }, { "docid": "22dad63662bba1459a40c0ca6f1dda92", "score": "0.44578332", "text": "def test_metrics(self):\n pass", "title": "" }, { "docid": "5ba953edce89302382d3f38cd25e7b6f", "score": "0.4451242", "text": "def setUp(self):\n self.load_metrics()", "title": "" }, { "docid": "eb5ecf4360a92fc401b84106bd059385", "score": "0.4451116", "text": "def evaluation_metrics(self):\n pass", "title": "" }, { "docid": "c0090353ac94843d38c7a62574041423", "score": "0.44473368", "text": "def metrics(self) -> Optional[pulumi.Input['BucketReplicationConfigurationRuleDestinationMetricsArgs']]:\n return pulumi.get(self, \"metrics\")", "title": "" }, { "docid": "390d6fe30f34db8a8f7efd62981eeb2b", "score": "0.44471893", "text": "def log_metrics(self, metrics_by_name, info):\n pass", "title": "" }, { "docid": "ef71a3ffcfab17addb7a9c41c1cc01e0", "score": "0.4442823", "text": "def metrics(self) -> Optional[pulumi.Input['BucketReplicationConfigRuleDestinationMetricsArgs']]:\n return pulumi.get(self, \"metrics\")", "title": "" }, { "docid": "7b424435c7e51cd080f8c34b72af03df", "score": "0.4442482", "text": "def update(self, **kwargs):\n for key, val in kwargs.items():\n assert isinstance(val, (float, int))\n assert key.lower() in self.metrics\n self.metrics[key.lower()].update(val)", "title": "" }, { "docid": "8bc511c0747658c752629d56a2bb6b0b", "score": "0.44409752", "text": "def resolution(self, value):\n unit = self.size_unit\n if isinstance(value, units.Quantity):\n self.instrument.resolution = value.to(unit)\n else:\n 
self.instrument.resolution = value * unit", "title": "" }, { "docid": "2fc6346e84b378e23cde9d75bcf4fde5", "score": "0.44373086", "text": "def reset_metrics(self) -> None:\n pass", "title": "" }, { "docid": "46d62640a30db5cdbc96ac4462a78513", "score": "0.44260702", "text": "def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])", "title": "" }, { "docid": "6793b8852964bda3bcc8b3fccf1cfa1f", "score": "0.44257298", "text": "def _publish_proxy_metrics(self):\n if self._backend.metrics_publisher:\n self._backend.metrics_publisher.publish()", "title": "" }, { "docid": "f4699e0ee900267c2355fd9e4eb6a412", "score": "0.44255537", "text": "def prepare_metric(self):\n pass", "title": "" }, { "docid": "92c7186d53338cec92eccb3d9c0ceb6f", "score": "0.4420469", "text": "def __init__(self, registry):\n self.current_utilization_metric = Gauge('oracledb_resource_current_utilization'\n , 'Generic counter metric from v$resource_limit view in Oracle (current value).'\n , labelnames=['server', 'port', NAME]\n , registry=registry)\n self.limit_value_metric = Gauge('oracledb_resource_limit_value'\n , 'Generic counter metric from v$resource_limit view in Oracle (limit value).'\n , labelnames=['server', 'port', NAME]\n , registry=registry)\n\n self.query = '''\n SELECT resource_name AS %s, current_utilization AS %s, limit_value as %s\n FROM v$resource_limit\n ''' % (NAME, CURRENT_UTILIZATION, LIMIT_VALUE)\n\n super().__init__()", "title": "" }, { "docid": "86ddf07d97aa829f090676f0f5c11ccc", "score": "0.4418823", "text": "def metrics(self) -> pulumi.Output[Optional[Sequence['outputs.DiagnosticSettingMetric']]]:\n return pulumi.get(self, \"metrics\")", "title": "" }, { "docid": "dde1e2e9f5b01a3aca903bd14485e994", "score": "0.441614", "text": "def setup_gauge(key, label):\n global PROMETHEUS_METRICS\n g = prometheus_client.Gauge(key, label)\n g.set_function(lambda: PROMETHEUS_METRICS[key])", "title": "" }, { "docid": "2f9cf1f263bd2dbe1d05ed3a01781753", "score": "0.44077775", "text": "def setDefaults(self):\n BandFilter.setDefaults(self)\n self.num_visits = 10\n self.num_grouped_visits = 1\n self.max_grouped_visits = 2", "title": "" }, { "docid": "2b15ff5afdf4f204d667450231f196ed", "score": "0.4403506", "text": "def add_metric(self, values_obj):\n metric = values_obj.type\n if values_obj.type_instance:\n metric += \".\" + values_obj.type_instance\n ti = self.get_time(values_obj)\n metric_time = self.metrics.setdefault(ti, mdict())\n metric_time[metric] = values_obj.values", "title": "" }, { "docid": "cc59014b216f1489e5b14f3b8279c2f8", "score": "0.44014803", "text": "def test_init_freq_set(self):\n self.dbgfunc()\n pname = U.my_name()\n self.make_plugin(pname)\n c = self.make_cfg(pname, fire=False)\n p = CrawlPlugin.CrawlPlugin(pname, c)\n\n self.expected(False, p.firable)\n self.expected(19, p.frequency)", "title": "" }, { "docid": "a6d99367b3cb6460cdec7b79b8d072af", "score": "0.44001207", "text": "def _set_resolution(self) -> float:\n raise NotImplementedError(\"This method should be implemented by a child class\")", "title": "" }, { "docid": "fe9ef843e27dd2fd6bd2253558bc10be", "score": "0.4396515", "text": "def get_metric(self):", "title": "" }, { "docid": "6782beafc2a746d64a811b6ce659589b", "score": "0.4395572", "text": "def init(self):\n AbstractMetricElement.init(self)\n self.metric_value_type = 'int'\n if self._data['metric_value_type'] == 'float':\n self.metric_value_type = 'float'", "title": "" }, { "docid": "6e001a61bbd326843576d16cfb6ee33c", 
"score": "0.4389712", "text": "def _setup(self):\n\n token = CONF.grafana_client.token\n base_url = CONF.grafana_client.base_url\n\n if not token:\n LOG.critical(\"GrafanaHelper authentication token not configured\")\n return\n self._headers = {\"Authorization\": \"Bearer \" + token,\n \"Content-Type\": \"Application/json\"}\n\n if not base_url:\n LOG.critical(\"GrafanaHelper url not properly configured, \"\n \"check base_url\")\n return\n self._base_url = base_url\n\n # Very basic url parsing\n parse = urlparse.urlparse(self._base_url)\n if parse.scheme == '' or parse.netloc == '' or parse.path == '':\n LOG.critical(\"GrafanaHelper url not properly configured, \"\n \"check base_url and project_id\")\n return\n\n self._build_metric_map()\n\n if len(self.METRIC_MAP) == 0:\n LOG.critical(\"GrafanaHelper not configured for any metrics\")\n\n self.configured = True", "title": "" }, { "docid": "eeeb3267ccd0604f20fe0f3aa96a399a", "score": "0.43859562", "text": "def update_user_metrics():\n with record('tasks.lifestyle.update_user_metrics'):\n workflow = (\n _find_metric_statuses_to_update.s() | dmap.s(\n _dispatch_series_value_update.s()\n )\n )\n workflow.delay()", "title": "" }, { "docid": "a2e856ee37a1311e5c45873bd42d3f7b", "score": "0.43859318", "text": "def analyze(self):\n self.instance.fluence.gamma.calc_map(**self.analysis_settings)", "title": "" }, { "docid": "7619a2cd746df0689db059fbdfffd98f", "score": "0.43856868", "text": "def _setup_metrics():\n metric_types = BaseMetric.__subclasses__()\n for metric_type in metric_types:\n for metric in metric_type.__subclasses__():\n metric.create_metric()", "title": "" }, { "docid": "87e09112afdd0eadb5c3b7216b4b46b9", "score": "0.43759763", "text": "def get_metrics(self, x_):\n if not hasattr(self, 'metrics_function'):\n raise Exception('You must compile your model before using it.')\n\n batch_size = self.experiment_config[\"batch_size\"]", "title": "" }, { "docid": "c8f5237c9f4cfe1e2ffdb58be4034289", "score": "0.43733633", "text": "def metrics(cell_api, api, outdir, ws_api):\n ctx['cell_api'] = cell_api\n ctx['nodeinf_eps'] = _find_nodeinfo_endpoints(api)\n ctx['outdir'] = outdir\n ctx['ws_api'] = ws_api", "title": "" }, { "docid": "3d0a05a0ee71e4530a760300ab00ecda", "score": "0.43704465", "text": "def create_or_update_server_metrics_config(\n self, name: str, body: JSON, *, content_type: str = \"application/merge-patch+json\", **kwargs: Any\n ) -> JSON:", "title": "" }, { "docid": "84ef85a63dc474fa66a3579da911db2e", "score": "0.4368096", "text": "def add_float_measurement(self, json_name, description, units):\n if app_insights_connection_string:\n metric_name = json_name_to_metric_name(json_name)\n\n new_measure = measure_module.MeasureFloat(metric_name, description, units)\n new_view = view_module.View(\n metric_name, description, [], new_measure, aggregation_module.LastValueAggregation()\n )\n view_manager.register_view(new_view)\n\n def new_setter(value):\n self.mmap.measure_float_put(new_measure, value)\n\n self.metrics[json_name] = new_setter", "title": "" }, { "docid": "32407c5ee5687b1a0ec4b15023d87c6a", "score": "0.43672833", "text": "def SetCallback(self, callback, fields=None):\n stats_collector_instance.Get().SetGaugeCallback(\n self.name, callback, fields=fields)", "title": "" }, { "docid": "ffe9e4a1f1cdd97bccb5724f95a0e596", "score": "0.4366174", "text": "def get_metrics(self, eval_config):\n raise NotImplementedError", "title": "" }, { "docid": "2d5eecbadbc89bca573e637eb326b7aa", "score": "0.43646315", "text": "def metrics(self) -> 
pulumi.Input[Sequence[pulumi.Input['ClusterClusterConfigDataprocMetricConfigMetricArgs']]]:\n return pulumi.get(self, \"metrics\")", "title": "" } ]
9f51ea2ac5634ec1411934d9594f7912
Get counts of how many images are of each category for single-label classification, and (optionally) plot a histogram of counts. csv_file must be in the form below. If the csv does not have a header row with column names, then set skip_first=False.
[ { "docid": "4df68d23c91c6350ab90cc0a2db2ec12", "score": "0.83755827", "text": "def get_cat_counts(csv_file,skip_first=True,plot_hist=True):\n \n if skip_first == False: \n df = pd.read_csv(csv_file, names = ['img_name','category'])\n if skip_first == True: \n df = pd.read_csv(csv_file, names = ['img_name','category'], skiprows=1)\n \n cat_counts = df['category'].value_counts()\n cat_counts = pd.DataFrame({'category':list(cat_counts.index),'count':list(cat_counts)})\n \n if plot_hist: \n plt.hist(cat_counts['count'])\n plt.xlabel('number images in category')\n plt.ylabel('frequency')\n \n return cat_counts", "title": "" } ]
[ { "docid": "4622c558732975aebb15df0e63ccfd57", "score": "0.59433186", "text": "def generate_category_labels_from_csv(csv_dir):\n categories = {}\n df = pd.read_csv(csv_dir)\n\n for row in df.itertuples():\n categories[row[1]] = row[2]\n return categories", "title": "" }, { "docid": "b15352afa3aef1ffec31ab3d6571e97e", "score": "0.57507217", "text": "def load_dataset_labeled_by_csv(path, file_csv, delimiter, index_col, label_col, image_size):\n\n dataset = {}\n dataset['features'] = []\n dataset['labels'] = []\n\n\n final_test_csv = pd.read_csv(file_csv, delimiter=delimiter, encoding=\"utf-8-sig\")\n final_test_csv.set_index(index_col, inplace=True)\n\n for subdir, dirs, files in os.walk(path): # all file on the dataset folder\n for file in files: # one image by one\n\n filename, file_extension = os.path.splitext(file) # extension control\n if file_extension == '.ppm' or file_extension == '.png':\n label = final_test_csv.loc[filename + file_extension].ClassId # obtain the image label\n imgPath = os.path.join(path, file) # the path of the image\n\n # load image with cv2 library\n img = cv2.resize(cv2.cvtColor(cv2.imread(imgPath), cv2.COLOR_BGR2RGB), (image_size, image_size))\n\n dataset['features'].append(np.asarray(img))\n dataset['labels'].append(label)\n\n return dataset", "title": "" }, { "docid": "4e4197f2b4ad3fc37855c58aae59b870", "score": "0.5700669", "text": "def load_tomorun_csv_histogram_file(fn):\n import tomographer\n \n dat = np.loadtxt(fn, skiprows=1)\n # dat[:,0] should be lin-spaced values corresponding to the left edges of the histogram bins\n fmin = dat[0,0]\n numbins = dat.shape[0]\n binresolution = dat[1,0]-dat[0,0]\n fmax = dat[numbins-1,0] + binresolution\n h = tomographer.HistogramWithErrorBars(fmin, fmax, numbins)\n h.load(dat[:,1], dat[:,2])\n return h", "title": "" }, { "docid": "bda3a6b36771ae8b9acb26d9ecbd5eea", "score": "0.56534743", "text": "def histogram_plot(dataset, label):\r\n hist, bins = np.histogram(dataset, bins=n_classes)\r\n width = 0.7 * (bins[1] - bins[0])\r\n center = (bins[:-1] + bins[1:]) / 2\r\n plt.bar(center, hist, align='center', width=width)\r\n plt.xlabel(label)\r\n plt.ylabel(\"Image count\")\r\n plt.show()", "title": "" }, { "docid": "c108b1b0b3637374026a7218de318af5", "score": "0.5653265", "text": "def count_label(open_tsv):\n ### open files ###\n file = open(open_tsv, 'r', encoding='utf-8')\n lines = csv.reader(file, delimiter='\\t')\n\n ### make label list ###\n label_list = [line[-1] for line in lines]\n label_counter = collections.Counter()\n for label in label_list:\n label_counter[label] += 1\n print(label_counter) # check the number of each label\n file.close()", "title": "" }, { "docid": "41be9937d28c2f4c7b1ba1cd0fd86f0d", "score": "0.5620567", "text": "def build_label_dictionary(path):\n labels = load_csv(path)\n # skip the 1st line header\n labels = labels[1:]\n # keys are filenames\n keys = np.unique(labels[:,0])\n dictionary = get_label_dictionary(labels, keys)\n classes = np.unique(labels[:,-1]).astype(int).tolist()\n # insert background label 0\n classes.insert(0, 0)\n print(\"Num of unique classes: \", classes)\n return dictionary, classes", "title": "" }, { "docid": "2f686b4630dcfee0a9b73cf2b63a4538", "score": "0.5605292", "text": "def make_histogram(csv_in):\n # TODO: It would be better to share parsing with rappor_encode()\n counter = collections.Counter()\n for (_, word) in csv_in:\n counter[word] += 1\n return dict(counter.most_common())", "title": "" }, { "docid": "fbda5760200662c436e30cdc2549695e", "score": 
"0.5602858", "text": "def load_images_label_csv(label):\n \n FILE_NAME = \"labels.csv\"\n SEP = \"\\t\"\n USECOLS = [\"eye_color\", \"face_shape\", \"file_name\"]\n \n labels_file = pd.read_csv(folder + FILE_NAME, sep=SEP, usecols=USECOLS)\n labels = labels_file[label].values\n \n return labels", "title": "" }, { "docid": "1195eb062696cdb430582338f36ef0a5", "score": "0.5600605", "text": "def _count_coco(data_dir, data_type, data_year):\n\n anno_file = '{}/annotations/instances_{}{}.json'.\\\n format(data_dir, data_type, data_year)\n coco = COCO(anno_file)\n cats = coco.loadCats(coco.getCatIds())\n cat_stats = []\n for cnt, cat in enumerate(cats, 1):\n cat_name = cat['name']\n img_ids = coco.getImgIds(catIds=coco.getCatIds([cat_name]))\n ann_ids = coco.getAnnIds(catIds=coco.getCatIds([cat_name]))\n cat_stats.append((cat_name, len(img_ids), len(ann_ids)))\n print('[{}] {} counted...'.format(cnt, cat_name))\n plt.subplot(2, 1, 1)\n cat_names, cat_imgs, cat_anns = zip(*sorted(cat_stats, key=lambda x_y_z: -x_y_z[2]))\n plt.bar(range(len(cat_names)), cat_anns, tick_label=cat_names)\n plt.title('#Instances Per Category')\n\n plt.subplot(2, 1, 2)\n cat_names, cat_imgs, cat_anns = zip(*sorted(cat_stats, key=lambda x_y_z: -x_y_z[1]))\n plt.bar(range(len(cat_names)), cat_imgs, tick_label=cat_names)\n plt.title('#Images Per Category')\n plt.show()", "title": "" }, { "docid": "4ad8b6e2893688dd36110f3508a038f5", "score": "0.550385", "text": "def get_sample_counts(output_directory: str, datasets: str, class_names: list):\n\n df = pd.read_csv(os.path.join(output_directory, f\"{datasets}.csv\"))\n total_count = df.shape[0]\n labels = df[class_names].values\n positive_counts = np.sum(labels, axis=0)\n class_positive_counts = dict(zip(class_names, positive_counts))\n return total_count, class_positive_counts", "title": "" }, { "docid": "b540aa1b16871e8ed4fda008611afd9f", "score": "0.54797626", "text": "def generate_filtered_bargraph(csv_path:str, fig_save_path:str=None, img_name:str=\"filtered_bar_diagram.png\"):\n df = pd.read_csv(csv_path)\n df.iloc[:,:3].plot(kind=\"bar\")\n\n axes = plt.gca()\n axes.yaxis.grid()\n plt.xticks([*range(0,df.shape[0])],df.iloc[:,-1],rotation=\"horizontal\")\n plt.legend(bbox_to_anchor=(1,1), fontsize=10)\n plt.xlabel(\"AP\", labelpad=20, fontsize= 20)\n plt.ylabel(\"Score\", labelpad=20, fontsize= 20)\n\n plt.gcf().set_size_inches(18.5,10.5)\n\n plt.subplots_adjust(right=0.75)\n plt.gcf().savefig(os.path.join(fig_save_path,img_name),dpi=100)\n\n # plt.show()", "title": "" }, { "docid": "75af9a080a284c2c90b85a2d5cc91412", "score": "0.54708487", "text": "def parse_featureCounts_counts(sample, file):\n df = pd.read_csv(file, sep='\\t', comment='#')\n df.columns = ['FBgn', 'chr', 'start', 'end', 'strand', 'length', 'count']\n df['sample'] = sample\n df.set_index(['sample', 'FBgn'], inplace=True)\n return df['count']", "title": "" }, { "docid": "6edb1f57e8f238e586bc4d52dce594a2", "score": "0.54576355", "text": "def run():\n pcap_data = pd.read_csv('packet_metadata.csv', index_col='No.')\n\n df = pcap_data.groupby('Protocol')['Protocol'].count()\n\n df.plot(kind='bar')\n\n plt.show()", "title": "" }, { "docid": "75d66b316d5903ff61b22c6a1cf7f327", "score": "0.54048854", "text": "def getlabels(csvpath):\n with open(csvpath, 'r') as f:\n rdr = csv.reader(f)\n content = [row for row in rdr]\n\n labelnames = [row[0] for row in content[1:]]\n labels = [(idx, l) for (idx, l) in enumerate(labelnames, 1)]\n return labels", "title": "" }, { "docid": "e29826bc7f0bb68f115676c1e22f18ce", 
"score": "0.5338011", "text": "def get_label_info(csv_path):\n\n ann = pd.read_csv(csv_path)\n label = {}\n for _, row in ann.iterrows():\n label_name = row['name']\n r = row['r']\n g = row['g']\n b = row['b']\n class_11 = row['class_11']\n label[label_name] = [int(r), int(g), int(b), class_11]\n return label", "title": "" }, { "docid": "8c9e9e2429ab3a1c2e1fc92aed6d34c4", "score": "0.5336224", "text": "def attributes_counts(dataset):\n print(\"'Class' Value Counts: \"+\" \\n\", dataset['Class'].value_counts())\n print(\"\\n Visualisation plot: \"+\" \\n\", dataset['Class'].value_counts().plot(x = dataset['Class'], kind='bar'))", "title": "" }, { "docid": "3cea554353a6b4a63b8a4aa2a586ed6c", "score": "0.53197163", "text": "def load_mnist(csv_filename='C:/Users/fish2/PycharmProjects/untitled/data/mnist_data.csv'):\n mnist = read_csv(csv_filename)\n mnist = np.array(mnist)\n y = mnist[:, 0] # get the labels from the first column\n x = mnist[:, 1:] # get the data from the other columns\n return x, y", "title": "" }, { "docid": "41bc9219f5375b25f5df7c2495b180cf", "score": "0.5304262", "text": "def readCSVLabels(data_file):\n with open(data_file) as f:\n all = csv.reader(f)\n all.next()\n labels = []\n for line in all:\n labels.append(line[1])\n of = open(os.path.join(\".\", \"labels\"), \"w\")\n for label in labels:\n of.write(\"%s\\n\" % label)\n of.close()", "title": "" }, { "docid": "f3ac04e1d3cb718cfb56ad953f1a806d", "score": "0.52883244", "text": "def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for row in rows:\n # in our dataset format, the label is always the last column\n label = row[-1]\n if label not in counts:\n counts[label] = 0\n counts[label] += 1\n return counts", "title": "" }, { "docid": "c93aba5e7c471d5c64043ff9caeb7143", "score": "0.5281442", "text": "def readCountMartix(path,min_genes):\n \n result = sc.read_csv(path).transpose() #, cache=True\n result.var_names_make_unique()\n n_counts = np.sum(result.X, axis=1)\n result.obs['n_counts'] = n_counts\n sc.pp.filter_cells(result, min_genes=min_genes)\n \n return result", "title": "" }, { "docid": "a78a4d958baf5247e756b846df79153f", "score": "0.52628475", "text": "def count_clusters(data, figs_folder):\n\n cluster_counts = data.groupby('cluster').count()[['Unnamed: 0']]\n counts_path = os.path.join(figs_folder, 'cluster_counts.csv')\n cluster_counts.to_csv(counts_path, index=False)\n\n ax = cluster_counts.divide(len(data)).plot(kind=\"barh\", color='#888b8d', alpha=0.5)\n ax.set_xlabel(\"Fraction of beans belonging to cluster\");\n ax.set_ylabel(\"Cluster label\");\n ax.set_title(\"Relative size of each cluster\");\n ax.get_legend().remove()\n fig_path = os.path.join(figs_folder, 'cluster-counts-' + now + '.png')\n fig = ax.get_figure()\n fig.savefig(fig_path)", "title": "" }, { "docid": "bbc07b67553dceb70a848f35c4eeeafb", "score": "0.52459466", "text": "def count_classes(file_object):\n n_samples, n_classes = file_object['y'].shape\n sample_count = numpy.asarray([0]*n_classes)\n end = 0\n while end < n_samples:\n start = end\n end += 131072 # magic number, power of 2 :D\n if end > n_samples:\n end = n_samples\n data_y = numpy.asarray(file_object['y'][start:end])\n sample_count += numpy.sum(data_y, axis = 0)\n return sample_count", "title": "" }, { "docid": "fbbff7e57d13618a5a773896c187a3a4", "score": "0.52370673", "text": "def class_counts(path: str) -> \"np.typing.NDArray[np.float64]\":\n global args\n\n counts = np.zeros(args.total_classes)\n with rio.open(path, \"r\") as src:\n x = src.read()\n 
unique, unique_counts = np.unique(x, return_counts=True)\n counts[unique] = unique_counts\n\n return counts", "title": "" }, { "docid": "dc526fdba1534459708ce7fbc7ee8193", "score": "0.523104", "text": "def generate_scribe_category_plot(scribe_query_df, output_fname, label: str ='class'):\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(14, 9)\r\n scribe_query_df.sort_values(label, inplace=True)\r\n # print(scribe_query_df[label])\r\n sns.countplot(data=scribe_query_df, x=label, ax=ax)\r\n fig.savefig(output_fname)\r\n pass", "title": "" }, { "docid": "3502d3274bcc1ec5196a919b44327d07", "score": "0.52277863", "text": "def histogram(self, label_col):\n #Count of respective labels\n label_counts = self._dataframe.groupby(label_col).size().reset_index(name = constants.PANDAS_COUNT_AGG_COLUMN)\n\n #Count the label counts\n label_histogram = label_counts.groupby(constants.PANDAS_COUNT_AGG_COLUMN).size().to_frame(constants.PANDAS_COUNT_HIST_COLUMN).reset_index()\n\n return label_histogram", "title": "" }, { "docid": "184521b54e001509a09e7db27370da3a", "score": "0.51674795", "text": "def read_labeled_data(file_name):\n data_set = []\n labels = []\n #f = open(file_name)\n #data = csv.reader(f, delimiter=\",\")\n #for line in data:\n with open(file_name) as f:\n for line in f:\n line_array = [x for x in line.strip().split(',')]\n label = int(line_array[0])\n one_hot_label = np.zeros(number_of_classes)\n one_hot_label[label-1] = 1\n example = line_array[1:]\n\n data_set.append(example)\n labels.append(one_hot_label)\n return np.array(data_set), np.array(labels)", "title": "" }, { "docid": "2e61ee4b8626f4a6894c2fc5851b9fe6", "score": "0.5158298", "text": "def add_category_count_graph(df):\n category_counts = df[[c for c in df.columns if \"category\" in c]].sum(axis=0)\n category_names = [c[9:] for c in category_counts.index]\n category_counts = category_counts.values\n\n return {\n \"data\": [\n Bar(\n x=category_names,\n y=category_counts\n )\n ],\n\n \"layout\": {\n \"title\": \"Number of messages per category\",\n \"yaxis\": {\n \"title\": \"Count\"\n },\n 'height': 600,\n 'margin': dict(b=200, pad=4),\n }\n }", "title": "" }, { "docid": "80561806a33c6e952a8f0f7fc1bdc241", "score": "0.51301026", "text": "def load_features(picture):\n img = cv2.imread(picture.name) \n \n if(config.RGB):\n picture.rgb = count_histogram(img)\n #x.rgb = count_histogram(img) \n\n if(config.LAB):\n lab_image = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)\n picture.lab = count_histogram(lab_image)\n #x.lab = count_histogram(lab_image) \n \n if(config.HSV):\n hsv_image = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n picture.hsv = count_histogram(hsv_image)\n \n if(config.GABOR):\n picture.gabor = gabor.count_gabor(img)\n \n if(config.GABORQ):\n picture.gaborq = gaborq.count_gaborq(img)\n \n if(config.POEM):\n picture.poem = poem.count_poem(img)\n \n if(config.COLOR_POEM):\n picture.color_poem = color_poem.count_color_poem(img)\n \n if(config.HAAR):\n picture.haar = haar.count_haar(img)\n \n if(config.HAARQ):\n picture.haarq = haarq.count_haarq(img)", "title": "" }, { "docid": "15072f18e1518d56846144d1e89037c6", "score": "0.5119694", "text": "def dataset_classes():\n face_dataset = FaceLandmarksDataset(csv_file='dummyData/faces/face_landmarks.csv',\n root_dir='dummyData/faces/')\n fig = plt.figure()\n\n for i in range(len(face_dataset)):\n sample = face_dataset[i]\n\n print(i, sample['image'].shape, sample['landmarks'].shape)\n\n ax = plt.subplot(1, 4, i + 1)\n plt.tight_layout()\n ax.set_title('Sample #{}'.format(i))\n 
ax.axis('off')\n show_landmarks(**sample)\n\n if i == 3:\n plt.show()\n break", "title": "" }, { "docid": "651267cf8646fbd82e6472bf947b1d24", "score": "0.5109721", "text": "def CSVDisplay(self,filepath):\n try:\n import pandas as pd\n from matplotlib import pyplot as plt\n except ImportError:\n raise ImportError('Please install the pandas and matplotlib library first!')\n csv=pd.read_csv(filepath)\n epoch=csv['epoch'].to_numpy()\n acc=csv['acc'].to_numpy()\n loss=csv['loss'].to_numpy()\n val_acc=csv['val_acc'].to_numpy()\n val_loss=csv['val_loss'].to_numpy()\n fig=plt.figure(num='CSV')\n plt.subplot(121)\n plt.plot(epoch,loss)\n plt.plot(epoch,acc)\n plt.legend(('Train loss','Train accuracy'))\n plt.subplot(122)\n plt.plot(epoch,val_loss)\n plt.plot(epoch,val_acc)\n plt.legend(('Validate loss','Validate accuracy'))\n plt.show()", "title": "" }, { "docid": "88a02468b8874410b39fcbe6b7826019", "score": "0.5104361", "text": "def load_csv(csv_file):\n X = list()\n Y = list()\n reader = csv.DictReader(csv_file)\n xlabel, ylabel = reader.fieldnames\n for pair in reader:\n X.append(float(pair[xlabel]))\n Y.append(float(pair[ylabel]))\n return {xlabel: X, ylabel: Y}", "title": "" }, { "docid": "4e4477705025801f3ea1f0fb7316442c", "score": "0.5103437", "text": "def load_dataset(self, csv_file, fail_on_missing=True):\n dataset = np.genfromtxt(csv_file, delimiter=',', dtype='|U')\n pids, fids= dataset.T\n\n # Possibly check if all files exist\n if self.image_root is not None:\n missing = np.full(len(fids), False, dtype=bool)\n for i, fid in enumerate(fids):\n missing[i] = not os.path.isfile(os.path.join(self.image_root, fid))\n\n missing_count = np.sum(missing)\n if missing_count > 0:\n if fail_on_missing:\n raise IOError('Using the `{}` file and `{}` as an image root {}/'\n '{} images are missing'.format(\n csv_file, self.image_root, missing_count, len(fids)))\n else:\n print('[Warning] removing {} missing file(s) from the'\n ' dataset.'.format(missing_count))\n # We simply remove the missing files.\n fids = fids[np.logical_not(missing)]\n pids = pids[np.logical_not(missing)]\n return pids, fids", "title": "" }, { "docid": "ed5d043de40d21bda68cb46b44569a44", "score": "0.50952226", "text": "def count_class(url_labels, url_output, tags, d):\n count = np.zeros(len(tags))\n\n out = open(os.path.join(url_output, \"clases.txt\"), 'w')\n\n for dir in url_labels:\n data = os.listdir(dir)\n for txt in data:\n f = open(os.path.join(dir, txt))\n text = f.read()\n text = text.split()\n for i in range(int(len(text) / d)):\n despl = i * d\n tag = text[0 + despl]\n indx = np.where(tags == tag)\n count[indx[0]] = count[indx[0]] + 1\n f.close()\n print(dir)\n print(count)\n out.write(str(count))\n out.close()\n\n print(\"finish\")", "title": "" }, { "docid": "1cb676e6650fa8c993178ab1dd997bb6", "score": "0.5094119", "text": "def load_mnist(directory):\n\n df = None\n\n y = []\n\n for i in range(10):\n tmp = pd.read_csv(directory % i, header=None, sep=\" \")\n # build labels - one hot vector\n\n hot_vector = [1 if j == i else 0 for j in range(0, 10)]\n\n for j in range(tmp.shape[0]):\n y.append(hot_vector)\n # concatenate dataframes by rows\n if i == 0:\n df = tmp\n else:\n df = pd.concat([df, tmp])\n\n data = df.to_numpy()\n y = np.array(y)\n\n data = data.astype(np.float64) / 255\n\n return data, y", "title": "" }, { "docid": "81d9a55c48081805b6526d3ca8d0d51d", "score": "0.50792485", "text": "def visualize(df):\n count_mean(df)\n count_median(df)\n count_plots(df)\n count_against_features(df)\n categorical_count(df)", 
"title": "" }, { "docid": "f56c38a592161830010f74e1a0b8664f", "score": "0.507721", "text": "def generate_distribution(data, filename):\n plt.hist(data, bins=50, facecolor='green')\n plt.xlabel('Number of masks')\n plt.ylabel('Count')\n plt.title(\"Distribution of masks' numbers of images in training set\")\n plt.savefig(os.path.join(OUTPUT, filename))\n # plt.show()", "title": "" }, { "docid": "f313616181f7ddbcfa81c8c9a6ff1aa6", "score": "0.507094", "text": "def plot_histograms(df):\n view = (df[['jcp', 'chris', 'ally']]\n #.fillna(0)\n .melt()\n )\n return sns.countplot(x=\"value\", hue=\"variable\", data=view)", "title": "" }, { "docid": "814427b3ca5b1d13c946ff2275e11629", "score": "0.5068409", "text": "def plot_class_distribution(labels, database):\n if database == 'cifar':\n classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog',\n 'frog', 'horse', 'ship', 'truck']\n\n else:\n classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n \n x = np.arange(len(classes))\n\n fig, ax = plt.subplots()\n bars = ax.bar(x, labels)\n\n ax.set_xticks(x)\n ax.set_xtickclasses(classes)\n ax.set_ylabel('Number of examples')\n ax.tick_params(top=True, labeltop=True, \n bottom=False, labelbottom=False)\n\n plt.show()", "title": "" }, { "docid": "fd6648ea6235fadde69bb4e54d55c87b", "score": "0.5053358", "text": "def categorize(self):\n # Load CSV with categorized data, if available\n # If not available, categorize data from scratch\n if self.from_csv() is False:\n # Get image details\n details = self.get_images_details(self._IDs)\n\n # Define dataframe columns\n columns = ['id', 'width', 'height', 'criteria']\n self._df = pd.DataFrame(details, columns=columns)\n\n # Build clusters\n X = (pd.DataFrame(self._df['criteria'].values.tolist())).as_matrix()\n kmeans = KMeans(n_clusters=self.options['num_clusters']).fit(X)\n clusters = kmeans.predict(X)\n self._df['cluster'] = clusters\n\n # Save results\n self.to_csv()", "title": "" }, { "docid": "540f968a03cf2245cb019d0f06cbad82", "score": "0.5044779", "text": "def load_label(label_dir, original_size):\n\n # initializing the label image.\n\tlabel_arr = [[0 for i in range(original_size)] for j in range(original_size)]\n\twith open(label_dir) as csvfile:\n\t readCSV = csv.reader(csvfile, delimiter=',')\n\t for row in readCSV:\n\t \n\t for i in range(0, len(row)-1, 2):\n\t \ty = int(row[i])\n\t \tx = int(row[i+1])\n\t \tlabel_arr[x][y] = 1\n\t\n\t# Transforming the array into numpy array, and reshaping it.\n\tlabel_arr = np.array(label_arr, np.float32)\n\tlabel_arr = np.reshape(label_arr, (original_size, original_size, 1))\n\n\treturn label_arr", "title": "" }, { "docid": "d520ea687f3b474e9bc04646e1cbc879", "score": "0.5042887", "text": "def graph_histogram(dataset_id, col, is_categorical, values, part='train'):\n try:\n for dark, theme in [(True, 'dark_background'), (False, 'seaborn-whitegrid')]:\n with plt.style.context(theme, after_reset=True):\n plt.figure(figsize=(7, 7))\n if is_categorical:\n df = pd.DataFrame(values)\n df.columns = ['y']\n encoder = LabelEncoder()\n df['y'] = encoder.fit_transform(df['y'])\n values = df['y'].values\n sns.distplot(values, kde=False)\n x_labels = encoder.inverse_transform(list(range(max(values) + 1)))\n plt.xticks(list(range(max(values) + 1)), x_labels, rotation=90)\n else:\n sns.distplot(values)\n plt.title('distribution of %s (%s set)' % (col, part))\n plt.xlabel('values')\n plt.ylabel('frequencies')\n __save_fig(dataset_id, '_hist_%s_%s' % (part, col), dark)\n except:\n log.error('error in graph_histogram 
with dataset_id %s' % dataset_id)", "title": "" }, { "docid": "032b6b94eac13ee36492ca4efe255e70", "score": "0.50406826", "text": "def plot_distinctiveness_heatmap(averageDatasets, \n firstWordsFile, \n mode,\n sorting,\n outfolder, \n targetCategories, \n numOfTopics, \n topTopicsShown, \n fontscale, \n dpi):\n print(\"Launched plot_distinctiveness_heatmap.\")\n for average in glob.glob(averageDatasets):\n for targetCategory in targetCategories: \n if targetCategory in average and targetCategory != \"segmentID\":\n print(\"- working on: \"+targetCategory)\n dataToPlot = get_heatmap_dataToPlot(average,\n mode,\n sorting,\n firstWordsFile, \n topTopicsShown,\n numOfTopics)\n create_distinctiveness_heatmap(dataToPlot, \n topTopicsShown,\n targetCategory, \n mode,\n sorting,\n fontscale,\n dpi, \n outfolder)\n print(\"Done.\")", "title": "" }, { "docid": "87a9b158f1db6944946870bff5c1c298", "score": "0.503455", "text": "def generate_bargraph(csv_path:str, fig_save_path:str=None, img_name:str=\"result_bar_diagram.png\"):\n df = pd.read_csv(csv_path)\n df.iloc[:,:-2].plot(kind=\"bar\")\n\n axes = plt.gca()\n axes.yaxis.grid()\n plt.xticks([*range(0,df.shape[0])],df.iloc[:,-1],rotation=\"horizontal\")\n plt.legend(bbox_to_anchor=(1,1), fontsize=10)\n plt.xlabel(\"AP and AR\", labelpad=20, fontsize= 20)\n plt.ylabel(\"Score\", labelpad=20, fontsize= 20)\n\n plt.gcf().set_size_inches(18.5,10.5)\n\n plt.subplots_adjust(right=0.75)\n plt.gcf().savefig(os.path.join(fig_save_path,img_name),dpi=100)\n\n # plt.show()", "title": "" }, { "docid": "352f0c1ad47031be15a88aaf0ac24b22", "score": "0.50307494", "text": "def load():\n data_path = os.path.join(os.path.dirname(__file__), \"data\")\n for example_name in os.listdir(data_path):\n if example_name[0] == '.': # skip hidden files -- they are junk\n continue\n example_path = os.path.join(data_path, example_name)\n images = []\n for condition in CONDITIONS:\n image_path = os.path.join(example_path,\n \"{0:s}.png\".format(condition))\n image = imageio.imread(image_path)\n images.append(image)\n image_set = np.stack(images, axis=0)\n count_path = os.path.join(example_path, \"count.csv\")\n count = np.loadtxt(count_path, delimiter=',')\n yield image_set, count", "title": "" }, { "docid": "bce87769618ba76986fc9d7326fa617d", "score": "0.5026858", "text": "def plot_classification_report(input_file, split): \n\n df = pd.read_csv(input_file)\n df = df[df['split'] == split]\n\n # Compute sklearn Classification Report and saves it in a pandas dataframe\n report = pd.DataFrame(classification_report(df.True_Label, df.Prediction, digits=3, output_dict=True)).transpose()\n report = report.loc[:, [\"precision\", \"recall\", \"f1-score\"]].drop(['accuracy', \"macro avg\"]) # Select what parts of the classification report to plot\n report = report*100 # Multiply by 100 so te percentage is 99.7% instead of 0.997%\n \n # Customize heatmap (Classification Report)\n sns.set(font_scale = 1.3)\n rdgn = sns.diverging_palette(h_neg=10, h_pos=130, s=80, l=62, sep=3, as_cmap=True)\n ax=sns.heatmap(report, cmap=rdgn, annot=True, annot_kws={\"size\": 14}, cbar=True, fmt='.3g', cbar_kws={'label':'%'}, center=90, vmin=0, vmax=100)\n ax.xaxis.tick_top()\n for t in ax.texts: t.set_text(t.get_text() + \" %\") #Put percentage \n plt.yticks(rotation = 0)", "title": "" }, { "docid": "5882a442949ad16e498f608ddf45336d", "score": "0.5002723", "text": "def load_counts(filename):\n counts = Counter()\n with open(filename) as f:\n for line in f.readlines():\n w,c = line.rstrip('\\n').split()\n 
counts[w] = int(c)\n return counts", "title": "" }, { "docid": "7da9c6c6967d87728a8ad40817322912", "score": "0.5000765", "text": "def frequency_count(binary_data, binary_class):\n num_examples, num_features = binary_data.shape\n pow_2 = 2**np.arange(num_features-1, -1, -1)\n decimal_data = np.dot(binary_data, pow_2)\n decimal_data_zero = decimal_data[binary_class == 0]\n decimal_data_one = decimal_data[binary_class == 1]\n count_zero, x_zero = _integer_histogram(decimal_data_zero)\n count_one, x_one = _integer_histogram(decimal_data_one)\n freq0, freq1, x = _unify_counts(count_zero, x_zero, count_one, x_one)\n data = _as_binary_data(x, num_features)\n return data, freq0, freq1", "title": "" }, { "docid": "ab3541905284387e831e8db4b38056fc", "score": "0.49937308", "text": "def preprocess_csv(self, csv, i):\n\n # Get row from csv file by index and extract features\n row = csv.loc[i, :]\n features = np.asarray([[row['Age'], row['Gender'], row['Position']]])\n\n # Extract labels\n labels = row['Labels']\n return features, labels", "title": "" }, { "docid": "178caaafee27b6e66bebebd2b9c9f884", "score": "0.4991038", "text": "def load_A1_data(folder):\n df= pd.read_csv(\"Datasets/\" +folder+ \"/labels.csv\")\n rows = []\n columns = []\n for i in [df.iloc[:,0]]:\n elements=(i.str.split())\n for data in elements:\n rows.append(data[1:4])\n for y in [df.columns[0]]:\n columns = (y.split())\n# original_dataset = DataFrame(rows,columns=columns)\n pbar = ProgressBar()\n for i in pbar(rows):\n i[0] = np.asarray(Image.open(\"Datasets/\"+folder+ \"/img/\"+i[0])) \n\n df = DataFrame(rows,columns=columns)\n df[\"gender\"] = pd.to_numeric(df[\"gender\"])\n df[\"gender\"] = df[\"gender\"].replace(-1, 0)\n df = df.drop(df.columns[[2]], axis=1)\n return df", "title": "" }, { "docid": "213e9b8a9cf7d1e0cd5e019b82a3d6c6", "score": "0.49872306", "text": "def slice(f):\r\n\r\n df = pd.read_csv(f)\r\n\r\n print \"Network Labels: \" + str(df.describe())\r\n\r\n error_df = df[(df['label'] != df['prediction'])]\r\n\r\n print \"Network Errors: \" + str(error_df.describe())\r\n plt.hist2d(df['label'], df['prediction'], norm = LogNorm())\r\n plt.colorbar()\r\n\r\n plt.title('Network Labels for Batch of 300 after 500 Training Steps')\r\n plt.xlabel('Image Label')\r\n plt.ylabel('Network Prediction')\r\n plt.savefig('labelsvpredictions.png')", "title": "" }, { "docid": "68746b8627b408b6fd7fb2a992cd8d66", "score": "0.49627012", "text": "def convert_csv_to_images(csv_filename, output_folder):\n data = get_data_from_csv(csv_filename)\n base_filename = os.path.splitext(os.path.join(\n output_folder, os.path.basename(csv_filename)))[0]\n\n save_execution_time_plot(base_filename + \"_time.png\", data)\n save_num_nonzero_links_plot(base_filename + \"_num_nonzero_links.png\", data)\n save_transit_load_spread_plot(\n base_filename + \"_transit_load_spread.png\", data)\n save_highest_capacity_links_plot(\n base_filename + \"_highest_capacity_links.png\", data)", "title": "" }, { "docid": "647626d0a157543149ecf4ec38a9fff4", "score": "0.49550566", "text": "def load_classes():\n cats_fn = os.path.join(os.path.dirname(__file__), '..', 'data', 'vgpretrain', 'joint_categories.csv')\n classes_df = pd.read_csv(cats_fn)\n return classes_df", "title": "" }, { "docid": "c1794693b97a4744e9c22d6c1a93adcc", "score": "0.49526975", "text": "def count_hashtags(filename):\n total_counter = Counter()\n print()\n\n print('Parsing current file: ', filename)\n df = json_to_df(filename)\n\n hashtags_dict = get_hashtags(df)\n # try:\n # hashtags_dict = 
get_hashtags(df)\n # except Exception as e:\n # print(e)\n\n total_counter += hashtags_dict\n\n with open(processed_data_path + '/' + os.path.basename(filename) + '_top_100_hashtags.txt', \"w\", encoding='utf-8') as f:\n for k,v in total_counter.most_common(100):\n # print(k,v)\n f.write( \"{} {}\\n\".format(k,v) )\n return total_counter", "title": "" }, { "docid": "0fc828056ad306ed6d851533812a9b12", "score": "0.49349186", "text": "def draw_his(lib_call='counts'):\n for key in new_cvs.head():\n # the following info has no histogram to display\n if key in [\"YEAR\",\"ANOM_ID\", 'DATE', \"TIME\", \"X\", \"Y\", \"TRAFFIC_CONTROL_CONDITION\",\n \"LOCATION\", \"GEO_ID\", \"LONGITUDE\",\t\"LATITUDE\",\t\"ObjectId\"]:\n continue\n if \"counts\" in lib_call:\n # using value_counts lib\n plt.figure()\n x = new_cvs[key]\n # x = Counter(new_cvs[key])\n n = pd.Series(x).value_counts()\n n.plot(kind='bar')\n for i in range(len(n)):\n print(\"{}, {}\".format(i, n[i]))\n text = plt.text(i, n[i], str(n[i]))\n print(text)\n elif 'his' in lib_call:\n # using histogram lib\n x = new_cvs[key]\n n, bins, patches = plt.hist(x)\n for i in range(len(n)):\n if n[i] <1:\n continue\n plt.text(bins[i], n[i], str(n[i]))\n\n plt.title(key)\n # plt.show()\n\n plt.savefig('{}_his.png'.format(key))\n plt.close()", "title": "" }, { "docid": "b0b5e9bd553affa5381e0ace3efc35c8", "score": "0.49266782", "text": "def load_mnist(path, kind='train'):\r\n file_path = os.path.join(path, kind+'.csv')\r\n #print(file_path)\r\n\r\n df = pd.read_csv(file_path)\r\n #df.head(5)\r\n\r\n labels = df.iloc[:,0]\r\n images = df.iloc[:,1:]\r\n \r\n return labels, images", "title": "" }, { "docid": "e69eb2cc66a6ec1575d8d704ca249771", "score": "0.4926015", "text": "def _convert_labels_to_numbers(path):\n\n\tlabels = _read_csv(path)\n\tlabels_num = []\n\tanimal_dict = create_dict()\n\tfor label in labels:\n\t\tif not (label == 'Animal'):\n\t\t\tlabels_num.append(animal_dict.get(label))\n\n\treturn labels_num", "title": "" }, { "docid": "b4d21e1e1ec9b61d08f3c14f2c48ca73", "score": "0.49244174", "text": "def plot_histogram(labels_hist, classes, dir_name, num_red_classes, title='load_balance'):\n n_classes = len(classes)\n label_counts = [labels_hist[x] for x in classes]\n sorted_counts_indices = sorted(range(len(label_counts)), key=lambda k: label_counts[k])\n sorted_classes = [classes[x] for x in sorted_counts_indices]\n sorted_label_counts = [label_counts[x] for x in sorted_counts_indices]\n nticks = range(n_classes)\n\n classes_colours = ['red' if i < num_red_classes else 'orange' for i in range(n_classes)]\n plt.bar(nticks, sorted_label_counts, width=2, alpha=0.2, color=classes_colours)\n\n plt.title('load balancing graph')\n plt.ylabel('Label Counts')\n\n plt.xticks(nticks, sorted_classes, rotation='vertical', fontsize=4)\n plt.xlabel('Labels')\n\n plt.savefig(path.join(dir_name, title))", "title": "" }, { "docid": "c59b9f1becda255acacca46b0f87d3fc", "score": "0.49208093", "text": "def load_from_csv(self, trigger=None):\n pass\n # if self.check_reset():\n # before_fn = self.try_browse(title='Select first histogram', file_type='csv(*.csv);;all (*)')\n # after_fn = self.try_browse(title='Select second histogram', file_type='csv(*.csv);;all (*)')\n # if before_fn and after_fn:\n # header = self.ih1.load(before_fn)\n # header = self.ih2.load(after_fn)\n # if self.ih1.ind > 0:\n # self.display_fit(fit_method='quick')", "title": "" }, { "docid": "d3252ad3e9c16e4eff99210e1846b9a0", "score": "0.49199745", "text": "def countLines():\n counter = 0\n\n 
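    # note: this counts raw file lines, including any header row (assumes no multi-line quoted CSV fields)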
with open('bc.processed.csv', 'r') as readfile:\n for line in readfile:\n counter += 1\n\n print counter", "title": "" }, { "docid": "488eabf55efc8481855312413890cf95", "score": "0.49188882", "text": "def get_count(csv_file):\n count = 0\n with open('customer.csv', 'r', newline='', encoding='ISO-8859-1') as myfile:\n for row in csv.reader(myfile):\n if row[6].lower() == 'active':\n count += 1\n return count", "title": "" }, { "docid": "a34ad295dcc0891ffcca293a0cae784e", "score": "0.49165088", "text": "def test_samples():\n correct = total = 0\n for filename, text in csv.reader(open('samples/samples.csv')):\n img = Image.open('samples/' + filename)\n if ocr(img) == text:\n correct += 1\n total += 1\n print ('Accuracy: %d/%d' % (correct, total))", "title": "" }, { "docid": "62c13cc5a61d3c4442e9269649933900", "score": "0.49163684", "text": "def labelCount(dataset, label): \n \n count = 0\n for entry in dataset:\n if entry[0] == label:\n count += 1\n print count, 'vectors with label', label", "title": "" }, { "docid": "61202286f52ce482f82bb81d64c49839", "score": "0.491028", "text": "def count_table_import(filepath):\n log.info('Starting to import OTU count table')\n try:\n tp = CSVParser(filepath, mode='r', delimiter='\\t')\n counts = tables.models.otu_counts\n sample_info = tables.models.sample_info\n sample_names = tp.get_fieldnames()[1:] # first field is OTUId\n log.info(f'{tp.filename} sample list: {sample_names}')\n with otudb.transaction():\n row_count = 0\n for row in tp.load_data():\n row_count+=1\n log.info('Importing: %s',row['OTUId'])\n sample_id = 0 #TODO implement sample_info imports for relationship\n for sample in sample_names:\n # sample_id = sample_info.get('sample_name' == sample).sample_id\n sample_id += 1\n if sample_id:\n log.info('Importing: %s of %s with %s%%',row['OTUId'],sample,row[sample])\n res = counts.create(otu_id=row['OTUId'],\n sample_id=sample_id,\n percent_abundance=row[sample]\n )\n else:\n log.info('\"%s\" not found in sample_info table.',sample)\n log.info('Completed importing %s rows from: %s', row_count, filepath)\n except Exception as e:\n log.error(f'Whoops while importing {filepath}.')\n raise e", "title": "" }, { "docid": "f46031c0130711afa291c5f86521c657", "score": "0.49015343", "text": "def __init__(self, input_data_csv_file, input_label_csv_file, use_random_graph=False, use_knn_graph=False):\n\n # Read the data csv\n pd_df = pd.read_csv(input_data_csv_file, header=None)\n self.data = pd_df.to_numpy(dtype=np.float32)\n\n # Read the label csv\n pd_df = pd.read_csv(input_label_csv_file, header=None)\n self.label = pd_df.to_numpy(dtype=np.int8)\n\n self.num_healthy_samps = np.count_nonzero(self.label == 0)\n self.num_unhealthy_samps = np.count_nonzero(self.label != 0)\n\n for idx, lab in enumerate(self.label):\n if lab != 0:\n self.label[idx] = 1\n\n # Set other attributes\n self.use_knn_graph = use_knn_graph\n self.use_random_graph = use_random_graph", "title": "" }, { "docid": "a341736fc05da4dc6035cb77672bbac1", "score": "0.48895937", "text": "def generate_total_counts_figure(max_samples, roinfo):\n\n total_counts_fig = bar_plot(\n roinfo.total_counts(),\n x=\"label_id\",\n y=\"count\",\n x_title=\"Label Id\",\n y_title=\"Count\",\n title=\"Total Object Count in Dataset\",\n hover_name=\"label_name\",\n )\n return total_counts_fig", "title": "" }, { "docid": "b75f25ca9002f8b311100e23e192fc1c", "score": "0.48818454", "text": "def load_breakpoint_count_data(files):\n breakpoint_count_data = []\n\n for sample_id, library_id, filepath in files:\n 
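        # one breakpoint-count table is read per (sample_id, library_id, filepath) input tuple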
csv_input = scgenome.csvutils.CsvInput(filepath)\n data = csv_input.read_csv()\n\n if library_id is not None:\n data['library_id'] = pd.Series([library_id], dtype=\"category\")\n\n if sample_id is not None:\n data['sample_id'] = pd.Series([sample_id], dtype=\"category\")\n\n breakpoint_count_data.append(data)\n\n breakpoint_count_data = pd.concat(breakpoint_count_data, ignore_index=True)\n breakpoint_count_data = breakpoint_count_data.rename(columns={'cluster_id': 'prediction_id'})\n\n # KLUDGE: normal reads are not filtered properly, filter by their prefix, and having '-' in cell id\n breakpoint_count_data = breakpoint_count_data.loc[~breakpoint_count_data['cell_id'].str.startswith('HS'), :]\n breakpoint_count_data = breakpoint_count_data.loc[breakpoint_count_data['cell_id'].apply(lambda a: '-' in a), :]\n\n return breakpoint_count_data", "title": "" }, { "docid": "35e196ad2238669c6dec8d7f47f1b78e", "score": "0.48768076", "text": "def feature_label_split(file1):\n unused_features = [\"casual\", \"registered\"]\n df = pd.read_csv(file1) # read data to pandas dataframe\n try:\n labels = df[\"count\"].values # get labels\n df.drop(\"count\", axis = 1, inplace = True)\n except KeyError:\n labels = []\n for feature in unused_features:\n try:\n df.drop(feature, axis = 1, inplace = True)\n except KeyError:\n continue\n features = df.iloc[:, 1:].values # get features\n return (features, labels)", "title": "" }, { "docid": "949bb4027d889ad14275e0d16320a709", "score": "0.48726764", "text": "def count_levels(self, path, headers):\n c = collections.Counter()\n\n #Save the order in which the counter combinations created\n self.ordered_headers=headers\n self.total_levels=len(headers)\n\n #build an array of combination tuple possible values\n # e.g. self.combo_sets[2]=(triangle,red),(triangle,green),(square,red)...\n # and self.combo_sets[3]=(triangle,red,fuzzy),(triangle,red,smooth)...\n for level in range(2,self.total_levels+1):\n self.combo_sets[level]=set([])\n\n with open(path) as csv_file:\n self.my_reader = csv.DictReader(csv_file)\n\n # validate that input fields match those read from 1st line of CSV:\n for head in headers:\n assert head in self.my_reader.fieldnames, \"{0:s} not in CSV fields\".format(head)\n # save the header name and create an empty set to hold its values\n self.value_sets[head]=set([])\n\n # now loop through CSV file\n for row in self.my_reader:\n combo = None\n #print row <-fine for debug, but not with BIG file\n # start with level = 1\n for level, head in enumerate(headers,1):\n # thing is the value at that column\n thing = row[head]\n # add it to the set of possible values for that column\n self.value_sets[head].add(thing)\n # increment the count of that value for that column\n c[thing] += 1\n\n #build the combo string for counting combinations of values\n if level == 1:\n combo = (thing,) #start a tuple\n else:\n combo += (thing,)\n #combo = '{0:s}{1:s}{2:s}'.format(combo,self.split_char,thing)\n c[combo] += 1\n\n self.combo_sets[level].add(combo)\n\n # set the class variable so we can have this counter built-in\n self.my_counter = c\n return c", "title": "" }, { "docid": "9f91a812ee35fa9a5ed0ca1ffb75b4c5", "score": "0.48691896", "text": "def load_from_csv(\n sample_table,\n peptide_table,\n counts_matrix,\n output,\n):\n\n ds = utils.dataset_from_csv(counts_matrix, peptide_table, sample_table)\n utils.dump(ds, output)", "title": "" }, { "docid": "700e291cc2b40cac0bebce07b82c40", "score": "0.48669213", "text": "def get_csv_feature_names(csv_file):\n df = pd.read_csv(csv_file, 
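                     # assumption about the CSV layout: only these three columns are needed, and the header must name one of them sumImportance for the sort below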
usecols=[0, 1, 3])\n df.sort_values(by='sumImportance', ascending=False, inplace=True)\n df['rank'] = range(len(df))\n df['rank'] += 1\n return df", "title": "" }, { "docid": "099678b2a7dfa61e01cca40582f40e62", "score": "0.48664713", "text": "def plot_unique(batch: str, data_type: str, out_path: str, subset1: str, subset2: str, targets: List[str],\n threshold: float, unique_count: pd.DataFrame):\n colors_per_target = make_plotly_colors(targets)\n fig = px.histogram(unique_count, x=\"Target\", y=\"Count\", color=\"Target\",\n color_discrete_map=colors_per_target)\n fig.update_xaxes(categoryorder=\"total descending\",\n tickangle=45,\n tickfont=dict(\n size=7.5,\n color='black'\n ))\n fig.update_layout(title_text=f'Count of unique molecules within {threshold} Tc threshold',\n xaxis_title_text='Target',\n yaxis_title_text='Count',\n autosize=False,\n width=1000,\n height=800,\n margin=dict(l=50, r=50, b=50, t=50, pad=10),\n legend={'traceorder': 'normal'})\n if data_type == 'scaffolds':\n save_plotly_fig(\"Unique\", \"Histograms\", out_path, plot=fig, data_type=data_type, batch=batch,\n threshold=threshold, subset1=subset1, subset2=subset2, bias='analogue_bias')\n elif data_type == 'molecules':\n save_plotly_fig(\"Unique\", \"Histograms\", out_path, plot=fig, data_type=data_type, batch=batch,\n threshold=threshold, subset1=subset1, subset2=subset2, bias='domain_bias')", "title": "" }, { "docid": "9118da39650ecca48c3f967e5a623a50", "score": "0.48602515", "text": "def generate_csv(self, output_csv_directory):\n label_checker = np.zeros(2059906)\n counter = 0\n\n # Navigating into the Idiap file structure\n for directory in os.listdir(self.database_path):\n output_csv_file = os.path.join(\n output_csv_directory, directory + \".csv\"\n )\n\n with open(output_csv_file, \"w\") as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=\",\")\n\n print(f\"Processing {directory}\")\n rows = []\n path = os.path.join(self.database_path, directory)\n if not os.path.isdir(path):\n continue\n for sub_directory in os.listdir(path):\n sub_path = os.path.join(path, sub_directory)\n label_checker[counter] = 1\n\n if not os.path.isdir(sub_path):\n continue\n\n for file in os.listdir(sub_path):\n relative_path = os.path.join(\n directory, sub_directory, file\n )\n rows.append(\n [\n str(counter).zfill(7),\n relative_path.rstrip(\"\\n\").rjust(42) + \"\\n\",\n ]\n )\n # csv_writer.writerow([label, relative_path])\n counter += 1\n csv_writer.writerows(rows)\n\n # print(counter)\n # Checking if all labels were taken\n zero_labels = np.where(label_checker == 0)[0]\n if zero_labels.shape[0] > 0:\n print(zero_labels)", "title": "" }, { "docid": "aeb53679ef01394d39e79fafddfbc6ad", "score": "0.48543605", "text": "def clean_article_counts(filename, categories, subcategories = None):\n\n df = pd.read_csv(filename, header = 0)\n\n categories_series = df['URL'].str\\\n .extract(r'https://www.dr.dk//(\\w+/?\\w+)/')[0]\\\n .apply(lambda x: x.split('/') if '/' in x else [x, np.nan]) #apply regex to extract categories\n \n df['Category'] = categories_series.str[0].str.lower()\n df['Subcategory'] = categories_series.str[1].str.lower()\n\n df = pd.concat([df[df['Category'] == category] for category in categories]) #Remove categories not in list\n\n if subcategories: #remove subcategories not in list\n df = pd.concat([df[df['Subcategory'] == subcategory].copy() for subcategory in subcategories])\n\n df['Date'] = pd.to_datetime(df['Date'])\n\n df = df[df['Date'].dt.year >= 2010]\n\n df.to_csv('cleaned_' + filename, header = True, 
index = False)", "title": "" }, { "docid": "00ec680a08e4606f6bd88fc98ffc1f24", "score": "0.48479638", "text": "def process_Data(data_file):\n\n read_file = csv.reader(data_file)\n line_count = 0\n img_count = 0\n\n chrome = ['Google Chrome', 0]\n ie = ['Internet Explorer', 0]\n safari = ['Safari', 0]\n fox = ['Firefox', 0]\n for line in read_file:\n line_count += 1\n if re.search(\"firefox\", line[2], re.I):\n fox[1] += 1\n elif re.search(r\"MSIE\", line[2]):\n ie[1] += 1\n elif re.search(r\"Chrome\", line[2]):\n chrome[1] += 1\n elif re.search(r\"Safari\", line[2]) and not re.search(\"Chrome\", line[2]):\n safari[1] += 1\n if re.search(r\"jpe?g|JPE?G|png|PNG|gif|GIF\", line[0]):\n img_count += 1\n\n img_hit_pct = (float(img_count) / line_count) * 100\n\n browser_count = [chrome, ie, safari, fox]\n\n high_browser = 0\n top_brow = ' '\n for b in browser_count:\n if b[1] > high_browser:\n high_browser = b[1]\n top_brow = b[0]\n else:\n continue\n\n msg = ('There were {} page hits today.'\n '\\nImage requests account for {}% of '\n 'hits. \\n{} has the most hits at {}.').format(line_count,\n img_hit_pct,\n top_brow,\n high_browser)\n print msg", "title": "" }, { "docid": "4a48d7fa5a54bf29b873f0ca78f649c8", "score": "0.484766", "text": "def barchart(filename):\r\n csv = open(filename)\r\n data = DictReader(csv)#standard csv Python module to read the data \r\n \r\n xlabel = data.fieldnames[0]#column headers\r\n print (xlabel)\r\n rows = [d for d in data]#other data\r\n print (rows)\r\n\t#find the extremes of the data\r\n maximum = max([float(r[n]) for n in data.fieldnames[1:] for r in rows])\r\n minimum = min([float(r[n]) for n in data.fieldnames[1:] for r in rows])\r\n print (maximum)\r\n print (minimum)\r\n \r\n for x,row in enumerate(rows):\r\n lastx=x\r\n label(row[xlabel],(x,10,0))#draw row label\r\n for y,ylabel in enumerate(data.fieldnames[1:]):\r\n bar(10.0*(float(row[ylabel])-minimum)/maximum,(x,0,y+1))#draw the bar for each column of row x\r\n x = lastx+1\r\n for y,ylabel in enumerate(data.fieldnames[1:]):\r\n label(ylabel,(x,0,y+1),'x')#draw column label\r\n return (lastx/2.0,6.0,0.0)#return the center position of the chart\r", "title": "" }, { "docid": "f658ba768b96ac9b6170975d6a731167", "score": "0.48455933", "text": "def plot_categorical_columns(df):\r\n categorical = df.select_dtypes(exclude = 'number')\r\n cat = list(categorical.columns)\r\n\r\n for i in cat:\r\n plt.figure(figsize = (16,14))\r\n sns.countplot(categorical[i])\r\n plt.xticks(rotation = 90)\r\n plt.show()", "title": "" }, { "docid": "df1279ca9122958b404f7c760d2d7d0c", "score": "0.4843653", "text": "def count_csv_rows(csv_file_name):\n\n with open(csv_file_name) as csvfile:\n reader = csv.DictReader(csvfile)\n i = 0\n for row in reader:\n i += 1\n\n return i", "title": "" }, { "docid": "d3ef7bbb196ee6251511e215cf4ef94f", "score": "0.48427463", "text": "def _load_counters(self,labeled_featuresets):\n\t\tself._label_freqdist = FreqDist() \n\t\tself._feature_freqdist = defaultdict(FreqDist) \n\t\tself._feature_values = defaultdict(set) \n\t\tself._fnames = set() \n \n\t\t# Count up how many times each feature value occured, given \n\t\t# the label and featurename. \n\t\tfor featureset, label in labeled_featuresets: \n\t\t\tself._label_freqdist.inc(label) \n\t\t\tfor fname, fval in featureset.items(): \n\t\t\t\t# Increment freq(fval|label, fname) \n\t\t\t\tself._feature_freqdist[label, fname].inc(fval) \n\t\t\t\t# Record that fname can take the value fval. 
\n\t\t\t\tself._feature_values[fname].add(fval) \n\t\t\t\t# Keep a list of all feature names. \n\t\t\t\tself._fnames.add(fname) \n\n\t\t# If a feature didn't have a value given for an instance, then \n\t\t# we assume that it gets the implicit value 'None.' This loop \n\t\t# counts up the number of 'missing' feature values for each \n\t\t# (label,fname) pair, and increments the count of the fval \n\t\t# 'None' by that amount. \n\t\tfor label in self._label_freqdist: \n\t\t\tnum_samples = self._label_freqdist[label] \n\t\t\tfor fname in self._fnames: \n\t\t\t\tcount = self._feature_freqdist[label, fname].N() \n\t\t\t\tself._feature_freqdist[label, fname].inc(None, num_samples-count) \n\t\t\t\tself._feature_values[fname].add(None)", "title": "" }, { "docid": "68f0019e64ca053f18878a2cb6ec50f3", "score": "0.48276323", "text": "def loadCategorias(catalog):\n catefile = cf.data_dir + 'category-id.csv'\n catfil = open(catefile)\n csvin = csv.reader(catfil, delimiter='\\t')\n data = [row for row in csvin]\n header = data.pop(0)\n for row in data:\n rowdict = dict(zip(header, row))\n model.addCategorias(catalog, rowdict)", "title": "" }, { "docid": "0df48a0c4ec028da944ea82b9a745904", "score": "0.48218036", "text": "def csv_counties(filename, bot_filtered=True):\n db = twitterproj.connect()\n template = \"{},{},{}\"\n with io.open(filename, 'w', encoding='utf-8') as f:\n f.write(\"# county name, state_fips, county_fips\\n\")\n lines = []\n for state in twitterproj.hashtag_counts__counties(db, bot_filtered=bot_filtered):\n line = template.format(state['name'],\n state['state_fips'],\n state['county_fips'])\n lines.append(line)\n f.write('\\n'.join(lines))", "title": "" }, { "docid": "f39b4b7418692e6bc925c0347bf57038", "score": "0.48213053", "text": "def create_histograms(self, categories, titles):\n colors = {}\n for i in range(len(categories)):\n data = categories[i]\n if isinstance(data, pd.core.series.Series):\n data = data[data.isnull() == False].value_counts(sort=False)\n labels = [name for name in data.keys()]\n else:\n data, no_bins = data\n data, bin_edges = np.histogram(data, bins=no_bins)\n bin_edges = (bin_edges[:-1] + bin_edges[1:]) / 2\n labels = bin_edges.astype(np.int)\n\n if len(colors) != len(labels):\n colors = dict(zip(labels, self._get_color(len(labels))))\n\n sp = self._get_next_plot()\n plt.bar(labels, data, color=[colors[l] for l in labels])\n plt.xticks(labels, rotation=90)\n sp.set_title(titles[i])\n plt.tight_layout()\n\n return self", "title": "" }, { "docid": "6a82df5b52f347804c4c87e6257d0596", "score": "0.4818657", "text": "def plot_categorical(df, size_inches=(5, 7), num_cols=None):\n num_features = df.select_dtypes(include=['O']).shape[1]\n num_rows = math.ceil(num_features / num_cols)\n fig, ax = plt.subplots()\n fig.set_size_inches(size_inches)\n for i, column in enumerate(df.select_dtypes(include=['O'])):\n plt.subplot(num_rows, num_cols, i + 1)\n df[column].value_counts().plot.barh()\n plt.title(column)\n return fig, ax", "title": "" }, { "docid": "2467c7989202a16cf5e21e9caddd3a9e", "score": "0.48184267", "text": "def _read_csv(path):\n\n\tlabels = []\n\n\twith open(path, newline='') as csvfile:\n\t labelsreader = csv.reader(csvfile)\n\t for row in labelsreader:\n\t labels.append(row[1])\n\n\treturn labels", "title": "" }, { "docid": "44104ee7f3a038f0f80ebc2de72d81f3", "score": "0.48142344", "text": "def counts(self,c, data):\n num_label = 0\n for x in data:\n if x.label == c :\n num_label += 1\n return num_label", "title": "" }, { "docid": 
"3aaaf102cc7e28d9b55be6c98138a570", "score": "0.48027056", "text": "def csv_to_graph(path):\n # parse csv file\n\n pass", "title": "" }, { "docid": "443758ae3a104b1a1194d27784c2f676", "score": "0.48017246", "text": "def prepare_file(filename):\n #filename = 'temp_data.csv'\n data = pd.read_csv(filename)\n fts = data['0'].unique() \n gdata = data.groupby('0') \n grp = {}\n for ft in fts:\n grp[ft] = gdata.get_group(ft).values \n #html_grp = gdata.get_group(ft[0]) \n #plain_grp = gdata.get_group(ft[2]) \n #cnt_stats = gdata['1'].count()\n #cnt_stats.sort_values(inplace=True)\n \n # create filetype to index dictionary\n ft_to_idx = {}\n idx_to_ft = {}\n for idx, ft in enumerate(fts):\n ft_to_idx[ft] = idx\n idx_to_ft[idx] = ft\n\n nclasses = len(fts)\n #assert nclasses == 93\n print ('n classes: ',nclasses)\n\n return ft_to_idx, idx_to_ft, nclasses, grp", "title": "" }, { "docid": "4616934d1e5a009fef343a086f9ee3e4", "score": "0.4801162", "text": "def test_piechart_plot_default(classifier_count_file: Path, tmp_path: Path):\n html_outfile = tmp_path / \"out.html\"\n plot_cog_classifier_piechart(\n df=pd.read_csv(classifier_count_file, sep=\"\\t\"),\n html_outfile=html_outfile,\n fig_width=520,\n fig_height=340,\n show_letter=True,\n sort=False,\n )\n assert html_outfile.exists()", "title": "" }, { "docid": "cdee62107703ba46666526ae8780289c", "score": "0.47953004", "text": "def hist_counts(df_act, y_scale=None, idle=False, figsize=(9,3)):\n assert y_scale in [None, 'log']\n df_act = df_act.copy()\n\n col_label = 'occurence'\n title ='Activity occurrences'\n xlabel = 'counts'\n\n if idle:\n df_act = add_idle(df_act)\n df = activities_count(df_act)\n df.reset_index(level=0, inplace=True)\n df = df.sort_values(by=['occurence'], axis=0)\n \n # plot\n fig, ax = plt.subplots(figsize=figsize)\n plt.title(title)\n plt.xlabel(xlabel)\n ax.barh(df['activity'], df['occurence'])\n if y_scale == 'log':\n ax.set_xscale('log')\n return fig", "title": "" }, { "docid": "fc308348d7c974800c52fafb335ab8e9", "score": "0.47916043", "text": "def samples_per_categories(argument):\n if isinstance(argument, basestring):\n samples_per_cat = []\n labels = folder_content1(argument)\n for i in labels:\n samples_per_cat.append(len(folder_content1(i, False)))\n else:\n samples_per_cat = np.bincount(argument).tolist()\n return samples_per_cat", "title": "" }, { "docid": "3e9ab13d8b8f10cb70f5cf2a4963273d", "score": "0.4773678", "text": "def create_count_plot(input_data, x_label, y_label, columns_start, columns_end, rotation=70, max_column=\"max_value\"):\n\n count_plot = sb.countplot(x=max_column,\n data=input_data,\n order=input_data.max_value.value_counts().iloc[columns_start:columns_end].index)\n\n count_plot.set_xticklabels(count_plot.get_xticklabels(), rotation=rotation)\n count_plot.set(xlabel=x_label, ylabel=y_label)\n\n return count_plot.get_figure()", "title": "" }, { "docid": "fd60a59a21c12433ee86281677c75e7a", "score": "0.4772602", "text": "def import_csv(file_path):\n print 'Loading file: %s'%file_path\n\n X = []\n y = []\n with open(file_path, 'rb') as csvfile:\n reader = csv.reader(csvfile)\n next(reader, None) # Skip the header row.\n for sample in reader:\n # Get the class.\n y.append(sample[0])\n\n # Turn inputs into floats and then reshape the data into the 41x41 matrix that we want for the convnet.\n X.append(np.array(sample[2:]).astype(np.float32).reshape(41,41)) # Skip the class and ID columns.\n\n return np.array(X).astype(int),np.array(y)", "title": "" }, { "docid": "363140fae2bb1ed2a5c69d32c45fb7ec", 
"score": "0.4765159", "text": "def print_dists( inputFile, filelabel=\"\"):\n import matplotlib.pyplot as plt\n\n data = pd.read_csv(inputFile, sep='\\t', header=None)\n data.columns = ['tokens', 'tags']\n\n dir = \"../plots/\"\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n fig = plt.figure()\n dist = data['tags'].value_counts(normalize=False)\n dist.index = [x[2:] if x != 'O' else x for x in dist.index]\n dist = dist.groupby(level=0).sum().sort_values(ascending=False)\n plt.xlabel(\"Concepts\")\n plt.ylabel(\"Frequency\")\n dist[1:].plot(kind='bar')\n plt.tight_layout()\n plt.show()\n fig.savefig(dir + filelabel + \"_tags_dist\" + \".pdf\", format='pdf')\n\n fig = plt.figure()\n dist = data['tokens'].value_counts(normalize=False)\n plt.xlabel(\"Tokens\")\n plt.ylabel(\"Frequency\")\n dist[:30].plot(kind='bar')\n plt.tight_layout()\n # plt.show()\n fig.savefig(dir + filelabel + \"_tokens_dist\" + \".pdf\", format='pdf')", "title": "" }, { "docid": "f2e00636ddb057003ef678090b8f6004", "score": "0.47617894", "text": "def labels():\r\n f = file(labels_file_name)\r\n lprofile = collections.defaultdict(set)\r\n for i, line in enumerate(f):\r\n labels = line.strip().split(\",\")\r\n for x in labels:\r\n lprofile[x].add(i)\r\n return lprofile.keys()", "title": "" }, { "docid": "968be38ea39c9248fb786aff8a9bd283", "score": "0.47468013", "text": "def simple_feature_histograms_PT(targets: List[str], batch: str, subset: str, dimension: str,\n out_path: str) -> None:\n calculate_data_for_plot(f\"feats_{dimension}\", targets=targets, batch=batch, subset=subset, out_path=out_path)\n\n dataframe_to_plot = concat_data_for_plot(f\"feats_{dimension}\", out_path,\n targets=targets, batch=batch, subset=subset)\n\n colors_list = make_matplotlib_colors(targets)\n feats = list(dataframe_to_plot.columns)\n\n mins = []\n maxes = []\n for feat in feats:\n mins.append(min(dataframe_to_plot[feat]))\n maxes.append(max(dataframe_to_plot[feat]))\n\n for target in range(len(targets)):\n if dimension == \"2D\":\n fig, axs = plt.subplots(7, 4, figsize=(20, 20), sharey='all', tight_layout=True)\n elif dimension == \"3D\":\n fig, axs = plt.subplots(3, 4, figsize=(17, 10), sharey='all', tight_layout=True)\n else:\n raise Exception(\"Wrong dimension\")\n axs = axs.ravel()\n for feat in range(len(feats)):\n xmin = mins[feat]\n xmax = maxes[feat]\n axs[feat].hist(dataframe_to_plot.loc[targets[target]][dataframe_to_plot.columns[feat]],\n color=colors_list[target], range=(xmin, xmax))\n axs[feat].set_title(f'Count of {dataframe_to_plot.columns[feat]} in target {targets[target]}', fontsize=12)\n axs[feat].set_xlabel('Values', fontsize=8)\n axs[feat].set_ylabel('Count', fontsize=8)\n axs[feat].xaxis.set_tick_params(which='both', labelbottom=True)\n\n if dimension == \"2D\":\n fig.delaxes(axs[26])\n fig.delaxes(axs[27])\n\n save_matplotlib_figs('Histograms_PT', out_path,\n batch=batch, subset=subset, dimension=dimension, base=targets[target])", "title": "" }, { "docid": "d41711de0173be82521c7ee3adfd7d0a", "score": "0.47403926", "text": "def plot_all_train_data(file_name):\n\n chunk_size = 0.5*10**8\n title = \"chunk \"\n i = 0\n\n for chunk in pd.read_csv(file_name, chunksize=chunk_size):\n plot_train_dataframe(chunk, title+str(i))\n i += 1\n plt.show()", "title": "" }, { "docid": "110c91cf6e62e936d70eaa0621ffa9ac", "score": "0.4731651", "text": "def load_train_csv(file_name, verbose=True):\n train_df = pd.read_csv(file_name)\n train_df.drop(columns=['elapsed_timedelta', 'width', 'height', 'plate_time', 'sample_date'], 
inplace=True)\n if verbose:\n print(\"{} train images - {} annotations\".format(train_df['id'].nunique(), len(train_df['id'])))\n print(\"Columns : {}\".format(train_df.columns.values))\n return train_df", "title": "" }, { "docid": "733398d3ce3a364ec8a960f9a3b9b527", "score": "0.4731573", "text": "def load_counts(self):\n with codecs.open(self.filename, encoding='utf-8') as fid:\n self._word_counts = Counter(\n {word: int(count)\n for count, word in (line.split() for line in fid)})\n self.setup_count_sum()", "title": "" }, { "docid": "b32a82966dd629dfbf0bd8637b831eb2", "score": "0.47301903", "text": "def CountandCreate(x, output_dir):\n import pandas as pd\n items = list(set(x))\n countings = []\n for value in items: \n countings.append(x.count(value))\n countings_df = pd.DataFrame({'model': items, 'countings': countings})\n \n output_name = output_dir / \"counts.csv\"  # assumes output_dir is a pathlib.Path\n countings_df.to_csv(output_name, header = True)", "title": "" }, { "docid": "2046df894dca2a33d593e45c630bec51", "score": "0.4725296", "text": "def median_freq_balancing(dataloader, num_classes):\n class_count = 0\n total = 0\n for _, label in dataloader:\n label = label.cpu().numpy()\n\n # Flatten label\n flat_label = label.flatten()\n\n # Sum up the class frequencies\n bincount = np.bincount(flat_label, minlength=num_classes)\n\n # Create a mask of classes that exist in the label\n mask = bincount > 0\n # Multiply the mask by the pixel count. The resulting array has\n # one element for each class. The value is either 0 (if the class\n # does not exist in the label) or equal to the pixel count (if\n # the class exists in the label)\n total += mask * flat_label.size\n\n # Sum up the number of pixels found for each class\n class_count += bincount\n\n # Compute the frequency and its median\n freq = class_count / total\n med = np.median(freq)\n\n return med / freq", "title": "" }, { "docid": "8027aceac79ac131d56b2c062289d17f", "score": "0.46983284", "text": "def visualize_type():\n data_file = parse(MY_FILE, \",\")\n\n counter = Counter(item[\"Category\"] for item in data_file)\n\n labels = tuple(counter.keys())\n\n xlocations = na.array(range(len(labels))) + 0.5\n\n width = 0.5\n\n plt.bar(xlocations, counter.values(), width = width)\n\n plt.xticks(xlocations + width / 2, labels, rotation = 90)\n\n plt.subplots_adjust(bottom = 0.4)\n\n plt.rcParams[\"figure.figsize\"] = 12 , 8\n\n plt.savefig(\"Type.png\")\n\n plt.clf()", "title": "" } ]
c5a655b906dda30ad96710b528390928
project Vector onto TransCom regions
[ { "docid": "e227b2e3424294691783bd382df97a96", "score": "0.0", "text": "def vector2tc(self, vectordata, cov=False):\n\n M = self.tcmatrix\n if cov:\n return np.dot(np.transpose(M), np.dot(vectordata, M))\n else:\n return np.dot(vectordata.squeeze(), M)", "title": "" } ]
[ { "docid": "8ad5bad2918b718686e9f9f86502b8d0", "score": "0.66207576", "text": "def translate(self, vec):\n vec = toKiCADPoint(vec)\n for drawing in self.board.GetDrawings():\n drawing.Move(vec)\n for footprint in self.board.GetFootprints():\n footprint.Move(vec)\n for track in self.board.GetTracks():\n track.Move(vec)\n for zone in self.board.Zones():\n zone.Move(vec)\n for substrate in self.substrates:\n substrate.translate(vec)\n self.boardSubstrate.translate(vec)\n self.backboneLines = [shapely.affinity.translate(bline, vec[0], vec[1])\n for bline in self.backboneLines]\n self.hVCuts = [c + vec[1] for c in self.hVCuts]\n self.vVCuts = [c + vec[0] for c in self.vVCuts]\n for c in self.vVCuts:\n c += vec[1]\n self.setAuxiliaryOrigin(self.getAuxiliaryOrigin() + vec)\n self.setGridOrigin(self.getGridOrigin() + vec)\n for drcE in self.drcExclusions:\n drcE.position += vec", "title": "" }, { "docid": "6fe28d43b6416dc5c29332d7955ffbec", "score": "0.63097304", "text": "def transform_vector(self,uin,vin,lons,lats,nx,ny,returnxy=False,preserve_magnitude=True,**kwargs):\n lonsout, latsout, x, y = self.makegrid(nx,ny,returnxy=True)\n # interpolate to map projection coordinates.\n uin = interp(uin,lons,lats,lonsout,latsout,**kwargs)\n vin = interp(vin,lons,lats,lonsout,latsout,**kwargs)\n if preserve_magnitude:\n # compute original magnitude.\n mag = pylab.sqrt(uin**2+vin**2)\n rad2dg = 180./math.pi\n tiny = 1.e-5\n delta = 0.1\n coslats = pylab.cos(latsout/rad2dg)\n # use dx/dlongitude, dx/dlatitude, dy/dlongitude and dy/dlatitude\n # to transform vector to map projection coordinates.\n # dlongitude is delta degrees at equator, dlatitude is delta degrees.\n xn,yn = self(lonsout,pylab.where(latsout+delta<90.,latsout+delta,latsout))\n # at poles, derivs w/respect to longitude will be zero.\n lonse = pylab.where(coslats>tiny,lonsout+(delta/coslats),lonsout)\n xe,ye = self(lonse,latsout)\n uout = uin*(xe-x)*(coslats/delta) + vin*(xn-x)/delta\n vout = uin*(ye-y)*(coslats/delta) + vin*(yn-y)/delta\n # make sure uout, vout not too small (quiver will raise\n # an exception when trying to rescale vectors).\n uout = pylab.where(pylab.fabs(uout)<tiny,tiny,uout)\n vout = pylab.where(pylab.fabs(vout)<tiny,tiny,vout)\n # fix units. 
\n if self.projection != 'cyl':\n uout = uout*rad2dg/self.rsphere\n vout = vout*rad2dg/self.rsphere\n # rescale magnitude.\n if preserve_magnitude:\n magout = pylab.sqrt(uout**2+vout**2)\n uout = uout*mag/magout\n vout = vout*mag/magout\n if returnxy:\n return uout,vout,x,y\n else:\n return uout,vout", "title": "" }, { "docid": "a08799fdf201a4a1d97ea8d2139c0718", "score": "0.6139174", "text": "def VecBaseTrafoSky(vec, kvec, qRoh, qRohErw, Q1, Q2):\n \n vecp = vec.reshape((-1, 3)) # casts vec into mx3\n \n if len(vecp) > len(qRohErw):\n print \"Error: vec > Number of BZ in qRohErw\"\n return None\n \n imap = indexMap(kvec, qRoh, qRohErw, Q1, Q2)\n IndexPosList = imap[\"IndexPosList\"]\n IndexNewPosList = imap[\"IndexNewPosList\"]\n\n# kBZ = kvec - q(imap[\"minpos\"], qRoh, qRohErw, Q1, Q2)\n \n nv = np.zeros((len(qRohErw), 3), np.complex)\n for l in xrange(len(qRohErw)):\n if IndexNewPosList[l][0] != None and IndexPosList[l, 0] < len(vecp): # first most important line\n nv[IndexNewPosList[l][0]] = vecp[IndexPosList[l, 0]] # second most important line\n \n return nv", "title": "" }, { "docid": "0ce04c3d72b0c8698cfd6e086e974516", "score": "0.61273146", "text": "def project_onto_plane(vect):\n x, y, z = vect\n \n return (x, y, 0.)", "title": "" }, { "docid": "8f7e8d49701102e23157222cdc3a94a9", "score": "0.6126651", "text": "def GetVectorComponents(self):\n ...", "title": "" }, { "docid": "1416b6774aaf8f49fdd87faf7fe5febd", "score": "0.6086602", "text": "def GetVector(self):\n ...", "title": "" }, { "docid": "18323830291e893f38a3d4c19afdd184", "score": "0.6054801", "text": "def translate(self, vector):\n self.start.translate(vector)\n self.end.translate(vector)", "title": "" }, { "docid": "dbea37288ba88b342fca4d35dc3d79a8", "score": "0.60254073", "text": "def VecBaseTrafoSky_new(vec, kvec, qRoh, qRohErw, Q1, Q2):\n \n vecp = vec.reshape((-1, 3)) # casts vec into mx3\n \n if len(vecp) > len(qRohErw):\n print \"Error: vec > Number of BZ in qRohErw\"\n return None\n \n imap = indexMap(kvec, qRoh, qRohErw, Q1, Q2)\n IndexPosList = imap[\"IndexPosList\"]\n IndexNewPosList = imap[\"IndexNewPosList\"]\n\n# kBZ = kvec - q(imap[\"minpos\"], qRoh, qRohErw, Q1, Q2)\n \n nv = np.zeros((len(qRohErw), 3), np.complex)\n #--------------------------------------------------------------------------\n def multi_order(INPL, sortfrom, sortin):\n IPL = range(len(qRohErw))\n for l in xrange(len(qRohErw)):\n if IndexNewPosList[l][0] != None and IndexPosList[l, 0] < len(vecp): # first most important line\n nv[IndexNewPosList[l][0]] = vecp[IndexPosList[l, 0]] # second most important line\n \n return nv", "title": "" }, { "docid": "c584575db59bbbfc2e4ed8aeddb95614", "score": "0.60223234", "text": "def project(self, vector):\n # The vector cannot be the zero vector.\n if vector == hou.Vector3():\n raise hou.OperationFailed(\"Supplied vector must be non-zero.\")\n\n return vector.normalized() * self.componentAlong(vector)", "title": "" }, { "docid": "740b5de1c4066cc3f016ab312c4f1864", "score": "0.5914351", "text": "def projective_transform(self,X):\r\n projected = np.array([])\r\n\r\n x = X[:,0]/X[:,2]\r\n y = X[:,1]/X[:,2]\r\n\r\n u = self.f * x + self.c[0]/2\r\n v = self.f * y + self.c[1]/2\r\n\r\n u = np.hstack(u)\r\n v = np.hstack(v)\r\n return u,v", "title": "" }, { "docid": "f129c0f88277e1ed6fb1375dd04c3396", "score": "0.5899147", "text": "def translate(self, element, trans_vector):\n matrix = AllplanGeo.Matrix3D()\n matrix.Translate(trans_vector)\n return AllplanGeo.Transform(element, matrix)", "title": "" }, { 
"docid": "13b842bb4259fd8d80489994ac846ae1", "score": "0.5889872", "text": "def ProjectOnPlane(v, a):\n return _bullet.ProjectOnPlane(v, a)", "title": "" }, { "docid": "eadd20afdbcc8c0d1552e223963dd058", "score": "0.58785737", "text": "def create_mask_from_vector(vector_data_path, cols, rows, geo_transform,\n projection, target_value=1):\n print(vector_data_path)\n data_source = gdal.OpenEx(vector_data_path, gdal.OF_VECTOR)\n layer = data_source.GetLayer(0)\n driver = gdal.GetDriverByName('MEM') # In memory dataset\n target_ds = driver.Create('', cols, rows, 1, gdal.GDT_UInt16)\n target_ds.SetGeoTransform(geo_transform)\n target_ds.SetProjection(projection)\n gdal.RasterizeLayer(target_ds, [1], layer, burn_values=[target_value])\n return target_ds", "title": "" }, { "docid": "f5820dd1268fe67e37509b593ac17e66", "score": "0.5877732", "text": "def proj_vector(self, vector_f):\n vector_p = []\n for cell_index in range(self.mesh.get_number_of_cells()):\n vector_p.append([])\n face_orient = zip(self.mesh.get_cell(cell_index),\n self.mesh.get_cell_normal_orientation(cell_index))\n for (face_index, orient) in face_orient:\n current_centroid = self.mesh.get_face_real_centroid(face_index)\n current_normal = self.mesh.get_face_normal(face_index)*orient\n proj_value = vector_f(current_centroid).dot(current_normal)\n vector_p[-1].append(proj_value)\n\n return vector_p", "title": "" }, { "docid": "def05a09ea57f08a443d64d96d957a9b", "score": "0.586958", "text": "def apply_translation(self, vector: pint.Quantity):\n for i in range(self.num_segments):\n self._segments[i].apply_translation(vector)", "title": "" }, { "docid": "4d422d9aa0ba1fb4a95a00ccc96575b9", "score": "0.5868548", "text": "def translate(pt, vec):\n pt[0] += vec[0]\n pt[1] += vec[1]\n return pt", "title": "" }, { "docid": "ac06cf5924d60f5e9dd543ab64c7d154", "score": "0.58339363", "text": "def translate(self, vector):\n vector = np.asarray(vector)\n\n forward = np.eye(4)\n forward[:, -1][:-1] = vector\n\n reverse = np.eye(4)\n reverse[:, -1][:-1] = -vector\n\n return self.append_transform4(forward, reverse)", "title": "" }, { "docid": "0289d3445934994da3d48fad6dd7c82b", "score": "0.5826715", "text": "def vectorized_map(self) -> Sequence[Polygon]:\n raise NotImplementedError(\"vectorized_map is not implemented by default.\")", "title": "" }, { "docid": "5a47833ef83a1c257df5bbd45dcf5616", "score": "0.5813154", "text": "def vector_proj(v_input, v_target):\n v_target /= np.linalg.norm(v_target[0:2, 0])\n return np.dot(v_input[:, 0], v_target[:, 0]) * v_target", "title": "" }, { "docid": "ddd591e99e6c4576f12e6c2f8de2d04f", "score": "0.58119595", "text": "def _vector(self, pos):\n\n # fractional potential array index.\n ixf = (pos[:,0]-self.x0)/self.dx #- 1\n iyf = (pos[:,1]-self.y0)/self.dy #- 1\n izf = (pos[:,2]-self.z0)/self.dz #- 1\n\n # integer part of potential array index.\n ix = np.floor(ixf).astype(np.int)\n iy = np.floor(iyf).astype(np.int)\n iz = np.floor(izf).astype(np.int)\n\n # calculate distance of point from gridlines.\n xd = np.tile((ixf - ix), (3, 1)).T\n yd = np.tile((iyf - iy), (3, 1)).T\n zd = np.tile((izf - iz), (3, 1)).T\n\n # clamp out of range indicies to edges of array\n ix[ix<0] = 0\n iy[iy<0] = 0\n iz[iz<0] = 0\n ix[ix>self.nx] = self.nx\n iy[iy>self.ny] = self.ny\n iz[iz>self.nz] = self.nz\n\n q111 = self.data[ix , iy , iz ]\n q112 = self.data[ix , iy , iz+1]\n q121 = self.data[ix , iy+1, iz ]\n q122 = self.data[ix , iy+1, iz+1 ]\n q211 = self.data[ix+1, iy , iz ]\n q212 = self.data[ix+1, iy , iz+1]\n q221 = 
self.data[ix+1, iy+1, iz ]\n q222 = self.data[ix+1, iy+1, iz+1 ]\n\n i1 = (xd*q211 + (1-xd)*q111)\n i2 = (xd*q221 + (1-xd)*q121)\n j1 = (xd*q212 + (1-xd)*q112)\n j2 = (xd*q222 + (1-xd)*q122)\n\n k1 = (yd*i2 + (1-yd)*i1)\n k2 = (yd*j2 + (1-yd)*j1)\n\n return (zd*k2 + (1-zd)*k1)", "title": "" }, { "docid": "9270bec2d5257b2969984ad12ce5bfc4", "score": "0.5772105", "text": "def remove_projection_from_vector(v, w):\n return vector_subtract(v, project(v, w))", "title": "" }, { "docid": "7cf7bb6fde98632dc3169604069a123c", "score": "0.57701236", "text": "def projective_transform(self,x):\n focal = self.f\n sensor = self.c\n \n #General Coordinates\n gcx = x[0]/x[2]\n gcy = x[1]/x[2]\n \n #Pixel Locations\n pu = gcx*focal + sensor[0]/2.\n pv = gcy*focal + sensor[1]/2.\n \n return np.array([pu,pv])", "title": "" }, { "docid": "5a0166674d5f4fb6ec645eb81c1287fd", "score": "0.5727712", "text": "def vectorize(cs):\n for c in cs.collections:\n c.set_rasterized(True)", "title": "" }, { "docid": "a7b69e44bd58286cce05d50d4f7e3921", "score": "0.571815", "text": "def transform(self, v):\n if len(self.current_batch) > 0:\n self.flush()\n return np.squeeze(self.ipca.transform(np.atleast_2d(v)))", "title": "" }, { "docid": "3033af53d181f727fe759fbd54667193", "score": "0.57160413", "text": "def translation( cls, vector3D ):\n M = cls()\n M[ 0] = 1.0; M[ 4] = 0.0; M[ 8] = 0.0; M[12] = vector3D.x;\n M[ 1] = 0.0; M[ 5] = 1.0; M[ 9] = 0.0; M[13] = vector3D.y;\n M[ 2] = 0.0; M[ 6] = 0.0; M[10] = 1.0; M[14] = vector3D.z;\n M[ 3] = 0.0; M[ 7] = 0.0; M[11] = 0.0; M[15] = 1.0;\n return M", "title": "" }, { "docid": "c1794854b58b091dfac5ea53cff56a3b", "score": "0.5703361", "text": "def translate(self, vector):\n self._primitives.modeler.translate(self.id, vector)\n return self", "title": "" }, { "docid": "7ce83a602376052f94d869a2202baf66", "score": "0.5702777", "text": "def create_mask_from_vector(vector_data_path, cols, rows, geo_transform,\n projection, target_value=1):\n data_source = gdal.OpenEx(vector_data_path, gdal.OF_VECTOR)\n layer = data_source.GetLayer(0)\n driver = gdal.GetDriverByName('MEM') # In memory dataset\n target_ds = driver.Create('', cols, rows, 1, gdal.GDT_UInt16)\n target_ds.SetGeoTransform(geo_transform)\n target_ds.SetProjection(projection)\n gdal.RasterizeLayer(target_ds, [1], layer, burn_values=[target_value])\n return target_ds", "title": "" }, { "docid": "c23385157ba998454b777abc2de7e73d", "score": "0.56816894", "text": "def translate(self, vector: pint.Quantity) -> DynamicShapeSegment:\n new_segment = copy.deepcopy(self)\n return new_segment.apply_translation(vector)", "title": "" }, { "docid": "fb4caf1866a024b63ad2d659d0a0a15c", "score": "0.5671559", "text": "def apply_translation(self, vector):\n self._points = (self.points.transpose() + vector).transpose()\n return super().apply_translation(vector)", "title": "" }, { "docid": "834c336ee41df033d4d017f524117d34", "score": "0.567051", "text": "def transform(matrix, vector):\n vout = []\n for j in range(2):\n x = 0\n for i in range(4):\n x += matrix[i][j] * vector[i]\n vout += [x]\n return vout", "title": "" }, { "docid": "0157c722d10c90dac0d77c69e62ecc96", "score": "0.56701595", "text": "def transform(v, R):\n V = [0, 0, 0]\n x, y, z = v\n for r_x in R:\n V_i = []\n for a, b, c in r_x:\n p_i = a*x + b*y + c*z\n V_i.append(p_i)\n V[0] += V_i[0]\n V[1] += V_i[1]\n V[2] += V_i[2]\n return V", "title": "" }, { "docid": "0deafe1a74964b12f9a19150a86e392c", "score": "0.5667168", "text": "def object_to_world(voxels,\n euler_angles_x,\n 
euler_angles_y,\n translation_vector,\n target_volume_size=(128, 128, 128)):\n scale_factor = 1.82 # object to world voxel space scale factor\n\n translation_vector = tf.expand_dims(translation_vector, axis=-1)\n\n sampling_points = tf.cast(sampling_points_from_3d_grid(target_volume_size),\n tf.float32) # 128^3 X 3\n transf_matrix_x = rotation_matrix_3d.from_euler(euler_angles_x) # [B, 3, 3]\n transf_matrix_y = rotation_matrix_3d.from_euler(euler_angles_y) # [B, 3, 3]\n transf_matrix = tf.matmul(transf_matrix_x, transf_matrix_y) # [B, 3, 3]\n transf_matrix = transf_matrix*scale_factor # [B, 3, 3]\n sampling_points = tf.matmul(transf_matrix,\n tf.transpose(sampling_points)) # [B, 3, N]\n translation_vector = tf.matmul(transf_matrix, translation_vector) # [B, 3, 1]\n sampling_points = sampling_points - translation_vector\n sampling_points = tf.linalg.matrix_transpose(sampling_points)\n sampling_points = sampling_points_to_voxel_index(sampling_points,\n target_volume_size)\n sampling_points = tf.cast(sampling_points, tf.float32)\n interpolated_points = trilinear.interpolate(voxels, sampling_points)\n interpolated_voxels = tf.reshape(interpolated_points,\n [-1] + list(target_volume_size)+[4])\n\n return interpolated_voxels", "title": "" }, { "docid": "9b9d3ac183d7d37e966f936ff8ce40b8", "score": "0.5650408", "text": "def project(vx, vy, occlusion):\n p = np.zeros(vx.shape)\n div = -0.5 * (np.roll(vx, -1, axis=1) - np.roll(vx, 1, axis=1)\n + np.roll(vy, -1, axis=0) - np.roll(vy, 1, axis=0))\n div = make_continuous(div, occlusion)\n\n for k in range(50):\n p = (div + np.roll(p, 1, axis=1) + np.roll(p, -1, axis=1)\n + np.roll(p, 1, axis=0) + np.roll(p, -1, axis=0))/4.0\n p = make_continuous(p, occlusion)\n\n vx = vx - 0.5*(np.roll(p, -1, axis=1) - np.roll(p, 1, axis=1))\n vy = vy - 0.5*(np.roll(p, -1, axis=0) - np.roll(p, 1, axis=0))\n\n vx = occlude(vx, occlusion)\n vy = occlude(vy, occlusion)\n return vx, vy", "title": "" }, { "docid": "262acae79f04ae0d6ed33bd448e57597", "score": "0.5625782", "text": "def unit_vectors(self):\n return self.orientation(self._uv())", "title": "" }, { "docid": "c09835ac45678445e8715b81e636ab88", "score": "0.56252146", "text": "def rangeVector(self):\n v = PISM.IceModelVec2V()\n v.create(self.grid, \"\", True, WIDE_STENCIL)\n\n # Add appropriate meta data.\n intent = \"?inverse?\" # FIXME\n desc = \"SSA velocity computed by inversion\"\n v.set_attrs(intent, \"%s%s\" % (\"X-component of the \", desc), \"m s-1\", \"\", 0)\n v.set_attrs(intent, \"%s%s\" % (\"Y-component of the \", desc), \"m s-1\", \"\", 1)\n v.metadata(0).set_string(\"glaciological_units\", \"m year-1\")\n v.metadata(1).set_string(\"glaciological_units\", \"m year-1\")\n v.write_in_glaciological_units = True\n sys = self.grid.ctx().unit_system()\n huge_vel = PISM.convert(sys, 1e6, \"m/year\", \"m/second\")\n attrs = [(\"valid_min\", -huge_vel), (\"valid_max\", huge_vel), (\"_FillValue\", 2 * huge_vel)]\n for a in attrs:\n for component in range(2):\n v.metadata(component).set_double(a[0], a[1])\n\n return PISMLocalVector(v)", "title": "" }, { "docid": "9fbdc3f062dd17c594ebaf42bc9e817c", "score": "0.562459", "text": "def _transform_in(self):\n return np.array([\n [self.left, self.bottom, 0, 1],\n [self.right, self.top, 0, 1]])", "title": "" }, { "docid": "fb297eb0547c63bfdbe1765163ea69f0", "score": "0.5622042", "text": "def vertex_vector_projections( self, vectors ):\n if len( vectors.shape ) == 1:\n vectors = np.array( [ vectors ] )\n to_return = []\n for point in 
self.abstract_geometry.points_wocs_ctwocc():\n for vec in vectors:\n to_return.append( cos_theta( vec, point ) )\n return np.array( to_return ).reshape(-1,vectors.shape[0])", "title": "" }, { "docid": "2d04771169fa3c15eb7a59cc9fc6b7bc", "score": "0.5620052", "text": "def main():\n \"\"\"Exemple de transformations.\"\"\"\n transformation = [1, 0, 2], [-1, -1, 0], [0, 2, 1]\n # transformation = [1, 0, 0], [0, 1, 0], [0, 0, 1]\n\n \"\"\"Création du repere.\"\"\"\n cart = repere(trans_matrix=transformation)\n\n \"\"\"Exemple de vecteurs.\"\"\"\n cart.new_vector([2, 2, -3])\n cart.new_vector([-3, -1, -2])\n # cart.new_vector([2, 2, 0])\n # cart.new_vector([0, 0, -1])\n\n \"\"\" Addition de vecteurs.\"\"\"\n cart.add_vector('V1', 'V2', 'V1,2')\n cart.add_vector('V1', 'V1,2')\n\n \"\"\"Dessine les vecteurs.\"\"\"\n # cart.draw_vector('V1', comp=True, det=True)\n # cart.draw_vector('V2', comp=True, det=True)\n cart.draw_vector('V1')\n cart.draw_vector('V2', fade=True)\n cart.draw_vector('V1,2', fade=True)\n cart.draw_vector('V1 + V1,2', det=True, comp=True, added=True, color='c')\n \n cart.show(print_info=True)", "title": "" }, { "docid": "b62acca693b3418ad2a6167f9b68c66a", "score": "0.55821776", "text": "def vectorize(raster, mergecells=True, metavars=True, bandnum=0):\n\n from ..vector.data import VectorData\n\n if mergecells:\n \n # merge/union cells of same values\n\n import shapely\n from shapely.geometry import Polygon, LineString, Point\n \n if raster.mode == \"1bit\":\n\n## import PIL.ImageMorph\n## op = PIL.ImageMorph.MorphOp(op_name=\"edge\")\n## img = raster.bands[bandnum].img\n## # extend img with 1 pixel border to allow identifying edges along the edge\n## # ...\n## \n## pixcount,outlineimg = op.apply(img)\n## outlinepix = outlineimg.load()\n## outlineimg.show()\n## \n## active = op.match(img)\n\n # difficult part is how to connect the edge pixels\n\n # Approach 1: Center-point\n # first get pixels as coordinates via geotransform\n # start on first matching pixel\n # then examine and follow first match among neighbouring pixels in clockwise direction\n # each pixel followed is converted to coordinate via geotransform and added to a list\n # keep following until ring is closed or no more neighbours have match (deadend)\n # jump to next unprocessed pixel, and repeat until all have been processed\n # after all is done, we have one or more polygons, lines in the case of deadends, or points in the case of just one match per iteration\n # if polygons, identify if any of them are holes belonging to other polygons\n\n## outvec = VectorData()\n##\n## def right(dirr):\n## xoff,yoff = dirr\n## # ...\n##\n## def neighbour(prev,cur):\n## x,y = cur\n## if not prev:\n## prev = x,y-1 # pretend came from top so will go down\n## dirr = x-prev[0],y-prev[1]\n## for _ in range(8):\n## xoff,yoff = right(dirr)\n## nx,ny = x+xoff,y+yoff\n## # neighbouring on-pixel, but cannot go back\n## if (nx,ny) not in (prev,cur) and outlinepix[nx,ny]:\n## yield nx,ny\n## dirr = right((xoff,yoff))\n##\n## parts = []\n## while active:\n## print len(active)\n## path = []\n## \n## # can only start new feat on an active non-visited cell, but can follow any on-pixel\n## prev = None\n## cur = active[0]\n## while cur:\n## #print cur\n## path.append(cur)\n## if cur in active:\n## active.remove(cur)\n## \n## connections = list(neighbour(prev, cur))\n## if not connections:\n## # reached deadend, ie nowhere else to go\n## break\n## nxt = connections[0]\n## if nxt == path[0]:\n## # circled back to start\n## path.append(nxt)\n## 
break\n## elif nxt in path:\n## # hit back on itself, ie infinite loop, ie selfintersection\n## path.append(nxt)\n## break\n## elif len(connections) > 2:\n## # reached a junction, ie more than two possible next direction\n## path.append(nxt)\n## break\n## prev = cur\n## cur = nxt\n##\n## # finished, add part\n## # polygon, ie path has been closed\n## #print len(path)\n## if len(path) > 1:# and path[0]==path[-1]:\n## parts.append(LineString(path))\n## else:\n## pass #parts.append(Point(path[0]))\n##\n## # connect line segments into polygons\n## print len(parts)\n## for poly in shapely.ops.polygonize(parts):\n## print str(poly)[:100]\n## outvec.add_feature([], poly.__geo_interface__)\n##\n## return outvec\n\n # Approach 2: cell outline\n # http://cardhouse.com/computer/vector.htm\n\n # Approach 3: shapely cell merge\n band = raster.bands[bandnum]\n shps = []\n for i,cell in enumerate(band):\n if cell.value:\n #print i\n shp = Polygon(cell.poly[\"coordinates\"][0])\n shps.append(shp)\n union = shapely.ops.cascaded_union(shps)\n #print str(union)[:200]\n\n outvec = VectorData()\n outvec.fields = [\"id\"]\n for i,poly in enumerate(union.geoms):\n outvec.add_feature([i], poly.__geo_interface__)\n\n return outvec\n\n else:\n # for each region of contiguous cells with same value\n # assign a feature and give it that value\n outvec = VectorData()\n outvec.fields = [\"value\"]\n \n band = raster.bands[bandnum]\n zonevalues = (val for count,val in band.img.getcolors(raster.width*raster.height))\n for zoneval in zonevalues:\n #print zoneval\n \n # exclude nullzone\n if zoneval == band.nodataval: continue\n\n shps = []\n for cell in band:\n if cell.value == zoneval:\n shp = Polygon(cell.poly[\"coordinates\"][0])\n shps.append(shp)\n\n union = shapely.ops.cascaded_union(shps)\n #print str(union)[:200]\n\n if hasattr(union, \"geoms\"):\n for poly in union.geoms:\n outvec.add_feature([zoneval], poly.__geo_interface__)\n else:\n outvec.add_feature([zoneval], union.__geo_interface__)\n\n return outvec\n\n\n else:\n \n # separate feature and geometry for each cell\n \n outvec = VectorData()\n\n if metavars:\n outvec.fields = [\"col\",\"row\",\"x\",\"y\",\"val\"]\n band = raster.bands[bandnum]\n nodataval = band.nodataval\n for cell in band:\n if cell.value != nodataval:\n row = [cell.col, cell.row, cell.x, cell.y, cell.value]\n outvec.add_feature(row=row, geometry=cell.poly)\n else:\n outvec.fields = [\"val\"]\n band = raster.bands[bandnum]\n nodataval = band.nodataval\n for cell in band:\n if cell.value != nodataval:\n row = [cell.value]\n outvec.add_feature(row=row, geometry=cell.poly)\n\n return outvec", "title": "" }, { "docid": "98ff9fe8c41442e13f3592399ac68479", "score": "0.5578035", "text": "def as_vector(self):\n # print(np.hstack((self.points[:, 0], self.points[:, 1])))\n return np.hstack((self.points[:, 0], self.points[:, 1]))", "title": "" }, { "docid": "abcfcb7ab4b13f3dd3a87ba4ffa36c1c", "score": "0.5576991", "text": "def translation(vec):\n return np.array([[1, 0, 0, vec[0]],\n [0, 1, 0, vec[1]],\n [0, 0, 1, vec[2]],\n [0, 0, 0, 1]], dtype=np.double)", "title": "" }, { "docid": "de4e1d2f1ba94dffb684eeb44cdb04aa", "score": "0.55709606", "text": "def vector(self): # You can thus call some_wind.vector without the parenthesis as if it was just another attribute.\n return np.array((self.x, self.y))", "title": "" }, { "docid": "3b7cb261a29f9dfb17816646ebf8f191", "score": "0.556556", "text": "def accept(self, visitor):\n visitor.visitVector(self)", "title": "" }, { "docid": 
"007b442c8fbac306f785cf06c8106fa2", "score": "0.5560993", "text": "def ProjectFVFile(fp, proj_dim=15):\n\n # Test code to read in a pre-existing random projection matrix.\n # This allows the same matrix to be used for both Simpoin and this script.\n #\n # matrix = ReadVectorFile('simpoint_random_proj_matrix.txt')\n # # PrintVectorFile(matrix)\n # sim_proj_matrix = {}\n # index = 1\n # for vector in matrix:\n # sim_proj_matrix[index] = vector\n # index += 1\n\n # Dictionary which contains the random projection matrix. The keys are the\n # FV dimension (NOT the slice number) and the value is a list of random\n # values with length 'proj_dim'.\n #\n proj_matrix = {}\n\n # List of lists which contains the result matrix. One element for each slice. \n #\n result_matrix = []\n\n while True:\n fv = GetSlice(fp)\n if fv == []:\n break\n\n # Get the sum of all counts for this slice for use in normalizing the\n # dimension counts.\n #\n # import pdb; pdb.set_trace()\n # print fv\n vector_sum = 0\n for block in fv:\n vector_sum += block[1]\n # for block in fv: vector_sum += math.fabs(block[1])\n\n # Initilize this slice/vector of the result matrix to zero\n #\n result_vector = [0.0] * proj_dim\n\n # For each element in the slice, project using the \"dimension of the\n # element\", not the element index itself!\n #\n sum = 0\n # import pdb; pdb.set_trace()\n for block in fv:\n dim = block[0]\n # print 'Dim: %4d' % dim\n count = float(block[1]) / vector_sum # Normalize freq count\n # print 'Count: %d Normalized count: %f' % (block[1], count)\n\n # For testing only, use the random project matrix read from a file.\n #\n # proj_vector = sim_proj_matrix.get(dim)\n\n # Get the random vector for the dimension 'dim' and project the values for\n # 'dim' into the result\n #\n proj_vector = GetDimRandomVector(proj_matrix, proj_dim, dim)\n index = 0\n while index < proj_dim:\n result_vector[index] += count * proj_vector[index]\n index += 1\n\n result_matrix.append(result_vector)\n\n # Debugging code\n #\n # PrintProjMatrix(proj_matrix)\n # sys.exit(0)\n\n # import pdb; pdb.set_trace()\n return result_matrix", "title": "" }, { "docid": "9426b704446243743ff6fb457ffb4fa4", "score": "0.5559961", "text": "def Pvectz( P ):\n n_f = P.shape[0]//4\n P_vec = np.zeros((n_f, 12))\n for f in range(n_f):\n Pf = P[4*f:4*(f+1)].flatten()\n P_vec[f] = Pf\n return P_vec", "title": "" }, { "docid": "128304ad949d5058294640b3e0b19cfd", "score": "0.5548701", "text": "def vf(P):\n\n return region4.vf(P)", "title": "" }, { "docid": "40b3bb4ed09f49cdead5e643acfaacb5", "score": "0.55418503", "text": "def vector(self, xy): # It allows you to define what happens when you type 'some_wind.vector = something'\n self.x = xy[0]\n # to be continued ...\n self.y = xy[1]", "title": "" }, { "docid": "6ec77f64301077cbf569882721b29df9", "score": "0.5535797", "text": "def translate(self, vec):\n from copy import deepcopy\n rt = Translation(vec)\n\n trans = rt.dot(self)\n for i in range(0, len(self)):\n self[i] = deepcopy(trans[i])", "title": "" }, { "docid": "000b0f5b354a55f9d24510653a760d13", "score": "0.5530203", "text": "def test_compose_vector_fields_3d():\n np.random.seed(8315759)\n input_shape = (10, 10, 10)\n target_shape = (10, 10, 10)\n #create a simple affine transformation\n ns = input_shape[0]\n nr = input_shape[1]\n nc = input_shape[2]\n s = 1.5\n t = 2.5\n trans = np.array([[1, 0, 0, -t*ns],\n [0, 1, 0, -t*nr],\n [0, 0, 1, -t*nc],\n [0, 0, 0, 1]])\n trans_inv = np.linalg.inv(trans)\n scale = np.array([[1*s, 0, 0, 0],\n [0, 1*s, 0, 0],\n 
[0, 0, 1*s, 0],\n [0, 0, 0, 1]])\n gt_affine = trans_inv.dot(scale.dot(trans))\n\n #create two random displacement fields\n input_affine = gt_affine\n target_affine = gt_affine\n\n disp1, assign1 = vfu.create_random_displacement_3d(np.array(input_shape,\n dtype=np.int32),\n input_affine,\n np.array(target_shape,\n dtype=np.int32),\n target_affine)\n disp1 = np.array(disp1, dtype=floating)\n assign1 = np.array(assign1)\n\n disp2, assign2 = vfu.create_random_displacement_3d(np.array(input_shape,\n dtype=np.int32),\n input_affine,\n np.array(target_shape,\n dtype=np.int32),\n target_affine)\n disp2 = np.array(disp2, dtype=floating)\n assign2 = np.array(assign2)\n\n #create a random image (with decimal digits) to warp\n moving_image = np.ndarray(target_shape, dtype=floating)\n moving_image[...] = np.random.randint(0, 10, np.size(moving_image)).reshape(tuple(target_shape))\n #set boundary values to zero so we don't test wrong interpolation due to\n #floating point precision\n moving_image[0,:,:] = 0\n moving_image[-1,:,:] = 0\n moving_image[:,0,:] = 0\n moving_image[:,-1,:] = 0\n moving_image[:,:,0] = 0\n moving_image[:,:,-1] = 0\n\n #evaluate the composed warping using the exact assignments (first 1 then 2)\n\n warp1 = moving_image[(assign2[...,0], assign2[...,1], assign2[...,2])]\n expected = warp1[(assign1[...,0], assign1[...,1], assign1[...,2])]\n\n #compose the displacement fields\n target_affine_inv = np.linalg.inv(target_affine)\n\n target_affine_inv = np.linalg.inv(target_affine)\n premult_index = target_affine_inv.dot(input_affine)\n premult_disp = target_affine_inv\n\n for time_scaling in [0.25, 0.5, 1.0, 2.0, 4.0]:\n composition, stats = vfu.compose_vector_fields_3d(disp1,\n disp2/time_scaling,\n premult_index,\n premult_disp,\n time_scaling, None)\n #apply the implementation under test\n warped = np.array(vfu.warp_3d(moving_image, composition, None,\n premult_index, premult_disp))\n assert_array_almost_equal(warped, expected)\n\n #test also using nearest neighbor interpolation\n warped = np.array(vfu.warp_3d_nn(moving_image, composition, None,\n premult_index, premult_disp))\n assert_array_almost_equal(warped, expected)\n\n #test updating the displacement field instead of creating a new one\n composition = disp1.copy()\n vfu.compose_vector_fields_3d(composition, disp2/time_scaling,\n premult_index, premult_disp,\n time_scaling, composition)\n #apply the implementation under test\n warped = np.array(vfu.warp_3d(moving_image, composition, None,\n premult_index, premult_disp))\n assert_array_almost_equal(warped, expected)\n\n #test also using nearest neighbor interpolation\n warped = np.array(vfu.warp_3d_nn(moving_image, composition, None,\n premult_index, premult_disp))\n assert_array_almost_equal(warped, expected)\n\n # Test non-overlapping case\n x_0 = np.asarray(range(input_shape[0]))\n x_1 = np.asarray(range(input_shape[1]))\n x_2 = np.asarray(range(input_shape[2]))\n X = np.ndarray(input_shape + (3,), dtype = np.float64)\n O = np.ones(input_shape)\n X[...,0]= x_0[:, None, None] * O\n X[...,1]= x_1[None, :, None] * O\n X[...,2]= x_2[None, None, :] * O\n random_labels = np.random.randint(0, 2, input_shape[0]*input_shape[1]*input_shape[2]*3)\n random_labels = random_labels.reshape(input_shape+(3,))\n values = np.array([-1, target_shape[0]])\n disp1 = (values[random_labels] - X).astype(floating)\n composition, stats = vfu.compose_vector_fields_3d(disp1,\n disp2,\n None,\n None,\n 1.0, None)\n assert_array_almost_equal(composition, np.zeros_like(composition))\n\n #test updating the 
displacement field instead of creating a new one\n composition = disp1.copy()\n vfu.compose_vector_fields_3d(composition, disp2, None, None, 1.0, composition)\n assert_array_almost_equal(composition, np.zeros_like(composition))", "title": "" }, { "docid": "44b2262261cc413cd2c0843f1755c308", "score": "0.5517296", "text": "def _vector_plane_projection(self, v, n):\n \n v_proj = v + n * (v * n / n.norm())\n return v_proj", "title": "" }, { "docid": "ca107a345e199f032863b5bc0a961623", "score": "0.55158705", "text": "def projective_trans_region(\n regions: HObject,\n hom_mat_2d: Sequence[float],\n interpolation: str\n) -> HObject:\n with HalconOperator(487) as proc:\n proc.set_input_object(1, regions)\n proc.set_input_tuple(0, hom_mat_2d)\n proc.set_input_tuple(1, interpolation)\n proc.execute()\n trans_regions = HObject(proc.get_output_object_key(1))\n return trans_regions # type: ignore", "title": "" }, { "docid": "d1ab7ad769ed4d9869357c487e4d24b4", "score": "0.55154395", "text": "def rasterise_vector ( raster_fname, vector_fname, where_statement, \n output_fname=\"\", output_format=\"MEM\",verbose=False):\n if output_fname == \"\" and output_format != \"MEM\":\n raise ValueError(\"You need to provide an ouput filename\" +\n \" for format{:s}\".format(output_format))\n g = gdal.Open(raster_fname)\n if g is None:\n raise IOError(\"Could not open file {:s}\".format(raster_fname))\n raster_proj = g.GetProjectionRef()\n geoT = g.GetGeoTransform()\n if verbose:\n print \">>> Opened file {:s}\".format(raster_fname)\n print \">>> Projection: {:s}\".format(raster_proj)\n xs = []\n ys = []\n for x,y in [ [0, 0], [0, g.RasterYSize], [g.RasterXSize, g.RasterYSize], [g.RasterXSize, 0]]:\n xx, yy = gdal.ApplyGeoTransform(geoT, x,y)\n xs.append(xx)\n ys.append(yy)\n extent = [min(xs), min(ys), max(xs), max(ys)]\n xRes = geoT[1]\n yRes = geoT[-1]\n nx = g.RasterXSize\n ny = g.RasterYSize\n if verbose:\n print \">>> File size {:d} rows, {:d} columns\".format(nx, ny)\n print \">>> UL corner: {:g}, {:g}\".format(min(xs), max(ys))\n \n src_ds = gdal.OpenEx(vector_fname)\n if src_ds is None:\n raise IOError(\"Can't read the vector file {}\".format(vector_fname))\n v = gdal.VectorTranslate('', src_ds, format = 'Memory', dstSRS=raster_proj,\n where=where_statement)\n gg = gdal.Rasterize(output_fname, v,\n format=output_format, outputType=gdal.GDT_Byte, xRes=xRes, yRes=yRes, \n where=where_statement,\n outputBounds=[min(xs), min(ys), max(xs), max(ys)], \n width=nx, height=ny, noData=0, burnValues=1)\n \n if gg is not None:\n print \"Done!\"\n else:\n raise ValueError(\"Couldn't generate the mask. 
Check input parameters\")\n return gg.ReadAsArray()", "title": "" }, { "docid": "d5fd7d24babe27ef4f335d11e0513857", "score": "0.55142593", "text": "def ballTransform(i,loc):\n A = np.multiply(project(100), loc)\n return A", "title": "" }, { "docid": "f2a29d50b1be4112bae760d89df6dbd9", "score": "0.5507468", "text": "def v_projection(img):\n return np.sum(img, axis=0)", "title": "" }, { "docid": "0b70f2ad83dd910666c1002041891861", "score": "0.5498986", "text": "def _vec_rot(self, vec: VecBase) -> None:\n x = vec.x\n y = vec.y\n z = vec.z\n vec._x = (x * self._aa) + (y * self._ba) + (z * self._ca)\n vec._y = (x * self._ab) + (y * self._bb) + (z * self._cb)\n vec._z = (x * self._ac) + (y * self._bc) + (z * self._cc)", "title": "" }, { "docid": "827b867f4bbf760084927a8821ff15d0", "score": "0.54948467", "text": "def create_vi(self):\n nnear = 5\n self.raster_vi = np.flipud(idw(\n self.point_x, self.point_y, self.point_vi, self.grid_x, self.grid_y, nnear).reshape(self.grid_x.shape))", "title": "" }, { "docid": "63be3fe38919687c6e9dce7bcdc92639", "score": "0.54879224", "text": "def force_vector_gcs(self):\n Rt = self.transpose_rotation_matrix\n return Rt @ self.force_vector()", "title": "" }, { "docid": "76a67666fe4e5758d905703a5014f29d", "score": "0.54816544", "text": "def vectors_transformation(x):\n return np.array([x[0]**2, x[0]*x[1], x[1]*x[0], x[1]**2, x[0], x[1], 1])", "title": "" }, { "docid": "7ab9f8e683da529ac3f017cb36727ebe", "score": "0.54764", "text": "def _projection(emb_e, proj_vec):\n\n return emb_e - torch.sum(emb_e * proj_vec, dim=-1, keepdims=True) * proj_vec", "title": "" }, { "docid": "4d4af12787fee778697abd24ff14b423", "score": "0.546564", "text": "def calc_vector(self):\n self.origin = np.array(self.start)\n self.vector = self.end - self.start\n self.vector /= np.linalg.norm(self.vector)", "title": "" }, { "docid": "1d0969752a3bb96740703efcc1644ee8", "score": "0.54634035", "text": "def Project_Boundary_Control(self): \r\n assert self.generate_mesh == 1, \"The finite element mesh must be generated first\"\r\n \r\n assert self.set_finite_elements_spaces == 1, \\\r\n \"The FE approximation spaces must be selected first\"\r\n \r\n if self.set_mixed_boundaries == 0 : \r\n self.Ub_sp0 = interpolate(self.Ub_sp0_Expression, self.Vb).vector()[self.b_ind]\r\n self.Ub_sp1 = interpolate(self.Ub_sp1_Expression, self.Vb).vector()[self.b_ind]\r\n self.Ub = lambda t : self.Ub_sp0 * self.Ub_tm0(t) + self.Ub_sp1 + self.Ub_tm1(t) * np.ones(self.Nb)\r\n \r\n if self.set_mixed_boundaries == 1 :\r\n self.Ub_sp0_D = interpolate(self.Ub_sp0_D_Expression, self.Vb).vector()[self.D_index]\r\n self.Ub_sp1_D = interpolate(self.Ub_sp1_D_Expression, self.Vb).vector()[self.D_index]\r\n self.Ub_D = lambda t : self.Ub_sp0_D * self.Ub_tm0_D(t) + self.Ub_sp1_D + self.Ub_tm1_D(t) * np.ones(self.Nb_D)\r\n self.Ub_D_dir = lambda t : self.Ub_sp0_D * self.Ub_tm0_D_dir(t) + self.Ub_tm1_D_dir(t) * np.ones(self.Nb_D)\r\n\r\n self.Ub_sp0_N = interpolate(self.Ub_sp0_N_Expression, self.Vb).vector()[self.N_index]\r\n self.Ub_sp1_N = interpolate(self.Ub_sp1_N_Expression, self.Vb).vector()[self.N_index]\r\n self.Ub_N = lambda t : self.Ub_sp0_N * self.Ub_tm0_N(t) + self.Ub_sp1_N + self.Ub_tm1_N(t) * np.ones(self.Nb_N) \r\n\r\n self.project_boundary_control = 1\r\n \r\n return self.project_boundary_control", "title": "" }, { "docid": "8fc9034291810ceae4ee459b4884a1a3", "score": "0.5460542", "text": "def translate(self, vec):\n points = self.points + vec\n return Landmarks(points)", "title": "" }, { "docid": 
"653893f68cceee1f8a6a31b8b2de96e1", "score": "0.5457983", "text": "def output_vectors(lat_lon, temp, precip, snow_depth, dew_point,\n fog, rain, snow, hail, thunder, tornado):\n snd = lambda (x, y): y\n block_generators = make_block_generators(map(snd, temp),\n map(snd, precip),\n map(snd, snow_depth),\n map(snd, dew_point),\n map(snd, fog),\n map(snd, rain),\n map(snd, snow),\n map(snd, hail),\n map(snd, thunder),\n map(snd, tornado))\n if vector_type == 'snow':\n make_snow_vectors(lat_lon, block_generators)\n elif vector_type == 'rain':\n make_rain_vector(lat_lon, block_generators)\n elif vector_type == 'temp':\n make_temp_vector(lat_lon, block_generators)\n elif vector_type == 'snow_days':\n make_snow_days_vector(lat_lon, block_generators)", "title": "" }, { "docid": "8c0706585cb00decebb5f65b3e6e10a0", "score": "0.54572207", "text": "def project_to_cylind(u, v, etas):\n uaz = -u*cos(etas) - v*sin(etas) # azimuth component\n vra = -u*sin(etas) + v*cos(etas) # radial component\n \n return uaz.rename('ut'), vra.rename('vr')", "title": "" }, { "docid": "ce66ec0b8249d6000388e7500f173265", "score": "0.54549223", "text": "def translate(self, vector: pint.Quantity) -> Shape:\n new_shape = copy.deepcopy(self)\n new_shape.apply_translation(vector)\n return new_shape", "title": "" }, { "docid": "f535f1c33686157070206ee58f1b4e1e", "score": "0.54514146", "text": "def test_invert_vector_field_2d():\n shape = (64, 64)\n nr = shape[0]\n nc = shape[1]\n # Create an arbitrary image-to-space transform\n t = 2.5 #translation factor\n\n trans = np.array([[1, 0, -t*nr],\n [0, 1, -t*nc],\n [0, 0, 1]])\n trans_inv = np.linalg.inv(trans)\n\n d, dinv = vfu.create_harmonic_fields_2d(nr, nc, 0.2, 8)\n d = np.asarray(d).astype(floating)\n dinv = np.asarray(dinv).astype(floating)\n\n for theta in [-1 * np.pi/5.0, 0.0, np.pi/5.0]: #rotation angle\n for s in [0.5, 1.0, 2.0]: #scale\n ct = np.cos(theta)\n st = np.sin(theta)\n\n rot = np.array([[ct, -st, 0],\n [st, ct, 0],\n [0, 0, 1]])\n\n scale = np.array([[1*s, 0, 0],\n [0, 1*s, 0],\n [0, 0, 1]])\n\n gt_affine = trans_inv.dot(scale.dot(rot.dot(trans)))\n gt_affine_inv = np.linalg.inv(gt_affine)\n dcopy = np.copy(d)\n\n #make sure the field remains invertible after the re-mapping\n vfu.reorient_vector_field_2d(dcopy, gt_affine)\n\n inv_approx = vfu.invert_vector_field_fixed_point_2d(dcopy, gt_affine_inv,\n np.array([s, s]),\n 40, 1e-7)\n\n mapping = imwarp.DiffeomorphicMap(2, (nr,nc), gt_affine)\n mapping.forward = dcopy\n mapping.backward = inv_approx\n residual, stats = mapping.compute_inversion_error()\n assert_almost_equal(stats[1], 0, decimal=4)\n assert_almost_equal(stats[2], 0, decimal=4)", "title": "" }, { "docid": "6336fcfbbbcf94b7be0e1fd3bc30bcb0", "score": "0.54509354", "text": "def post_convert(self):\n # XXX: is this a GE specific step?\n if self.is_dwi:\n self.bvecs, self.bvals = adjust_bvecs(self.bvecs, self.bvals, self.scanner_type, self.qto_xyz[0:3, 0:3])", "title": "" }, { "docid": "01891f216f98be6efe4326df1517dc4d", "score": "0.5446974", "text": "def vectors_to_raster(file_paths, rows, cols, geo_transform, projection):\n labeled_pixels = np.zeros((rows, cols))\n for i, path in enumerate(file_paths):\n label = i + 1\n ds = create_mask_from_vector(path, cols, rows, geo_transform,\n projection, target_value=label)\n band = ds.GetRasterBand(1)\n labeled_pixels += band.ReadAsArray()\n ds = None\n return labeled_pixels", "title": "" }, { "docid": "01891f216f98be6efe4326df1517dc4d", "score": "0.5446974", "text": "def vectors_to_raster(file_paths, rows, 
cols, geo_transform, projection):\n labeled_pixels = np.zeros((rows, cols))\n for i, path in enumerate(file_paths):\n label = i + 1\n ds = create_mask_from_vector(path, cols, rows, geo_transform,\n projection, target_value=label)\n band = ds.GetRasterBand(1)\n labeled_pixels += band.ReadAsArray()\n ds = None\n return labeled_pixels", "title": "" }, { "docid": "313f4a5d79859fbd3fd592d4972bc698", "score": "0.54443204", "text": "def test_compose_vector_fields_2d():\n np.random.seed(8315759)\n input_shape = (10, 10)\n target_shape = (10, 10)\n #create a simple affine transformation\n nr = input_shape[0]\n nc = input_shape[1]\n s = 1.5\n t = 2.5\n trans = np.array([[1, 0, -t*nr],\n [0, 1, -t*nc],\n [0, 0, 1]])\n trans_inv = np.linalg.inv(trans)\n scale = np.array([[1*s, 0, 0],\n [0, 1*s, 0],\n [0, 0, 1]])\n gt_affine = trans_inv.dot(scale.dot(trans))\n\n #create two random displacement fields\n input_affine = gt_affine\n target_affine = gt_affine\n\n disp1, assign1 = vfu.create_random_displacement_2d(np.array(input_shape,\n dtype=np.int32),\n input_affine,\n np.array(target_shape,\n dtype=np.int32),\n target_affine)\n disp1 = np.array(disp1, dtype=floating)\n assign1 = np.array(assign1)\n\n disp2, assign2 = vfu.create_random_displacement_2d(np.array(input_shape,\n dtype=np.int32),\n input_affine,\n np.array(target_shape,\n dtype=np.int32),\n target_affine)\n disp2 = np.array(disp2, dtype=floating)\n assign2 = np.array(assign2)\n\n #create a random image (with decimal digits) to warp\n moving_image = np.ndarray(target_shape, dtype=floating)\n moving_image[...] = np.random.randint(0, 10, np.size(moving_image)).reshape(tuple(target_shape))\n #set boundary values to zero so we don't test wrong interpolation due to\n #floating point precision\n moving_image[0,:] = 0\n moving_image[-1,:] = 0\n moving_image[:,0] = 0\n moving_image[:,-1] = 0\n\n #evaluate the composed warping using the exact assignments (first 1 then 2)\n warp1 = moving_image[(assign2[...,0], assign2[...,1])]\n expected = warp1[(assign1[...,0], assign1[...,1])]\n\n #compose the displacement fields\n target_affine_inv = np.linalg.inv(target_affine)\n\n target_affine_inv = np.linalg.inv(target_affine)\n premult_index = target_affine_inv.dot(input_affine)\n premult_disp = target_affine_inv\n\n for time_scaling in [0.25, 0.5, 1.0, 2.0, 4.0]:\n composition, stats = vfu.compose_vector_fields_2d(disp1,\n disp2/time_scaling,\n premult_index,\n premult_disp,\n time_scaling, None)\n #apply the implementation under test\n warped = np.array(vfu.warp_2d(moving_image, composition, None,\n premult_index, premult_disp))\n assert_array_almost_equal(warped, expected)\n\n #test also using nearest neighbor interpolation\n warped = np.array(vfu.warp_2d_nn(moving_image, composition, None,\n premult_index, premult_disp))\n assert_array_almost_equal(warped, expected)\n\n #test updating the displacement field instead of creating a new one\n composition = disp1.copy()\n vfu.compose_vector_fields_2d(composition,disp2/time_scaling, premult_index,\n premult_disp, time_scaling, composition)\n #apply the implementation under test\n warped = np.array(vfu.warp_2d(moving_image, composition, None,\n premult_index, premult_disp))\n assert_array_almost_equal(warped, expected)\n\n #test also using nearest neighbor interpolation\n warped = np.array(vfu.warp_2d_nn(moving_image, composition, None,\n premult_index, premult_disp))\n assert_array_almost_equal(warped, expected)\n\n # Test non-overlapping case\n x_0 = np.asarray(range(input_shape[0]))\n x_1 = 
np.asarray(range(input_shape[1]))\n X = np.ndarray(input_shape + (2,), dtype = np.float64)\n O = np.ones(input_shape)\n X[...,0]= x_0[:, None] * O\n X[...,1]= x_1[None, :] * O\n random_labels = np.random.randint(0, 2, input_shape[0]*input_shape[1]*2)\n random_labels = random_labels.reshape(input_shape+(2,))\n values = np.array([-1, target_shape[0]])\n disp1 = (values[random_labels] - X).astype(floating)\n composition, stats = vfu.compose_vector_fields_2d(disp1,\n disp2,\n None,\n None,\n 1.0, None)\n assert_array_almost_equal(composition, np.zeros_like(composition))\n\n #test updating the displacement field instead of creating a new one\n composition = disp1.copy()\n vfu.compose_vector_fields_2d(composition, disp2, None, None, 1.0, composition)\n assert_array_almost_equal(composition, np.zeros_like(composition))", "title": "" }, { "docid": "0b6624607aa6bc0a1dd59ccece9819ed", "score": "0.5442208", "text": "def vector_to_rel_pose_s(\n rows_1: Sequence[Union[float, int]],\n cols_1: Sequence[Union[float, int]],\n rows_2: Sequence[Union[float, int]],\n cols_2: Sequence[Union[float, int]],\n cov_rr1: Sequence[Union[float, int]],\n cov_rc1: Sequence[Union[float, int]],\n cov_cc1: Sequence[Union[float, int]],\n cov_rr2: Sequence[Union[float, int]],\n cov_rc2: Sequence[Union[float, int]],\n cov_cc2: Sequence[Union[float, int]],\n cam_par_1: Sequence[Union[float, int, str]],\n cam_par_2: Sequence[Union[float, int, str]],\n method: str\n) -> Tuple[Sequence[Union[int, float]], Sequence[float], float, Sequence[float], Sequence[float], Sequence[float], Sequence[float]]:\n with HalconOperator(355) as proc:\n proc.set_input_tuple(0, rows_1)\n proc.set_input_tuple(1, cols_1)\n proc.set_input_tuple(2, rows_2)\n proc.set_input_tuple(3, cols_2)\n proc.set_input_tuple(4, cov_rr1)\n proc.set_input_tuple(5, cov_rc1)\n proc.set_input_tuple(6, cov_cc1)\n proc.set_input_tuple(7, cov_rr2)\n proc.set_input_tuple(8, cov_rc2)\n proc.set_input_tuple(9, cov_cc2)\n proc.set_input_tuple(10, cam_par_1)\n proc.set_input_tuple(11, cam_par_2)\n proc.set_input_tuple(12, method)\n proc.init_oct(0)\n proc.init_oct(1)\n proc.init_oct(2)\n proc.init_oct(3)\n proc.init_oct(4)\n proc.init_oct(5)\n proc.init_oct(6)\n proc.execute()\n return (\n proc.get_output_tuple_m(0), # rel_pose\n proc.get_output_tuple_m(1), # cov_rel_pose\n proc.get_output_tuple_s(2), # error\n proc.get_output_tuple_m(3), # x\n proc.get_output_tuple_m(4), # y\n proc.get_output_tuple_m(5), # z\n proc.get_output_tuple_m(6) # cov_xyz\n ) # type: ignore", "title": "" }, { "docid": "3d27899c328ea40471806b2c8db0e936", "score": "0.5439419", "text": "def func(self):\n return Vector", "title": "" }, { "docid": "e43fb959cdb9f8ca1f1dfe4f4318fa92", "score": "0.5435764", "text": "def project(box_edge, points):", "title": "" }, { "docid": "69074069ca29353e134a743fe318f6e1", "score": "0.5433739", "text": "def vector(self, vector):\n # ask my pdf\n return self.pdf.vector(vector)", "title": "" }, { "docid": "7eba7ac01f951196d9affbd23dd0186a", "score": "0.54329085", "text": "def vectorize(self, *args, **kwargs):\n kwargs['add_start'] = True\n kwargs['add_end'] = True\n obs = super().vectorize(*args, **kwargs)\n return obs", "title": "" }, { "docid": "5c4350e389074ac317c922ddde98310c", "score": "0.54290587", "text": "def vector(start: Coordinates, direction: Direction) -> Vector:\n x, y = start\n horizontal, vertical = direction\n\n while True:\n yield (x, y)\n x += horizontal\n y += vertical", "title": "" }, { "docid": "4ca6f1fb91c8fbc792406b0effa719ee", "score": "0.54252136", 
"text": "def coordinate_system(v):\n v1 = Vector(v.x, v.y, v.z)\n if abs(v1.x) > abs(v1.y):\n invLen = 1.0 / math.sqrt(v1.x*v1.x + v1.z*v1.z)\n v2 = Vector(-v1.z * invLen, 0.0, v1.x * invLen)\n else:\n invLen = 1.0 / math.sqrt(v1.y*v1.y + v1.z*v1.z)\n v2 = Vector(-v1.z * invLen, 0.0, v1.x * invLen)\n v3 = cross(v1, v2)\n return v1, v2, v3", "title": "" }, { "docid": "d9e8211c81ec37824258cbfc7c67e6a7", "score": "0.5423794", "text": "def bilinear_map(self,v,w):\n if len(v) != self.dim() or len(w) != self.dim():\n raise TypeError(\"vectors must have length \" + str(self.dim()))\n if self.base_ring().characteristic() == 2:\n raise TypeError(\"not defined for rings of characteristic 2\")\n return (self(v+w) - self(v) - self(w))/2", "title": "" }, { "docid": "8b308a8cfe19b94bb6b00c915423ea38", "score": "0.54093194", "text": "def MorphPointProjectToContainer(box_points, target_entities, offset, coordinate_system, freeze_x_axis, freeze_y_axis, freeze_z_axis):", "title": "" }, { "docid": "3a7da77eaa4624f9a7a33d3d88ee8f33", "score": "0.5405011", "text": "def translate(self, translation):\n for vertex in self.vertices:\n vertex[0] = vertex[0] + translation[0]\n vertex[1] = vertex[1] + translation[1]\n vertex[2] = vertex[2] + translation[2]", "title": "" }, { "docid": "5ffe421ab4b470c377bcd5f1bdfc2500", "score": "0.5402918", "text": "def vector_to_rigid(\n px: Sequence[float],\n py: Sequence[float],\n qx: Sequence[float],\n qy: Sequence[float]\n) -> Sequence[float]:\n with HalconOperator(266) as proc:\n proc.set_input_tuple(0, px)\n proc.set_input_tuple(1, py)\n proc.set_input_tuple(2, qx)\n proc.set_input_tuple(3, qy)\n proc.init_oct(0)\n proc.execute()\n hom_mat_2d = proc.get_output_tuple_m(0)\n return hom_mat_2d # type: ignore", "title": "" }, { "docid": "b78491f98a6b8723e584c8dd361067f5", "score": "0.5398694", "text": "def proj(u, v):\n assert len(u) == len(v)\n zv = _zero_vector(len(u))\n if u == zv:\n return zv\n return ((v*u) / (u*u)) * u", "title": "" }, { "docid": "61f41062f075667d2905855fdad5db27", "score": "0.5398178", "text": "def project (v, w):\n unit_w = scalarProduct2(v,w)/euclideanLength(v)**2\n return (unit_w*v[0],unit_w*v[1])", "title": "" }, { "docid": "611024d3c8816ccbb807d087e030ee0d", "score": "0.5395713", "text": "def vector_to_rel_pose(\n rows_1: Sequence[Union[float, int]],\n cols_1: Sequence[Union[float, int]],\n rows_2: Sequence[Union[float, int]],\n cols_2: Sequence[Union[float, int]],\n cov_rr1: Sequence[Union[float, int]],\n cov_rc1: Sequence[Union[float, int]],\n cov_cc1: Sequence[Union[float, int]],\n cov_rr2: Sequence[Union[float, int]],\n cov_rc2: Sequence[Union[float, int]],\n cov_cc2: Sequence[Union[float, int]],\n cam_par_1: Sequence[Union[float, int, str]],\n cam_par_2: Sequence[Union[float, int, str]],\n method: str\n) -> Tuple[Sequence[Union[int, float]], Sequence[float], Sequence[float], Sequence[float], Sequence[float], Sequence[float], Sequence[float]]:\n with HalconOperator(355) as proc:\n proc.set_input_tuple(0, rows_1)\n proc.set_input_tuple(1, cols_1)\n proc.set_input_tuple(2, rows_2)\n proc.set_input_tuple(3, cols_2)\n proc.set_input_tuple(4, cov_rr1)\n proc.set_input_tuple(5, cov_rc1)\n proc.set_input_tuple(6, cov_cc1)\n proc.set_input_tuple(7, cov_rr2)\n proc.set_input_tuple(8, cov_rc2)\n proc.set_input_tuple(9, cov_cc2)\n proc.set_input_tuple(10, cam_par_1)\n proc.set_input_tuple(11, cam_par_2)\n proc.set_input_tuple(12, method)\n proc.init_oct(0)\n proc.init_oct(1)\n proc.init_oct(2)\n proc.init_oct(3)\n proc.init_oct(4)\n proc.init_oct(5)\n 
proc.init_oct(6)\n proc.execute()\n return (\n proc.get_output_tuple_m(0), # rel_pose\n proc.get_output_tuple_m(1), # cov_rel_pose\n proc.get_output_tuple_m(2), # error\n proc.get_output_tuple_m(3), # x\n proc.get_output_tuple_m(4), # y\n proc.get_output_tuple_m(5), # z\n proc.get_output_tuple_m(6) # cov_xyz\n ) # type: ignore", "title": "" }, { "docid": "dc51e06f986bb39e65a5a1278ff49aa0", "score": "0.538851", "text": "def _regfusion_project(data, ras, affine, method='linear'):\n\n data, ras, affine = np.asarray(data), np.asarray(ras), np.asarray(affine)\n coords = nib.affines.apply_affine(np.linalg.inv(affine), ras)\n volgrid = [range(data.shape[i]) for i in range(3)]\n if data.ndim == 3:\n projected = interpn(volgrid, data, coords, method=method)\n elif data.ndim == 4:\n projected = np.column_stack([\n interpn(volgrid, data[..., n], coords, method=method)\n for n in range(data.shape[-1])\n ])\n\n return construct_shape_gii(projected.squeeze())", "title": "" }, { "docid": "1201683505c87d8700e96a1f5593a276", "score": "0.5387183", "text": "def transformFromParentSystem(self, vec):\n return self.mBasis.multv(vec - self.mOrigin)", "title": "" }, { "docid": "39d69ecc65ebf9a9fecbb9d70615b824", "score": "0.53832155", "text": "def translateAlongVectorMatrix(vector, distance):\r\n \r\n unit_vector = np.hstack([unitVector(vector) * distance, 1])\r\n return np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0], unit_vector])", "title": "" }, { "docid": "45920d7314fbbe03a500e5e5ba21f791", "score": "0.53715956", "text": "def to_Vector2D(self) -> VectorProtocolPlanar:\n raise AssertionError", "title": "" }, { "docid": "930b58b9e61cd0aa5eb337d621e5f242", "score": "0.53678715", "text": "def project(d):\n return(np.array([[1,0,0,0],[0,1,0,0],[0,0,0,0],[0,0,-1/d,1]]))", "title": "" }, { "docid": "2e368419ad07cda16effc90ad9db934b", "score": "0.5360767", "text": "def from_vector(self, v):\n for gate in self.factorgates:\n factorgate_local_inds = _gatesetmember._decompose_gpindices(\n self.gpindices, gate.gpindices)\n gate.from_vector( v[factorgate_local_inds] )\n self.dirty = True", "title": "" }, { "docid": "46a23129336e2028bc5d8ea273de4479", "score": "0.5357892", "text": "def from_vector(vector):\n return vector.toArray()", "title": "" }, { "docid": "be3419e590090be79da64990f8f71d67", "score": "0.5354328", "text": "def project_ivectors(input_features):\n print \"projecting ivetors\"\n tv_enroller = bob.machine.IVectorMachine(ubm, subspace_dimension_of_t)\n tv_enroller.load(bob.io.HDF5File(tv_hdf5))\n #print input_features\n for root, dir, files in os.walk(input_features):\n ivectors = []\n for file in files:\n features_path = os.path.join(root, str(file))\n features = read_mfcc_features(features_path)\n stats = bob.machine.GMMStats(ubm.dim_c, ubm.dim_d)\n ubm.acc_statistics(features, stats)\n ivector = tv_enroller.forward(stats) \n lnorm_ivector(ivector)\n ivectors.append(ivector)\n\n ivectors_path = 'ivectors/ivectors_vox_ENubm_ubm/'+ str(gaussians) + '/' + input_features.split('/')[-1]\n if not os.path.exists(ivectors_path):\n os.makedirs(ivectors_path)\n ivectors_path = ivectors_path + '/' + os.path.split(root)[1] + '_' + str(gaussians) + '.ivec'\n save_ivectors(ivectors, ivectors_path)\n print \"saved ivetors to '%s' \" % ivectors_path", "title": "" }, { "docid": "c1a0dd5ed4f1acb689996318dc146649", "score": "0.5341371", "text": "def project(self, trans, dir, minProj, maxProj, witnesPtMin, witnesPtMax):\n return _bullet.btConvexPolyhedron_project(self, trans, dir, minProj, maxProj, witnesPtMin, witnesPtMax)", 
"title": "" }, { "docid": "121656d81d51d45656c7f0c132897a44", "score": "0.5339801", "text": "def WorldZXPlane():", "title": "" }, { "docid": "404c04e7683317f380d00217bd66e2be", "score": "0.53365475", "text": "def test_invert_vector_field_3d():\n shape = (64, 64, 64)\n ns = shape[0]\n nr = shape[1]\n nc = shape[2]\n\n # Create an arbitrary image-to-space transform\n\n # Select an arbitrary rotation axis\n axis = np.array([2.0, 0.5, 1.0])\n t = 2.5 #translation factor\n\n trans = np.array([[1, 0, 0, -t*ns],\n [0, 1, 0, -t*nr],\n [0, 0, 1, -t*nc],\n [0, 0, 0, 1]])\n trans_inv = np.linalg.inv(trans)\n\n d, dinv = vfu.create_harmonic_fields_3d(ns, nr, nc, 0.2, 8)\n d = np.asarray(d).astype(floating)\n dinv = np.asarray(dinv).astype(floating)\n\n for theta in [-1 * np.pi/5.0, 0.0, np.pi/5.0]: #rotation angle\n for s in [0.5, 1.0, 2.0]: #scale\n rot = np.zeros(shape=(4,4))\n rot[:3, :3] = geometry.rodrigues_axis_rotation(axis, theta)\n rot[3,3] = 1.0\n scale = np.array([[1*s, 0, 0, 0],\n [0, 1*s, 0, 0],\n [0, 0, 1*s, 0],\n [0, 0, 0, 1]])\n\n gt_affine = trans_inv.dot(scale.dot(rot.dot(trans)))\n gt_affine_inv = np.linalg.inv(gt_affine)\n dcopy = np.copy(d)\n\n #make sure the field remains invertible after the re-mapping\n vfu.reorient_vector_field_3d(dcopy, gt_affine)\n\n # Note: the spacings are used just to check convergence, so they don't need\n # to be very accurate. Here we are passing (0.5 * s) to force the algorithm\n # to make more iterations: in ANTS, there is a hard-coded bound on the maximum\n # residual, that's why we cannot force more iteration by changing the parameters.\n # We will investigate this issue with more detail in the future.\n\n inv_approx = vfu.invert_vector_field_fixed_point_3d(dcopy, gt_affine_inv,\n np.array([s, s, s])*0.5,\n 40, 1e-7)\n\n mapping = imwarp.DiffeomorphicMap(3, (nr,nc), gt_affine)\n mapping.forward = dcopy\n mapping.backward = inv_approx\n residual, stats = mapping.compute_inversion_error()\n assert_almost_equal(stats[1], 0, decimal=3)\n assert_almost_equal(stats[2], 0, decimal=3)", "title": "" }, { "docid": "f060d58a80d31fbc8a119f142d270ffb", "score": "0.53352493", "text": "def domainVector(self):\n v = PISM.IceModelVec2S()\n v.create(self.grid, \"\", True, WIDE_STENCIL)\n return PISMLocalVector(v)", "title": "" }, { "docid": "0380ce0a09783d718cd375b2c3704d18", "score": "0.5332048", "text": "def vectorApply(self, src_u, src_v, dst_u, dst_v, fs):\n\n MINTLIB.mnt_regridedges_vectorApply.argtypes = [POINTER(c_void_p),\n DOUBLE_ARRAY_PTR,\n DOUBLE_ARRAY_PTR,\n DOUBLE_ARRAY_PTR,\n DOUBLE_ARRAY_PTR,\n c_int]\n ier = MINTLIB.mnt_regridedges_vectorApply(self.obj, src_u, src_v, dst_u, dst_v, fs)\n if ier:\n error_handler(FILE, 'vectorApply', ier)", "title": "" }, { "docid": "e4be613e6030a197ebe5d808af5382e5", "score": "0.53306997", "text": "def translate(v):\n translate = np.concatenate((np.eye(3), v.reshape(-1,1)), 1)\n translate = np.concatenate((translate, np.array([[0,0,0,1]])),0)\n return translate", "title": "" }, { "docid": "fe221bc9e82b08e152ad3ef2aa1f359b", "score": "0.53300565", "text": "def project_point(self, point):\n v_origin = np.array(point)\n\n # TODO: Check v_origin shape (should be 3)\n\n det_us = self.detector_us\n det_vs = self.detector_vs\n\n # (source_pos, det_o, det_y, det_x) = self._get_vectors()\n det_normal = vc.cross_product(det_us, det_vs)\n\n v_direction = v_origin - self.source_positions\n\n intersection = vc.intersect(\n v_origin, v_direction, self.detector_positions, det_normal\n )\n\n det_i_u = np.sum(intersection * det_us, 
axis=1) / vc.squared_norm(det_us)\n det_i_v = np.sum(intersection * det_vs, axis=1) / vc.squared_norm(det_vs)\n\n return np.stack((det_i_v, det_i_u), axis=-1)", "title": "" }, { "docid": "d14c8370c59b493f5e5d0ead5cc66228", "score": "0.5326232", "text": "def to_vector(self):\n return self.embedded_map.to_vector()", "title": "" }, { "docid": "e3b18e79b3e0ac8a3612273cee5d2d62", "score": "0.5325607", "text": "def to_vec(self): \n return self.grid.reshape(\n (self.grid.shape[0] * self.grid.shape[1])\n )", "title": "" } ]
3b234e0e1778749fcf491ff4b7f7ca78
Test fma as str_floatnum_floatnum_str for invalid integer array Array code d.
[ { "docid": "3bf97e6bdacdd1cfd5a82f80fb09bc30", "score": "0.0", "text": "def test_fma_invalid_param_str_floatnum_floatnum_str_1306(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.strx, self.floatnumy, self.floatnumz, self.strout)", "title": "" } ]
[ { "docid": "23478df374aff5d6fc56718d28b5f6d7", "score": "0.813768", "text": "def test_fma_invalid_param_intarray_intarray_str_floatnum_616(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.intarrayy, self.strz, self.floatnumout)", "title": "" }, { "docid": "23478df374aff5d6fc56718d28b5f6d7", "score": "0.8137186", "text": "def test_fma_invalid_param_intarray_intarray_str_floatnum_616(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.intarrayy, self.strz, self.floatnumout)", "title": "" }, { "docid": "dd0771967f6c8441c53fb8cd3670ac75", "score": "0.80673665", "text": "def test_fma_invalid_param_intarray_str_intarray_floatnum_721(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.stry, self.intarrayz, self.floatnumout)", "title": "" }, { "docid": "dd0771967f6c8441c53fb8cd3670ac75", "score": "0.80673665", "text": "def test_fma_invalid_param_intarray_str_intarray_floatnum_721(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.stry, self.intarrayz, self.floatnumout)", "title": "" }, { "docid": "3f06bbcb150dfd0334bad820ca31c574", "score": "0.8066964", "text": "def test_fma_invalid_param_floatnum_str_str_intarray_491(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatnumx, self.stry, self.strz, self.intarrayout)", "title": "" }, { "docid": "3f06bbcb150dfd0334bad820ca31c574", "score": "0.80661047", "text": "def test_fma_invalid_param_floatnum_str_str_intarray_491(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatnumx, self.stry, self.strz, self.intarrayout)", "title": "" }, { "docid": "6f9e4450c8db3f75c6d3fa156d218f4b", "score": "0.8041948", "text": "def test_fma_invalid_param_str_intarray_str_floatnum_1372(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.strx, self.intarrayy, self.strz, self.floatnumout)", "title": "" }, { "docid": "6f9e4450c8db3f75c6d3fa156d218f4b", "score": "0.80418134", "text": "def test_fma_invalid_param_str_intarray_str_floatnum_1372(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.strx, self.intarrayy, 
self.strz, self.floatnumout)", "title": "" }, { "docid": "209d63b89fba60d4ae0ab638eb15d8ef", "score": "0.8038102", "text": "def test_fma_invalid_param_str_intarray_floatnum_str_1348(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.strx, self.intarrayy, self.floatnumz, self.strout)", "title": "" }, { "docid": "209d63b89fba60d4ae0ab638eb15d8ef", "score": "0.8037259", "text": "def test_fma_invalid_param_str_intarray_floatnum_str_1348(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.strx, self.intarrayy, self.floatnumz, self.strout)", "title": "" }, { "docid": "eb06e594540b1a787c05e827c40f3ae1", "score": "0.8035472", "text": "def test_fma_invalid_param_str_intnum_str_floatnum_1414(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.strx, self.intnumy, self.strz, self.floatnumout)", "title": "" }, { "docid": "c0d87e874a4486684b4d259b54ac8f24", "score": "0.8033814", "text": "def test_fma_invalid_param_intarray_floatnum_str_floatnum_574(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.floatnumy, self.strz, self.floatnumout)", "title": "" }, { "docid": "c0d87e874a4486684b4d259b54ac8f24", "score": "0.8033703", "text": "def test_fma_invalid_param_intarray_floatnum_str_floatnum_574(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.floatnumy, self.strz, self.floatnumout)", "title": "" }, { "docid": "eb06e594540b1a787c05e827c40f3ae1", "score": "0.8033072", "text": "def test_fma_invalid_param_str_intnum_str_floatnum_1414(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.strx, self.intnumy, self.strz, self.floatnumout)", "title": "" }, { "docid": "e8948a4c7918e0521ce001b204c40583", "score": "0.80299634", "text": "def test_fma_invalid_param_intarray_floatnum_str_str_578(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.floatnumy, self.strz, self.strout)", "title": "" }, { "docid": "e8948a4c7918e0521ce001b204c40583", "score": "0.8029418", "text": "def test_fma_invalid_param_intarray_floatnum_str_str_578(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, 
self.floatnumy, self.strz, self.strout)", "title": "" }, { "docid": "f720386514b2aa378b161b75a469dba1", "score": "0.80275226", "text": "def test_fma_invalid_param_intarray_floatarray_str_intnum_534(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.floatarrayy, self.strz, self.intnumout)", "title": "" }, { "docid": "f720386514b2aa378b161b75a469dba1", "score": "0.80270094", "text": "def test_fma_invalid_param_intarray_floatarray_str_intnum_534(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.floatarrayy, self.strz, self.intnumout)", "title": "" }, { "docid": "2edfdbea43527476c44794170912baf1", "score": "0.802406", "text": "def test_fma_invalid_param_intarray_floatnum_intnum_str_564(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.floatnumy, self.intnumz, self.strout)", "title": "" }, { "docid": "2edfdbea43527476c44794170912baf1", "score": "0.8023859", "text": "def test_fma_invalid_param_intarray_floatnum_intnum_str_564(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.floatnumy, self.intnumz, self.strout)", "title": "" }, { "docid": "2cfcd3fc6e4cadb82f4d1ece6f150ac8", "score": "0.80232364", "text": "def test_fma_invalid_param_intarray_str_floatnum_str_718(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.stry, self.floatnumz, self.strout)", "title": "" }, { "docid": "2cfcd3fc6e4cadb82f4d1ece6f150ac8", "score": "0.8023186", "text": "def test_fma_invalid_param_intarray_str_floatnum_str_718(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.stry, self.floatnumz, self.strout)", "title": "" }, { "docid": "e08adcd0437e2b8e34cc69cef3a7a8e5", "score": "0.8015487", "text": "def test_fma_invalid_param_intarray_intnum_str_floatnum_658(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.intnumy, self.strz, self.floatnumout)", "title": "" }, { "docid": "e08adcd0437e2b8e34cc69cef3a7a8e5", "score": "0.8015149", "text": "def test_fma_invalid_param_intarray_intnum_str_floatnum_658(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith 
self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.intnumy, self.strz, self.floatnumout)", "title": "" }, { "docid": "75b84342d6fd177b7f3070eeed1e7bf1", "score": "0.80147696", "text": "def test_fma_invalid_param_intarray_str_floatarray_str_711(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.stry, self.floatarrayz, self.strout)", "title": "" }, { "docid": "75b84342d6fd177b7f3070eeed1e7bf1", "score": "0.80147696", "text": "def test_fma_invalid_param_intarray_str_floatarray_str_711(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.stry, self.floatarrayz, self.strout)", "title": "" }, { "docid": "5a399adabd76dc5ff562152a49e6ed85", "score": "0.80061525", "text": "def test_fma_invalid_param_floatarray_intarray_str_floatnum_112(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.intarrayy, self.strz, self.floatnumout)", "title": "" }, { "docid": "5a399adabd76dc5ff562152a49e6ed85", "score": "0.80052817", "text": "def test_fma_invalid_param_floatarray_intarray_str_floatnum_112(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.intarrayy, self.strz, self.floatnumout)", "title": "" }, { "docid": "e90658a505c98ee090ceaa7833c1b014", "score": "0.8005021", "text": "def test_fma_invalid_param_floatarray_str_floatnum_str_214(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.stry, self.floatnumz, self.strout)", "title": "" }, { "docid": "e90658a505c98ee090ceaa7833c1b014", "score": "0.8004781", "text": "def test_fma_invalid_param_floatarray_str_floatnum_str_214(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.stry, self.floatnumz, self.strout)", "title": "" }, { "docid": "448e8bb21f573fc2a3ec745f08d9ca81", "score": "0.8002842", "text": "def test_fma_invalid_param_intarray_floatarray_str_floatnum_532(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.floatarrayy, self.strz, self.floatnumout)", "title": "" }, { "docid": "448e8bb21f573fc2a3ec745f08d9ca81", "score": "0.8002335", "text": "def test_fma_invalid_param_intarray_floatarray_str_floatnum_532(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, 
self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.floatarrayy, self.strz, self.floatnumout)", "title": "" }, { "docid": "ca231d62f5ceef9b3236d098761dd21c", "score": "0.8002189", "text": "def test_fma_invalid_param_intnum_intarray_str_floatnum_868(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intnumx, self.intarrayy, self.strz, self.floatnumout)", "title": "" }, { "docid": "ca231d62f5ceef9b3236d098761dd21c", "score": "0.80012614", "text": "def test_fma_invalid_param_intnum_intarray_str_floatnum_868(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intnumx, self.intarrayy, self.strz, self.floatnumout)", "title": "" }, { "docid": "244a320b6d93991ecb36e4590bce3eb0", "score": "0.7998755", "text": "def test_fma_invalid_param_floatarray_str_floatnum_intnum_212(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.stry, self.floatnumz, self.intnumout)", "title": "" }, { "docid": "244a320b6d93991ecb36e4590bce3eb0", "score": "0.79981774", "text": "def test_fma_invalid_param_floatarray_str_floatnum_intnum_212(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.stry, self.floatnumz, self.intnumout)", "title": "" }, { "docid": "2c07186bdcbf898a9df3b397cbfdd4d7", "score": "0.7989964", "text": "def test_fma_invalid_param_floatarray_str_intarray_floatnum_217(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.stry, self.intarrayz, self.floatnumout)", "title": "" }, { "docid": "2c07186bdcbf898a9df3b397cbfdd4d7", "score": "0.79899335", "text": "def test_fma_invalid_param_floatarray_str_intarray_floatnum_217(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.stry, self.intarrayz, self.floatnumout)", "title": "" }, { "docid": "5840da4cdfeea42de9ff5c76f4abd377", "score": "0.79893106", "text": "def test_fma_invalid_param_floatnum_str_str_str_494(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatnumx, self.stry, self.strz, self.strout)", "title": "" }, { "docid": "5840da4cdfeea42de9ff5c76f4abd377", "score": "0.7988281", "text": "def test_fma_invalid_param_floatnum_str_str_str_494(self):\n\t\t# This version is expected to 
pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatnumx, self.stry, self.strz, self.strout)", "title": "" }, { "docid": "458077424fa17799be63517365bf4208", "score": "0.7985971", "text": "def test_fma_invalid_param_intarray_intnum_str_floatarray_657(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.intnumy, self.strz, self.floatarrayout)", "title": "" }, { "docid": "458077424fa17799be63517365bf4208", "score": "0.7985971", "text": "def test_fma_invalid_param_intarray_intnum_str_floatarray_657(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.intnumy, self.strz, self.floatarrayout)", "title": "" }, { "docid": "f099aac87451345af50d9a63a48477c9", "score": "0.79835635", "text": "def test_fma_invalid_param_str_str_floatnum_intarray_1471(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.strx, self.stry, self.floatnumz, self.intarrayout)", "title": "" }, { "docid": "f099aac87451345af50d9a63a48477c9", "score": "0.79829764", "text": "def test_fma_invalid_param_str_str_floatnum_intarray_1471(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.strx, self.stry, self.floatnumz, self.intarrayout)", "title": "" }, { "docid": "2540f8ecba6fa7de67de1861c7849b5f", "score": "0.7982867", "text": "def test_fma_invalid_param_floatarray_intnum_str_floatnum_154(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.intnumy, self.strz, self.floatnumout)", "title": "" }, { "docid": "d69b808aed5181c94706288c0c69743d", "score": "0.7982591", "text": "def test_fma_invalid_param_floatnum_intnum_str_intarray_407(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatnumx, self.intnumy, self.strz, self.intarrayout)", "title": "" }, { "docid": "2540f8ecba6fa7de67de1861c7849b5f", "score": "0.79819846", "text": "def test_fma_invalid_param_floatarray_intnum_str_floatnum_154(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.intnumy, self.strz, self.floatnumout)", "title": "" }, { "docid": "d69b808aed5181c94706288c0c69743d", "score": "0.79819685", "text": "def 
test_fma_invalid_param_floatnum_intnum_str_intarray_407(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatnumx, self.intnumy, self.strz, self.intarrayout)", "title": "" }, { "docid": "26fcd58202c6f8157dcdf350e8c8d52f", "score": "0.7980974", "text": "def test_fma_invalid_param_floatarray_floatnum_str_intarray_71(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.strz, self.intarrayout)", "title": "" }, { "docid": "2dd126066c183dbeb1b4ea274c40f24b", "score": "0.7980513", "text": "def test_fma_invalid_param_floatnum_str_intarray_str_473(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatnumx, self.stry, self.intarrayz, self.strout)", "title": "" }, { "docid": "26fcd58202c6f8157dcdf350e8c8d52f", "score": "0.79798645", "text": "def test_fma_invalid_param_floatarray_floatnum_str_intarray_71(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.strz, self.intarrayout)", "title": "" }, { "docid": "6dbf79ad839a881434c0f788bb52bebb", "score": "0.7979776", "text": "def test_fma_invalid_param_floatarray_str_intnum_str_228(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.stry, self.intnumz, self.strout)", "title": "" }, { "docid": "2dd126066c183dbeb1b4ea274c40f24b", "score": "0.79797745", "text": "def test_fma_invalid_param_floatnum_str_intarray_str_473(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatnumx, self.stry, self.intarrayz, self.strout)", "title": "" }, { "docid": "b8355af5e3613a1c318c62baec7fb513", "score": "0.7979667", "text": "def test_fma_invalid_param_floatarray_str_floatarray_intnum_205(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.stry, self.floatarrayz, self.intnumout)", "title": "" }, { "docid": "bef6c7e1fd7e200313fc1ab264a9ae49", "score": "0.7979028", "text": "def test_fma_invalid_param_intarray_intarray_floatnum_str_592(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.intarrayy, self.floatnumz, self.strout)", "title": "" }, { "docid": 
"b8355af5e3613a1c318c62baec7fb513", "score": "0.7978843", "text": "def test_fma_invalid_param_floatarray_str_floatarray_intnum_205(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.stry, self.floatarrayz, self.intnumout)", "title": "" }, { "docid": "6dbf79ad839a881434c0f788bb52bebb", "score": "0.7978754", "text": "def test_fma_invalid_param_floatarray_str_intnum_str_228(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.stry, self.intnumz, self.strout)", "title": "" }, { "docid": "bef6c7e1fd7e200313fc1ab264a9ae49", "score": "0.7977228", "text": "def test_fma_invalid_param_intarray_intarray_floatnum_str_592(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.intarrayy, self.floatnumz, self.strout)", "title": "" }, { "docid": "db046fac59ddfa9c18fd8d445b3bc5bc", "score": "0.7973079", "text": "def test_fma_invalid_param_intarray_intnum_floatnum_str_634(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.intnumy, self.floatnumz, self.strout)", "title": "" }, { "docid": "db046fac59ddfa9c18fd8d445b3bc5bc", "score": "0.79728264", "text": "def test_fma_invalid_param_intarray_intnum_floatnum_str_634(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.intnumy, self.floatnumz, self.strout)", "title": "" }, { "docid": "c2c404a4a5ea601c6a684ba3bf8392b3", "score": "0.7970217", "text": "def test_fma_invalid_param_floatarray_intnum_str_intnum_156(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.intnumy, self.strz, self.intnumout)", "title": "" }, { "docid": "c2c404a4a5ea601c6a684ba3bf8392b3", "score": "0.79699016", "text": "def test_fma_invalid_param_floatarray_intnum_str_intnum_156(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.intnumy, self.strz, self.intnumout)", "title": "" }, { "docid": "0c602e52f53f36d324d273ff8ad13356", "score": "0.79688907", "text": "def test_fma_invalid_param_intarray_str_str_floatnum_742(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.stry, self.strz, 
self.floatnumout)", "title": "" }, { "docid": "0c602e52f53f36d324d273ff8ad13356", "score": "0.7968712", "text": "def test_fma_invalid_param_intarray_str_str_floatnum_742(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.stry, self.strz, self.floatnumout)", "title": "" }, { "docid": "aa54b1d042fef967717681aa2380e1f5", "score": "0.7968302", "text": "def test_fma_invalid_param_intarray_str_intnum_str_732(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.stry, self.intnumz, self.strout)", "title": "" }, { "docid": "aa54b1d042fef967717681aa2380e1f5", "score": "0.79674256", "text": "def test_fma_invalid_param_intarray_str_intnum_str_732(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.stry, self.intnumz, self.strout)", "title": "" }, { "docid": "078b77bb858fc356560ddb2bf04e4246", "score": "0.796637", "text": "def test_fma_invalid_param_floatnum_str_floatarray_intnum_457(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatnumx, self.stry, self.floatarrayz, self.intnumout)", "title": "" }, { "docid": "078b77bb858fc356560ddb2bf04e4246", "score": "0.7965608", "text": "def test_fma_invalid_param_floatnum_str_floatarray_intnum_457(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatnumx, self.stry, self.floatarrayz, self.intnumout)", "title": "" }, { "docid": "9f4b7313785dc17c1a4e801c4c07cdee", "score": "0.7963761", "text": "def test_fma_invalid_param_floatarray_intnum_str_intarray_155(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.intnumy, self.strz, self.intarrayout)", "title": "" }, { "docid": "b0c27830ce58c9956b016fad94ad5c0d", "score": "0.79633087", "text": "def test_fma_invalid_param_str_intarray_intnum_str_1362(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.strx, self.intarrayy, self.intnumz, self.strout)", "title": "" }, { "docid": "b0c27830ce58c9956b016fad94ad5c0d", "score": "0.7963172", "text": "def test_fma_invalid_param_str_intarray_intnum_str_1362(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.strx, self.intarrayy, 
self.intnumz, self.strout)", "title": "" }, { "docid": "9f4b7313785dc17c1a4e801c4c07cdee", "score": "0.79628885", "text": "def test_fma_invalid_param_floatarray_intnum_str_intarray_155(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.intnumy, self.strz, self.intarrayout)", "title": "" }, { "docid": "f6cb8f94706c98b5ef3f4a01a9a8437a", "score": "0.7960115", "text": "def test_fma_invalid_param_floatarray_intnum_str_str_158(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.intnumy, self.strz, self.strout)", "title": "" }, { "docid": "6a17eafe0096d49dd5ccb43ade99e429", "score": "0.7959832", "text": "def test_fma_invalid_param_intnum_str_floatarray_str_963(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intnumx, self.stry, self.floatarrayz, self.strout)", "title": "" }, { "docid": "6a17eafe0096d49dd5ccb43ade99e429", "score": "0.7959832", "text": "def test_fma_invalid_param_intnum_str_floatarray_str_963(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intnumx, self.stry, self.floatarrayz, self.strout)", "title": "" }, { "docid": "f6cb8f94706c98b5ef3f4a01a9a8437a", "score": "0.7959404", "text": "def test_fma_invalid_param_floatarray_intnum_str_str_158(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.intnumy, self.strz, self.strout)", "title": "" }, { "docid": "d83a9138e5b0d7cbfb11969d897a4f4a", "score": "0.79551524", "text": "def test_fma_invalid_param_floatnum_str_intarray_floatarray_468(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatnumx, self.stry, self.intarrayz, self.floatarrayout)", "title": "" }, { "docid": "abb18b482d6ddca58b6d14925c2e6081", "score": "0.7954906", "text": "def test_fma_invalid_param_intarray_str_floatarray_floatnum_707(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.stry, self.floatarrayz, self.floatnumout)", "title": "" }, { "docid": "abb18b482d6ddca58b6d14925c2e6081", "score": "0.7954906", "text": "def test_fma_invalid_param_intarray_str_floatarray_floatnum_707(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith 
self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.stry, self.floatarrayz, self.floatnumout)", "title": "" }, { "docid": "d83a9138e5b0d7cbfb11969d897a4f4a", "score": "0.79540807", "text": "def test_fma_invalid_param_floatnum_str_intarray_floatarray_468(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatnumx, self.stry, self.intarrayz, self.floatarrayout)", "title": "" }, { "docid": "3a903540159a0f6fed216d3552aa2f73", "score": "0.7953621", "text": "def test_fma_invalid_param_floatarray_intarray_str_str_116(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.intarrayy, self.strz, self.strout)", "title": "" }, { "docid": "fc05398d0ab963e5f7287a961b2eb0fa", "score": "0.79534507", "text": "def test_fma_invalid_param_str_str_floatarray_intnum_1465(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.strx, self.stry, self.floatarrayz, self.intnumout)", "title": "" }, { "docid": "3a903540159a0f6fed216d3552aa2f73", "score": "0.7952867", "text": "def test_fma_invalid_param_floatarray_intarray_str_str_116(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.intarrayy, self.strz, self.strout)", "title": "" }, { "docid": "96ca32a3df00611b60fd4ab6769f0488", "score": "0.7952539", "text": "def test_fma_invalid_param_intarray_intarray_str_floatarray_615(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.intarrayy, self.strz, self.floatarrayout)", "title": "" }, { "docid": "96ca32a3df00611b60fd4ab6769f0488", "score": "0.7952539", "text": "def test_fma_invalid_param_intarray_intarray_str_floatarray_615(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.intarrayy, self.strz, self.floatarrayout)", "title": "" }, { "docid": "fc05398d0ab963e5f7287a961b2eb0fa", "score": "0.7952406", "text": "def test_fma_invalid_param_str_str_floatarray_intnum_1465(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.strx, self.stry, self.floatarrayz, self.intnumout)", "title": "" }, { "docid": "37c0feeb62eee6f5cb6bae9270c4e3d1", "score": "0.79510105", "text": "def test_fma_invalid_param_intarray_floatarray_intnum_str_522(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, 
self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.floatarrayy, self.intnumz, self.strout)", "title": "" }, { "docid": "27a1f0f3f30831d687e77f5d166ca174", "score": "0.7950916", "text": "def test_fma_invalid_param_intarray_floatnum_str_intnum_576(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.floatnumy, self.strz, self.intnumout)", "title": "" }, { "docid": "7f276510977161291af82b6bc4876dcf", "score": "0.79501784", "text": "def test_fma_invalid_param_intnum_floatnum_str_floatarray_825(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intnumx, self.floatnumy, self.strz, self.floatarrayout)", "title": "" }, { "docid": "7f276510977161291af82b6bc4876dcf", "score": "0.79501784", "text": "def test_fma_invalid_param_intnum_floatnum_str_floatarray_825(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intnumx, self.floatnumy, self.strz, self.floatarrayout)", "title": "" }, { "docid": "a8ff32e4c62cbff4bb5d52b87c587fd1", "score": "0.7950073", "text": "def test_fma_invalid_param_intnum_str_floatnum_str_970(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intnumx, self.stry, self.floatnumz, self.strout)", "title": "" }, { "docid": "37c0feeb62eee6f5cb6bae9270c4e3d1", "score": "0.79498494", "text": "def test_fma_invalid_param_intarray_floatarray_intnum_str_522(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.floatarrayy, self.intnumz, self.strout)", "title": "" }, { "docid": "a8ff32e4c62cbff4bb5d52b87c587fd1", "score": "0.7949791", "text": "def test_fma_invalid_param_intnum_str_floatnum_str_970(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatnumz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intnumx, self.stry, self.floatnumz, self.strout)", "title": "" }, { "docid": "91ccb6b0a0586b3ffc7a4c5435e6ad70", "score": "0.79493076", "text": "def test_fma_invalid_param_floatnum_str_intarray_intnum_471(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatnumx, self.stry, self.intarrayz, self.intnumout)", "title": "" }, { "docid": "27a1f0f3f30831d687e77f5d166ca174", "score": "0.79492956", "text": "def test_fma_invalid_param_intarray_floatnum_str_intnum_576(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, 
self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.floatnumy, self.strz, self.intnumout)", "title": "" }, { "docid": "160f13da4384200b637061854e0f2e5f", "score": "0.7949106", "text": "def test_fma_invalid_param_intarray_intarray_str_intnum_618(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.intarrayy, self.strz, self.intnumout)", "title": "" }, { "docid": "160f13da4384200b637061854e0f2e5f", "score": "0.794884", "text": "def test_fma_invalid_param_intarray_intarray_str_intnum_618(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.intarrayx, self.intarrayy, self.strz, self.intnumout)", "title": "" }, { "docid": "91ccb6b0a0586b3ffc7a4c5435e6ad70", "score": "0.7948244", "text": "def test_fma_invalid_param_floatnum_str_intarray_intnum_471(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatnumx, self.stry, self.intarrayz, self.intnumout)", "title": "" }, { "docid": "db1f0d5ede7c7fed8d664880060732ae", "score": "0.7946502", "text": "def test_fma_invalid_param_floatarray_intarray_str_intnum_114(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.intarrayy, self.strz, self.intnumout)", "title": "" }, { "docid": "db1f0d5ede7c7fed8d664880060732ae", "score": "0.79463464", "text": "def test_fma_invalid_param_floatarray_intarray_str_intnum_114(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatarrayy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatarrayx, self.intarrayy, self.strz, self.intnumout)", "title": "" }, { "docid": "dd07a7340b6f2839cf12fbedcb0bbf21", "score": "0.7945929", "text": "def test_fma_invalid_param_floatnum_intnum_str_intnum_408(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.fma(self.floatarrayx, self.floatnumy, self.floatarrayz, self.floatarrayout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.fma(self.floatnumx, self.intnumy, self.strz, self.intnumout)", "title": "" } ]
c08c0ec5a4b64a882c6fc119f5bdfc3d
Request a token from service
[ { "docid": "f9d8e22cb53b8dc916d04b680d731cf2", "score": "0.70175856", "text": "def request_token(self):\n token_url = 'https://uaa.%s/oauth/token' % self.url\n headers = {\n 'accept': 'application/json',\n 'authorization': 'Basic Y2Y6'\n }\n params = {\n 'username': self.username,\n 'password': self.password,\n 'grant_type': 'password'\n }\n r = requests.post(url=token_url, headers=headers, params=params)\n self.token = r.json()\n self.token['time_stamp'] = time.time()", "title": "" } ]
[ { "docid": "d80d4585dd37923bedbd7e387d3b3c0f", "score": "0.71444774", "text": "def _token_request(self, request):\n\n if not self._client.token_endpoint:\n return None\n\n logger.debug('making token request: %s', request)\n client_auth_method = self._client.registration_response.get('token_endpoint_auth_method', 'client_secret_basic')\n auth_header = _ClientAuthentication(self._client.client_id, self._client.client_secret)(client_auth_method,\n request)\n resp = self._provider_configuration.requests_session \\\n .post(self._client.token_endpoint,\n data=request,\n headers=auth_header) \\\n .json()\n logger.debug('received token response: %s', json.dumps(resp))\n\n token_resp = self._parse_response(resp, AccessTokenResponse, TokenErrorResponse)\n if 'id_token' in resp:\n token_resp['id_token_jwt'] = resp['id_token']\n\n return token_resp", "title": "" }, { "docid": "2ef6bd034dbb4a542904c42743f47f2e", "score": "0.71138066", "text": "def get_token(url, body):\n return api_post(url, body)", "title": "" }, { "docid": "5c8aa411c6c44dbabc70740131165073", "score": "0.70831203", "text": "def send_request(self, token, url='/'):\n return self.client.get(url, headers={\n 'Authorization': b'Bearer ' + token\n })", "title": "" }, { "docid": "b8be43c317e5a267ae0b499f3a043628", "score": "0.7058087", "text": "def get_token(self):\r\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\r\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\r\n headers = {'User-Agent': self.user_agent}\r\n response = requests.Session()\r\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\r\n self.token = response2.json()['access_token']\r\n self.t_type = response2.json()['token_type']", "title": "" }, { "docid": "38ed91687a6bb9b5912e8c3b4397de27", "score": "0.7029846", "text": "def _get_token(self, path, req_body):\n token_url = urlparse.urljoin(self.auth_url, path)\n resp, body = self.request(token_url, \"POST\", body=req_body)\n if 'access' in body:\n if not self.management_url:\n # Assume the new Keystone lite:\n catalog = body['access']['serviceCatalog']\n for service in catalog:\n if service['name'] == self.service:\n self.management_url = service['adminURL']\n self.auth_token = body['access']['token']['id']\n else:\n # Assume pre-Keystone Light:\n try:\n if not self.management_url:\n keys = ['auth',\n 'serviceCatalog',\n self.service,\n 0,\n 'publicURL']\n url = body\n for key in keys:\n url = url[key]\n self.management_url = url\n self.auth_token = body['auth']['token']['id']\n except KeyError:\n raise NotImplementedError(\"Service: %s is not available\"\n % self.service)", "title": "" }, { "docid": "3e73a56b4ddc0b3398df2b8bfa5642e7", "score": "0.694203", "text": "async def fetch_token(self):\n\n params = {\n 'command': 'request'\n }\n payload = await self._request('GET', 'api_token.php', params=params)\n return payload['token']", "title": "" }, { "docid": "8b5cbea0474a64e17ace95d7e86b2759", "score": "0.68564594", "text": "def token(address, username, password):\n url = \"https://{}/api/system/v1/auth/token\".format(address)\n header = {\"content-type\": \"application/json\"}\n auth = HTTPBasicAuth(username=username,password=password)\n\n try:\n #SSL certification is turned off, but should be active in production environments\n response = requests.post(url,auth=auth, headers=header, verify=False)\n print(\"service ticket status: \", response.status_code)\n token = response.json()[\"Token\"]\n return token\n except:\n 
print( \"Error!\")\n return 0", "title": "" }, { "docid": "e7dfe9a91f1d01f2979f7a6e0e9028ea", "score": "0.6847043", "text": "def get_token(self):\n headers = {'Content-Type': 'application/json', 'Authorization': 'my-auth-token'}\n payload = {'username': self.username, 'password': self.password, 'audience': self.audience}\n res = requests.post(self.endpoint + 'auth/login/', json=payload, headers=headers)\n if res.ok:\n self.token = res.json()['token']\n logger.info('(HTTP) Token OK.')\n else:\n print('Error on api get_token.')\n logger.info('(HTTP) Error on api get_token.')", "title": "" }, { "docid": "ef7534cfb9e4a055d632ceece682d453", "score": "0.67925054", "text": "def _get_token_response(self) -> OAuthTokenResponse:", "title": "" }, { "docid": "404f3ba40682b2d7bb5f6f34b00e8051", "score": "0.6771003", "text": "def GetFirstToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "36a5a8c8178a49c545399c89b467b632", "score": "0.6763902", "text": "def _oauth2_request(self, url, token):\r\n return urlfetch.fetch(url.format(urlencode({'access_token':token}))).content", "title": "" }, { "docid": "78bb24f98b5e5829467cbe41ed43b329", "score": "0.67586994", "text": "def request_api_token():\n payload = {\n 'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n 'grant_type': 'client_credentials',\n 'scope': 'client'\n }\n\n r = requests.post(url=AUTH_URL, headers=AUTH_HEADERS, data=payload, verify=VERIFY_CERT)\n response_json = r.json()\n if 200 <= r.status_code <= 299:\n api_key = response_json['access_token']\n CLIENT_HEADERS['Authorization'] = 'Bearer ' + api_key\n else:\n return_error(f'Error in request_api_token [{r.status_code}] - {r.reason}')", "title": "" }, { "docid": "3304a15b57b806566c7fe52706ba4afc", "score": "0.67559", "text": "def access_token(*args, **kwargs):\n # pylint: disable=unused-argument\n log.debug(\"requested token\")\n\n\n return None", "title": "" }, { "docid": "597e56bdc7c4ddbdaf9bdaee6aa5e928", "score": "0.6738392", "text": "def do_token_get(kc, args):\n print kc.auth_token", "title": "" }, { "docid": "3d4905f10966e82b15f613fd32d73fb1", "score": "0.6730914", "text": "def get_request_token(self):\n data = self._fetch('oauth-request-token', as_json=False)\n return oauth.OAuthToken.from_string(data)", "title": "" }, { "docid": "9301cf6bfcc51d08701cd00785f2ff12", "score": "0.66629297", "text": "def auth_getRequestToken(self, **kwargs):\r\n auth = self._make_handler(\"auth_getRequestToken\")\r\n rsp = auth(**kwargs)\r\n self.set_oauth_token(rsp[\"Auth\"][\"Token\"][\"id\"],\r\n rsp[\"Auth\"][\"Token\"][\"Secret\"])\r\n return rsp", "title": "" }, { "docid": "9f9dcd7e34897f56b7e297cba00a41c3", "score": "0.6649985", "text": "def token_endpoint(app):\n @app.route('/test/token/access', methods=['GET'])\n @token_auth.login_required\n def test_token_access():\n return msg.success('Successful API call with token required')", "title": "" }, { "docid": "1a73ec10a45382ed45f6e0165b66618f", "score": "0.66254294", "text": "def test_get_token(self):\r\n with httmock.HTTMock(device_drivers.csr_request.token):\r\n self.assertTrue(self.csr.authenticate())\r\n self.assertEqual(requests.codes.OK, self.csr.status)\r\n self.assertIsNotNone(self.csr.token)", "title": "" }, { "docid": "165cb483ac02572d84b3acaa6b90205d", "score": "0.6623653", "text": "def _get_token(self):\n url = \"http://127.0.0.1:5000/api/token\"\n req = requests.get(url, 
auth=(self.username, self.password))\n if req.status_code == requests.codes.ok:\n return req.json()['token']\n else:\n return None", "title": "" }, { "docid": "c4f4e114a58a94c6252191b7d7aea785", "score": "0.66139525", "text": "def get_token_endpoint():\n incoming = request.get_json()\n user = User.get_user_with_email_and_password(incoming[\"email\"], incoming[\"password\"])\n\n if user:\n return jsonify(token = generate_token(user))\n else:\n return jsonify(error = True), 403", "title": "" }, { "docid": "6f4a0abefd3683cf5069a1e7b9b6750c", "score": "0.6610584", "text": "def get_token(self):\n ext = '/api/token/'\n req = self.root_url + ext\n res = requests.post(req, data={'username': self.user, 'password': self._passwd})\n status = res.json()['status']\n if status == 'ok':\n self.token = Token(res.json()['token'], self.root_url)\n else:\n self.token = None", "title": "" }, { "docid": "79cea7304a5c29bc8fefdfc484341fd5", "score": "0.65995115", "text": "def _get_token(session, user_id):\n pass", "title": "" }, { "docid": "86982f3642731688bdce8121db90f81e", "score": "0.65736294", "text": "def get_token():\r\n return secrets.token_urlsafe(50)", "title": "" }, { "docid": "e2ada5eb794f4d33db6c8ee3828f85d6", "score": "0.6553902", "text": "def get_request_token(self):\n self.consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)\n self.client = oauth.Client(self.consumer)\n\n self.response, self.content = self.client.request(self.request_token_url, \"GET\")\n if self.response['status'] != '200':\n raise Exception(\"Invalid response %s.\" % self.response['status'])\n\n return dict(urlparse.parse_qsl(self.content))", "title": "" }, { "docid": "b7ea3f4b6a7d83d094583b69e5876c99", "score": "0.65469825", "text": "def fetch_request_token(self, callback=None):\n url = self.request_token_url\n method = self.REQUEST_METHOD\n parameters = self.request_token_parameters(callback=callback)\n parameters = self.signature_method.sign('GET', url, parameters,\n self.consumer)\n response = self.fetch_response(url, parameters, method)\n return response.add_callback(token_from_response)", "title": "" }, { "docid": "d6d72aedbca346070be2850f629e948e", "score": "0.6542218", "text": "async def request(self, method, url: str):\n headers = {}\n access_token = self.credentials.get_access_token(httplib2.Http()).access_token\n headers['Authorization'] = 'Bearer ' + access_token\n return await self.session.request(method, url, headers=headers)", "title": "" }, { "docid": "31c279467897262135823ce61ecd2d2e", "score": "0.6508452", "text": "def requestToken(self, ip, duration):\n\n self.logger.info(\n \"Request requestToken({0}, {1})\".format(ip, duration)\n )\n token = self.access_manager.generate_token(ip, duration)\n return token", "title": "" }, { "docid": "bf9dd0557534c042b38c494620de710f", "score": "0.64855283", "text": "def token_endpoint(self, context):\n headers = {\"Authorization\": context.request_authorization}\n try:\n response = self.provider.handle_token_request(\n urlencode(context.request),\n headers,\n lambda user_id, client_id: self._get_extra_id_token_claims(user_id, client_id))\n return Response(response.to_json(), content=\"application/json\")\n except InvalidClientAuthentication as e:\n logline = \"invalid client authentication at token endpoint\"\n logger.debug(logline, exc_info=True)\n error_resp = TokenErrorResponse(error='invalid_client', error_description=str(e))\n response = Unauthorized(error_resp.to_json(), headers=[(\"WWW-Authenticate\", \"Basic\")],\n content=\"application/json\")\n return 
response\n except OAuthError as e:\n logline = \"invalid request: {}\".format(str(e))\n logger.debug(logline, exc_info=True)\n error_resp = TokenErrorResponse(error=e.oauth_error, error_description=str(e))\n return BadRequest(error_resp.to_json(), content=\"application/json\")", "title": "" }, { "docid": "8015a8fea62526d2beaed4a931fe24a3", "score": "0.64845914", "text": "def _make_request(self, data):\n cert = self._get_cert()\n\n response = request(\n self.access_token_method,\n self.access_token_url,\n headers=self.headers,\n cert=cert,\n data=data,\n )\n\n return response", "title": "" }, { "docid": "617c0c769cc5f51deb6653815df6ddd3", "score": "0.6471933", "text": "async def get_token(\n self,\n request: Request,\n client_id: str,\n access_token: Optional[str] = None,\n refresh_token: Optional[str] = None,\n ) -> Optional[Token]:\n raise NotImplementedError(\"Method get_token must be implemented\")", "title": "" }, { "docid": "efdb2ec11afcf60ea6eaf078d6242df3", "score": "0.6460488", "text": "def tc_token(self):\n if self.tc_api_path is None: # no API path, no token\n return None\n data = None\n token = None\n token_url_path = self.env_store.getenv('/ninja/tc/token/url_path', env_type='remote')\n if token_url_path is None:\n # could not retrieve URL path\n return None\n\n # determine the token type\n token_type = 'api'\n # per conversation with Marut, we should be able to just use api tokens\n # if self.ij.runtime_level.lower() in [\n # 'apiservice',\n # 'triggerservice',\n # 'webhooktriggerservice',\n # ]:\n # data = {'serviceId': os.getenv('TC_TOKEN_SVC_ID', '441')}\n # token_type = 'svc'\n\n # retrieve token from API using HMAC auth\n r = self.session_exchange.post(f'{token_url_path}/{token_type}', json=data, verify=True)\n if r.status_code == 200:\n token = r.json().get('data')\n self.log.data('setup', 'Using Token', token)\n self.log.data('setup', 'Token Elapsed', r.elapsed, 'trace')\n else:\n self.log.error(f'Failed to retrieve token ({r.text})')\n return token", "title": "" }, { "docid": "bd65e1a9b3a0c8e2aca8323b4065251c", "score": "0.6437397", "text": "def get_token():\n request_data = request.get_json()\n if not valid_user_login(request_data):\n return Response(error_response(), 400, mimetype='application/json')\n\n center_id = User.query.filter_by(login=request_data['Login']).first()\n login = str(request_data['Login'])\n password = str(request_data['Password'])\n\n condition = valid_login_password(login, password)\n\n if condition:\n expiration_data = datetime.datetime.utcnow() + datetime.timedelta(seconds=100)\n AccessToken.add_request(center_id.id, str(expiration_data))\n token = jwt.encode({'exp': expiration_data}, app.config['SECRET_KEY'], algorithm='HS256')\n return token\n else:\n return Response(error_response(), 400, mimetype='application/json')", "title": "" }, { "docid": "5996e88b16276157375051b55e5fe1de", "score": "0.6426599", "text": "def bearer_token_task():\r\n return AuthToken.objects.getaccesstoken()", "title": "" }, { "docid": "70dbc02e56b584e1a627a48bb730303d", "score": "0.6390289", "text": "def GetTokenAt(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "2215ede663bdd3dd3d0f71bb8ee9311c", "score": "0.6387008", "text": "def get_token(AUTH):\n # Get token\n url_get_auth = \"https://api.hatebase.org/4-4/authenticate\"\n\n\n # get toekn from the server\n a = requests.post(url=url_get_auth, data=AUTH)\n auth 
= a.json()\n token = auth['result']['token']\n\n return token", "title": "" }, { "docid": "87d0da4556fec823f42d7599ae0de3ca", "score": "0.6380539", "text": "def request_access_token_auth_code_flow(self):\n\n credentials_str = '{0}:{1}'.format(CLIENT_ID, CLIENT_SECRET)\n encoded_credentials_str = base64.urlsafe_b64encode(credentials_str.encode()).decode()\n\n headers = {\n 'Authorization': 'Basic {}'.format(encoded_credentials_str),\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n body = {\n 'grant_type': 'client_credentials'\n }\n\n token_req = requests.post(SPOTIFY_AUTH_URL, headers=headers, data=body)\n self.token = token_req.json()", "title": "" }, { "docid": "27d7bb5d778220ea2385d5e679173397", "score": "0.63755614", "text": "def gettoken_call():\n auth_url = \"https://accounts.spotify.com/api/token\"\n auth_body_params = {\n \"grant_type\": \"client_credentials\",\n \"client_id\": SPOTIFY_ID,\n \"client_secret\": SPOTIFY_SECRET,\n }\n auth_response = requests.post(auth_url, data=auth_body_params)\n auth_data = auth_response.json()\n return auth_data", "title": "" }, { "docid": "7fa2b9ce1deb2b53e94dd7503235d9cc", "score": "0.63646156", "text": "def getToken():\n expireTime = \"30\"\n tokenKey = u'token'\n expiresKey = u'expires'\n tokenPath = \"/sharing/generateToken\"\n\n clientIP = socket.gethostbyname(socket.gethostname())\n\n if Local.USER is None or Local.PASS is None:\n userName = raw_input(\"User Name: \")\n passWord = getpass.getpass()\n else:\n userName = Local.USER\n passWord = Local.PASS\n\n pw = hashlib.md5()\n pw.update(passWord)\n if pw.hexdigest() != Local.MD5HASH:\n print \"Unknown password -- md5hash:\", pw.hexdigest()\n print \"To use your password, copy and paste the md5hash into Local.py.\"\n sys.exit(1)\n \n params = { 'username': userName,\n 'password': passWord,\n 'expiration': expireTime,\n 'client': \"referer\",\n 'referer': clientIP }\n\n response = getQueryResponse(tokenPath, params)\n checkKey(tokenKey, response)\n checkKey(expiresKey, response)\n return (response[u'token'], response[u'expires'])", "title": "" }, { "docid": "decd0efd2201213f3f9c07717c022920", "score": "0.63551205", "text": "def aut_request(self, data=None, url=None):\r\n url = url + \"/user-service/api/oauth/token\"\r\n headers = self.api.headers\r\n return_value = None\r\n try:\r\n response = requests.post(url, headers=headers, data=data)\r\n if response.status_code == 200:\r\n return_value = response.text\r\n else:\r\n print('authentication request to %s returned status code %i' % (url, response.status_code))\r\n except requests.ConnectionError as pe:\r\n # TODO: some handling here, for now just print pe\r\n print('when an authentication request to the oc4 api was made, the following error was raised %s' % (pe))\r\n return_value = None\r\n \r\n return return_value", "title": "" }, { "docid": "3462648f38cb7f716336538767377a84", "score": "0.63494843", "text": "def token(username, password):\n click.echo('token...')\n with click_spinner.spinner():\n click.echo(Token(username, password))", "title": "" }, { "docid": "4edcc34b6b3e673cfaabc55247b44da6", "score": "0.6338139", "text": "def post(self, request): \n r = requests.post(\n ROOT+'/o/token/', \n data={\n 'grant_type': \"password\",\n 'username': request.data['username'],\n 'password': request.data['password'],\n 'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n },\n )\n return Response(r.json())", "title": "" }, { "docid": "ad3db75e6b786332e256eada2ceef9d9", "score": "0.6333727", "text": "def get_auth_token():\n 
response = requests.head(f\"{BASE_URL}/auth\")\n return response", "title": "" }, { "docid": "731c095634ed29c3708e1cfcf50bbef3", "score": "0.63310057", "text": "def generate_token(self):\n\n result = None\n\n # Dictionary containing all the params value.\n payload = {'get_token': \"\"}\n\n # try to perform the request\n try:\n request = requests.get(Settings.api_url + \"/\" + AuthenticateGateway.ENTRY_POINT, params=payload,\n json=self.__auth_data)\n\n # Convert string to list.\n parsed_result = json.loads(request.text[1:-1])\n\n if request.ok:\n result = parsed_result['token']\n elif 'error' in parsed_result:\n print('Error arise: ' + str(parsed_result['error']))\n\n except requests.exceptions.ConnectionError:\n print(\"Connection to server \"\n + Settings.api_url\n + \" failed. Please check if the server is working or if you have internet connection.\")\n except requests.exceptions.Timeout:\n print(\"Request timeout. Maybe set up for a retry, or continue in a retry loop.\")\n except requests.exceptions.TooManyRedirects:\n print(\"Bad URL (\" + Settings.api_url + \"). Try a different one\")\n except requests.exceptions.RequestException as e:\n print(\"Some error occur: \\n\" + str(e))\n except json.decoder.JSONDecodeError as e:\n print(\"There was a problem accessing the response: \\n\" + str(e))\n\n return result", "title": "" }, { "docid": "0bd50029e52a4af7569266f055eef259", "score": "0.6325233", "text": "def get_initial_token(self, code):\n data = {\n \"code\": code,\n \"client_id\": self.client_id,\n \"grant_type\": \"authorization_code\",\n \"client_secret\": self.client_secret,\n \"redirect_uri\": \"urn:ietf:wg:oauth:2.0:oob\"\n }\n url = self.API_HOST.format(path=self.GET_TOKEN_URL)\n r = requests.post(url, data=data)\n return self.check_for_failure(r)", "title": "" }, { "docid": "fb792c36a4e4ea093bdd1e6ec0d49df7", "score": "0.6321718", "text": "async def __call__(self, request: Request) -> str:\n token = request.cookies.get(self._token_name)\n if not token:\n raise HTTPException(status_code=403, detail=\"Not authenticated\")\n return token", "title": "" }, { "docid": "c77488b6afb54c47ead147d6e4e8086f", "score": "0.6320644", "text": "def _get_token(self):\n token = Token.objects.get(\n user=self.request.user,\n application=self.object,\n )\n return token", "title": "" }, { "docid": "0b15a81e233113829177c29829f2d00a", "score": "0.6308535", "text": "def query_token(self, token, token_type_hint, client):\n\n raise NotImplementedError()", "title": "" }, { "docid": "d618967c43b8de165cd1b42fbcc4886d", "score": "0.6307849", "text": "def get_with_token_auth(self, path, token):\n h = Headers()\n h.add('X-Auth-Token', token)\n response = Client.open(self.client, path=path, headers=h)\n return response", "title": "" }, { "docid": "7a20417f76f4e2bfe6f1f91645717107", "score": "0.6302508", "text": "def _token_introspection_request(self, access_token: str) -> TokenIntrospectionResponse:\n request_args = {\n 'token': access_token,\n 'token_type_hint': 'access_token'\n }\n client_auth_method = self._client.registration_response.get('introspection_endpoint_auth_method',\n 'client_secret_basic')\n logger.info('making token introspection request')\n token_introspection_response = self._client_extension.do_token_introspection(\n request_args=request_args, authn_method=client_auth_method, endpoint=self._client.introspection_endpoint)\n\n return token_introspection_response", "title": "" }, { "docid": "ba932a01cc57c03d4d951d49ba7ec91a", "score": "0.6298964", "text": "def get_token():\n return 
session.get('microsoft_token')", "title": "" }, { "docid": "da7eb226f2d11458422e0490a3a44a5b", "score": "0.6298051", "text": "def get_token(domain, client_id, client_secret, grant_type=\"client_credentials\"):\n\n payload = {\n \"grant_type\": grant_type,\n \"client_id\": client_id,\n \"client_secret\": client_secret,\n \"audience\": \"https://%s/api/v2/\" % domain}\n url = 'https://%s/oauth/token' % domain\n client = RestClient()\n return client.post(url, payload)", "title": "" }, { "docid": "f0157390f66eeab224b09206314a976d", "score": "0.62916434", "text": "def getToken(client_id, client_secret):\n req = {'client_id' : client_id, 'client_secret': client_secret, 'grant_type' : 'client_credentials','scope':'data:read'}\n resp = requests.post('https://developer.api.autodesk.com/authentication/v1/authenticate', req).json();\n return resp['access_token']", "title": "" }, { "docid": "33218ac5da2a98972b79520472408778", "score": "0.62913865", "text": "def get_token(self):\n ext = '/api/token/'\n req = self.root_url + ext\n res = requests.post(req, data={'username': self.user, 'password': self._passwd})\n status = res.json()['status']\n if status == 'ok':\n self.token = Token(res.json()['token'], self.root_url)\n else:\n self.token = None\n if self.verbose:\n print(res.json()['message'])\n return self.token.value", "title": "" }, { "docid": "8cf00fd1b8b6e0a684b9ff26c5a2027a", "score": "0.62910396", "text": "def _get_token(cls):\n print(f\"WeRead: _get_token()\")\n data = {\n \"code\": cls.wx_code,\n \"deviceId\": cls.deviceId,\n \"mailDeviceId\": cls.mailDeviceId,\n \"random\": 937,\n \"signature\": cls.sign[\"signature\"],\n \"timestamp\": cls.sign[\"timestamp\"],\n \"trackId\": \"\"\n }\n resp = requests.post(cls.TOKEN_URL, json=data, headers=cls.weread_headers)\n cls.token = resp.json()\n cls.token[\"from\"] = int(time.time())", "title": "" }, { "docid": "32012c8e7be36a7a678fdd29a29e40f0", "score": "0.62881374", "text": "def getToken(self):\n url = 'https://arcgis.com/sharing/rest/generateToken'\n data = {'username': self.username,\n 'password': self.password,\n 'referer' : 'https://www.arcgis.com',\n 'f': 'json'}\n return requests.post(url, data).json()['token']", "title": "" }, { "docid": "bede6339d7561744158a42bcda173d20", "score": "0.6282395", "text": "def token(self) -> Dict[str, Any]:\n token: Dict[str, Any]\n\n token = self._auth_context.acquire_token_with_client_credentials(\n self.__api_endpoint, self._client_id, self._client_secret\n )\n\n if \"expiresOn\" in token:\n self.__logger.info(\n \"%s: Acquired token expires on: %s\", self, token[\"expiresOn\"]\n )\n else:\n self.__logger.error(\"%s: No valid API token received\", self)\n\n return token", "title": "" }, { "docid": "db9a12740afbe3b7a18f7d87cac3e5a1", "score": "0.6277448", "text": "def get_auth_token():\n payload = {\n \"username\": settings.AUTH_TOKEN_USERNAME,\n \"password\": settings.AUTH_TOKEN_PASSWORD\n }\n headers = {\n \"Content-Type\": \"application/json\",\n 'user-agent': 'SYSTEM'\n }\n try:\n response = requests.post(\n settings.AUTH_TOKEN_ENDPOINT,\n data=json.dumps(payload),\n headers=headers,\n timeout=(settings.CONNECTIVITY, settings.RESPONSE_TIMEOUT)\n )\n response.raise_for_status()\n except requests.exceptions.HTTPError as error:\n raise Exception(\n 'error: could not get content from url because of {}'.format(\n response.status_code))\n except requests.exceptions.ConnectTimeout:\n raise Exception(\n 'error: requests.exceptions.ConnectTimeout while {}'.format(\n \"getting auth_token\"))\n\n return 
response.json()['token']", "title": "" }, { "docid": "39a06f311a301e8b5b8a0068d4649552", "score": "0.6271232", "text": "def get_token(self):\n return_code, response = self.__send_command(\"get_token\", self.api_key)\n if return_code == 200:\n return str(response[\"token\"])\n else:\n raise ValueError(response[\"error\"])", "title": "" }, { "docid": "607e22f9f47aa271c750039fe1c54f1c", "score": "0.6257102", "text": "def getToken(context, value):", "title": "" }, { "docid": "fa66178953c1bb0813edd370d3fba4fe", "score": "0.6255765", "text": "def get_token(uid):\n token = auth.create_custom_token(uid)\n data = {\n 'token': token.decode(),\n 'returnSecureToken': True\n }\n\n url = f\"https://identitytoolkit.googleapis.com/v1/accounts:signInWithCustomToken?key={API_KEY}\"\n\n response = requests.post(url, json=data)\n\n return response.json()", "title": "" }, { "docid": "2fc5566520b7081e793865387ee2bdf2", "score": "0.62542576", "text": "def get_recomm_token():\n data={\"name\":app.config[\"RECOMM_API_USER\"],\"password\":app.config[\"RECOMM_API_PASS\"]}\n try:\n response=requests.post(app.config[\"RECOMM_API_AUTHENTICATE_URL\"],data=data)\n token=response.json().get('token')\n return token\n except:\n return None", "title": "" }, { "docid": "ae2e12021652f3d745ae1a016012dff8", "score": "0.6251343", "text": "async def get_token(self, type: str = \"csrf\"):\n return await self.http.get_token(type)", "title": "" }, { "docid": "013fb3edccff9c9a59ce909d324111d1", "score": "0.623962", "text": "def getToken(self):\n \n raise NotImplementedError", "title": "" }, { "docid": "cfbbaca519794cdcfea0fe9ed38c863e", "score": "0.62374175", "text": "def get_token(base_url: furl) -> str:\n token_url: furl = base_url.copy()\n token_url.add(path=UPM_API_ENDPOINT)\n token_url.set(args={\"os_authType\": \"basic\"})\n token_response = requests.head(token_url.url)\n token = token_response.headers[\"upm-token\"]\n return token", "title": "" }, { "docid": "499c39d545a87c65423b59292ecf8e70", "score": "0.62373024", "text": "def token_terminal_request(url: str, api_key: str) -> Dict:\n key = api_key\n headers = {\"Authorization\": f\"Bearer {key}\"}\n r = requests.get(url, headers=headers)\n return r.json()", "title": "" }, { "docid": "bcb1f62463bbadd84b87e3994a17f29d", "score": "0.62289155", "text": "def request_token_info(self, access_token):\n\n # build URL for token request\n url = self._build_url(TOKEN_INFO_PATH)\n\n # redefine headers for token request\n headers = dict(self._headers)\n\n # add content-type\n headers['Content-Type'] = 'application/x-www-form-urlencoded'\n headers['access_token'] = access_token\n\n # define client credentials payload\n client_credentials_payload = {\n }\n\n # url-encode payload\n data = urlencode(client_credentials_payload)\n\n # request access token and expiry duration\n access_token_request = self._make_requests(data=data, headers=headers, method='POST', url=url)\n\n return access_token_request", "title": "" }, { "docid": "18e78979e0203d5059d6a87aa258eb2c", "score": "0.6218395", "text": "def request_access_token_client_auth_flow(self):\n\n token_obj = self.send_auth_request('client_credentials')\n self.token = token_obj['access_token']\n # calculates a new expiration timestamp based on the received auth token expiration\n self.token_expiration = datetime.datetime.now() \\\n + datetime.timedelta(0, token_obj['expires_in'])", "title": "" }, { "docid": "067560e5360415bf42f8a42da2e8af64", "score": "0.6217953", "text": "def get_token(self):\n path = 'Login'\n payload = {\n 'mode': 4,\n 
'username': self.user,\n 'password': b64encode(self.pw.encode('UTF-8')).decode('UTF-8')}\n res = self.request('POST', path, payload=payload)\n data = res.json()\n if 'token' in data and data['token']:\n self.headers['Authtoken'] = data['token']\n return self.headers['Authtoken']\n else:\n msg = 'Commvault user or pass incorrect'\n raise_requests_error(401, msg)", "title": "" }, { "docid": "ba24d98b69bb9f5d398de566296475e0", "score": "0.620913", "text": "def token_response(client, oauth_client):\n return oauth2.get_token_response(client, oauth_client)", "title": "" }, { "docid": "b0ec48d7b904d278689c18a21d80229f", "score": "0.6195311", "text": "def request_token(self, parser=None, redirect_uri=None, **kwargs):\r\n kwargs = kwargs and kwargs or {}\r\n\r\n parser = parser or _default_parser\r\n kwargs.update({\r\n 'client_id': self.client_id,\r\n 'client_secret': self.client_secret,\r\n 'grant_type': 'grant_type' in kwargs and kwargs['grant_type'] or \\\r\n 'authorization_code'\r\n })\r\n if redirect_uri is not None:\r\n kwargs.update({'redirect_uri': redirect_uri})\r\n\r\n # TODO: maybe raise an exception here if status code isn't 200?\r\n msg = urlopen(self.token_endpoint, urlencode(kwargs).encode('utf-8'))\r\n data = parser(msg.read().decode(msg.info().get_content_charset() or 'utf-8'))\r\n\r\n # expires_in is RFC-compliant. if anything else is used by the\r\n # provider, token_expires must be set manually\r\n print data\r\n self.access_token = data['access_token']\r\n self.refresh_token = data['refresh_token']\r\n if hasattr(self, 'expires_in'):\r\n try:\r\n # python3 dosn't support long\r\n seconds = long(self.expires_in)\r\n except:\r\n seconds = int(self.expires_in)\r\n self.token_expires = mktime((datetime.utcnow() + timedelta(\r\n seconds=seconds)).timetuple())", "title": "" }, { "docid": "541d6cffa30a2d2cd6e59f61f777efe6", "score": "0.6183307", "text": "def get_token(self):\n current_app.logger.debug('<Getting token')\n token_url = current_app.config.get('PAYBC_DIRECT_PAY_BASE_URL') + '/oauth/token'\n basic_auth_encoded = base64.b64encode(\n bytes(current_app.config.get('PAYBC_DIRECT_PAY_CLIENT_ID') + ':' + current_app.config.get(\n 'PAYBC_DIRECT_PAY_CLIENT_SECRET'), 'utf-8')).decode('utf-8')\n data = 'grant_type=client_credentials'\n token_response = self.post(token_url, basic_auth_encoded, AuthHeaderType.BASIC, ContentType.FORM_URL_ENCODED,\n data)\n current_app.logger.debug('>Getting token')\n return token_response", "title": "" }, { "docid": "f7b1589666478ba1076109251656c881", "score": "0.61614287", "text": "def get_token():\n headers = {\"Content-Type\": \"application/json\"}\n # import pdb; pdb.set_trace()\n r = requests.post(TOKEN_URL, data=json.dumps(AUTH_BODY), headers=headers)\n if r.ok:\n data = r.json()\n # print json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))\n return r.headers['X-Subject-Token'], data\n return None, {}", "title": "" }, { "docid": "cb4a7d5097ad2f8c72987720de5a9a0f", "score": "0.6144372", "text": "def get_token(request):\n incoming_data = CheckEmailCodeSerializer(data=request.data)\n incoming_data.is_valid(raise_exception=True)\n\n user_serializer = UserTokenSerializer(data=request.data)\n user_serializer.is_valid(raise_exception=True)\n user = user_serializer.save()\n token = get_tokens_for_user(user)\n return Response(token, status=status.HTTP_200_OK)", "title": "" }, { "docid": "573928f881006ca41078081f918cc2fd", "score": "0.6142552", "text": "def get_api_token(self, jamf_url, enc_creds):\n url = jamf_url + \"/\" + 
self.api_endpoints(\"token\")\n r = self.curl(request=\"POST\", url=url, enc_creds=enc_creds)\n output = r.output\n if r.status_code == 200:\n try:\n token = str(output[\"token\"])\n expires = str(output[\"expires\"])\n\n # write the data to a file\n self.write_token_to_json_file(jamf_url, output)\n self.output(\"Session token received\")\n self.output(f\"Token: {token}\", verbose_level=2)\n self.output(f\"Expires: {expires}\", verbose_level=2)\n return token\n except KeyError:\n self.output(\"ERROR: No token received\")\n else:\n self.output(\"ERROR: No token received\")", "title": "" }, { "docid": "f850116a9edb5b55548dbac7848a1e16", "score": "0.6138746", "text": "async def token(*, user: User = Depends(get_authenticated_user)):\n return user.data", "title": "" }, { "docid": "c992d09ec2f004dd15722e290d38b4ab", "score": "0.61281425", "text": "def send_api_request(self, url, options=None):\n\n # requests a new access token if the current one is expired\n if self.is_token_expired():\n self.request_access_token_client_auth_flow()\n\n headers = {\n 'Authorization': 'Bearer {}'.format(self.token)\n }\n\n req = requests.get(url, headers=headers, params=options)\n json = req.json()\n\n if 'error' in json:\n raise ApiError(json['error']['status'], json['error']['message'])\n\n return req.json()", "title": "" }, { "docid": "0bef8f4f9b2a97b096c840af63490fb6", "score": "0.61228526", "text": "def _request(self, base_url, client_id, client_secret,\n parameters, **kwargs):\n logging.debug('Getting an OAuth token for client \"%s\" with scope \"%s\"',\n client_id, parameters.get('scope'))\n headers = {'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept': 'application/json'}\n\n api = API(base_url,\n auth_username=client_id,\n auth_password=client_secret,\n **kwargs)\n endpoint = api.auth.token\n\n response = yield endpoint.post(body=urllib.urlencode(parameters),\n request_timeout=60,\n headers=headers)\n\n logging.debug('Received token: %s', response.get('access_token'))\n raise Return(response)", "title": "" }, { "docid": "a2ea91f6f57e546304785201fcad0771", "score": "0.61227226", "text": "def token(args):\n import h.auth.tokens\n request = bootstrap(args)\n FakeRequest = collections.namedtuple(\n 'FakeRequest', 'authenticated_userid host_url registry')\n fake_request = FakeRequest(\n authenticated_userid=args.sub,\n host_url=request.host_url,\n registry=request.registry\n )\n print(h.auth.tokens.generate_jwt(fake_request, 3600))", "title": "" }, { "docid": "eaffa3097cfc39c7ef9f281c4c0bef06", "score": "0.6111735", "text": "def get_new_token(protocol, host, port, cred_manager):\n auth_client = setup_auth_client(protocol, host, port)\n return perform_auth_for_handler(auth_client, host, port, cred_manager)", "title": "" }, { "docid": "5a4e4176b3bde6e0ce881236b48e5137", "score": "0.61111844", "text": "def get(self, **kwargs):\n token = self._get_bearer_token(**kwargs)\n resp = requests.get(\n self.get_request_url(), headers={\"Authorization\": \"Bearer %s\" % token}\n )\n return resp", "title": "" }, { "docid": "a30a5bca8f1a0a6ae79d86eaaa17db82", "score": "0.611095", "text": "def _get_token(self, host=False, username=False, password=False):\n if not host:\n host = self.env.company.tyrecheck_host\n if not username:\n username = self.env.company.tyrecheck_username\n if not password:\n password = self.env.company.tyrecheck_password\n\n data = {\n 'grant_type': 'password',\n 'username': username,\n 'password': password\n }\n\n host = \"%s/api/token\" % (host,)\n response = requests.post(host, 
data=data)\n\n if response.status_code == 200:\n response_json = response.json()\n token = response_json['access_token']\n else:\n raise ValidationError(_('Error retrieving TyreCheck Authorization Token.'))\n\n return token", "title": "" }, { "docid": "b289eabc1b172142f1f0eef7f8f86f8f", "score": "0.61055595", "text": "def get_oauth_token(self, encoded_key_secret):\n auth_url = 'https://api.awhere.com/oauth/token'\n auth_headers = {\n \"Authorization\": \"Basic %s\" % encoded_key_secret,\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n body = \"grant_type=client_credentials\"\n print('\\nget_oauth_token:: Headers: %s' % auth_headers)\n sys.stdout.flush()\n print('\\nget_oauth_token:: Body: %s' % body)\n sys.stdout.flush()\n response = rq.post(auth_url, headers=auth_headers, data=body)\n # .json method is a requests lib method that decodes the response\n responseJSON = response.json()\n print('\\nget_oauth_token:: ResponseJSON: %s' % responseJSON)\n sys.stdout.flush()\n return responseJSON['access_token']", "title": "" }, { "docid": "70ce017732cf5eaaeb122949abd7820e", "score": "0.60929495", "text": "def make_authorized_get_request(service_url):\n credentials, project = google.auth.default()\n authed_session = AuthorizedSession(credentials)\n\n response = authed_session.get(service_url)\n print(response.content)\n return response.content", "title": "" }, { "docid": "ecf4668c21ae857afb69af8875bd94b1", "score": "0.60925984", "text": "def generateToken(self):\n url = \"https://arcgis.com/sharing/rest/generateToken\"\n data = {'username': self.username,\n 'password': self.password,\n 'referer': \"https://www.arcgis.com\",\n 'f': 'json'}\n return requests.post(url, data, verify=False).json()['token']", "title": "" }, { "docid": "5c6c561e741c47ba183ec53d40407010", "score": "0.60894954", "text": "def get_oauth_token():\n return session.get('oauth_token')", "title": "" }, { "docid": "e54a2b82b587ae0e22d028c5da5a4761", "score": "0.6084744", "text": "def get_service_access_token(token_url, audience, client_id, client_secret):\n r = requests.post(\n token_url,\n data={\n 'audience': audience,\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'grant_type': 'client_credentials'\n })\n return r.json()['access_token']", "title": "" }, { "docid": "d72d3f3473631242a160d772eb22821b", "score": "0.608122", "text": "def make_request(self, endpoint):\n token = self.prepare_token()\n url = 'https://api.{0}{1}'.format(self.url, endpoint)\n headers = {'authorization': 'bearer ' + token}\n req = requests.get(url=url, headers=headers)\n return req", "title": "" }, { "docid": "813bed66015871c8d423fb9385eb0d38", "score": "0.60780424", "text": "def get_token(self, key_id: str, secret_key: str) -> str:\n\n auth_token_bytes = f\"{key_id}:{secret_key}\".encode(\"ascii\")\n base64_auth_token_bytes = base64.b64encode(auth_token_bytes)\n base64_auth_token = base64_auth_token_bytes.decode(\"ascii\")\n\n proxies = {\"https\": self.proxy_string} if self.proxy_string else None\n\n params = {\"grant_type\": \"client_credentials\", \"scope\": \"\"}\n\n headers = {\n \"Authorization\": f\"Basic {base64_auth_token}\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"application/json\",\n \"X-App-Name\": f\"{self.application_name}\",\n }\n\n access_token = requests.post(\n url=urlparse.urljoin(self.url, \"token\"),\n proxies=proxies,\n headers=headers,\n data=params,\n )\n\n token: Optional[str] = access_token.json().get(\"access_token\", None)\n if token:\n return token\n\n raise 
V4APIError(\"Could not extract 'access_token' from the result\")", "title": "" }, { "docid": "482e14b05d711e14af4059856bc24617", "score": "0.60686547", "text": "def _generate_token_for_request(self, request: Request) -> None:\n assert request.vendor_info is not None\n token_generation_request = TokenGeneration(vendor_info=request.vendor_info)\n\n result = self.send_request(token_generation_request, True)\n token = f'Bearer {result.get(\"token\")}'\n request.bearer_token = token\n if USE_GLOBAL_AUTH_TOKENS:\n request.vendor_info.bearer_token = token", "title": "" }, { "docid": "abac4b2f6a6efc427afbfea8cf03ab30", "score": "0.60638034", "text": "def get_user_access_token(token):\n url = \"{}{}\".format(get_host_url(), URLS.get_access_token.value)\n try:\n headers = {\"Authorization\": \"Token {}\".format(token[\"token\"])}\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n except requests.exceptions.HTTPError:\n if response.status_code in EVALAI_ERROR_CODES:\n echo(\n style(\n \"\\nUnable to fetch auth token.\\n\",\n bold=True,\n fg=\"red\",\n )\n )\n sys.exit(1)\n except requests.exceptions.RequestException:\n echo(\n style(\n \"\\nCould not establish a connection to EvalAI.\"\n \" Please check the Host URL.\\n\",\n bold=True,\n fg=\"red\",\n )\n )\n sys.exit(1)\n token = response.json()\n return token", "title": "" }, { "docid": "176747f809bbe30618f1c45d6b85b55c", "score": "0.60629624", "text": "def func(request: Request, response: Response) -> Token:\n handle_errors(request, response)\n return Token(response.content, uses_pkce)", "title": "" }, { "docid": "51b62763a4bbb110538f067eac6bd80e", "score": "0.6051607", "text": "def request_access_token(user_key, user_secret):\n \n token_url = 'https://api.eumetsat.int/token'\n\n data = {\n 'grant_type': 'client_credentials'\n }\n \n r = requests.post(token_url, data=data, auth=(user_key, user_secret))\n handle_response_errors(r)\n access_token = r.json()['access_token']\n\n return access_token", "title": "" }, { "docid": "8e6c70bc4341e4c265ebc9da11e15b7b", "score": "0.6049303", "text": "def _get_user_token(self):\n \n args = {}\n result = _send(self, path=\"/user\", post=args, method=\"acctoken\")\n \n if result: \n return result['token']", "title": "" }, { "docid": "bd3ce7a020c0b915344a526cd46ca5f1", "score": "0.6047967", "text": "def _get_token(self, action_result, from_action=False):\n config = self.get_config()\n username = UnicodeDammit(config.get('username')).unicode_markup.encode('utf-8')\n auth = (username, config.get('password'))\n headers = {\n 'Content-Type': 'application/json'\n }\n\n ret_val, resp_json = self._make_rest_call(\"{}{}\".format(self._base_url, \"/auth\"), action_result, verify=self._verify, headers=headers, auth=auth, method='post')\n\n if (phantom.is_fail(ret_val)):\n self._state['session_key'] = None\n self._session_key = None\n self.save_state(self._state)\n return action_result.get_status()\n\n self._state['session_key'] = resp_json\n self._session_key = resp_json\n self.save_state(self._state)\n\n return phantom.APP_SUCCESS", "title": "" }, { "docid": "cd67836f10bc8f7cf840e176a867344b", "score": "0.6047391", "text": "def get_token(self) -> str:\n\n with self._lock:\n if (\n not self._token_info.get(\"access_token\")\n or self.is_refresh_token_expired()\n ):\n self._request_token()\n elif self.is_token_expired():\n self._refresh_token()\n\n return self._token_info.get(\"access_token\")", "title": "" }, { "docid": "c9c27cc8f6b924d777777b3ce2d6373c", "score": "0.604659", "text": "def 
get_token():\n access_token = _get_access_token()\n click.echo(\"\\nAccess Token: \\n%s\" % access_token)", "title": "" }, { "docid": "07307fd87dd1b4d8b2abdb8e45350f68", "score": "0.60444045", "text": "def request_oauth_token(environ, crypto_box):\n oauth = OAuth1Session(config.CLIENT_KEY, client_secret=config.CLIENT_SECRET)\n\n try:\n fetch_response = oauth.fetch_request_token(config.REQUEST_TOKEN_URL)\n except ValueError as err:\n raise OAuthError(err.message, \"500 Internal Server Error\")\n resource_owner_secret = fetch_response.get('oauth_token_secret')\n\n # encrypt secret\n nonce = nacl.utils.random(nacl.public.Box.NONCE_SIZE)\n oauth_token_secret_encr = base64.urlsafe_b64encode(crypto_box.encrypt(resource_owner_secret.encode(\"utf8\"), nonce)).decode(\"ascii\")\n\n authorization_url = oauth.authorization_url(config.AUTHORIZATION_URL)\n # append callback URL because our callback URL is dynamic and cannot be configured in the consumer settings of osm.org\n query_str_appendix = \"oauth_token_secret_encr={}\".format(urllib.parse.quote(oauth_token_secret_encr))\n callback_url = urllib.parse.quote(reconstruct_url(environ, True, query_str_appendix, config.LANDING_PAGE_URL_PARAM))\n authorization_url += \"&oauth_callback={}\".format(callback_url)\n return authorization_url", "title": "" }, { "docid": "5fa1f1093fbb30858a4362efb917b494", "score": "0.6035142", "text": "async def get_token(self, *scopes: str) -> AccessToken:\n if self._endpoint_available is None:\n # Lacking another way to determine whether the IMDS endpoint is listening,\n # we send a request it would immediately reject (missing a required header),\n # setting a short timeout.\n try:\n await self._client.request_token(scopes, method=\"GET\", connection_timeout=0.3, retry_total=0)\n self._endpoint_available = True\n except (ClientAuthenticationError, HttpResponseError):\n # received a response a pipeline policy choked on (HttpResponseError)\n # or that couldn't be deserialized by AuthnClient (AuthenticationError)\n self._endpoint_available = True\n except Exception: # pylint:disable=broad-except\n # if anything else was raised, assume the endpoint is unavailable\n self._endpoint_available = False\n\n if not self._endpoint_available:\n raise ClientAuthenticationError(message=\"IMDS endpoint unavailable\")\n\n if len(scopes) != 1:\n raise ValueError(\"this credential supports one scope per request\")\n\n token = self._client.get_cached_token(scopes)\n if not token:\n resource = scopes[0]\n if resource.endswith(\"/.default\"):\n resource = resource[: -len(\"/.default\")]\n params = {\"api-version\": \"2018-02-01\", \"resource\": resource}\n if self._client_id:\n params[\"client_id\"] = self._client_id\n token = await self._client.request_token(scopes, method=\"GET\", params=params)\n return token", "title": "" }, { "docid": "0b6d2b5f8604d03ee7caea2cc009fc09", "score": "0.60266113", "text": "def API_call(self, url):\n token = self.get_access_token_for_user()\n self.logger.debug('calling url: {}'.format(url))\n response = requests.get(url, params={'access_token': token})\n if response.status_code == 401:\n if response.json()['error']['message'] == u'Invalid Credentials':\n new_token = self.refresh_access_token_for_user()\n self.logger.debug('new token: {}'.format(new_token))\n self.logger.debug('response: {}'.format(response.content))\n return json.loads(response.content)", "title": "" }, { "docid": "c030fcce555000f5d38a1c5317be1d59", "score": "0.60094506", "text": "def get_auth_token():\n key = get_response1()\n key = key.json()\n 
auth_token_og = key['auth_token']\n\n return auth_token_og", "title": "" } ]
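
Several of the token passages above share one pattern: POST client credentials to a token endpoint, then read the access token out of the JSON body. A minimal self-contained sketch of that pattern follows; the function name, endpoint argument, and payload keys are illustrative assumptions, not any one provider's API.

import requests

# Minimal client-credentials token fetch (sketch). The payload keys and the
# shape of the JSON response are illustrative assumptions; providers differ.
def fetch_access_token(token_url, client_id, client_secret):
    payload = {
        "grant_type": "client_credentials",
        "client_id": client_id,
        "client_secret": client_secret,
    }
    response = requests.post(token_url, data=payload, timeout=10)
    response.raise_for_status()  # surface 4xx/5xx instead of parsing an error body
    return response.json()["access_token"]

Callers that cache the token would also track the expiry field returned alongside it, as several of the passages above do with expires_in.
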
1d7b961c4d756a1399b3a684b2970aa4
Prints the interval and point estimates and returns them as a list.
[ { "docid": "44e2ae3e49eff67b9fb8472bb5a35fc1", "score": "0.0", "text": "def report(lkhd):\r\n\r\n ind = min_coord(lkhd)\r\n maxmin = bounds_coord(lkhd)\r\n print('Odhad alfa je {:.1f} a lezi v intervalu {:.1f} az {:.1f}'.format(\r\n alfa[ind[1]], np.amin(alfa[maxmin[1, :]]), np.amax(alfa[maxmin[1, :]])))\r\n print('Odhad beta je {:.2f} a lezi v intervalu {:.2f} az {:.2f}'.format(\r\n beta[ind[0]], np.amin(beta[maxmin[0, :]]), np.amax(beta[maxmin[0, :]])))\r\n return [np.amin(alfa[maxmin[1, :]]), alfa[ind[1]], np.amax(alfa[maxmin[1, :]]),\r\n np.amin(beta[maxmin[0, :]]), beta[ind[0]], np.amax(beta[maxmin[0, :]])]", "title": "" } ]
[ { "docid": "4dba9c6fe59a97d18ff90d5380eee0ed", "score": "0.6994862", "text": "def interval(self):\n return self.__interval", "title": "" }, { "docid": "abf75eb67d30f8eb092840890ba092db", "score": "0.69546163", "text": "def get_interval(self):\n raise NotImplementedError('Implement me!')", "title": "" }, { "docid": "2cbeb39bfabcd3df4e38435374cadd2f", "score": "0.6783422", "text": "def interval ( self ) :\n return self.__interval", "title": "" }, { "docid": "a9a1c8005f4f533546325819a0b5f3f7", "score": "0.6780356", "text": "def get_interval(self) -> float:\n return self.interval", "title": "" }, { "docid": "6f5dfa780325a878473567eaad893f27", "score": "0.67410415", "text": "def interval(self) -> int:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "5564557c74d1c5e411d76df20760f6d5", "score": "0.6681937", "text": "def _get_interval(self):\n return self.__interval", "title": "" }, { "docid": "5564557c74d1c5e411d76df20760f6d5", "score": "0.6681937", "text": "def _get_interval(self):\n return self.__interval", "title": "" }, { "docid": "5564557c74d1c5e411d76df20760f6d5", "score": "0.6681937", "text": "def _get_interval(self):\n return self.__interval", "title": "" }, { "docid": "5564557c74d1c5e411d76df20760f6d5", "score": "0.6681937", "text": "def _get_interval(self):\n return self.__interval", "title": "" }, { "docid": "1b16d645f4d2b0ab6e9f55111f2e75da", "score": "0.65394115", "text": "def interval(self):\r\n return self._interval", "title": "" }, { "docid": "31674ce69a1e393c5da27a9dab43ce1e", "score": "0.64011383", "text": "def __init__(self, interval: int = 5):\n self.interval = interval", "title": "" }, { "docid": "3826358c676f0bbd6fdd3181bda3a40a", "score": "0.6377198", "text": "def test_get_interval():\n assert common.get_interval([0, 10], 1) == 5\n assert common.get_interval([1, 10], 2) == 3\n assert common.get_interval([0, 10], 3) == 5\n assert common.get_interval([0, 9], 4) == 3\n assert common.get_interval([500, 1200], 3) == 350", "title": "" }, { "docid": "5283bc3cf461fa0c5d9218f4f22bf0f1", "score": "0.6336235", "text": "def interval(self):\n return self._interval", "title": "" }, { "docid": "5283bc3cf461fa0c5d9218f4f22bf0f1", "score": "0.6336235", "text": "def interval(self):\n return self._interval", "title": "" }, { "docid": "aefb864c797bd900bfce75fe064d1f9d", "score": "0.6302834", "text": "def interval(self) -> int:\n return self._config.get('interval', 10)", "title": "" }, { "docid": "a6c2e5a0fbba84346e422036f4e56312", "score": "0.6160085", "text": "def interval(self) -> Optional[int]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "a6c2e5a0fbba84346e422036f4e56312", "score": "0.6160085", "text": "def interval(self) -> Optional[int]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "a6c2e5a0fbba84346e422036f4e56312", "score": "0.6160085", "text": "def interval(self) -> Optional[int]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "6799c26d277b875b5e8b043f0aa30271", "score": "0.60926014", "text": "def interval_now(): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "7c1bc55f137e4256e09b8a3c49ee28f0", "score": "0.6078415", "text": "def intervalli(self, oraInizio, minutoInizio, oraFine, minutoFine, giorno=None):\n if giorno is None:\n giorno = self.giornoFeriale\n inizio = giorno.replace(hour=oraInizio, minute=minutoInizio)\n fine = giorno.replace(hour=oraFine, minute=minutoFine)\n if fine < inizio:\n fine = fine + datetime.timedelta(days=1)\n return inizio, fine", 
"title": "" }, { "docid": "7c1bc55f137e4256e09b8a3c49ee28f0", "score": "0.6078415", "text": "def intervalli(self, oraInizio, minutoInizio, oraFine, minutoFine, giorno=None):\n if giorno is None:\n giorno = self.giornoFeriale\n inizio = giorno.replace(hour=oraInizio, minute=minutoInizio)\n fine = giorno.replace(hour=oraFine, minute=minutoFine)\n if fine < inizio:\n fine = fine + datetime.timedelta(days=1)\n return inizio, fine", "title": "" }, { "docid": "929d8b7cbcf2fdfd2c084266d83ae03c", "score": "0.60353875", "text": "def _interval_(self):\n # always ensures that it is a float\n if hasattr(self, '_interval'):\n return np.mean(self._interval)\n if hasattr(self, '_backup_interval_'):\n return self._backup_interval_", "title": "" }, { "docid": "54847cd77936e9cd7ac51d099c41452f", "score": "0.60199785", "text": "def interval(a, b):\n \"*** YOUR CODE HERE ***\"\n return a, b", "title": "" }, { "docid": "178d9c0f8029cfa2d0744a20b0acdd22", "score": "0.6015794", "text": "def interval( self, epoch_list ):\n\n if self.debug_mode:\n pdb.set_trace()\n\n secs = float(time.mktime(time.localtime( epoch_list[1] - epoch_list[0] ) ) )\n\n #the return float hours in base 10\n return '%.2f'%(float(secs / 60 / 60))", "title": "" }, { "docid": "1a72dcd4326ce739ad87d023293bfc32", "score": "0.6007642", "text": "def interval() -> str:\n return '1w'", "title": "" }, { "docid": "8710feb2a7c4e49027e6d582b21e1e9d", "score": "0.5982972", "text": "def interval(self):\n\n return self._interval", "title": "" }, { "docid": "4d1c75e612b61fa6e400028eb07124bb", "score": "0.5976665", "text": "def _get_ka_interval(self):\n return self.__ka_interval", "title": "" }, { "docid": "d40784692fc9f8cac68a205380bfe8e2", "score": "0.59458447", "text": "def interval(start, end):\n return seconds_since_midnight(end) - seconds_since_midnight(start)", "title": "" }, { "docid": "d40784692fc9f8cac68a205380bfe8e2", "score": "0.59458447", "text": "def interval(start, end):\n return seconds_since_midnight(end) - seconds_since_midnight(start)", "title": "" }, { "docid": "b2c776410339f4050f914b18679c3455", "score": "0.5932719", "text": "def seconds():", "title": "" }, { "docid": "595deb3590beec8444a6a0f8df70fd4b", "score": "0.59268177", "text": "def _intervalOf(self, t):\n elapsedTime = t - self.starttime\n intervalNum = int(elapsedTime / self.interval)\n return intervalNum", "title": "" }, { "docid": "4f4e194d93e840d7ba36fee3e31a915c", "score": "0.5877838", "text": "def interval_sec(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"interval_sec\")", "title": "" }, { "docid": "4f4e194d93e840d7ba36fee3e31a915c", "score": "0.5877838", "text": "def interval_sec(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"interval_sec\")", "title": "" }, { "docid": "ce51c4cd52e84ed07f767083f51d8e08", "score": "0.5842618", "text": "def time_interval(self) -> dict:\n pass", "title": "" }, { "docid": "1057474373b92580adfe3da29ab93085", "score": "0.57866925", "text": "def __init__(self, min=0,sec=0):\r\n self.min = min #Nbre de minute\r\n self.sec = sec #Nbre de seconde\r", "title": "" }, { "docid": "9bddc0de4624740988a2ee3becb68c63", "score": "0.5739263", "text": "def interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "9bddc0de4624740988a2ee3becb68c63", "score": "0.5739263", "text": "def interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "09fa485563e3e016d5fa60ecc486f74d", "score": "0.5737106", "text": "def 
_check_interval(self):\n\n if khertz_to_hertz(self.params[\"interval\"]) < MIN_INTERVAL:\n logger.error(\"Low interval provided:{}\".format(self.params[\"interval\"]))\n logger.error(\"Overriding with {}\".format(MIN_INTERVAL))\n self.params[\"interval\"] = MIN_INTERVAL", "title": "" }, { "docid": "63d55752efff9a6d469d5cc84f68c86d", "score": "0.57145864", "text": "def interval(self) -> Optional[str]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "63d55752efff9a6d469d5cc84f68c86d", "score": "0.57145864", "text": "def interval(self) -> Optional[str]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "63d55752efff9a6d469d5cc84f68c86d", "score": "0.57145864", "text": "def interval(self) -> Optional[str]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "63d55752efff9a6d469d5cc84f68c86d", "score": "0.57145864", "text": "def interval(self) -> Optional[str]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "63d55752efff9a6d469d5cc84f68c86d", "score": "0.57145864", "text": "def interval(self) -> Optional[str]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "63d55752efff9a6d469d5cc84f68c86d", "score": "0.57145864", "text": "def interval(self) -> Optional[str]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "63d55752efff9a6d469d5cc84f68c86d", "score": "0.57145864", "text": "def interval(self) -> Optional[str]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "63d55752efff9a6d469d5cc84f68c86d", "score": "0.57145864", "text": "def interval(self) -> Optional[str]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "a5b473a735adc804f6c21d88c18df9d5", "score": "0.5708487", "text": "def get_data_interval(self):\n\n interval = (dt.datetime.combine(dt.date.today(), self.close.columns[1]) -\n dt.datetime.combine(dt.date.today(), self.close.columns[0]))\n self.interval_of_data = int(interval.seconds/60)", "title": "" }, { "docid": "4be86006462d5041add3fb29e047322e", "score": "0.57040346", "text": "def interval(self, prob):\n\n return self.p0rv.interval(prob)", "title": "" }, { "docid": "4be86006462d5041add3fb29e047322e", "score": "0.57040346", "text": "def interval(self, prob):\n\n return self.p0rv.interval(prob)", "title": "" }, { "docid": "237ebad0484db1d65fad9d94122a91b3", "score": "0.57011825", "text": "def InValueLifespan(self):\r\n t = self.__innerwire.GetInValueLifespan()\r\n if t < 0:\r\n return t\r\n return float(t) / 1000.0", "title": "" }, { "docid": "237ebad0484db1d65fad9d94122a91b3", "score": "0.57011825", "text": "def InValueLifespan(self):\r\n t = self.__innerwire.GetInValueLifespan()\r\n if t < 0:\r\n return t\r\n return float(t) / 1000.0", "title": "" }, { "docid": "614640637ee5372359bdca78acc45488", "score": "0.5698848", "text": "def __getTimeInterval(self):\n\n if self.__iss_code > 3: # we switch on the heartbeat for ISS in proximity or error codes\n self.__heartbeat_interval = False\n if self.__heartbeat_interval:\n rv = 2.0\n else:\n rv = 0.3\n self.__heartbeat_interval = not self.__heartbeat_interval\n return rv", "title": "" }, { "docid": "4ab71fb8a6ba7db89145991c5fd6b2e6", "score": "0.56567156", "text": "def __init__(self):\n self.intervals = []", "title": "" }, { "docid": "766ad1c8656543b3c044a601811715d9", "score": "0.5639476", "text": "def set_update_interval(self, t):\n if (t >= 0.1) and (t < 2.0):\n self._interval = t", "title": "" }, { "docid": "fefbad913daac0f7072b989dcbe31519", "score": "0.56091285", "text": "def 
find_timeint(self):\n with tb.table(self.ms, ack=False) as t:\n Ntimes = len(set(t.getcol('TIME')))\n with tb.table(self.ms+'/OBSERVATION', ack=False) as t:\n deltat = (t.getcol('TIME_RANGE')[0][1]-t.getcol('TIME_RANGE')[0][0])/Ntimes\n logger.debug('%s: Time interval: %f s' (self.ms, deltat))\n return deltat", "title": "" }, { "docid": "6c047e8dfa407f371f43c707a575328c", "score": "0.5603107", "text": "def g_A2R_simu_interval(abslute_time_in_simu_intetrval_no):\n return abslute_time_in_simu_intetrval_no - g_start_simu_interval_no", "title": "" }, { "docid": "fb025c119b142c7cb939dab659548d89", "score": "0.559256", "text": "def InValueLifespan(self):\r\n t = self._subscription.GetInValueLifespan()\r\n if t < 0:\r\n return t\r\n return float(t) / 1000.0", "title": "" }, { "docid": "fb025c119b142c7cb939dab659548d89", "score": "0.559256", "text": "def InValueLifespan(self):\r\n t = self._subscription.GetInValueLifespan()\r\n if t < 0:\r\n return t\r\n return float(t) / 1000.0", "title": "" }, { "docid": "040ef8d68d2961ff1ff1bff526408034", "score": "0.55705196", "text": "def interval_in_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"interval_in_seconds\")", "title": "" }, { "docid": "040ef8d68d2961ff1ff1bff526408034", "score": "0.55705196", "text": "def interval_in_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"interval_in_seconds\")", "title": "" }, { "docid": "0d53e31c14f4b35d9ea12d277282c25b", "score": "0.55526066", "text": "def getCycleInterval(self, **kwargs):\n \n pass", "title": "" }, { "docid": "b4f0ee0f73ca0c2d06ab2bc911c5ea45", "score": "0.5544083", "text": "def get_new_interval(duthost, is_valid):\n\n detection_time, restoration_time = get_detection_restoration_times(duthost)\n if is_valid:\n return max(detection_time, restoration_time) - 10\n else:\n return min(detection_time, restoration_time) + 10", "title": "" }, { "docid": "f5ed3a7c1abd239f5b37fed90153d6b4", "score": "0.55374026", "text": "def minimumIntegrationTime():\n return 0.5", "title": "" }, { "docid": "37b9f0da57a219fc7fea52324812d7fd", "score": "0.5526653", "text": "def pw_linear(interval):\n\n t = interval[1]\n b = maxy / self.ny\n\n if t <= 0:\n return 0\n if 0 < t < b:\n return t / b\n if b <= t:\n return 1", "title": "" }, { "docid": "78f127b79de463b47bb4be482d1c5d1f", "score": "0.5498351", "text": "def setInterval(self, interval):\n\t\tself._interval = interval", "title": "" }, { "docid": "e25ccde8a9b1e2725f9a2162e5a0f5af", "score": "0.5495083", "text": "def InValueLifespan(self):\r\n t = self.__innerpipe.GetInValueLifespan()\r\n if t < 0:\r\n return t\r\n return float(t) / 1000.0", "title": "" }, { "docid": "e5dced7f2e3ed4585cb74dcbdf836ca4", "score": "0.54757977", "text": "def my_function(time):\n if start_time <= time <= end_time:\n y = value\n else:\n y = 0.0\n return y", "title": "" }, { "docid": "71041968229d9a03945863f8cb001dbb", "score": "0.5472091", "text": "def interval_in_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"interval_in_seconds\")", "title": "" }, { "docid": "e4f049f6fc421d5dadad571117f052d3", "score": "0.5471972", "text": "def GetTimeConst(self):\r\n tc = self.__GetSomething('OFLT?')\r\n # 1e-5 * 10**np.floor(int(tc)/2) * (1+2*(int(tc)%2)) #numerischer Wert \r\n return int(tc)", "title": "" }, { "docid": "3adcac330c1daa85445be5cc849b3384", "score": "0.54593885", "text": "def estimate_interval(self) -> Dict[str, float]:\n raise NotImplementedError", "title": "" }, { "docid": "23a9304759355632decc1e2b743bfc51", "score": "0.54215336", "text": 
"def __init__(self, heure=0, min=0, sec=0):\n self.heure = heure # Nombre de minutes\n self.min = min # Nombre de minutes\n self.sec = sec # Nombre de secondes", "title": "" }, { "docid": "8a76941681db4325083337d0adaae893", "score": "0.53904676", "text": "async def get_interval(self) -> int:\n raw_bytes = await self.device.read_gatt_char(self.AR4_READ_INTERVAL)\n return int.from_bytes(raw_bytes, byteorder=\"little\")", "title": "" }, { "docid": "755acc765e302d274ab8a9f0ffa80f69", "score": "0.5384756", "text": "def overtime_interval(self):\n return self._overtime_interval", "title": "" }, { "docid": "7e3371fc1a3053b7099dce479d6587a5", "score": "0.5381649", "text": "def test_interval():\n\n def f(x):\n value = 2*x\n return value\n actual = [f(0),f(.2),f(.4),f(.6),f(.8),f(1)]\n trial = gaussian.interval(f,0,1,.2)\n\n print(\"Testing f(0):\",actual[0],\" ?= \",trial[0])\n nose.tools.assert_almost_equal(actual[0], trial[0], 4)\n\n print(\"Testing f(.2):\",actual[1],\" ?= \",trial[1])\n nose.tools.assert_almost_equal(actual[1], trial[1], 4)\n\n print(\"Testing f(.4):\",actual[2],\" ?= \",trial[2])\n nose.tools.assert_almost_equal(actual[2], trial[2], 4)\n\n print(\"Testing f(.6):\",actual[3],\" ?= \",trial[3])\n nose.tools.assert_almost_equal(actual[3], trial[3], 4)\n\n print(\"Testing interval f(.8):\",actual[4],\" ?= \",trial[4])\n nose.tools.assert_almost_equal(actual[4], trial[4], 4)\n\n print(\"Testing interval f(1.0):\",actual[5],\" ?= \",trial[5])\n nose.tools.assert_almost_equal(actual[5], trial[5], 4)\n\n len_actual = len(actual)\n len_trial = len(trial)\n print(\"Testing length of intervals:\",len_actual,\" ?= \",len_trial)\n nose.tools.assert_almost_equal(len_actual, len_trial)", "title": "" }, { "docid": "4c71ab3ac4290679972cd2adb1b83dba", "score": "0.53768575", "text": "def _set_interval(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/ap-interfaces', defining_module='openconfig-ap-interfaces', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interval must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/ap-interfaces', defining_module='openconfig-ap-interfaces', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__interval = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "4c71ab3ac4290679972cd2adb1b83dba", "score": "0.53768575", "text": "def _set_interval(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/ap-interfaces', defining_module='openconfig-ap-interfaces', yang_type='uint32', is_config=True)\n except (TypeError, 
ValueError):\n raise ValueError({\n 'error-string': \"\"\"interval must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/ap-interfaces', defining_module='openconfig-ap-interfaces', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__interval = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "62a43bf290a6ab7814ed74f51478702c", "score": "0.537359", "text": "def _set_interval(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/ap-interfaces', defining_module='openconfig-ap-interfaces', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interval must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/ap-interfaces', defining_module='openconfig-ap-interfaces', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__interval = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "62a43bf290a6ab7814ed74f51478702c", "score": "0.537359", "text": "def _set_interval(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/ap-interfaces', defining_module='openconfig-ap-interfaces', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interval must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/ap-interfaces', defining_module='openconfig-ap-interfaces', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__interval = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "1af896175fd1ed460099fd601d35db16", "score": "0.53696156", "text": "def index_interval(self, value1, value2):\n return self.index(value2) - self.index(value1)", "title": "" }, { "docid": "b6961d56aa2a80f373f46c1a5c3b8ffe", "score": "0.5358982", "text": "def interval_to_seconds(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "4a5b6f75c1eae8c1d3757e7bad88b679", "score": "0.535812", "text": "def 
interval_func(x_in, i):\n if i == -1:\n return y[0]\n elif i > n - 2:\n return y[-1]\n t1 = t(x_in, i)\n # Get the i-th set of 4 points.\n a1 = a[:, i]\n multiplier = np.matrix([(1 - t1) ** 3, 3 * t1 * (1 - t1) ** 2, 3 * (1 - t1) * t1 ** 2, t1 ** 3])\n return float(np.matmul(multiplier, a1))", "title": "" }, { "docid": "ffda78458fc6442a9bb3f64450ca6c62", "score": "0.53570175", "text": "def unsplit(self):\n return float(datetime.datetime.now() - self.split_start)", "title": "" }, { "docid": "9fedb8968360a041bc23892d97e0d7ac", "score": "0.53516454", "text": "def check_dilution_range(self):\n for dil, values in self.Cq.items():\n error_msg = 'To vide range of values for dilution: ' + dil + ' : ' + str(values)\n array = numpy.array(self.Cq[dil]) \n diff_from_mean = numpy.absolute(array - mean(array))\n while max(self.Cq[dil])-min(self.Cq[dil])> 0.4:\n ind = self.index[dil]\n if type(ind)==int:\n self.log.write(error_msg)\n self.failed_sample = True\n self.index[dil] = 'Fail'\n return \n else:\n ind = numpy.argmax(diff_from_mean)\n self.index[dil] = int(ind)\n self.poped_dilutes[dil] = self.Cq[dil].pop(ind)\n array = numpy.array(self.Cq[dil])\n diff_from_mean = numpy.absolute(array - mean(array))", "title": "" }, { "docid": "251dfdbfd59cfa09fbecbda6125240a7", "score": "0.5348321", "text": "def stop(self):\n self.stop = datetime.datetime.now()\n return float(self.stop - self.start)", "title": "" }, { "docid": "e27d130588627fe4d829a9c09423cffe", "score": "0.53328353", "text": "def span(self):\n if not self.exons: return 0\n b=self.boundaries() \n return b[1]-b[0]+1", "title": "" }, { "docid": "5fdd04a5cf63f5ffb80a54ec10bb18e9", "score": "0.5329595", "text": "def ticks_per_second(): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "29e2730e0eb2a625bb4b50a791d6df6d", "score": "0.5323456", "text": "def period(self):\n return 20 + self.pulse_width", "title": "" }, { "docid": "4d2057764e7f6b1a55d20004d9c80774", "score": "0.5322433", "text": "def spinner(seconds):\r\n pass", "title": "" }, { "docid": "54bf384a48f0bc3f2b43bd505dcc063c", "score": "0.5315206", "text": "def interval(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "54bf384a48f0bc3f2b43bd505dcc063c", "score": "0.5315206", "text": "def interval(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "54bf384a48f0bc3f2b43bd505dcc063c", "score": "0.5315206", "text": "def interval(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "54bf384a48f0bc3f2b43bd505dcc063c", "score": "0.5315206", "text": "def interval(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "54bf384a48f0bc3f2b43bd505dcc063c", "score": "0.5315206", "text": "def interval(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "54bf384a48f0bc3f2b43bd505dcc063c", "score": "0.5315206", "text": "def interval(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "54bf384a48f0bc3f2b43bd505dcc063c", "score": "0.5315206", "text": "def interval(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": "54bf384a48f0bc3f2b43bd505dcc063c", "score": "0.5315206", "text": "def interval(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"interval\")", "title": "" }, { "docid": 
"20a0436083c64e32f315c058446c142a", "score": "0.5312596", "text": "def _get_lsp_interval(self):\n return self.__lsp_interval", "title": "" }, { "docid": "46e4932d6d78d8891340b3486af73937", "score": "0.5310097", "text": "def poll_interval(self) -> timedelta:\n return timedelta(minutes=1)", "title": "" }, { "docid": "4edbd9e17bc88a2a110ceffdcade2507", "score": "0.53081197", "text": "def seconds_to_interval(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "efe9b6c42a728fa7dc387e2967b891d0", "score": "0.5302609", "text": "def set_interval(self, interval):\n if type(interval) != float or type(interval) != int:\n raise InvalidInput(\"Interval value must be of float or int\")\n self._config['interval']", "title": "" }, { "docid": "0ede3a52713671010ae2fb71cd0621d6", "score": "0.529422", "text": "def __neg__(self):\n return Intervalo(-self.hi,-self.lo)", "title": "" } ]
4c3ea5f57e7fba027a51a350dc39a2c3
Initialize the record, along with a new key.
[ { "docid": "cf8eccd5cbe67c3396ccd2fc2c6a72eb", "score": "0.7322634", "text": "def __init__(self, *args, **kwargs):\r\n record.Record.__init__(self, *args, **kwargs)\r\n self.key = UserKey()", "title": "" } ]
[ { "docid": "9f83103fd350a79e82cf1005d868d1e8", "score": "0.7128397", "text": "def test_init(self):\r\n fake_key = partial(Key, \"Eggs\", \"Bacon\")\r\n keys = [fake_key(str(uuid.uuid1())) for x in range(10)]\r\n rs = sets.KeyRecordSet(keys, Record)", "title": "" }, { "docid": "f6d677ad7672d5cda874f96aa9b01d4f", "score": "0.69936293", "text": "def __init__(self, key):\r\n self._key = key", "title": "" }, { "docid": "f6d677ad7672d5cda874f96aa9b01d4f", "score": "0.69936293", "text": "def __init__(self, key):\r\n self._key = key", "title": "" }, { "docid": "a483bc02ae5232eeb774c7cc51fc9189", "score": "0.69578224", "text": "def __init__(self):\r\n self.record = {}", "title": "" }, { "docid": "3d0430ec76e772cec414613a146fb283", "score": "0.69389796", "text": "def __init__(self, key):\n\n self.key = key", "title": "" }, { "docid": "6f16fbe48f0be9dfb01f906ef1e8ff5e", "score": "0.68714964", "text": "def __init__(self, key):\n self.key = key", "title": "" }, { "docid": "6f16fbe48f0be9dfb01f906ef1e8ff5e", "score": "0.68714964", "text": "def __init__(self, key):\n self.key = key", "title": "" }, { "docid": "a005109978ac6b702d753b8a4afd67ed", "score": "0.68461466", "text": "def __init__(self, key):\r\n self.key = key", "title": "" }, { "docid": "dd948e7c748324f5640107bfc9cea8b2", "score": "0.67855954", "text": "def __init__(self, key=None):\n self.key = key", "title": "" }, { "docid": "79312e1123e73d6cc87f3ca676b6c445", "score": "0.6717723", "text": "def __init__(self, **kw):\r\n\r\n #: stores record id\r\n self._key = None\r\n\r\n #: stores database specific information\r\n self._payload = None\r\n\r\n #: stores record values\r\n self._values = {}\r\n\r\n #: stores dirty information\r\n self._dirty = {}\r\n\r\n for field in self.fields().values():\r\n if field.name in kw and not field.empty(kw[field.name]):\r\n value = kw[field.name]\r\n elif field.default is not None:\r\n value = field.default\r\n else:\r\n continue\r\n field.__set__(self, value)", "title": "" }, { "docid": "217c8027c53f413c2bcfed3fd98cd8af", "score": "0.6612929", "text": "def __init__(self, key, created_datetime):\n\n self.key = key\n self.created_datetime = created_datetime", "title": "" }, { "docid": "92373019aa57712ba64fb5f9120235b5", "score": "0.659555", "text": "def __init__(self, key):\n self.key = bytes(key, 'utf-8')", "title": "" }, { "docid": "6186a2f51d7ba5bde9e459fbf9970697", "score": "0.65674865", "text": "def __init__(self, record):\n self.record = record", "title": "" }, { "docid": "6186a2f51d7ba5bde9e459fbf9970697", "score": "0.65674865", "text": "def __init__(self, record):\n self.record = record", "title": "" }, { "docid": "76c4881a6df34c5be61aac0aacbfd7cb", "score": "0.6566365", "text": "def __init__(self, key, data):\n if not isinstance(key, Key):\n raise Exception(\"key argument should be a Key instance\")\n self.key = key\n super(Grouping3, self).__init__(data)", "title": "" }, { "docid": "75c769dd386e85b440b1f4f78c8d4b43", "score": "0.6520882", "text": "def __init__(self, first_id=1):\n\n self._key_base = first_id\n self._last_id = None", "title": "" }, { "docid": "dc1c04472e019988de8dd7649d47a601", "score": "0.64986914", "text": "def __init__(self, key):\n self.key = key\n self.key_type, self.key_string, self.key_comment = key.strip().split()\n self._validate_key()\n self.comment = self.key_comment.encode('ascii')\n self.fingerprint = self._fingerprint()", "title": "" }, { "docid": "ec9628d926293fbc2923731845cf2ed8", "score": "0.645218", "text": "def loadFromDBRecord(self, record):\n for keyname in 
record.keys():\n self[keyname] = record[keyname]", "title": "" }, { "docid": "f73dcd3219edac843c4682a09bf903ea", "score": "0.64423317", "text": "def __init__(__self__, *,\n key_data: Optional[str] = None,\n path: Optional[str] = None):\n if key_data is not None:\n pulumi.set(__self__, \"key_data\", key_data)\n if path is not None:\n pulumi.set(__self__, \"path\", path)", "title": "" }, { "docid": "0f88a0e8c8e16d03e7f3b1a49bea9bbc", "score": "0.643549", "text": "def _new_key(self) -> None:\r\n self._new_item(self.Items.KEY)", "title": "" }, { "docid": "e7a4fcaa28fa64656926b44a1f330213", "score": "0.6420075", "text": "def new(self, *args, **kwargs):\n keytype = kwargs.pop('keytype', 'record')\n return getattr(self, '%s_new'%(keytype))(*args, **kwargs)", "title": "" }, { "docid": "a2400f23e5a41c5b6ee3a794069d0489", "score": "0.6415501", "text": "def __init__(self, data):\n # Initialize key variables\n self.data = data", "title": "" }, { "docid": "a43300b22684c165fd3c523bd5d9661f", "score": "0.64124745", "text": "def initWithRecord(self, record):\n \n self.id = record['Id']\n self.subject = record['Subject']\n self.description = record['Description']\n self.status = record['Status']\n self.activityDate = record['ActivityDate']\n self.lastModifiedDate = datetime.strptime(record['LastModifiedDate'][:-5],'%Y-%m-%dT%H:%M:%S.%f')\n self.ownerName = record['Owner']['Name']\n self.whoName = record['Who']['Name'] if record['Who'] else ''\n self.whoId = record['Who']['Id'] if record['Who'] else None\n self.whatName = record['What']['Name'] if record['What'] else ''\n self.whatId = record['What']['Id'] if record['What'] else None", "title": "" }, { "docid": "b236da8195e7ab4f830ace0ec5306e97", "score": "0.6403544", "text": "def __init__(__self__, *,\n key: Optional[pulumi.Input[str]] = None):\n if key is not None:\n pulumi.set(__self__, \"key\", key)", "title": "" }, { "docid": "b236da8195e7ab4f830ace0ec5306e97", "score": "0.6403544", "text": "def __init__(__self__, *,\n key: Optional[pulumi.Input[str]] = None):\n if key is not None:\n pulumi.set(__self__, \"key\", key)", "title": "" }, { "docid": "50eb37820f9122da1c4bf800c27eb419", "score": "0.6389704", "text": "def __init__(self, key, data):\n if not isinstance(key, Key):\n raise Exception(\"key argument should be a Key instance\")\n self.key = key\n super(Grouping, self).__init__(data)", "title": "" }, { "docid": "0ace8f883bc67b5eabd7c0cb44f70a89", "score": "0.63637286", "text": "def __init__(__self__, *,\n key: str,\n value: str):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "title": "" }, { "docid": "0ace8f883bc67b5eabd7c0cb44f70a89", "score": "0.63637286", "text": "def __init__(__self__, *,\n key: str,\n value: str):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "title": "" }, { "docid": "0ace8f883bc67b5eabd7c0cb44f70a89", "score": "0.63637286", "text": "def __init__(__self__, *,\n key: str,\n value: str):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "title": "" }, { "docid": "0ace8f883bc67b5eabd7c0cb44f70a89", "score": "0.63637286", "text": "def __init__(__self__, *,\n key: str,\n value: str):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "title": "" }, { "docid": "0ace8f883bc67b5eabd7c0cb44f70a89", "score": "0.63637286", "text": "def __init__(__self__, *,\n key: str,\n value: str):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "title": "" }, { "docid": 
"5701d65800b39c7aaaca9937a8886f7d", "score": "0.63551176", "text": "def __init__(self, key):\n logging.info('Creating new layer with key {0}'.format(key))\n self.__key = key", "title": "" }, { "docid": "7a4a097cfde26865412ab6350baa5d01", "score": "0.6331312", "text": "def __init__(self, key, **kwargs):\n key = key if key is not None else kwargs\n self.__dict__.update(key)", "title": "" }, { "docid": "9631e81540cd94849bca4a6faf8b7da0", "score": "0.63146985", "text": "def __init__(self, key, value):\n self.created = time.clock() * 1000\n self.key = key\n self.value = value", "title": "" }, { "docid": "b2bd454207e8040a21817747b0d6e5a6", "score": "0.62930965", "text": "def __init__(self, recordset, keys):\n self._recordset = recordset\n self._keys = keys\n self._data = [] # will hold ((keys), [ids]) pairs", "title": "" }, { "docid": "a8f8c082d56b69309563e46a4c38cd00", "score": "0.6268538", "text": "def __init__(self, key):\n # self.key = key.decode(\"hex\") # Python 2\n self.key = bytes.fromhex(key)", "title": "" }, { "docid": "99b58aaabe062508c5b7440428c5e0c0", "score": "0.6265743", "text": "def _init_record(self, record_type_idstr):\n record_type_data = self._all_supported_record_type_data_sets[Id(record_type_idstr).get_identifier()]\n module = importlib.import_module(record_type_data['module_path'])\n record = getattr(module, record_type_data['query_record_class_name'])\n self._records[record_type_idstr] = record(self)", "title": "" }, { "docid": "d5c8fe013231c2f98f1ed0eacf1cecbd", "score": "0.62630826", "text": "def __init__(__self__, *,\n key: Optional[str] = None,\n value: Optional[str] = None):\n if key is not None:\n pulumi.set(__self__, \"key\", key)\n if value is not None:\n pulumi.set(__self__, \"value\", value)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.62537867", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.62537867", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.62537867", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.62537867", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.62537867", "text": "def __init__(__self__, *,\n key: 
pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" } ]
}, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.6252828", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.6252828", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.6252828", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.6252828", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.6252828", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.6252828", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.6252828", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.6252828", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.6252828", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n 
pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.6252828", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.625206", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.625206", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.625206", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.625206", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.625206", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.625206", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.625206", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": 
"0.625206", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.62513936", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.62513936", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.62513936", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "b3a7a4f908c1e0b6eaa39978262106a7", "score": "0.62513936", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None,\n optional: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"key\", key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if optional is not None:\n pulumi.set(__self__, \"optional\", optional)", "title": "" }, { "docid": "86f2d691557ff73634d0200bec75f0fa", "score": "0.6244348", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "title": "" }, { "docid": "86f2d691557ff73634d0200bec75f0fa", "score": "0.6244348", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "title": "" }, { "docid": "86f2d691557ff73634d0200bec75f0fa", "score": "0.6244348", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "title": "" }, { "docid": "86f2d691557ff73634d0200bec75f0fa", "score": "0.6244348", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "title": "" }, { "docid": "86f2d691557ff73634d0200bec75f0fa", "score": "0.6244348", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "title": "" }, { "docid": "86f2d691557ff73634d0200bec75f0fa", "score": "0.6244348", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "title": "" }, { "docid": 
"86f2d691557ff73634d0200bec75f0fa", "score": "0.6244348", "text": "def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "title": "" }, { "docid": "5e559571364f297ab52d39357e17ac46", "score": "0.6231701", "text": "def __init__(self, records=None):\r\n CassandraBase.__init__(self)\r\n records = self._transform(records) if records else {}\r\n dict.__init__(self, records)", "title": "" }, { "docid": "b27d613215bffd4e05d692670d013f74", "score": "0.62185407", "text": "def build( self, record ):\n # Nothing to do if the key size is zero!\n if (self.key_buffer_size == 0):\n return None\n\n # Check that the given record is a dict of the given table\n # type\n if not isinstance( record, dict ):\n raise ValueError( \"Given record must be a dict; given %s\"\n % type( record ) )\n # Check all the keys of the given record\n record_keys = record.keys()\n if (record_keys != self.table_column_names):\n raise ValueError( \"Given record must be of the type for GPUdb table '%s'\"\n \" (with columns '%s'); given record has columns '%s' \"\n % (self.table_name,\n self.table_column_names,\n record_keys) )\n\n # Create and populate a RecordKey object\n record_key = RecordKey( self.key_buffer_size )\n for i in range(0, len(self.key_columns_names)):\n # get the key, value pair\n key = self.key_columns_names[ i ]\n value = record[ key ]\n key_type = self.key_types[ i ]\n\n # Add to the record key\n if key_type in [\"char1\", \"char2\", \"char4\", \"char8\", \"char16\"]:\n record_key.add_char( value )\n elif key_type in [\"double\", \"float\", \"int\", \"int8\", \"int16\", \"long\"]:\n record_key.add_number( value )\n elif key_type in [\"string\"]:\n record_key.add_string( value )\n else:\n raise ValueError( \"Unknown key type given: '%s'\" % key_type )\n # end loop\n\n # Compute the key hash and return the key\n record_key.compute_hashes()\n return record_key", "title": "" }, { "docid": "08fe4f1f1c3f05837eb163bf5808ba0b", "score": "0.6208423", "text": "def __init__(self, key, value):\n self.key = key\n self.value = value", "title": "" } ]
17ab4bad13651f35c6e6f6dd301a6c2e
Function used to extract the evaluation values of an ndeval result file
[ { "docid": "5a2a617339fd18521cf95d513b36c691", "score": "0.0", "text": "def extract_ndeval(fields_order, evaluation_file):\n\t# Reading the evaluation line\n\tline = evaluation_file.next()\n\n\t# Creating a dictionnary to the evaluation values from the score fields and the current line scores\n\tquery_evaluation_values = dict(zip(fields_order, line.split(',')))\n\n\t# Getting the query id\n\tquery_id = query_evaluation_values['topic']\n\n\tif query_id == 'amean':\n\t\traise StopIteration\n\n\t# Removing non score values\n\tquery_evaluation_values.pop('runid', None)\n\tquery_evaluation_values.pop('topic', None)\n\n\treturn (query_id, query_evaluation_values)", "title": "" } ]
[ { "docid": "fbe1c46ad4fcda03d97ace45f90a2225", "score": "0.67118865", "text": "def evaluate(self, include_fileset=False):", "title": "" }, { "docid": "4d55883f629ee2dbfe0f7971e66f7607", "score": "0.64297915", "text": "def compile_results(self):\n\n\t\t# highly performant code\n\t\tmae = np.array([ei.mae for ei in self.evalItems])\n\t\trmse = np.array([ei.rmse for ei in self.evalItems])\n\t\tbad4 = 100 * np.array([ei.bad4 for ei in self.evalItems])\n\t\tbad2 = 100 * np.array([ei.bad2 for ei in self.evalItems])\n\t\t\n\t\tdataset_name = self.dataset_name + ' Dataset'\n\t\tself.results_string = get_result_string(mae, rmse, bad4, bad2, '{} - {}'.format(dataset_name, self.mode.upper()))\n\t\tprint(self.results_string)", "title": "" }, { "docid": "1b98f4a341689d55da79c660063308bf", "score": "0.6403356", "text": "def eval(self):", "title": "" }, { "docid": "ced499e386e0fc55dfc37f6a924b0299", "score": "0.63907945", "text": "def evaluate(self, driver):", "title": "" }, { "docid": "1e659c1dafb1b87c22db8b6f5d552050", "score": "0.6360327", "text": "def evaluate(self, results):\n pass", "title": "" }, { "docid": "dc472f1f8bc30e6961284de1a64affa2", "score": "0.6279497", "text": "def test_evaluate():\n fBB='5e6'\n frev='9e5'\n csv = '1'\n\n for i in range(1,4):\n id = str(i)\n Uout_filename = 'csvDateien_K/Uout_' + id + '.csv'\n results_filename = 'csvDateien_K/results.csv'\n print(1)\n signal_output = Signal(dateiName=Uout_filename, frev=frev, fBB=fBB)\n\n sineref_output = SineRef(frev=frev, fBB=fBB, signal_output=signal_output, csv=csv)\n\n verzerrungszahlen_output = Verzerrungszahlen(dataPointsRef=sineref_output, dataPointsSignal=signal_output)\n # quality aus verzerrungszahlen_output raus lesen!!!\n print(verzerrungszahlen_output)\n save(Ueberschreiben='0', verzerrungszahlen_output=verzerrungszahlen_output, output_file=results_filename)\n quality = 0\n return (quality)", "title": "" }, { "docid": "55a56e34cd42510c88b8d3ad88619374", "score": "0.6138195", "text": "def nist_eval(cand_file, ref_file):\n\n exec_file = os.path.join(METRICS_DIR, 'nist', 'mteval-v13a.pl')\n # invoke nist\n cmd = 'perl mteval-v13a.pl -r %s -s %s -t %s' % (exec_file, ref_file,src_file,tst_file)\n status,output = commands.getstatusoutput(cmd)\n\n # parse results\n sections = output.split('# ' + '-'*72)\n\n nist_reg = 'NIST: (\\d\\.\\d+) (\\d\\.\\d+) (\\d\\.\\d+) (\\d\\.\\d+) (\\d\\.\\d+)'\n nist_match = re.search(nist_reg, sections[1])\n nist_scores = map(float, nist_match.groups())\n\n bleu_reg = 'BLEU: (\\d\\.\\d+) (\\d\\.\\d+) (\\d\\.\\d+) (\\d\\.\\d+) (\\d\\.\\d+)'\n bleu_match = re.search(bleu_reg, sections[1])\n bleu_scores = map(float, bleu_match.groups())\n\n return nist_scores, bleu_scores", "title": "" }, { "docid": "36aeff0a01a17b4e9c05bdb07eeee154", "score": "0.61190313", "text": "def evaluate(self):\n pass", "title": "" }, { "docid": "5834001e4109d3f72834c64912b41081", "score": "0.6062196", "text": "def terp_eval(cand_file, ref_file):\n\n param_file = os.path.join(METRICS_DIR, 'terp', 'willie', 'params.param')\n res_file = cand_file\n phrase_db = os.path.join(METRICS_DIR, 'terp', 'data', 'phrases.db')\n with open(param_file, 'w') as f:\n print >>f, 'Phrase Database (filename) : ' + phrase_db\n print >>f, 'Reference File (filename) : ' + ref_file\n print >>f, 'Hypothesis File (filename) : ' + cand_file\n print >>f, 'Output Formats (list) : param nist pra'\n print >>f, 'Output Prefix (filename) : ' + res_file\n\n terpa = os.path.join(METRICS_DIR, 'terp', 'bin', 'terpa')\n\n # invoke terp\n cmd = '%s %s' % 
(terpa,param_file)\n status,output = commands.getstatusoutput(cmd)\n\n # parse results\n scores = list()\n with open(res_file + '.seg.scr', 'r') as f:\n for line in f:\n scores.append(float(line.split()[3]))\n\n return scores", "title": "" }, { "docid": "6d28fa9dbb87d8f7ef7e04cae6d19ca1", "score": "0.59747434", "text": "def get_results(evaluation_file, print_header=True):\n result_map = {}\n\n # extract the dataset from the filename\n filename = os.path.splitext(os.path.basename(evaluation_file))[0]\n dataset = filename.split(\"_\")[-3]\n run = filename.split(\"_\")[-2]\n embedding_type = filename.split(\"_\")[-1]\n analogy_file = \"_\".join(filename.split(\"_\")[2:-3])\n\n with open(evaluation_file, 'r') as input_file:\n \n # skip the first few lines\n skip = True\n for line in input_file.readlines():\n if line.strip() == \"####\":\n skip = False\n continue\n\n if not skip:\n \n num_dim, standard1, _, _, _, result = line.strip().split(\"\\t\")\n \n # create structure to save statistics\n if num_dim not in result_map:\n result_map[num_dim] = {'total': 0, 'correct': 0, 'rr': []}\n\n result_words = result.split(\" \")\n\n # total count\n result_map[num_dim]['total'] += 1\n\n # accuracy\n if result_words[0] == standard1:\n result_map[num_dim]['correct'] += 1\n\n # reciprocal rank\n reciprocal_rank = return_reciprocal_rank(result_words, standard1)\n result_map[num_dim]['rr'].append(reciprocal_rank)\n\n \n # to check the results (no overlap and right number returned)\n if len(result_words) != len(set(result_words)):\n print(\"error: overlap\")\n print(line)\n print(input_file)\n if reciprocal_rank == 0.0 and len(result_words) != 50:\n print(\"error: num results\")\n print(line)\n print(input_file)\n \n # Now print the results\n results = []\n for num_dim in result_map.keys(): \n results.append({\"filename\": analogy_file, \n \"dataset\": dataset, \n \"run\": run, \n \"embedding_type\": embedding_type, \n \"num_dim\": num_dim,\n \"total_pairs\": result_map[num_dim]['total'], \n \"accuracy\": float(result_map[num_dim]['correct'])/result_map[num_dim]['total'], \n \"rr\": np.array(result_map[num_dim]['rr']).mean()})\n return results", "title": "" }, { "docid": "cbc9cb28dc9ce8abc14e067da3da9eab", "score": "0.5950086", "text": "def evaluate(self, individual):", "title": "" }, { "docid": "d99d00287dd7b7837f35eaf27160c53e", "score": "0.58886796", "text": "def evaluate(self):\n return", "title": "" }, { "docid": "99d7fb4da4120f9d34aa3ec8f8d217b9", "score": "0.58833504", "text": "def GenerateEval(files):\n \n # nankai index\n nkInd = 0\n \n # gt_Vb pickle list \n #files = glob.glob(os.path.join(featurePath,gtpicklePath,pName))\n \n # sort nutural order\n efiles = []\n for path in natsorted(files):\n efiles.append(path)\n \n flag = False\n for fID in efiles:\n with open(fID,\"rb\") as fp:\n # [cell,seq]\n gt = pickle.load(fp)\n gt = gt.T\n \n # last time of non-zero intervals \n nk = np.where(gt[:,nkInd] > 0)[0].shape[0] # nankai\n # get gt nankai length of intervals\n nankaiIntervals = GetyVInterval(gt,fCnt=0,isEval=True)\n \n if not flag:\n # int\n xEvalSeq = nk\n # shape=[number of intervals(=8),cell(=3)]\n xEval = nankaiIntervals.T[np.newaxis].astype(np.float32)\n flag = True\n else:\n xEvalSeq = np.hstack([xEvalSeq,nk])\n xEval = np.concatenate([xEval,nankaiIntervals.T[np.newaxis].astype(np.float32)],0)\n \n #xEval_REG = np.reshape(xEval,[xEval.shape[0],-1])\n \n # xEval.shape=[number of data(=256),intervals(=8),cell(3)]\n # xEval_REG.shape=[number of data(=256),intervals*cell(=24)]\n # 
xEvalSeq.shaoe=[number of data(=256), maximum of sequence]\n return xEval, xEvalSeq", "title": "" }, { "docid": "d9bb04f3e0f6576149f5c4e413cc08f7", "score": "0.58697855", "text": "def eval_genotyper(vrn_file, ref_file, dbsnp_file, config):\n metrics_file = \"%s.eval_metrics\" % vrn_file\n cl = [\"gatk_variant_eval.py\", config[\"program\"][\"picard\"], vrn_file,\n ref_file, dbsnp_file]\n target = config[\"algorithm\"].get(\"hybrid_target\", \"\")\n if target:\n base_dir = os.path.dirname(os.path.dirname(ref_file))\n cl.append(os.path.join(base_dir, target))\n with open(metrics_file, \"w\") as out_handle:\n subprocess.check_call(cl, stdout=out_handle)", "title": "" }, { "docid": "d7dcddcbdb7f91c00e9652a6426376d0", "score": "0.58627886", "text": "def run():\n evaluator = StudentEvaluator()\n #assignment_analyser.init()\n evaluator.run()\n return(evaluator.exit_value())", "title": "" }, { "docid": "7bb52f19a05a107f02eb03f7657167db", "score": "0.57851464", "text": "def __call__(self, params):\n params_dict = self.params_list_to_params_dict(params)\n # params_dict['n_smooth'] = 1\n # params_dict['n_salt'] = 1\n\n predict_event_list = frame_prediction_to_event_prediction(\n self.output_dict, params_dict)\n\n # Write predicted events to submission file\n write_submission(predict_event_list, self.submission_path)\n\n # SED with official tool\n results = official_evaluate(self.reference_csv_path, self.submission_path)\n \n f1 = results['overall']['f_measure']['f_measure']\n\n return f1", "title": "" }, { "docid": "ed0ed8fb372d6b1e49fc06e902b65177", "score": "0.5764757", "text": "def interpretor(file_list):\n for i in range(len(file_list)):\n title = file_list[i].split(\".\") \n #Defining counters for relevant information\n exon_indels = 0\n gene_indels = 0\n exon_var = 0\n gene_var = 0\n snpEff_exon_var = 0 \n snpEff_indels_exon = 0\n snpEff_gene_var = 0\n snpEff_gene_ind = 0\n with open(\"nuc_variant_calls/\"+file_list[i],'r') as file:\n content = file.readlines()\n for y in content:\n rules=y.split(\"\\t\") #Splits the lines into seperate items.\n if len(rules[0]) > 11 and rules[0][0] == \"B\" and rules[0][11] == \"e\": #Retrieving alignment exon features\n exon_indels += int(rules[3]) # Collecting indel count\n exon_var+= (int(rules[1].split(\".\")[0])-int(rules[2].split(\".\")[0])) #Collecting exon variant count. 
Splitting creates correct indexing for retrieval \n elif len(rules[0]) > 11 and rules[0][0] == \"B\" and rules[0][11] == \"g\" and len(rules) > 2 : #This step filters out remaining feature lines.\n gene_indels += int(rules[3])\n gene_var += (int(rules[1].split(\".\")[0])-int(rules[2].split(\".\")[0]))\n if len(rules) == 3 and rules[1][0] == \"g\": #retrieve gene features out of snpEff file\n snpEff_gene_var += int(rules[1].split(\" \")[1]) #SnpEff gene feature variant count .\n snpEff_gene_ind += int(rules[2].split(\" \")[2]) #Splitting \" \" creates correct indexes for retrieval\n elif len(rules) == 3 and rules[1][0] == \"G\": #Retrieves exon features out of snpEff file.\n snpEff_exon_var += int(rules[1].split(\" \")[1])\n snpEff_indels_exon += int(rules[2].split(\" \")[2])\n filling_tsv(title,exon_indels,exon_var,gene_indels,gene_var,snpEff_exon_var,snpEff_indels_exon,snpEff_gene_var,snpEff_gene_ind) #After each seperate file the tsv file gets elongated.", "title": "" }, { "docid": "a1a05d7bcae5a4a960c21077fbb2969f", "score": "0.57595813", "text": "def evaluate(self, fretting):\n raise NotImplementedError()", "title": "" }, { "docid": "62d4487b7b560fe02a1b140b990d5311", "score": "0.5747513", "text": "def evaluate(p_results):\n\n ignore = p_results.ignore\n lib = p_results.lib_location\n learner = p_results.learner\n metric = p_results.metric\n output = p_results.output\n log = p_results.log_file\n result = p_results.result\n grow = p_results.grow\n\n train = p_results.train_file\n test = p_results.test_file\n extra = not p_results.score\n\n if extra:\n log.write(\"evaluating using attributes...\\n\")\n\n ranker = RankMethod(lib, learner, metric, output)\n\n with open(result, \"r\") as file:\n with tempfile.NamedTemporaryFile(mode=\"w\") as tmp_file:\n lines = file.readlines()\n if grow:\n features = list(map(int, lines[:len(lines)-ignore]))\n else:\n features = list(map(int, lines[ignore:]))\n\n if len(features) == 0:\n log.write(\"no features, NDCG@10=0.0\")\n return\n feature_file = tmp_file.name\n for feature in features:\n tmp_file.write(str(feature) + \"\\n\")\n tmp_file.flush()\n if extra:\n log.write(\"training...\\n\")\n ranker.train(train, feature_file, output)\n if extra:\n log.write(\"scoring...\\n\")\n ndcg = ranker.score(test, feature_file, output)\n if extra:\n log.write(\"NDCG@10=\" + str(ndcg) + \"\\n\")\n else:\n log.write(str(ndcg) + \"\\n\")", "title": "" }, { "docid": "1d5ce2adb547f8f1f5b93bba52b01109", "score": "0.57370883", "text": "def read_result_and_evaluate(file: str = 'data/evaluation_result.csv'):\n if not isfile(file):\n raise FileNotFoundError(f\"Could not find file at {file}.\")\n\n docs = []\n mentions = []\n logits = []\n labels = []\n with open(file) as f:\n # Skip header\n next(f)\n for line in f:\n col = line.split(';')\n if len(col) < 7:\n break\n docs.append(int(col[0]))\n mentions.append(int(col[1].strip()))\n label = float(col[5].strip())\n logit = col[7].strip()[2:-1].split(' ')\n # Convert the logits (that are not empty string) to floats\n logit = [float(lo) for lo in logit if lo]\n labels.append(label)\n logits.append(logit)\n\n labels = np.expand_dims(np.array(labels), -1)\n logits = np.array(logits)\n\n avg_accuracy, _ = accuracy_over_mentions(logits, labels, docs, mentions)\n print(f\"Test accuracy: {avg_accuracy:.4f}\")", "title": "" }, { "docid": "6c3ef12f054594891b7374ccd9e19f92", "score": "0.571991", "text": "def processData2(filename):\n try:\n file = open(filename)\n print(\"The file %s has been accessed\" % filename)\n n, count = 0, 
1\n exp_val, std_dev = 0.0, 0.0\n for numbers in file:\n try:\n exp_val += float(numbers)\n std_dev += float(numbers) ** 2\n n += 1\n count += 1\n except ValueError:\n print(\"A non-numerical entry was found on line %d\" % count)\n count += 1\n \n file.close()\n print(exp_val, std_dev)\n exp_val /= n\n std_dev = sqrt((std_dev / n) - exp_val ** 2)\n\n return n, exp_val, std_dev\n except IOError:\n print(\"Could not read or find the file %s\" % filename)\n return None, None, None", "title": "" }, { "docid": "6f25299c0082b04f50aacb253981203e", "score": "0.5707045", "text": "def evaluate(self) :\n raise NotImplementedError", "title": "" }, { "docid": "62937795d18d2b8f37c736db9b5bfc6b", "score": "0.569293", "text": "def _run_evaluate(self, config, test_set):\r\n files, perp = self.write_prediction(config, test_set)\r\n scores = score_files(files[0], files[1])\r\n scores[\"perplexity\"] = perp\r\n\r\n return scores", "title": "" }, { "docid": "fbda35afb2b2c91ebc1c497b16caa28e", "score": "0.5692899", "text": "def process_run(properties_filename, eval_filename, eval_program):\n\n\t# Extracting the properties contained in the current configuration file\n\tproperties = extract_terrier_properties(properties_filename)\n\n\t# Checking if the evaluation file exists for the current run\n\tif os.path.isfile(eval_filename):\n\t\t# Opening the evaluation file for reading\n\t\teval_file = open(eval_filename)\n\t\t# Generating matrix rows from de run evaluation\n\t\ttry:\n\t\t\t# Read header for the ndeval eval files in order to guarantee the right order\n\t\t\tif eval_program == 'ndeval':\n\t\t\t\torder = eval_file.next().split(',')\n\t\t\twhile True:\n\t\t\t\t# Extracting the next matrix row\n\t\t\t\tif eval_program == 'ndeval':\n\t\t\t\t\tquery_id, evaluation_values = extract_ndeval(order, eval_file)\n\t\t\t\t\tprint_matrix_row(properties, query_id, ndeval_fields, evaluation_values)\n\t\t\t\telse:\n\t\t\t\t\tquery_id, evaluation_values = extract_trec_eval(eval_file)\n\t\t\t\t\tprint_matrix_row(properties, query_id, trec_eval_fields, evaluation_values)\n\t\texcept StopIteration:\n\t\t\tpass", "title": "" }, { "docid": "ef744e2500c17423ed36ec89389d8416", "score": "0.56819254", "text": "def read_parsing_evaluation(evaluation_file_path):\n try:\n with open(evaluation_file_path, 'r') as f:\n lines = f.readlines()\n las = float(lines[0].split('=')[1].strip('% \\n'))\n uas = float(lines[1].split('=')[1].strip('% \\n'))\n acc = float(lines[2].split('=')[1].strip('% \\n'))\n except:\n las = 0.0\n uas = 0.0\n acc = 0.0\n return las, uas, acc", "title": "" }, { "docid": "166a0d66629c81584ee6370b69207539", "score": "0.5677469", "text": "def test_ner(results, path):\n output_file = path+\"_predict.utf8\"\n with open(output_file, \"w\",encoding='utf8') as f:\n to_write = []\n for block in results:\n for line in block:\n to_write.append(line + \"\\n\")\n to_write.append(\"\\n\")\n\n f.writelines(to_write)\n eval_lines = return_report(output_file)\n return eval_lines", "title": "" }, { "docid": "c5455831b3a67d6502f7c368045fd355", "score": "0.5649693", "text": "def evaluate(self):\n raise NotImplementedError", "title": "" }, { "docid": "2edfb58d5f084fbc5db0fd8322c7237e", "score": "0.5630503", "text": "def describe(filename_or_result_type):\r\n if isinstance(filename_or_result_type,str):\r\n filename_or_result_type = load(filename_or_result_type)\r\n print(filename_or_result_type.simulation)", "title": "" }, { "docid": "19a753df793717f84c806f27e1d1219c", "score": "0.56300855", "text": "def system_evaluation(self):\n if not 
self.dataset.reference_data_present:\n return ' No reference data available for dataset.'\n else:\n output = ''\n if self.params.get_path('evaluator.scene_handling') == 'scene-dependent':\n overall_metrics_per_scene = {}\n for scene_id, scene_label in enumerate(self.dataset.scene_labels):\n if scene_label not in overall_metrics_per_scene:\n overall_metrics_per_scene[scene_label] = {}\n\n segment_based_metric = sed_eval.sound_event.SegmentBasedMetrics(\n event_label_list=self.dataset.event_labels(scene_label=scene_label),\n time_resolution=1.0,\n )\n\n event_based_metric = sed_eval.sound_event.EventBasedMetrics(\n event_label_list=self.dataset.event_labels(scene_label=scene_label),\n evaluate_onset=True,\n evaluate_offset=False,\n t_collar=0.5,\n percentage_of_length=0.5\n )\n\n for fold in self._get_active_folds():\n result_filename = self._get_result_filename(fold=fold,\n scene_label=scene_label,\n path=self.params.get_path('path.recognizer'))\n\n results = MetaDataContainer().load(filename=result_filename)\n #print(result_filename)\n #print(self.dataset.test(1, scene_label=scene_label))\n\n for file_id, item in enumerate(self.dataset.test(fold, scene_label=scene_label)):\n\n # Select only row which are from current file and contains only detected event\n #print(self.dataset.absolute_to_relative(item['file']))\n #exit(0)\n #print (file_id) ### a012.wav\n #print (item)\n #exit(0)\n #print(self.dataset.relative_to_absolute_path(item['file']))\n current_file_results = []\n for result_item in results.filter(filename=self.dataset.absolute_to_relative(item['file'])): \n #print(result_item)\n if 'event_label' in result_item and result_item.event_label:\n current_file_results.append(result_item)\n\n \n \n #print(file)\n #exit(0)\n meta = [] \n \n #print('processing META--------------------------------------------------------------') \n #print(self.dataset.absolute_to_relative(item['file']))\n #print(self.dataset.absolute_to_relative(item['file']))\n #print(self.dataset.file_meta('audio/street/a099.wav'))\n #print(self.dataset.file_meta(self.dataset.absolute_to_relative(item['file'])))\n #exit(0) \n\n for meta_item in self.dataset.file_meta(self.dataset.absolute_to_relative(item['file'])): \n if 'event_label' in meta_item and meta_item.event_label:\n meta.append(meta_item) \n segment_based_metric.evaluate(\n reference_event_list=meta,\n estimated_event_list=current_file_results\n )\n\n event_based_metric.evaluate(\n reference_event_list=meta,\n estimated_event_list=current_file_results\n )\n\n overall_metrics_per_scene[scene_label]['segment_based_metrics'] = segment_based_metric.results()\n overall_metrics_per_scene[scene_label]['event_based_metrics'] = event_based_metric.results()\n if self.params.get_path('evaluator.show_details', False):\n output += \" Scene [{scene}], Evaluation over {folds:d} folds\\n\".format(\n scene=scene_label,\n folds=self.dataset.fold_count\n )\n\n output += \" \\n\"\n output += segment_based_metric.result_report_overall()\n output += segment_based_metric.result_report_class_wise()\n overall_metrics_per_scene = DottedDict(overall_metrics_per_scene)\n\n output += \" \\n\"\n output += \" Overall metrics \\n\"\n output += \" =============== \\n\"\n output += \" {event_label:<17s} | {segment_based_fscore:7s} | {segment_based_er:7s} | {event_based_fscore:7s} | {event_based_er:7s} | \\n\".format(\n event_label='Event label',\n segment_based_fscore='Seg. F1',\n segment_based_er='Seg. ER',\n event_based_fscore='Evt. F1',\n event_based_er='Evt. 
ER',\n )\n output += \" {event_label:<17s} + {segment_based_fscore:7s} + {segment_based_er:7s} + {event_based_fscore:7s} + {event_based_er:7s} + \\n\".format(\n event_label='-' * 17,\n segment_based_fscore='-' * 7,\n segment_based_er='-' * 7,\n event_based_fscore='-' * 7,\n event_based_er='-' * 7,\n )\n avg = {\n 'segment_based_fscore': [],\n 'segment_based_er': [],\n 'event_based_fscore': [],\n 'event_based_er': [],\n }\n for scene_id, scene_label in enumerate(self.dataset.scene_labels):\n output += \" {scene_label:<17s} | {segment_based_fscore:<7s} | {segment_based_er:<7s} | {event_based_fscore:<7s} | {event_based_er:<7s} | \\n\".format(\n scene_label=scene_label,\n segment_based_fscore=\"{:4.2f}\".format(overall_metrics_per_scene.get_path(scene_label + '.segment_based_metrics.overall.f_measure.f_measure') * 100),\n segment_based_er=\"{:4.2f}\".format(overall_metrics_per_scene.get_path(scene_label + '.segment_based_metrics.overall.error_rate.error_rate')),\n event_based_fscore=\"{:4.2f}\".format(overall_metrics_per_scene.get_path(scene_label + '.event_based_metrics.overall.f_measure.f_measure') * 100),\n event_based_er=\"{:4.2f}\".format(overall_metrics_per_scene.get_path(scene_label + '.event_based_metrics.overall.error_rate.error_rate')),\n )\n\n avg['segment_based_fscore'].append(overall_metrics_per_scene.get_path(scene_label + '.segment_based_metrics.overall.f_measure.f_measure') * 100)\n avg['segment_based_er'].append(overall_metrics_per_scene.get_path(scene_label + '.segment_based_metrics.overall.error_rate.error_rate'))\n avg['event_based_fscore'].append(overall_metrics_per_scene.get_path(scene_label + '.event_based_metrics.overall.f_measure.f_measure') * 100)\n avg['event_based_er'].append(overall_metrics_per_scene.get_path(scene_label + '.event_based_metrics.overall.error_rate.error_rate'))\n\n output += \" {scene_label:<17s} + {segment_based_fscore:7s} + {segment_based_er:7s} + {event_based_fscore:7s} + {event_based_er:7s} + \\n\".format(\n scene_label='-' * 17,\n segment_based_fscore='-' * 7,\n segment_based_er='-' * 7,\n event_based_fscore='-' * 7,\n event_based_er='-' * 7,\n )\n output += \" {scene_label:<17s} | {segment_based_fscore:<7s} | {segment_based_er:<7s} | {event_based_fscore:<7s} | {event_based_er:<7s} | \\n\".format(\n scene_label='Average',\n segment_based_fscore=\"{:4.2f}\".format(numpy.mean(avg['segment_based_fscore'])),\n segment_based_er=\"{:4.2f}\".format(numpy.mean(avg['segment_based_er'])),\n event_based_fscore=\"{:4.2f}\".format(numpy.mean(avg['event_based_fscore'])),\n event_based_er=\"{:4.2f}\".format(numpy.mean(avg['event_based_er'])),\n )\n\n elif self.params.get_path('evaluator.scene_handling') == 'scene-independent':\n message = '{name}: Scene handling mode not implemented yet [{mode}]'.format(\n name=self.__class__.__name__,\n mode=self.params.get_path('evaluator.scene_handling')\n )\n\n self.logger.exception(message)\n raise ValueError(message)\n\n else:\n message = '{name}: Unknown scene handling mode [{mode}]'.format(\n name=self.__class__.__name__,\n mode=self.params.get_path('evaluator.scene_handling')\n )\n\n self.logger.exception(message)\n raise ValueError(message)\n\n if self.params.get_path('evaluator.saving.enable'):\n filename = self.params.get_path('evaluator.saving.filename').format(\n dataset_name=self.dataset.storage_name,\n parameter_set=self.params['active_set'],\n parameter_hash=self.params['_hash']\n )\n\n output_file = os.path.join(self.params.get_path('path.evaluator'), filename)\n\n output_data = {\n 
'overall_metrics_per_scene': overall_metrics_per_scene,\n 'average': {\n 'segment_based_fscore': numpy.mean(avg['segment_based_fscore']),\n 'segment_based_er': numpy.mean(avg['segment_based_er']),\n 'event_based_fscore': numpy.mean(avg['event_based_fscore']),\n 'event_based_er': numpy.mean(avg['event_based_er']),\n },\n 'parameters': dict(self.params)\n }\n ParameterFile(output_data, filename=output_file).save()\n\n return output", "title": "" }, { "docid": "54849876feb80436418ae8e873d2c068", "score": "0.56291443", "text": "def evaluate(data, model):\n pass", "title": "" }, { "docid": "b5c8bc725a55ee80a14840fa241abd3f", "score": "0.56246793", "text": "def evaluation(fname_to_check, regions_fname=regions_file):\r\n fname_to_check = fix_json_fname(fname_to_check)\r\n regions_fname = fix_json_fname(regions_fname)\r\n\r\n # run command and get output\r\n res = check_output([\"neurofinder\", \"evaluate\", regions_fname, fname_to_check], universal_newlines=True).rstrip()\r\n res_dict = literal_eval(res)\r\n\r\n return res_dict", "title": "" }, { "docid": "800f94872b0eace0d4f6c1f63bc06b94", "score": "0.55956584", "text": "def _evaluate(self, eval_points) -> np.ndarray:\n pass", "title": "" }, { "docid": "7cf64ad86e9cb7274c36b854197fffbe", "score": "0.55915743", "text": "def evaluate(self, input_: Any) -> Any:", "title": "" }, { "docid": "a86d4191f8736328d6fa2d702b181dec", "score": "0.5580222", "text": "def get_data(file_name):\n\ttest_fraction = 0.1\n\tnumber_of_elements = 119\n\tpass", "title": "" }, { "docid": "4b3ea94deb979f747c31783571889825", "score": "0.55796903", "text": "def _evaluate(self):\n raise NotImplementedError", "title": "" }, { "docid": "dac0af67af2856a07c33651b646fb5e8", "score": "0.5568434", "text": "def _nodal_result(self, rnum, result_type):\n # element header\n rnum = self.parse_step_substep(rnum)\n ele_ind_table, nodstr, etype = self._element_solution_header(rnum)\n\n result_type = result_type.upper()\n if self.resultheader['rstsprs'] == 0 and result_type == 'ENS':\n nitem = 11\n elif result_type in ELEMENT_RESULT_NCOMP:\n nitem = ELEMENT_RESULT_NCOMP[result_type]\n else:\n nitem = 1\n\n result_index = ELEMENT_INDEX_TABLE_KEYS.index(result_type)\n\n # Element types for nodal averaging\n elemtype = self.geometry['Element Type'].astype(np.int32)\n\n # if self.version < 14.5: # values stored as double precision\n # tarr = np.empty(1, np.float64)\n # my_dtype = 1\n # else: # values stored as single precision\n # tarr = np.empty(1, np.float32)\n # my_dtype = 0\n\n data, ncount = _binary_reader.read_nodal_values(self.filename,\n self.grid.celltypes,\n ele_ind_table,\n self.grid.offset,\n self.grid.cells,\n nitem,\n self.grid.number_of_points,\n nodstr,\n etype,\n elemtype,\n result_index)\n\n if result_type == 'ENS' and nitem != 6:\n data = data[:, :6]\n\n nnum = self.grid.point_arrays['ansys_node_num']\n if np.isnan(data).all():\n raise ValueError('Result file contains no %s records for result %d' %\n (ELEMENT_INDEX_TABLE_INFO[result_type.upper()], rnum))\n\n # average across nodes\n result = data/ncount.reshape(-1, 1)\n return nnum, result", "title": "" }, { "docid": "e03407545edb5a808d08489dfa1cfaa4", "score": "0.5567536", "text": "def evaluate(self, samples):\n pass", "title": "" }, { "docid": "253de78139a9546b3d9eb8e2136603f4", "score": "0.55630773", "text": "def testRun(self, filename, outFile=None):\n \n self.evalData=None\n doc = self.loadDom(filename)\n doc =self.run(doc)\n self.evalData = self.createRef(doc)\n if outFile: self.writeDom(doc)\n# return 
self.evalData.serialize('utf-8',1)\n return etree.tostring(self.evalData,encoding='unicode',pretty_print=True)", "title": "" }, { "docid": "cfcaf00be8e6d34eb872082443274aa8", "score": "0.55561084", "text": "def itVals(arrayList, desiredOutput):\n\n pass", "title": "" }, { "docid": "4d2bfe63fac778f0f513fb5e699e175e", "score": "0.5545947", "text": "def get_results(predicted , actual):\n #the four parameters of evaluation\n tn = 0\n tp = 0\n fn = 0\n fp = 0\n for i in range(len(predicted)):\n if (predicted[i] == 1) & (actual[i] == 1):\n tp = tp + 1\n elif (predicted[i] == 1) & (actual[i] == 0):\n fp = fp + 1\n elif (predicted[i] == 0) & (actual[i] == 0):\n tn = tn + 1\n else:\n fn = fn + 1\n \n \"\"\"\n\tspecificity = tn / (tn + fp)\n\tsensitivity = tp / (tp + fn)\n\taccuracy = (tp + tn) / (tp + tn + fp + fn)\n\t\"\"\"\n \n specificity = float(tn) / (tn + fp)\n sensitivity = float(tp)/ (tp + fn)\n accuracy = float(tp + tn) / (tp + tn + fp + fn)\n return specificity, sensitivity, fp, fn, tp, tn", "title": "" }, { "docid": "ac27d84ea7198d110569256e116a4f29", "score": "0.5520786", "text": "def get_eval_result(ann_file, img_path, result_path):\n max_num = 128\n result_path = result_path\n\n dataset_coco = COCO(ann_file)\n img_ids = dataset_coco.getImgIds()\n imgs = dataset_coco.imgs\n if not os.path.exists('temp'):\n os.mkdir('temp')\n for img_id in img_ids:\n file = img_path + imgs[img_id]['file_name']\n img_size = get_img_size(file)\n resize_ratio = get_resize_ratio(img_size)\n\n img_name = imgs[img_id]['file_name']\n\n img_metas = np.array([img_size[1], img_size[0]] + [resize_ratio, resize_ratio])\n\n bbox_result_file = os.path.join(result_path, img_name.split('.')[0] + \"_0.bin\")\n label_result_file = os.path.join(result_path, img_name.split('.')[0] + \"_1.bin\")\n mask_result_file = os.path.join(result_path, img_name.split('.')[0] + \"_2.bin\")\n mask_fb_result_file = os.path.join(result_path, img_name.split('.')[0] + \"_3.bin\")\n\n all_bbox = np.fromfile(bbox_result_file, dtype=np.float16).reshape(1, 100, 5)\n all_label = np.fromfile(label_result_file, dtype=np.int32).reshape(1, 100, 1)\n all_mask = np.fromfile(mask_result_file, dtype=np.bool_).reshape(1, 100, 1)\n all_mask_fb = np.fromfile(mask_fb_result_file, dtype=np.float16).reshape(1, 100, 64, 28, 28)\n\n all_bbox_squee = np.squeeze(all_bbox)\n all_label_squee = np.squeeze(all_label)\n all_mask_squee = np.squeeze(all_mask)\n all_mask_fb_squee = np.squeeze(all_mask_fb)\n\n all_bboxes_tmp_mask = all_bbox_squee[all_mask_squee, :]\n all_labels_tmp_mask = all_label_squee[all_mask_squee]\n _all_mask_fb_tmp_mask = all_mask_fb_squee[all_mask_squee, :, :, :]\n all_mask_fb_tmp_mask = np.zeros((all_bboxes_tmp_mask.shape[0], 28, 28))\n for i in range(all_bboxes_tmp_mask.shape[0]):\n all_mask_fb_tmp_mask[i, :, :] = _all_mask_fb_tmp_mask[i, all_labels_tmp_mask[i]+1, :, :]\n if all_bboxes_tmp_mask.shape[0] > max_num:\n inds = np.argsort(-all_bboxes_tmp_mask[:, -1])\n inds = inds[:max_num]\n all_bboxes_tmp_mask = all_bboxes_tmp_mask[inds]\n all_labels_tmp_mask = all_labels_tmp_mask[inds]\n all_mask_fb_tmp_mask = all_mask_fb_tmp_mask[inds]\n save_result(all_mask_fb_tmp_mask, all_bboxes_tmp_mask, all_labels_tmp_mask,\n img_metas, 'temp/' + img_name.replace('.jpg', '.txt'))", "title": "" }, { "docid": "0d034979f4d67b6d1996a39aeee1c14f", "score": "0.5496294", "text": "def get_evaluation(self, input_data):\n pass", "title": "" }, { "docid": "1424ef7f04898f933de620d24c355c9b", "score": "0.54868925", "text": "def _read_spearmint_query_file(file_name):\n 
file_handle = open(file_name, 'r')\n true_value = None\n for raw_line in file_handle:\n # value\n line = raw_line.strip()\n if line.startswith('Result'):\n value = -1.0*float(line.split()[-1])\n elif line.startswith('True Result'):\n true_value = -1.0*float(line.split()[-1])\n # point\n try:\n add_np_array_line = line.replace('array', 'np.array')\n point_candidate = eval(add_np_array_line)\n if isinstance(point_candidate, dict):\n point_done = True\n point = point_candidate\n except Exception as e:\n pass\n file_handle.close()\n return point, value, true_value", "title": "" }, { "docid": "10992b3095ccb579da32cc9235b76a67", "score": "0.54868567", "text": "def variables():\n data_list = []\n\n # Read data from the file\n with open('test.txt','r') as f:\n data_list = eval(f.readlines()[0])\n\n total = sum([int(data_dict['value']['get']) for data_dict in data_list[:100]])\n total_inv = sum([int(data_dict['value']['inv']) for data_dict in data_list[:100]])\n\n for index, data_dict in enumerate(data_list[:50]):\n adv = int(data_dict['_id']['advanceperiod'])\n hits = int(data_dict['value']['hit'])\n fail = int(data_dict['value']['fail_set'])\n weights = int(data_dict['value']['get'])\n invs = int(data_dict['value']['inv'])\n\n # Uncomment if the weight are to be calculated in percentages\n #weights = int(float(data_dict['value']['get']/total)*100)\n \n # Uncomment if the weight are to be calculated in percentages\n #invs = int(float(data_dict['value']['inv']/total_inv)*100)\n\n adv+=1\n timeperiod = __timeperiod(adv)\n print timeperiod, ':', adv, ':', hits\n if index ==0:\n x = np.array([[timeperiod, adv]], dtype=int)\n y = np.array([hits])\n else:\n x = np.append(x,[[timeperiod, adv]], axis=0)\n y = np.append(y,[hits])\n\n return x, y", "title": "" }, { "docid": "fc5def68a8686e6f6392312b058275bd", "score": "0.54841185", "text": "def analysis():\n\tpass", "title": "" }, { "docid": "93c70f1e1512ef4eb8a00bd7f226ef3e", "score": "0.5482436", "text": "def main():\n values = get_values_from_input(\"rosalind_iprb.txt\",'r') \n \n k=int(values[0])\n m=int(values[1])\n n=int(values[2])\n\n sum_of_k_m_n=float(k+m+n);\n\n kk_km_kn = (k/sum_of_k_m_n * (k-1)/(sum_of_k_m_n-1)) + (k/sum_of_k_m_n * m/(sum_of_k_m_n-1)) + (k/sum_of_k_m_n * n/(sum_of_k_m_n-1))\n mm_mk_nn = (m/sum_of_k_m_n * (m-1)/(sum_of_k_m_n-1) * 3/4) + (m/sum_of_k_m_n * k/(sum_of_k_m_n-1)) + (m/sum_of_k_m_n * n/(sum_of_k_m_n-1) * 1/2)\n nn_nk_nm = (n/sum_of_k_m_n * k/(sum_of_k_m_n-1)) + (n/sum_of_k_m_n * m/(sum_of_k_m_n-1)* 1/2)\n\n probability = kk_km_kn + mm_mk_nn + nn_nk_nm\n\n print round(probability,5)", "title": "" }, { "docid": "d487126edd5091b7f73e341a765409ed", "score": "0.5471904", "text": "def process_a_file(file_name: str) -> None:\n\n # Opening the given file and analyzing it line by line\n with open(file_name, \"r\") as my_file:\n print(\"file_name\", file_name)\n\n units = []\n quantities = []\n values = []\n errors = []\n for line in my_file.readlines():\n # Saving the real experiment data\n if line_contains_valid_experiment_data(line):\n # Extracting the individual data and converting them to numbers\n delimited_info_in_text = line.strip().split(DELIMITER)\n (\n delimited_info_in_numbers,\n new_errors,\n ) = convert_list_from_string_to_float(delimited_info_in_text)\n values.append(delimited_info_in_numbers)\n\n # Logging the errors that may happen during the conversion\n if new_errors:\n errors.extend(new_errors)\n\n # Saving the units (kPa, C, Nm...)\n elif line.startswith(UNIT_LINE_IDENTIFIER):\n units = 
line.strip().split(DELIMITER)\n\n # Saving the quantities (p1, Q, t...)\n elif line.startswith(QUANTITIES_LINE_IDENTIFIER):\n quantities = line.strip().split(DELIMITER)\n\n # Calculating the mean value in all the columns\n # We did not want to use numpy, so that script can run without libraries\n # https://stackoverflow.com/questions/15819980/calculate-mean-across-dimension-in-a-2d-array\n mean_values = [float(sum(l)) / len(l) for l in zip(*values)]\n\n # Aggregating the results and saving them into a text file\n # The content is being appended, so the old data in a file will persist\n with open(RESULTS_FILE, \"a\") as results_file:\n results_file.write(f\"file_name: {file_name}\\n\")\n\n # Validating if all the lines were parsed correctly and showing errors\n results_file.write(\n f\"number of expected lines: {EXPECTED_NUMBER_OF_LINES}\\n\"\n )\n results_file.write(f\"number of valid lines: {len(values)}\\n\")\n if len(values) < EXPECTED_NUMBER_OF_LINES:\n results_file.write(\n 80 * \"x\"\n + \"\\nWARNING: NOT ALL OF THE LINES WERE CORRECTLY PARSED!!\\n\"\n )\n if len(errors) > 0:\n results_file.write(f\"{80 * 'x'}\\n{errors}\\n{80 * 'x'}\\n\")\n\n # Writing the real results - mean values\n for quantity, mean_value, unit in zip(quantities, mean_values, units):\n result = f\"Mean of {quantity} [{unit}] = {mean_value}\"\n results_file.write(result + \"\\n\")\n print(result)\n\n results_file.write(80 * \"*\" + \"\\n\")", "title": "" }, { "docid": "770a714654f385b62fb877ebf86629cd", "score": "0.5468116", "text": "def read_results(self,relax=False):\n outfname= 'out.'+self.label\n ergfname= 'erg.'+self.label\n frcfname= 'frc.'+self.label\n strfname= 'strs.'+self.label\n if not os.path.exists(outfname):\n raise RuntimeError(outfname+' does not exists.')\n if not os.path.exists(ergfname):\n raise RuntimeError(ergfname+' does not exists.')\n if not os.path.exists(frcfname):\n raise RuntimeError(frcfname+' does not exists.')\n if not os.path.exists(strfname):\n print('Warning: '+strfname+' does not exists.')\n\n self.results={ k : None for k in self.implemented_properties}\n\n fout= open(outfname,'r')\n lines= fout.readlines()\n if CALC_END_MARK not in lines[-1]:\n raise RuntimeError(self.label+' seems to stop somewhere..')\n if relax:\n relax_converged = False\n num_step_relax = -1\n for line in lines:\n if 'Damped MD converged with' in line:\n relax_converged = True\n num_step_relax = int(line.split()[4])\n break\n if not relax_converged:\n print('')\n print('** Warning: pmd relaxation does not' +\\\n ' seem to be converged**')\n print('')\n self.results['num_step_relax'] = num_step_relax\n fout.close()\n\n with open(ergfname,'r') as f:\n erg = float(f.readline().split()[0])\n self.results['energy'] = erg\n\n with open(frcfname,'r') as f:\n num= int(f.readline().split()[0])\n frcs= np.zeros((num,3))\n for i in range(num):\n data= [ float(x) for x in f.readline().split() ]\n frcs[i,0:3] = data[0:3]\n self.results['forces'] = frcs\n\n if os.path.exists(strfname):\n try:\n with open(strfname,'r') as f:\n strs = np.array([ float(x) for x in f.readline().split() ])\n self.results['stress'] = strs\n except:\n self.results['srress'] = None\n \n if relax:\n posfile = 'pmdfin'\n #nsys = NAPSystem(fname=posfile,specorder=self.specorder)\n nsys = nappy.io.read(fname=posfile,specorder=self.specorder)\n #tmpatoms = read_pmd(fname=posfile,specorder=self.specorder)\n tmpatoms = nsys.to_ase_atoms()\n self.results['relaxed_scaled_positions'] \\\n = tmpatoms.get_scaled_positions()\n self.results['relaxed_cell'] = 
tmpatoms.get_cell()", "title": "" }, { "docid": "bd570bed1f881886884bba4af91a60f6", "score": "0.5462796", "text": "def _post_process_eval_samples(self):", "title": "" }, { "docid": "626b7f620b2271404a89d5a965aa355e", "score": "0.5449873", "text": "def Executer():\n print \"Reading information for result id: \" + result_id\n\n db_path = os.path.join(app_folder, 'databases', 'storage.db')\n try:\n\t#Connecting to database and reading information\n #con = MySQLdb.connect(host = \"localhost\", user = \"mdat\", passwd = \"mdat\", db = \"mdat\")\n\tcon = sqlite3.connect(db_path)\n\tcon.row_factory = sqlite3.Row\n\tc = con.cursor()\n\tc.execute('SELECT * FROM results WHERE id = \"' + result_id + '\"')\n\tresult = c.fetchone()\n dataset_id = str(result['dataset_id'])\n c.execute('SELECT * FROM datasets WHERE id = \"' + dataset_id + '\"')\n dataset = c.fetchone()\n\tc.close()\n\tcon.close()\n except:\n\tprint \"Error for reading input parameters in Executer:\", sys.exc_info()[0]\n\treturn 0\n\n FDRrate = result['sign_level'] # False Discovery Rate for analysis\n permutation = result['perm_number'] # permutation number for Monte Carlo method\n\n nameoftreat = []\n numofrep = []\n rep_number = result['rep_number']\n temparr = rep_number.split(\"\\n\")\n for i in range(len(temparr)):\n temp = temparr[i].strip()\n temp = temp.split(\" \")\n nameoftreat.append(temp[0])\n numofrep.append(int(temp[1]))\n\n numoftreat = (len(numofrep)) # Finding the number of treatments involved\n\n Indexarray = range(sum(numofrep))\n\n #Reading dataset file\n Header = [] #Header\n probeset = [] #Probe ID\n genesymbol = [] #Gene Symbol\n unigeneid = [] #UniGene ID\n reppubid = [] #Representative Public ID\n otherinfo = [] #Chromosomal Location\n Expressionmatrix = [] #Expression data\n\n #Taking path to dataset file\n file_path = os.path.join(app_folder, 'uploads', dataset['file_name'])\n try:\n\td = file(file_path).read()\n except IOError: \n\tprint \"Error for reading input file in Executer:\", sys.exc_info()[0]\n\treturn 0\n\n rowspre = d.split('\\n')\n rows = []\n for l in rowspre:\n\tif l.strip():\n\t rows.append(l)\n NRows = len(rows) # number of rows in the dataset file\n Header = rows[0].split('\\t')\n for i in range(1, NRows):\n\trowlist = []\n\trowlist = rows[i].split('\\t')\n\tprobeset.append(rowlist[0])\n\tgenesymbol.append(rowlist[1])\n\tunigeneid.append(rowlist[2])\n\treppubid.append(rowlist[3])\n\totherinfo.append(rowlist[4])\n\tExpressionmatrix.append(map(float, rowlist[5:])) # Storing Expression data as 2-D array\n\n\n print \"Finish reading information for result id: \" + result_id\n\n\n \"\"\"\n Analyzing data\n This code uses other modules to perform Kruskal Wallis Test, False discovery rate, calculate Pvalue using MonteCarlo Permutation, perform PostHocComparison and generates patterns.\n It also uses Graphviz software to generate .svg and .png files to show clusters in the form of graphs.\n \"\"\"\n print \"Start selecting significant genes for result id: \" + result_id\n\n\n \"\"\"\n Calculating Hstatistics for the original or unpermuted data\n \"\"\"\n Hstatistics = [] \n for j in range(0,len(Expressionmatrix)): # Finding and storing Hstatistics, KWPvalue \n Hstat = 0; \n\tHstat = KruskalWallisTest(Expressionmatrix[j], numofrep, Indexarray) \n\tHstatistics.append(Hstat)\n\n\n \"\"\"\n Calculating Hstatistics for Permuted data \n \"\"\"\n MonteCarloCounter = [0]*(len(Expressionmatrix)) \n\n from datetime import timedelta\n start = datetime.now()\n\t\n for p in range(permutation):\n\n print 
\"Calculating Hstats for permutation \" + str(p) + \" of result id \" + str(result_id)\n\n\tIndexarray = Permutation(Indexarray)\n\n\tfor k in range(0,len(Expressionmatrix)): # Finding and storing Hstatistics, KWPvalue \n\t Hstatp = 0\n\t Hstatp = KruskalWallisTest(Expressionmatrix[k], numofrep, Indexarray)\n\t\t\n\t if Hstatp >= Hstatistics[k]:\n\t\tMonteCarloCounter[k] = MonteCarloCounter[k] + 1\n\n\t\"\"\"\n\tCode for calculating approximate running time for Monte Carlo method using the time for 5 loops\n\t\"\"\"\n\tif p == 50:\n\t try:\n\t\t#Connect to database again\n\t\t#con = MySQLdb.connect(host = \"localhost\", user = \"mdat\", passwd = \"mdat\", db = \"mdat\")\n\t\tcon = sqlite3.connect(db_path)\n\t\tc = con.cursor()\n\t\tunit_duration = datetime.now() - start\n\t\tunit_seconds = unit_duration.seconds\n\t\ttotal_seconds = int (unit_seconds * permutation / 50)\n\t\tc.execute('UPDATE results SET description = ? WHERE id = ?', (total_seconds, result_id))\n\t\tcon.commit()\n\t\tc.close()\n\t\tcon.close()\n\t except:\n\t\tprint \"Error for set running time in Executer:\", sys.exc_info()[0]\n return 0\n\n \"\"\"\n Calculating MonteCarloPvalue \n \"\"\"\n MonteCarlopvalue = []\n for counter in MonteCarloCounter:\n MonteCarlopval = 0\n\tMonteCarlopval = float(1 + counter)/float(permutation + 1)\n\tMonteCarlopvalue.append(MonteCarlopval)\n\n\n \"\"\"\n Peforming FDR check for significant differentially expressed genes\n \"\"\"\n\n Significantpvalues = []\n Significantpvalues = FDRAnalysis(MonteCarlopvalue, FDRrate)\n\n\n \"\"\"\n If there is no gene significantly differentially expressed, return\n \"\"\"\n\n if Significantpvalues == []:\n\n\tprint \"No gene is significantly differentially expressed for result id: \" + result_id\n\n\ttry:\n #Connect to database again\n\t #con = MySQLdb.connect(host = \"localhost\", user = \"mdat\", passwd = \"mdat\", db = \"mdat\")\n\t con = sqlite3.connect(db_path)\n\t c = con.cursor()\n\n\t timestamp = datetime.now()\n\n\t status_update_sql='UPDATE results SET finish_time = ?, status = ? 
WHERE id = ?'\n\n\t c.execute(status_update_sql, (timestamp, \"No gene is significantly differentially expressed\", result_id))\n\t con.commit()\n\n\t c.close()\n\t con.close()\n\n\texcept:\n\t print \"Error for updating database in Executer:\", sys.exc_info()[0]\n\n\treturn 0\n\n else:\n print \"Finish selecting significant genes for result id: \" + result_id\n\n\n\n \"\"\"\n Peforming PostHoc testing and printing the Ternary Pattern for significant differentially expressed gene\n \"\"\"\n print \"Start peforming post hoc comparisons for result id: \" + result_id\n\n \"\"\"\n Taking significant gene indexes and their pvalues, fold-change values\n \"\"\"\n Uniquepvalues = []\n Uniquepvalues = unique(Significantpvalues)\n\n SignificantGenesindex = []\n Pvaluedict = {}\n FoldChangedict = {}\n\n for pvalue in Uniquepvalues:\n genearray = []\n\tgenearray = get_index_new(MonteCarlopvalue, pvalue)\n\tfor genekey in genearray:\n\t SignificantGenesindex.append(genekey)\n\t Pvaluedict[genekey] = MonteCarlopvalue[genekey]\n\n\t exp_mean = []\n\t left = right = 0\n\t count = 0\n\t for m in numofrep:\n\t right += m\n\t\texp_mean.append(math.fsum(Expressionmatrix[genekey][left:right])/m)\n\t\tif count > 0:\n\t\t if genekey not in FoldChangedict:\n\t\t FoldChangedict[genekey] = []\n\t\t FoldChangedict[genekey].append(exp_mean[count]/exp_mean[0])\n\t\tleft = right\n\t\tcount += 1\n\n\n \"\"\"\n Taking patterns and gene indexes in each pattern\n \"\"\"\n CombinationList = [] \n for y in combinations(range(1,(numoftreat+1)),2): CombinationList.append(tuple(y))\n\n\n Patterndict = {}\n IndexPatterndict = {}\n\n for l in SignificantGenesindex:\n\t\t\n print \"Performing post hoc comparison for gene index \" + str(l) + \" of result id \" + str(result_id)\n\n Pattern = ''\n\tTreatmentdic = {}\n\tleft = right = 0\n\tcount = 0\n\n\tfor m in numofrep:\n\t right = right + m\n\t count = count+1\n\t Treatmentdic[count] = Expressionmatrix[l][left:right]\n\t left = right\n\t\t\n\tfor (a,b) in CombinationList:\n\t Pvalue,PvalueC = RanksumComparison(Treatmentdic[a],Treatmentdic[b])\n\n\t if Pvalue < 0.05:\n\t Pattern = Pattern+str(2)\n\t elif PvalueC < 0.05:\n\t\tPattern = Pattern+str(0)\n\t else:\n\t\tPattern = Pattern+str(1)\n\n\tif Pattern in Patterndict:\n\t Patterndict[Pattern].append(probeset[l])\n\t IndexPatterndict[Pattern].append(l)\n\telse:\n\t Patterndict[Pattern] = []\n\t IndexPatterndict[Pattern] = []\n\n\t Patterndict[Pattern].append(probeset[l])\n\t IndexPatterndict[Pattern].append(l)\n\n\n Binarydict = {}\n\n for keys in Patterndict.keys():\n Binarydict[int(keys)] = keys\n\n\n print \"Finish peforming post hoc analysis for result id: \" + result_id\n\n\n \"\"\"\n Creating results and store them in database\n \"\"\"\n\n print \"Start creating results for result id: \" + result_id\n\n #Generating directories where result files will be stored\n result_dir_path = os.path.join(app_folder, 'static', 'results', str(result_id))\n os.mkdir(result_dir_path)\n\n first_dir_path = os.path.join(result_dir_path, 'Firstlevel')\n os.mkdir(first_dir_path)\n\n second_dir_path = os.path.join(first_dir_path, 'Secondlevel')\n os.mkdir(second_dir_path)\n\n \"\"\"\n Creating overview graph\n \"\"\"\n Overviewclusters(SignificantGenesindex, Patterndict, numoftreat, numofrep, nameoftreat, FDRrate, result_dir_path)\n\n \"\"\"\n Creating all graphical patterns for the first level cluster\n \"\"\"\n for pattern in Patterndict.keys():\n\tFirstLevelClusters(pattern, len(Patterndict[pattern]), numoftreat, nameoftreat, 
first_dir_path)\n\n\n \"\"\"\n Creating all graphical patterns for the second level cluster\n \"\"\"\n Resultdict, Symboldict = SecondLevelClusterDictionary(nameoftreat, numoftreat, Patterndict)\n\n Sumdict = {}\n Totalsum = 0\n\n for a in Resultdict.keys():\n\tSum = 0\n\tfor b in Resultdict[a]:\n\t Sum = Sum + len(Patterndict[b])\n\t Totalsum = Totalsum + len(Patterndict[b])\n\t\n\tSumdict[a] = Sum\n\n\t\t\n Contractratiodict = {}\n\n for second_cluster in Resultdict.keys():\n\n\tallpatterns = []\n\tAllpattern = {}\n\tallpatterns = Resultdict[second_cluster]\n\tcontractiblecount = 0.0;\n\n\tfor eachpattern in allpatterns:\n\t Allpattern[eachpattern] = len(Patterndict[eachpattern])\n\t if Contractibility(numoftreat,str(eachpattern)):\n\t contractiblecount = contractiblecount + len(Patterndict[eachpattern])\n\n\tContractratio = float(contractiblecount)/float(Sumdict[second_cluster])\n\tContractratiodict[second_cluster] = round(Contractratio,3)\n\n\tSecondLevelClusters(second_cluster, Allpattern, Contractratio, Resultdict, Symboldict, Sumdict, Patterndict, numoftreat, nameoftreat, second_dir_path)\n\n\n \"\"\"\n Creates gene files for all significant genes and genes in each pattern\n \"\"\"\n SignificantGeneListFiles(SignificantGenesindex, Pvaluedict, FoldChangedict, Header, probeset, genesymbol, unigeneid, reppubid, otherinfo, numoftreat, nameoftreat, numofrep, Expressionmatrix, result_dir_path)\n \n #geneIndex = range(NRows-1)\n #AllGeneListFiles(geneIndex, Pvaluedict, FoldChangedict, Header, probeset, genesymbol, unigeneid, reppubid, otherinfo, numoftreat, nameoftreat, numofrep, Expressionmatrix, result_dir_path)\n\n for pattern in IndexPatterndict.keys():\n\tPatternGeneListFiles(pattern, IndexPatterndict, Pvaluedict, FoldChangedict, Header, probeset, genesymbol, unigeneid, reppubid, otherinfo, numoftreat, nameoftreat, numofrep, Expressionmatrix, first_dir_path)\n\n for symbol in Symboldict.keys():\n MetaPatternGeneListFiles(symbol, Symboldict, Resultdict, IndexPatterndict, Pvaluedict, FoldChangedict, Header, probeset, genesymbol, unigeneid, reppubid, otherinfo, numoftreat, nameoftreat, numofrep, Expressionmatrix, second_dir_path)\n\n\n \"\"\"\n Storing all results in database\n \"\"\"\n\n import cPickle\n import base64\n\n Binarydict_string = base64.b64encode(cPickle.dumps(Binarydict, cPickle.HIGHEST_PROTOCOL))\n Patterndict_string = base64.b64encode(cPickle.dumps(Patterndict, cPickle.HIGHEST_PROTOCOL))\n IndexPatterndict_string = base64.b64encode(cPickle.dumps(IndexPatterndict, cPickle.HIGHEST_PROTOCOL))\n MonteCarlopvalue_string = base64.b64encode(cPickle.dumps(MonteCarlopvalue, cPickle.HIGHEST_PROTOCOL))\n\n Pvaluedict_string = base64.b64encode(cPickle.dumps(Pvaluedict, cPickle.HIGHEST_PROTOCOL))\n FoldChangedict_string = base64.b64encode(cPickle.dumps(FoldChangedict, cPickle.HIGHEST_PROTOCOL))\n\n Resultdict_string = base64.b64encode(cPickle.dumps(Resultdict, cPickle.HIGHEST_PROTOCOL))\n Symboldict_string = base64.b64encode(cPickle.dumps(Symboldict, cPickle.HIGHEST_PROTOCOL))\n Sumdict_string = base64.b64encode(cPickle.dumps(Sumdict, cPickle.HIGHEST_PROTOCOL))\n Contractratiodict_string = base64.b64encode(cPickle.dumps(Contractratiodict, cPickle.HIGHEST_PROTOCOL))\n\n try:\n\t#Connect to database again to store results\n\t#con = MySQLdb.connect(host = \"localhost\", user = \"mdat\", passwd = \"mdat\", db = \"mdat\")\n\tcon = sqlite3.connect(db_path)\n\tcon.text_factory = str\n\tc = con.cursor()\n\n\t#Set status to Completed\n\ttimestamp = datetime.now()\n\n\tstatus_update_sql='UPDATE 
results SET finish_time = ?, status = ?, binary_dict = ?, pattern_dict = ?, index_pattern_dict = ?, monte_carlo_pvalue = ?, result_dict = ?, symbol_dict = ?, sum_dict = ?, contract_dict = ?, pvalue_dict = ?, fold_change_dict = ? WHERE id = ?'\n\n\tc.execute(status_update_sql, (timestamp, \"Completed\", Binarydict_string, Patterndict_string, IndexPatterndict_string, MonteCarlopvalue_string, Resultdict_string, Symboldict_string, Sumdict_string, Contractratiodict_string, Pvaluedict_string, FoldChangedict_string, result_id))\n\n\tcon.commit()\n\n\tc.close()\n\tcon.close()\n\n except:\n\tprint \"Error for updating database in Executer:\", sys.exc_info()[0]\n\treturn 0\n\n\n print \"Finish creating results for result id: \" + result_id\n return 1", "title": "" }, { "docid": "bc820deef67a0c23c457aaf6321cf2ea", "score": "0.5447537", "text": "def eval(result_path):\n with open(result_path, 'r') as f:\n df = pd.read_csv(f)\n print(df.head())\n print(len(df))\n df.rename(columns={'label': 'goldlabel'}, inplace=True)\n df.rename(columns={'sim_score': 'sim_scores'}, inplace=True)\n # df.rename(columns={'binary_label': 'goldlabel'}, inplace=True)\n\n df[['goldlabel']] = df[['goldlabel']].astype(int)\n\n # df.sort_values(by='sim_scores', ascending=True, inplace=True)\n threshold_candidates = list(set(df['sim_scores'].values))\n # threshold_candidates = [x for x in threshold_candidates if x>=0.5]\n threshold_candidates.sort(reverse=False)\n print(threshold_candidates)\n # threshold_candidates.sort(reverse=True)\n print(threshold_candidates)\n print(len(threshold_candidates))\n print(df.head())\n max_F = 0\n max_P = 0\n max_R = 0\n max_SP = 0\n optimum_threshold = 0\n ps = []\n rs =[]\n threshs=[]\n for threshold in threshold_candidates:\n df.loc[df[df.sim_scores >= threshold].index, 'syslabel'] = 1\n df.loc[df[df.sim_scores < threshold].index, 'syslabel'] = 0\n syslabels = df['syslabel'].values\n goldlabels = df['goldlabel'].values\n F, P, R, SP = get_eval_metrics(syslabels, goldlabels)\n print(F,P,R)\n # if max_F < F:\n if max_P <= P:\n # if max_SP < SP:\n max_F = F\n max_P = P\n max_R = R\n max_SP = SP\n optimum_threshold = threshold\n ps.append(P)\n rs.append(R)\n threshs.append(optimum_threshold)\n # print(\"max F-measure: {:0.4f}, P: {:0.4f}, R: {:0.4f}, threshold: {:0.6f}\".format(max_F, max_P, max_R, optimum_threshold))\n print(\"max P: {:0.4f}, F: {:0.4f}, R: {:0.4f}, SP: {:0.4f}, threshold: {:0.6f}\".format(max_P, max_F, max_R, max_SP, optimum_threshold))\n # if threshold > 0.33:\n # break\n return ps, rs, threshs", "title": "" }, { "docid": "21d2ee67a225a7928ed3abdba2ae5c70", "score": "0.543208", "text": "def evaluate(self):\n res = 0\n\n if self.order == 0:\n # simple evaluation\n for k, v in self.terms.items():\n v = re.sub('x', 'self.baseVert[0]', v)\n v = re.sub('y', 'self.baseVert[1]', v)\n v = re.sub('z', 'self.baseVert[2]', v)\n res += eval(v)\n\n elif self.order == 1:\n # integration along a line\n for k, v in self.terms.items():\n k0, = k\n det = self.dVerts[0][k[0]]\n v = re.sub('x', '(self.baseVert[0] + xsi*self.dVerts[0][0])', v)\n v = re.sub('y', '(self.baseVert[1] + xsi*self.dVerts[0][1])', v)\n v = re.sub('z', '(self.baseVert[2] + xsi*self.dVerts[0][2])', v)\n self.funcXsiStr = v\n res += scipy.integrate.quad(self.funcXsi, 0., 1.)[0] * det\n\n elif self.order == 2:\n # integration along a triangle\n for k, v in self.terms.items():\n k0, k1 = k\n det = self.dVerts[0][k0] * self.dVerts[1][k1] - self.dVerts[0][k1] * self.dVerts[1][k0]\n v = re.sub('x', '(self.baseVert[0] + 
xsi*self.dVerts[0][0] + eta*self.dVerts[1][0])', v)\n v = re.sub('y', '(self.baseVert[1] + xsi*self.dVerts[0][1] + eta*self.dVerts[1][1])', v)\n v = re.sub('z', '(self.baseVert[2] + xsi*self.dVerts[0][2] + eta*self.dVerts[1][2])', v)\n self.funcXsiEtaStr = v\n def loEta(xsi):\n return 0.0\n def hiEta(xsi):\n return 1.0 - xsi\n res += scipy.integrate.dblquad(self.funcXsiEta, 0., 1., loEta, hiEta)[0] * det\n\n elif self.order == 3:\n # integration over tetrahedron\n for k, v in self.terms.items():\n k0, k1, k2 = k\n jac = [self.dVerts[0], self.dVerts[1], self.dVerts[2]]\n det = scipy.linalg.det(jac)\n v = re.sub('x', '(self.baseVert[0] + xsi*self.dVerts[0][0] + eta*self.dVerts[1][0] + zet*self.dVerts[2][0])', v)\n v = re.sub('y', '(self.baseVert[1] + xsi*self.dVerts[0][1] + eta*self.dVerts[1][1] + zet*self.dVerts[2][1])', v)\n v = re.sub('z[^et]', '(self.baseVert[2] + xsi*self.dVerts[0][2] + eta*self.dVerts[1][2] + zet*self.dVerts[2][2])', v)\n self.funcXsiEtaZetStr = v\n def loEta(xsi):\n return 0.0\n def hiEta(xsi):\n return 1.0 - xsi\n def loZet(xsi, eta):\n return 0.0\n def hiZet(xsi, eta):\n return 1.0 - xsi - eta\n res += scipy.integrate.tplquad(self.funcXsiEtaZet, 0., 1., loEta, hiEta, loZet, hiZet)[0] * det\n\n else:\n raise RunTimeError, 'ERROR: 0 <= order <= 3 but got {0}'.format(order)\n\n return res", "title": "" }, { "docid": "b17ad3db676f22446dcf429034304e90", "score": "0.5430135", "text": "def __call__(self, filename: str) -> List[float]:\n raise NotImplementedError", "title": "" }, { "docid": "308839bb9cb3373fa2f9f36114a2ce73", "score": "0.543001", "text": "def calculate_and_save_values_and_lcs_from_legacy():\n # facciamo i controlli se sia plagio e aggiungiamo al dataset (fp o tp)\n dictionary, labels, values, val_clustering, val_threshold, avg, result_of_confrontation, file_name1, file_name2 \\\n = calculate_results_and_informations()\n # salva il file del risultato nella cartella temporanea\n write_result_values_in_static_temp(dictionary, avg, result_of_confrontation, file_name1, file_name2)\n # _, _ = clean_and_generate_lcs_and_get_file_names() # non utilizziamo il return dei nomi dei files(già li abbiamo)\n # commentato per permettere il funzionamento su linux", "title": "" }, { "docid": "2183d4830d30e8f413730558efe8c8c1", "score": "0.5427983", "text": "def outputs(self):\n return {\"path_to_dtb_evaluation_result\": File_IO(\n self.node.outputs[0])}", "title": "" }, { "docid": "0f18f7a26f22d2133eff051673de6add", "score": "0.5418956", "text": "def summarize_sampler(sampler, G_raw, true_value, output_filename, nparam = None, corner_plot=True):\n\n\t\n\tsummary_type = \"parameter_estimate\"\t\n\tdres = sampler.results\n\ttf = open(output_filename+ \"_parameter_summary.txt\", \"w+\")\n\n\tsamples = dres.samples #samples\n\tweights = np.exp(dres['logwt'] - dres['logz'][-1]) # normalized weights\n\tparameter_estimate = []\n\tfor num in range(nparam): # for each parameter\n\t CI = dyfunc.quantile(dres['samples'][:, num], [0.025, 0.5, 0.975], weights=weights)\n\t parameter_estimate.append(CI[1])\n\t if num ==0:\n\t print (\"The median estimate and 95% credible interval for beta is \" + str(round(CI[1],5))+\" [\"+ str(round(CI[0],5))+ \",\" + str(round(CI[2],5))+ \"]\")\n\t tf.write(\"The median estimate and 95% credible interval for beta is \" + str(round(CI[1],5))+\" [\"+ str(round(CI[0],5))+ \",\" + str(round(CI[2],5))+ \"]\\n\")\n\t elif num ==1:\n\t print (\"The median estimate and 95% credible interval for epsilon is \" + str(round(CI[1],5))+\" [\"+ str(round(CI[0],5))+ 
\",\" + str(round(CI[2],5))+ \"]\")\n\t tf.write(\"The median estimate and 95% credible interval for epsilon is \" + str(round(CI[1],3))+\" [\"+ str(round(CI[0],5))+ \",\" + str(round(CI[2],5))+ \"]\\n\")\n\t else:\n\t print (\"Printing median and 95% credible interval for the rest of the unknown parameters\")\n\t print (str(round(CI[1],3))+\" [\"+ str(round(CI[0],3))+ \",\" + str(round(CI[2],3))+ \"]\")\n\t tf.write(\"median and 95% credible interval for the rest of the unknown parameter #\" +str(num)+\"\\n\")\n\t tf.write(str(round(CI[1],3))+\" [\"+ str(round(CI[0],3))+ \",\" + str(round(CI[2],3))+ \"]\\n\")\n\t\n\t\n\tdlogZdynesty = dres.logz[-1] # value of logZ\n\tdlogZerrdynesty = dres.logzerr[-1] # estimate of the statistcal uncertainty on logZ\n\n\t# output log marginal likelihood\n\ttf.write(\"Log marginalized evidence of the network hypothesis is = \" + str(round(dlogZdynesty,3))+ \"+/- \"+ str(round(dlogZerrdynesty,3))+\"\\n\")\n\tprint('Log marginalised evidence (using dynamic sampler) is {} +/- {}'.format(round(dlogZdynesty,3), round(dlogZerrdynesty,3)))\n\n\ttf.close()\n\t\n\tif corner_plot:\n\t\tdpostsamples = resample_equal(samples, weights)\n\t\n\t\tfig = corner.corner(dpostsamples, labels=[r\"$beta$\", r\"$epsilon$\"], quantiles=[0.16, 0.5, 0.84], truths= true_value, truth_color =\"red\" ,hist_kwargs={'density': True})\n\t\n\t\tfig.savefig(output_filename + \"_\" + summary_type +\"_posterior.png\")\n\t\n\treturn parameter_estimate", "title": "" }, { "docid": "d350a72898c8abd1c55e601eb052b4b9", "score": "0.5418062", "text": "def evaluate_detections(self, all_boxes, dontcare):\n self._write_voc_results_file(all_boxes)\n\n\n aps, map = self.do_python_eval()\n return aps, map", "title": "" }, { "docid": "fd7e9a536a17ca36434e0035f9d5d66a", "score": "0.54043525", "text": "def _store_parts_of_eval_sample(self, sample, result):", "title": "" }, { "docid": "647259d4aa0a7d070cd3eb52141656d2", "score": "0.53992623", "text": "def main(file_input):\n file = open(file_input, 'r')\n result_list = file_reader(file)\n file.close()\n print(result_list[1]/result_list[0]*100)\n # return(result_list[1]/result_list[0]*100)", "title": "" }, { "docid": "ca7a52893c7c52022e9e8431ae525cf3", "score": "0.5398665", "text": "def eval(self):\t\n\t\treturn eval_lattice(self)", "title": "" }, { "docid": "f40fdc3e880b83674ccd82b7db4afd8a", "score": "0.5396374", "text": "def generate_results(self,samples,file,iteration,verify):\n self.iteration = iteration\n response = self.evaluate(samples,verify)\n self.save_results(file,samples,response)", "title": "" }, { "docid": "75e7ecaf8c47aed65904317623c33c5c", "score": "0.539595", "text": "def run_eval():\n output_dir = '../outputs/'\n result_dir = '../results/'\n for filename in os.listdir(output_dir):\n if filename.endswith('.txt'):\n with open(result_dir + filename, 'w+') as file:\n os.system('./../trec_eval.9.0/trec_eval -m map -m P.30 ../data/qrels ' + output_dir + filename + ' > ' + result_dir + filename)", "title": "" }, { "docid": "bacb3bb566839182c5f11e509d56b0ae", "score": "0.53953755", "text": "def evaluate(self, IS, x):\n\n random_seed = self._prg.randint(1,100000) \t\t\t# a random int that serves as seed for matlab\n self.ensureBoundaries(x)\n\n fn = -1.0\n FnVar = -1.0\n elapsed_time = 0.0\n\n try:\n start_time = time.time()\n # /usr/local/matlab/2015b/bin/matlab -nodisplay -nosplash -nodesktop -r \"run('ATO_run.m');exit;\"\n # 
https://www.mathworks.com/matlabcentral/answers/97204-how-can-i-pass-input-parameters-when-running-matlab-in-batch-mode-in-windows\n\n runcmd = \"b=\"+self.convertXtoString(x)+\";length=\"+str(self.getRunlength(IS))+\";seed=\"+str(random_seed)+\";run(\\'ATO_run.m\\');exit;\"\n #print \"runcmd=\"+runcmd\n stdout = subprocess.check_output([\"/usr/local/matlab/2015b/bin/matlab\", \"-nodisplay\", \"-nojvm\",\n \"-nosplash\", \"-nodesktop\", \"-r\", runcmd])\n elapsed_time = time.time() - start_time\n\n posfn = stdout.find(\"fn=\") + 3\n posFnVar = stdout.find(\"FnVar=\") + 6\n if ((posfn > 2) and (posFnVar > 5)):\n posfnEnd = stdout.find(\"\\n\",posfn)\n posFnVarEnd = stdout.find(\"\\n\",posFnVar)\n fn = stdout[posfn:posfnEnd]\n FnVar = stdout[posFnVar:posFnVarEnd]\n except subprocess.CalledProcessError, e:\n elapsed_time = time.time() - start_time\n\n # elapsed_time is a reasonable cost\n # fn and FnVar are the results of interest\n #print \"runlength=\"+str(self.getRunlength(IS))+\", x=\"+self.convertXtoString(x)+\", fn=\"+str(fn)+\" , FnVar=\"+str(FnVar)+\" , elapsed_time=\"+str(elapsed_time)\n # for presentation, can be removed\n\n return self._mult * float(fn) # return only the mean", "title": "" }, { "docid": "66075430ca31715be189676688d7b399", "score": "0.53945017", "text": "def evaluate(self, ind, **kwargs):\n \n \n# strategy =eval(ind.phenotype)\n \n params_dict = dict()\n params_dict['dollar_neutral']=True\n params_dict['short_leverage']=0.5\n params_dict['long_leverage']=0.5\n \n # Evaluate the fitness of the phenotype\n# print(ind.phenotype)\n strategy = eval(ind.phenotype)\n nan_frac = np.count_nonzero(~np.isnan(strategy))/np.size(strategy)\n if nan_frac <=0.6:\n fitness = 0\n else:\n\n string = 'self.fitness_exp(strategy,params_dict)'\n \n fitness = eval(string)\n if fitness == np.inf or fitness == -np.inf:\n fitness = 0\n# logging.basicConfig(format='%(process)d-%(message)s')\n# logging.info(\"%50s : %10s\"%(ind.phenotype,fitness))\n \n \n\n\n\n f = str(round(fitness,3))\n prefix = f + \"\\t : \"\n preferredWidth = 70\n wrapper = textwrap.TextWrapper(initial_indent=prefix, width=preferredWidth,\n subsequent_indent=' '*len(prefix))\n message = ind.phenotype\n\n print(wrapper.fill(message))\n\n print('\\n')\n\n return fitness", "title": "" }, { "docid": "51fadc914a46539385a7eec810bab467", "score": "0.5391205", "text": "def test_dataset(self):\n return self.eval_dataset()", "title": "" }, { "docid": "0a765a48c588a1602184f6e9ad3c5798", "score": "0.5375678", "text": "def train_eval(self):\n\t\tpass", "title": "" }, { "docid": "05d83e5c22efd8d21ffa010fddcf9054", "score": "0.53749824", "text": "def report_ner(self, results, output_file):\n with open(output_file, \"w\", encoding='utf8') as f:\n to_write = []\n for block in results:\n for line in block:\n to_write.append(line + \"\\n\")\n to_write.append(\"\\n\")\n\n f.writelines(to_write)\n eval_lines = return_report(output_file)\n return eval_lines", "title": "" }, { "docid": "b514c4c9adf1246aad275f9976519d21", "score": "0.5372359", "text": "def results_analysis(directory):\r\n files = [f for f in listdir(directory) if isfile(join(directory, f))]\r\n n_realizations = len(files)\r\n sample = load(open(directory + \"/\" + files[0], \"rb\"))\r\n n_techs = len(sample.tech_dict)\r\n # Initialize an array to store a time series of leakage for every LDAR program in every\r\n # realization\r\n leakage_timeseries = np.zeros([n_techs + 1, sample.time.n_timesteps, n_realizations])\r\n leaks_found = [[] for _ in range(n_techs)]\r\n techs = 
list(sample.tech_dict.keys())\r\n no_repair_npv = dict()\r\n null_npv = dict()\r\n npv_keys = [\"Capital\", \"Maintenance\", \"Repair\", \"Finding\", \"Gas\", \"Total\"]\r\n for key in npv_keys:\r\n no_repair_npv[key] = np.zeros([n_techs, n_realizations])\r\n null_npv[key] = np.zeros([n_techs - 1, n_realizations])\r\n # iterate through each realization\r\n for jindex in range(0, len(files)):\r\n sample = load(open(directory + \"/\" + files[jindex], \"rb\"))\r\n # iterate through each LDAR program\r\n for index in range(0, len(techs)):\r\n leakage_timeseries[index, :, jindex] = sample.tech_dict[techs[index]].leakage\r\n if techs[index] == \"Null\":\r\n leaks_found[index].append(sample.tech_dict[techs[index]].null_repaired)\r\n else:\r\n leaks_found[index].append(sample.tech_dict[techs[index]].leaks_found)\r\n # iterate through each category of value\r\n no_repair_npv_temp, null_npv_temp = npv_calculator(directory + \"/\" + files[jindex])\r\n for key in null_npv.keys():\r\n no_repair_npv[key][:, jindex] = no_repair_npv_temp[key]\r\n null_npv[key][:, jindex] = null_npv_temp[key]\r\n return no_repair_npv, null_npv, leaks_found, leakage_timeseries", "title": "" }, { "docid": "25470d6333631eb97a0439f1bb910b7b", "score": "0.537051", "text": "def save_results(self):\n\t\troot = 'eval_results/LIU/' if self.set == 'liu' else 'eval_results/MiddV3/'\n\t\tos.makedirs(root, exist_ok=True)\n\n\t\tfile_string = root + 'eval_results_{}_{}.txt'.format(self.set, self.mode)\n\t\twrite_eval(file_string, self.details_string, self.results_string)", "title": "" }, { "docid": "745e27d8bb9fe9af4316fd7815233bd6", "score": "0.5365696", "text": "def test_file_all(self, file_name, sess):\n scores = self.extract_f0_file(file_name, sess)\n return scores", "title": "" }, { "docid": "6c620b030ba83b6c110ecd85ebbd9aed", "score": "0.536196", "text": "def main() -> None:\n inputs = load(\"dayx.txt\")\n value = myfunc(inputs)\n\n print(f\"Total number of inputs: {len(inputs)}\")\n [print(input) for input in inputs]\n print(f\"The value: {value}\")", "title": "" }, { "docid": "a1a57aca5dfbfa3e80f912a862a21119", "score": "0.53560966", "text": "def main():\n parser = argparse.ArgumentParser(description=\"mir_eval pattern discovery \"\n \"evaluation\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-o',\n dest='output_file',\n default=None,\n type=str,\n action='store',\n help='Store results in json format')\n parser.add_argument(\"reference_file\",\n action=\"store\",\n help=\"Path to the reference file.\")\n parser.add_argument(\"estimated_file\",\n action=\"store\",\n help=\"Path to the estimation file.\")\n parameters = vars(parser.parse_args(sys.argv[1:]))\n\n # Load in data\n ref_patterns = mir_eval.io.load_patterns(parameters['reference_file'])\n est_patterns = mir_eval.io.load_patterns(parameters['estimated_file'])\n\n # Compute all the scores\n scores = mir_eval.pattern.evaluate(ref_patterns, est_patterns)\n print(\"{} vs. 
{}\".format(os.path.basename(parameters['reference_file']),\n os.path.basename(parameters['estimated_file'])))\n eval_utilities.print_evaluation(scores)\n\n if parameters['output_file']:\n print('Saving results to: ', parameters['output_file'])\n eval_utilities.save_results(scores, parameters['output_file'])", "title": "" }, { "docid": "3bb3d02c1915bad2a9bbebe6572c96d5", "score": "0.5354958", "text": "def numericEvaluation(track):\n\n# pitchVariety_ = [0.15, 0.25, 0.45]\n\n keyCentric_ = [0.1, 0.3, 0.5]\n\n# noteDensity_ = [0.05, 0.175, 0.25]\n restDensity_ = [0.002, 0.002, 0.006]\n# rhythmicRange_ = [0.1, 0.1333, 0.2]\n# rhythmicVariety_ = [0.1, 0.25, 0.35]\n\n val = 0\n# val += rangeCompare(pitchVariety(track), pitchVariety_)\n val += rangeCompare(keyCentric(track), keyCentric_)\n rhy_features = rhythmEvaluator(track)\n# val += rangeCompare(rhy_features[0], noteDensity_)\n val += rangeCompare(rhy_features[1], restDensity_)\n# val += rangeCompare(rhy_features[2], rhythmicRange_)\n# val += rangeCompare(rhy_features[3], rhythmicVariety_)\n\n return val", "title": "" }, { "docid": "2ed4401ed1ecc5f74f4e8b658839038a", "score": "0.5349962", "text": "def evaluate(self, IS, x):\n\n random_seed = self._prg.randint(1,100000) \t\t\t# a random int that serves as seed for matlab\n self.ensureBoundaries(x)\n\n fn = -1.0\n FnVar = -1.0\n elapsed_time = 0.0\n\n try:\n start_time = time.time()\n # /usr/local/matlab/2015b/bin/matlab -nodisplay -nosplash -nodesktop -r \"run('ATO_run.m');exit;\"\n # https://www.mathworks.com/matlabcentral/answers/97204-how-can-i-pass-input-parameters-when-running-matlab-in-batch-mode-in-windows\n\n runcmd = \"b=\"+self.convertXtoString(x)+\";length=\"+str(self.getRunlength(IS))\\\n +\";seed=\"+str(random_seed)+\";run(\\'\" + self._pathToMATLABScripts + \"ATO_run.m\\');exit;\"\n #print \"runcmd=\"+runcmd\n stdout = subprocess.check_output([\"/usr/local/matlab/2015b/bin/matlab\", \"-nodisplay\", \"-nojvm\",\n \"-nosplash\", \"-nodesktop\", \"-r\", runcmd])\n elapsed_time = time.time() - start_time\n\n posfn = stdout.find(\"fn=\") + 3\n posFnVar = stdout.find(\"FnVar=\") + 6\n if ((posfn > 2) and (posFnVar > 5)):\n posfnEnd = stdout.find(\"\\n\",posfn)\n posFnVarEnd = stdout.find(\"\\n\",posFnVar)\n fn = stdout[posfn:posfnEnd]\n FnVar = stdout[posFnVar:posFnVarEnd]\n except subprocess.CalledProcessError, e:\n elapsed_time = time.time() - start_time\n\n # elapsed_time is a reasonable cost\n # fn and FnVar are the results of interest\n #print \"runlength=\"+str(self.getRunlength(IS))+\", x=\"+self.convertXtoString(x)+\", fn=\"+str(fn)+\" , FnVar=\"+str(FnVar)+\" , elapsed_time=\"+str(elapsed_time)\n # for presentation, can be removed\n\n return self._mult * float(fn) # return only the mean", "title": "" }, { "docid": "695313328b9788fa745b8d728f21275a", "score": "0.53493255", "text": "def evaluate_from_file(args):\n sentence_bleu_scorer = SentenceBleuScorer('')\n\n bleus_hypotheses = [] * len(args.hypotheses)\n for n_system, data_filename in list(enumerate(args.hypotheses)):\n hypotheses = file2list(data_filename)\n references = file2list(args.references[n_system])\n bleus = []\n for hyp_line, ref_line in zip(hypotheses, references):\n sentence_bleu_scorer.set_reference(ref_line.split())\n bleu = sentence_bleu_scorer.score(hyp_line.split()) * 100\n bleus.append(bleu)\n bleus_hypotheses.append(bleus)\n bleus_hypotheses = np.asarray(bleus_hypotheses)\n average_bleus = np.transpose(bleus_hypotheses).mean(axis=1)\n\n\n bleus_baselines = [] * len(args.baselines)\n for n_system, 
data_filename in list(enumerate(args.baselines)):\n hypotheses = file2list(data_filename)\n references = file2list(args.base_references[n_system])\n bleus = []\n for hyp_line, ref_line in zip(hypotheses, references):\n sentence_bleu_scorer.set_reference(ref_line.split())\n bleu = sentence_bleu_scorer.score(hyp_line.split()) * 100\n bleus.append(bleu)\n bleus_baselines.append(bleus)\n bleus_baselines = np.asarray(bleus_baselines)\n average_bleu_baselines = np.transpose(bleus_baselines).mean(axis=1)\n\n return average_bleus, average_bleu_baselines", "title": "" }, { "docid": "b3f3b718ca6670ee9dec787fac329abf", "score": "0.53345746", "text": "def evaluate(data,outputf,testfile,labels,model):\r\n filept=open(outputf+\"/info_\"+testfile.split(\"/\")[-1].split(\".\")[0]+\"_\"+model+\".csv\", \"w\")\r\n filep=csv.writer(filept)\r\n filep.writerow([\"Number of data-points \",len(data)])\r\n print (\"Number of data-points: \"+str(len(data)))\r\n filep.writerow([\"Number of labels \",len(labels)])\r\n print (\"Number of labels: \"+str(len(labels)))\r\n perf=float(len([row[1] for row in data if row[1]==row[2]]))/float(len(data))\r\n \r\n filep.writerow([\"Accuracy \",str(perf*100)+\"%\"])\r\n filep.writerow([])\r\n print (\"Accuracy: \"+str(perf*100)+\"%\\n\")\r\n \r\n data.sort(key=operator.itemgetter(0),reverse=True)\r\n y_pred=[row[1] for row in data]\r\n y_true=[row[2] for row in data]\r\n\r\n for n in labels:\r\n tp=float(sum([(y_true[i]==n) and (y_pred[i]==n) for i in range(len(y_true))]))\r\n tn=float(sum([(y_true[i]!=n) and (y_pred[i]!=n) for i in range(len(y_true))]))\r\n fp=float(sum([(y_true[i]!=n) and (y_pred[i]==n) for i in range(len(y_true))]))\r\n fn=float(sum([(y_true[i]==n) and (y_pred[i]!=n) for i in range(len(y_true))]))\r\n #p=tp/(tp+fp)\r\n #r=tp/(tp+fn)\r\n #print \"precision = \", p\r\n #print \"recall = \", r\r\n #print \"accuracy =\", ((tp+tn)/(tp+tn+fp+fn))*100\r\n if (tp+fp)==0:\r\n precision=0\r\n else:\r\n precision=(tp/(tp+fp))*100\r\n if (tp+fn)==0:\r\n recall=0\r\n else:\r\n recall=(tp/(tp+fn))*100\r\n fscore=(200*tp)/(2*tp+fp+fn)\r\n #fscore= ((p*r)/(p+r))*2*100\r\n filep.writerow([\"Label \",n])\r\n filep.writerow([\"F-score \",str(fscore)+\"%\"])\r\n filep.writerow([\"Precision \",str(precision)+\"%\"])\r\n filep.writerow([\"Recall \",str(recall)+\"%\"])\r\n filep.writerow([\"TP \",int(tp),\"FP \",int(fp),\"TN \",int(tn),\"FN \",int(fn)])\r\n filep.writerow([])\r\n \r\n print (\"F-score for label-\"+str(n)+\" is: \"+str(fscore)+\"%\")\r\n filept.close()\r\n \r\n print (\"Printing output file\")\r\n with xlsxwriter.Workbook(outputf+\"/output_\"+testfile.split(\"/\")[-1].split(\".\")[0]+\"_\"+model+'.xlsx') as workbook:\r\n worksheet = workbook.add_worksheet()\r\n row=0\r\n col=0\r\n worksheet.write(row, col, \"probabilities\")\r\n worksheet.write(row, col + 1, \"predicted_labels\")\r\n worksheet.write(row, col + 2, \"actual_labels\")\r\n worksheet.write(row, col + 3, \"preprocessed_text\")\r\n worksheet.write(row, col + 4, \"original_text\")\r\n \r\n for line in data:\r\n row+=1\r\n worksheet.write(row, col, line[0])\r\n worksheet.write(row, col + 1, line[1])\r\n worksheet.write(row, col + 2, line[2])\r\n worksheet.write(row, col + 3, line[3])\r\n worksheet.write(row, col + 4, line[4])\r\n \r\n print (\"Printing misclassification file\") \r\n with xlsxwriter.Workbook(outputf+\"/misclassification_\"+testfile.split(\"/\")[-1].split(\".\")[0]+\"_\"+model+'.xlsx') as workbook:\r\n worksheet = workbook.add_worksheet()\r\n row=0\r\n col=0\r\n worksheet.write(row, col, 
\"probability\")\r\n worksheet.write(row, col + 1, \"predicted_label\")\r\n worksheet.write(row, col + 2, \"actual_label\")\r\n worksheet.write(row, col + 3, \"preprocessed_text\")\r\n worksheet.write(row, col + 4, \"original_text\")\r\n \r\n for line in data:\r\n if line[1]!=line[2]:\r\n row+=1\r\n worksheet.write(row, col, line[0])\r\n worksheet.write(row, col + 1, line[1])\r\n worksheet.write(row, col + 2, line[2])\r\n worksheet.write(row, col + 3, line[3])\r\n worksheet.write(row, col + 4, line[4])\r\n\r\n \r\n\r\n\r\n \"\"\"print (\"Printing output file\")\r\n with open(outputf+\"/output_\"+testfile.split(\"/\")[-1].split(\".\")[0]+\"_\"+\".csv\", \"w\") as f:\r\n writer = csv.writer(f)\r\n writer.writerow([\"y_predicted\",\"y_actual\",\"tweets\"])\r\n for line in data:\r\n writer.writerow(line)\r\n\r\n print (\"Printing misclassification file\")\r\n with open(outputf+\"/misclassification_\"+testfile.split(\"/\")[-1].split(\".\")[0]+\"_\"+\".csv\", \"w\") as f:\r\n writer = csv.writer(f)\r\n writer.writerow([\"y_predicted\",\"y_actual\",\"tweets\"])\r\n for line in data:\r\n if line[0]!=line[1]:\r\n writer.writerow(line)\"\"\"", "title": "" }, { "docid": "672931ed9d80f2f2e1b6c9a8d5a5cc5c", "score": "0.5331412", "text": "def single_tests():\r\n real = 50; #pred 68 , 67 -around 15%\r\n forml2 = '''{ \"m\":\"01\", \"tomate\" :0.74598 , \r\n \"pasta\" :0.1 , \r\n \"salt\" :0.04 , \r\n \"pepper\" :0.0001 } ''' \r\n # real = 73 #pred 73 high, 77,74...\r\n # forml2 = '''{ \"m\":\"1\", \"10\" : 1 } '''\r\n real = 54 #pred 57, 58 low prod\r\n forml2 = '''{ \"m\":\"02\", \"pasta\" :0.31245 , \r\n \"rice\" :0.1 , \r\n \"salt\" :0.001 , \r\n \"pepper\" :0.001 , \r\n \"chicken\" :0.001 , \r\n \"cucumber\" :0.0005 , \r\n \"pinaple\" :0.0005 , \r\n \"tomato\" :0.0005 , \r\n \"c10\" :0.0005 , \r\n \"c20\" :0.00005 } '''; \r\n # real = 101 #pred 101, 100\r\n # forml2 = '''{ \"m\":\"03\", \"c10\" :1 } '''\r\n # beginc = \"\"\"\r\n real = 88 #pred 89,88\r\n forml2 = '''{ \"m\":\"03\", \"pasta\" :0.531 , \r\n \"tomato\" :0.2 , \r\n \"salt\" :0.005 , \r\n \"pepper\" :0.004 } '''\r\n beginc = \"\"\"\r\n \"\"\" #end of comment\r\n \r\n pred = DAO.get(forml2)\r\n print(\"\\n\\n\\n_R = {} and P = {}\" .format(real ,pred ) )\r\n\r\n # print(md.dsp[[\"M\",\"FP\"]]); # print(md.dsp.iloc[0])\r\n # md.print_form2(md.dsp.iloc[0])\r", "title": "" }, { "docid": "b032e908b4afb1d820e7d935e5ce60da", "score": "0.53312266", "text": "def getKnotNumber(data, evalAt=-1.1):\n data = numpy.array(data)\n if len(data) == 3:\n data = data.T\n\n with NamedTemporaryFile() as newfile:\n newfile.write(\"t=0\\n\\n%d\\n\" % (len(data)))\n for j, i in enumerate(data):\n curStr = \"%d %.25lf %.25lf %.25lf\\n\" % tuple([j] + list(i))\n newfile.write(curStr)\n\n name = newfile.name\n newfile.flush()\n\n\n print(\"runnung command {0} {1} -p {4} > {2}_{3}\".format(reduceKnotFilename, name,\n name, \"_output\", evalAt))\n os.system(\"{0} {1} -p {4} > {2}_{3}\".format(reduceKnotFilename, name,\n name, \"_output\", evalAt))\n lines = open(\"%s_%s\" % (name, \"_output\")).readlines()\n print(\"Contents of the output: -----\")\n print(lines)\n print(\"End of the output-----\")\n os.remove(\"%s_%s\" % (name, \"_output\"))\n return lines", "title": "" }, { "docid": "858d829dfd9a5bbdb40c90caa755441c", "score": "0.5329493", "text": "def interpret():", "title": "" }, { "docid": "76d272cb7b80ce2253e8252ee4a61aac", "score": "0.53266144", "text": "def ess_analysis(language):\n k = 3\n\n training_path = '../Datasets/' + language + '/train'\n test_path = 
'../Datasets/' + language + '/dev.in'\n output_path = '../EvalScript/' + language\n\n optimal_y_dict = {}\n\n train_data = read_in_file(training_path)\n print('done reading training file')\n s_emission_count, s_transition_count, s_y_count, s_x_count = count_sentiment_only(train_data, k)\n print('done counting x, y, emissions for sentiment only')\n\n s_b, s_a = get_parameters(s_emission_count, s_transition_count, s_y_count)\n print('done getting all transition and emission parameters for sentiment only')\n\n e_emission_count, e_transition_count, e_y_count, e_x_count = count_entity_only(train_data, k)\n print('done counting x, y, emissions for entity only')\n\n e_b, e_a = get_parameters(e_emission_count, e_transition_count, e_y_count)\n print('done getting all transition and emission parameters for entity only')\n\n\n test_data = read_in_file(test_path)\n print('done reading test file')\n #\n main_path = os.path.dirname(__file__)\n save_path = os.path.join(main_path, output_path)\n with codecs.open(os.path.join(save_path,'dev.p5.out'), 'w', 'utf-8') as file:\n for sentence in test_data:\n mod_sentence = []\n for word in sentence:\n # To check if word in test data appears in training data\n if word not in s_x_count or s_x_count[word] < k:\n mod_word = '#UNK#'\n else:\n mod_word = word\n mod_sentence.append(mod_word)\n\n # Run viterbi but only to get the sentiments\n sentiment_pi = viterbi_sentiment_only(mod_sentence, s_a, s_b)\n output_states_sentiment = back_propagation_sentiment_only(sentiment_pi)\n\n #Run viterbi but only to get the entities\n entity_pi = viterbi_entity_only(mod_sentence, e_a, e_b)\n output_states_entity = back_propagation_entity_only(entity_pi)\n\n # print('sentiment: ', output_states_sentiment)\n # print('entity: ', output_states_entity)\n\n fixed_output_states = output_states_sentiment\n\n # Compare output states from the viterbi_entity_only and viterbi_sentiment_only\n for i in range(len(sentence)):\n entity_label = output_states_entity[i+1]\n sentiment_label = output_states_sentiment[i+1]\n\n if(entity_label != 'O'):\n if(sentiment_label[0] != 'O'):\n fixed_output_states[i+1][0] = entity_label + sentiment_label[0]\n\n elif(sentiment_label[1] != 'O'):\n fixed_output_states[i+1][0] = entity_label + sentiment_label[1]\n\n else:\n fixed_output_states[i+1][0] = entity_label + 'neutral'\n\n #Check if the beginning of the entity is a B-, not I-\n for j in range(len(sentence)):\n curr_state = fixed_output_states[j+1][0]\n prev_state = fixed_output_states[j][0]\n #Check if all previous entries in the state sequence are Os\n if(curr_state != 'O'):\n if(prev_state == 'O'):\n if('I-' in curr_state):\n curr_state.replace('I-','B-')\n fixed_output_states[j+1][0] = curr_state\n if('I-' in prev_state or 'B-' in prev_state):\n if('B-' in curr_state):\n curr_state.replace('B-','I-')\n fixed_output_states[j+1][0] = curr_state\n\n for i in range(len(sentence)):\n output = sentence[i] + ' ' + fixed_output_states[i+1][0] + '\\n'\n # output = word + ' ' + optimum_y + '\\n'\n file.write(output)\n file.write('\\n')\n\n print('Done!')\n file.close()", "title": "" }, { "docid": "9cd0c17a46c3eb97a610c398931607a3", "score": "0.5324169", "text": "def _analyse(self, results):\n raise NotImplementedError", "title": "" }, { "docid": "f9a9219fc0f601d0c1820ea5c7783a60", "score": "0.5322446", "text": "def test_PD011_fail_values():\n statement = \"result = df.values\"\n tree = ast.parse(statement)\n actual = list(VetPlugin(tree).run())\n expected = [PD011(1, 9)]\n assert actual == expected", "title": "" 
}, { "docid": "5203c30035fb476420c47c03b00999a7", "score": "0.53220433", "text": "def evaluate(self, input_values, output_values):\n return []", "title": "" }, { "docid": "319f86d117746cb1e811703c00953296", "score": "0.5314575", "text": "def read_def(deff):\n\n dat = pd.read_csv(deff,sep=\"#\",names=(\"VAL\",\"COMMENT\"))\n alpha_ref=None\n texp=None\n molmasssw=False\n n_Texp=None\n ntransf=1\n maxnu=0.0\n for i, com in enumerate(dat[\"COMMENT\"]):\n if \"Default value of Lorentzian half-width\" in com:\n alpha_ref=float(dat[\"VAL\"][i])\n elif \"Default value of temperature exponent\" in com:\n n_Texp=float(dat[\"VAL\"][i])\n elif \"Element symbol 2\" in com:\n molmasssw=True\n elif \"No. of transition files\" in com:\n ntransf=int(dat[\"VAL\"][i])\n elif \"Maximum wavenumber (in cm-1)\" in com:\n maxnu=float(dat[\"VAL\"][i])\n #maxnu=20000.0\n elif molmasssw:\n c=np.unique(dat[\"VAL\"][i].strip(\" \").split(\" \"))\n c=np.array(c,dtype=np.float)\n molmass=(np.max(c))\n molmasssw=False\n\n #SOME DEF FILES CONTAINS ERRORS. THESE ARE THE EXCEPTIONS\n if deff.stem==\"12C-16O2__UCL-4000\":\n ntransf=20\n if deff.stem==\"14N-1H3__CoYuTe\":\n maxnu=20000.0\n \n if ntransf>1:\n dnufile=maxnu/ntransf\n numinf=dnufile*np.array(range(ntransf+1))\n numtag=[]\n for i in range(len(numinf)-1):\n imin='{:05}'.format(int(numinf[i]))\n imax='{:05}'.format(int(numinf[i+1]))\n numtag.append(imin+\"-\"+imax)\n else:\n numinf=None\n numtag=\"\"\n \n return n_Texp, alpha_ref, molmass, numinf, numtag", "title": "" }, { "docid": "2d05412eab1c8f5b75540195ac51b0f5", "score": "0.5305666", "text": "def readResult(filename):\r\n return np.genfromtxt(filename, comments='#', delimiter='\\t',\r\n dtype=[('roi', '|S50'), ('x', 'i8'), ('y', 'i8'), ('z', 'i8'),\r\n ('expression_level', 'f8'), ('num_expressors', 'i8'),\r\n ('cell_diameter', 'f8'), ('grid_area', 'f8')])", "title": "" }, { "docid": "f8d9061c5b5579c5943e04e572f5fe83", "score": "0.53053516", "text": "def run_eval_test(intseed,method,vars_arr,Potential,W_spec,CV_dict,step,N,n,d,params_test,f_type,params_prior,s_type,t_moments):\n if f_type == \"posterior_mean\":\n sampler_type = method[\"sampler\"]\n if sampler_type == \"ULA\":\n traj,traj_grad = ULA_ODE(intseed,Potential,step,params_prior,N,n,d,s_type)\n elif sampler_type == \"MALA\":\n traj,traj_grad,n_accepted = MALA_ODE(intseed,Potential,step,params_prior,N,n,d,s_type)\n else:\n raise \"Not implemented error when choosing sampler in run_eval_test\"\n #lists to save the results of the trajectory\n ints_all = []\n vars_all = []\n #initialize function values\n f_vals = set_function(f_type,[traj],vars_arr,params_test)\n #kill dimension which is not needed\n f_vals = f_vals[0]\n ints_all,vars_all = usual_evaluation(f_vals,traj,traj_grad,CV_dict,W_spec,n,d,vars_arr)\n return ints_all,vars_all\n elif f_type == \"evidence\":\n ints_all = [[] for j in range(len(t_moments))]\n vars_all = [[] for j in range(len(t_moments))]\n f_vals = np.zeros((len(t_moments),n),dtype = float)\n traj = np.zeros((len(t_moments),n,d),dtype = float)\n traj_grad = np.zeros((len(t_moments),n,d),dtype = float)\n for i in range(len(t_moments)):\n if method[\"sampler\"] == \"ULA\":\n f_vals[i],traj[i],traj_grad[i] = ULA_ODE(i+intseed*len(t_moments),Potential, step, params_prior, N, n, d, s_type,t_moments[i])\n elif method[\"sampler\"] == \"MALA\":\n f_vals[i],traj[i],traj_grad[i],n_accepted = MALA_ODE(i+intseed*len(t_moments),Potential,step,params_prior,N,n,d,s_type,t_moments[i])\n ints_all[i],vars_all[i] = 
usual_evaluation(f_vals[i],traj[i],traj_grad[i],CV_dict[i],W_spec,n,d,vars_arr)\n #now calculate integrals based on new values\n evidence_est = np.zeros(len(ints_all[0]),dtype = float)\n for j in range(len(ints_all[0])):\n for i in range(len(f_vals)-1):\n evidence_est[j] += (ints_all[i+1][j] - inds_all[i][j])*(t_moments[i+1]-t_moments[i])/2\n return evidence_est", "title": "" }, { "docid": "46f6f238b45b99cfe4490f1d35de55b8", "score": "0.5305146", "text": "def eval(self, points):", "title": "" }, { "docid": "1490bd96b8fcec951fc2b6f6349e4e1d", "score": "0.53009355", "text": "def get_stats(DIRECTORY, args):\n workspace = DIRECTORY['WORKSPACE']\n pesq_path = \"pesq_results.txt\"\n with open(pesq_path, 'rt') as f:\n reader = csv.reader(f, delimiter='\\t')\n lis = list(reader)\n \n pesq_dict = {}\n for i1 in range(1, len(lis) - 1):\n li = lis[i1]\n na = li[1]\n pesq = float(li[2])\n noise_type = na.split('.')[1]\n if noise_type not in pesq_dict.keys():\n pesq_dict[noise_type] = [pesq]\n else:\n pesq_dict[noise_type].append(pesq)\n \n avg_list, std_list = [], []\n result_path = os.path.join(workspace, \"result\")\n makedirs(result_path)\n result_path = os.path.join(result_path,\"result.txt\")\n file = open(result_path, \"w\")\n f = \"{0:<16} {1:<16}\"\n file.write(f.format(\"Noise\", \"PESQ\")+\"\\n\")\n file.write(\"---------------------------------\\n\")\n for noise_type in pesq_dict.keys():\n pesqs = pesq_dict[noise_type]\n avg_pesq = np.mean(pesqs)\n std_pesq = np.std(pesqs)\n avg_list.append(avg_pesq)\n std_list.append(std_pesq)\n file.write(f.format(noise_type, \"%.2f +- %.2f\\n\" % (avg_pesq, std_pesq)))\n file.write(\"---------------------------------\\n\")\n file.write(f.format(\"Avg.\", \"%.2f +- %.2f\\n\" % (np.mean(avg_list), np.mean(std_list))))\n file.close()\n print(\"Average PESQ score: %s\" %np.mean(avg_list))", "title": "" }, { "docid": "48ce4aaf0614c3496854d2df90f698e4", "score": "0.5300437", "text": "def calculate_nri(temp_results, membrane_gt, synapse_gt):\n try:\n rh_logger.logger.start_process(\"test_harness\", \"evaluate\", [])\n except:\n pass\n\n rh_logger.logger.report_event('Calculating NRI information')\n\n nri = {}\n\n # Run the NRI Python script and collect the output\n rh_logger.logger.report_event('Running nri.py')\n args = [\n 'python',\n 'nri.py',\n '--segmentation-file',\n os.path.join(temp_results, 'stitched_segmentation.h5',),\n '--synapse-segmentation-file',\n os.path.join(temp_results, 'synapse_segmentation.h5',),\n '--pre-synaptic-map-file',\n os.path.join(temp_results, 'presynaptic_map.h5',),\n '--ground-truth-file',\n os.path.join(membrane_gt),\n '--ground-truth-synapse-file',\n os.path.join(synapse_gt),\n ]\n\n try:\n out = subprocess.check_output(args, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n out = ''\n rh_logger.logger.report_exception(exception=e,\n msg='nri.py failed to run')\n\n # Iterate over each line and capture the precision, recall, and nri values\n rh_logger.logger.report_event('Captured output {}'.format(out))\n for line in out.splitlines():\n if re.match(r'(Precision:)|(Recall:)|(NRI:)', line) is not None:\n words = line.strip().split()\n rh_logger.logger.report_event(\n 'Found result {} {}'.format(words[0], words[1]))\n nri[words[0].strip(':').lower()] = float(words[1])\n\n if 'precision' in nri:\n nri['precision'] = nri['precision'] / 100.0\n if 'recall' in nri:\n nri['recall'] = nri['recall'] / 100.0\n\n rh_logger.logger.report_event('Captured NRI values {}'.format(nri))\n\n return nri", "title": "" }, { 
"docid": "e642a7bf1cbf365729de6277db9d62e3", "score": "0.5294896", "text": "def get_eval_results(self):\n hist = self.hist\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n mean_iu = np.nanmean(iu)\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n #cls_iu = dict(zip(range(self.n_class), iu))\n cls_iu = iu\n return {'Overall Acc': acc,\n 'Mean Acc': acc_cls,\n 'FreqW Acc': fwavacc,\n 'Mean IoU': mean_iu,}, cls_iu", "title": "" }, { "docid": "f9116b73d13c7f599444ede2e75ad4a5", "score": "0.52945185", "text": "def read_data(self):\n \n pat_data=re.compile(r'''^\\s*([\\d.e+-]+|nan)\\s+(?P<value>[\\d.e+-]+|nan)\\s+(?P<err>[\\d.e+-]+|nan)''',re.I)\n pat_end=re.compile(r'''NEW PLOT''',re.I)\n value=0\n err=0\n norm=0\n \n for i in range(0,len(self.file_list)):\n file=self.file_list[i]\n norm+=self.norm[i]\n pos=0\n while 1: #to remove\n pos-=1\n line=file.readline()\n if line=='':\n return 'stop',0\n if pat_data.search(line):\n value+=float(pat_data.search(line).group('value'))\n err+=float(pat_data.search(line).group('err'))\n break\n \n return value/norm,err/norm", "title": "" }, { "docid": "ebf86ab730623baa39b977e5bbb99d99", "score": "0.52942187", "text": "def eval(self):\n\n psnr_arr = []\n ssim_arr = []\n loss_arr = []\n denoised_img_arr = []\n \n self.denoiser.eval()\n\n with torch.no_grad():\n\n for batch_idx, (source, target) in enumerate(self.te_data_loader):\n \n source = source.cuda()\n target = target.cuda()\n \n # Denoise\n source_denoised = self.denoiser(source)\n\n # Update loss\n loss = self.loss(source_denoised, target)\n loss = loss.cpu().numpy()\n\n target = target.cpu().numpy()\n source_denoised = source_denoised.cpu().numpy()\n\n target = np.clip(target, 0, 1)\n source_denoised = np.clip(source_denoised, 0, 1)\n \n # Compute PSRN\n for i in range(source.shape[0]):\n loss_arr.append(loss)\n psnr_arr.append(self.get_PSNR(source_denoised[i,0,:,:], target[i,0,:,:]))\n ssim_arr.append(self.get_SSIM(source_denoised[i,0,:,:], target[i,0,:,:]))\n denoised_img_arr.append(source_denoised[i,0,:,:])\n\n mean_loss = np.mean(loss_arr)\n mean_psnr = np.mean(psnr_arr)\n mean_ssim = np.mean(ssim_arr)\n \n return mean_loss, mean_psnr, mean_ssim", "title": "" }, { "docid": "03a4c196edab5214f23f4ab81305e58f", "score": "0.5292498", "text": "def parse_supermatcher_result(file_name, number):\n #initialize\n identity_percentage = None\n score = None\n #open file\n with open(file_name, \"rb\") as fl:\n #get first line\n line = fl.readline()\n #initialize lists\n identity_percentage_list = []\n score_list = []\n #iterate through lines\n while (line != \"\"):\n #get identity\n if line.startswith(\"# Identity:\"):\n #this linejust converts the identity format to a proper float\n identity_percentage_list.append(eval(compile(line.split(\"# Identity:\")[1].split(\" (\")[0].strip(), '<string>', 'eval', __future__.division.compiler_flag)))\n #get score\n if line.startswith(\"# Score: \"):\n #Get score number as float\n score_list.append(float(line.replace(\"# Score: \", \"\")))\n #next line\n line = fl.readline()\n #return\n return score_list[number], identity_percentage_list[number] #return score and identity percentage of number # alignment", "title": "" }, { "docid": "0a66c2e1c0e1c6d9ff09e667a79799dd", "score": "0.52882314", "text": "def process_rps_output(filepath, evalue):\n results = []\n with open(filepath, \"r\") as 
fh:\n for record in NCBIXML.parse(fh):\n for align in record.alignments:\n des, d_id, name = process_align(align)\n for hsp in align.hsps:\n if hsp.expect <= evalue:\n dict = {\"HitID\": align.hit_id,\n \"DomainID\": d_id,\n \"Name\": name,\n \"Description\": des,\n \"Expect\": float(hsp.expect),\n \"QueryStart\": int(hsp.query_start),\n \"QueryEnd\": int(hsp.query_end)}\n results.append(dict)\n return results", "title": "" }, { "docid": "445bf8abfe7cf73848c12aa8051a439f", "score": "0.52824044", "text": "def evaluate(data: str, label: str) -> None:\n click.echo(f\"Starting Evaluation of the following files: \\n{data}\")\n\n # Adding individual files to a list\n data_lst = [\n pd.read_csv(file, sep='\\t', index_col=0)\n for file in data\n ]\n\n # Cast string based label argument to a list\n result = do_ss_evaluation(data_lst, list(label))\n\n click.echo('===============Evaluation Results===============')\n click.echo(json.dumps(result, indent=4, sort_keys=True).replace('{', ' ').replace('}', ' '))\n click.echo('================================================')", "title": "" }, { "docid": "accfb5bd07380ed88b15738d43a91bdf", "score": "0.52803004", "text": "def evaluate(opt):\n MIN_DEPTH = 1e-3 \n MAX_DEPTH = opt.max_depth\n\n assert sum((opt.eval_mono, opt.eval_stereo)) == 1, \"Please choose mono or stereo evaluation by setting either --eval_mono or --eval_stereo\"\n\n gt_path = os.path.join(splits_dir, opt.eval_split, \"gt_depths.npz\")\n gt_depths = np.load(gt_path, fix_imports=True, encoding='latin1', allow_pickle=True)[\"data\"]\n\n print(\"-> Loading 16 bit predictions from {}\".format(opt.ext_disp_to_eval))\n pred_disps = []\n pred_uncerts = []\n for i in range(len(gt_depths)):\n src = cv2.imread(opt.ext_disp_to_eval+'/disp/%06d_10.png'%i,-1) / 256. 
/ (0.58*gt_depths[i].shape[1]) * 10\n pred_disps.append(src)\n if opt.eval_uncert:\n uncert = cv2.imread(opt.ext_disp_to_eval+'/uncert/%06d_10.png'%i,-1) / 256.\n pred_uncerts.append(uncert)\n\n pred_disps = np.array(pred_disps)\n\n print(\"-> Evaluating\")\n\n if opt.eval_stereo:\n print(\" Stereo evaluation - \"\n \"disabling median scaling, scaling by {}\".format(STEREO_SCALE_FACTOR))\n opt.disable_median_scaling = True\n opt.pred_depth_scale_factor = STEREO_SCALE_FACTOR\n else:\n print(\" Mono evaluation - using median scaling\")\n\n errors = []\n \n # dictionary with accumulators for each metric\n aucs = {\"abs_rel\":[], \"rmse\":[], \"a1\":[]}\n\n bar = progressbar.ProgressBar(max_value=len(gt_depths))\n for i in range(len(gt_depths)):\n gt_depth = gt_depths[i]\n gt_height, gt_width = gt_depth.shape[:2]\n bar.update(i)\n\n pred_disp = pred_disps[i]\n pred_disp = cv2.resize(pred_disp, (gt_width, gt_height))\n pred_depth = 1 / pred_disp\n\n if opt.eval_uncert:\n pred_uncert = pred_uncerts[i]\n pred_uncert = cv2.resize(pred_uncert, (gt_width, gt_height))\n\n if opt.eval_split == \"eigen\":\n \n # traditional eigen crop\n mask = np.logical_and(gt_depth > MIN_DEPTH, gt_depth < MAX_DEPTH)\n\n crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height,\n 0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32)\n crop_mask = np.zeros(mask.shape)\n crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1\n mask = np.logical_and(mask, crop_mask)\n\n else:\n \n # just mask out invalid depths\n mask = (gt_depth > 0)\n\n # apply masks\n pred_depth = pred_depth[mask]\n gt_depth = gt_depth[mask]\n if opt.eval_uncert:\n pred_uncert = pred_uncert[mask]\n\n # apply scale factor and depth cap\n pred_depth *= opt.pred_depth_scale_factor\n pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH\n pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH\n\n # get Eigen's metrics\n errors.append(compute_eigen_errors(gt_depth, pred_depth))\n if opt.eval_uncert:\n \n # get uncertainty metrics (AUSE and AURG)\n scores = compute_aucs(gt_depth, pred_depth, pred_uncert)\n\n # append AUSE and AURG to accumulators\n [aucs[m].append(scores[m]) for m in uncertainty_metrics ]\n\n # compute mean depth metrics and print\n mean_errors = np.array(errors).mean(0)\n print(\"\\n \" + (\"{:>8} | \" * 7).format(\"abs_rel\", \"sq_rel\", \"rmse\", \"rmse_log\", \"a1\", \"a2\", \"a3\"))\n print((\"&{: 8.3f} \" * 7).format(*mean_errors.tolist()) + \"\\\\\\\\\")\n\n if opt.eval_uncert:\n \n # compute mean uncertainty metrics and print\n\t for m in uncertainty_metrics:\n\t\t aucs[m] = np.array(aucs[m]).mean(0)\n print(\"\\n \" + (\"{:>8} | \" * 6).format(\"abs_rel\", \"\", \"rmse\", \"\", \"a1\", \"\"))\n\t print(\" \" + (\"{:>8} | \" * 6).format(\"AUSE\", \"AURG\", \"AUSE\", \"AURG\", \"AUSE\", \"AURG\"))\n\t print((\"&{:8.3f} \" * 6).format(*aucs[\"abs_rel\"].tolist()+aucs[\"rmse\"].tolist()+aucs[\"a1\"].tolist()) + \"\\\\\\\\\")\n\n # see you next time!\n print(\"\\n-> Done!\")", "title": "" }, { "docid": "f00be4303a99058ab50dc726e470edd5", "score": "0.52799326", "text": "def _evals_calc(self, evnum):\n pass", "title": "" }, { "docid": "29af57918c9bce6ac28c58e7c3387635", "score": "0.52787554", "text": "def system_evaluation(self):\n\n if not self.dataset.reference_data_present:\n return ' No reference data available for dataset.'\n else:\n output = ''\n if self.params.get_path('evaluator.event_handling', 'event-dependent') == 'event-dependent':\n overall_metrics_per_event = {}\n for event_id, event_label in 
enumerate(self.dataset.event_labels):\n if event_label not in overall_metrics_per_event:\n overall_metrics_per_event[event_label] = {}\n\n segment_based_metric = sed_eval.sound_event.SegmentBasedMetrics(\n event_label_list=[event_label],\n time_resolution=1.0,\n )\n\n event_based_metric = sed_eval.sound_event.EventBasedMetrics(\n event_label_list=[event_label],\n evaluate_onset=True,\n evaluate_offset=False,\n t_collar=0.5,\n percentage_of_length=0.5\n )\n\n for fold in self._get_active_folds():\n result_filename = self._get_result_filename(fold=fold,\n event_label=event_label,\n path=self.params.get_path('path.recognizer'))\n\n results = MetaDataContainer().load(filename=result_filename)\n for file_id, item in enumerate(self.dataset.test(fold, event_label=event_label)):\n # Select only row which are from current file and contains only detected event\n current_file_results = []\n for result_item in results.filter(filename=self.dataset.absolute_to_relative(item['file'])):\n if 'event_label' in result_item and result_item.event_label:\n current_file_results.append(result_item)\n print('META--------------------------------------------------------------')\n meta = []\n for meta_item in self.dataset.file_meta(self.dataset.absolute_to_relative(item['file'])):\n if 'event_label' in meta_item and meta_item.event_label:\n meta.append(meta_item)\n\n segment_based_metric.evaluate(\n reference_event_list=meta,\n estimated_event_list=current_file_results\n )\n\n event_based_metric.evaluate(\n reference_event_list=meta,\n estimated_event_list=current_file_results\n )\n\n overall_metrics_per_event[event_label]['segment_based_metrics'] = segment_based_metric.results()\n overall_metrics_per_event[event_label]['event_based_metrics'] = event_based_metric.results()\n if self.params.get_path('evaluator.show_details', False):\n output += \" Event [{event}], Evaluation over {folds:d} folds\\n\".format(event=event_label,\n folds=self.dataset.fold_count)\n output += \" \\n\"\n\n output += \" Event-based metrics \\n\"\n output += event_based_metric.result_report_overall()\n output += event_based_metric.result_report_class_wise()\n\n output += \" Segment-based metrics \\n\"\n output += segment_based_metric.result_report_overall()\n output += segment_based_metric.result_report_class_wise()\n\n overall_metrics_per_event = DottedDict(overall_metrics_per_event)\n\n output += \" \\n\"\n output += \" Overall metrics \\n\"\n output += \" =============== \\n\"\n output += \" {event_label:<17s} | {event_based_fscore:7s} | {event_based_er:7s} | {segment_based_fscore:7s} | {segment_based_er:7s} |\\n\".format(\n event_label='Event label',\n segment_based_fscore='Seg. F1',\n segment_based_er='Seg. ER',\n event_based_fscore='Evt. F1',\n event_based_er='Evt. 
ER',\n )\n output += \" {event_label:<17s} + {event_based_fscore:7s} + {event_based_er:7s} + {segment_based_fscore:7s} + {segment_based_er:7s} +\\n\".format(\n event_label='-'*17,\n segment_based_fscore='-'*7,\n segment_based_er='-' * 7,\n event_based_fscore='-' * 7,\n event_based_er='-' * 7,\n )\n avg = {\n 'segment_based_fscore': [],\n 'segment_based_er': [],\n 'event_based_fscore': [],\n 'event_based_er': [],\n }\n for event_id, event_label in enumerate(self.dataset.event_labels):\n output += \" {event_label:<17s} | {event_based_fscore:<7s} | {event_based_er:<7s} | {segment_based_fscore:<7s} | {segment_based_er:<7s} |\\n\".format(\n event_label=event_label,\n segment_based_fscore=\"{:4.2f}\".format(overall_metrics_per_event.get_path(event_label+'.segment_based_metrics.overall.f_measure.f_measure')*100),\n segment_based_er=\"{:4.2f}\".format(overall_metrics_per_event.get_path(event_label+'.segment_based_metrics.overall.error_rate.error_rate')),\n event_based_fscore=\"{:4.2f}\".format(overall_metrics_per_event.get_path(event_label + '.event_based_metrics.overall.f_measure.f_measure') * 100),\n event_based_er=\"{:4.2f}\".format(overall_metrics_per_event.get_path(event_label + '.event_based_metrics.overall.error_rate.error_rate')),\n )\n\n avg['segment_based_fscore'].append(overall_metrics_per_event.get_path(event_label+'.segment_based_metrics.overall.f_measure.f_measure')*100)\n avg['segment_based_er'].append(overall_metrics_per_event.get_path(event_label+'.segment_based_metrics.overall.error_rate.error_rate'))\n avg['event_based_fscore'].append(overall_metrics_per_event.get_path(event_label + '.event_based_metrics.overall.f_measure.f_measure') * 100)\n avg['event_based_er'].append(overall_metrics_per_event.get_path(event_label + '.event_based_metrics.overall.error_rate.error_rate'))\n\n output += \" {event_label:<17s} + {event_based_fscore:7s} + {event_based_er:7s} + {segment_based_fscore:7s} + {segment_based_er:7s} +\\n\".format(\n event_label='-' * 17,\n segment_based_fscore='-' * 7,\n segment_based_er='-' * 7,\n event_based_fscore='-' * 7,\n event_based_er='-' * 7,\n )\n output += \" {event_label:<17s} | {event_based_fscore:<7s} | {event_based_er:<7s} | {segment_based_fscore:<7s} | {segment_based_er:<7s} |\\n\".format(\n event_label='Average',\n segment_based_fscore=\"{:4.2f}\".format(numpy.mean(avg['segment_based_fscore'])),\n segment_based_er=\"{:4.2f}\".format(numpy.mean(avg['segment_based_er'])),\n event_based_fscore=\"{:4.2f}\".format(numpy.mean(avg['event_based_fscore'])),\n event_based_er=\"{:4.2f}\".format(numpy.mean(avg['event_based_er'])),\n )\n\n elif self.params.get_path('evaluator.event_handling') == 'event-independent':\n message = '{name}: Event handling mode not implemented yet [{mode}]'.format(\n name=self.__class__.__name__,\n mode=self.params.get_path('evaluator.event_handling')\n )\n\n self.logger.exception(message)\n raise ValueError(message)\n\n else:\n message = '{name}: Unknown event handling mode [{mode}]'.format(\n name=self.__class__.__name__,\n mode=self.params.get_path('evaluator.event_handling')\n )\n\n self.logger.exception(message)\n raise ValueError(message)\n\n if self.params.get_path('evaluator.saving.enable'):\n filename = self.params.get_path('evaluator.saving.filename').format(\n dataset_name=self.dataset.storage_name,\n parameter_set=self.params['active_set'],\n parameter_hash=self.params['_hash'],\n )\n output_file = os.path.join(self.params.get_path('path.evaluator'), filename)\n\n output_data = {\n 'overall_metrics_per_event': 
overall_metrics_per_event,\n 'average': {\n 'segment_based_fscore': numpy.mean(avg['segment_based_fscore']),\n 'segment_based_er': numpy.mean(avg['segment_based_er']),\n 'event_based_fscore': numpy.mean(avg['event_based_fscore']),\n 'event_based_er': numpy.mean(avg['event_based_er']),\n },\n 'parameters': dict(self.params)\n }\n ParameterFile(output_data, filename=output_file).save()\n\n return output", "title": "" }, { "docid": "761af31ba51fb51aa2db179bd7edc950", "score": "0.5273058", "text": "def values():", "title": "" }, { "docid": "761af31ba51fb51aa2db179bd7edc950", "score": "0.5273058", "text": "def values():", "title": "" }, { "docid": "6f55a78257961af4afc21f2a194f59b9", "score": "0.527193", "text": "def list_vals(self):\n lsout = \"\"\"\nParameters:\nname is the name of experiment\nsetup is to describe parameters of diffractometer \nobserved_data is the experimental data\nflag_chi2_up, flag_chi2_down are flags for refinement: \"up down\"\nflag_chi2_sum, flag_chi2_diff are flags for refinement: \"sum diff\"\nfile_out is the file name of experimental and model data (basename)\nfile_dir is the working directory\nexcl_tth_min is the list of excluded ttheta from up, down and sum (difference is taken into account), minimal\nexcl_tth_max is the list of excluded ttheta from up, down and sum (difference is taken into account), maximal\n \"\"\"\n print(lsout)", "title": "" } ]
85832575005847dd2e7a417403fc5077
initialize some example perfdata values
[ { "docid": "2c13da305dee4caf21e277503dbac08c", "score": "0.759437", "text": "def setUp(self):\n\n self.perfdata = []\n self.perfdata.append(PERF1)\n self.perfdata.append(PERF2)\n self.perfdata.append(PERF3)\n self.perfdata.append(PERF4)\n\n self.valid_timestamps = [1359642046, 1359642046, 1359642046, 1359642046, 1359642046, 1359642046, 1359642046, 1359642046, 1359642046, 1359642046]\n self.valid_values = [0.00, 57.72, 0.000562, 0, 431.0, 0.074175, 8394629120.0, 6872485888.0, 1522143232.0, 923508736.0]", "title": "" } ]
[ { "docid": "99aad547ec6852fbb8815e8d21a811d8", "score": "0.70477575", "text": "def bench_initialize_data():\n return loaddata(ns, name, data, (0,0,0))", "title": "" }, { "docid": "102869a1a4cb6e2df119fa3c6ca121d8", "score": "0.6749036", "text": "def setUp(self): \n self._gold_standard_data = gold_standard_data\n self._blank_mutation_data =\\\n {'3476160':{},'14500716':{},'12206666':{},'11327835':{}} \n self._pc = PerformanceCalculator(self._gold_standard_data)", "title": "" }, { "docid": "8e9cc311ddade5046f640703df52b90c", "score": "0.65359545", "text": "def init(self, all_data):", "title": "" }, { "docid": "7fce4205c43f67f4232003c37cb7c5de", "score": "0.6454939", "text": "def setUp(self):\n self._pcr = PerformanceCalculatorResult(42,1,0,5)", "title": "" }, { "docid": "9ef4ed4212f822f4b78823c94a4a9635", "score": "0.6443156", "text": "def init_data(self):", "title": "" }, { "docid": "d0d144b136aeb9154400b68a115c523a", "score": "0.6387921", "text": "def _initialize(self):\n self._load_data()\n self._compute_loss_and_other_metrics()", "title": "" }, { "docid": "87f6e2f2dc3bbefc917bdc52ae2ba406", "score": "0.63584363", "text": "def _initialize_instance(self, X, y, X_test, y_test, num_test, \n X_tot=None, y_tot=None,\n sources=None, sample_weight=None):\n data_dir = os.path.join(self.directory, 'data.pkl')\n if not os.path.exists(data_dir):\n self._save_dataset(data_dir, X, y, X_test, y_test, num_test,\n X_tot, y_tot, sources, sample_weight) \n self._load_dataset(data_dir)\n loo_dir = os.path.join(self.directory, 'loo.npy')\n self.vals_loo = None\n if os.path.exists(loo_dir):\n self.vals_loo = np.load(loo_dir)\n self.experiment_number = self._find_experiment_number(self.directory)\n self._create_results_placeholder(\n self.experiment_number, len(self.X), len(self.sources))", "title": "" }, { "docid": "dfb49767b68c7f2c60f80aa547b30023", "score": "0.6346264", "text": "def init_dataset():\n data.clean()\n data.extract_from_api()\n data.desc_sol_data()\n data.entity_data()\n data.intent_data()\n data.code_entity()", "title": "" }, { "docid": "9bb7a87e0d990b35798edc377ee409c9", "score": "0.6333304", "text": "def _initialize_data(self):\n self._pressure1 = 2.0\n self._pressure2 = 3.0\n self._error1 = 0\n self._error2 = 0\n self._units = 0", "title": "" }, { "docid": "b5649b3d101aa03f4b2846e4614e1fc5", "score": "0.62738943", "text": "def _initialize_data(self):\n self.setpoint = 20\n self.pipe_temperature = 25.1\n self.capacitor_bank_temperature = 30.3\n self.fet_temperature = 35.8\n\n self.p, self.i, self.d = 0, 0, 0\n self.sample_time = 100\n\n self.direction_heating = True\n\n self.pid_lower_limit, self.pid_upper_limit = 0, 0\n\n self.pid_mode_automatic = True\n self.running = True\n\n self.psu_voltage, self.psu_current, self.output = 0, 0, 0\n\n self.remote_mode = True\n self.power_supply_on = True\n self.sample_area_led_on = True\n self.hf_on = False\n\n self.psu_overtemp, self.psu_overvolt = False, False\n self.cooling_water_flow = 100\n\n self.sample_holder_material = SampleHolderMaterials.ALUMINIUM\n\n self.thermocouple_1_fault, self.thermocouple_2_fault = 0, 0", "title": "" }, { "docid": "038acd00ba15fc3192838e534ae3b8e6", "score": "0.6221233", "text": "def __init__(self) -> None:\n self.metrics = {}\n self.current = None\n self.run = None", "title": "" }, { "docid": "fe82ede1de3449dc98d38eb858ca4b28", "score": "0.62194026", "text": "def _init_data_struct(self):", "title": "" }, { "docid": "41590c393ac26b34ba8929c78e21c0b4", "score": "0.6211227", "text": "def __init__(self):\n self.dataset 
= {}\n self.nums = []", "title": "" }, { "docid": "a0ae469fe84f6000f2a0fc28040a543c", "score": "0.6191917", "text": "def _AddData(self):\n testing_common.AddTests(['ChromiumPerf'], ['linux'],\n {'page_cycler': {\n 'warm': {\n 'cnn': {},\n }\n }})\n test_path = 'ChromiumPerf/linux/page_cycler/warm/cnn'\n test = utils.TestKey(test_path).get()\n test.improvement_direction = anomaly.UP\n test.put()\n\n now = datetime.datetime.now()\n last_week = now - datetime.timedelta(days=7)\n rows = dict([(i * 100, {\n 'value': i * 1000,\n 'a_whatever': 'blah',\n 'r_v8': '1234a',\n 'timestamp': now if i > 5 else last_week,\n 'error': 3.3232\n }) for i in range(1, 10)])\n rows[100]['r_not_every_row'] = 12345\n testing_common.AddRows('ChromiumPerf/linux/page_cycler/warm/cnn', rows)", "title": "" }, { "docid": "c7d39c3c6564962b95738b4f708fbba4", "score": "0.6168307", "text": "def _AddSampleData(self):\n testing_common.AddDataToMockDataStore(\n ['master'], ['linux-release', 'android-motoe'],\n {\n 'page_cycler.moz': {'cold_times': {'page_load_time': {}}},\n 'cc_perftests': {'foo': {'bar': {}}},\n })\n return [\n # 0: 200% regression in page_cycler.moz on linux, 201:300\n self._AddAnomaly(\n 'master/linux-release/page_cycler.moz/cold_times/page_load_time',\n start_revision=100201, end_revision=100300,\n median_before_anomaly=50, median_after_anomaly=150,\n bug_id=1234, is_improvement=False),\n # 1: 100% regression in page_cycler.moz on android, 221:320\n self._AddAnomaly(\n 'master/android-motoe/page_cycler.moz/cold_times/page_load_time',\n start_revision=100221, end_revision=100320,\n median_before_anomaly=50, median_after_anomaly=100,\n bug_id=1234, is_improvement=False),\n # 2: 50% regression in cc_perftests on linux, 181:280\n self._AddAnomaly(\n 'master/linux-release/cc_perftests/foo/bar',\n start_revision=100181, end_revision=100280,\n median_before_anomaly=50, median_after_anomaly=75,\n bug_id=2000, is_improvement=False),\n ]", "title": "" }, { "docid": "d886a3acdfb59f38745cc26d5ea58da5", "score": "0.6126197", "text": "def init(add_counter_var, add_performance_counters_var, print_decision_report_var):\n global add_counter, add_performance_counter, print_decision_report\n add_counter = add_counter_var\n add_performance_counter = add_performance_counters_var\n print_decision_report = print_decision_report_var", "title": "" }, { "docid": "9ca5cf440714cd3e4c39da480b16c0b7", "score": "0.6121355", "text": "def setUp(self):\n self.data = {'time': 1}", "title": "" }, { "docid": "5426d3dd1946e092308f8d73457789cb", "score": "0.61053246", "text": "def init_summary_data(in_params, num_images, summary_data):\n summary_data[_('Parameters')] = in_params\n summary_data[_('Number of images')] = num_images\n summary_data[Control.pixel_size.value] = []\n summary_data[Control.bands_len.value] = []\n summary_data[Control.dig_level.value] = []\n summary_data[Control.rad_balance.value] = []\n summary_data[Control.srid.value] = []\n summary_data[Control.nodata.value] = []", "title": "" }, { "docid": "594f94798d7799bb73b6ad2360a9fe64", "score": "0.6104573", "text": "def initialize(\n self,\n t: int,\n get_sample: Callable[[], Sample],\n x_0: dict,\n total_sims: int,\n ):", "title": "" }, { "docid": "c2d830f8d21865980150226c316959dd", "score": "0.6086474", "text": "def __init__(self):\n self.stats = {\n \"pool\": 0,\n \"container\": 0,\n \"object\": 0,\n \"dkey\": 0,\n \"akey\": 0,\n \"array\": 0,\n \"single_value\": 0,\n \"user_value\": 0,\n \"user_meta\": 0,\n \"total_meta\": 0,\n \"nvme_total\": 0,\n \"total\": 0\n }", 
"title": "" }, { "docid": "c4ebf4f8c32f704122c573289c05525c", "score": "0.60579354", "text": "def dataInit(self,data):\n self.data = data\n self.distMeans()\n self.nksteps(300)", "title": "" }, { "docid": "12e70bf2904ec248f76882e913cfee09", "score": "0.60577834", "text": "def make_test_data(self):\r\n pass", "title": "" }, { "docid": "a427e55e00a08e926b77658c07a2ba35", "score": "0.6030019", "text": "def setUp(self):\n self.dataset = example.fakedataset()\n self.numrows = len(self.dataset)", "title": "" }, { "docid": "39dce96123dbf9dcf36ed9831dddbc2f", "score": "0.60256946", "text": "def test_init():\n\n game = \"MountainCar-v0\"\n memory_size = 5\n dataset = Dataset(game, memory_size)\n\n assert len(dataset.memory) == 0\n assert dataset.memory_size == memory_size\n assert dataset.position == 0\n assert len(dataset.state_space) == 2\n assert dataset.reward_space == [-1, 0]", "title": "" }, { "docid": "6df2c88b8268a1fcea8feb78004656d9", "score": "0.60211134", "text": "def __init__(self,dat):\n self.dat = dat\n self.add_dt_obj()\n self.add_mean_val()\n self.add_track_span()", "title": "" }, { "docid": "7379839bd17df1049a2dc89243e2d9da", "score": "0.5997428", "text": "def __init__(self):\n self.performance_pf_dict = {\n Performance.PERFORMANCE_PF_COL_ITERATION: [],\n Performance.PERFORMANCE_PF_COL_PATHFINDING_ITERATION: [],\n Performance.PERFORMANCE_PF_COL_PERSON_ID: [],\n Performance.PERFORMANCE_PF_COL_PERSON_TRIP_ID: [],\n Performance.PERFORMANCE_PF_COL_PROCESS_NUM: [],\n Performance.PERFORMANCE_PF_COL_PATHFINDING_STATUS: [],\n Performance.PERFORMANCE_PF_COL_NUM_LABELED_STOPS: [],\n Performance.PERFORMANCE_PF_COL_TRACED: [],\n Performance.PERFORMANCE_PF_COL_LABEL_ITERATIONS: [],\n Performance.PERFORMANCE_PF_COL_MAX_STOP_PROCESS_COUNT: [],\n Performance.PERFORMANCE_PF_COL_TIME_LABELING: [],\n Performance.PERFORMANCE_PF_COL_TIME_LABELING_MS: [],\n Performance.PERFORMANCE_PF_COL_TIME_ENUMERATING: [],\n Performance.PERFORMANCE_PF_COL_TIME_ENUMERATING_MS: [],\n Performance.PERFORMANCE_PF_COL_WORKING_SET_BYTES: [],\n Performance.PERFORMANCE_PF_COL_PRIVATE_USAGE_BYTES: [],\n Performance.PERFORMANCE_PF_COL_MEM_TIMESTAMP: []\n }\n\n # maps PERFORMANCE_COLUMN* to arrays of values\n self.step_record_dict = {\n Performance.PERFORMANCE_COL_STEP_NAME: [],\n Performance.PERFORMANCE_COL_ITERATION: [],\n Performance.PERFORMANCE_COL_PATHFINDING_ITERATION: [],\n Performance.PERFORMANCE_COL_SIMULATION_ITERATION: [],\n Performance.PERFORMANCE_COL_START_TIME: [],\n Performance.PERFORMANCE_COL_END_TIME: [],\n # Performance.PERFORMANCE_COL_STEP_DURATION :[], # do this at the end\n Performance.PERFORMANCE_COL_START_MEM_MB: [],\n Performance.PERFORMANCE_COL_END_MEM_MB: []\n }\n\n # will map (iteration, pathfinding_iteration, simulation_iteration) => (step_name, start time (a datetime.datetime), starting mem usage in bytes)\n self.steps = {}", "title": "" }, { "docid": "d9edab5bb0cab5741d94ae1fb3475331", "score": "0.59669155", "text": "def test_data_collector_for_training_init(ms2library):\n DataCollectorForTraining(ms2library)", "title": "" }, { "docid": "03fb85bc6146088baf931ccb3c74180f", "score": "0.5946913", "text": "def init_dataset(self):\n return", "title": "" }, { "docid": "b06f81459b53ec781e8328a2286be768", "score": "0.593088", "text": "def __init__(self): \n self._learning_rate = [-6, 0]\n self._batch_size = [35, 512]\n self._number_filters = [4, 10]\n self.rs_performance = np.zeros(shape = (RUNS, ITERATIONS))\n self.rs_runtime = np.zeros(shape = (RUNS, ITERATIONS))\n self.bo_performance = []\n 
self.bo_runtime = []", "title": "" }, { "docid": "fdd76aed2e17f7412e02696b5d013e0f", "score": "0.591445", "text": "def initialize(self, runInfo, inputs, initDict):\n #construct a list of all the parameters that have requested values into self.allUsedParams\n self.allUsedParams = set()\n for metricName in self.scalarVals + self.vectorVals:\n if metricName in self.toDo.keys():\n for entry in self.toDo[metricName]:\n self.allUsedParams.update(entry['targets'])\n try:\n self.allUsedParams.update(entry['features'])\n except KeyError:\n pass\n\n #for backward compatibility, compile the full list of parameters used in Basic Statistics calculations\n self.parameters['targets'] = list(self.allUsedParams)\n PostProcessor.initialize(self, runInfo, inputs, initDict)", "title": "" }, { "docid": "950c8f16618e574eef0888179a9d120c", "score": "0.5913513", "text": "def __init__(self, dataset_mean=None, dataset_std=None):\n self.__dataset_mean = dataset_mean\n self.__dataset_std = dataset_std", "title": "" }, { "docid": "3cceac5358bafc1077f9e8ded7a8b987", "score": "0.59101325", "text": "def setup(self):\n # Load a test instrument\n self.testInst = pysat.Instrument()\n self.testInst.data = pds.DataFrame({'f107': np.linspace(70, 200, 160)},\n index=[pysat.datetime(2009, 1, 1)\n + pds.DateOffset(days=i)\n for i in range(160)])", "title": "" }, { "docid": "28026935b9658ffd1cc2ffdfee9343ad", "score": "0.59028393", "text": "def train_initialize(self) -> None:", "title": "" }, { "docid": "90a73a0a181289008c032d1c0140f472", "score": "0.59016335", "text": "def __init__(self, init_mean, init_var, measure_var, motion_var):\n self.mean = init_mean\n self.var = init_var\n self.measure_var = measure_var\n self.motion_var = motion_var", "title": "" }, { "docid": "fc973a47fab357e34f4fdf3360c0d580", "score": "0.5880439", "text": "def __init__(self,data):\n self.my_data=data\n self.supportThreshold=0.25", "title": "" }, { "docid": "5939a51a754b68df52406daa17e0309e", "score": "0.5875447", "text": "def init_data( self ):\n self.raw_i = np.array( [], dtype=np.uint32 )\n self.t_i = np.array( [], dtype=np.float64 )\n self.y_i = np.array( [], dtype=np.float64 )\n\n self.iDiscList = []", "title": "" }, { "docid": "31fd74de7758a0a090a2b90d354084e8", "score": "0.5870857", "text": "def _init_data(self):\n self.hist_macro = self._get_hist_macro()\n self.price = m.data\n self.rf = m._get_rf_data(annulize_factor=12)\n self.scen = self._get_scenario_data()\n self.monthly_return = self._get_portfolio_data()\n self.monthly_premium = self._get_monthly_premium()\n self.monthly_fama = self._get_monthly_fama_factors()\n self.quarterly_fama = self._get_quarterly_fama_factors()\n self.macro_corr = self._get_macro_corr()\n self.fama_model = self._get_fama_model()\n self.fama_macromodel = self._get_macrofama_model()\n self.expfama = self._get_fama_forecast()\n self.expreturn = self._get_return_forecast()", "title": "" }, { "docid": "8a17f5d5ca88b096a08b8f385a55531c", "score": "0.586826", "text": "def __init__(self):\n self.train_df = None\n self.test_df = None\n self.cat_cols = []\n self.ord_cols = []\n self.cols = []\n self.target_label = []\n self.remove_outliers = True\n self.replace = False\n self.quartiles = {}\n self.first_quartile = 0.05\n self.third_quartile = 0.95", "title": "" }, { "docid": "76f6ad8aa4c16c906688adee90524fc7", "score": "0.5867202", "text": "def data_setup(self):\n pass", "title": "" }, { "docid": "347db3e8740e64da67f9fb299b7183b1", "score": "0.5857405", "text": "def __init__(self):\n self.pids = dict()\n self.time = 
list()\n self.total_cpu_load = list()", "title": "" }, { "docid": "ccb6e4a6d4ea1d8b29cf1b1eb7d2bf69", "score": "0.584629", "text": "def _initialize_data(self):\n self.temperature = 0\n self.heater_range = 0\n self.heater_power = 0\n self.sensor_resistance = 0\n self.control_mode = 0\n\n self.p = 0\n self.i = 0\n self.d = 0\n\n self.connected = True", "title": "" }, { "docid": "268288698285a30c26adaec749618b05", "score": "0.58262604", "text": "def __init__(self, auto=False):\n self.auto = auto\n self.mean = 0\n self.var = 0\n self.count = 0", "title": "" }, { "docid": "9c1b05ea2cb41bc07a517a76eae58be2", "score": "0.58201766", "text": "def __init__(__self__, *,\n completed_datapoint_count: float,\n incremental_dataset_last_refresh_time: str,\n skipped_datapoint_count: float,\n total_datapoint_count: float):\n pulumi.set(__self__, \"completed_datapoint_count\", completed_datapoint_count)\n pulumi.set(__self__, \"incremental_dataset_last_refresh_time\", incremental_dataset_last_refresh_time)\n pulumi.set(__self__, \"skipped_datapoint_count\", skipped_datapoint_count)\n pulumi.set(__self__, \"total_datapoint_count\", total_datapoint_count)", "title": "" }, { "docid": "d6282d42b0abb0ec14e073d74683c00c", "score": "0.58165514", "text": "def __init__(self, samples):\n self.inputs = dict(red=[], green=[], blue=[])\n self.expected_outputs = dict(y=[], cb=[], cr=[])\n self.actual_outputs = dict(y=[], cb=[], cr=[])\n self.samples = samples", "title": "" }, { "docid": "07c4b0b31d7eb02aa0159f5b447458dd", "score": "0.58087003", "text": "def prepare_datatesting(self):\n self.array_features_totest, self.array_labels_totest , self.df_totest = ext.prepare_data(self.df_decks_totest, self.features)\n \n self.next(self.prepare_mlmagic)", "title": "" }, { "docid": "4be4cbe4d0eb05012a163d0369d3407f", "score": "0.58057284", "text": "def __init__(self, initCondition, testDemands, sysParams, unit):\n self.initCond = initCondition\n self.testDemands = testDemands\n self.sysParams = sysParams\n self.ctrlParams = unit.genes\n self.dt = 0.001\n self.eqTimeLimit = 10\n self.eqCounterLimit = 2000\n self.timeHist = []", "title": "" }, { "docid": "16e8f9f681d166dcf6c57ddd64cf5320", "score": "0.58051866", "text": "def __init__(self):\n self.train_set = None\n self.train_label = None\n self.test_set = None\n self.test_label = None\n self.pred_prob = None", "title": "" }, { "docid": "21d27ee20f843c0e567de1f13f683e26", "score": "0.58003294", "text": "def test_initialize():\n data_fetcher.DataFetcher()", "title": "" }, { "docid": "516aa9bb67abe8d87d2fc2c69a9094f6", "score": "0.5800128", "text": "def setUp(self):\n\n self.flag_verbose = 1\n \n self.values = [\n [1], # int\n [0.2], # float\n [\"fiets\"], # string\n [(1,2)], # tuple\n [[1]], # short list\n [[1,2]], # longer list\n [numpy.array([1,2])], # numpy.ndarray\n [{\"a\": 1, \"b\": 2}, bool] # dict\n ]", "title": "" }, { "docid": "91c2c616a4e74e4f3aebe011a91643ae", "score": "0.5789243", "text": "def bench_get_initial_data():\n return checkdata(ns, name, data, (0,0,0))", "title": "" }, { "docid": "a29addf3d6a8afa5687a4ea3cc4ecb2f", "score": "0.5788446", "text": "def setup(self):\n seed = 12345\n np.random.seed(seed)\n self.data_pn = self.create_pn_dataset()\n self.data_sample = self.create_sample_dataset(n = 10000)\n self.data_small = self.create_small_dataset()\n self.data_groups = self.create_another_dataset(n = 20)\n self.data_categorical = self.create_categorical_dataset()\n self.data_mixed = self.create_mixed_datatypes_dataset()", "title": "" }, { "docid": 
"6fc78f351b7a96c96abe849987ee6b39", "score": "0.57769454", "text": "def __init__(self):\n self.data_struct = defaultdict()\n self.counter_struct = defaultdict(set)\n self.max_count = 0\n self.min_count = 0", "title": "" }, { "docid": "98e26e0843a9446ad1720656b9a248df", "score": "0.57679677", "text": "def init_default(self):\n self.dataRangeHi = False\n self.dataRangeHiVal = \"N/A\"\n self.dataRangeLo = False\n self.dataRangeLoVal = \"N/A\"\n self.dataRange = False\n self.divideByZero = False\n self.__val = 0", "title": "" }, { "docid": "a4b0b51abe9b06ef3cbdbe3022c4b9c1", "score": "0.5755136", "text": "def __init__(self):\n self.counts = dict()\n self.context_counts = dict()\n self.model = dict()\n self.H = 0\n self.W = 0", "title": "" }, { "docid": "6561caf8be798c5cea025adcbf584518", "score": "0.5754884", "text": "def setUp(self):\n cat_file = os.path.join(BASE_DATA_PATH, \"synthetic_test_cat1.csv\")\n raw_data = np.genfromtxt(cat_file, delimiter=\",\")\n neq = raw_data.shape[0]\n self.catalogue = Catalogue.make_from_dict({\n \"eventID\": raw_data[:, 0].astype(int),\n \"year\": raw_data[:, 1].astype(int),\n \"dtime\": raw_data[:, 2],\n \"longitude\": raw_data[:, 3],\n \"latitude\": raw_data[:, 4],\n \"magnitude\": raw_data[:, 5],\n \"depth\": raw_data[:, 6]})\n self.config = {\"reference_magnitude\": 3.0}\n self.completeness = np.array([[1990., 3.0],\n [1975., 4.0],\n [1960., 5.0],\n [1930., 6.0],\n [1910., 7.0]])", "title": "" }, { "docid": "aeadc3b659f86890a2aa4fd50668d005", "score": "0.5753419", "text": "def __init__(self):\n self.data = [None] * 10000", "title": "" }, { "docid": "9c70d72b84dd2b1eeb8397f9ff56ff8a", "score": "0.5739576", "text": "def __init__(self):\n\t\t# all generated data will be stored here with their corresponding w\n\t\t# self.data_l = list() \n\t\tself.gen_w()", "title": "" }, { "docid": "eb989b2b8999dc29fec43c6d327110a3", "score": "0.5739386", "text": "def __init__(self):\n\t\tself.data = pd.Series()\n\t\tself.label = pd.Series()", "title": "" }, { "docid": "e3de68e7deeb3a70aab62466b32bbf23", "score": "0.5736789", "text": "def init_data(self, step=0):\n if self.is_state_or_parameter():\n self.set_constant(val=0.0, step=step)", "title": "" }, { "docid": "21a3425d093236b71990a40df82c9825", "score": "0.573573", "text": "def precompute_data():\n # basecmd = 'python -m wbia.expt.experiment_printres\n # --exec-print_latexsum --rank-lt-list=1,5,10,100 '\n varydict = ut.odict(\n [\n (\n 'preload_flags',\n [\n # '--preload-chip',\n # '--preload-feat',\n # '--preload-feeatweight',\n '--preload',\n '--preindex',\n ],\n ),\n ('dbname', get_dbnames()),\n (\n 'acfg_name',\n ['default:qaids=allgt,species=primary,view=primary,is_known=True'],\n ),\n ('cfg_name', ['default', 'candidacy_baseline', 'candidacy_invariance']),\n ]\n )\n return (varydict, 'preload', 'preload')", "title": "" }, { "docid": "a0385196a65bb2c70c45a7c91815f2f6", "score": "0.5732801", "text": "def _basic_init(self):\n # below lines are just showcase, this function must be rewrite by child classes\n # self.name = 'unknown'\n # self.rate = 0\n # self.price = pd.DataFrame(data={'date':[],'netvalue':[],'comment':[]})\n raise NotImplementedError", "title": "" }, { "docid": "cdbbe842a6b265cd3296f660ec3730f6", "score": "0.5726168", "text": "def setUp(self):\n\t\tnp.random.seed(0)\n\t\tself.data = Experiment('B', *generate_random_data())\n\t\t# Create time column. 
TODO: Do this nicer\n\t\tself.data.kpis['time_since_treatment'] = \\\n\t\t\tself.data.features['treatment_start_time']\n\t\t# Make time part of index\n\t\tself.data.kpis.set_index('time_since_treatment', append=True, inplace=True)", "title": "" }, { "docid": "6e5e82cbcf307772a3900eb846ded06c", "score": "0.5724012", "text": "def _initialize_score_data(self):\n self._initialize_nodes_from_colors_from_nodes()\n self._initialize_scores_from_colors_from_nodes()\n self._initialize_scores_from_nodes()", "title": "" }, { "docid": "751714b377a69d87b48f6ad3bfe8f593", "score": "0.5712418", "text": "def __init__(self, dataset, block):\n super().__init__(dataset, block)\n\n # these are results for display in the Tab\n self.time_all = None\n self.freq_all = None\n\n self.reset_results_arrays()", "title": "" }, { "docid": "0cb31d9c1325a1de856dff50d6c76175", "score": "0.5708544", "text": "def __init__(self, data, n):\n\t\tself.n = n\n\t\tself.tokens = Counter()\n\t\tself.ng_counts = Counter()\n\t\tself.ctxt_counts = Counter()\n\t\tself.prob = {}\n\t\tfor tune in data:\n\t\t\tself._addTune(tune, self.tokens, self.ng_counts, self.ctxt_counts)", "title": "" }, { "docid": "7fa7b016210ef3e098a82034c85e9dfd", "score": "0.5705584", "text": "def _ts_data_default(self):\n return {\"index\" : arange(100), \"ts1\" : randn(100), \"ts2\" : randn(100)}", "title": "" }, { "docid": "40085014dc1710aa371b83c90205cf6a", "score": "0.5700543", "text": "def setup(self):\n # Load a test instrument\n self.testInst = pysat.Instrument()\n self.testInst.data = pds.DataFrame({'Kp': np.arange(0, 4, 1.0/3.0),\n 'ap_nan': np.full(shape=12, \\\n fill_value=np.nan),\n 'ap_inf': np.full(shape=12, \\\n fill_value=np.inf)},\n index=[pysat.datetime(2009, 1, 1)\n + pds.DateOffset(hours=3*i)\n for i in range(12)])\n self.testInst.meta = pysat.Meta()\n self.testInst.meta.__setitem__('Kp', {self.testInst.meta.fill_label:\n np.nan})\n self.testInst.meta.__setitem__('ap_nan', {self.testInst.meta.fill_label:\n np.nan})\n self.testInst.meta.__setitem__('ap_inv', {self.testInst.meta.fill_label:\n np.inf})\n\n # Load a test Metadata\n self.testMeta = pysat.Meta()", "title": "" }, { "docid": "d3c04d31178ac759cd923408286c07db", "score": "0.5688538", "text": "def __init__(self,\n data=None,\n _skip_initialize=False):\n if _skip_initialize: return\n self.data = [ 0 for dim0 in range(16) ] if data is None else data", "title": "" }, { "docid": "72bc76100a327776dfb9a53a0ebeb59a", "score": "0.56882083", "text": "def setUp(self):\n self.s = TestCountGet.s\n self.c = TestCountGet.c\n self.u = TestCountGet.u\n self.p1 = TestCountGet.p1\n self.p2 = TestCountGet.p2\n self.a1 = TestCountGet.a1\n self.a2 = TestCountGet.a2\n self.a3 = TestCountGet.a3", "title": "" }, { "docid": "da1723f82362204b11e4fc641ea4720a", "score": "0.5678576", "text": "def init_data_map(self):\n\n for i in range(1, len(self.sequence_list[0])+1):\n prob = dict()\n freq = dict()\n for aa in self.aas:\n prob[aa] = 0\n freq[aa] = 0\n self.stats.append(prob)\n self.freq.append(freq)", "title": "" }, { "docid": "a3bfbf00a1e695b79fcc3736aa7ae549", "score": "0.5678504", "text": "def __init__(self):\n self.profile_params = ['v0', 'w_obs_20', 'w_obs_50', 'w_obs_peak', 'psi_obs_max', 'psi_obs_0']", "title": "" }, { "docid": "24fd6e30eec952971ac18802bbab140b", "score": "0.56775504", "text": "def initialize(self, runInfo, inputs, initDict):\n super().initialize(runInfo, inputs, initDict)", "title": "" }, { "docid": "db8bd664b6b5d04b41039ab2ba9d0289", "score": "0.5674166", "text": "def __init__(self, 
outputs: Dict[str, np.ndarray], times: BenchmarkResult):\n self.outputs = outputs\n self.times = times", "title": "" }, { "docid": "a99a247cb91fb2a92d711973b9c83161", "score": "0.5670362", "text": "def __init__(self):\n super().__init__()\n self.dataDict = {} # Dictionary of all the input data, keyed by the name\n self.compareGroups = [] # List of each of the groups that will be compared\n # self.dataPulls = [] #List of data references that will be used\n # self.referenceData = [] #List of reference (experimental) data\n self.methodInfo = {} # Information on what stuff to do.\n self.fZStats = False\n self.interpolation = \"quadratic\"\n # assembler objects to be requested\n self.addAssemblerObject('Distribution', InputData.Quantity.zero_to_infinity)", "title": "" }, { "docid": "0dcb884f7e340ac40e7924883360032a", "score": "0.5667037", "text": "def setUp(self):\n self.batch_size = 100\n self.mnist = datasets.mnist(self.batch_size)", "title": "" }, { "docid": "8d36009a6b8151608fbd0230405f3231", "score": "0.56658715", "text": "def setUpTestData(cls):\n data_gen.run()", "title": "" }, { "docid": "8d36009a6b8151608fbd0230405f3231", "score": "0.56658715", "text": "def setUpTestData(cls):\n data_gen.run()", "title": "" }, { "docid": "fb67594874b310881de04f8ebdab5ed5", "score": "0.5663617", "text": "def __init__(self, benchmark_names_and_iterations, label_names,\n read_perf_report, event_threshold=None):\n self.event_threshold = event_threshold\n self._label_indices = {name: i for i, name in enumerate(label_names)}\n self.perf_data = {}\n for label in label_names:\n for bench_name, bench_iterations in benchmark_names_and_iterations:\n for i in xrange(bench_iterations):\n report = read_perf_report(label, bench_name, i)\n self._ProcessPerfReport(report, label, bench_name, i)", "title": "" }, { "docid": "13b7c91192e8520359f87cc285fc6b7e", "score": "0.5658054", "text": "def __init__(self):\n self.num_train_hams = 0\n self.num_train_spams = 0\n self.word_counts_spam = {}\n self.word_counts_ham = {}\n self.HAM_LABEL = 'ham'\n self.SPAM_LABEL = 'spam'", "title": "" }, { "docid": "53333f7f238bef43f0a827989d4205db", "score": "0.5656606", "text": "def test_00_setup(self):\n self.loadTestData()", "title": "" }, { "docid": "d10ff1c133f19081fcec241d280c616e", "score": "0.5654219", "text": "def __init__(self):\n self.custom = 0\n self.loci = 0\n self.variant = 0\n self.allele = 0\n self.table_builder = 0\n self.filtered_matrix = 0\n self.exome_data = 0\n self.tables = []", "title": "" }, { "docid": "2736b067b10bcca4402a7eb16adae969", "score": "0.5651867", "text": "def initialize(self, runInfoDict, inputFiles):\n np.random.seed(1086)\n self.mu = np.array([0, 0])\n self.cov = np.array([[1., 0.42], [0.42, 1.]])\n # 1000 observed data, ie. 
data with shape (1000, 2)\n self.samples = 1000\n self.data = np.random.multivariate_normal(self.mu, self.cov, size=self.samples)", "title": "" }, { "docid": "e96220b5b6d19b19207e14678502b17d", "score": "0.5640618", "text": "def test_data_loader_init():\n data_loader = DataLoader(CONFIG, TARGETS)\n assert data_loader.config == CONFIG\n assert data_loader.targets == TARGETS\n assert data_loader.param_grid is None\n assert data_loader.drop_na_cols is None\n assert data_loader.drop_na_rows is None\n assert data_loader.odds_type is None\n assert data_loader.testing_duration is None", "title": "" }, { "docid": "c58a8b9e42b3f85ef21415155afdad16", "score": "0.5632202", "text": "def test_init(self, setup):\n\n assert setup[\"widget\"].lf_H0.value() == 0.10\n assert setup[\"widget\"].lf_H1.value() == 0.11\n assert setup[\"widget\"].lf_H3.value() == 0.12\n assert setup[\"widget\"].lf_W0.value() == 0.13\n assert setup[\"widget\"].lf_W3.value() == 0.14", "title": "" }, { "docid": "4d27fac34ac9402e52fa38b7a01a6629", "score": "0.563025", "text": "def __init__(self):\n self.metric_model = LFDA_ml()\n self.X_tr = None\n self.y_train = None\n self.X_te = None", "title": "" }, { "docid": "fb43c5bfc71624221273230f5291a828", "score": "0.562897", "text": "def __init__(self):\n self._mean = Mean()\n super(RunningEpochCPUUsage, self).__init__(\n reset_at=\"epoch\", emit_at=\"iteration\", mode=\"train\"\n )", "title": "" }, { "docid": "b8b5065e791094f5c5f494e4a9a65713", "score": "0.56271946", "text": "def setUp(self):\n self.flag_verbose = 1\n\n self.a = [1]\n self.b = [1,2]\n self.c = [1,2,3]\n \n self.d = numpy.array([1,2])\n self.e = numpy.array([1,2,3,4])", "title": "" }, { "docid": "c92637668a066945e6c1624fbc4d1976", "score": "0.5625483", "text": "def initialize():", "title": "" }, { "docid": "36bb4df348f0b271086284225441c86d", "score": "0.5623814", "text": "def test_include_other_values(self):\n instance = PerfData(\"foo\", \"1\", uom=\"b\", warn=\"10:20\", crit=\"20:30\",\n minval=\"1\", maxval=\"5\")\n assert \"foo=1b;10:20;20:30;1;5\" == str(instance)", "title": "" }, { "docid": "924a3b2418a513f307b1ef5c33769011", "score": "0.56169075", "text": "def __init__(self):\n self.initialized = False\n self.isTrain = True\n self.isTest = True", "title": "" }, { "docid": "187aa782516de11407527b1df3e196d5", "score": "0.5609433", "text": "def test___init__(self):\n # Run\n models = {'test': Mock()}\n sampler = Sampler('test_metadata', models)\n\n # Asserts\n assert sampler.metadata == 'test_metadata'\n assert sampler.models == models\n assert sampler.primary_key == dict()\n assert sampler.remaining_primary_key == dict()", "title": "" }, { "docid": "9c07915c11401041f9df98e01bb4ec93", "score": "0.56052285", "text": "def initializeCPU(self,*args):\n self.cpu,self.sets,start,cshape = args\n self.start = numpy.int32(start)\n self.CPUArray = numpy.zeros(cshape)", "title": "" }, { "docid": "6f11a7ad98411a9e1095dcf85f025b7e", "score": "0.5603574", "text": "def __init__(self):\n self._values = defaultdict(list)\n self._data_for_timestamp = {}", "title": "" }, { "docid": "6bf2eee0450e1a17ac2ce6f6866dbfbe", "score": "0.56032133", "text": "def __init__(self, data):\n\t\tself._demands = data.demands", "title": "" }, { "docid": "7432163fb92e78cc7c4f1f8ca30e0669", "score": "0.5599977", "text": "def __init__(self,ftr_vals,ftr_index,predictor_type):\n\t\tself.ftr_index = ftr_index\n\t\tself.stumps = self.__create_stumps__(ftr_index,ftr_vals)\n\t\tself.predictor_type = predictor_type\n\t\tself.stumps_no = len(self.stumps)", "title": 
"" }, { "docid": "da2a69e773b0ad9e50d124c1ea902545", "score": "0.5595441", "text": "def initParams(self):", "title": "" }, { "docid": "28b2e1f3fb87f4e0e3f3cd9aeb07e279", "score": "0.55947113", "text": "def populate(self):\n populate_standard_test()", "title": "" }, { "docid": "465ed16dbb200bc0c3f0a01c696fb659", "score": "0.5588082", "text": "def __init__(self):\n self.dataPoints = {}", "title": "" }, { "docid": "77e196d0e16eb63f40cefe96b932d2c9", "score": "0.55831677", "text": "def __init__(self):\n self.threshold = []\n self.feature = []\n self.children_right = []\n self.children_left = []\n self.n_outputs = 1\n self.value = []\n self.impurity = []\n self.n_node_samples = []\n self.n_classes = []\n self.kd = []\n self.add()", "title": "" }, { "docid": "cbd7cb66ac094ab5573554e894176070", "score": "0.5581535", "text": "def __init__(self):\n self._anuga = None\n self._time = 0.\n self._values = {}\n self._var_units = {}\n self._grids = {}\n self._grid_type = {}", "title": "" } ]
60307ba7af660df799ca16892d1c3d96
Force a dependency texture to render immediately
[ { "docid": "c989829526008705e68dc27b33ec1a70", "score": "0.6691529", "text": "def renderDependency(self, dep):\n if not dep.hasRenderState():\n dep.attachRenderState(self.rstate)\n dep.viewport.configureOpenGL()\n dep.drawFrame()\n assert(dep.rendered)", "title": "" } ]
[ { "docid": "4755d787a7f1c6803e2f74285aef7817", "score": "0.61700636", "text": "def preRenderUpdate(self):\n self._neighborBuffers[self.currentIndex[0]].setActive(False)\n self._resolveBuffer.setShaderInput(\"lastTex\",\n self._neighborBuffers[self.currentIndex[0]].getColorTexture())\n self.currentIndex[0] = 1 - self.currentIndex[0]\n self._neighborBuffers[self.currentIndex[0]].setActive(True)\n self._resolveBuffer.setShaderInput(\"currentTex\",\n self._neighborBuffers[self.currentIndex[0]].getColorTexture())", "title": "" }, { "docid": "71439427750daf9e0ae030604d9d37a8", "score": "0.61587393", "text": "def __texturesChanged(self, *a):\n self.updateShaderState(alwaysNotify=True)", "title": "" }, { "docid": "4f18ed7f46cf5dec3f62336754d81aca", "score": "0.60822713", "text": "def reload(self):\n self.__texture.reload()", "title": "" }, { "docid": "4b2658f438856bbb07d612fbe7341693", "score": "0.6019064", "text": "def AddTexture(self):", "title": "" }, { "docid": "e35162d2c72ebd08210e57fb3451194b", "score": "0.5988707", "text": "def bind(self):\n self.__texture.bind()", "title": "" }, { "docid": "32a4d6b8a233f4df979c1174bd34c100", "score": "0.59818864", "text": "def FlushUnusedTextures() -> None:\n ...", "title": "" }, { "docid": "20af6a60ef41eedf0e6f3d7d0b5811a3", "score": "0.5964623", "text": "def render_texture(self, texture_name, x, y):\n self.display.blit(texture_name, (\n x - self.camera_x,\n y - self.camera_y\n ))", "title": "" }, { "docid": "c0f67385de2007300dcec74022cf7050", "score": "0.59645987", "text": "def _renderOneShot(self, targetName):\n target = self.targets[targetName]\n target.setActive(True)\n Globals.base.graphicsEngine.renderFrame()\n target.setActive(False)\n target.deleteBuffer()", "title": "" }, { "docid": "bb3640b2ac69f0e12060a9954080f044", "score": "0.59533376", "text": "def drawFrame(self):\n self.integrate(self.time.step())\n if not self.dirty:\n self.viewport.hide()\n return\n\n needReconfigure = False\n for dependency in self.dependencies:\n if not dependency.rendered:\n needReconfigure = True\n self.renderDependency(dependency)\n if needReconfigure:\n self.viewport.configureOpenGL()\n\n self.render()\n\n self.dirty = False\n self.viewport.hide()\n self.rendered = True\n self.renderLifetime = 0\n\n ## Uncomment this to show the textures as they're stored\n #self.viewport.rootView.display.flip()\n #import time\n #print self\n #time.sleep(0.5)", "title": "" }, { "docid": "c5d82c84775d539e4c50dfe314a709fc", "score": "0.58927554", "text": "def postDraw(self):\n\n self.imageTexture .unbindTexture()\n self.cmapTexture .unbindTexture()\n self.negCmapTexture .unbindTexture()\n self.clipTexture .unbindTexture()\n self.modulateTexture .unbindTexture()\n\n fslgl.glvolume_funcs.postDraw(self)", "title": "" }, { "docid": "478107aeda77aaea132862d2f844c69a", "score": "0.5758042", "text": "def update_animation(self):\n if self.frame % self.texture_change_frames == 0:\n self.cur_texture_index += 1\n if self.cur_texture_index >= len(self.textures):\n self.cur_texture_index = 0\n self.texture = self.textures[self.cur_texture_index]\n self.frame += 1", "title": "" }, { "docid": "c01008822a648d82747769ef30260e37", "score": "0.5734747", "text": "def new_texture(material, texture):\n use_texture(material, texture)", "title": "" }, { "docid": "51039568f5729d35063f03988e5be992", "score": "0.5724695", "text": "def use(self):\n self._fbo.use()", "title": "" }, { "docid": "13f6c61b74b0a741ab0aa3f20fbded87", "score": "0.57212794", "text": "def _automatic_rendering(self):\n if self.viewer is 
not None and self.enable_auto_render:\n self.should_update_rendering = True\n\n if self.automatic_rendering_callback:\n self.automatic_rendering_callback()\n else:\n self.render(self.rendering_mode)", "title": "" }, { "docid": "73642f511546ac235a5df6fefd971c01", "score": "0.57026607", "text": "def spin_once(self):\n self.draw()\n self.tick()", "title": "" }, { "docid": "62aa395d58b226bf6bd46a2d9abb1e63", "score": "0.56929946", "text": "def _on_realize(self, *args):\n # Obtain a reference to the OpenGL drawable\n # and rendering context.\n gldrawable = self.get_gl_drawable()\n glcontext = self.get_gl_context()\n # OpenGL begin.\n if not gldrawable.gl_begin(glcontext):\n return\n\n self.pil_image_texture = Texture()\n #self.pil_image_texture.load_image_to_texture(\"./example.jpg\")\n\n self._set_view(WIDTH / float(HEIGHT))\n\n glEnable(GL_TEXTURE_RECTANGLE_ARB) # 2D)\n glEnable(GL_BLEND)\n glShadeModel(GL_SMOOTH)\n glClearColor(0.0, 0.0, 0.0, 1.0) # black background\n glColor4f(1.0, 1.0, 1.0, 1.0) # default color is white\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n # OpenGL end\n gldrawable.gl_end()", "title": "" }, { "docid": "91af337aa669d857fddda147f159be8e", "score": "0.5624649", "text": "def pre_render(self) -> None:\n self.buffer.fill((0, 0, 0, 0))\n\n self.corner_texture.render(self.buffer, Position(0, 0), Direction.NORTH)\n self.corner_texture.render(self.buffer, Position(self.render_width - self.corner_texture.get_width(), 0), Direction.EAST)\n self.corner_texture.render(self.buffer, Position(self.render_width - self.corner_texture.get_width(), self.render_height - self.corner_texture.get_height()), Direction.SOUTH)\n self.corner_texture.render(self.buffer, Position(0, self.render_height - self.corner_texture.get_height()), Direction.WEST)\n\n for i in range(self.corner_texture.get_width(), self.render_width - self.corner_texture.get_width(), self.side_texture.get_width()):\n self.side_texture.render(self.buffer, Position(i, 0), Direction.NORTH)\n for i in range(self.corner_texture.get_height(), self.render_height - self.corner_texture.get_height(), self.side_texture.get_width()):\n self.side_texture.render(self.buffer, Position(self.render_width - self.corner_texture.get_height(), i), Direction.EAST)\n for i in range(self.corner_texture.get_height(), self.render_height - self.corner_texture.get_height(), self.side_texture.get_width()):\n self.side_texture.render(self.buffer, Position(0, i), Direction.WEST)\n for i in range(self.corner_texture.get_width(), self.render_width - self.corner_texture.get_width(), self.side_texture.get_width()):\n self.side_texture.render(self.buffer, Position(i, self.render_height - self.corner_texture.get_height()), Direction.SOUTH)\n\n for x in range(self.corner_texture.get_width(), self.render_width - self.corner_texture.get_width(), self.inside_texture.get_width()):\n for y in range(self.corner_texture.get_height(), self.render_height - self.corner_texture.get_height(), self.inside_texture.get_height()):\n self.inside_texture.render(self.buffer, Position(x, y))", "title": "" }, { "docid": "a862d1b68902ef85505204944a9c250f", "score": "0.56139815", "text": "def transtition_active(self, surface):\n surface.blit(self.transition_surface, (0, 0))", "title": "" }, { "docid": "2dfe05c3ff639758bbeb5cc3e1535ea8", "score": "0.56118566", "text": "def _imageSyncChanged(self, *a):\n\n self.refreshImageTexture()\n self.updateShaderState(alwaysNotify=True)", "title": "" }, { "docid": "1243a2b7ba9e17030a8885b3afc7b5dc", "score": "0.56029165", "text": "def 
component_will_render(self) -> None:\n self._is_rendering = True\n self._event_effects.will_unmount.clear()", "title": "" }, { "docid": "5120b0182883963e53822c93f1f48567", "score": "0.5567819", "text": "def AddTexture(self):\n\n pass", "title": "" }, { "docid": "5120b0182883963e53822c93f1f48567", "score": "0.5567819", "text": "def AddTexture(self):\n\n pass", "title": "" }, { "docid": "3841973ca4ce6bb52ae64ef0a15d6af0", "score": "0.55650246", "text": "def render(self):\n self.manager.queue_render()", "title": "" }, { "docid": "d48108500b0bab57ba5148a952911c29", "score": "0.55434585", "text": "def render(self):\n pygame.display.update()\n pygame.display.flip()", "title": "" }, { "docid": "f1047ba2cd6f073ad7937cb501765f80", "score": "0.55431986", "text": "def render(self, screen: pygame.Surface):", "title": "" }, { "docid": "cbac43302fbf702e639897b3a6679bd5", "score": "0.5535705", "text": "def death_animation(self):\n self.texture = self.fall_texture_pair[self.facing_direction]", "title": "" }, { "docid": "ba022c7a62752ad0644247943a61bed8", "score": "0.55254066", "text": "def scroll_effect(self, elapsed_time):\n self.cloud_texture.uvpos = ( (self.cloud_texture.uvpos[0] + elapsed_time) % Window.width, self.cloud_texture.uvpos[1])\n self.floor_texture.uvpos = ( (self.floor_texture.uvpos[0] + elapsed_time / 2.0) % Window.width, self.floor_texture.uvpos[1])\n\n \"\"\"\n Draws the texture over and over\n\n \"\"\"\n texture = self.property('cloud_texture')\n texture.dispatch(self)\n\n texture = self.property('floor_texture')\n texture.dispatch(self)", "title": "" }, { "docid": "69a21b5d8f91ad57b1280e650d3bd33f", "score": "0.55203927", "text": "def draw(self):\n self.screen.fill(pygame.Color('black'))\n for entity in self.observing:\n sprite_artifact = entity.artifacts[SpriteArtifact.NAME]\n if sprite_artifact.drawstage & int(self.current_game_state):\n sprite_artifact.sprite.draw(self.screen, sprite_artifact.positionx, sprite_artifact.positiony)\n if self.current_effect is not None:\n if self.current_effect.dict['type'] == ScreenEffectEvent.BLUR: # czemu nie mog� dac .type\n scale = 1.0 / float(20.0)\n surf_size = self.screen.get_size()\n scale_size = (int(surf_size[0] * scale), int(surf_size[1] * scale))\n surf = pygame.transform.smoothscale(self.screen, scale_size)\n surf = pygame.transform.smoothscale(surf, surf_size)\n self.screen.blit(surf, (0, 0))\n elif self.current_effect.dict['type'] == ScreenEffectEvent.COLOR_EXPLOSION: # czemu nie mog� dac .type\n surf = self.screen\n array = pygame.surfarray.pixels3d(surf)\n array[:, :, self.col:] = 0\n del array\n self.col+=1\n if self.col == 3:\n self.col = 0\n self.screen.blit(surf, (0, 0))\n elif self.current_effect.dict['type'] == ScreenEffectEvent.PAUSE_EFFECT:\n surf = self.screen\n array = pygame.surfarray.pixels3d(surf)\n array[:, :, 1:] = 0\n del array\n self.screen.blit(surf, (0, 0))\n pygame.display.flip()", "title": "" }, { "docid": "947cd2e32f0864bc235f3e3397fc54e6", "score": "0.5516193", "text": "def render(self, display):\r\n if self._render:\r\n display.blit(self.surface, self.pos)", "title": "" }, { "docid": "af711962c66500274e460fa0555f0fd5", "score": "0.54881376", "text": "def textrue(self) -> moderngl.Texture:\n return self._texture", "title": "" }, { "docid": "4fc7f93fc6c3dd1ed8fcd5dd822f522a", "score": "0.54828864", "text": "def on_draw(self):\n arcade.start_render()", "title": "" }, { "docid": "f76501a747a759235b074210f1ae9716", "score": "0.547878", "text": "def on_draw( self ):\r\n self.clear()\r\n self.setup_3D()\r\n 
self.voxel.draw()", "title": "" }, { "docid": "1c49aabf7fd8f759a0ea668d0f8dc726", "score": "0.5477533", "text": "def on_draw(self):\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);\n\n self.set_3d()\n glColor3d(1, 1, 1)\n self.model.batch.draw()\n self.draw_focused_block()\n # self.set_2d()\n # self.draw_reticle()\n\n # glBindFramebuffer(GL_FRAMEBUFFER, 0);", "title": "" }, { "docid": "4b6f3975682204d3cc38a94068f407a7", "score": "0.54729706", "text": "def draw():\n background(0)\n smooth()\n render(production)", "title": "" }, { "docid": "bee197be6d8b27ed5718c5982d40490a", "score": "0.5468751", "text": "def _on_update_shaders(self):\n\n artellapipe.ShadersMgr().update_shaders()", "title": "" }, { "docid": "5de12656104542216e2d03a12b6a9013", "score": "0.5444994", "text": "def AddTexture(self):\n\t\t\n\t\tpass", "title": "" }, { "docid": "dd7cc31410b6a19bb0013be0ecec9a81", "score": "0.5444469", "text": "def draw(self, surface):\n if self.need_build:\n self.build()\n self.need_build = False\n\n surface.blit(self.background, (0,0))\n super().draw(surface)", "title": "" }, { "docid": "c06cd03fa5ffec1e623a7b15927b9a27", "score": "0.5427205", "text": "def render(self):\r\n s = self.scene\r\n if s is not None:\r\n s.render()", "title": "" }, { "docid": "aa095d331b043e639bb14c51e8b8a4db", "score": "0.5419612", "text": "def textureDeformer(*args, **kwargs):\n pass", "title": "" }, { "docid": "42acafee771a0f998eaad956d739ab4e", "score": "0.54134583", "text": "def _draw(self, texture_id=None) -> None:\n self._back.draw(self._box.rect, texture_id)", "title": "" }, { "docid": "a82da8f3afbf4ff6bd9c3fe5b1e4c6f4", "score": "0.54122037", "text": "def gl_sync():\n gl_lock()\n gl_wait()", "title": "" }, { "docid": "fd58e8b89c78afd51c44638d063c161c", "score": "0.5402659", "text": "def render(self):\n self.screen.fill(cfg.background_color)\n\n if self.active_scene != None:\n self.active_scene.render(self.screen)\n self.sprite_list.draw(self.screen)\n\n if self.show_fps:\n self.screen.blit(self.font.render(\n str(int(self.clock.get_fps())), True, pg.Color('white')), (10, 10))\n\n texture_data = pg.transform.flip(\n self.screen, False, True).get_view('1')\n self.diffuse_texture.write(texture_data)\n self.ctx.clear(14/255, 40/255, 66/255)\n self.diffuse_texture.use()\n\n self.vao.render()\n pg.display.flip()", "title": "" }, { "docid": "17b1565b96bd4a9121cc5c36002b7204", "score": "0.5400747", "text": "def prerender(self, context: OptionalContext) -> None:", "title": "" }, { "docid": "edc7d00392f2388816eb73cff17ff222", "score": "0.5398417", "text": "def bind(self):\n if GLExtension.multitexture:\n glClientActiveTextureARB(GL_TEXTURE0_ARB)\n self.glInterleavedArrays(self.format, 0, self.interleaved)", "title": "" }, { "docid": "ac96ee7241d0b06edc9502672033fe6d", "score": "0.5391154", "text": "def on_draw(self):\n texture = self.create_texture()\n\n # Forces the sprite to be scaled nearest neighbour\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n\n # Scale the sprite to the window size\n texture.width = self.width\n texture.height = self.height\n\n # Draw the texture on screen\n texture.blit(0, 0)\n del texture", "title": "" }, { "docid": "7e5d084cd65699ab238ae871f76849ba", "score": "0.5385664", "text": "def redraw(self):\n self.cur_surf = None", "title": "" }, { "docid": "f1169777f84d3e2f45aefc0273455e99", "score": "0.53789634", "text": "def __init__texture(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "f529704e29df001c569b6cd55df405be", "score": "0.53743976", "text": 
"def bind(self, rstate=None):\n if not self.rendered:\n raise DynamicTextureException(\"Attempt to bind a DynamicTexture before it has been rendered\")\n Texture.bind(self, rstate)", "title": "" }, { "docid": "2d5a4c71b7283692aec198137dc07b7c", "score": "0.53699154", "text": "def _idle(self): # pylint: disable=no-self-use\n glutPostRedisplay()", "title": "" }, { "docid": "73c33fce173274ee3f693605f7e335e7", "score": "0.5358487", "text": "def toolsReloadShader(self,widget):\n\t\tself.glarea.compileshader(self.veditor.get_text(),self.feditor.get_text())\n\t\tself.glarea.queue_draw()", "title": "" }, { "docid": "c6059a58cfb138700b4fab31d5dbc3c1", "score": "0.5349137", "text": "def display_rendered_content():\n glutSwapBuffers()", "title": "" }, { "docid": "1f17995ffe82c90c7d259230cc47b823", "score": "0.5346007", "text": "def renderScene():\n scene = bpy.context.scene\n if scene.render.image_settings.file_format == 'PNG':\n scene.render.filepath += len(str(scene.frame_end))*'#'\n\n bpy.ops.render.render(animation=True, write_still=True)", "title": "" }, { "docid": "5f9341bbad3ce7ebabb7647085508434", "score": "0.5345683", "text": "def submit_texture(self, eye, texture):\n\n try:\n # Retrieve the texture OpenGL binding\n texture_context = texture.prepare_now(0, self.base.win.gsg.prepared_objects, self.base.win.gsg)\n handle = texture_context.get_native_id()\n if handle != 0:\n ovr_texture = openvr.Texture_t()\n ovr_texture.handle = texture_context.get_native_id()\n ovr_texture.eType = openvr.TextureType_OpenGL\n ovr_texture.eColorSpace = self.color_space\n self.compositor.submit(eye, ovr_texture)\n except Exception as e:\n if hasattr(self, 'on_texture_submit_error'):\n if not self.on_texture_submit_error_notified:\n print(\"WARNING: 'on_texture_submit_error()' is deprecated and will be removed in a future release\")\n self.on_texture_submit_error_notified = True\n self.on_texture_submit_error(e)\n else:\n if self.submit_error_handler is not None:\n self.submit_error_handler(e)\n else:\n # by default, just reraise the exception\n raise e", "title": "" }, { "docid": "de027e00aae405d8aba9a03536b4dee2", "score": "0.5345117", "text": "def update_surface(self):\r\n surface_to_blit = pg.Surface((0, 0))\r\n if self.surface_to_blit_function:\r\n surface_to_blit = self.surface_to_blit_function()\r\n self.resize_to_surface()\r\n\r\n self.surface.blit(surface_to_blit, (0, 0))\r\n self.remake_border()", "title": "" }, { "docid": "372cd0ae9f317a53d4fd4cdd9c929c8a", "score": "0.534148", "text": "def onrender(self) -> None:\n flip = False\n if self.refill:\n self.display.fill(self.background)\n self.dirty = True\n self.refill = False\n if self.dirty:\n # UI dirty\n self.draw_ui_el(self.title)\n if not self.game:\n self.display.blit(self.logo, self.logo_rect)\n for el in self.clickables:\n if el.hidden:\n continue\n self.draw_ui_el(el)\n flip = True\n self.dirty = False\n if self.game and self.game_dirty:\n # game dirty; call :func:`game.onrender`\n self.game_dirty = self.game.onrender()\n if self.game:\n # blit game to display\n self.display.fill(self.background, self.game_rect)\n self.display.blit(self.game_surface, self.game_rect.topleft)\n flip = True\n if flip:\n # either game or main display updated: flip\n pygame.display.flip()", "title": "" }, { "docid": "0df623453d52ceb9027085e92433e753", "score": "0.534113", "text": "def change_texture(self, colour: Optional[int] = 0) -> None:\n self.texture = self.textures[colour]", "title": "" }, { "docid": "f80a2a933620b8dbd97faf5075ccda07", "score": "0.5337519", 
"text": "def render(self, display):\r\n display.blit(self.surface, self.pos)", "title": "" }, { "docid": "c1a9c8adda5591ea8aebaf3e4d104654", "score": "0.5321366", "text": "def on_draw(self):\n self.clear()\n self.set_3d()\n glColor3d(1, 1, 1)\n #self.block_shaders[0].bind()\n self.model.batch.draw()\n #self.block_shaders[0].unbind()\n \n x = 0.25 #1/(Potenzen von 2) sind sinnvoll, je größer der Wert, desto stärker der Kontrast\n glColor3d(x, x, x)\n glEnable(GL_COLOR_LOGIC_OP)\n glLogicOp(GL_XOR)\n\n self.draw_focused_block()\n self.set_2d()\n self.draw_reticle()\n\n glDisable(GL_COLOR_LOGIC_OP)\n glColor3d(1, 1, 1)\n self.model.hud_batch.draw()\n \n if self.debug_info_visible:\n self.draw_debug_info()", "title": "" }, { "docid": "5d16cf5b49a44a3d5ddfd142688c2b96", "score": "0.53165007", "text": "def on_draw(self):\n self.clear()\n self.set_3d()\n glColor3d(1, 1, 1)\n self.model.main_batch.draw()\n self.model.bomb_batch.draw()\n self.set_2d()\n self.draw_label()\n self.status.draw()", "title": "" }, { "docid": "186a5a4acf7c4d1e0d6e50997d2f9a42", "score": "0.53158283", "text": "def queue_render_order(self, order_name, model):\n self.ctx.makeCurrent(self.offscreen_surface)\n GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)\n GL.glViewport(0, 0, OffscreenModelRendererThread.TEX_SIZE, OffscreenModelRendererThread.TEX_SIZE)\n GL.glClearColor(0, 0, 0, 0)\n GL.glClear(GL.GL_DEPTH_BUFFER_BIT | GL.GL_COLOR_BUFFER_BIT)\n self.renderer.setup_data_for_block_model(model)\n self.renderer.resize(OffscreenModelRendererThread.TEX_SIZE, OffscreenModelRendererThread.TEX_SIZE)\n self.renderer.draw_loaded_model(glm.lookAt(glm.vec3(15, 5, 5), glm.vec3(5, 5, 5), glm.vec3(0, 1, 0)), \"gui\",\n glm.ortho(-10, 10, 10, -10, 0.1, 50))\n tex_str = GL.glReadPixels(0, 0, OffscreenModelRendererThread.TEX_SIZE, OffscreenModelRendererThread.TEX_SIZE,\n GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, outputType=bytes)\n GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)\n qimage = QImage(tex_str, OffscreenModelRendererThread.TEX_SIZE, OffscreenModelRendererThread.TEX_SIZE,\n OffscreenModelRendererThread.TEX_SIZE * 4, QImage.Format_RGBA8888)\n qimage = qimage.mirrored(vertical=True)\n self.renderedTexture.emit(order_name, qimage)", "title": "" }, { "docid": "3eb39f384a91c078b7321e11c903836e", "score": "0.531496", "text": "def render(self, clear=None):\n\t\tif clear:\n\t\t\ttry:\n\t\t\t\tdel self._fastim\n\t\t\texcept AttributeError:\n\t\t\t\tpass\n\t\t\treturn\n\n\t\tif self.fb.opengl:\n\t\t\tself._fastim = pygame.image.tostring(self.im,'RGBA', 1)\n\t\telse:\n\t\t\tself._fastim = self.im.convert()\n\n\t\t#added opengl convert 12-jan-2006 shinji", "title": "" }, { "docid": "69f2fab894109ea5b52a23e8d4926568", "score": "0.5312781", "text": "def begin_render(self):\n\t\tself._state_event.set()", "title": "" }, { "docid": "a43a59df85632f29d4536b84610d9d95", "score": "0.53113115", "text": "def update_pipeline(self):\r\n if (len(self.inputs) == 0) or \\\r\n (len(self.inputs[0].outputs) == 0):\r\n return\r\n\r\n self._tcoord_generator_mode_changed(self.tcoord_generator_mode)\r\n self.render()", "title": "" }, { "docid": "19a5595f545c2bbb1b0443f8eee729bf", "score": "0.5311028", "text": "def RenderTexture(self, vtkVolume, vtkRenderer, p_int=..., p_int=..., *args, **kwargs):\n ...", "title": "" }, { "docid": "eb2bdf8a161c2580d1e0c7883109db63", "score": "0.53094965", "text": "def render(self):\n self.draw()\n self.loadBackbuffer(self.viewport.size, format=self.format)", "title": "" }, { "docid": "b46a11b4a1c3a5790d3173fb514be8c1", "score": "0.5306943", 
"text": "def _setupResolveBuffer(self):\n self._resolveBuffer = RenderTarget(\"SMAA-Resolve\")\n self._resolveBuffer.addColorTexture()\n self._resolveBuffer.prepareOffscreenBuffer()", "title": "" }, { "docid": "b7bfe06714add1d1d6dc082425e7e3dc", "score": "0.5295089", "text": "def render(self):\n\t\tif self.fb.opengl:\n\t\t\tself.fastim = pygame.image.tostring(self.im,'RGBA', 1)\n\t\telse:\n\t\t\tself.fastim = self.im.convert()\n\n\t\t#added opengl convert 12-jan-2006 shinji", "title": "" }, { "docid": "e4414f01a86a1ab0691a1a6c262b02ed", "score": "0.5294587", "text": "def destroy(self):\n if self._tx and self._renderer_ref[0]:\n render.SDL_DestroyTexture(self._tx)\n self._tx = None", "title": "" }, { "docid": "10f7784f1c53b33858339efd95a5b0f6", "score": "0.5292072", "text": "def re_paint(self):\n self.evo_flock.main_loop()\n self.clear_scene()\n self.draw_world()\n self.populate_world()", "title": "" }, { "docid": "2e82ccdc1e9e4f9df14278482f34c832", "score": "0.52908576", "text": "def __draw(self):\n\n## self.display.fill((0,0,0))\n## if not self.__autofill_int:\n## self.__images[1].fill((0,0,0))\n self.display.blit(self.__images[self.__autofill_int], (0,0))\n pg.display.flip()", "title": "" }, { "docid": "bff31f80b1c466ce4b214feb1ebda119", "score": "0.5285347", "text": "def blitme(self):\n self.sreen.blit(self.image, self.rect)", "title": "" }, { "docid": "e5d4b4793007a83f1fc0b3bb1daf5011", "score": "0.52850515", "text": "def texturAnAus(self):\r\n if(self.t == True):\r\n self.t = False\r\n self.texture.setTexture(loader.loadTexture(\"models/borm.JPG\"), 1)\r\n else:\r\n self.t = True\r\n self.chooseTexture()", "title": "" }, { "docid": "c2b1f46178e308145310c1babfd70794", "score": "0.5283907", "text": "def ForceBlit(self):\n self.gameDisplay.blit(self.TextSurf, self.TextRect)", "title": "" }, { "docid": "8aa95042a91555d3b3c37baf30ee2e10", "score": "0.5281683", "text": "def render(self, dt):\n self.clear()\n self.camera.apply()\n\n for blob in self.core.game_world.blobs:\n anim = blob.get_component(AnimatedSpriteComponent)\n phys = blob.get_component(PhysicsComponent)\n if anim and phys:\n anim.animated_sprite.update(dt)\n anim.animated_sprite.set_rotation(phys.get_rotation())\n anim.draw(phys.get_position())\n\n if self.enable_physics_debug:\n self.draw_physics_debug()\n\n if self.enable_fps_display:\n self._fps_display.draw()\n \n if self.enable_camera_debug:\n self.camera.debug_draw()", "title": "" }, { "docid": "6dc32ec1613f828145085072c2df01a2", "score": "0.5267127", "text": "def write_initial_state(self):\n self.texture_1.write(array('B', self.gen_initial_data(self.size[0] * self.size[1] * 3)))", "title": "" }, { "docid": "cb87a9a9e0f7c2960d06567cf10f76a7", "score": "0.52651316", "text": "def gl_render(self):\n\n pass", "title": "" }, { "docid": "e577e33d67d248f6686a782c0bea5078", "score": "0.52621514", "text": "def _setup_textures(self):\r\n for i in range(self._aux_count):\r\n self._targets[\"aux_{}\".format(i)] = Texture(\r\n self.debug_name + \"_aux{}\".format(i))\r\n for tex in itervalues(self._targets):\r\n tex.set_wrap_u(SamplerState.WM_clamp)\r\n tex.set_wrap_v(SamplerState.WM_clamp)\r\n tex.set_anisotropic_degree(0)\r\n tex.set_x_size(self._size.x)\r\n tex.set_y_size(self._size.y)\r\n tex.set_minfilter(SamplerState.FT_linear)\r\n tex.set_magfilter(SamplerState.FT_linear)", "title": "" }, { "docid": "172fd21694dd87b5bced40e046236dac", "score": "0.5260925", "text": "def in_surface():\n pass", "title": "" }, { "docid": "534ddad59e023496bf8d69d02aba73b5", "score": "0.52496105", 
"text": "def render(self):\n if self.rendering:\n # if already rendering, don't execute\n self.timer.start() # Don't forget to do this last render request\n else:\n try:\n self.rendering = True\n self.render_helper()\n finally:\n # No longer rendering, OK to receive another rendering call\n self.rendering = False", "title": "" }, { "docid": "c80522ccab2a575b722644d161943031", "score": "0.52494425", "text": "def render(self, display):\n display.blit(self.surface, self.pos)", "title": "" }, { "docid": "3bb332fc36e81c741043665dc5c182e7", "score": "0.52475643", "text": "def __emit_texture(self, texture, bump_bool, scene, node=None, material_name=None, scene_texture=False):\n\n # Nothing to do if this texture was already emitted.\n\n if not node:\n if texture in self._textures_set:\n return\n\n self._textures_set.add(texture)\n\n if scene_texture:\n # texture is an absolute file path string.\n # Assume the path ends with '.png' or '.exr'.\n texture_name = texture.split(util.sep)[-1][:-4]\n filepath = texture\n color_space = 'srgb'\n elif node is not None:\n texture_name = node.get_node_name()\n filepath = util.realpath(node.file_path)\n color_space = node.color_space\n else:\n if texture.image.colorspace_settings.name == 'Linear':\n color_space = 'linear_rgb'\n elif texture.image.colorspace_settings.name == 'XYZ':\n color_space = 'ciexyz'\n else:\n color_space = 'srgb'\n\n filepath = util.realpath(texture.image.filepath)\n texture_name = texture.name + \"_bump\" if bump_bool is True else texture.name\n\n self.__open_element('texture name=\"{0}\" model=\"disk_texture_2d\"'.format(texture_name))\n self.__emit_parameter(\"color_space\", color_space)\n self.__emit_parameter(\"filename\", filepath)\n self.__close_element(\"texture\")\n\n # Now create texture instance.\n self.__emit_texture_instance(texture, texture_name, bump_bool, node, material_name, scene_texture)", "title": "" }, { "docid": "a20c3b20c35b286ecc5bf4aeb6cb5ceb", "score": "0.5246178", "text": "def append_texture(self, texture):\n self.textures.append(texture)", "title": "" }, { "docid": "52bbfcf9df649520cf080f92e24d4d86", "score": "0.5234939", "text": "def setDiffuseTextureUnit(self, *args):\n return _osgFX.BumpMapping_setDiffuseTextureUnit(self, *args)", "title": "" }, { "docid": "4e7a0488ed7daf84f1bae696083b9da5", "score": "0.52334094", "text": "def __emit_texture_instance(self, texture, texture_name, bump_bool, node=None, material_name=None, scene_texture=False):\n\n if scene_texture:\n mode = \"clamp\"\n elif node is not None:\n mode = node.addressing_mode\n else:\n mode = \"wrap\" if texture.extension == \"REPEAT\" else \"clamp\"\n\n self.__open_element('texture_instance name=\"{0}_inst\" texture=\"{1}\"'.format(texture_name, texture_name))\n self.__emit_parameter(\"addressing_mode\", mode)\n self.__emit_parameter(\"filtering_mode\", \"bilinear\")\n self.__emit_parameter(\"alpha_mode\", \"detect\")\n self.__close_element(\"texture_instance\")", "title": "" }, { "docid": "0943319fb1f352bca5c6ef341d89a1a2", "score": "0.52331066", "text": "def init_texture(self, src, width, height):\n texture = arcade.load_texture(src)\n\n self.textures.append(texture)\n self.set_texture(0)\n self.height = height\n self.width = width", "title": "" }, { "docid": "4f44df45e2eed344468ba9b34f6c83ae", "score": "0.52331066", "text": "def getImmediateRendering():\n\treturn False", "title": "" }, { "docid": "a50084b612908c020c39bd331723cfd7", "score": "0.52204895", "text": "def dead_texture(self, file, folder, pos_x, pos_y, size_x, size_y, alpha):\n 
self.dead_texture = texture_for_sprite(file, folder, pos_x, pos_y, size_x, size_y, alpha)", "title": "" }, { "docid": "ce2940b099ef993ad74db374c4245320", "score": "0.5216109", "text": "def draw(self, **kwargs):\r\n if len(self.sprite_list) == 0:\r\n return\r\n\r\n # What percent of this sprite list moved? Used in guessing spatial hashing\r\n self._percent_sprites_moved = self._sprites_moved / len(self.sprite_list) * 100\r\n self._sprites_moved = 0\r\n\r\n # Make sure window context exists\r\n if self.ctx is None:\r\n self.ctx = get_window().ctx\r\n # Used in drawing optimization via OpenGL\r\n self.program = self.ctx.sprite_list_program_cull\r\n\r\n if self._vao1 is None:\r\n self._calculate_sprite_buffer()\r\n\r\n self.ctx.enable(self.ctx.BLEND)\r\n if \"blend_function\" in kwargs:\r\n self.ctx.blend_func = kwargs[\"blend_function\"]\r\n else:\r\n self.ctx.blend_func = self.ctx.BLEND_DEFAULT\r\n\r\n self._texture.use(0)\r\n\r\n if \"filter\" in kwargs:\r\n self._texture.filter = self.ctx.NEAREST, self.ctx.NEAREST\r\n\r\n self.program['Texture'] = self.texture_id\r\n\r\n texture_transform = None\r\n if len(self.sprite_list) > 0:\r\n # always wrap texture transformations with translations\r\n # so that rotate and resize operations act on the texture\r\n # center by default\r\n texture_transform = Matrix3x3().translate(-0.5, -0.5).multiply(self.sprite_list[0].texture_transform.v).multiply(Matrix3x3().translate(0.5, 0.5).v)\r\n else:\r\n texture_transform = Matrix3x3()\r\n self.program['TextureTransform'] = texture_transform.v\r\n\r\n if not self.is_static:\r\n if self._sprite_pos_changed:\r\n self._sprite_pos_buf.orphan()\r\n self._sprite_pos_buf.write(self._sprite_pos_data)\r\n self._sprite_pos_changed = False\r\n\r\n if self._sprite_size_changed:\r\n self._sprite_size_buf.orphan()\r\n self._sprite_size_buf.write(self._sprite_size_data)\r\n self._sprite_size_changed = False\r\n\r\n if self._sprite_angle_changed:\r\n self._sprite_angle_buf.orphan()\r\n self._sprite_angle_buf.write(self._sprite_angle_data)\r\n self._sprite_angle_changed = False\r\n\r\n if self._sprite_color_changed:\r\n self._sprite_color_buf.orphan()\r\n self._sprite_color_buf.write(self._sprite_color_data)\r\n self._sprite_color_changed = False\r\n\r\n if self._sprite_sub_tex_changed:\r\n self._sprite_sub_tex_buf.orphan()\r\n self._sprite_sub_tex_buf.write(self._sprite_sub_tex_data)\r\n self._sprite_sub_tex_changed = False\r\n\r\n self._vao1.render(self.program, mode=self.ctx.POINTS, vertices=len(self.sprite_list))", "title": "" }, { "docid": "082bd438f55fb418f2ac94958a46dc01", "score": "0.52104336", "text": "def requestRedraw(self):\n self._reInitBuffer = True", "title": "" }, { "docid": "6500b6078e6976dabce940ba3d8ef05f", "score": "0.52068037", "text": "def redraw_blocking(self):\n logger.debug(\"Blocking redraw\")\n self.window_tabber.activeGLWindow.glWidget.updateScenes()", "title": "" }, { "docid": "d2f40751a378e4529c481ab93d26eac2", "score": "0.520604", "text": "def partial_draw(self):", "title": "" }, { "docid": "040d94bb80108e9d97f91e6e0ccdd068", "score": "0.52058333", "text": "def draw_progress(self):\n self.bitmap = self.bitmap_from_compute()\n self.Refresh()\n self.Update()\n wx.CallAfter(self.fire_event, AptusRecomputedEvent)\n wx.SafeYield(onlyIfNeeded=True)", "title": "" }, { "docid": "559ca831abd02fbb20fe81b42160ea0c", "score": "0.52016234", "text": "def render_2d_front() -> None:\n pass", "title": "" }, { "docid": "178d6cd9da989dbc744bfeaf8049381d", "score": "0.52010787", "text": "def on_draw(self):\n 
arcade.start_render()\n self.current_scene.draw()", "title": "" }, { "docid": "94deef71fab01b412ebe3fc569b8509f", "score": "0.5199474", "text": "def on_fresh_drawable_shape_ready(self):", "title": "" }, { "docid": "b50efb72cacd7d0e3327e396e1cbcb12", "score": "0.5197601", "text": "def rebuildSurface(*args, **kwargs):\n pass", "title": "" }, { "docid": "327350dda84413d2411ad3d7b868916e", "score": "0.5197518", "text": "def begin_render(self):\n self._state_event.set()", "title": "" }, { "docid": "6a101cee3c49c63b1f21d607d0333733", "score": "0.51908845", "text": "def on_draw(self):\n\t\tself.render()", "title": "" }, { "docid": "85354c1a56de9f93b9cb08f710db031a", "score": "0.5189136", "text": "def _initGL(self):\n self._genColourBarTexture()", "title": "" }, { "docid": "4856392d7af23b3a40de0a13a796fa94", "score": "0.5185628", "text": "def toolsAnimatePreview(self,widget):\n\t\tself.glarea.redraw=True\n\t\tself.glarea.queue_draw()", "title": "" } ]
ec81f3492a646512203d20127819cc35
while a GUEST user is connected, GC will not remove any of its projects nor the user itself
[ { "docid": "e05592dcc37128643debb195e9757a51", "score": "0.5476483", "text": "async def test_t1_while_guest_is_connected_no_resources_are_removed(\n disable_garbage_collector_task: None,\n client: TestClient,\n socketio_client_factory: Callable,\n aiopg_engine: aiopg.sa.engine.Engine,\n tests_data_dir: Path,\n osparc_product_name: str,\n):\n assert client.app\n logged_guest_user = await login_guest_user(client)\n empty_guest_user_project = await new_project(\n client, logged_guest_user, osparc_product_name, tests_data_dir\n )\n await assert_users_count(aiopg_engine, 1)\n await assert_projects_count(aiopg_engine, 1)\n\n await connect_to_socketio(client, logged_guest_user, socketio_client_factory)\n await asyncio.sleep(SERVICE_DELETION_DELAY + 1)\n await gc_core.collect_garbage(app=client.app)\n\n await assert_user_in_db(aiopg_engine, logged_guest_user)\n await assert_project_in_db(aiopg_engine, empty_guest_user_project)", "title": "" } ]
[ { "docid": "664815eaee10249845f72ab0dcc66027", "score": "0.6786619", "text": "def cleanup(self):\n\n LOG.info('Cleaning up user accounts')\n for u in self.user_manager.iterator():\n LOG.info('User: %s' % u.username)\n\n u_projects = u.api.projects.list()\n for p in u_projects:\n LOG.info(' Deleting Project: %s' % p.name)\n u.api.projects.delete(p.name)\n\n while True:\n LOG.info('Waiting for projects to be deleted...')\n total_projects = 0\n for u in self.user_manager.iterator():\n u_projects = u.api.projects.list()\n total_projects += len(u_projects)\n\n if total_projects == 0:\n LOG.info('Project clean up complete')\n break\n else:\n time.sleep(5)", "title": "" }, { "docid": "25987ad3e66118fb2bf0f39fca081f38", "score": "0.66345143", "text": "def deleteProjects(self):\n\n self.logger.info(\"Checking if there are projects to be deleted\")\n # generate a list of all the sub-groups\n sg_list = self._getSubGroupsList()\n\n# if (self.logger.getEffectiveLevel() == 10):\n# print \"Sub-groups:\"\n# pp = pprint.PrettyPrinter(indent=4)\n# pp.pprint(sg_list)\n\n s = set(self.conf.internal_project_list)\n for proj_name in [x for x in self.irods_projects.keys() if x not in s]:\n##TODO add filtering criteria as in addProjects\n quotaFlag = False\n if self.conf.quota_active and proj_name in self.projects.keys():\n quotaFlag = \\\n len((self.projects[proj_name][self.conf.quota_attribute]).strip()) == 0 \\\n or int(self.projects[proj_name][self.conf.quota_attribute]) == 0\n # projects are in iRODS and not in userDB\n if (not(proj_name in sg_list.keys())\n and (not(proj_name in self.projects.keys()) or quotaFlag)):\n self.logger.info(\"The project: \" + proj_name + \" should be deleted\")\n if not(self.dryrun):\n if self.conf.quota_active:\n self._deleteQuota(proj_name)\n if self.conf.notification_active:\n # send message\n message = \"project \" + proj_name + \" should be \" \\\n + \"deleted from irods\"\n mailsnd = MailSender()\n mailsnd.send(message, self.conf.notification_sender, \n self.conf.notification_receiver)\n self.logger.debug(\"project [%s]: request for deletion \"\n + \"sent\", proj_name)\n else:\n print \"project \" + proj_name + \" should be deleted from \" \\\n + \"irods\"", "title": "" }, { "docid": "975757b9ff0196747359bc7ff496bd3b", "score": "0.6555909", "text": "def updateProjects(self):\n\n self.logger.info(\"Checking if an update for the projects is required\")\n\n for proj_name in [x for x in self.projects.keys() \n if x in self.irods_projects.keys()]:\n self.logger.info(\"Updating the project: \" + proj_name)\n if self.conf.quota_active:\n self._updateQuota(proj_name)\n # users are in the UserDB and not in iRODS\n self.addUsersToProject(proj_name, self.projects[proj_name])\n # add users from irods externals\n user_list = self.projects[proj_name]['members']\n if 'PI' in self.projects[proj_name].keys():\n user_list.append(self.projects[proj_name]['PI'])\n for sg in self.projects[proj_name]['groups'].keys():\n user_list += self.projects[proj_name]['groups'][sg]\n user_list = set(user_list)\n # users are in iRODS and not in the userDB\n for user in [x for x in self.irods_projects[proj_name]['members'] \n if not(x in user_list)]:\n self.logger.info(\"Deleting the user: \" + user + \", from the group: \"\n + proj_name)\n if not(self.dryrun):\n if self.conf.notification_active:\n message = \"user \" + user + \" should be deleted from \" \\\n + \"project \" + proj_name\n mailsnd = MailSender()\n mailsnd.send(message, self.conf.notification_sender, \n self.conf.notification_receiver)\n 
self.logger.info(\"Request for user deletion sent\")\n else:\n print \"user \" + user + \" should be deleted from project \" \\\n + proj_name\n self._addSubGroups(proj_name, self.projects[proj_name])", "title": "" }, { "docid": "fe7a4d72b9355ce46efc11082d64139f", "score": "0.6471396", "text": "async def test_t7_project_shared_with_group_transferred_from_one_member_to_the_last_and_all_is_removed(\n disable_garbage_collector_task: None,\n client: TestClient,\n aiopg_engine: aiopg.sa.engine.Engine,\n tests_data_dir: Path,\n osparc_product_name: str,\n):\n assert client.app\n u1 = await login_user(client)\n u2 = await login_user(client)\n u3 = await login_user(client)\n\n # creating g1 and inviting u2 and u3\n g1 = await get_group(client, u1)\n await invite_user_to_group(client, owner=u1, invitee=u2, group=g1)\n await invite_user_to_group(client, owner=u1, invitee=u3, group=g1)\n\n # u1 creates project and shares it with g1\n project = await new_project(\n client,\n u1,\n osparc_product_name,\n tests_data_dir,\n access_rights={str(g1[\"gid\"]): {\"read\": True, \"write\": True, \"delete\": False}},\n )\n\n # mark u1 as guest\n await change_user_role(aiopg_engine, u1, UserRole.GUEST)\n\n await assert_projects_count(aiopg_engine, 1)\n await assert_users_count(aiopg_engine, 3)\n await assert_user_is_owner_of_project(aiopg_engine, u1, project)\n\n # await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE)\n await gc_core.collect_garbage(app=client.app)\n\n # expected outcome: u1 was deleted, one of the users in g1 is the new owner\n await assert_one_owner_for_project(aiopg_engine, project, [u2, u3])\n await assert_user_not_in_db(aiopg_engine, u1)\n\n # find new owner and mark hims as GUEST\n q_u2 = await fetch_user_from_db(aiopg_engine, u2)\n q_u3 = await fetch_user_from_db(aiopg_engine, u3)\n q_project = await fetch_project_from_db(aiopg_engine, project)\n assert q_project\n\n new_owner: UserInfoDict | None = None\n remaining_users = []\n for user in [q_u2, q_u3]:\n assert user\n if user[\"id\"] == q_project[\"prj_owner\"]:\n new_owner = user\n else:\n remaining_users.append(user)\n\n assert new_owner is not None # expected to a new owner between the 2 other users\n # mark new owner as guest\n await change_user_role(aiopg_engine, new_owner, UserRole.GUEST)\n\n # await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE)\n await gc_core.collect_garbage(app=client.app)\n\n # expected outcome: the new_owner will be deleted and one of the remainint_others wil be the new owner\n await assert_one_owner_for_project(aiopg_engine, project, remaining_users)\n await assert_user_not_in_db(aiopg_engine, new_owner)\n\n # only 1 user is left as the owner mark him as GUEST\n for user in remaining_users:\n # mark new owner as guest\n await change_user_role(aiopg_engine, user, UserRole.GUEST)\n\n # await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE)\n await gc_core.collect_garbage(app=client.app)\n\n # expected outcome: the last user will be removed and the project will be removed\n await assert_projects_count(aiopg_engine, 0)\n await assert_users_count(aiopg_engine, 0)", "title": "" }, { "docid": "640ff197b59351cb546591bdc30cef35", "score": "0.64619774", "text": "async def test_t9_project_shared_with_other_users_transferred_between_them_and_then_removed(\n client: TestClient,\n aiopg_engine: aiopg.sa.engine.Engine,\n tests_data_dir: Path,\n osparc_product_name: str,\n):\n u1 = await login_user(client)\n u2 = await login_user(client)\n u3 = await login_user(client)\n\n q_u2 = await fetch_user_from_db(aiopg_engine, u2)\n assert q_u2\n 
q_u3 = await fetch_user_from_db(aiopg_engine, u3)\n assert q_u3\n\n # u1 creates project and shares it with g1\n project = await new_project(\n client,\n u1,\n osparc_product_name,\n tests_data_dir,\n access_rights={\n str(q_u2[\"primary_gid\"]): {\"read\": True, \"write\": True, \"delete\": False},\n str(q_u3[\"primary_gid\"]): {\"read\": True, \"write\": True, \"delete\": False},\n },\n )\n\n # mark u1 as guest\n await change_user_role(aiopg_engine, u1, UserRole.GUEST)\n\n await assert_users_count(aiopg_engine, 3)\n await assert_projects_count(aiopg_engine, 1)\n await assert_user_is_owner_of_project(aiopg_engine, u1, project)\n\n await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE)\n\n # expected outcome: u1 was deleted, one of the users in g1 is the new owner\n await assert_user_not_in_db(aiopg_engine, u1)\n await assert_one_owner_for_project(aiopg_engine, project, [u2, u3])\n\n # find new owner and mark hims as GUEST\n q_u2 = await fetch_user_from_db(aiopg_engine, u2)\n q_u3 = await fetch_user_from_db(aiopg_engine, u3)\n q_project = await fetch_project_from_db(aiopg_engine, project)\n\n new_owner = None\n remaining_others = []\n for user in [q_u2, q_u3]:\n assert user\n if user[\"id\"] == q_project[\"prj_owner\"]:\n new_owner = user\n else:\n remaining_others.append(user)\n\n assert new_owner is not None # expected to a new owner between the 2 other users\n # mark new owner as guest\n await change_user_role(aiopg_engine, new_owner, UserRole.GUEST)\n\n await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE)\n\n # expected outcome: the new_owner will be deleted and one of the remainint_others wil be the new owner\n await assert_user_not_in_db(aiopg_engine, new_owner)\n await assert_one_owner_for_project(aiopg_engine, project, remaining_others)\n await assert_users_count(aiopg_engine, 1)\n await assert_projects_count(aiopg_engine, 1)\n\n # only 1 user is left as the owner mark him as GUEST\n for user in remaining_others:\n # mark new owner as guest\n await change_user_role(aiopg_engine, user, UserRole.GUEST)\n\n await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE)\n\n # expected outcome: the last user will be removed and the project will be removed\n await assert_users_count(aiopg_engine, 0)\n await assert_projects_count(aiopg_engine, 0)", "title": "" }, { "docid": "670a2bebd4833011dc4a193cc51f5db9", "score": "0.6321107", "text": "def rm() -> None:\n logging.info(\"user rm\")", "title": "" }, { "docid": "f282bd6d2b9a538998a641a475ec1a71", "score": "0.612781", "text": "async def test_t6_project_shared_with_group_transferred_to_last_user_in_group_on_owner_removal(\n client: TestClient,\n aiopg_engine: aiopg.sa.engine.Engine,\n tests_data_dir: Path,\n osparc_product_name: str,\n):\n u1 = await login_user(client)\n u2 = await login_user(client)\n u3 = await login_user(client)\n\n # creating g1 and inviting u2 and u3\n g1 = await get_group(client, u1)\n await invite_user_to_group(client, owner=u1, invitee=u2, group=g1)\n await invite_user_to_group(client, owner=u1, invitee=u3, group=g1)\n\n # u1 creates project and shares it with g1\n project = await new_project(\n client,\n u1,\n osparc_product_name,\n tests_data_dir,\n access_rights={str(g1[\"gid\"]): {\"read\": True, \"write\": True, \"delete\": False}},\n )\n\n # mark u1 as guest\n await change_user_role(aiopg_engine, u1, UserRole.GUEST)\n\n await assert_users_count(aiopg_engine, 3)\n await assert_projects_count(aiopg_engine, 1)\n await assert_user_is_owner_of_project(aiopg_engine, u1, project)\n\n await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE)\n\n # 
expected outcome: u1 was deleted, one of the users in g1 is the new owner\n await assert_user_not_in_db(aiopg_engine, u1)\n await assert_one_owner_for_project(aiopg_engine, project, [u2, u3])\n\n # find new owner and mark hims as GUEST\n q_u2 = await fetch_user_from_db(aiopg_engine, u2)\n q_u3 = await fetch_user_from_db(aiopg_engine, u3)\n q_project = await fetch_project_from_db(aiopg_engine, project)\n\n new_owner = None\n remaining_others = []\n for user in [q_u2, q_u3]:\n assert user\n if user[\"id\"] == q_project[\"prj_owner\"]:\n new_owner = user\n else:\n remaining_others.append(user)\n\n assert new_owner is not None # expected to a new owner between the 2 other users\n # mark new owner as guest\n await change_user_role(aiopg_engine, new_owner, UserRole.GUEST)\n\n await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE)\n\n # expected outcome: the new_owner will be deleted and one of the remainint_others wil be the new owner\n await assert_user_not_in_db(aiopg_engine, new_owner)\n await assert_one_owner_for_project(aiopg_engine, project, remaining_others)", "title": "" }, { "docid": "516ca9f45a1d4fd66002c8ecf6d0df93", "score": "0.6093863", "text": "def removeUser(self, user: str):\n if self.projects.get(user) is not None:\n del self.projects[user]\n return True\n\n return False", "title": "" }, { "docid": "9d7234ba19b544a529b638ad317d33d4", "score": "0.60579276", "text": "async def test_t4_project_shared_with_group_transferred_to_user_in_group_on_owner_removal(\n client: TestClient,\n aiopg_engine: aiopg.sa.engine.Engine,\n tests_data_dir: Path,\n osparc_product_name: str,\n):\n u1 = await login_user(client)\n u2 = await login_user(client)\n u3 = await login_user(client)\n\n # creating g1 and inviting u2 and u3\n g1 = await get_group(client, u1)\n await invite_user_to_group(client, owner=u1, invitee=u2, group=g1)\n await invite_user_to_group(client, owner=u1, invitee=u3, group=g1)\n\n # u1 creates project and shares it with g1\n project = await new_project(\n client,\n u1,\n osparc_product_name,\n tests_data_dir,\n access_rights={str(g1[\"gid\"]): {\"read\": True, \"write\": True, \"delete\": False}},\n )\n\n # mark u1 as guest\n await change_user_role(aiopg_engine, u1, UserRole.GUEST)\n\n await assert_users_count(aiopg_engine, 3)\n await assert_projects_count(aiopg_engine, 1)\n await assert_user_is_owner_of_project(aiopg_engine, u1, project)\n\n await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE)\n\n # expected outcome: u1 was deleted, one of the users in g1 is the new owner\n await assert_user_not_in_db(aiopg_engine, u1)\n await assert_one_owner_for_project(aiopg_engine, project, [u2, u3])", "title": "" }, { "docid": "649e02284bf92784ec6122c614b7a6f8", "score": "0.6019801", "text": "def delete(self):\n if self.source == \"DB\":\n source = \"DB\"\n else:\n source = \"LDAP\"\n\n username = self.username\n\n orgs = []\n if is_pro_version():\n orgs = ccnet_api.get_orgs_by_user(username)\n\n # remove owned repos\n owned_repos = []\n if orgs:\n for org in orgs:\n owned_repos += seafile_api.get_org_owned_repo_list(org.org_id,\n username)\n else:\n owned_repos += seafile_api.get_owned_repo_list(username)\n\n for r in owned_repos:\n seafile_api.remove_repo(r.id)\n\n # remove shared in repos\n shared_in_repos = []\n if orgs:\n for org in orgs:\n org_id = org.org_id\n shared_in_repos = seafile_api.get_org_share_in_repo_list(org_id,\n username, -1, -1)\n\n for r in shared_in_repos:\n seafile_api.org_remove_share(org_id,\n r.repo_id, r.user, username)\n else:\n shared_in_repos = 
seafile_api.get_share_in_repo_list(username, -1, -1)\n for r in shared_in_repos:\n seafile_api.remove_share(r.repo_id, r.user, username)\n\n # clear web api and repo sync token\n # when delete user\n try:\n clear_token(self.username)\n except Exception as e:\n logger.error(e)\n\n # remove current user from joined groups\n ccnet_api.remove_group_user(username)\n\n ccnet_api.remove_emailuser(source, username)\n signals.user_deleted.send(sender=self.__class__, username=username)\n\n Profile.objects.delete_profile_by_user(username)\n if config.ENABLE_TERMS_AND_CONDITIONS:\n from termsandconditions.models import UserTermsAndConditions\n UserTermsAndConditions.objects.filter(username=username).delete()\n self.delete_user_options(username)", "title": "" }, { "docid": "7b0940f7fef5a63cca6ca759e6fa2e72", "score": "0.6008798", "text": "def clear_project_cache(self):\n self.projects = None", "title": "" }, { "docid": "6a8d81bc271b77b0a2ecc0f97ad3d308", "score": "0.59872776", "text": "def on_removeProject(self):\n self.log.detail(\">>> Launch 'remove Project' ...\")\n selItems = self.tw_myProjects.selectedItems() or []\n if selItems:\n #--- Check Project ---#\n if selItems[0].project not in self.pinedProjects:\n pQt.errorDialog(\"!!! Project %r not found, Skipp !!!\" % selItems[0].project, self)\n else:\n #--- Remove Poject ---#\n self._users._user.delPinedProject(selItems[0].project)\n self._users._user.writeFile()\n #--- Refresh ---#\n self.buildTree('myProjects')", "title": "" }, { "docid": "cd6475e24cb9b330201e5cbf7d67f5ec", "score": "0.5925115", "text": "def __del__(self):\n self.save_users()", "title": "" }, { "docid": "a8fc067ed0ca44a9c8e1fc1efbcc182b", "score": "0.5907536", "text": "def delete_user2project(db, uid, pid):\n meta, conn = db\n\n with conn.begin():\n tbl = meta.tables[__table_user2project]\n stmt = tbl.update().\\\n where(sql.and_(tbl.c.uid==uid, tbl.c.pid==pid)).\\\n values(deleted=True)\n conn.execute(stmt)\n conn.execute(\"commit\")", "title": "" }, { "docid": "f999855948827473ee277d509a437e1e", "score": "0.5898827", "text": "def user_projects():\n return _user_projects", "title": "" }, { "docid": "d7ebaba86a73d35d44dc7c7d330a7243", "score": "0.58854914", "text": "def remove_user_and_group(username=False, project_name=False, project_type='git'):\n _prod()\n _remove_user_and_group(username, project_name, project_type)", "title": "" }, { "docid": "b6469760c67a9396cf09fbce07f0021b", "score": "0.58730024", "text": "async def test_t8_project_shared_with_other_users_transferred_to_one_of_them_until_one_user_remains(\n client: TestClient,\n aiopg_engine: aiopg.sa.engine.Engine,\n tests_data_dir: Path,\n osparc_product_name: str,\n):\n u1 = await login_user(client)\n u2 = await login_user(client)\n u3 = await login_user(client)\n\n q_u2 = await fetch_user_from_db(aiopg_engine, u2)\n assert q_u2\n q_u3 = await fetch_user_from_db(aiopg_engine, u3)\n assert q_u3\n\n # u1 creates project and shares it with g1\n project = await new_project(\n client,\n u1,\n osparc_product_name,\n tests_data_dir,\n access_rights={\n str(q_u2[\"primary_gid\"]): {\"read\": True, \"write\": True, \"delete\": False},\n str(q_u3[\"primary_gid\"]): {\"read\": True, \"write\": True, \"delete\": False},\n },\n )\n\n # mark u1 as guest\n await change_user_role(aiopg_engine, u1, UserRole.GUEST)\n\n await assert_users_count(aiopg_engine, 3)\n await assert_projects_count(aiopg_engine, 1)\n await assert_user_is_owner_of_project(aiopg_engine, u1, project)\n\n await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE)\n\n # expected 
outcome: u1 was deleted, one of the users in g1 is the new owner\n await assert_user_not_in_db(aiopg_engine, u1)\n await assert_one_owner_for_project(aiopg_engine, project, [u2, u3])\n\n # find new owner and mark hims as GUEST\n q_u2 = await fetch_user_from_db(aiopg_engine, u2)\n q_u3 = await fetch_user_from_db(aiopg_engine, u3)\n q_project = await fetch_project_from_db(aiopg_engine, project)\n\n new_owner = None\n remaining_others = []\n for user in [q_u2, q_u3]:\n assert user\n if user[\"id\"] == q_project[\"prj_owner\"]:\n new_owner = user\n else:\n remaining_others.append(user)\n\n assert new_owner is not None # expected to a new owner between the 2 other users\n # mark new owner as guest\n await change_user_role(aiopg_engine, new_owner, UserRole.GUEST)\n\n await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE)\n\n # expected outcome: the new_owner will be deleted and one of the remainint_others wil be the new owner\n await assert_user_not_in_db(aiopg_engine, new_owner)\n await assert_one_owner_for_project(aiopg_engine, project, remaining_others)\n await assert_users_count(aiopg_engine, 1)\n await assert_projects_count(aiopg_engine, 1)", "title": "" }, { "docid": "fa0052be1e509556e40d2e5becc47cf9", "score": "0.5866332", "text": "def remove_user_from_proj_admin_group(session, user, project_name):\n group = session.user_groups.get(project_name+\"_mgr\")\n group.removemember(user)", "title": "" }, { "docid": "038419145abafcb0f02b98b57149b59f", "score": "0.5858815", "text": "def remove_old_users():\n critical_time = arrow.now().shift(days=-90)\n for user in Path(os.path.join(ROOT, 'users')).glob('*'):\n phage_id = str(user)[str(user).rfind('/') + 1:]\n user_time = arrow.get(user.stat().st_mtime)\n if phage_id != \"Phlash.db\" and user_time < critical_time:\n shutil.rmtree(user)\n db.session.query(Files).filter_by(phage_id=phage_id).delete()\n db.session.query(Annotations).filter_by(phage_id=phage_id).delete()\n db.session.query(Blast_Results).filter_by(phage_id=phage_id).delete()\n db.session.query(Gene_Calls).filter_by(phage_id=phage_id).delete()\n db.session.query(Tasks).filter_by(phage_id=phage_id).delete()\n db.session.query(Users).filter_by(id=phage_id).delete()\n db.session.commit()", "title": "" }, { "docid": "6f94d5cd500f9023fc8778826bdbb88e", "score": "0.5839619", "text": "def clean(self):\n if self.presence_ping_task:\n self.presence_ping_task.stop()\n\n project_id = self.project_id\n\n if project_id:\n self.application.remove_connection(\n project_id, self.user, self.uid\n )\n\n if project_id and self.channels is not None:\n channels = self.channels.copy()\n for channel_name, channel_info in six.iteritems(channels):\n yield self.application.engine.remove_presence(\n project_id, channel_name, self.uid\n )\n self.application.engine.remove_subscription(\n project_id, channel_name, self\n )\n project, error = yield self.application.get_project(project_id)\n if not error and project:\n namespace, error = yield self.application.get_namespace(\n project, channel_name\n )\n if namespace and namespace.get(\"join_leave\", False):\n self.send_leave_message(channel_name)\n\n self.channels = None\n self.channel_info = None\n self.default_info = None\n self.project_id = None\n self.is_authenticated = False\n self.sock = None\n self.user = None\n self.timestamp = None\n self.expire_timeout = None\n self.uid = None\n raise Return((True, None))", "title": "" }, { "docid": "3bcbfac6e71c14f99b621463caf20b0a", "score": "0.5744355", "text": "def remove_user_from_proj_group(session, user, project_name):\n 
group = session.user_groups.get(project_name)\n group.removemember(user)", "title": "" }, { "docid": "5086730850c51cd65dcd7094994cd8ea", "score": "0.57376295", "text": "def deluser():\n for i in users:\n sudo(\"userdel -r %s\" % i[2])", "title": "" }, { "docid": "0c009ceb245b5c6bf6a8c7aecd276622", "score": "0.5729922", "text": "def drop(self):\n self.users.clear()", "title": "" }, { "docid": "e8c10e055eb7c25cad876bec66f6e6e0", "score": "0.56879765", "text": "def __remove_from_all(self, uid):\n if not self.__user_exists(uid):\n raise exception.LDAPUserNotFound(user_id=uid)\n role_dns = self.__find_group_dns_with_member(\n FLAGS.role_project_subtree, uid)\n for role_dn in role_dns:\n self.__safe_remove_from_group(uid, role_dn)\n project_dns = self.__find_group_dns_with_member(\n FLAGS.ldap_project_subtree, uid)\n for project_dn in project_dns:\n self.__safe_remove_from_group(uid, project_dn)", "title": "" }, { "docid": "e7781f3c6b250b1903d5b6b08964eba9", "score": "0.5669826", "text": "async def test_t5_project_shared_with_other_users_transferred_to_one_of_them(\n client: TestClient,\n aiopg_engine: aiopg.sa.engine.Engine,\n tests_data_dir: Path,\n osparc_product_name: str,\n):\n u1 = await login_user(client)\n u2 = await login_user(client)\n u3 = await login_user(client)\n\n q_u2 = await fetch_user_from_db(aiopg_engine, u2)\n assert q_u2\n q_u3 = await fetch_user_from_db(aiopg_engine, u3)\n assert q_u3\n\n # u1 creates project and shares it with g1\n project = await new_project(\n client,\n u1,\n osparc_product_name,\n tests_data_dir,\n access_rights={\n str(q_u2[\"primary_gid\"]): {\"read\": True, \"write\": True, \"delete\": False},\n str(q_u3[\"primary_gid\"]): {\"read\": True, \"write\": True, \"delete\": False},\n },\n )\n\n # mark u1 as guest\n await change_user_role(aiopg_engine, u1, UserRole.GUEST)\n\n await assert_users_count(aiopg_engine, 3)\n await assert_projects_count(aiopg_engine, 1)\n await assert_user_is_owner_of_project(aiopg_engine, u1, project)\n\n await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE)\n\n # expected outcome: u1 was deleted, one of the users in g1 is the new owner\n await assert_user_not_in_db(aiopg_engine, u1)\n await assert_one_owner_for_project(aiopg_engine, project, [u2, u3])", "title": "" }, { "docid": "f9c8007e085aa27c9cb4fcde7da2d830", "score": "0.56366354", "text": "def tearDown(self):\n del self.user", "title": "" }, { "docid": "716e0c8bcd1dea2511223e9d6d6f3590", "score": "0.56135976", "text": "def close(name):\n del projects[name]", "title": "" }, { "docid": "4ebf707cadc7f4270dca8c9e0a1af9b3", "score": "0.5600507", "text": "def keyremove():\n for user_name, ct in Counter(x.user_name\n for x in MemberBucketStore.query).items():\n if ct > 1:\n rows = MemberBucketStore.query.filter_by(user_name=user_name).\\\n order_by(desc(MemberBucketStore.id)).all()\n rows.pop(0)\n for row in rows:\n row.delete()\n return render_template('log_auth/home.html')", "title": "" }, { "docid": "152e8ecb1a38ae431a7f5e1d62c50357", "score": "0.5586763", "text": "def delete_project(project_name):\n proj_keys = []\n cursor = rdb.table(\"projects\").filter({\"name\": project_name})\\\n .pluck(\"id\").run(g.rdb_conn)\n for entry in cursor:\n proj_keys.append(entry[\"id\"])\n\n if len(proj_keys) > 1:\n print((\n \"####### WARNING: DELETING MORE THAN ONE PROJECT WITH NAME %s. 
\"\n \"DELETING PROJECTS WITH KEYS %s ########\") % (\n project_name, \", \".join(proj_keys)))\n elif len(proj_keys) == 0:\n print((\n \"####### WARNING: flask_app.delete_project() - NO PROJECT \"\n \"WITH NAME %s.\") % project_name)\n return 0\n for proj_key in proj_keys:\n # Delete associated data (features, models, predictions)\n for table_name in (\"datasets\", \"features\", \"models\", \"predictions\"):\n n_deleted = delete_associated_project_data(table_name, proj_key)\n print(\"Deleted\", n_deleted, table_name,\n \"entries and associated data.\")\n # Delete relevant 'userauth' table entries\n rdb.table(\"userauth\").filter({\"projkey\": proj_key})\\\n .delete().run(g.rdb_conn)\n # Delete project entries\n msg = rdb.table(\"projects\").get_all(*proj_keys).delete().run(g.rdb_conn)\n print(\"Deleted\", msg['deleted'], \"projects.\")\n return msg['deleted']", "title": "" }, { "docid": "556652227ea3a52e7b22cdd29daf51b2", "score": "0.5582131", "text": "def test_resource_user_group_resource_remove_users_from_user_group_delete(self):\n pass", "title": "" }, { "docid": "4fe34d14b8add2c8e1ad54168319fef8", "score": "0.55797815", "text": "def tearDown(self):\n User.users_list.clear()", "title": "" }, { "docid": "08ba0ce4e640d06038b1b8adfd828c0f", "score": "0.55717045", "text": "def test_delete_cloud_user(self):\n pass", "title": "" }, { "docid": "2f199fbf58ec45de36968a6994761dc9", "score": "0.5571273", "text": "def tearDown(self):\n self.user1.delete()\n self.user2.delete()\n self.group.delete()", "title": "" }, { "docid": "686fdc669cb347c5cd095825769eae7b", "score": "0.5558585", "text": "def delete_user_from_project(request, pk, user_id):\n project = Project.objects.get(pk=pk)\n user = User.objects.get(pk=user_id)\n if(user not in project.users.all()):\n messages.error(request, \"L'utilisateur n'est pas dans le projet\")\n else:\n project.users.remove(user)\n tbot = telepot.Bot(TELEGRAM_TOKEN)\n for bot in project.bot_set.all():\n if(bot.verified):\n msg = \"Modification du projet \" + project.title + \"\\nL'utilisateur \" + user.username + \" a été retiré au projet.\\n\" + request.META['HTTP_HOST'] + \"/projects/\" + str(project.pk)\n tbot.sendMessage(bot.chatId, msg)\n messages.success(request, \"L'utilisateur a bien été retiré du projet\")\n return redirect(reverse('projects:project', kwargs={'pk': pk}))", "title": "" }, { "docid": "141c2008406c8d53fe33b9859fe08bad", "score": "0.555847", "text": "async def clean_database(self, ctx):\n conn, c = await utilities.load_db()\n with conn:\n c.execute(\"SELECT uid, user FROM guild_members WHERE gid = (:gid)\", {'gid': ctx.guild.id})\n db_member_ids = c.fetchall()\n current_guild_ids = [member.id for member in ctx.guild.members]\n for user in db_member_ids:\n uid, name = user\n if uid not in current_guild_ids:\n try:\n c.execute(\"DELETE FROM guild_members WHERE gid = (:gid) and uid = (:uid)\",\n {'gid': ctx.guild.id, 'uid': user[0]})\n print(f'{name} ({uid}) cleaned from {ctx.guild.name}.')\n except Exception as e:\n print(f'An error occurred when attempting to clean the database: {e}')", "title": "" }, { "docid": "b04e04d4c5df2f29c3411e7d4c753f88", "score": "0.554831", "text": "def _clean_network_resource(self, userid):\n self._delete_mac(userid)\n self._delete_switch(userid)\n self._delete_host(userid)", "title": "" }, { "docid": "b9975b59c965fb7a51b13641f186a911", "score": "0.55407953", "text": "def clear(self):\n self._projects.clear()\n self._write()\n if self._verbose:\n _print('pppp: Cleared')\n _print()\n self.list()", "title": "" }, { 
"docid": "b4257784276de798206be2ae6a86f7df", "score": "0.5532444", "text": "def invalidate_project(user, project_id):\n project_obj = ProjectManagementCache.get_project(user, project_id)\n\n if project_obj:\n project_obj.purge()\n\n return project_obj", "title": "" }, { "docid": "f2d2c9604db425d36d8e674a0905ac81", "score": "0.55287576", "text": "def main(github_token, github_org):\n # Instantiate all_repos value for return\n all_repos = []\n # Instantiate GitHub access\n gh_acct = Github(github_token)\n gh_user = gh_acct.get_user()\n for repo in gh_user.get_repos():\n if github_org in repo.full_name:\n all_repos.append(repo.name)\n print(\"Unwatching %s\" % repo.full_name)\n gh_user.remove_from_watched(repo)", "title": "" }, { "docid": "836c6291dc18b9acf392d3be38b256e0", "score": "0.5518052", "text": "def delete(self, user_id):\n args = projectsParser.parse_args()\n project_name = args['project_name']\n\n if project_name is None:\n abort(400)\n\n user = User.objects.filter_by(id=user_id)[0]\n\n try:\n project_to_remove = user.req_projs.filter_by(name=project_name)[-1]\n project_to_remove.delete()\n except Exception as e:\n {'status': 'error', 'message':\n 'Project name is wrong.'}\n\n return {'status': 'success', 'message':\n 'The project has been delete from your project list'}", "title": "" }, { "docid": "afb3b42cb38e916ca096f83ce6e08db9", "score": "0.5496405", "text": "def user(auth_app):\n assert crud.count_projects() == 0\n user = crud.create_user(DB, 1)\n assert crud.count_users() == 1\n yield user\n crud.delete_everything(DB)", "title": "" }, { "docid": "f8e6036f20a39bdd9cc169cdd5327e93", "score": "0.54899454", "text": "def tearDown(self):\n\n self.user1.delete()", "title": "" }, { "docid": "05a390a8f39f20be6e373f074b1093fd", "score": "0.5481164", "text": "def delete_repo():\n if 'personal_cloud' in os.listdir(home_dir):\n rmtree(f'{home_dir}/personal_cloud') # delete repo for a fresh start", "title": "" }, { "docid": "5cb751874f283cc591d7008ef965777a", "score": "0.5478742", "text": "def removeProject(self, uid):\n if uid in self.__projects:\n project = self.__projects[uid]\n del self.__projects[uid]\n self.projectRemoved.emit(project)\n self.setDirty(True)", "title": "" }, { "docid": "42adaf2d301a6f4101817cf3ec8018a2", "score": "0.5475209", "text": "def _revoke_from_db(self, sess, to_delete):\n for (username, project_auth_id) in to_delete:\n q = (\n sess.query(AccessPrivilege)\n .filter(AccessPrivilege.project.has(auth_id=project_auth_id))\n .join(AccessPrivilege.user)\n .filter(func.lower(User.username) == username)\n .all()\n )\n for access in q:\n self.logger.info(\n \"revoke {} access to {} in db\".format(username, project_auth_id)\n )\n sess.delete(access)", "title": "" }, { "docid": "84c18f2ef8af73fa487447989de98f4e", "score": "0.54651237", "text": "def cleanup(self):\n user_home_dir = f\"/home/{self.alias}\"\n logging.info(\"Removing home directory for user %s (%s)\", self.alias, user_home_dir)\n self._default_user_remote_command_executor.run_remote_command(f\"sudo rm -rf {user_home_dir}\")", "title": "" }, { "docid": "2a5e3ade4ad57918d0ef0e94361fff50", "score": "0.5450136", "text": "def _purge_group_users(self, name, force):\n users = yield self._get_group_users(name)\n\n if not force and users:\n self.log.warning(\n (\n \"Will not be able to delete this group \"\n \"without first removing all of its members. 
\"\n \"Use the `force` option to purge all members.\"\n )\n )\n self.log.warning(\"Group members: %s\" % \", \".join(users))\n\n if not force:\n raise gen.Return()\n\n tasks = []\n for user in users:\n tasks.append(self._remove_user_from_group(user, name))\n yield tasks", "title": "" }, { "docid": "c0d7db58c41f6814d66b66c7f61cde16", "score": "0.54349554", "text": "def _git_gc(self, host):\n for repo in self.env['runbot.repo'].search([]):\n try:\n repo._git(['gc', '--prune=all', '--quiet'])\n except CalledProcessError as e:\n message = f'git gc failed for {repo.name} on {host.name} with exit status {e.returncode} and message \"{e.output[:60]} ...\"'\n self.warning(message)", "title": "" }, { "docid": "8bb7ea567ddfaa9cf6a96950c92bb9c9", "score": "0.5428331", "text": "def remove_project(self, name):\n self.redis.srem(self.key, name)", "title": "" }, { "docid": "fb17d7514da101cf05437003ae312aae", "score": "0.5422131", "text": "def test_remove_user_from_project(self):\n query_string = [('account_id_session', 789),\n ('token', 'token_example')]\n response = self.client.open('/projects/{project_id}/users/{account_id}'.format(project_id=789, account_id=789),\n method='DELETE',\n content_type='application/json',\n query_string=query_string)\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "title": "" }, { "docid": "33dd07ab14fd1ee044fcacfa0fa4c3da", "score": "0.54180187", "text": "def cog_unload(self):\n print('unloading GroupManager cog')\n self.groups_db.close()", "title": "" }, { "docid": "aff87e731be8feb9dc59de4d6349d0dd", "score": "0.5417419", "text": "def prune():\n cloud.prune(force=True)\n return success('Pruning...')", "title": "" }, { "docid": "932619e7e4707d9c0bac51f57aba6a57", "score": "0.54160947", "text": "def setUp():\n DataRequestProjectMember.objects.all().delete()", "title": "" }, { "docid": "67ef11cb644aa9f6d4cdb6c2f36ddc85", "score": "0.54150933", "text": "def remove_from_db(self):\r\n\r\n for group in list(self.pools):\r\n self.pools[group].remove_from_db()\r\n self.pools = {}", "title": "" }, { "docid": "77f997dbee030bfd1985f5441a8d7b9b", "score": "0.54063094", "text": "def clear_all():\n global _profiles\n _profiles.clear()", "title": "" }, { "docid": "df49197e6d2e9e5fbeb75a84ca77a9c9", "score": "0.54021597", "text": "def tearDown(self):\n self.user.delete()", "title": "" }, { "docid": "b9a1bfab5a186e58d4a741de1e2fb45e", "score": "0.53967893", "text": "def rebuild():\n\tUsers.require_not_root()\n\tclean()\n\t_build()", "title": "" }, { "docid": "23c565cc169a6839f5a04932cfb854f0", "score": "0.5386809", "text": "def cleanup():\r\n\r\n with settings(hide('warnings', 'stderr', 'stdout'), warn_only = True):\r\n # Flush the database\r\n #run('cd %(path)s/releases/current/%(project_name)s; ../../../bin/python manage.py flush --noinput' % {'path': env.path, 'project_name': env.project_name})\r\n\r\n # Delete the Apache and lighttpd vhost config files\r\n sudo('rm /usr/local/etc/apache22/sites-available/%(project_domain)s.conf' % {'project_domain': env.project_domain})\r\n sudo('rm /usr/local/etc/apache22/sites-enabled/%(project_domain)s.conf' % {'project_domain': env.project_domain})\r\n sudo('rm /usr/local/etc/lighttpd/%(project_domain)s.conf' % {'project_domain': env.project_domain})\r\n\r\n # Remove the include statement from the lighttpd config file for our vhost\r\n sudo('sed \\'/\\/usr\\/local\\/etc\\/lighttpd\\/%(project_domain)s.conf/d\\' /usr/local/etc/lighttpd.conf > /usr/local/etc/lighttpd.conf.1; mv /usr/local/etc/lighttpd.conf.1 
/usr/local/etc/lighttpd.conf' % {'project_domain': env.project_domain})\r\n\r\n # Uninstall installed dependencies\r\n run('cd %(path)s; pip uninstall -E . -r ./releases/current/requirements.txt -y' % {'path': env.path})\r\n\r\n # Remove directory packages, releases and other (if exists)\r\n sudo('rm -rf %(path)s/packages/' % {'path': env.path})\r\n sudo('rm -rf %(path)s/releases/' % {'path': env.path})\r\n\r\n _reload_apache()\r\n _reload_lighttpd()", "title": "" }, { "docid": "96090cc9275e6ca86f182b39aec9cd57", "score": "0.53862375", "text": "def unmanage_all(self):\n while self.clients:\n self.unmanage(iter(self.clients).next()) # TODO: expensive?", "title": "" }, { "docid": "d4439036708d2540fb8870394bd82167", "score": "0.53820544", "text": "def before_delete(self, user): # pylint: disable=arguments-differ\n user.remove_groups()\n audit('delete user', user.username)", "title": "" }, { "docid": "8a784bd3e023b3f592273dbff40523ba", "score": "0.5373529", "text": "def clear_recent_projects(self):\n self.recent_projects = []\n self.set_conf('recent_projects', self.recent_projects)\n self._setup_menu_actions()", "title": "" }, { "docid": "3cf8fe6005d4d4deec83bcce31405c8c", "score": "0.53534544", "text": "def unshare(self, user):\n with qdb.sql_connection.TRN:\n sql = \"\"\"DELETE FROM qiita.analysis_users\n WHERE analysis_id = %s AND email = %s\"\"\"\n qdb.sql_connection.TRN.add(sql, [self._id, user.id])\n qdb.sql_connection.TRN.execute()", "title": "" }, { "docid": "b63b8813ea588fdf37c3a59a1f608a62", "score": "0.5350022", "text": "def test_delete_scope_project_user_params_user_project(self):\n self.assertEqual(AppSetting.objects.count(), 16)\n app_settings.delete(\n EXAMPLE_APP_NAME,\n 'project_user_str_setting',\n project=self.project,\n user=self.user,\n )\n self.assertEqual(AppSetting.objects.count(), 15)", "title": "" }, { "docid": "ead0e071be26717d13ed5b9e21ae22ce", "score": "0.5335907", "text": "def clean(self):\n if self.host and self.path:\n self.host.rmfiles(self.path)", "title": "" }, { "docid": "9b36dfd60a86d3048a95868f3f9067f3", "score": "0.53345865", "text": "def removeDBEntries(user, db):\n\tcon = db.cursor()\n\totherUser = getPartner(user, con)\n\t#If user has a partner, delete pairing and user\n\tif otherUser:\n\t\t#find their pairing-number\n\t\tpairNum = getPair(user, con)\n\t\t#delete user\n\t\tdeleteUser(user, db)\n\t\tif type(pairNum) is tuple:\n\t\t\tpairNum = pairNum[0]\n\t\t#delete pairing\n\t\tcon.execute(\"DELETE FROM pairs WHERE id=?\", (pairNum,))\n\t\tdb.commit()\n#\t\tDEV\n\t\tlogging.info(printDB(\"users\", con))\n\telse:\n\t\tlogging.error(\"WTFFF\")", "title": "" }, { "docid": "baaf554bd1c9015107a8230ddc1e37b6", "score": "0.53300714", "text": "def rm():", "title": "" }, { "docid": "6f7124bbf57cc32307c01dd8b2b73859", "score": "0.5327369", "text": "def unpaid_delete():\n\n # Set options for override & test mode.\n UDB.setopt(OPT)\n ACC.setopt(OPT)\n\n for username in UDB.list_unpaid_grace():\n usr = RBUser(uid=username)\n UDB.get_user_byname(usr)\n print('User deleted:', username)\n UDB.delete(usr)\n print('Account deleted:', username)\n ACC.delete(usr)", "title": "" }, { "docid": "1ec1cf1069d7f0b888779c18b4aa85a3", "score": "0.532031", "text": "async def test_t10_owner_and_all_shared_users_marked_as_guests(\n client: TestClient,\n aiopg_engine: aiopg.sa.engine.Engine,\n tests_data_dir: Path,\n osparc_product_name: str,\n):\n u1 = await login_user(client)\n u2 = await login_user(client)\n u3 = await login_user(client)\n\n q_u2 = await 
fetch_user_from_db(aiopg_engine, u2)\n q_u3 = await fetch_user_from_db(aiopg_engine, u3)\n assert q_u2\n assert q_u3\n\n # u1 creates project and shares it with g1\n project = await new_project(\n client,\n u1,\n osparc_product_name,\n tests_data_dir,\n access_rights={\n str(q_u2[\"primary_gid\"]): {\"read\": True, \"write\": True, \"delete\": False},\n str(q_u3[\"primary_gid\"]): {\"read\": True, \"write\": True, \"delete\": False},\n },\n )\n\n # mark all users as guest\n await change_user_role(aiopg_engine, u1, UserRole.GUEST)\n await change_user_role(aiopg_engine, u2, UserRole.GUEST)\n await change_user_role(aiopg_engine, u3, UserRole.GUEST)\n\n await assert_users_count(aiopg_engine, 3)\n await assert_projects_count(aiopg_engine, 1)\n await assert_user_is_owner_of_project(aiopg_engine, u1, project)\n\n await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE)\n\n await assert_users_count(aiopg_engine, 0)\n await assert_projects_count(aiopg_engine, 0)", "title": "" }, { "docid": "b261566678682122070f49ecf4e5c2e4", "score": "0.5317826", "text": "def test_delete_scope_project_user_params_project(self):\n self.assertEqual(AppSetting.objects.count(), 16)\n app_settings.delete(\n EXAMPLE_APP_NAME,\n 'project_user_str_setting',\n project=self.project,\n )\n self.assertEqual(AppSetting.objects.count(), 15)", "title": "" }, { "docid": "1e7128384e1281ec570257e203a8f5c9", "score": "0.53177387", "text": "def __del__(self):\n del self._scheduler\n del self._user\n del self._host\n del self._run_mode", "title": "" }, { "docid": "05f3bbdbb610978b34565e4ced82da7f", "score": "0.5313089", "text": "def delete_project(self):\n\n # get a list of all projects\n\n response = Project.list(api_client=self.TEST_USER1_CLIENT)\n\n # we will delete all projects owned by self.TEST_USER1_CLIENT\n\n for mo in response:\n mo.delete()\n\n response = Project.list(api_client=self.TEST_USER1_CLIENT)\n\n self.assertEqual(len(response), 0)", "title": "" }, { "docid": "2ca88fcff4c3d5f00b94bb949caffc92", "score": "0.5304993", "text": "def do_scrub(cs, args):\r\n networks_list = cs.networks.list()\r\n networks_list = [network for network in networks_list\r\n if getattr(network, 'project_id', '') == args.project_id]\r\n search_opts = {'all_tenants': 1}\r\n groups = cs.security_groups.list(search_opts)\r\n groups = [group for group in groups\r\n if group.tenant_id == args.project_id]\r\n for network in networks_list:\r\n cs.networks.disassociate(network)\r\n for group in groups:\r\n cs.security_groups.delete(group)", "title": "" }, { "docid": "7626a870518e359978481e13f6611963", "score": "0.53013206", "text": "def addUsersToProject(self, proj_name, project, new_project_flag=False):\n\n self.logger.info(\"checking if there are users to be added to the group \"\n + proj_name)\n user_list = project['members']\n if 'PI' in project.keys():\n user_list.append(project['PI'])\n # eliminate duplicates when a PI is also a member of the project\n user_list = set(user_list)\n for 
user in [x for x in user_list\n if (new_project_flag or \n not(x in self.irods_projects[proj_name]['members']))]:\n self.logger.info(user)\n if not(self.dryrun):\n if not(user in self.irods_users.keys()):\n response = self.irodsu.createIrodsUsers(user)\n if response[0] != 0:\n if self.conf.notification_active:\n message = \"creation of the irods user \" + user \\\n + \" failed\"\n mailsnd = MailSender()\n mailsnd.send(message, self.conf.notification_sender,\n self.conf.notification_receiver)\n self.logger.error(\"failed to create the irods user %s\", user)\n else:\n self.logger.debug(\"created irods user %s\", user)\n if self.conf.quota_active:\n # quota from userDB is set in GB, while iRODS uses bytes\n quota = self.toBytes(\n int(project[self.conf.quota_attribute]),\n self.conf.quota_unity)\n else:\n if self.conf.quota_active:\n quota_limit = self.irodsu.listIrodsUserQuota(user)\n # quota from userDB is set in GB, while iRODS uses bytes\n quota = quota_limit + self.toBytes(\n int(project[self.conf.quota_attribute] \n ), self.conf.quota_unity)\n self.irodsu.setIrodsUserQuota(user,str(quota))\n self.logger.debug(\"defined quota limit to %s GB for the user %s\",\n str(quota), user) \n self.irodsu.addIrodsUserToGroup(user, proj_name)\n self.logger.debug(\"added irods user %s to the group %s\", \n user, proj_name)\n else:\n print \"added user %s to the group %s\" % (user, proj_name)\n if self.conf.quota_active:\n quotaGB = project[self.conf.quota_attribute]\n print \"and set the related user quota limit to %s GB\" \\\n % (quotaGB,)", "title": "" }, { "docid": "9b61e8343d1ce0a5389be734a2fb0b79", "score": "0.5298083", "text": "async def test_t2_cleanup_resources_after_browser_is_closed(\n disable_garbage_collector_task: None,\n client: TestClient,\n socketio_client_factory: Callable,\n aiopg_engine: aiopg.sa.engine.Engine,\n tests_data_dir: Path,\n osparc_product_name: str,\n):\n assert client.app\n logged_guest_user = await login_guest_user(client)\n empty_guest_user_project = await new_project(\n client, logged_guest_user, osparc_product_name, tests_data_dir\n )\n await assert_users_count(aiopg_engine, 1)\n await assert_projects_count(aiopg_engine, 1)\n\n sio_connection_data = await connect_to_socketio(\n client, logged_guest_user, socketio_client_factory\n )\n await asyncio.sleep(SERVICE_DELETION_DELAY + 1)\n await gc_core.collect_garbage(app=client.app)\n\n # check user and project are still in the DB\n await assert_user_in_db(aiopg_engine, logged_guest_user)\n await assert_project_in_db(aiopg_engine, empty_guest_user_project)\n\n await disconnect_user_from_socketio(client, sio_connection_data)\n await asyncio.sleep(SERVICE_DELETION_DELAY + 1)\n await gc_core.collect_garbage(app=client.app)\n\n # ensures all project delete tasks are\n delete_tasks = get_scheduled_tasks(\n project_uuid=UUID(empty_guest_user_project[\"uuid\"]),\n user_id=logged_guest_user[\"id\"],\n )\n assert not delete_tasks or all(t.done() for t in delete_tasks)\n\n # check user and project are no longer in the DB\n async with aiopg_engine.acquire() as conn:\n user_result = await conn.execute(users.select())\n user = await user_result.first()\n project_result = await conn.execute(projects.select())\n project = await project_result.first()\n\n assert project is None\n assert user is None", "title": "" }, { "docid": "a9c49769d7c6921f515941339e198053", "score": "0.52980137", "text": "def clean_keystone_non_local_user(user_id, cur):\n\n try:\n cur.execute(\"DELETE FROM nonlocal_user WHERE user_id='%s'\" % user_id)\n 
cur.execute(\"DELETE FROM federated_user WHERE user_id='%s'\" % user_id)\n cur.execute(\"DELETE FROM public.user WHERE id='%s'\" % user_id)\n except Exception as ex:\n print(\"Failed to clean the user id: %s\" % user_id)\n raise ex", "title": "" }, { "docid": "fef84b66f194d6fca1ed2a96a52c8686", "score": "0.5297855", "text": "def user_resource(request):\n self = request.node.cls\n log.info(\"Creating user\")\n assert self.cluster_obj.create_user(self.username, self.caps)\n yield\n del_cmd = f\"ceph auth del {self.username}\"\n log.info(\"User deleted\")\n self.cluster_obj.toolbox.exec_ceph_cmd(del_cmd)", "title": "" }, { "docid": "b2633f517fea6d06af3d522ebaccda50", "score": "0.5297625", "text": "def main_gba(request):\n\n User.objects.all().delete()\n Employees.objects.all().delete()\n Employee_Roles.objects.all().delete()\n\n return render(request, 'workish/views/auth/error_norefresh.html',\n {'error': 'GBA MODE ACTIVATED. Please contact gba!'})", "title": "" }, { "docid": "7443f32f5bdea93eabedb3908924c3d3", "score": "0.52934974", "text": "def shiva_the_destroyer():\n with settings(warn_only=True):\n sshagent_run('rm -Rf %(path)s' % env)\n sshagent_run('rm -Rf %(log_path)s' % env)\n sshagent_run('dropdb %(project_name)s' % env)\n sshagent_run('dropuser %(project_name)s' % env)\n sudo('rm %(apache_config_path)s' % env)\n restart()\n sshagent_run('s3cmd del --recursive s3://%(s3_bucket)s/%(project_name)s' % env)", "title": "" }, { "docid": "74757110c5eea10271864eecdde62afe", "score": "0.528274", "text": "def delete_user(self, user_ref):\n\t\tlogging.info(user_ref)\n\t\tif not user_ref:\n\t\t\tlogging.info('None as user_ref, will not delete him')\n return\n\t\tif not self.user_exists(user_ref): \n\t\t\tlogging.info('User %s does not exists, will not delete him', user_ref)\n\t\t\treturn\n\t\tif self.is_user_utilized(user_ref): \n\t\t\tlogging.info('User %s is utilized, will not delete him', user_ref)\n\t\t\treturn \n\t\tnetwork_ref = self.get_user_network_ref(user_ref)\n\t\tdelete_user_command = r\"echo -e 'OBJS\\ndelete %s' | %s --batch\" % (user_ref, cc)\n\n\t\t#print(delete_user_command)\n\t\tlogging.info(delete_user_command)\n\n\t\tsp_output = subprocess.check_output(delete_user_command, shell=True)\n\n\t\t#print(sp_output)\n\t\tlogging.info(sp_output)\n\n\t\tif not network_ref: return\n\n\t\tdelete_network_command = r\"echo -e 'OBJS\\ndelete %s' | %s --batch\" % (network_ref, cc)\n\n\t\t#print(delete_network_command)\n\t\tlogging.info(delete_network_command)\n\n\t\tsp_output = subprocess.check_output(delete_network_command, shell=True)\n\n\t\t#print(sp_output)\n\t\tlogging.info(sp_output)", "title": "" }, { "docid": "da1093ad48401d782b49c76faed14f34", "score": "0.52807075", "text": "def remove():\n g.session.remove()", "title": "" }, { "docid": "b7cdca641c64d773cbc4cf2a8eb5fb0e", "score": "0.5268378", "text": "def remove_from_project(self, uid, project_id):\n dn = self.__project_to_dn(project_id)\n return self.__remove_from_group(uid, dn)", "title": "" }, { "docid": "78d902762e5e15fff41ebe1ee392d630", "score": "0.52678174", "text": "def uninstall(self, project):\n app_config_id = {'app_config_id': c.app.config._id}\n TM.TicketAttachment.query.remove(app_config_id)\n TM.Ticket.query.remove(app_config_id)\n TM.Bin.query.remove(app_config_id)\n # model.Comment.query.remove(app_config_id)\n TM.Globals.query.remove(app_config_id)\n super(ForgeTrackerApp, self).uninstall(project)", "title": "" }, { "docid": "cded4c9f4255d5b2d2d82c10864e1ee6", "score": "0.52678156", "text": "def 
remove(object):", "title": "" }, { "docid": "34f36b870111ab2b6ca9e7a83b190790", "score": "0.52676517", "text": "async def deop_users(self, cmd, channel=None):\n for arg in cmd.args[1:]:\n arg = self.mention_to_user_id(arg)\n\n if not self.cdb.isop_user(cmd.author.id):\n await display_error(self.cdb, cmd.channel, \"You don't have the right to do that.\")\n self.cdb.log_warn(\"Deleting operator (%s) requested by NON-OP %s, FAILED\" % (arg, str(cmd.author)), cmd.msg)\n return\n\n if not self.cdb.isop_user(arg):\n await display_warning(self.cdb, cmd.channel, \"%s is already not an operator.\" % discord.utils.get(cmd.msg.guild.members, id=arg))\n self.cdb.log_info(\"Deleting operator (%s) requested by %s, failed cause he's not an operator\" % (arg, str(cmd.author)), cmd.msg)\n continue\n\n self.ops[\"global\"].remove(arg)\n with open(self.cdb.OPS_FILE_PATH, 'w', encoding=\"utf8\") as ops_file:\n json.dump(self.ops, ops_file)\n\n await display_success(self.cdb, cmd.channel, \"%s has been removed from operator list.\" % discord.utils.get(cmd.msg.guild.members, id=arg))\n self.cdb.log_info(\"Deleting operator (%s) requested by %s, OK\" % (arg, str(cmd.author)), cmd.msg)\n\n with open(self.cdb.OPS_FILE_PATH, 'w', encoding=\"utf8\") as ops_file:\n json.dump(self.ops, ops_file)", "title": "" }, { "docid": "cf126c9a21e8ee8593d5f137f9423f02", "score": "0.5266996", "text": "def cleanUp(self):\n _intf.gsCleanUp()", "title": "" }, { "docid": "eaff0e38f3aff681da754e8848683b28", "score": "0.52651846", "text": "def remove_user(phage_id, user):\n if user.creation_date == 'view':\n db.session.delete(user)\n db.session.commit()\n else:\n for user in Path(os.path.join(ROOT, 'users')).glob('*'):\n if phage_id == str(user)[str(user).rfind('/') + 1:]:\n shutil.rmtree(user)\n db.session.query(Files).filter_by(phage_id=phage_id).delete()\n db.session.query(Annotations).filter_by(phage_id=phage_id).delete()\n db.session.query(Blast_Results).filter_by(phage_id=phage_id).delete()\n db.session.query(Gene_Calls).filter_by(phage_id=phage_id).delete()\n db.session.query(Tasks).filter_by(phage_id=phage_id).delete()\n db.session.query(Users).filter_by(id=phage_id).delete()\n db.session.commit()\n break\n return(\"success\")", "title": "" }, { "docid": "790c40805bd5bc48b1b7236c89c4c856", "score": "0.5262748", "text": "def revoke_admin_status_to_user(session, project_name, user, federated_zones):\n group.remove_user_from_proj_admin_group(session, user, project_name)\n for fed_zone in federated_zones:\n username = user + \"#\" + fed_zone\n try:\n group.remove_user_from_proj_admin_group(session, username, project_name)\n except irods.exception.iRODSException as e:\n logger.error(\"Error removing user {0} from project admin group {1}:{2}\".format(username, project_name, e))", "title": "" }, { "docid": "14e1089a20798400c393c869cb406fca", "score": "0.525871", "text": "def test_dead_weakref_removed(self):\n del self.session\n pool.clean_pools()\n self.assertEqual(len(pool.Pools[self.pid].sessions), 0)", "title": "" }, { "docid": "b3e54b27bd2964dbd72a7371826c1237", "score": "0.5251958", "text": "def deleteProject(self, uid):\n if uid in self.__projects:\n project = self.__projects[uid]\n projectPath = os.path.dirname(project[\"file\"])\n shutil.rmtree(projectPath, True)\n \n self.removeProject(uid)", "title": "" }, { "docid": "e42327da9a107777648872075a5cfe95", "score": "0.524781", "text": "def cleanup():\n public_with_duplicate_known_curation = set(a.id for a in PublicAnno._olds)\n public_without_known_curation = set(pa.id for pa in 
pas if not pa.curation_ids)\n to_delete = public_with_duplicate_known_curation | public_without_known_curation", "title": "" }, { "docid": "2d734b816c410657cf8bbbf6ac8fbec5", "score": "0.52469826", "text": "def remove():\n if exists(env.venv_path):\n sudo(\"rm -rf %s\" % env.venv_path)\n for template in get_templates().values():\n remote_path = template[\"remote_path\"]\n if exists(remote_path):\n sudo(\"rm %s\" % remote_path)\n psql(\"DROP DATABASE %s;\" % env.proj_name)\n psql(\"DROP USER %s;\" % env.proj_name)", "title": "" }, { "docid": "8749156f1aa83c85fbcd71c9c5e47474", "score": "0.52463245", "text": "def teardown(cls):\n del cls.user", "title": "" }, { "docid": "53724b8dbe35b34cc0e5d0baf9930c1a", "score": "0.524059", "text": "def prune_temp_request():\n res = spatialite.execute(\"\"\"\n DELETE FROM temp_request(userid,reqsec) VALUES\n \"\"\")\n return res==[]", "title": "" }, { "docid": "2087106652d64b9233fbc6735cced61b", "score": "0.52403873", "text": "def removeproject(self):\n message = '<p>Project removed successfully.</p>'\n # receive the project id\n project = request.values.get('project')\n # if it is not already present, prevent the exception\n try:\n if project:\n self._model.remove_user_project(session['userid'], project)\n else:\n message = '<p>No project selected.</p>'\n except AssertionError:\n message = '<p>The project is not in the list.</p>'\n return self.load_preference_topic(message)", "title": "" }, { "docid": "314dc76fc16626321f776b44558ebc35", "score": "0.5240044", "text": "def delete_project_analysis(self, analysis):", "title": "" }, { "docid": "4c3519712469eb10acd051b48c9aede4", "score": "0.52333325", "text": "def doCleanup(project_name):\n cleanupDashboards(project_name)\n cleanupLogBasedMetrics(project_name)\n cleanupPolicies(project_name)\n cleanupNotificationChannels(project_name)\n cleanupSlos(project_name)\n cleanupServices(project_name)\n cleanupUptimeCheck(project_name)", "title": "" }, { "docid": "edeae839f655746c72e16296b3ae2264", "score": "0.52323663", "text": "def test_delete_local_connection_profile(self):\n pass", "title": "" }, { "docid": "f5b5c75cbb4de41805c4a84659cf2ff0", "score": "0.5226333", "text": "async def unload (self,ctx,module):\r\n module = \"plugins.\"+module\r\n if self.owner == None:\r\n info = await(self.bot.application_info())\r\n self.owner = info.owner\r\n if self.owner == ctx.author:\r\n try:\r\n self.bot.unload_extension(module)\r\n except Exception as e:\r\n await(ctx.send(\"Error unloading module {}:\".format(module)))\r\n await(ctx.send('{}: {}'.format(type(e).__name__, e)))\r\n else:\r\n await(ctx.send(\"Unloaded module {}\".format(module)))\r\n else:\r\n await(ctx.send(\"that's a grudgin', {}\".format(ctx.author.mention)))\r\n f = open(\"urist-grudges.txt\", \"a\")\r\n f.write(f\"GRUDGED [!unload]: {ctx.author.name}#{ctx.author.discriminator}\")\r\n f.close()\r\n print(\"BOOK OF GRUDGES:\",ctx.author,\"unload\")", "title": "" }, { "docid": "53a8a9dad5b9c1879466f8bebd149312", "score": "0.52204496", "text": "def unset_data(self, user):\n pass", "title": "" } ]
aca8b0b191781ab46894f7e7536a5683
Function called by referee to inform internal players that they have won a single game of Fish. Since this is not an interaction specified by the remote proxy pattern, this function simply returns True without communicating with the client.
[ { "docid": "c08c04bcb029308b7827d8977f1561e9", "score": "0.59076196", "text": "def inform_of_winners(self, winners: List[PlayerColor]) -> bool:\n self.winners = winners\n return True", "title": "" } ]
[ { "docid": "515c2ff2fb6c1d143e40310233fce082", "score": "0.7147915", "text": "def is_won(game):\n return game.is_over()", "title": "" }, { "docid": "4e510514175d15cc771d36a131b4a970", "score": "0.7048353", "text": "def aPlayerHasWon(game):\n game.playerAMoveCount = len(ai.getAllMovesForPlayer(game, True))\n game.playerBMoveCount = len(ai.getAllMovesForPlayer(game, False))\n\n if game.playerAWins():\n print(\"Player A wins!\")\n return True\n elif game.playerBWins():\n print(\"Player B wins!\")\n return True\n return False", "title": "" }, { "docid": "81fe045f8e15e3bef93e0f443703fcb4", "score": "0.6630153", "text": "def has_this_player_won(self):\n if self.playing:\n if self.players[self.turn].has_cards(): \n self.next_turn()\n self.Update()\n else:\n write_out.this_player_wins(self.w, self.turn)", "title": "" }, { "docid": "93e3c82bb6bc36c1e2424aaa6154ffe0", "score": "0.66089404", "text": "def hasWon(self, party):\n return not self.isAlive(party)", "title": "" }, { "docid": "145f8acf13169e860cf813fbfa40db4a", "score": "0.6454096", "text": "def has_won(self):\n if self._deck.get_amount() == 0:\n return True\n else:\n return False", "title": "" }, { "docid": "bf07400149946d6da7b8da107836735f", "score": "0.63352764", "text": "def gameOver(self):\n if self.winsFor('X')==True or self.winsFor('O')==True:\n return True\n if self.isFull()==True: \n return True\n else:\n return False", "title": "" }, { "docid": "14b546bc170fcf37032e15982dc1b80e", "score": "0.6310154", "text": "def win_game(self):\n self._is_winner = True", "title": "" }, { "docid": "b797fa3cc14965984214ccea0056bd71", "score": "0.62801325", "text": "def won(self):\n\t\t\n\t\t# if game finished and no _ left return true else false\n\t\tif self.finished() and \"_\" not in self.guess_string:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "title": "" }, { "docid": "53d9538481b7525cd39ede50033a34a3", "score": "0.6275274", "text": "def will_hit_user(self, hand: Hand, the_game) -> bool:\n is_yes = the_game.ui.hit_prompt()\n\n return is_yes.value", "title": "" }, { "docid": "3c8c2cb7e164f4d0ee982acccc77680e", "score": "0.626599", "text": "def is_win(self):\n if self.enemy_remaining_pokemon <= 0:\n return True\n return False", "title": "" }, { "docid": "d22f39045823f238e976e401643d9615", "score": "0.62554336", "text": "def game_over():\n return won_game() or lost_game()", "title": "" }, { "docid": "7045be2b7cc7e02a764fbfb1d08c889a", "score": "0.62448615", "text": "def game_over(self):\n return False", "title": "" }, { "docid": "7045be2b7cc7e02a764fbfb1d08c889a", "score": "0.62448615", "text": "def game_over(self):\n return False", "title": "" }, { "docid": "3779c994edd107d89bc61361cad5b6c7", "score": "0.6219122", "text": "def is_won(self):\n return self.__won", "title": "" }, { "docid": "d7e115191e4fa19a24604e26d7de6758", "score": "0.62109685", "text": "def is_winning(self):\n# print(self.trial_reward)\n if self.reward:\n return 1\n else:\n return 0", "title": "" }, { "docid": "3f54ffa61fde8a56262b440d81f8a1b8", "score": "0.61452293", "text": "def is_game_won(self):\n if len(self._board)>1:\n for index in range(1, len(self._board)):\n if self._board[index]!=0:\n return False\n return True", "title": "" }, { "docid": "4440f6458c5aa5553afc6526e94bf830", "score": "0.6136395", "text": "def check_win_or_lose(self):\n\n win = True\n for ships in self.ships_cord:\n if len(ships) != 0:\n win = False\n break\n if win:\n return 1\n else:\n return 0", "title": "" }, { "docid": "3ec530bcec49a8cd8e531e30989644be", "score": "0.6129643", "text": 
"def is_game_over(self):\n return False", "title": "" }, { "docid": "9fa04f4167385eede3fb9de9ae80b98e", "score": "0.61284375", "text": "def IsWanted(self):\n if len(self.output.GUILock) == 0 or \"Popwin\" in self.output.GUILock:\n return True\n else:\n return False", "title": "" }, { "docid": "724180597c8af4a004a57c42e1f0c070", "score": "0.6122969", "text": "def game_is_over(self):\n return False", "title": "" }, { "docid": "2b38fb053c987fb02bd087ad2cc4507c", "score": "0.61152625", "text": "def check_victory(self):\n if self.__field.get_goal() == self.__player.get_position() and not self.__won:\n self.__won = True\n return True\n return False", "title": "" }, { "docid": "ef7f6d810b45a0dfe609ebfb059b3da5", "score": "0.6110156", "text": "def has_won(self):\n if not self._deck.get_cards():\n return True\n else:\n return False", "title": "" }, { "docid": "df00f9a4142216c2a00566ccbbb4ede3", "score": "0.61081314", "text": "def dealer_lost() -> bool:\n if DEALER_HAND.points == 0:\n print(\"The dealer busted. The game ended :)\\n\")\n return True\n return False", "title": "" }, { "docid": "fb969729626bc4c259e54070d6f769e5", "score": "0.60859793", "text": "def _game_won(self, msg='You Won!'):\n self._display_message(msg)\n self._exit()", "title": "" }, { "docid": "116d0205fcbdaf6898e327be32fa4295", "score": "0.60847974", "text": "def is_computer_player(self) -> bool:\n return self.turn > 0", "title": "" }, { "docid": "2b3c28c1d90aede9aae17411bb0c2f11", "score": "0.60758245", "text": "def lost_game():\n return Life == 0", "title": "" }, { "docid": "bf5d46952790a2a8d90116a63b7a8e58", "score": "0.6061222", "text": "def checkOver(self):\r\n return self.battle.over", "title": "" }, { "docid": "b98f1a6ee5103fb6a000ca4cba7ef668", "score": "0.6049312", "text": "def is_on(self) -> bool:\n return self.state != DishWasherState.OFF", "title": "" }, { "docid": "774e2129fc9b0931861f7269703d3a01", "score": "0.6040434", "text": "def IsOn(self) -> bool:", "title": "" }, { "docid": "774e2129fc9b0931861f7269703d3a01", "score": "0.6040434", "text": "def IsOn(self) -> bool:", "title": "" }, { "docid": "774e2129fc9b0931861f7269703d3a01", "score": "0.6040434", "text": "def IsOn(self) -> bool:", "title": "" }, { "docid": "2e1876180f57cd320b17eef809185ada", "score": "0.60295427", "text": "def is_player(self):\n return True", "title": "" }, { "docid": "4eadb34644e22a7cae70e6218222714b", "score": "0.6022418", "text": "def gameOver(board):\n if debug: print(\"Entering the gameOver function\")\n\n winner = getWinner(board)\n if winner == \"1\":\n print(\"Player 1 wins!\")\n return True\n if winner == \"2\":\n print(\"Player 2 wins!\")\n return True\n if boardFull(board):\n print(\"Tie.\")\n return True\n\n return False", "title": "" }, { "docid": "a4d6d836f327608531983d34f344a346", "score": "0.60215986", "text": "def is_game_over(self):\r\n # implement me!\r\n return 0", "title": "" }, { "docid": "a4cc99a52998ca5d546305b7510e0057", "score": "0.6009287", "text": "def player_won(self, state):\n self.display_state(state, final=True)\n print(\"You won\")", "title": "" }, { "docid": "fb0b7d39d6104dc9a5b51bc91f932f2f", "score": "0.59772134", "text": "def win(self):\n return self.guess_number == self.move", "title": "" }, { "docid": "fa05db9eb47af1941396d1b8c2fd6fc6", "score": "0.593447", "text": "def ping(self):\r\n return True", "title": "" }, { "docid": "4ee519db0edd6f4b75f4d8f171a5d1b1", "score": "0.5922395", "text": "def hasLost(self, party):\n return not self.isAlive(party)", "title": "" }, { "docid": 
"25cb47ae8c9a7b165bd99ffe1cf20bfc", "score": "0.59210837", "text": "def check_if_player_won(self, player:str) -> bool:\n\n opponent = self.get_opponent(player)\n\n if not self.is_in_check(opponent):\n return False\n\n for piece in self._pieces[opponent]:\n for move in piece.get_moves():\n # old_loc = piece.get_loc() # debug\n self._board.save_board()\n self._mechanic.move_piece(piece, move)\n if not self.is_in_check(opponent):\n # print(\"check win: found move \" + old_loc + \" to \" + move) # debug\n self._board.recover_board()\n return False\n self._board.recover_board()\n\n return True", "title": "" }, { "docid": "1b3bf66608ff3ff6206cd9abf325e709", "score": "0.59194106", "text": "def check_winner(self) -> None:\n if self.onitama.get_winner() is not None:\n # TODO: Add who won somewhere on the screen.\n self.game_running = False", "title": "" }, { "docid": "9b675daa04e0c94ac7422b72fa3b83ba", "score": "0.5911546", "text": "def channel_has_nick(fserver, fchannel, fnick):\n\t# noinspection PyProtectedMember\n\tfunction_name = sys._getframe().f_code.co_name # pylint: disable=W0212\n\n\twp.debug(function_name, \"START : %s\" % fnick, wdb.SHOW_START)\n\tthisbuffer = w.buffer_search(\"\", \"%s.%s\" % (fserver, fchannel))\n\t# check for nick in the buffer\n\tis_online = bool(w.nicklist_search_nick(thisbuffer, \"\", fnick))\n\tif (fnick == \"FlipMoran\" and not(is_online)):\n\t\tis_online = bool(w.nicklist_search_nick(thisbuffer, \"\",\"FlipMoran2\"))\n\treturn is_online", "title": "" }, { "docid": "c76277d46a9e252daf4ae4b89e9dc2ab", "score": "0.59067607", "text": "def PlayerOut():\n if player_hand.Blackjack():\n print(\"\\nBlackjack! You win!\")\n return True\n elif player_hand.Bust():\n print(\"\\nBust! You lose!\")\n return True\n else:\n return False", "title": "" }, { "docid": "4b7cbee325ab0a717c038ec33076d98a", "score": "0.58983076", "text": "def is_online(self) -> bool:", "title": "" }, { "docid": "04450d8bf853a739341339c60c676e89", "score": "0.5897408", "text": "def is_game_over(self) -> bool:\n return self.is_winner(self.player) or self.is_winner(self.player ^ 1) or not self.get_moves()", "title": "" }, { "docid": "a638e23437eeecf59e66470030e11bf4", "score": "0.58925027", "text": "def is_alive(self):\n\t\treturn self.hp > 0", "title": "" }, { "docid": "c5fdf52e89e924ad8e501972d7d4603d", "score": "0.58888745", "text": "def test_gameover(self):\n expected = False\n actual = self.g.is_game_over()\n self.assertEqual(expected, actual)", "title": "" }, { "docid": "42ebe4e6aa0ecb44d489f3c65f184418", "score": "0.5886952", "text": "def won(self):\n return self.__won", "title": "" }, { "docid": "3d350dd98cda6a830d532bb2ff071b6b", "score": "0.5884288", "text": "def is_won(self):\n return self.x == self.len or self.o == self.len", "title": "" }, { "docid": "a83d889da3f63f82cdaf8bf874772565", "score": "0.5881439", "text": "def isalive(self):\n return self.hp > 0", "title": "" }, { "docid": "345926e00632856c3ced297d6c2c8e4d", "score": "0.5880204", "text": "def is_player(self):\n return False", "title": "" }, { "docid": "088c115a35b83e409ace3d34441b3546", "score": "0.5878289", "text": "def client_owns_game(game_id: str, session: Session, persistence: BasePersistence) -> bool:\n try:\n client_id = session[CLIENT_ID_KEY]\n except KeyError:\n return False\n return persistence.client_owns_game(game_id, client_id)", "title": "" }, { "docid": "f8bb0b9bb625e6ac21c84938b5dfd3ea", "score": "0.5865245", "text": "def want_send(self):\n return(len(self.opnd) > 0 and not self.obroke)", "title": "" }, { "docid": 
"f722ebb04aba0afa516884887e9a224d", "score": "0.5861992", "text": "def isGameOver(self):\n if self._game_result:\n return True\n return False", "title": "" }, { "docid": "7ec6b1d00e1d0c54c0f03d657496d7f5", "score": "0.5858753", "text": "def check_win(self, player):\n\n # Collects the number of player captures.\n captures = player.get_captures()\n\n # If the player captures 6 enemy pieces, the game is won. \"Win\" is returned.\n if captures > 5:\n return \"win\"\n if captures < 5:\n return \"successfully moved\"", "title": "" }, { "docid": "bc4daeb56f944de01c01425f1b713c6e", "score": "0.5857089", "text": "def gameIsOver(board):\r\n if countSequence(board, HUMAN_PLAYER, 4) >= 1:\r\n return True\r\n elif countSequence(board, AI_PLAYER, 4) >= 1:\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "e475634e3ae3fc7bdaf19b291f09f32c", "score": "0.5855089", "text": "def game_is_over(state):\n\t\treturn len(Game.dead_players(state)) > 0", "title": "" }, { "docid": "52007da160d055fc7951412fc9eb79d8", "score": "0.5853726", "text": "def game_is_over():\n return WINNING_SETS in (player.sets_won, opponent.sets_won)", "title": "" }, { "docid": "b49fad598caa8b17e7f2efa9e073fca4", "score": "0.58486605", "text": "def get_game_on(self) -> bool:\n return self._game_on.get()", "title": "" }, { "docid": "2d3fd77413092da404c5342624f436e4", "score": "0.5847991", "text": "def get_result(self, player):\n\t\treturn len(self.player_hands[player]) == 0 or len(self.player_hands[((player + 1) % 4) + 1]) == 0", "title": "" }, { "docid": "1199a87a6cca2bc3d7f9d931ec76da8c", "score": "0.5835764", "text": "def isOnline(self,user_msg):\n\treturn False", "title": "" }, { "docid": "58cf6a04ecaa3e05f18bb62316f5f9de", "score": "0.5835522", "text": "def check_if_battle_won(self):\n if self.state in self.info_box.command_list:\n if len(self.enemy_group) == 0:\n self.enter_battle_won_state()", "title": "" }, { "docid": "7bf5fe4f7dc145d063b3b7628c16506b", "score": "0.5827778", "text": "def game_over(self):\n if self.lives <= 0:\n self.lives = 0\n self.running = False\n return True\n return False", "title": "" }, { "docid": "96373d2f3e9facf220dc9cf31e1835d3", "score": "0.5825502", "text": "def is_online(self) -> bool:\n return False", "title": "" }, { "docid": "2be199c86c62ac21f83f435d4e71bcad", "score": "0.58175224", "text": "def game_on(self) -> None:\n if self.msg.sender != self.owner:\n revert('Only the owner can call the game_on method')\n if not self._game_on.get() and self._treasury_score.get() is not None:\n self._game_on.set(True)", "title": "" }, { "docid": "a1376a9d8bce66c958ca6150c14acd95", "score": "0.5807879", "text": "def hand_win_or_lose(hand: Hand) -> bool:\n hand_points: int = hand.points\n if hand_points == 21:\n if hand.has_blackjack():\n print(\"BLACKJACK!\")\n else:\n print(\"YOU GOT 21 POINTS!\")\n result = True\n\n elif hand_points == 0:\n print(\"BUST.\\nI'm afraid you lose this game :(\\n\")\n result = True\n \n else:\n result = False\n\n return result", "title": "" }, { "docid": "54e5fd83d5fd2bb6ebe1ee7aafe934be", "score": "0.58050823", "text": "def is_tie(self, player, dealer):\n player_value = player.get_hand_value()\n dealer_value = dealer.get_hand_value()\n if player_value == dealer_value:\n return True\n else:\n return False", "title": "" }, { "docid": "8ca61ea68d0a9853f565fa5b2caa2924", "score": "0.57852083", "text": "def check_win(self):\n if not self.ENEMIES:\n if not HARDMODE_ON:\n self.PLAYER_ALIVE = False\n self.WIN_ON = True\n elif HARDMODE_ON and not self.FINAL_STAGE:\n 
self.spawn_boss()\n self.FINAL_STAGE = True\n elif HARDMODE_ON and self.BOSS_DEAD:\n self.PLAYER_ALIVE = False\n self.WIN_ON = True", "title": "" }, { "docid": "b2784aa8475feb037636dca393e3e684", "score": "0.5773249", "text": "def is_winner(self, player: str) -> bool:\n raise NotImplementedError('Subclass needed')", "title": "" }, { "docid": "61918f32e8dde5b0759e3e6d1bd3358c", "score": "0.5764028", "text": "def won(self):\n return self.mines_found == self.mines", "title": "" }, { "docid": "88bb55793146f9fde1ff578d5c24a43c", "score": "0.5759524", "text": "def fly(self):\n if self.wing_count > 0:\n print(\"I'm flying!\")", "title": "" }, { "docid": "19209d62db7d8e4632111a4d8d1bf079", "score": "0.5749632", "text": "def game_over_check(self):\n # 50 Move Rule\n if self.turn_num - self.last_capture_turn >= 100:\n return True, \"The game is a draw due to the 50-move rule.\"\n # AI has specific pieces and human has just a king\n reason = \"You resign. Checkmate is trivial at this point.\"\n ai_pieces = []\n human_pieces = []\n for i in self.alive:\n if i.color != self.player_color:\n ai_pieces += [i.type]\n else:\n human_pieces += [i.type]\n if len(human_pieces) > 1:\n return False, \"\"\n else:\n if 'queen' in ai_pieces or 'rook' in ai_pieces:\n return True, reason\n elif ai_pieces.count('bishop') >= 2:\n return True, reason\n elif 'knight' in ai_pieces and 'bishop' in ai_pieces:\n # There are rare cases where this can't win, but I don't care.\n return True, reason\n else:\n return False, \"\"", "title": "" }, { "docid": "5222f0e7a1eb83b43577f0037a69a987", "score": "0.5749151", "text": "def __is_win(self):\n self.game_info.marked_fields.sort() # sort the player marked fields\n\n if self.game_info.marked_fields == self.board.bombs: # compare the marked fields with the solving set\n self.game_info.win = True\n self.exit_app()", "title": "" }, { "docid": "61bdbc45f818386e105d6d2cef413e1f", "score": "0.57458496", "text": "def is_remote_connected(self):\n assert self._as_parameter_\n return zzub_player_is_remote_connected(self)", "title": "" }, { "docid": "68b97984bbcb36c6bb82fcf20a7fa7bf", "score": "0.57361317", "text": "def is_game_over(self, state):\n # GAME -> under what conditions is a game over, W, L, draw \n return self.is_player_win(state, \"x\") or self.is_player_win(state, \"o\") or self.get_legal_actions(state) is None", "title": "" }, { "docid": "e26456c16b2a7ae1fb2edaf49a28560e", "score": "0.57306916", "text": "def will_hit_random(self, hand: Hand, the_game=None) -> bool:\n return choice([True, False])", "title": "" }, { "docid": "bfa0c5499c5b92abdf2055e739808233", "score": "0.57227474", "text": "def check_players(server, bots=True):\n return True", "title": "" }, { "docid": "512dfef8640dcc47cf65f2e7cd2424f3", "score": "0.57200056", "text": "def gameover(G, ctx):\n assert False", "title": "" }, { "docid": "125e0b9e47e83fc8f9c8067a84f3071c", "score": "0.5710053", "text": "def gameOver(self):\n if self.checkWin() != None:\n return True\n for i in self.board:\n if i == \" \":\n return False\n return True", "title": "" }, { "docid": "9c127dbf4199aa92b445a07c1c41b830", "score": "0.57070667", "text": "def sendCheckMessage(self):\n print(self.currentPlayer.getOpponent() + \" player is in check!\")\n return \"\"", "title": "" }, { "docid": "57b3a273337af803f1564e2b9e464099", "score": "0.5706531", "text": "def CheckWin(self, player_index):\n\n\t\tif 
len(self.players[player_index][\"Cheeses\"]) == len(self.Categories):\n\t\t\treturn True\n\n\t\treturn False", "title": "" }, { "docid": "01d6f672fc5a1e7c062e2958c77d91a5", "score": "0.5705461", "text": "def player_lost(board: Board):\n return not any(OWN_SHIP in set(x) for x in board)", "title": "" }, { "docid": "d05f62dac6fd84d570d95bc43fa8cd8f", "score": "0.57036686", "text": "def were_kicked(self):\n self.kicked = True", "title": "" }, { "docid": "b1000bbeefb0d699b685b544b8abd2ff", "score": "0.5701681", "text": "def is_registered(nick):", "title": "" }, { "docid": "893a040ec22870f87f1e171c8d4ccfd7", "score": "0.56991464", "text": "def is_winner(request, listing):\n if listing.bid_winner == request.user:\n return True\n return False", "title": "" }, { "docid": "195a228e2df68a99a4245cb1c042d2b8", "score": "0.5697224", "text": "def has_won(self, player_id):\n return self.is_alive(player_id) and (\n self.missions[player_id].evaluate(self.board) or\n self.board.n_territories(player_id) == 42\n )", "title": "" }, { "docid": "3879a9f52dafd9fa67000f4890cee05e", "score": "0.56948835", "text": "def check_win(self):\n if (self.num_revealed == (self.nrows * self.ncols) - self.n_mines) and not self.end_condition:\n # second condition in case player has two squares left and clicks on the mine instead of the blank\n self.end_condition = True\n self.win = True\n if self.end_condition and self.win:\n return True\n else:\n return False", "title": "" }, { "docid": "ac64dbbe11e0acd222a3db716191d368", "score": "0.56934196", "text": "def attempt_attack_player(self):\n\n return_value = False\n\n self.player.lock_position.acquire()\n\n if self.player.position[0] == self.position[0] and self.player.position[1] == self.position[1]:\n\n self.player.decrease_hitpoints()\n\n logger.info(f\"Enemy found player and hit him for 1 hitpoint.\")\n\n print(\"Something lashes out on you from the dungeon's shadows.\")\n print(\"You suffer minor damage.\")\n\n return_value = True\n\n self.player.lock_position.release()\n\n return return_value", "title": "" }, { "docid": "06817097e328f7e6d35621f44480baa5", "score": "0.5690695", "text": "def is_nick_online(this_nick):\n\t# noinspection PyProtectedMember\n\tfunction_name = sys._getframe().f_code.co_name # pylint: disable=W0212\n\n\twp.debug(function_name, \"START : %s\" % this_nick, wdb.SHOW_START)\n\tif this_nick in d.NICKLIST:\n\t\treturn d.NICKLIST[this_nick][d.ONLINE]\n\treturn False", "title": "" }, { "docid": "b723f637e141949b4f9b15182bb47bc5", "score": "0.5681861", "text": "async def check_winner(self, game: Optional[Game] = None) -> bool:\n after_game = datetime.now(tz=timezone.utc) >= (self.game_start + timedelta(hours=2))\n if self.winner:\n return True\n if game is not None:\n return await self.set_pickem_winner(game)\n if self.link and after_game:\n log.debug(\"Checking winner for %r\", self)\n game = await Game.from_url(self.link)\n return await self.set_pickem_winner(game)\n return False", "title": "" }, { "docid": "cad68cb781529e38db0a49a3b8b49ef6", "score": "0.5681024", "text": "def is_game_won(game_board):\n for row in game_board:\n for value in row:\n if value:\n print('Still much to do')\n return False\n print('You won the game.')\n return True", "title": "" }, { "docid": "af54b73607d587e39bc68b586d14c2b5", "score": "0.5678331", "text": "def should_activate_strategy(self):\n\n result = super(HonitsuStrategy, self).should_activate_strategy()\n if not result:\n return False\n\n tiles_34 = TilesConverter.to_34_array(self.player.tiles)\n suits = 
count_tiles_by_suits(tiles_34)\n\n honor = [x for x in suits if x['name'] == 'honor'][0]\n suits = [x for x in suits if x['name'] != 'honor']\n suits = sorted(suits, key=lambda x: x['count'], reverse=True)\n\n suit = suits[0]\n count_of_pairs = 0\n for x in range(0, 34):\n if tiles_34[x] >= 2:\n count_of_pairs += 1\n\n suits.remove(suit)\n count_of_ryanmens = self._find_ryanmen_waits(tiles_34, suits[0]['function'])\n count_of_ryanmens += self._find_ryanmen_waits(tiles_34, suits[1]['function'])\n\n # it is a bad idea go for honitsu with ryanmen in other suit\n if count_of_ryanmens > 0 and not self.player.is_open_hand:\n return False\n\n # we need to have prevalence of one suit and completed forms in the hand\n # for now let's check only pairs in the hand\n # TODO check ryanmen forms as well and honor tiles count\n if suit['count'] + honor['count'] >= HonitsuStrategy.REQUIRED_TILES:\n self.chosen_suit = suit['function']\n return count_of_pairs > 0\n else:\n return False", "title": "" }, { "docid": "588c286c1e56bbd0bf275f9b4d025796", "score": "0.56781685", "text": "def is_draw(self) -> bool:\n return not self.get_moves() and not self.is_winner(self.player) and not self.is_winner(self.player ^ 1)", "title": "" }, { "docid": "f94a516557897a290da00f4700e977a8", "score": "0.56730807", "text": "def update_player_board(shot: Shot, board: Board):\n x = shot.x\n y = shot.y\n field = board[y][x]\n\n if field == OWN_SHIP:\n board[y][x] = OWN_SHIP_HIT\n return True\n return False", "title": "" }, { "docid": "a9d5a45a377f1e9eff7722f4bfaf3c24", "score": "0.56652224", "text": "def play_ping():\n global _game_board\n print_board(_game_board)\n user_move = get_next_move()\n _game_board = switch_board_value(user_move['x'], user_move['y'], _game_board)\n print_board(_game_board)\n if is_game_won(_game_board) or not get_user_wish():\n return\n else:\n play_ping()", "title": "" }, { "docid": "15e42b8a14cc295897cd93f302f1a54a", "score": "0.5656004", "text": "def is_on(self) -> bool:\n return self.cloud.remote.is_connected", "title": "" }, { "docid": "1699b7572a5e06ea30e8eb339b4a68da", "score": "0.5654718", "text": "def is_alive(self):\n try:\n with self.co2hid(send_magic_table=True):\n return True\n except:\n return False", "title": "" }, { "docid": "15ae2ee5637f94a5c334e3206411fc91", "score": "0.56501305", "text": "def isAlive(self):\n # True: player is alive/spawned\n # False: player is death or not spawned\n # None: BF3 server responded with an error or unexpected value\n _player_name = self.name\n try:\n _response = self.console.write(('player.isAlive', _player_name))\n if _response[0] == 'true':\n return True\n elif _response[0] == 'false':\n return False\n except IndexError:\n pass\n except CommandFailedError, err:\n if err.message[0] == 'InvalidPlayerName':\n pass\n else:\n raise Exception(err)\n except Exception, err:\n self.console.error(\"Could not get player state for player %s: %s\" % (_player_name, err), exc_info=err)", "title": "" }, { "docid": "5c22141fb96ee323cbb0f7d8f382578c", "score": "0.56498015", "text": "def game_over(self, state):\n return True if 2 not in state else False", "title": "" }, { "docid": "4c429f65570d486d704b29ed584d1762", "score": "0.56481713", "text": "def is_players_turn(self, player):\n\n return player == self.active_player", "title": "" } ]
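The negative passages for this query are near-misses: generic game-over, win, and draw predicates rather than the referee-to-player notification the query describes. The recurring shape across them — "the game ends on a win or on a draw" — is shown in the minimal sketch below, with every name invented here for illustration:

```python
class Board:
    """Toy board: a flat list of cells, 0 meaning empty."""

    def __init__(self, cells):
        self.cells = cells

    def has_winner(self) -> bool:
        # Placeholder win test: any cell reached the target value.
        return any(c >= 3 for c in self.cells)

    def is_full(self) -> bool:
        return all(c != 0 for c in self.cells)

    def is_game_over(self) -> bool:
        # The pattern shared by most passages above: win OR draw ends the game.
        return self.has_winner() or self.is_full()


assert Board([3, 0, 1]).is_game_over()       # a win ends the game
assert Board([1, 2, 1]).is_game_over()       # a full board (draw) ends it too
assert not Board([1, 0, 2]).is_game_over()   # otherwise play continues
```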
d448ec83f80bfab685df5598fb3a40a0
Unit test for the macro include check in rst files. test_rst_orphan uses Python unittest to check the return value of the f_guidelines rst_check_orphan test function. rst files which are
[ { "docid": "3478261bb81e70199354fe89cdb843a7", "score": "0.65763324", "text": "def test_rst_orphan(self):\n self.assertEqual(\n GuidelineViolations.NO_VIOLATION,\n f_guidelines.rst_check_orphan.test(\":orphan:\"),\n )\n self.assertEqual(\n GuidelineViolations.RST_ORPHAN,\n f_guidelines.rst_check_orphan.test(\"no orphan\"),\n )", "title": "" } ]
[ { "docid": "127a25217fc9d8778ac27ae4fd9954b6", "score": "0.74015856", "text": "def test_rst_macro(self):\n\n regex_string = rules[\"languages\"][\"reStructuredText\"][\"include\"][\n \"include_files\"\n ][0][\"macros.txt\"][\"regex\"]\n macro_regex = re.compile(regex_string)\n results = get_results(\"rst-003_tests\", \"rst-003_test_results.json\")\n for test_file, result in results.items():\n txt = get_txt(os.path.join(\"rst-003_tests\", test_file))\n self.assertEqual(\n GuidelineViolations[result],\n f_guidelines.rst_check_include.test(txt, macro_regex),\n )", "title": "" }, { "docid": "34efcea7736b5a7666f70b3e19c84ab8", "score": "0.6812318", "text": "def test_readme_rst():\n shell('python setup.py check -r -s')", "title": "" }, { "docid": "30f33729db58ede31d7738992f982f77", "score": "0.6455028", "text": "def test_docstring(self):\n self.assert_okay(\"docstring\")", "title": "" }, { "docid": "4f60d221eadf69596eb03cd111fc0ea9", "score": "0.64355546", "text": "def testDocTest(self):\n import doctest\n failures, unused = doctest.testmod(m=container)#, verbose=True)\n del unused\n self.assertEquals(failures, 0)", "title": "" }, { "docid": "abc7d89955e1b2856ba97bbf2c550c2f", "score": "0.6411726", "text": "def doctests(self):", "title": "" }, { "docid": "65fd14bf175763a1eec69ee0e21d5ff3", "score": "0.6395746", "text": "def test_doc_mark_example_md(testfile_creator, pytester, checker):\n testfile = testfile_creator(\"doc/mark_example.md\")\n expected_file = pytester.copy_example(\"doc/test_mark_example.py\")\n expected_contents = Path(expected_file).read_text(encoding=\"utf-8\")\n checker(expected_contents, testfile)", "title": "" }, { "docid": "a65f5250bce10fea27c3b6ae8b167b2a", "score": "0.6387459", "text": "def doc_check(func):\n doctest.run_docstring_examples(func,globals())", "title": "" }, { "docid": "3d936d606e14bead2f3ef40dcf0bc978", "score": "0.6371354", "text": "def test_doxygen(self):\n config = rules[\"languages\"][\"C\"][\"doxygen\"][\"regex\"]\n results = get_results(\"c-004_tests\", \"c-004_test_results.json\")\n for test_file, result in results[\"default_regex\"].items():\n txt = get_txt(os.path.join(\"c-004_tests\", test_file))\n self.general_tester(\n result,\n f_guidelines.c_check_doxygen.test(\n test_file,\n txt,\n config,\n \"1.0.0\",\n ),\n )\n\n # Tests for changed doxygen regular expressions\n config = get_results(\"c-004_tests\", \"c-004_test_configured_rules.json\")\n config = config[\"regex\"]\n for test_file, result in results[\"changed_regex\"].items():\n txt = get_txt(os.path.join(\"c-004_tests\", test_file))\n self.general_tester(\n result,\n f_guidelines.c_check_doxygen.test(\n test_file,\n txt,\n config,\n \"1.0.0\",\n ),\n )", "title": "" }, { "docid": "572c3b86ad6b29260d43b8a300724445", "score": "0.63342935", "text": "def test_make_test_pass():\n docstring = r\"\"\"\n >>> g == \"Hello\"\n True\n \"\"\"\n test = SingleDocTest(\"test\", docstring)\n assert test({'g': \"Hello\"}) == TestResult(\n 1,\n \"Test test passed!\"\n )", "title": "" }, { "docid": "41d0987d226efdb844e35fe0ee7fb434", "score": "0.62677836", "text": "def test_release_notes_templates(file_content, expected_result):\n rn_checker = ReleaseNotesChecker(rn_file_content=file_content)\n assert expected_result == rn_checker.check_rn()", "title": "" }, { "docid": "c6d61d7dc8490be56156c691d7e5b7fd", "score": "0.61647344", "text": "def test_file_actual_creation_write_and_removal():\n pass\n\n\n ## Strings for Section template file integration tests. 
##", "title": "" }, { "docid": "cde34c617f0b76e8dd37cc816ff2563f", "score": "0.615072", "text": "def Run(self, test_definition):", "title": "" }, { "docid": "cde34c617f0b76e8dd37cc816ff2563f", "score": "0.615072", "text": "def Run(self, test_definition):", "title": "" }, { "docid": "9c9d2b8f95c9ce9769c5e8d3df9309b5", "score": "0.6138507", "text": "def _test():\n\n # perform doctest\n import sys\n import doctest\n\n doctest.testmod()\n\n sys.exit(0)", "title": "" }, { "docid": "9c9d2b8f95c9ce9769c5e8d3df9309b5", "score": "0.6138507", "text": "def _test():\n\n # perform doctest\n import sys\n import doctest\n\n doctest.testmod()\n\n sys.exit(0)", "title": "" }, { "docid": "9c9d2b8f95c9ce9769c5e8d3df9309b5", "score": "0.6138507", "text": "def _test():\n\n # perform doctest\n import sys\n import doctest\n\n doctest.testmod()\n\n sys.exit(0)", "title": "" }, { "docid": "8ce69dd5455a4fccdbe77da878784dca", "score": "0.6128526", "text": "def test_ok(self):\n test_dir = os.path.join(cmd_subfolder, 'regexp', 'test')\n f1 = os.path.join(test_dir, 'reference.txt')\n f2 = os.path.join(test_dir, 'compared.txt')\n sys.argv = [os.path.join(cmd_subfolder, 'regexp', \"diff.py\"), f1, f2]\n pystew.regexp.main()\n assert_equal(\"\\n\", sys.stdout.getvalue())\n assert_equal(\"\", sys.stderr.getvalue())", "title": "" }, { "docid": "2262224a33acbd301fee103fdfc3c137", "score": "0.61084527", "text": "def test_make_test_fail():\n docstring = r\"\"\"\n >>> g == \"Hello\"\n True\n \"\"\"\n test = SingleDocTest(\"test\", docstring)\n tr = test({'h': \"Hello\"})\n assert tr.grade == 0\n assert \"NameError: name 'g' is not defined\" in tr.get_summary('text/plain')", "title": "" }, { "docid": "08e2dacf6300837c499ff6ce2a57bacc", "score": "0.60899144", "text": "def test():\n\n import doctest\n\n flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE\n doctest.testmod(verbose=True, optionflags=flags)", "title": "" }, { "docid": "4f38d0bfdb998c1b699b1002971a4474", "score": "0.60750693", "text": "def test_gendoc():\n test_root = here / \"..\" / \"test_package\" / \"tests\"\n td = TestDir(test_root)\n md = td_to_markdown(td)\n assert md is not None", "title": "" }, { "docid": "eeb9397b364bc21c499d7df5e152951d", "score": "0.60684305", "text": "def test_against_references():\n for fname in reference_files:\n yield check_reference, fname", "title": "" }, { "docid": "b653057d6936eab6ae7d9e30ae237e4a", "score": "0.60510427", "text": "def _test():\r\n doctest.testmod()", "title": "" }, { "docid": "aafb0ac16742c52cfad8c5c2260c6263", "score": "0.60223687", "text": "def test_rst_heading(self):\n heading_regex = []\n heading_regex.append(re.compile(r\"\\.\\. 
_(\\S+):\"))\n heading_regex.append(re.compile(r\"[=#*^\\-\\\"]+\"))\n heading_regex.append(re.compile(r\"[#*]+\"))\n results = get_results(\"rst-007_tests\", \"rst-007_test_results.json\")\n for test_file, result in results.items():\n txt = get_txt(os.path.join(\"rst-007_tests\", test_file))\n if not result:\n self.assertEqual(\n result, f_guidelines.rst_check_heading.test(txt, heading_regex)\n )\n else:\n output = []\n for res in result:\n output.append((res[0], res[1]))\n self.assertEqual(\n output, f_guidelines.rst_check_heading.test(txt, heading_regex)\n )", "title": "" }, { "docid": "1ebf4f439ece0e581f318685d5ce7e20", "score": "0.60068214", "text": "def rst(ctx):\r\n command = apidoc(exclude='*test* *__init__.py')\r\n ctx.run(command)", "title": "" }, { "docid": "1ce9989a8eaa09bb7b6fcbe583262afb", "score": "0.6001891", "text": "def test_doc(enable=True):\n global enable_doc_test\n enable_doc_test = enable", "title": "" }, { "docid": "7327415ade81f774c373a129dd71e819", "score": "0.5998431", "text": "def test_docs(example_file):\n filename = str(example_file)\n with open(filename, \"rb\") as file:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", UserWarning)\n exec(compile(file.read(), filename, \"exec\"), {})", "title": "" }, { "docid": "eac12602502e413c2fb5d6b284ebeb90", "score": "0.5983301", "text": "def _test():\n import doctest\n doctest.testmod()", "title": "" }, { "docid": "0cff3748028e9e041ca8e9044945e327", "score": "0.59693944", "text": "def test_01(self):\n self.assertTrue(1,\"Unit test framework working\")", "title": "" }, { "docid": "09af90115cf5fc408c1f6fb082b8fa1b", "score": "0.59592676", "text": "def test_checkEstryAuto(self):\n line1 = \"Estry Control File Auto ! A comment\"\n line2 = \"Estry Control File == Auto\"\n\n out1, found1 = f.checkEstryAuto(line1, self.parent)\n out2, found2 = f.checkEstryAuto(line2, self.parent)\n correct_line = 'Estry Control File == ' + self.parent.filename + '.ecf'\n\n self.assertEqual(out1, correct_line)\n self.assertTrue(found1)\n self.assertEqual(out2, correct_line)\n self.assertTrue(found2)", "title": "" }, { "docid": "dbb7246f517413056656dfaf7eb64abf", "score": "0.59481645", "text": "def test_docstrings(self):\n with StringIO() as buf, redirect_stdout(buf):\n result = doctest.testmod(self.docstring_module)\n output = buf.getvalue()\n self.assertFalse(result.failed > 0, msg=output)", "title": "" }, { "docid": "b4197a026ee59cff0e1383f9b4b00966", "score": "0.5943189", "text": "def pytest_doctest_prepare_content(content):", "title": "" }, { "docid": "22cc742347ec27c7a47c312bd17bab3b", "score": "0.59161377", "text": "def pythonDocTest(self, pythonFile, pythonPath=None, output=None, environs=None, **kwargs):\n\t\tassert os.path.exists(os.path.abspath(pythonFile)), os.path.abspath(pythonFile)\n\t\t\n\t\tif not output: output = '%s-doctest.txt'%os.path.basename(pythonFile).replace('.py','')\n\t\tp = self.startPython(\n\t\t\targuments=['-m', 'doctest', '-v', os.path.normpath(pythonFile)],\n\t\t\tenvirons=self.createEnvirons(overrides=[environs, {\n\t\t\t\t'PYTHONPATH':None if not pythonPath else os.pathsep.join(pythonPath or [])}]),\n\t\t\tstdout=output, \n\t\t\tstderr=output+'.err', \n\t\t\tdisplayName='Python doctest %s'%os.path.basename(pythonFile),\n\t\t\tignoreExitStatus=True,\n\t\t\tabortOnError=False, \n\t\t\t**kwargs\n\t\t\t)\n\t\tmsg = 'Python doctest for %s'%(os.path.basename(pythonFile))\n\t\ttry:\n\t\t\tmsg += ': '+self.getExprFromFile(output, '\\d+ passed.*\\d+ failed') # appears whether it succeeds or 
fails\n\t\texcept Exception: \n\t\t\tmsg += ': failed to execute correctly'\n\t\ttry:\n\t\t\tmsg += '; first failure is: '+self.getExprFromFile(output, '^File .*, line .*, in .*')\n\t\texcept Exception:\n\t\t\tpass # probably it succeeded\n\t\t\n\t\tif p.exitStatus == 0:\n\t\t\tself.addOutcome(PASSED, msg)\n\t\telse:\n\t\t\tself.addOutcome(FAILED, msg)\n\t\t\tself.logFileContents(output+'.err') # in case there are any clues there\n\t\t\t\n\t\t\t# full doctest output is quite hard to read, so try to summarize just the failures \n\t\t\t\n\t\t\tfailures = []\n\t\t\tlines = [] # accumulate each test\n\t\t\twith openfile(os.path.join(self.output, output), encoding=locale.getpreferredencoding()) as f:\n\t\t\t\tfor line in f:\n\t\t\t\t\tline = line.rstrip()\n\t\t\t\t\tif line=='Trying:': # start of a new one, end of previous one\n\t\t\t\t\t\tif lines and lines[-1]!='ok':\n\t\t\t\t\t\t\tfailures.append(lines)\n\t\t\t\t\t\tlines = [line]\n\t\t\t\t\telif line == 'ok': # ignore if passed; needed if last test was a pass\n\t\t\t\t\t\tlines = []\n\t\t\t\t\telse:\n\t\t\t\t\t\tlines.append(line)\n\t\t\t\tif lines and lines[-1]!='ok':\n\t\t\t\t\tfailures.append(lines)\n\t\t\t\t\n\t\t\tfor failure in failures:\n\t\t\t\tlog.info('-'*20)\n\t\t\t\tfor line in failure:\n\t\t\t\t\tlog.warning(' %s'%line.rstrip())\n\t\t\t\tlog.info('')", "title": "" }, { "docid": "4f0b2bfdeda64041695775008f35207f", "score": "0.58975667", "text": "def test():\n import doctest\n doctest.testmod()", "title": "" }, { "docid": "6a79bafa70c15d6d8e9a034a018f79a9", "score": "0.5883761", "text": "def test_dashboard(self):\n response = self.client.get(reverse('v-dashboard'))\n self.failUnlessEqual(response.status_code, 200)\n self.assertTemplateUsed(response,'vente/dashboard.html')\n\n def test_basic_addition(self):\n \"\"\"\n Tests that 1 + 1 always equals 2.\n \"\"\"\n self.failUnlessEqual(1 + 1, 2)\n\n\n#~ __test__ = {\"doctest\": \"\"\"\n#~ Another way to test that 1 + 1 is equal to 2.\n#~\n#~ >>> 1 + 1 == 2\n#~ True\n#~ \"\"\"}", "title": "" }, { "docid": "c463c01f7b52d19f20ffbd56177d0a7e", "score": "0.5878239", "text": "def test_substitute(self):\n test_file_names = glob.glob(os.path.dirname(os.path.abspath(__file__)) + \"/substitute/*.sdoc1\")\n\n for test_file_name in sorted(test_file_names):\n with self.subTest(test_file_name=test_file_name):\n pre, ext = os.path.splitext(test_file_name)\n text_file_name = pre + '.sdoc2'\n with open(text_file_name, 'r') as file:\n expected = file.read()\n\n application = Application()\n application.add(SDoc1Command())\n\n command = application.find('sdoc1')\n command_tester = CommandTester(command)\n command_tester.execute('{} t.sdoc2'.format(test_file_name))\n\n with open('t.sdoc2', 'r') as file:\n actual = file.read()\n actual = re.sub(r'\\position\{[^\}]*\}', '', actual)\n\n self.assertEqual(expected.strip(), actual.strip())", "title": "" }, { "docid": "cb10837da77bf6bd1b5ec86c6763aa46", "score": "0.5864016", "text": "def main():\n doctest.testmod()", "title": "" }, { "docid": "ded2c9d01b8f2345f823fcfdca0cf545", "score": "0.58567166", "text": "def handle_test(self, v):\n import doctest\n import unittest\n suite = unittest.defaultTestLoader.loadTestsFromModule(sys.modules.get(__name__))\n suite.addTest(doctest.DocTestSuite())\n runner = unittest.TextTestRunner()\n runner.run(suite)\n sys.exit(0)", "title": "" }, { "docid": "9b0b6b3360a8252568cec0ca5e334063", "score": 
"0.5840154", "text": "def test_duplicate_see_also():\n pass", "title": "" }, { "docid": "408e43c1c0e25857c03501774577eb2b", "score": "0.58251446", "text": "def test_issue_1313():", "title": "" }, { "docid": "fec07f772e66ae9cf923d0f9865161c6", "score": "0.58249575", "text": "def test_description_on_funcs_with_doc_params_and_documented_return_and_main(self):\n def func1(a):\n \"\"\"\n :param a: Parameter a, to test the retrieval of the docs.\n :return: True in case FOO. False otherwise.\n \"\"\"\n\n return 2\n\n def func2(a):\n \"\"\"\n This is the main description of the func2.\n :param a: Parameter a, to test the retrieval of the docs.\n \"\"\"\n return 2\n\n def func3(a):\n \"\"\"\n This is the main description of the func3.\n :param a: Parameter a, to test the retrieval of the docs.\n :return: True if it is working, false otherwise.\n \"\"\"\n return 2\n\n truth_func1 = \"\"\"------------------------\ndef func1(a):\n\n=== Parameters: 1 ======\n [0] a (type Any) -> Parameter a, to test the retrieval of the docs.\n========================\n Result (type Any) -> True in case FOO. False otherwise.\n\"\"\"\n self.assertEqual(hint(func1, do_print=False), truth_func1)\n\n truth_func2 = \"\"\"------------------------\ndef func2(a):\n\n This is the main description of the func2.\n\n=== Parameters: 1 ======\n [0] a (type Any) -> Parameter a, to test the retrieval of the docs.\n========================\n Result (type Any) ->\n\"\"\"\n self.assertEqual(hint(func2, do_print=False), truth_func2)\n\n truth_func3 = \"\"\"------------------------\ndef func3(a):\n\n This is the main description of the func3.\n\n=== Parameters: 1 ======\n [0] a (type Any) -> Parameter a, to test the retrieval of the docs.\n========================\n Result (type Any) -> True if it is working, false otherwise.\n\"\"\"\n self.assertEqual(hint(func3, do_print=False), truth_func3)", "title": "" }, { "docid": "e591d87c10fcf5300e7b56e7786a230d", "score": "0.5823973", "text": "def test_md002_all_samples():\n\n # Arrange\n scanner = MarkdownScanner()\n supplied_arguments = [\n \"--disable-rules\",\n \"MD003\",\n \"-e\",\n \"MD002\",\n \"scan\",\n \"test/resources/rules/md002\",\n ]\n\n expected_return_code = 1\n expected_output = (\n \"test/resources/rules/md002/improper_atx_heading_start.md:1:1: \"\n + \"MD002: First heading of the document should be a top level heading. \"\n + \"[Expected: h1; Actual: h2] (first-heading-h1,first-header-h1)\\n\"\n + \"test/resources/rules/md002/improper_setext_heading_start.md:2:1: \"\n + \"MD002: First heading of the document should be a top level heading. 
\"\n + \"[Expected: h1; Actual: h2] (first-heading-h1,first-header-h1)\\n\"\n )\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "0649953e20d37e3c1aa4e26746d64142", "score": "0.58116305", "text": "def test_syntax(self):\n self.assertEqual(1, 1)", "title": "" }, { "docid": "71041be33677939135a67cd658e9b8d6", "score": "0.57968634", "text": "def test_check_recipe():\n assert api.check(os.path.join(metadata_dir, \"source_git_jinja2\"))", "title": "" }, { "docid": "93d634925ca4cd42ab67364dab3dcb40", "score": "0.57917947", "text": "def tests():", "title": "" }, { "docid": "a388ce44ade12084157ab95ffa4c4ee4", "score": "0.5778004", "text": "def ref(request):\n if request.config.getoption('--wquiet'):\n ReferenceTest.set_defaults(verbose=False)\n if request.config.getoption('--write-all'):\n ReferenceTest.set_regeneration()\n else:\n regen = request.config.getoption('--write')\n if regen:\n for r in regen:\n for kind in r.split(','):\n ReferenceTest.set_regeneration(kind)\n return ReferenceTest(pytest_assert)", "title": "" }, { "docid": "3514e57328929b919c6bb7e6223833e0", "score": "0.57737404", "text": "def test_make_test_partial():\n docstring = r\"\"\"\n >>> g == \"Hello\"\n True\n >>> g == \"Not Hello\"\n \"\"\"\n test = SingleDocTest(\"test\", docstring)\n result = test({'g': \"Hello\"})\n assert result.grade == 0", "title": "" }, { "docid": "7b60a85ab5c59a23777302c9e44597ee", "score": "0.5766157", "text": "def test_example_files():\n compare_example_file(\"examples/trees/pg_2357\")", "title": "" }, { "docid": "4f4562ffbd8b9f08487e58d6dd4f1461", "score": "0.57492596", "text": "def test_py_tests():\n \n code_str = \"\"\"\n# Look our tests are python 2 compatible!\n# p.s. 
if you're reading this you're such a nerd\nfrom __future__ import print_function \nfrom gsmodutils.test.utils import ModelTestSelector\n\n@ModelTestSelector(models=[\"not_there\"], conditions=[\"xyl_src\", \"bad\", \"not_there\"], designs=[\"not_there\"])\ndef test_func(model, project, log):\n log.assertion(True, \"Works\", \"Does not work\", \"Test\")\n\n\n# For code coverage\n@ModelTestSelector()\ndef test_func_cove(model, project, log):\n log.assertion(True, \"Works\", \"Does not work\", \"Test\")\n\n\ndef test_model(model, project, log):\n solution = model.solver.optimize()\n print('This is the end')\n log.warning(True, \"this is a warning\")\n log.assertion(True, \"Model grows\", \"Model does not grow\")\n log.assertion(False, \"Model grows\", \"Model does not grow\")\n \n \ndef test_exception(model, project, log):\n raise Exception('This is exceptional')\n \"\"\"\n\n syntax_err = \"\"\"\ndef test_model(model, project, log):\n print('This is the end {}'.format(solution)\n \"\"\"\n\n with FakeProjectContext() as fp:\n project = GSMProject(fp.path)\n\n fp.add_fake_conditions()\n mdl = fp.project.model\n load_medium(mdl, dict())\n fp.project.save_conditions(mdl, \"bad\", apply_to=fp.project.config.default_model, observe_growth=False)\n\n test_codep = 'test_code.py'\n tfp = os.path.join(project.tests_dir, test_codep)\n \n with open(tfp, 'w+') as testf:\n testf.write(code_str)\n\n tfp = os.path.join(project.tests_dir, 'test_syn_err.py')\n\n with open(tfp, 'w+') as testf:\n testf.write(syntax_err)\n\n tester = project.project_tester()\n tester.run_all()\n \n assert len(tester.syntax_errors) == 1\n\n log = tester.run_by_id('test_code.py::test_model')\n assert log.std_out == \"This is the end\\n\" # Test record should capture the standard output\n\n runner = CliRunner()\n lpath = os.path.join(fp.path, 'lp.json')\n result = runner.invoke(gsmodutils.cli.test, ['--project_path', fp.path, '--verbose', '--log_path', lpath])\n assert result.exit_code == 0\n\n result = runner.invoke(gsmodutils.cli.test,\n ['--project_path', fp.path, '--verbose', '--test_id', test_codep])\n\n assert result.exit_code == 0\n result = runner.invoke(gsmodutils.cli.test,\n ['--project_path', fp.path, '--verbose',\n '--test_id', '{}::test_func'.format(test_codep)])\n\n assert result.exit_code == 0\n\n result = runner.invoke(gsmodutils.cli.test,\n ['--project_path', fp.path, '--verbose', '--test_id', 'test_syn_err.py'])\n\n assert result.exit_code == -1\n\n project.run_tests()", "title": "" }, { "docid": "8eebd7ed63c993155327a5fcbdc1aa92", "score": "0.5747528", "text": "def test_basic():", "title": "" }, { "docid": "9a347d847e9522f27238ee0f9c7ef543", "score": "0.57246214", "text": "def _test():\n import doctest\n import os\n if os.path.isdir(os.path.join(\"..\", \"..\", \"Tests\")):\n print \"Runing doctests...\"\n cur_dir = os.path.abspath(os.curdir)\n os.chdir(os.path.join(\"..\", \"..\", \"Tests\"))\n doctest.testmod()\n os.chdir(cur_dir)\n del cur_dir\n print \"Done\"\n elif os.path.isdir(os.path.join(\"Tests\", \"Fasta\")):\n print \"Runing doctests...\"\n cur_dir = os.path.abspath(os.curdir)\n os.chdir(os.path.join(\"Tests\"))\n doctest.testmod()\n os.chdir(cur_dir)\n del cur_dir\n print \"Done\"", "title": "" }, { "docid": "a78f6faa1641e628ff762e6edf5b21bd", "score": "0.57102853", "text": "def test_scan_model(self):\n root = os.path.join(os.environ[DM], 'doc', 'examples')\n files = scan_model(root)\n expected = set([os.path.join(root, f) for f in ('badModel.rst',\n 'sdR.rst',\n 'spPlate.rst')])\n found = 
set([p.filename for p in files])\n self.assertEqual(expected, found)", "title": "" }, { "docid": "f927b842f62e8beab18680493713ccde", "score": "0.569676", "text": "def test_check__multi_fail(capsys, tmp_path):\n file_path1 = tmp_path / \"test_markdown1.md\"\n file_path2 = tmp_path / \"test_markdown2.md\"\n file_path1.write_text(UNFORMATTED_MARKDOWN)\n file_path2.write_text(UNFORMATTED_MARKDOWN)\n run((str(tmp_path), \"--check\"))\n captured = capsys.readouterr()\n assert str(file_path1) in captured.err\n assert str(file_path2) in captured.err", "title": "" }, { "docid": "fdd20d9c9e2664a5ac5acb44f0665b07", "score": "0.5695465", "text": "def test_story_us05(self):\n self.assertEqual(Team4_Project_File.US05(),'US05 - Error : Individual - I3, I6 have marriage before death')", "title": "" }, { "docid": "e38c23b226a38269deac5aebe3557c99", "score": "0.5694312", "text": "def test_trademark_linter(tmp_path: Path):\n test_file = tmp_path / \"test.md\"\n test_file.write_text(TEST_FILE_CONTENTS)\n apply_lint_function(test_file)\n assert test_file.read_text() == GROUND_TRUTH", "title": "" }, { "docid": "437002a87841921ef0e3ea7917373a2b", "score": "0.56895095", "text": "def test_own_readme():\n readme = Path(__file__).parent.parent / \"README.rst\"\n rendered = render(readme.read_text(encoding=\"utf-8\"))\n assert rendered is not None", "title": "" }, { "docid": "95193401ec38e5923b557b286c04ffdf", "score": "0.5687558", "text": "def test_md009_good_paragraph_no_extra():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md009\", \"good_paragraph_no_extra.md\"\n )\n supplied_arguments = [\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 0\n expected_output = \"\"\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "bdcdf203e3a05d5fd18f5aac99438723", "score": "0.5687056", "text": "def testA():\n print(\"\\t\" + \"Running testA\")\n lab06.assert_equals('')\n pass # PLACEHOLDER - REPLACE WITH YOUR CODE", "title": "" }, { "docid": "2bced46e0691ca47cdc90c82e88d5489", "score": "0.56813955", "text": "def test_single_story_label():\n pass", "title": "" }, { "docid": "1de9225b942d1277a44158491c820e3d", "score": "0.5680589", "text": "def test_readmeExamplesAreExecutable(self) -> None:\n from tests.source_files import readme_examples", "title": "" }, { "docid": "3d7fd522fea4de964fe5236f4a46f294", "score": "0.5680228", "text": "def test_story_us05(self):\n self.assertEqual(Team4_Project_File.US04(),'US04 - Error : Family - F3, F6 have been divorced before marriage')", "title": "" }, { "docid": "ff870b02a84361bd136a8bdb2be5fcf7", "score": "0.56710947", "text": "def test_Campbell15():", "title": "" }, { "docid": "6720f4643a1f8f700c014f683fb389c3", "score": "0.5666835", "text": "def test_demo(self):\n\t\tself.assertTrue(1==1)", "title": "" }, { "docid": "bd037ce84427b549f64f51bc29f0fd72", "score": "0.56640846", "text": "def test(lang, file_manager, tool, line_numbers, forced_lines, description, \\\n indices, initial_fname=None, check_forced=True, classpath=None, sourcepath=None):\n if not indices:\n return True\n\n if check_forced and not Utilities.is_subset(forced_lines, indices):\n return True\n\n compiler = tool.get_compiler()\n original_fname = file_manager.get_source_path()\n if initial_fname:\n fname = initial_fname\n else:\n fname = 
file_manager.get_trial_source_path()\n \n if lang == 'java':\n result = compiler.compile(original_fname, fname, \\\n file_manager.get_build_dir(), classpath, sourcepath)\n elif lang == 'c':\n result = compiler.compile(original_fname, \\\n file_manager.get_build_object_path(), \n fname)\n\n if result == 0:\n failed = False\n if lang == 'java':\n tool.handle_compile_file(file_manager.get_build_class_path(), \\\n compiler.get_command(original_fname), \\\n classpath=classpath, sourcepath=sourcepath)\n elif lang == 'c':\n tool.handle_compile_file(original_fname, \\\n fname, \\\n compiler.get_command(original_fname))\n parsed_output = tool.get_tool_output(filtered=False)\n for (file_name, lines, _, desc) in parsed_output:\n # TODO: include multiple lines like in M B NP: com.javacodegeeks.example.findBugsPluginExample.OfConcernRankBugs.equals(Object) does not check for null argument At OfConcernRankBugs.java:[lines 32-33]\n if (len(lines)) > 1:\n continue\n fname1 = os.path.basename(file_name)\n fname2 = os.path.basename(file_manager.get_original_source_path())\n equal_description = True\n if description:\n desc1 = re.sub(r'line \\d+', 'line X', description).strip()\n desc2 = re.sub(r'line \\d+', 'line X', desc).strip()\n if '?' in desc1 or '?' in desc2:\n equal_description = are_equal_but(desc1, desc2, '?')\n else:\n equal_description = desc1 == desc2\n if fname1 == fname2 and \\\n check_indices(indices, line_numbers, lines) and \\\n equal_description:\n failed = True\n break\n\n return not failed\n else:\n return True", "title": "" }, { "docid": "0b376981f74fc7ebc80546979fdb2d5b", "score": "0.56488097", "text": "def test_doc_string_generate_deck_single_expression():\n assert generate_deck_single_expression.__doc__ is not None, \"missing docstring\"", "title": "" }, { "docid": "f984e331fe7d73b478796e9518216044", "score": "0.5644209", "text": "def check_doc(fn, content, sample):\n fp = os.path.abspath(os.path.join(KIOutils.dir_path(__file__), 'samples', sample, fn))\n with open(fp, 'rb') as f:\n sample_content = f.read()\n assert sample_content == content, \\\n '{}(len={}) != {}(len={})'.format('sample', len(sample_content), fn, len(content))", "title": "" }, { "docid": "29a400d29e7659d0d61048e34a5bc72d", "score": "0.5644096", "text": "def test_dummy_compiles_with_codelinter_languages(\n app: Sphinx, status: StringIO\n) -> None:\n if app.builder is not None:\n app.builder.build_all()\n\n assert os.path.exists(app.outdir)\n assert not os.listdir(app.outdir)\n assert \"[Line 6] linting json\" not in status.getvalue()\n assert \"[Line 10] linting\" not in status.getvalue()\n assert \"[Line 14] linting json\" not in status.getvalue()\n assert \"[Line 18] linting yaml\" not in status.getvalue()\n assert \"[Line 26] linting yaml\" not in status.getvalue()\n assert \"[Line 34] linting json\" not in status.getvalue()\n assert \"[Line 38] linting\" not in status.getvalue()", "title": "" }, { "docid": "edf69e1aaeb51baead5be0173fae2fc1", "score": "0.56365997", "text": "def test_xyz():", "title": "" }, { "docid": "f06e115d47f24cf292abf7eef0ea23ae", "score": "0.5629055", "text": "def test_ReviewFunctions__doc(self):\n for function in self.review:\n self.assertIsNot(function[1].__doc__, None, '{:s} method should be\\\n properly documented'.format(function[0]))\n self.assertTrue(len(function[1].__doc__) >= 1, '{:s} method should\\\n be properly documented'.format(function[0]))", "title": "" }, { "docid": "ebfe426a08ea4e1b5025f9ce52a608a6", "score": "0.56277114", "text": "def 
test_generate_run_project(testfile_creator, testfile_tester):\n testfile = testfile_creator(\"project.md\")\n result = testfile_tester(\n contents=testfile, pytest_options=[\"-v\", \"--doctest-modules\"]\n )\n result.assert_outcomes(passed=4)", "title": "" }, { "docid": "97cf92ec673f5003127cadaa14f6f7fc", "score": "0.5623756", "text": "def check_asserttruefalse(logical_line, filename):\n\n if 'ovn_octavia_provider/tests/' in filename:\n if re.search(r\"assertEqual\\(\\s*True,[^,]*(,[^,]*)?\", logical_line):\n msg = (\"N328: Use assertTrue(observed) instead of \"\n \"assertEqual(True, observed)\")\n yield (0, msg)\n if re.search(r\"assertEqual\\([^,]*,\\s*True(,[^,]*)?\", logical_line):\n msg = (\"N328: Use assertTrue(observed) instead of \"\n \"assertEqual(True, observed)\")\n yield (0, msg)\n if re.search(r\"assertEqual\\(\\s*False,[^,]*(,[^,]*)?\", logical_line):\n msg = (\"N328: Use assertFalse(observed) instead of \"\n \"assertEqual(False, observed)\")\n yield (0, msg)\n if re.search(r\"assertEqual\\([^,]*,\\s*False(,[^,]*)?\", logical_line):\n msg = (\"N328: Use assertFalse(observed) instead of \"\n \"assertEqual(False, observed)\")\n yield (0, msg)", "title": "" }, { "docid": "5857ae92345a7e66ac64321cd93def7c", "score": "0.56024694", "text": "def test_spec_reference(self):\n\n good_fasta = '{0}/spec/test1.fasta'.format(GOLDEN_DIR)\n bad_contig_fasta = '{0}/spec/test2.fasta'.format(GOLDEN_DIR)\n bad_md5_fasta = '{0}/spec/test3.fasta'.format(GOLDEN_DIR)\n bad_ids_fasta = '{0}/spec/broken.fasta'.format(GOLDEN_DIR)\n\n good_ref = '{0}/good_ref.flat'.format(self.data_dir)\n bad_contig_ref = '{0}/bad_contig_ref.flat'.format(self.data_dir)\n bad_md5_ref = '{0}/bad_md5_ref.flat'.format(self.data_dir)\n\n test_bam = '{0}/spec/test.bam'.format(GOLDEN_DIR)\n test_spec = '{0}/test.spec'.format(self.data_dir)\n test_local_bam = '{0}/test.bam'.format(self.data_dir)\n\n test_bam = '{0}/spec/test.bam'.format(GOLDEN_DIR)\n\n self.check(\"fasta2ref {0} {1}\".format(good_fasta, good_ref))\n self.check(\"fasta2ref {0} {1}\".format(bad_contig_fasta, bad_contig_ref))\n self.check(\"fasta2ref {0} {1}\".format(bad_md5_fasta, bad_md5_ref))\n\n self.check(\"bam2spec --in {0} --ref {1} --out {2} --no-match-reference > /dev/null 2>&1\".format(test_bam, good_ref, test_spec))\n spec2bam_cmd = \"spec2bam --in {0} --ref {1} --out {2} > /dev/null 2>&1\".format(test_spec, good_ref, test_local_bam)\n self.check(spec2bam_cmd)\n os.remove(test_local_bam)\n spec2bam_cmd = \"spec2bam --in {0} --ref {1} --out {2} > /dev/null 2>&1\".format(test_spec, bad_contig_ref, test_local_bam)\n self.assertEqual(2, subprocess.call(spec2bam_cmd, shell=True), \"spec2bam should fail on a reference with wrong contigs, but didn't\")\n spec2bam_cmd = \"spec2bam --in {0} --ref {1} --out {2} > /dev/null 2>&1\".format(test_spec, bad_md5_ref, test_local_bam)\n self.assertEqual(2, subprocess.call(spec2bam_cmd, shell=True), \"spec2bam should fail on a reference with different contigs, but didn't\")\n\n cmd = \"fasta2ref {0} invalid.ref > /dev/null 2>&1\".format(bad_ids_fasta)\n self.assertEqual(2, subprocess.call(cmd, shell=True), \"fasta2ref should fail on a reference with duplicate ids, but didn't\")\n\n cmd = \"bam2spec --in {0} --ref {1} --out {2}/none.spec -f > /dev/null 2>&1\".format(test_bam, bad_contig_ref, self.data_dir)\n self.assertEqual(1, subprocess.call(cmd, shell=True), \"bam2spec should fail on a reference with wrong contigs, but didn't\")\n\n cmd = \"bam2spec --in {0} --ref {1} --out {2}/none.spec -f --no-match-reference > 
/dev/null 2>&1\".format(test_bam, bad_contig_ref, self.data_dir)\n self.check(cmd)", "title": "" }, { "docid": "47daf845f0a3a35c19fbc7abc557ab7f", "score": "0.55937225", "text": "def load_tests(_, tests, __): # pylint: disable=redefined-outer-name\n in_len = tests.countTestCases()\n tests = _load_tests(_, tests, __)\n out_len = tests.countTestCases()\n # Test whether at least one doctest is found (which could be the\n # doctest for this function)\n assert in_len < out_len\n\n return tests", "title": "" }, { "docid": "b55980d010b0d0ce95c6f1c5afe78f16", "score": "0.55880183", "text": "def check_docstrings_are_in_md():\n files_with_rst = []\n for file in Path(PATH_TO_TRANSFORMERS).glob(\"**/*.py\"):\n with open(file, encoding=\"utf-8\") as f:\n code = f.read()\n docstrings = code.split('\"\"\"')\n\n for idx, docstring in enumerate(docstrings):\n if idx % 2 == 0 or not is_rst_docstring(docstring):\n continue\n files_with_rst.append(file)\n break\n\n if len(files_with_rst) > 0:\n raise ValueError(\n \"The following files have docstrings written in rst:\\n\"\n + \"\\n\".join([f\"- {f}\" for f in files_with_rst])\n + \"\\nTo fix this run `doc-builder convert path_to_py_file` after installing `doc-builder`\\n\"\n \"(`pip install git+https://github.com/huggingface/doc-builder`)\"\n )", "title": "" }, { "docid": "37d3afe4087e4d4dea30e70dfdcf9666", "score": "0.5573609", "text": "def load_tests(loader, tests, ignore):\n\n tests.addTests(doctest.DocFileSuite(\"tests.txt\"))\n\n return tests", "title": "" }, { "docid": "4e65d2ea1d4aadb6777a833f5cc525b1", "score": "0.5571319", "text": "def test_buildlatex_no_output():\n assert not re.search(br'\\\\(sphinx)?includegraphics\\{+plantuml-',\n readfile('plantuml_fixture.tex'))", "title": "" }, { "docid": "63c4f1fe47d2aa2073298296651cfbde", "score": "0.55678254", "text": "def has_test_docs(self):\n pass", "title": "" }, { "docid": "58b080a7035077ac8224c9dbc58a5a73", "score": "0.55628055", "text": "def tests():\n pass", "title": "" }, { "docid": "e9a03f73efd577104acf763af1838f04", "score": "0.5560621", "text": "def test_trivial():\n assert True", "title": "" }, { "docid": "782e7401a2a9bd26998fd21da1cf0947", "score": "0.5556147", "text": "def testSkeleton(self):\r\n\r\n #self.fail(\"Test if the testcase is working.\")\r\n self.assert_(True)", "title": "" }, { "docid": "30192dfe11a52ebb8e5c0bba5a306b99", "score": "0.55520916", "text": "def test_missing_file_lines(self):\n\n with self.assertRaises(SnippetMissingError):\n self.check_markdown(\n R'''\n --8<-- \":3:4\"\n ''',\n '''\n ''',\n True\n )", "title": "" }, { "docid": "70b5805d50fbba5a194150664e926ff6", "score": "0.55499995", "text": "def test_function():", "title": "" }, { "docid": "1996573f8fd08abd8c20cb78f5d337bd", "score": "0.55431944", "text": "def self_test():", "title": "" }, { "docid": "413b5285a494a65b439e0d8ef6a7ffba", "score": "0.5542598", "text": "def test_positive_examples(self):\n n = 0\n for rpt in self._test_file_iter('should_pass'):\n print(f\"TESTED+ : {rpt.rdf_file}\")\n n += 1\n if not rpt.all_successful:\n print(f\"FAILURES IN : {rpt.rdf_file}\")\n for (inst, sc, reason) in rpt.fail_list:\n print(f\"FAIL: {inst} {sc} REASON: {reason}\")\n self.assertTrue(rpt.all_successful)\n print(f\"Ran {n} positive examples\")", "title": "" }, { "docid": "7cb318e9fdf844a0895a96b618aa3042", "score": "0.5541938", "text": "def test_other():", "title": "" }, { "docid": "90055cf9683f3463e641ecae2c43aa7c", "score": "0.554127", "text": "def test_module_docstring(self):\n self.assertTrue(len(state.__doc__) >= 
1)", "title": "" }, { "docid": "2dd8b73e0d1d446ccbbfba8d72083ba0", "score": "0.55399823", "text": "def test_the_tests(self):\n self.assertEqual(True,True)", "title": "" }, { "docid": "b48361993a95c56b0e2467d5a6b9eea7", "score": "0.55382925", "text": "def test_generate_readme_contents():\n readme = gen_readme.generate_readme()\n\n # Look for a string we know should appear in the README.\n assert \"### zmake build\\n\" in readme", "title": "" }, { "docid": "c9ab5a1104e5ec97e8c58b04d33fe994", "score": "0.55338514", "text": "def test_completeness_of_macros_in_generated(self):\n missing_macros =\\\n check_completeness_of_macros_in_generated(pmdk_path)\n error_msg =\\\n linesep + 'List of missing macros in the generated directory:'\n for macro in missing_macros:\n error_msg += linesep + macro\n self.assertFalse(missing_macros, error_msg)", "title": "" }, { "docid": "38b21e7ef3b4fe6b4f9ee0fc5ee373df", "score": "0.55335474", "text": "def test(self):\n self.assertTrue(approval_defs.is_valid_field_id(1))\n self.assertTrue(approval_defs.is_valid_field_id(2))\n self.assertFalse(approval_defs.is_valid_field_id(3))", "title": "" }, { "docid": "e48e7b9c0e423c843a71a2474579d0b1", "score": "0.55326515", "text": "def test_utilities():\n from .aberdeen import test_generateBatches, test_path, test_dict_file, \\\n test_extract, test_find, display_configure, display_resources\n from .direct import display_direct\n\n test_generateBatches()\n test_path()\n test_dict_file()\n test_extract()\n #test_find()\n display_direct()\n display_configure()\n display_resources()\n print('<@test.py> All utility tests have been passed')", "title": "" }, { "docid": "507cf8cc8469a83a37f2fc7f498e38ae", "score": "0.55298567", "text": "def test_add_two(module):\n yield Check('add_two(1)')\n\n # This will fail the first time, but will be corrected with ECF.\n # Both facts will be included in the feedback\n yield Check('add_two(99)')", "title": "" }, { "docid": "b2eaa477d60af4dba8a92f645216fd20", "score": "0.5507019", "text": "def test_simple(ctestdir):\n ctestdir.makepyfile(\"\"\"\n import pytest\n\n @pytest.mark.dependency()\n def test_a():\n pass\n\n @pytest.mark.dependency()\n def test_b():\n assert False\n\n @pytest.mark.dependency(depends=[\"test_b\"])\n def test_c():\n pass\n\n @pytest.mark.dependency(depends=[\"test_c\"])\n def test_d():\n pass\n \"\"\")\n result = ctestdir.runpytest(\"--verbose\", \"-rs\")\n result.assert_outcomes(passed=1, skipped=2, failed=1)\n result.stdout.re_match_lines(r\"\"\"\n .*::test_a PASSED\n .*::test_b FAILED\n .*::test_c SKIPPED(?:\\s+\\(.*\\))?\n .*::test_d SKIPPED(?:\\s+\\(.*\\))?\n \"\"\")\n result.stdout.fnmatch_lines_random(\"\"\"\n SKIP* test_c depends on test_b\n SKIP* test_d depends on test_c\n \"\"\")", "title": "" }, { "docid": "098ef63488a580df7319f797ce15f947", "score": "0.5505462", "text": "def _test_make_documentation(self, tppath):\n args = [self.execpath, 'make', 'documentation']\n retval = subprocess.call(args, cwd=tppath)\n self.assertEqual(0, retval)\n\n # Check that the documentation files exist.\n exp_files = [\n 'documentation/test.html',\n 'documentation/test.md',\n 'documentation/documentation_styles.css',\n 'documentation/navtree.js'\n ]\n exp_files = [os.path.join(tppath, filepath) for filepath in exp_files]\n\n for exp_file in exp_files:\n self.assertTrue(\n os.path.isfile(exp_file),\n msg='Could not find expected file: {0}'.format(exp_file)\n )", "title": "" }, { "docid": "3c962e9a486865514ed50e4c1e6b65b9", "score": "0.55051285", "text": "def do_asserts_work():\n 
pass", "title": "" }, { "docid": "5437fee1a6a9ec1d8c8d7eed53400f63", "score": "0.5500815", "text": "def create_writer_tests(files):\n for rst_file in files:\n pass", "title": "" }, { "docid": "ddec6dd7ba22815eca7241b367deb64f", "score": "0.5492957", "text": "def test_abc():\n\n assert", "title": "" }, { "docid": "271c9064e53fa7a65ed0385250674724", "score": "0.5491335", "text": "def test_test():\n pass", "title": "" }, { "docid": "66342ab51d8d53981de648ca89724675", "score": "0.5491141", "text": "def test_something(self):\r\n return True", "title": "" }, { "docid": "635d084887d28b51ba2d332cc72fe669", "score": "0.54909986", "text": "def test_content_analyst_in_the_qa_view_render_the_markdown_7687(self):\n self.ps.test_updates['name'] = 'cc1.04.030' \\\n + inspect.currentframe().f_code.co_name[4:]\n self.ps.test_updates['tags'] = [\n 'cc1',\n 'cc1.04',\n 'cc1.04.030',\n '7687'\n ]\n self.ps.test_updates['passed'] = False\n\n # Test steps and verification assertions\n self.content.login()\n self.content.open_user_menu()\n self.content.wait.until(\n expect.visibility_of_element_located(\n (By.LINK_TEXT, 'QA Content')\n )\n ).click()\n # change book to physics\n self.content.wait.until(\n expect.visibility_of_element_located(\n (By.ID, 'available-books')\n )\n ).click()\n self.content.wait.until(\n expect.visibility_of_element_located(\n (By.XPATH,\n '//div[@class=\"title-version\"]' +\n '/span[contains(text(),\"Physics\")]')\n )\n ).click()\n # click on a non-intro section\n self.content.wait.until(\n expect.visibility_of_element_located(\n (By.XPATH, '//li[@data-section=\"2.4\"]')\n )\n ).click()\n # click show content\n self.content.wait.until(\n expect.visibility_of_element_located(\n (By.XPATH, '//li[@class=\"teacher-edition\"]/a')\n )\n ).click()\n # check for a graph\n self.content.wait.until(\n expect.visibility_of_element_located(\n (By.XPATH, '//img[contains(@alt,\"graph\")]')\n )\n )\n self.ps.test_updates['passed'] = True", "title": "" } ]
d0d73c71f77331a5d87c6cba7055c1aa
Builds graph from this network and returns it.
[ { "docid": "a7656b063a75969adf00c965d0bda3d4", "score": "0.7587391", "text": "def buildGraph(self):\n self.normalize()\n \n g = nx.DiGraph()\n \n for k, n in self.nodes.items():\n g.add_node(n.node_id, color=n.color(), n_type=n.neuron_type)\n \n for l in self.links:\n if l.node_in not in self.nodes or l.node_out not in self.nodes:\n raise Exception('Incorrect link detected')\n elif l.node_in == l.node_out:\n nx.add_cycle(g, [l.node_in, l.node_in], weight=l.weight)\n else:\n g.add_edge(l.node_in, l.node_out, weight=l.weight) \n \n return g", "title": "" } ]
[ { "docid": "49435ede5f626dcb445420c22594340d", "score": "0.72999275", "text": "def build_graph(self):\n raise NotImplementedError(\"Subclass must override build_graph()!\")", "title": "" }, { "docid": "a8b3c9bb9f9d47bd9ab83839a14249f9", "score": "0.7252171", "text": "def _build(self, graph, prev_state):\n\n nodes_to_collect = []\n\n if self._use_received_edges:\n nodes_to_collect.append(self._received_edges_aggregator(graph))\n\n if self._use_sent_edges:\n nodes_to_collect.append(self._sent_edges_aggregator(graph))\n\n if self._use_nodes:\n blocks._validate_graph(graph, (NODES,), \"when use_nodes == True\")\n nodes_to_collect.append(graph.nodes)\n\n if self._use_globals:\n nodes_to_collect.append(blocks.broadcast_globals_to_nodes(graph))\n\n collected_nodes = tf.concat(nodes_to_collect, axis=-1)\n updated_nodes, update_state = self._node_model(collected_nodes, prev_state)\n return graph.replace(nodes=updated_nodes), update_state", "title": "" }, { "docid": "cb1e45d6b8923d465c9f0b15caf85752", "score": "0.7207046", "text": "def get_graph(self):\n G = networkx.Graph() if self.is_symmetric() else networkx.DiGraph()\n G.graph['name'] = self.name\n G.graph['comment'] = self.comment\n G.graph['type'] = self.type\n G.graph['dimension'] = self.dimension\n G.graph['capacity'] = self.capacity\n G.graph['depots'] = self.depots\n G.graph['demands'] = self.demands\n G.graph['fixed_edges'] = self.fixed_edges\n\n if not self.is_explicit():\n for i, coord in self.node_coords.items():\n G.add_node(i, coord=coord)\n\n for i, j in self.get_edges():\n weight = self.wfunc(i, j)\n is_fixed = (i, j) in self.fixed_edges\n G.add_edge(i, j, weight=weight, is_fixed=is_fixed)\n\n return G", "title": "" }, { "docid": "1fc510724f26d79d11f2980558ccf7b5", "score": "0.7025545", "text": "def get_graph(self):\n if not self.g:\n self.make_graph()\n\n return self.g", "title": "" }, { "docid": "1fc510724f26d79d11f2980558ccf7b5", "score": "0.7025545", "text": "def get_graph(self):\n if not self.g:\n self.make_graph()\n\n return self.g", "title": "" }, { "docid": "53baa80a9368a5309875812e85dba735", "score": "0.700314", "text": "def _get_graph(self):\n graph = nx.DiGraph()\n topo = self.get_topology()\n\n for node in topo.get(\"node\", []):\n if 'opendaylight-topology-inventory:inventory-node-ref' in node:\n graph.add_node(node[\"node-id\"])\n\n for link in topo.get(\"link\", []):\n src = link[\"source\"]\n dst = link[\"destination\"]\n\n if src[\"source-node\"] in graph.nodes() and dst[\"dest-node\"] in graph.nodes():\n graph.add_edge(src[\"source-node\"], dst[\"dest-node\"], attr_dict={\n \"src_tp\": src[\"source-tp\"],\n \"dst_tp\": dst[\"dest-tp\"]\n })\n graph.add_edge(dst[\"dest-node\"], src[\"source-node\"], attr_dict={\n \"src_tp\": dst[\"dest-tp\"],\n \"dst_tp\": src[\"source-tp\"]\n })\n\n return graph", "title": "" }, { "docid": "b703739ecea1399654705f35d3a16389", "score": "0.6973986", "text": "def _build(self, graph, prev_state):\n blocks._validate_graph(\n graph, (SENDERS, RECEIVERS, N_EDGE), \" when using an EdgeBlock\")\n\n edges_to_collect = []\n\n if self._use_edges:\n blocks._validate_graph(graph, (EDGES,), \"when use_edges == True\")\n edges_to_collect.append(graph.edges)\n\n if self._use_receiver_nodes:\n edges_to_collect.append(blocks.broadcast_receiver_nodes_to_edges(graph))\n\n if self._use_sender_nodes:\n edges_to_collect.append(blocks.broadcast_sender_nodes_to_edges(graph))\n\n if self._use_globals:\n edges_to_collect.append(blocks.broadcast_globals_to_edges(graph))\n\n collected_edges = 
tf.concat(edges_to_collect, axis=-1)\n updated_edges, updated_state = self._edge_model(collected_edges, prev_state)\n return graph.replace(edges=updated_edges), updated_state", "title": "" }, { "docid": "a927ca62ca5c458ecb6c3a321a5f4e72", "score": "0.69119686", "text": "def to_graph(self):\n for n1 in self.network:\n self.graph.get_node(n1.__str__()).attr['fontsize'] = (\n (n1.activation)*25/100+15)\n # if len(n1.linksOut)+len(n1.linksIn) ==0:\n # self.graph.get_node(n1.__str__()).attr['style'] = \"invis\"\n # else:\n # self.graph.get_node(n1.__str__()).attr['style'] = \"\"\n for n1 in self.network:\n for d, l in n1.linksOut.items():\n if(len(l)==2):\n self.graph.get_edge(\n n1.__str__(), d.__str__()).attr['weight'] = l[0]\n else:\n self.graph.get_edge(\n n1.__str__(), d.__str__()).attr['weight'] = l[0]\n return self.graph", "title": "" }, { "docid": "6f51ba4c14b008034f6bbe2ea088578e", "score": "0.68935305", "text": "def return_graph(self):\n return self.__graph", "title": "" }, { "docid": "03bd86587fde1abda5c3fead9b5f0873", "score": "0.6877495", "text": "def build_graph(method):\n builder=PydronGraphBuilder(method)\n builder.build()\n return builder.graph", "title": "" }, { "docid": "6ed391ea6babc8c6b7209079286b2ee8", "score": "0.68421054", "text": "def build_network(self):\n\n # Create output neurons\n for ii in range(self.output_size):\n label = self.output_labels[ii]\n\n self.g_dict[self.node_id] = {\n 'type': 'output',\n 'label': label,\n 'connections': None,\n 'function': ActivationFunction(function=self.o_function)\n }\n\n self.node_id += 1\n\n # Create input neurons and connect them to the output neurons\n for ii in range(self.input_size):\n label = self.input_labels[ii]\n cons = {}\n\n for nn in self.g_dict.keys():\n if self.g_dict[nn]['type'] == 'output':\n cons[nn] = {\n 'weight': random.gauss(0, 1),\n 'enabled': True\n }\n\n self.g_dict[self.node_id] = {\n 'type': 'input',\n 'label': label,\n 'connections': cons,\n 'function': ActivationFunction(function='identity')\n }\n\n self.node_id += 1\n\n return", "title": "" }, { "docid": "28534d4f7df1ffa9d9f75727021e85e9", "score": "0.6826172", "text": "def build_graph(self):\n with self.graph.as_default():\n # Build essential graph components\n self.build_core_graph()\n # Build training graph if specified\n if self.is_training:\n self.build_train_graph()\n # Create global initialization function\n init = tf.global_variables_initializer()\n self.endpoints['init'] = init", "title": "" }, { "docid": "336b93463e27cb6af5751df7729358cd", "score": "0.6813405", "text": "def build_graph() -> nx.DiGraph:\n graph = nx.DiGraph()\n home = pathlib.Path.home()\n conn = sqlite3.connect(home / \".emacs.d\" / \".local\" / \"cache\" / \"org-roam.db\")\n\n # Query all published nodes first\n nodes = conn.execute(\"SELECT DISTINCT nodes.file, nodes.id, nodes.title, tags.tag \"\n \"FROM nodes \"\n \"JOIN tags on nodes.id = tags.node_id \"\n \"WHERE level = 0 AND tags.tag='\\\"publish\\\"';\")\n # A double JOIN to get all nodes that are connected by a link\n links = conn.execute(\"SELECT n1.id, nodes.id FROM ((nodes AS n1) \"\n \"JOIN links ON n1.id = links.source) \"\n \"JOIN (nodes AS n2) ON links.dest = nodes.id \"\n \"WHERE links.type = '\\\"id\\\"';\")\n # Populate the graph\n graph.add_nodes_from((n[1], {\n \"label\": n[2].strip(\"\\\"\"),\n \"tooltip\": n[2].strip(\"\\\"\"),\n \"lnk\": to_rellink(n[0]).lower(),\n \"id\": n[1].strip(\"\\\"\")\n }) for n in nodes)\n graph.add_edges_from(n for n in links if n[0] in graph.nodes and n[1] in 
graph.nodes)\n conn.close()\n return graph", "title": "" }, { "docid": "4e1652e2ff591de78559c69d7df7c857", "score": "0.67688674", "text": "def graph(self) -> nx.DiGraph:\n return self._graph", "title": "" }, { "docid": "e3650305b172b325695ea19c468080d2", "score": "0.6761877", "text": "def _build(self, graph, prev_state):\n globals_to_collect = []\n\n if self._use_edges:\n blocks._validate_graph(graph, (EDGES,), \"when use_edges == True\")\n globals_to_collect.append(self._edges_aggregator(graph))\n\n if self._use_nodes:\n blocks._validate_graph(graph, (NODES,), \"when use_nodes == True\")\n globals_to_collect.append(self._nodes_aggregator(graph))\n\n if self._use_globals:\n blocks._validate_graph(graph, (GLOBALS,), \"when use_globals == True\")\n globals_to_collect.append(graph.globals)\n\n collected_globals = tf.concat(globals_to_collect, axis=-1)\n updated_globals, updated_state = self._global_model(collected_globals, prev_state)\n return graph.replace(globals=updated_globals), updated_state", "title": "" }, { "docid": "6083503551e62347b4ebbc0065c22ba6", "score": "0.66976994", "text": "def get_graph(self):\n return self.graph", "title": "" }, { "docid": "78fc581898fc8c038905660209c4bc95", "score": "0.6684714", "text": "def build_graph(self, sim_mat):\n\n sim_graph = nx.from_numpy_array(sim_mat)\n return sim_graph", "title": "" }, { "docid": "adcc12cc7ce992d1c029242b2d1edb17", "score": "0.6651257", "text": "def get_underlying_graph(self):\r\n # update the graph label.\r\n label = self.label + ' [underlying graph]'\r\n # create a static graph.\r\n graph = self._staticclass(label)\r\n # for each edge in the original graph.\r\n for edge in self.edges.set:\r\n # add the edge to the snapshot.\r\n graph.add_edge(edge.node1.label, edge.node2.label)\r\n # for each node in the graph.\r\n for node in self.nodes.set:\r\n # add the node to the snapshot.\r\n graph.add_node(node.label)\r\n # return the snapshot.\r\n return graph", "title": "" }, { "docid": "8ec2202833ac261ef147513bda696481", "score": "0.66297376", "text": "def graph(self):\n return self.graph", "title": "" }, { "docid": "75ac9e1296d99da07432fce1152e721c", "score": "0.6620961", "text": "def build_graph(self, topology):\n\t\tself.G = nx.Graph()\n\t\tnodes_pos_dict = topology.nodes_2D_pos()\n\t\t# add prop\t\t\n\t\tself.n_nodes = len(nodes_pos_dict)\n\t\t# add nodes\n\t\tself.G.add_nodes_from([i for i in range(self.n_nodes)])\n\n\t\t# add weighted edges or edges\n\t\tif self.is_weight:\n\t\t\tself.G.add_weighted_edges_from(topology.weighted_edges())\n\t\telse:\n\t\t\tself.G.add_edges_from(topology.edges())\n\n\t\t# add prop\n\t\tself.n_edges = len(self.G.edges())\n\t\t# add node attributes\n\t\tnx.set_node_attributes(self.G, name='position', values=nodes_pos_dict)", "title": "" }, { "docid": "d043475b92462b0c4f2bca700d4615ef", "score": "0.6578204", "text": "def graph(self):\n return self._graph", "title": "" }, { "docid": "69ec803dca81ca78898090fc44d760f1", "score": "0.6567824", "text": "def _generate_graph(self, directed=True):\n\n if directed:\n graph = nx.DiGraph()\n else:\n graph = None\n nx.parse_edgelist([i[\"source\"] + \",\" + i[\"target\"] for i in self.json_graph[\"links\"]],\n delimiter=\",\", nodetype=str, create_using=graph)\n self.g = graph\n return self", "title": "" }, { "docid": "c208ca0132d7300e64d0c9541908c5fa", "score": "0.646341", "text": "def create_networkx(self):\n graph = 
networkx.DiGraph(source=self.directory)\n self.graph = graph\n \n # Add nodes excluding llt\n levels = ['soc', 'hlgt', 'hlt', 'pt']\n for level in levels:\n read_function = getattr(self, 'read_{}'.format(level))\n for row in read_function():\n code = row['{}_code'.format(level)]\n name = row['{}_name'.format(level)]\n row['name'] = name\n row['level'] = level\n graph.add_node(code, row)\n \n # Add edges\n relation_files = ['soc_hlgt', 'hlgt_hlt', 'hlt_pt']\n fieldnames = ['parent_code', 'child_code']\n for name in relation_files:\n for relation in self.read_asc(name, fieldnames):\n graph.add_edge(relation['parent_code'], relation['child_code'])\n \n assert networkx.is_directed_acyclic_graph(graph)\n return self.graph", "title": "" }, { "docid": "88d25574b014dfe34378849c3ac6041e", "score": "0.6459986", "text": "def _build_graph(self):\n hparams = self.hparams\n model, scorer = self._build_nrms()\n return model, scorer", "title": "" }, { "docid": "78ab148f725d99ccc40e364e3630fbc8", "score": "0.6433289", "text": "def create_graph(self):\r\n self.G = nx.MultiDiGraph();\r\n for flight in self.flights:\r\n self.G.add_edge(\r\n flight[0], # u\r\n flight[1], # v\r\n origin = flight[0],\r\n dest = flight[1],\r\n departureTime = flight[2],\r\n departureDelay = flight[3],\r\n arrTime = flight[4],\r\n arrDelay = flight[5],\r\n elapsedTime = flight[6],\r\n flightCost = flight[7])\r\n \r\n for airport in self.G.nodes():\r\n self.G.nodes[airport]['prev'] = None\r\n self.G.nodes[airport]['cost'] = sys.maxsize\r\n self.G.nodes[airport]['wait_time'] = sys.maxsize\r\n \r\n return self.G", "title": "" }, { "docid": "1c81e19e4c53b9eab2e343a047730c70", "score": "0.639176", "text": "def build_graph(self):\n # with tf.device(self.main_device):\n # Configure input and output tensors\n self.config_placeholders()\n # Build the backbone network, then:\n # In training mode, configure training ops (loss, metrics, optimizer, and lr schedule)\n # Also, config train logger and, optionally, val logger\n # In validation mode, configure validation ops (loss, metrics)\n if self.mode in ['train_noval', 'train_with_val']:\n if self.num_gpus == 1:\n self.build_model()\n self.config_train_ops()\n else:\n self.build_model_towers_loss()\n # default 1 gpu\n elif self.mode in ['val', 'val_notrain']:\n self.build_model()\n self.setup_metrics_ops()\n\n else: # inference mode\n if self.num_gpus == 1:\n self.build_model()\n else:\n self.build_model_towers()\n\n\n # Set output tensors\n self.set_output_tnsrs()\n # Init saver (override if you wish) and load checkpoint if it exists\n self.init_saver()\n self.load_ckpt()", "title": "" }, { "docid": "6536af8226719c7a075daa861a7b8bf6", "score": "0.6366115", "text": "def initialize_graph(self):\n self.graph = ox.graph_from_place(\n self.place,\n network_type=self.transportation_mode\n )\n return self.graph", "title": "" }, { "docid": "842a59157fde57b20d037beb8856a9ac", "score": "0.635127", "text": "def build_graph(self):\n self.declare_placeholders()\n self.declare_variables()\n logit = self.inference()\n self.optimize(logit)\n self.predict(logit)\n self.compute_accuracy()", "title": "" }, { "docid": "b837026b60604e3edfcbb07824d86199", "score": "0.63488334", "text": "def get_graph():\n d = sumtri.Node(8, None, None)\n e = sumtri.Node(16, None, None)\n f = sumtri.Node(32, None, None)\n\n b = sumtri.Node(4, d, e)\n c = sumtri.Node(2, e, f)\n\n return sumtri.Node(1, b, c)", "title": "" }, { "docid": "3346edf1fa1da08cbcf5737d0c2d56b1", "score": "0.6333759", "text": "def copyGraph(self):\n g = 
Graph(self.__nrVertices)\n for edge in self.__cost.keys():\n g.addEdge(edge[0],edge[1], self.__cost[edge])\n return g", "title": "" }, { "docid": "79f8474429e8780708b8eca7fc7ccae8", "score": "0.6316442", "text": "def collectGraph(self, graph=None):\n\n # Initialize graph if necessary\n if graph is None:\n graph = {'nodes': list(), 'edges': list()}\n\n # Cycle detection - continue, but don't re-add this node to the graph\n if id(self) in [id(n) for n in graph['nodes']]:\n return graph\n\n # Add this node to the graph\n graph['nodes'].append(self)\n # Add all edges\n graph['edges'].extend([(x, self) for x in self.children()])\n for x in self.children():\n # Recursively add children and edges to the graph. This mutates\n # graph\n x.collectGraph(graph)\n\n # Return the graph\n return graph", "title": "" }, { "docid": "dc93595c840fdd71789f4c5dd51b7482", "score": "0.6308257", "text": "def to_networkx(self):\r\n\r\n G = nx.MultiDiGraph()\r\n for start in self.connections:\r\n ends = self.connections[start]\r\n for end in ends:\r\n G.add_edge(start, end)\r\n\r\n return G", "title": "" }, { "docid": "eabd807db853fdad619e0f848a229b00", "score": "0.62927616", "text": "def build_graph():\n # G = nx.DiGraph()\n G = nx.Graph()\n with open('data/initial_label_facts.json') as f_in:\n label_triples = json.load(f_in)\n rel_colors = {\n \"use\":\"red\",\n \"different from\":'green',\n \"subclass of\":'blue', \n \"has quality\":'purple',\n \"instance of\":'yellow', \n \"facet of\":'brown'}\n for rel, vals in label_triples.items():\n for pair in vals:\n G.add_edge(pair[0], pair[1], color=rel_colors.get(rel, 'black'))\n # print(len(list(nx.connected_components(G))))\n # print(sorted(d for n, d in G.degree()))\n # print(nx.clustering(G))\n print(approximation.average_clustering(G))\n # write_dot(G, 'test.dot')\n # $ dot -Tpng test.dot >test.png\n\n return label_triples", "title": "" }, { "docid": "706146855a679601850f1337d481e6a4", "score": "0.6253574", "text": "def build_graph(self, graph_id):\n\n if graph_id == 1:\n self.graph_icnet.clear()\n for transporter in self.transporters_icnet:\n self.graph_icnet.add_node(transporter.id)\n for cargo_owner in self.cargo_owners:\n self.graph_icnet.add_node(100000 + cargo_owner.id)\n for bid_icnet in self.bids_icnet:\n if self.graph_icnet.has_edge(bid_icnet.transporter_id, 100000 + bid_icnet.cargo_owner_id):\n self.graph_icnet[bid_icnet.transporter_id][100000 + bid_icnet.cargo_owner_id][\"weight\"] += 1\n else:\n self.graph_icnet.add_path([bid_icnet.transporter_id, 100000 + bid_icnet.cargo_owner_id], weight = 1)\n\n if graph_id == 2:\n self.graph_aicnet.clear()\n for transporter in self.transporters_icnet:\n self.graph_aicnet.add_node(transporter.id)\n for cargo_owner in self.cargo_owners:\n self.graph_aicnet.add_node(100000 + cargo_owner.id)\n for bid_aicnet in self.bids_aicnet:\n if self.graph_aicnet.has_edge(bid_aicnet.transporter_id, 100000 + bid_aicnet.cargo_owner_id):\n self.graph_aicnet[bid_aicnet.transporter_id][100000 + bid_aicnet.cargo_owner_id][\"weight\"] += 1\n else:\n self.graph_aicnet.add_path([bid_aicnet.transporter_id, 100000 + bid_aicnet.cargo_owner_id], weight = 1)", "title": "" }, { "docid": "2379ec645b9c78fe1e064cf2d5417635", "score": "0.6236189", "text": "def graph(self):\n if self.circuit is None:\n return None\n\n # returned DAG has all parameters set to 0\n bb = blackbird.loads(self.circuit)\n\n if bb.is_template():\n params = bb.parameters\n kwargs = {p: 0 for p in params}\n\n # initialize the topology with all template\n # parameters set to 
zero\n topology = to_DiGraph(bb(**kwargs))\n\n else:\n topology = to_DiGraph(bb)\n\n return topology", "title": "" }, { "docid": "cfb4112157b404a2503917db11324266", "score": "0.6229526", "text": "def build_graph(self, proto_data):\n logger.info(\"Start to build graph\")\n start_time = time.time()\n\n # Notice:\n # The following methods are interdependent and cannot be switched at will.\n self._parse_data(proto_data)\n self._add_variable_nodes(NodeTypeEnum.PARAMETER.value)\n self._build_aggregation_scope_nodes()\n self._process_independent_layout()\n self._build_name_scope_nodes()\n\n # Since const nodes are not aggregated, adding them at the end can save a lot of computation.\n self._add_variable_nodes(NodeTypeEnum.CONST.value)\n self._calc_subnode_count()\n self._leaf_nodes = self._get_leaf_nodes()\n self._full_name_map_name = self._get_leaf_node_full_name_map()\n\n precision = 6\n time_consuming = round(time.time() - start_time, precision)\n logger.info(\"Build graph end, all node count: %s, const count: %s, parameter count: %s, time-consuming: %s s.\",\n self.normal_node_count, len(self._const_node_temp_cache),\n len(self._parameter_node_temp_cache), time_consuming)", "title": "" }, { "docid": "10dc55d93397203b7f0ee7754cd53618", "score": "0.6228555", "text": "def extract_graph(self):\n\n # Extract edges from circuit\n driver_dictionary = self._build_driver_dictionary()\n self.edge_list = self._get_edge_list(driver_dictionary)\n\n # Now we can build the graph since we've the vetices(instantiations) and\n # the edges (circuit point-to-point connections).\n self.graph_dict = self._get_graph_dictionary(self.edge_list)\n\n # Determine which layer of the schematic the blocks belong on\n self.layer_dict = {}\n self.layer_dict = self._determine_layering(self.graph_dict,\n col_dict=self.layer_dict)\n\n # Insert dummy nodes to break up long edges - make the graph 'proper'\n self._split_long_edges()\n\n # DEBUG\n if DEBUG:\n libdb.show_dictionary(\"Graph Edges Dictionary\", self.graph_dict )\n libdb.show_dictionary(\"Graph Layer Dictionary\", self.layer_dict )\n self.show_connections()\n\n return self.graph_dict", "title": "" }, { "docid": "b696da2a1e00092cbd7ee6e749a61349", "score": "0.62175137", "text": "def compute_graph(self) -> gt.Graph:\n if self._significant_left is None:\n sig_plus, sig_minus = self.compute_backbone()\n else:\n sig_plus, sig_minus = self._significant_right, self._significant_left\n\n G = gt.Graph(directed=True)\n G.add_vertex(self.N)\n G.add_edge_list(np.transpose(sig_plus.nonzero()))\n G.add_edge_list(np.transpose(sig_minus.nonzero()))\n weights = np.concatenate(\n (\n np.ones(sig_plus.nnz, dtype=int),\n np.zeros(sig_minus.nnz, dtype=int),\n )\n )\n G.ep[\"weight\"] = G.new_edge_property(\"short\", vals=weights)\n\n return G", "title": "" }, { "docid": "cca3881d2949e01dd6ad07fb6fb082af", "score": "0.6208593", "text": "def construct_graph(self):\n graph = nx.DiGraph()\n\n # data items connected to sink with weight abs(ln(prob_const))\n for idx, item in self.changes.items():\n capacity = abs(math.log(item.prob_const))\n graph.add_edge('d' + str(idx), 't', capacity=capacity)\n\n # source connected to fis with prob = min(abs(ln(prob_change)))\n for idx, fi in self.fis.items():\n changeable_fi_deps = fi.data & set(self.changes.keys())\n if len(changeable_fi_deps) == 0:\n continue\n capacity = min(\n (\n abs(math.log(self.changes[idx].prob_change))\n\n # effectively infinite weight when inactive\n if self.changes[idx].active else 100\n )\n for idx in changeable_fi_deps\n )\n 
graph.add_edge('s', 'f' + str(idx), capacity=capacity)\n\n # connect fi to dependent data with effectively \"infinite\" weight\n for item in changeable_fi_deps:\n graph.add_edge(\n 'f' + str(idx),\n 'd' + str(item),\n capacity=100,\n )\n\n return graph", "title": "" }, { "docid": "53cad874065dd588a04d4c953c3d6e73", "score": "0.61952126", "text": "def _buildGraph(self):\n\t\t# TODO(jchaloup): extend nodes on a level of files?\n\t\tvertices = []\t# [rpm]\n\t\tedges = []\t# [(rpm,rpm)]\n\t\talphabet = []\t# [package]\n\t\tparents = {}\t# alphabet -> vertices\n\t\tlabels = {}\t# label can contain (package, package) each with a package not in alphabet\n\t\tlabels[\"devel\"] = {}\t# (rpm,rpm | \"\") -> label\n\t\tlabels[\"main\"] = {}\t# (rpm,rpm | \"\") -> label\n\t\tlabels[\"tests\"] = {}\t# (rpm,rpm | \"\") -> label\n\n\t\t# nodes: rpms\t\t\t\t// list of rpms provided by specified packages\n\t\t# edge: (rpm, rpm)\t\t\t// list of used rpms\n\t\t# alphabet: [package]\t\t\t// list of golang packages defined by all rpms\n\t\t# parents: package -> rpm mapping\t// list of used packages (each package belongs to one and only one rpm)\n\t\t# label: ([(package, package), ...], [(package, package), ...], [(package, package), ...])\n\t\t# devel main unit-test\n\t\t# This way I get a graph from which I can run analysis on level of rpms and level of golang packages.\n\t\t# Each analysis will preprocess the graph and get what it needs:\n\t\t# - rpm level: just picks nodes and edges as it is (this will not give a list of missing packages)\n\t\t# - golang package level: from labels collect edges, from parents collect a list of missing packages, from alphabet get a list of all packages\n\t\t#\n\t\t# TODO:\n\t\t# - how to detect missing packages on rpm-level?\n\t\tcategories = [\"devel\"]\n\t\tif self.with_main:\n\t\t\tcategories.append(\"main\")\n\t\tif self.with_tests:\n\t\t\tcategories.append(\"tests\")\n\n\t\tfor v, _ in self._requirements:\n\t\t\tfor key in v:\n\t\t\t\t(build, rpm) = key\n\t\t\t\tvertices.append(rpm)\n\t\t\t\t# symbols\n\t\t\t\tfor category in categories:\n\t\t\t\t\talphabet = alphabet + v[key][category]\n\n\t\t\t\t\t# parents\n\t\t\t\t\tfor l in v[key][category]:\n\t\t\t\t\t\tparents[l] = {\"build\": build, \"rpm\": rpm}\n\n\t\tfor _, e in self._requirements:\n\t\t\tfor key in e:\n\t\t\t\t(build, rpm) = key\n\t\t\t\tfor category in categories:\n\t\t\t\t\tfor (a, b) in e[key][category]:\n\t\t\t\t\t\t# edges\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\ttarget_rpm = parents[b][\"rpm\"]\n\t\t\t\t\t\t\tedges.append((rpm, target_rpm))\n\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\t#print \"Missing node: %s\" % b\n\t\t\t\t\t\t\ttarget_rpm = \"\"\n\n\t\t\t\t\t\t# labels\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tlabels[category][(rpm,target_rpm)].append((a,b))\n\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\tlabels[category][(rpm,target_rpm)] = [(a,b)]\n\n\t\t# make the list of edges unique\n\t\tedges = list(set(edges))\n\t\t# make alphabet unique\n\t\talphabet = list(set(alphabet))\n\n\t\treturn GraphDataset(vertices, edges, alphabet, parents, labels)", "title": "" }, { "docid": "f1af70198df4a454d239c179761c5343", "score": "0.618986", "text": "def build_train_graph(self):\n # Get required endpoints from core graph.\n target_q = self.endpoints['target_q']\n online_q = self.endpoints['online_q']\n global_step = self.endpoints['global_step']\n # Build the loss function\n actions, rewards, loss = self.build_loss(target_q, online_q)\n # Build the gradient descent optimization operations\n learning_rate, train = 
self.build_optimizer(loss, global_step)\n # Build an operation to copy weights from \"online\" DQN to \"target\" DQN\n update_target = self.build_update_target()\n\n # Update inputs with training placeholders\n self.inputs.update({\n 'actions': actions,\n 'rewards': rewards,\n 'learning_rate': learning_rate\n })\n # Update endpoints with training model objects\n self.endpoints.update({\n 'loss': loss,\n 'train': train,\n 'update_target': update_target,\n })", "title": "" }, { "docid": "fa124ef739f692dcae692d17137b57fd", "score": "0.6189228", "text": "def build_graph(self):\n # The training data. A text file.\n (words, counts, words_per_epoch, self._epoch, self._words, examples,\n labels) = word2vec.skipgram_word2vec(filename=self._cnf.get('train_data'),\n batch_size=self._cnf.get(\n 'batch_size'),\n window_size=self._cnf.get(\n 'window_size'),\n min_count=self._cnf.get(\n 'min_count'),\n subsample=self._cnf.get('subsample'))\n vocab_words, vocab_counts, words_per_epoch = self._session.run(\n [words, counts, words_per_epoch])\n self._cnf.update({'vocab_words': vocab_words,\n 'vocab_counts': vocab_counts, 'words_per_epoch': words_per_epoch})\n self._cnf.update({'vocab_size': self._cnf.get('vocab_words')})\n print(\"Data file: \", self._cnf.get('train_data'))\n print(\"Vocab size: \", self._cnf.get('vocab_size') - 1, \" + UNK\")\n print(\"Words per epoch: \", self._cnf.get('words_per_epoch'))\n self._examples = examples\n self._labels = labels\n self._id2word = self._cnf.get('vocab_words')\n for i, w in enumerate(self._id2word):\n self._word2id[w] = i\n true_logits, sampled_logits = self.model(examples, labels)\n loss = self._nce_loss(true_logits, sampled_logits)\n tf.contrib.deprecated.scalar_summary(\"NCE loss\", loss)\n self._loss = loss\n self._optimizer(loss)\n\n # Properly initialize all variables.\n tf.global_variables_initializer().run()\n\n self.saver = tf.train.Saver()", "title": "" }, { "docid": "b1c03f9da770416a9d0291bd5232c1ef", "score": "0.6187668", "text": "def build_graph(self):\n edges_by_types = {}\n npy = self.edge_file_list[0][1] + \".npy\"\n if os.path.exists(npy):\n log.info(\"load data from numpy file\")\n\n for pair in self.edge_file_list:\n edges_by_types[pair[0]] = np.load(pair[1] + \".npy\")\n\n else:\n log.info(\"load data from txt file\")\n for pair in self.edge_file_list:\n edges_by_types[pair[0]] = self.load_edges(pair[1])\n # np.save(pair[1] + \".npy\", edges_by_types[pair[0]])\n\n for e_type, edges in edges_by_types.items():\n log.info([\"number of %s edges: \" % e_type, len(edges)])\n\n if self.symmetry:\n tmp = {}\n for key, edges in edges_by_types.items():\n n_list = key.split('2')\n re_key = n_list[1] + '2' + n_list[0]\n tmp[re_key] = edges_by_types[key][:, [1, 0]]\n edges_by_types.update(tmp)\n\n log.info([\"finished loadding symmetry edges.\"])\n\n node_types = self.load_node_types(self.node_types_file)\n\n assert len(node_types) == self.num_nodes, \\\n \"num_nodes should be equal to the length of node_types\"\n log.info([\"number of nodes: \", len(node_types)])\n\n node_features = {\n 'index': np.array([i for i in range(self.num_nodes)]).reshape(\n -1, 1).astype(np.int64)\n }\n\n self.graph = heter_graph.HeterGraph(\n num_nodes=self.num_nodes,\n edges=edges_by_types,\n node_types=node_types,\n node_feat=node_features)", "title": "" }, { "docid": "b550da677498283a47fa72113b7322ef", "score": "0.61814976", "text": "def construct_graph(self, file):\n ways, nodes = self.extract_route_data(file)\n graph = nx.Graph()\n graph.add_nodes_from(nodes)\n for way in 
ways:\n way_nodes = [nodes[node_id] for node_id in way]\n for a, b in zip(way_nodes, way_nodes[1:]):\n graph.add_edge(a, b, weight=a.get_distance_to(b))\n return self.clean_graph(graph)", "title": "" }, { "docid": "c951c8704fcc92183e445653b343f1d4", "score": "0.61751676", "text": "def build_graph(self, **options):\n # Building the Graph:\n self.graph = bonobo.Graph() \n\n # Creating the main method chain for the graph:\n self.graph.add_chain(\n self.transform_posts,\n self.load_posts,\n _input=None # Input set to None so self.transform_posts does not stat untill params are passed.\n ) \n\n # Adding the first leg that extracts the top posts for the day: \n self.graph.add_chain(\n self.extract_daily_top_posts, \n _output=self.transform_posts # Output of this extraction method is fed into self.transform_posts.\n )\n\n # Adding the second leg that extracts the current rising posts:\n self.graph.add_chain(\n self.extract_rising_posts,\n _output= self.transform_posts\n )\n\n return self.graph", "title": "" }, { "docid": "e7645c1f8d30cad5596aba8337ca7ad9", "score": "0.61656", "text": "def deep_copy(self):\n gc = Graph()\n # create nodes\n for s_l, s in self.nodes.items():\n for t in s.dest_nodes:\n gc.add_edge(s_l, t.label, dual_dir=False)\n return gc", "title": "" }, { "docid": "6741cd6bdf74ac79756e1375b74cd433", "score": "0.6148967", "text": "def get_graph_by_id(self, network_id):\n network = self.get_network_by_id(network_id)\n return network.as_bel()", "title": "" }, { "docid": "0b235dd5b9c5f9d4628a5d59ff5db79d", "score": "0.61454886", "text": "def to_nx(self) -> Union[nx.Graph, nx.DiGraph]:\n if not self._undirected:\n nx_graph = nx.DiGraph()\n nx_graph.add_nodes_from(self._nodes)\n nx_graph.add_edges_from(self._directed.keys())\n bidirected = {(i, j) for i, j in self._bidirected.keys()}\n nx_graph.add_edges_from(bidirected | {(j, i) for i, j in bidirected})\n nx.set_edge_attributes(nx_graph, self._directed, name='label')\n nx.set_edge_attributes(nx_graph, self._bidirected, name='label')\n nx.set_edge_attributes(nx_graph, {(j, i): l for (i, j), l in self.bidirected.items()}, name='label')\n return nx_graph\n if not self._directed and not self._bidirected:\n nx_graph = nx.Graph()\n nx_graph.add_nodes_from(self._nodes)\n nx_graph.add_edges_from(self._undirected.keys())\n nx.set_edge_attributes(nx_graph, self._undirected, 'label')\n return nx_graph\n else:\n raise ValueError(\"Can only convert if the graph has only undirected edges or no undirected edges\")", "title": "" }, { "docid": "b96e168257c74d7057d1b07bc535fcc0", "score": "0.6138603", "text": "def generateGraph(network, edges):\r\n\r\n g = Graph()\r\n\r\n for person in network:\r\n g.addNode(person.getName())\r\n\r\n for edge in edges:\r\n g.addEdge(edge)\r\n g.addReverseEdge(edge)\r\n\r\n return g", "title": "" }, { "docid": "f78c363a971570a444989f4b776a7b5b", "score": "0.6128634", "text": "def copy_graph(self):\r\n copy_graph = UndirectedGraph()\r\n for v in self.adj_list:\r\n copy_graph.add_vertex(v)\r\n for u in self.adj_list[v]:\r\n copy_graph.adj_list[v].append(u)\r\n return copy_graph", "title": "" }, { "docid": "d58e85e55e834a6ae4cb184616714a7f", "score": "0.61268103", "text": "def make_network_visualizer(self, graph):\n return NetworkVisualizer(graph)", "title": "" }, { "docid": "bfcac6f186be052efb04faa2623b066f", "score": "0.6120649", "text": "def gen_graph(self):\n\n # Generate nodes\n s_tor = len(self.tor_idx_range)\n s_agg = len(self.aggregation_idx_range)\n s_sp = len(self.spine_idx_range)\n G = gen_nodes(s_tor, s_agg, 
s_sp)\n\n # Prestructure the switch indexes to make link creation more concise\n current_idx = 1\n # TOR\n assert (current_idx == self.tor_idx_range[0])\n tors = []\n for _ in range(0, self.aggregation_block_count):\n tor_agg = []\n for tor in range(0, self.tors_per_aggregation_block):\n tor_agg.append(current_idx)\n current_idx += 1\n tors.append(tor_agg)\n # Aggregation\n assert (current_idx == self.aggregation_idx_range[0])\n aggregation = []\n for agg in range(0, self.aggregation_block_count):\n agg_block = []\n for middle_block in range(0, 8):\n agg_block.append(current_idx)\n current_idx += 1\n aggregation.append(agg_block) \n # Spine\n assert (current_idx == self.spine_idx_range[0])\n spines = []\n for spine in range(0, self.spine_block_count):\n spines.append(current_idx)\n current_idx += 1\n\n # Links\n # TOR to middle blocks:\n tor_idx = 0\n for aggregation_block in range(0, self.aggregation_block_count):\n for tor in range(0, self.tors_per_aggregation_block):\n for out in range(0, 8):\n v1 = self.tor_idx_range[tor_idx]\n v2 = aggregation[aggregation_block][out]\n G.add_edge(v1, v2)\n G.add_edge(v2, v1)\n tor_idx += 1\n \n # Aggregation to spine blocks:\n agg_idx = 0\n spine_block_idx = 0 \n for aggregation_block in range(0, self.aggregation_block_count):\n for middle_block in range(0, 8):\n # spine blocks less than number of MBs in AB leading to multiple links between an MB and an SB\n if self.ports_per_middle_block_up >= self.spine_block_count:\n for spine in range(0,len(self.spine_idx_range)):\n v1 = self.aggregation_idx_range[agg_idx]\n v2 = self.spine_idx_range[spine]\n G.add_edge(v1, v2)\n G.add_edge(v2, v1)\n agg_idx += 1\n \n # spine blocks more than MBs leading to MBs connecting to a subset of spine blocks \n else:\n for _ in range(0,self.ports_per_middle_block_up):\n v1 = self.aggregation_idx_range[agg_idx]\n v2 = spines[spine_block_idx]\n G.add_edge(v1, v2)\n G.add_edge(v2, v1)\n spine_block_idx += 1\n \n if spine_block_idx == self.spine_block_count:\n spine_block_idx = 0\n agg_idx += 1\n\n # Initialize Capacities\n G = self.init_capacities(G)\n\n return G", "title": "" }, { "docid": "4d5cdf60795e462b71bc950fd581b296", "score": "0.61022323", "text": "def graph(self, name):\n return Graph(self._conn, self._executor, name)", "title": "" }, { "docid": "f7f3ea4490cdb823ce7dca4e466f41b1", "score": "0.6101759", "text": "def build_graph(self):\n\t\t# for key in self.user_dic.keys():\n\t\t# self.graph.add_node(key)\n\t\tprint \"start building the graph\"\n\t\tdistinct_user = Set([])\n\n\t\tdistinct_user.union(set( self.user_dic.keys() ))\n\n\t\tfor value in self.user_dic.values():\n\t\t\tdistinct_user.union(set(value))\n\n\t\tfor eachuser in distinct_user:\n\t\t\tself.graph.add_node(key)\n\n\t\tfor key in self.user_dic.keys():\n\t\t\tfor value in self.user_dic[key]:\n\t\t\t\t# g.add_edges( [(1,2)] )\n\t\t\t\tself.graph.add_edge(key, value)\n\n\t\tfor node in self.graph.nodes():\n\t\t\tself.color_dic[node] = \"white\"\n\n\t\tself.node_color = [self.color_dic[node] for node in self.graph.nodes()]\n\n\t\tallgraph = list(nx.connected_component_subgraphs(self.graph))\n\n\t\tmincut = nx.minimum_edge_cut(self.graph)\n\t\tprint \"mincut is \", mincut\n\t\tprint \"length of all connected component is \", len(allgraph)\n\n\t\tfor graph in allgraph:\n\n\t\t\t# min_weighted_dominating_set(graphUD, weight=None)\n\n\t\t\tprint graph.number_of_nodes()\n\n\t\tprint \"finish building the graph\"", "title": "" }, { "docid": "5fe431b131a794ed6b9374e312bb2b62", "score": "0.6095332", "text": "def 
_build_graph(self):\n start_t = time.time()\n self._setup_placeholders()\n self._embed()\n self._encode()\n self._match()\n self._fuse()\n self._decode()\n self._predict()\n self._compute_loss()\n with tf.name_scope('optim'):\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])\n self.logger.info('There are {} parameters in the model'.format(param_num))\n self.merged = tf.summary.merge_all()", "title": "" }, { "docid": "7622c45058cb9a8c1afb0e6a84d15537", "score": "0.6072326", "text": "def get_graph(self):\n if self.self_loops:\n u = GraphView(self.u, efilt=self.eweight.fa > 0)\n else:\n es = edge_endpoint_property(self.u, self.u.vertex_index, \"source\")\n et = edge_endpoint_property(self.u, self.u.vertex_index, \"target\")\n u = GraphView(self.u, efilt=numpy.logical_and(self.eweight.fa > 0,\n es.fa != et.fa))\n return u", "title": "" }, { "docid": "55d61884e346e8b550e3f733848296b6", "score": "0.6071345", "text": "def build_graph(kmer_dict):\n graph = nx.DiGraph()\n for kmer, val in kmer_dict.items():\n graph.add_edge(kmer[:-1], kmer[1:], weight=val)\n return graph", "title": "" }, { "docid": "b28df7aad532bddfce5911c5eb1c8162", "score": "0.6053324", "text": "def crate_graph():\n graph.load_graph()\n return ' '.join(str(graph.g).strip('<>').split()[0:9])", "title": "" }, { "docid": "5da3ee907d731287dd0c5ee6c6ba8194", "score": "0.6051974", "text": "def generate_graph(self, nodes = [], edges = []):\n return self.start", "title": "" }, { "docid": "65fad3df23c929066b4489cff35da48c", "score": "0.6033559", "text": "def build_graph(self):\n 
self.global_step = tf.Variable(0, name='global_step', trainable=False)\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()\n self.summaries = tf.merge_all_summaries()", "title": "" }, { "docid": "60327e911f6d4a924e2d56b2fd332a25", "score": "0.60318136", "text": "def get_graph(self, options):\n return self._graph", "title": "" }, { "docid": "834120d66b9868c377598e8557d03505", "score": "0.60161936", "text": "def build_graph(kmer_dico):\n graph = nx.DiGraph()\n for kmer, poids in kmer_dico.items():\n graph.add_edge(kmer[:-1], kmer[1:], weight=poids)\n\n return graph", "title": "" }, { "docid": "9fdc115622c6058fb52df8928595c6c3", "score": "0.60039485", "text": "def _create_big_graph(self) -> nx.DiGraph:\n G = nx.DiGraph()\n for i in range(self.env.height):\n for j in range(self.env.width):\n coord_transition = self.env.rail.get_full_transitions(i, j)\n # if rail detected\n if coord_transition != 0:\n for direction in [0, 1, 2, 3]:\n\n cell_transition = [i for i, v in enumerate(self.env.rail.get_transitions(*(i, j), direction)) if\n v == 1]\n for cc in cell_transition:\n dx, dy = self._get_coords(cc)\n # if it is a dead end\n if coord_transition & 0b0010000000000000 > 0:\n G.add_edge((i, j, direction), (i, j, cc))\n G.add_edge((i, j, cc), (i + dx, j + dy, cc))\n else:\n G.add_edge((i, j, direction), (i + dx, j + dy, cc))\n\n return G", "title": "" }, { "docid": "ccd113f9b6e77dd2c6ddde8a5f37119d", "score": "0.5998538", "text": "def _buildNetwork(self):\n\n #Core Network (reusable)\n conv1 = self._convLayer(self.obsPH, filterDims = 8, outputFilters = 16, strides = 4,name=\"conv1\", activation = tf.nn.relu)\n conv2 = self._convLayer(conv1 , filterDims = 4, outputFilters = 32, strides = 2,name=\"conv2\", activation = tf.nn.relu)\n fc1 = self._fcLayer (conv2 , numOutputs = 256,name=\"fc1\", activation = tf.nn.relu )\n\n #Outputs\n self.valueEst = tf.squeeze(self._fcLayer(fc1, numOutputs = 1,name=\"valueEst\", activation = lambda x: x), axis=[1])\n self.policyProb = self._fcLayer(fc1, numOutputs = self.actionSpace,name=\"policyProb\", activation = tf.nn.softmax)\n \n #Log results\n self.log.addHist(\"valueEst\", self.valueEst)\n self.log.addHist(\"policyProb\", self.policyProb)", "title": "" }, { "docid": "0a71fa41ce55479868d1e465ba970957", "score": "0.5986303", "text": "def create_graph(self, input_config):\n self.graph = nx.DiGraph()\n self.graph.add_nodes_from(range(0, int(input_config[\"graph\"][\"num_nodes\"])))\n self.nstart = int(input_config[\"graph\"][\"start_node\"])\n self.nend = int(input_config[\"graph\"][\"end_node\"])\n edges_list = input_config[\"graph\"][\"edges_list\"]\n for key in edges_list.keys():\n self.graph.add_edges_from(\n [(int(key.split(',')[0]), int(key.split(',')[1]))], \n color=edges_list[key][\"color\"],\n allowed_transports=edges_list[key][\"allowed_transports\"],\n volume=edges_list[key][\"volume\"],\n free_flow_travel_time=edges_list[key][\"free_flow_travel_time\"],\n capacity=edges_list[key][\"capacity\"]\n )", "title": "" }, { "docid": "061df3956273aea672c9eb425b3e0b02", "score": "0.5985277", "text": "def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n self.cluster_layer = Clustering(self.args)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = 
tf.placeholder(\"float\")\n self.loss = self.walker_layer()\n self.loss = self.loss + self.gamma*self.cluster_layer(self.walker_layer)\n self.loss = self.loss + self.regularizer_layer(self.walker_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()\n\n if self.args.ricci_weights == \"Compute\":\n self.weights = ricci_curvature_weight_generator(self.graph, self.args.ricci_transform_alpha)\n elif self.args.raw_ricci:\n self.weights = ricci_curvature_weight_generator_raw(self.graph, ricci_weights_reader(self.args.ricci_weights))\n else:\n self.weights = ricci_curvature_weight_generator_precomputed(self.graph, self.args.ricci_transform_alpha, ricci_weights_reader(self.args.ricci_weights))", "title": "" }, { "docid": "061df3956273aea672c9eb425b3e0b02", "score": "0.5985277", "text": "def create_graph(self, input_config):\n self.graph = nx.DiGraph()\n self.graph.add_nodes_from(range(0, int(input_config[\"graph\"][\"num_nodes\"])))\n self.nstart = int(input_config[\"graph\"][\"start_node\"])\n self.nend = int(input_config[\"graph\"][\"end_node\"])\n edges_list = input_config[\"graph\"][\"edges_list\"]\n for key in edges_list.keys():\n self.graph.add_edges_from(\n [(int(key.split(',')[0]), int(key.split(',')[1]))], \n color=edges_list[key][\"color\"],\n allowed_transports=edges_list[key][\"allowed_transports\"],\n volume=edges_list[key][\"volume\"],\n free_flow_travel_time=edges_list[key][\"free_flow_travel_time\"],\n capacity=edges_list[key][\"capacity\"]\n )", "title": "" }, { "docid": "c2ae79dfb05d52d17eac80daf84028d2", "score": "0.5969558", "text": "def graph(self, name):\n return Graph(self.url, name)", "title": "" }, { "docid": "4f9067f2e3366856d0b8d0b4cb2aceab", "score": "0.5964989", "text": "def get_graph(N: int, k: int, quantity: str) -> nx.Graph:\n adj = get_adjacency(N, k, quantity)\n return nx.from_scipy_sparse_matrix(adj)", "title": "" }, { "docid": "3dea9f7ca35ec128a2d49169c4a6aeeb", "score": "0.5963267", "text": "def building_social_network(vertexs,edges):\n Undirected_G = nx.Graph()\n Undirected_G.add_nodes_from(vertexs)\n Undirected_G.add_edges_from(edges)\n return Undirected_G", "title": "" }, { "docid": "9e88a036dbd07ae03971471e7fa5eb14", "score": "0.5961826", "text": "def _graph_read(self):\n g = Graph()\n g.bind('skos', SKOS)\n g.bind('dc', DC)\n g.bind('dct', DCT)\n g.bind('rdfs', RDFS)\n g.parse(self.file, format=self.file_format)\n return g", "title": "" }, { "docid": "d5fb5cd85f0dc1c2cb09c669a0eecfc2", "score": "0.59599304", "text": "def convertNetowrk(self):\n resultDict = self.loadedNetwork\n GOld, segmentList, partitionInfo, chosenVoxels, segmentInfoDictOld, nodeInfoDictOld, resultADANDict = itemgetter('G', 'segmentList', 'partitionInfo', 'chosenVoxels', 'segmentInfoDict', 'nodeInfoDict', 'resultADANDict')(resultDict)\n segmentIndexList = list(segmentInfoDictOld.keys())\n heartLoc = (255, 251, 26) # change as needed\n DG = self.reduceGraph(GOld, segmentList, segmentIndexList)\n\n G = nx.Graph()\n nodeInfoDict, edgeInfoDict = {}, {}\n nodeIndex, edgeIndex = 0, 0\n maxNodeDepth = np.max([DG.node[node]['depthLevel'] for node in DG.nodes()])\n for currentDepth in range(maxNodeDepth + 1):\n 
nodesAtCurrentDepth = [node for node in DG.nodes() if DG.node[node]['depthLevel'] == currentDepth]\n for node in nodesAtCurrentDepth:\n G.add_node(nodeIndex, depth=DG.node[node]['depthLevel'], nodeIndex=nodeIndex, coord=node)\n DG.node[node]['nodeIndexHere'] = nodeIndex\n if node == heartLoc:\n G.node[nodeIndex]['isEntryNode'] = True\n rootNode = nodeIndex\n else:\n G.node[nodeIndex]['isEntryNode'] = False\n \n nodeIndex += 1\n \n for edge in DG.edges():\n depth = np.min([DG.node[edge[0]]['depthLevel'], DG.node[edge[1]]['depthLevel']])\n DG[edge[0]][edge[1]]['depth'] = depth\n \n maxEdgeDepth = np.max([DG[edge[0]][edge[1]]['depth'] for edge in DG.edges()])\n for currentDepth in range(maxEdgeDepth + 1):\n edgesAtCurrentDepth = [edge for edge in DG.edges() if DG[edge[0]][edge[1]]['depth'] == currentDepth]\n for edge in edgesAtCurrentDepth:\n G.add_edge(DG.node[edge[0]]['nodeIndexHere'], DG.node[edge[1]]['nodeIndexHere'], depth=currentDepth, edgeIndex=edgeIndex)\n edgeIndex += 1\n \n currentNodeIndex = nodeIndex\n currentEdgeIndex = edgeIndex\n\n edgeList = [[]] * edgeIndex\n for edge in G.edges():\n edgeIndex = G[edge[0]][edge[1]]['edgeIndex']\n edgeList[edgeIndex] = edge\n\n nodeIndexList = [G.node[node]['nodeIndex'] for node in G.nodes()]\n edgeIndexList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in edgeList]\n\n for node in DG.nodes():\n nodeIndexHere = DG.node[node]['nodeIndexHere']\n nodeInfoDict[nodeIndexHere] = DG.node[node]\n nodeInfoDict[nodeIndexHere]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset\n nodeInfoDict[nodeIndexHere]['coord'] = []\n \n for edge in DG.edges():\n edgeIndex = G[DG.node[edge[0]]['nodeIndexHere']][DG.node[edge[1]]['nodeIndexHere']]['edgeIndex']\n segmentIndex = DG[edge[0]][edge[1]]['segmentIndex']\n edgeInfoDict[edgeIndex] = DG[edge[0]][edge[1]]\n edgeInfoDict[edgeIndex]['length'] = DG[edge[0]][edge[1]]['pathLength'] # backward compatibility\n edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset\n \n # Sync between G and nodeInfoDict\n for node in G.nodes():\n for key, value in G.node[node].items():\n nodeInfoDict[node][key] = value\n\n # Save\n self.G = G\n self.edgeIndex = currentEdgeIndex\n self.nodeIndex = currentNodeIndex\n self.edgeList = edgeList\n self.nodeIndexList = nodeIndexList\n self.edgeIndexList = edgeIndexList\n self.nodeInfoDict = nodeInfoDict\n self.edgeInfoDict = edgeInfoDict\n self.rootNode = rootNode", "title": "" }, { "docid": "aa5a5413f457ab91cf8da9f4e6194e97", "score": "0.595877", "text": "def extractgraph(self):\n\n # initialize edges and edge_lengths\n self.edges = []\n self.edge_lengths = {}\n\n # find all vertices with degree 2 that are not in an isolated\n # island ring (loop) component. 
These are non-articulation\n # points on the graph representation\n non_articulation_points = self._yield_napts()\n # retain non_articulation_points as an attribute\n self.non_articulation_points = list(non_articulation_points)\n\n # start with a copy of the spatial representation and\n # iteratively remove edges deemed to be segments\n self.edges = copy.deepcopy(self.arcs)\n self.edge_lengths = copy.deepcopy(self.arc_lengths)\n\n # mapping all the 'network arcs' contained within a single\n # 'graph represented' edge\n self.arcs_to_edges = {}\n\n # build up bridges \"rooted\" on the initial\n # non-articulation points\n bridge_roots = []\n\n # iterate over all vertices that are not contained within\n # isolated loops that have a degree of 2\n for s in non_articulation_points:\n\n # initialize bridge with an articulation point\n bridge = [s]\n\n # fetch all vertices adjacent to point `s`\n # that are also degree 2\n neighbors = self._yieldneighbor(s, non_articulation_points, bridge)\n while neighbors:\n\n # extract the current node in `neighbors`\n cnode = neighbors.pop()\n # remove it from `non_articulation_points`\n non_articulation_points.remove(cnode)\n # add it to bridge\n bridge.append(cnode)\n # fetch neighbors for the current node\n newneighbors = self._yieldneighbor(\n cnode, non_articulation_points, bridge\n )\n # add the new neighbors back into `neighbors`\n neighbors += newneighbors\n\n # once all potential neighbors are exhausted add the\n # current bridge of non-articulation points to the\n # list of rooted bridges\n bridge_roots.append(bridge)\n\n # iterate over the list of newly created rooted bridges\n for bridge in bridge_roots:\n\n # if the vertex is only one non-articulation\n # point in the bridge\n if len(bridge) == 1:\n\n # that the singular element of the bridge\n n = self.adjacencylist[bridge[0]]\n # and create a new graph edge from it\n new_edge = tuple(sorted([n[0], n[1]]))\n\n # identify the arcs to be removed\n e1 = tuple(sorted([bridge[0], n[0]]))\n e2 = tuple(sorted([bridge[0], n[1]]))\n\n # remove the network arcs (spatial) from the\n # graph-theoretic representation\n self.edges.remove(e1)\n self.edges.remove(e2)\n\n # remove the former network arc lengths from the\n # graph edge lengths lookup\n length_e1 = self.edge_lengths[e1]\n length_e2 = self.edge_lengths[e2]\n self.edge_lengths.pop(e1, None)\n self.edge_lengths.pop(e2, None)\n\n # and add the new edge length in their place\n self.edge_lengths[new_edge] = length_e1 + length_e2\n\n # update the pointers\n self.arcs_to_edges[e1] = new_edge\n self.arcs_to_edges[e2] = new_edge\n\n # if there are more than one vertices in the bridge\n else:\n cumulative_length = 0\n start_end = {}\n\n # initialize a redundant set of bridge edges\n redundant = set([])\n\n # iterate over the current bridge\n for b in bridge:\n # iterate over each node in the bridge\n for n in self.adjacencylist[b]:\n # start the bridge with this node\n if n not in bridge:\n start_end[b] = n\n # or create a redundant edge with the current\n # node and `b`\n else:\n redundant.add(tuple(sorted([b, n])))\n\n # initialize a new graph edge\n new_edge = tuple(sorted(start_end.values()))\n\n # add start_end redundant edge\n for k, v in start_end.items():\n redundant.add(tuple(sorted([k, v])))\n\n # remove all redundant network arcs while\n # adjusting the graph edge lengths lookup\n # and the edges_to_arcs lookup\n for r in redundant:\n self.edges.remove(r)\n cumulative_length += self.edge_lengths[r]\n self.edge_lengths.pop(r, None)\n 
self.arcs_to_edges[r] = new_edge\n\n # finally, add the new cumulative edge length\n self.edge_lengths[new_edge] = cumulative_length\n\n # add the updated graph edge\n self.edges.append(new_edge)\n\n # converted the graph edges into a sorted set to prune out\n # duplicate graph edges created during simplification\n self.edges = sorted(set(self.edges))", "title": "" }, { "docid": "c017f86fa67479235da36cf748c42147", "score": "0.5955086", "text": "def copy(self):\n\t\treturn Graph(self.edges)", "title": "" }, { "docid": "b6e899377b8f9cc0d05ff2cd1fc075da", "score": "0.59538484", "text": "def create_network(self):\n return Chain(max_size=3)", "title": "" }, { "docid": "79bcc2ea63d244e449a4efc4bddef680", "score": "0.59534055", "text": "def get_graph(self):\n if self.graph == None:\n raise Exception(\"No best path yet calculated\")\n return self.graph", "title": "" }, { "docid": "1104adc7c1647d745aab426954a1631b", "score": "0.5946625", "text": "def get_graph_for_sugiyama(self):\n\n edges = self.extract_graph()\n block_dict = self._build_special_vertices(self.module)\n\n # flip layer dict so it's indexed by the layer\n layers = self._invert_dict(self.layer_dict)\n\n # use this layer dict to make the special vertices list of lists\n l = layers.keys()\n l.sort()\n special_vertices = []\n for i in l:\n tmp = []\n for thing in layers[i]:\n tmp.append( block_dict[thing] )\n special_vertices.append(tmp)\n\n # Now for the graph\n g = graph.Graph(self.module.name)\n layer = 0\n for vertices in special_vertices:\n for v in vertices:\n g.add_vertex(layer, v)\n layer += 1\n\n for edge in self.edge_list:\n g.add_edge(edge)\n\n g.update()\n return g", "title": "" }, { "docid": "f085cb21635699d06939ebda62e2bae4", "score": "0.5931191", "text": "def build_graph (json_iter):\n\n global DEBUG, WordNode\n\n graph = nx.DiGraph()\n\n for meta in json_iter:\n if DEBUG:\n print(meta[\"graf\"])\n\n for pair in get_tiles(map(WordNode._make, meta[\"graf\"])):\n if DEBUG:\n print(pair)\n\n for word_id in pair:\n if not graph.has_node(word_id):\n graph.add_node(word_id)\n\n try:\n graph.edge[pair[0]][pair[1]][\"weight\"] += 1.0\n except KeyError:\n graph.add_edge(pair[0], pair[1], weight=1.0)\n\n return graph", "title": "" }, { "docid": "a4a70b6179bfd0586dc3c8e01004e71b", "score": "0.5920229", "text": "def build_graph(self, gfile):\n f = open(gfile, 'r+')\n count = 0\n for line in f:\n if count == 0:\n self.num_vertices = int(line)\n self.non_weight_graph = [[False for j in range(self.num_vertices)] for i in range(self.num_vertices)]\n self.vertices = [None] * self.num_vertices\n # initialise vertex list\n for i in range(self.num_vertices):\n self.vertices[i] = Vertex()\n\n else:\n\n line = line.strip(\"\\n\")\n line = line.split(\" \")\n self.edge_list.append(line)\n count += 1\n\n for e in self.edge_list:\n u, v, w = int(e[0]), int(e[1]), int(e[2])\n self.add_edge(u, v, w)", "title": "" }, { "docid": "23ae499342943684d6f952f6aa172102", "score": "0.5916179", "text": "def make_graph(matrix):\n return graph", "title": "" }, { "docid": "5ed6131c402b484cffb5ab7fd191da4f", "score": "0.59159094", "text": "def _build(self):\n self._control_outputs.clear()\n for n in self._graph.nodes:\n for control_input in n.control_inputs:\n if control_input not in self._control_outputs:\n self._control_outputs[control_input] = []\n if n not in self._control_outputs[control_input]:\n self._control_outputs[control_input].append(n)\n self._version = self._graph.version", "title": "" }, { "docid": "c2c1cf78e3821283613782e6147124b8", "score": 
"0.5902037", "text": "def build_graph(self, class_num):\r\n graph = []\r\n # stem_conv: process the input image\r\n # add padding zeros to let ifmap and ofmap have the same size\r\n px = int((3 - 1) / 2)\r\n py = px\r\n padding_size = (px, py)\r\n stem_conv = nn.Conv2d(\r\n in_channels=3, \r\n out_channels=self.out_channels, \r\n kernel_size = 3, \r\n padding=padding_size,\r\n stride=1)\r\n graph.append(stem_conv)\r\n # major part of the graph consisting of all NasLayers\r\n for _ in range(self.num_layers):\r\n graph.append(NasLayer(self.out_channels))\r\n # fc\r\n fc = nn.Linear(self.out_channels, class_num, bias=True)\r\n graph.append(fc)\r\n # create a ModuleList, or the parameters cannot be added\r\n graph = nn.ModuleList(graph)\r\n\r\n return graph", "title": "" }, { "docid": "36d9fe820a7e1d232555f4c60046aaf3", "score": "0.5899007", "text": "def to_networkx(self) -> nx.MultiDiGraph:\n graph = nx.MultiDiGraph()\n for state in self._states:\n graph.add_node(state.value,\n is_start=state == self._start_state,\n is_final=state in self.final_states,\n peripheries=2 if state in self.final_states else 1,\n label=state.value)\n if state == self._start_state:\n add_start_state_to_graph(graph, state)\n if self._start_stack_symbol is not None:\n graph.add_node(\"INITIAL_STACK_HIDDEN\",\n label=json.dumps(self._start_stack_symbol.value),\n shape=None,\n height=.0,\n width=.0)\n for key, value in self._transition_function:\n s_from, in_symbol, stack_from = key\n s_to, stack_to = value\n graph.add_edge(\n s_from.value,\n s_to.value,\n label=(json.dumps(in_symbol.value) + \" -> \" +\n json.dumps(stack_from.value) + \" / \" +\n json.dumps([x.value for x in stack_to])))\n return graph", "title": "" }, { "docid": "dc9071530e3b1d9a191f62dbf41bf183", "score": "0.58871263", "text": "def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()+self.regularizer_layer(self.walker_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()\n\n if self.args.ricci_weights == \"Compute\":\n self.weights = ricci_curvature_weight_generator(self.graph, self.args.ricci_transform_alpha)\n elif self.args.raw_ricci:\n self.weights = ricci_curvature_weight_generator_raw(self.graph, ricci_weights_reader(self.args.ricci_weights))\n else:\n self.weights = ricci_curvature_weight_generator_precomputed(self.graph, self.args.ricci_transform_alpha, ricci_weights_reader(self.args.ricci_weights))", "title": "" }, { "docid": "c7a5d06f38bc7e15d43ee1c6c26fbd08", "score": "0.5873242", "text": "def build_new_graph(self):\n # Build a new graph with new two vertices for each vertices to represent ice loc, ice cream loc and non loc\n new_vertices_lst = [None] * (self.num_vertices * 3)\n num_old_vertices = self.num_vertices\n for i in range(self.num_vertices * 3):\n new_vertices_lst[i] = Vertex()\n\n for edge in self.edge_list:\n\n # constants for current loop\n source, target, weight = int(edge[0]), int(edge[1]), int(edge[2])\n\n # 
represent new vertices for ice location\n ice_source = source + num_old_vertices\n ice_target = target + num_old_vertices\n\n # represent new vertices for ice cream location\n ice_cream_source = source + num_old_vertices * 2\n ice_cream_target = target + num_old_vertices * 2\n\n # represent new edge, \"bi\" means bidirectional\n new_edge = Edge(source, target, weight)\n new_edge_bi = Edge(target, source, weight)\n\n new_vertices_lst[source].edges.append(new_edge)\n new_vertices_lst[target].edges.append(new_edge_bi)\n\n ice_new_edge = Edge(ice_source, ice_target, weight)\n ice_new_edge_bi = Edge(ice_target, ice_source, weight)\n\n new_vertices_lst[ice_source].edges.append(ice_new_edge)\n new_vertices_lst[ice_target].edges.append(ice_new_edge_bi)\n\n ice_cream_new_edge = Edge(ice_cream_source, ice_cream_target, weight)\n ice_cream_new_edge_bi = Edge(ice_cream_target, ice_cream_source, weight)\n\n new_vertices_lst[ice_cream_source].edges.append(ice_cream_new_edge)\n new_vertices_lst[ice_cream_target].edges.append(ice_cream_new_edge_bi)\n\n # if source or target vertex is an ice location or ice cream location, create a new edge to link and\n # connect the normal location to ice location; ice location to ice cream location\n\n # if self.vertices[source].is_ice :\n # link_edge = Edge(source, ice_target, weight)\n # new_vertices_lst[source].edges.append(link_edge)\n\n\n if self.vertices[target].is_ice:\n link_edge = Edge(source, ice_target, weight)\n new_vertices_lst[source].edges.append(link_edge)\n\n\n # if self.vertices[source].is_ice_cream:\n # link_edge = Edge(ice_source, ice_cream_target, weight)\n # new_vertices_lst[ice_source].edges.append(link_edge)\n\n\n if self.vertices[target].is_ice_cream:\n link_edge = Edge(ice_source, ice_cream_target, weight)\n new_vertices_lst[ice_source].edges.append(link_edge)\n\n\n if self.vertices[source].is_ice :\n link_edge = Edge(source, ice_source, 0)\n new_vertices_lst[source].edges.append(link_edge)\n #new_vertices_lst[ice_source].edges.append(link_edge_bi)\n\n if self.vertices[source].is_ice_cream:\n link_edge = Edge(ice_source, ice_cream_source, 0)\n new_vertices_lst[ice_source].edges.append(link_edge)\n\n\n\n return new_vertices_lst, num_old_vertices", "title": "" }, { "docid": "38725318a10f399f8dfeaaa7ad8c3788", "score": "0.5871548", "text": "def get_graph(args, n):\n k = args['k']\n p = args['p']\n m = args['m']\n\n if args['graph_mode'] == 'WS':\n graph = gen_WS_graph(k=k, p=p, n=n, seed=np.random.randint(0, MAXINT32))\n # nx.generators.connected_watts_strogatz_graph(n=n, k=k, p=p, seed=np.random.randint(0, MAXINT32))\n elif args['graph_mode'] == 'ER':\n graph = gen_ER_graph(p=p, n=n, seed=np.random.randint(0, MAXINT32))\n # nx.generators.erdos_renyi_graph(n=n, p=p, seed=np.random.randint(0, MAXINT32))\n elif args['graph_mode'] == 'BA':\n graph = gen_BA_graph(m=m, n=n, seed=np.random.randint(0, MAXINT32))\n # nx.barabasi_albert_graph(n=n, m=m, seed=np.random.randint(0, MAXINT32))\n\n dgraph = nx.DiGraph()\n dgraph.add_nodes_from(graph.nodes)\n dgraph.add_edges_from(graph.edges)\n\n in_node = []\n out_node = []\n for indeg, outdeg in zip(dgraph.in_degree, dgraph.out_degree):\n if indeg[1] == 0:\n in_node.append(indeg[0])\n if outdeg[1] == 0:\n out_node.append(outdeg[0])\n\n sorted = list(nx.topological_sort(dgraph))\n\n # pygraphviz_graph = nx.drawing.nx_agraph.to_agraph(dgraph)\n # pygraphviz_graph.add_subgraph(in_node, rank='same')\n # pygraphviz_graph.add_subgraph(out_node, rank='same')\n # pygraphviz_graph.draw(path='graph_image.png', 
prog='dot')\n\n return dgraph, sorted, in_node, out_node", "title": "" }, { "docid": "e96f56c0527383b27622f07a925cb4b0", "score": "0.5865522", "text": "def get_graph(self, include_variants=False, symmetric_variant_links=False,\n include_components=True, symmetric_component_links=True,\n edge_filter_func=None):\n # This import is done here rather than at the top level to avoid\n # making pybel an implicit dependency of the model checker\n from indra.assemblers.pybel.assembler import belgraph_to_signed_graph\n if self.graph:\n return self.graph\n # NOTE edge_filter_func is not currently used in PyBEL\n signed_edges = belgraph_to_signed_graph(\n self.model,\n include_variants=include_variants,\n symmetric_variant_links=symmetric_variant_links,\n include_components=include_components,\n symmetric_component_links=symmetric_component_links,\n propagate_annotations=True)\n self.graph = signed_edges_to_signed_nodes(\n signed_edges, copy_edge_data={'belief'})\n self.get_nodes_to_agents()\n return self.graph", "title": "" }, { "docid": "b2d4d282dd024b59f5dd05f64d93e6d4", "score": "0.5860518", "text": "def build_graph(self, doc):\n pass", "title": "" }, { "docid": "fd9b0cdfde31cc7d37a841b945497381", "score": "0.5850071", "text": "def _create_network(self):\n # Functions useful to create the graph\n def weight_variable(shape):\n \"\"\"Generate a non-void matrix\"\"\"\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n def bias_variable(shape):\n \"\"\"Generate a non-void vector\"\"\"\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\n # Graph initialization\n self._graph.as_default()\n # inputs of the graph\n with tf.variable_scope(\"Inputs\", reuse=tf.AUTO_REUSE):\n x = tf.placeholder(tf.float32, [None, NeuronalNetwork.INPUT_VECTOR_SIZE], name=\"vectorized_pictures\")\n y_ = tf.placeholder(tf.float32, [None, self._nb_output_classes], name=\"correct_labels\")\n keep_prob = tf.placeholder(tf.float32, name=\"keep_prob_dropout\") # Also an input: probability of not dropping out\n # the first hidden/convolutional layer\n with tf.variable_scope(\"First_layer\", reuse=tf.AUTO_REUSE):\n W_conv1 = weight_variable([5, 5, 1, 32])\n b_conv1 = bias_variable([32])\n x_image = tf.reshape(x, [-1, NeuronalNetwork.INPUTS_WIDTH, NeuronalNetwork.INPUTS_HEIGHT, 1])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n h_pool1 = max_pool_2x2(h_conv1)\n # the second hidden/convolutional layer\n with tf.variable_scope(\"Second_layer\", reuse=tf.AUTO_REUSE):\n W_conv2 = weight_variable([5, 5, 32, 64])\n b_conv2 = bias_variable([64])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n h_pool2 = max_pool_2x2(h_conv2)\n # last hidden layer\n with tf.variable_scope(\"Third_layer\", reuse=tf.AUTO_REUSE):\n W_fc1 = weight_variable([7*7*64, 1024])\n b_fc1 = bias_variable([1024])\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n # dropout to avoid overfitting\n with tf.variable_scope(\"Dropout\", reuse=tf.AUTO_REUSE):\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n # last layer\n with tf.variable_scope(\"Last_layer\", reuse=tf.AUTO_REUSE):\n W_fc2 = weight_variable([1024, self._nb_output_classes])\n b_fc2 = bias_variable([self._nb_output_classes])\n y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n with 
tf.variable_scope(\"Outputs\", reuse=tf.AUTO_REUSE):\n y_output = tf.argmax(y_conv, 1)\n probability_output = tf.reduce_max(tf.nn.softmax(y_conv), axis=[1])\n tf.summary.histogram(\"class_output\", y_output) # for TensorBoard\n tf.summary.histogram(\"probability_output\", probability_output) # How sure of its answer is the net?\n # add placeholders and output to the collection to have access to\n # them later when guess numbers\n graph = tf.get_default_graph()\n graph.add_to_collection(\"x\", x)\n graph.add_to_collection(\"y_\", y_)\n graph.add_to_collection(\"keep_prob\", keep_prob)\n graph.add_to_collection(\"y_conv\", y_conv)\n graph.add_to_collection(\"y_output\", y_output)\n graph.add_to_collection(\"probability_output\", probability_output)", "title": "" }, { "docid": "3b86e1bc7c4be369f2e35a43ff5d6754", "score": "0.5846569", "text": "def buildFor(cls, graph: LGraph) -> \"NeighborhoodInformation\":\n ni = NeighborhoodInformation()\n ni.nodeCount = 0\n for layer in graph.layers:\n ni.nodeCount += len(layer)\n\n # cache indexes of layers and of nodes\n ni.layerIndex = {}\n ni.nodeIndex = {}\n for lIndex, layer in enumerate(graph.layers):\n ni.layerIndex[layer] = lIndex\n for nIndex, n in enumerate(layer):\n ni.nodeIndex[n] = nIndex\n\n # determine all left and right neighbors of the graph's nodes\n ni.leftNeighbors = {}\n cls.determineAllLeftNeighbors(ni, graph)\n ni.rightNeighbors = {}\n cls.determineAllRightNeighbors(ni, graph)\n\n return ni", "title": "" }, { "docid": "ef2bb6252b24fdbd4696c07f1b2b8b27", "score": "0.5837972", "text": "def build_net(nodes_data, edges_data):\n for index, info in nodes_data.iterrows():\n G.add_node(info['name'], group=info['group'], nodesize=info['nodesize'])\n\n for index, info in edges_data.iterrows():\n G.add_edge(info['source'], info['target'], weight=info['value'])\n return None", "title": "" } ]
176b2178ec759add208e5f2e37c56bcc
Draw the cursor to the screen, if in the correct frame.
[ { "docid": "9914733cd4a98c403be05fbbb2bc738c", "score": "0.7841671", "text": "def draw_cursor(self):\n self.frames += 1\n if self.frames > CURSORFRAMES:\n # now, draw the cursor.\n self.cursor = self.rel_cursor\n else:\n self.cursor = \"\"\n\n if self.frames >= CURSORFRAMES * 2:\n # reset cursor frames to 0.\n self.frames = 0", "title": "" } ]
[ { "docid": "9dee6bb52a2c987bc487ef2555435009", "score": "0.7041617", "text": "def _draw_frame(self):\n\n self.game._draw_frame(self.display_screen)", "title": "" }, { "docid": "0edf21cb213c09a4787aab2c799c1603", "score": "0.70361966", "text": "def draw(self, screenin):", "title": "" }, { "docid": "47d6166d18a805254bfb071aae35685d", "score": "0.6946705", "text": "def drawCursor(self):\n self.canvas.delete(\"cursor\") #--Removes the cursor from any other potentially highlighted cell\n if self.row >= 0 and self.col >= 0:\n \"\"\"\n Makes two points (a left upper point & a right lower point), which it sends to a method to create a rectangle.\n The borders are offset to be one pixel within the cell itself, so that you can see the outline better.\n \"\"\"\n x0 = marginSize + self.col * cellWidth + 1\n y0 = marginSize + self.row * cellWidth + 1\n x1 = marginSize + (self.col + 1) * cellWidth - 1\n y1 = marginSize + (self.row + 1) * cellWidth - 1\n self.canvas.create_rectangle(x0, y0, x1, y1,outline=\"dark slate blue\", tags=\"cursor\")", "title": "" }, { "docid": "a7f2803ba36644273042bddaccd1c3e7", "score": "0.6945244", "text": "def frame(self, frame):\n self.draw(frame)", "title": "" }, { "docid": "e4a085091cd16908d9316e0fdaae248f", "score": "0.68804044", "text": "def draw(self):\n if self.active == True:\n self.screen.blit(self.image, (self.x, self.y))", "title": "" }, { "docid": "7304ed58bec6d9b115d3d002fca351b1", "score": "0.6869795", "text": "def redraw_cursor(self):\n new_image = self.background_and_border.copy()\n basic_blit(new_image, self.text_image, self.text_image_rect)\n if self.cursor_on and self.is_enabled:\n cursor_len_str = self.text[:self.edit_position]\n cursor_size = self.font.size(cursor_len_str)\n self.cursor.x = (cursor_size[0] + self.text_image_rect.x +\n self.padding[0] - self.start_text_offset)\n\n if not isinstance(self.text_colour, ColourGradient):\n pygame.draw.rect(new_image, self.text_colour, self.cursor)\n else:\n cursor_surface = pygame.surface.Surface(self.cursor.size,\n flags=pygame.SRCALPHA, depth=32)\n cursor_surface.fill(pygame.Color('#FFFFFFFF'))\n self.text_colour.apply_gradient_to_surface(cursor_surface)\n basic_blit(new_image, cursor_surface, self.cursor)\n\n self.set_image(new_image)", "title": "" }, { "docid": "82ff798254401fc963312461f43726f8", "score": "0.68647176", "text": "def draw(self):\n global con, map\n\n if ((in_fov(self.x, self.y)) or (self.always_visible and\n map[self.x][self.y].explored)):\n libtcod.console_set_default_foreground(con, self.color)\n libtcod.console_put_char(con, self.x, self.y, self.char,\n libtcod.BKGND_NONE)", "title": "" }, { "docid": "3b07add80824c180b347862f6a6c214e", "score": "0.67401695", "text": "def draw() -> None:\n screen.addstr(0, 0, chr(0x2500) * (width + 2), curses.color_pair(2))\n screen.addstr(height + 1, 0, chr(0x2500) * (width + 2), curses.color_pair(3))\n for i in range(height + 2):\n screen.addstr(i, 0, chr(0x2502), curses.color_pair(4))\n screen.addstr(i, width + 1, chr(0x2502), curses.color_pair(5))\n screen.addstr(0, 0, chr(0x250c), curses.color_pair(6))\n screen.addstr(height + 1, 0, chr(0x2514), curses.color_pair(7))\n screen.addstr(0, width + 1, chr(0x2510), curses.color_pair(8))\n screen.addstr(height + 1, width + 1, chr(0x2518), curses.color_pair(9))\n\n for i in range(height):\n screen.addstr(i + 1, 1, \".\" * width, curses.color_pair(10))\n screen.addstr(pos[1] + 1, pos[0] + 1, \"@\", curses.color_pair(1))\n screen.refresh()", "title": "" }, { "docid": "b005d10c301eca02d72330d6614186a0", 
"score": "0.6618578", "text": "def draw_frame(self):\n pass", "title": "" }, { "docid": "23f474815b96aa867ef69b7be3b5f4f4", "score": "0.6581924", "text": "def draw(self):\n self.screen.blit(self.image, (self.x, self.y))", "title": "" }, { "docid": "f92eb52061dafedf906db6e9165c55f1", "score": "0.65797627", "text": "def cursor(self, position: xy):\n self._position = position\n\n if self.size.y == 1:\n if self._position.x < 8:\n self.command(0x80 + self._position.x)\n else:\n self.command(0x80 + 0x40 + (self._position.x - 8))\n else:\n if self.size.y == 2:\n self.command(\n 0x80\n + (0x40 if (self._position.y > 0) else 0x00)\n + self._position.x)\n else:\n self.command(\n 0x80\n + (0x40 if (self._position.y & 0x01) else 0x00)\n + (0x14 if (self._position.y & 0x02) else 0x00))", "title": "" }, { "docid": "c4892abfe5108a0626b1aa0f570503f3", "score": "0.65776443", "text": "def draw(self):\r\n self.frame.draw()", "title": "" }, { "docid": "e44552f61b33f38e48c400a32eea34d1", "score": "0.65689385", "text": "def draw(self, screen):\n frame = self.sequence.frames[self.currentframe]\n ox, oy = frame.offset\n x, y = self.pos\n sprite = frame.sprite\n if self.dir == 'right':\n sprite = flip(sprite, True, False)\n screen.blit(sprite, (x + ox, y + oy))", "title": "" }, { "docid": "9cb595999b9098f20f8ea7091987d658", "score": "0.65539384", "text": "def draw(self, win):\n if self.color == BROWN:\n win.blit(TRAP, (self.x, self.y))\n else:\n win.blit(MOUSE, (self.x, self.y))", "title": "" }, { "docid": "d521a1c04d1bf7c5397de435c08cd4f5", "score": "0.65125877", "text": "def draw(self, screen: object):\r\n if self.active:\r\n screen.blit(self.image, self.rect)\r\n screen.blit(self.text_image, self.text_rect)", "title": "" }, { "docid": "d521a1c04d1bf7c5397de435c08cd4f5", "score": "0.65125877", "text": "def draw(self, screen: object):\r\n if self.active:\r\n screen.blit(self.image, self.rect)\r\n screen.blit(self.text_image, self.text_rect)", "title": "" }, { "docid": "c8ab4979e9fd0a628ebc36e5d8f4d828", "score": "0.64985627", "text": "def draw(self, _screen, _positionx, _positiony):\n _screen.blit(self.image, (_positionx, _positiony))", "title": "" }, { "docid": "93fc9ec66ce205dab24269cb16da608d", "score": "0.6489872", "text": "def draw(self, screen, **kwargs):\n screen.blit(self.image, self.pos)", "title": "" }, { "docid": "e825828bb79ee1d6a7071e75bc27a091", "score": "0.64861774", "text": "def draw(self, screen):\n pygame.draw.line(screen,\n pygame.Color(0,0,0),\n (self.x_upperleft,self.y_upperleft),\n (self.x_upperleft + self.width,self.y_upperleft))\n pygame.draw.line(screen,\n pygame.Color(0,0,0),\n (self.x_upperleft + self.width, self.y_upperleft),\n (self.x_upperleft + self.width,self.y_upperleft + self.height))\n pygame.draw.line(screen,\n pygame.Color(0,0,0),\n (self.x_upperleft + self.width,self.y_upperleft + self.height),\n (self.x_upperleft, self.y_upperleft + self.height))\n pygame.draw.line(screen,\n pygame.Color(0,0,0),\n (self.x_upperleft, self.y_upperleft+ self.height),\n (self.x_upperleft, self.y_upperleft))", "title": "" }, { "docid": "6fe7548d13766a061e296af3606128f9", "score": "0.64484507", "text": "def draw(self,screen):\n screen.blit(self.image, self.rect)", "title": "" }, { "docid": "f14a1b886c632b76b0284d6c75f556d5", "score": "0.64359915", "text": "def draw(self):\n if self.is_active:\n self.select()", "title": "" }, { "docid": "991142ec5731c3f4cf0f512fbe719601", "score": "0.6429051", "text": "def draw(self):\n pyxel.pset(self.position_x, self.position_y, self.color)", "title": "" }, { 
"docid": "1fcfa4fc573ae2006554cabc3306339d", "score": "0.6414794", "text": "def __changeCursor__(self, position):\n (x, y) = self.cursor\n self.pole[x][y] = 0\n (x, y) = position\n self.pole[x][y] = 1\n self.cursor = (x, y)", "title": "" }, { "docid": "f1c7e47f5bdc16030510e46a5119450f", "score": "0.6414062", "text": "def __draw_screen(self):\n\n self._screen = pygame.display.set_mode(self._size, self._format)\n\n if not self._screen:\n raise AssertionError('Screen not defined')\n\n self._screen.fill((0, 0, 0))\n pygame.font.init()\n\n # Mouse hider -- Comment the next line to see mouse over the display\n pygame.mouse.set_visible(0)\n pygame.display.update()", "title": "" }, { "docid": "2c89a4c60757436bb49cd3b5b40377d6", "score": "0.64117974", "text": "def draw_frame(self):\r\n if not self.running:\r\n return\r\n\r\n # Get the elapsed time in seconds.\r\n now = time.time()\r\n delta_time = now - self.last_time\r\n self.last_time = now\r\n\r\n target = self.mouse_location\r\n\r\n # Move the Boids.\r\n for boid in self.boids:\r\n boid.move(self.boids, target, self.target_mass, \\\r\n delta_time, self.attraction_wgt, \\\r\n self.repulsion_wgt, self.target_wgt)\r\n\r\n # Redraw.\r\n self.draw_canvas()\r\n\r\n # Repeat.\r\n self.window.after(20, self.draw_frame)", "title": "" }, { "docid": "ab1f93606322ce7c7ded4722725e6e46", "score": "0.6402534", "text": "def redraw(self, rect=None):", "title": "" }, { "docid": "437922924c052667adbe476db26af145", "score": "0.63927037", "text": "def draw(self, screen: object):\r\n if self.active:\r\n screen.blit(self.image, self.rect)\r\n screen.blit(self.text_image, self.text_rect)\r\n # Line going around button\r\n # pygame.draw.lines(screen, settings.WHITE, True, (\r\n # (self.rect.x, self.rect.y),\r\n # (self.rect.x, self.rect.y + self.rect.height),\r\n # (self.rect.x + self.rect.width, self.rect.y + self.rect.height),\r\n # (self.rect.x + self.rect.width, self.rect.y)))\r\n # Line going around text/input\r\n # pygame.draw.lines(screen, settings.WHITE, True, (\r\n # (self.rect.x + 2, self.rect.y + 2),\r\n # (self.rect.x + 2, self.rect.y + self.rect.height - 3),\r\n # (self.rect.x + self.rect.width - 3,\r\n # self.rect.y + self.rect.height - 3),\r\n # (self.rect.x + self.rect.width - 3, self.rect.y + 2)))\r", "title": "" }, { "docid": "8dd99a7a046a064c79f8442872ed9013", "score": "0.63733834", "text": "def draw(self, screen: object):\r\n if self.active:\r\n screen.blit(self.text_image, self.text_rect)", "title": "" }, { "docid": "31dfd02b7a1024c5cb5f163e88451572", "score": "0.63687825", "text": "def draw(self,win):\r\n if self.walkCount + 1 >= 27:\r\n self.walkCount = 0\r\n\r\n if self.left:\r\n win.blit(walkLeft[self.walkCount//3], (self.x,self.y))\r\n self.walkCount += 1\r\n elif self.right:\r\n win.blit(walkRight[self.walkCount//3], (self.x,self.y))\r\n self.walkCount +=1\r\n else:\r\n win.blit(char, (self.x,self.y))\r\n self.hitbox = (self.x + 15, self.y + 5, 35, 60)\r\n #pygame.draw.rect(win, (255,0,0), (self.hitbox),2)\r", "title": "" }, { "docid": "0340e877d6b90703f7a2832024f35700", "score": "0.63551843", "text": "def draw_block(self):\n pygame.draw.rect(self.screen, self.colour, self.rect)", "title": "" }, { "docid": "5d785a8d12f38bc1c0643b8254de7e17", "score": "0.63551414", "text": "def paint_cursor(index):\n if index[1] < 0 or index[0] < 0 or index[1] >= gs.BOARD_SIZE or gs.current_state[index[0], index[1]] == gs.BLOCK_SYM:\n return\n if gs.current_state[index[0],index[1]] > 0 and gs.current_state[index[0], index[1]] != gs.BLOCK_SYM:\n color = 
gs.player1._color\n elif gs.current_state[index[0],index[1]] < 0:\n color = gs.player2._color\n else:\n color = (255,255,255)\n\n level_text = ENDGAME_FONT.render(\"{}\".format(int(abs(gs.current_state[index[0],index[1]]))), False, color)\n level_surf = pygame.Surface(level_text.get_size())\n level_surf.fill(BACKGROUND)\n level_surf.blit(level_text, (0,0))\n surface = pygame.display.get_surface()\n surface.blit(level_surf, (600 + INFO_SIZE*2-level_surf.get_size()[0],0))\n current_color = surface.get_at((index[1] * gs.SQUARE_SIZE + INFO_SIZE, index[0] * gs.SQUARE_SIZE + INFO_SIZE))\n if current_color[0] + 50 > 255:\n r = 255\n else:\n r = current_color[0] + 50\n if current_color[1] + 50 > 255:\n g = 255\n else:\n g = current_color[1] + 50\n if current_color[2] + 50 > 255:\n b = 255\n else:\n b = current_color[2] + 50\n pygame.draw.rect(surface, (r, g, b),\n (index[1] * gs.SQUARE_SIZE + INFO_SIZE, index[0] * gs.SQUARE_SIZE + INFO_SIZE, gs.SQUARE_SIZE,\n gs.SQUARE_SIZE))\n pygame.display.flip()", "title": "" }, { "docid": "ad911ac0bd02f581943b75687c088e79", "score": "0.6323747", "text": "def draw(self, object, position=(0, 0)):\n draw(object, self.display, position)", "title": "" }, { "docid": "57ff38467e4d34cd34658f9ee2fc61d3", "score": "0.6309066", "text": "def draw(self, window):\n window.blit(self.IMAGE, (self.x_start, self.y))\n window.blit(self.IMAGE, (self.x_end, self.y))", "title": "" }, { "docid": "0281378b0625e0c03c2ef80f3b90437c", "score": "0.6307568", "text": "def draw_character(self):\n #Draw and blit character to screen\n screen.blit(self.surf,(self.x - 20, self.y - 20))", "title": "" }, { "docid": "0e70adecc14220a3ef49b90bfef075e3", "score": "0.6289876", "text": "def display_frame(self, screen):\n screen.fill(constants.WHITE)\n \n self.all_sprite_list.draw(screen)\n \n pygame.display.flip()", "title": "" }, { "docid": "7e727733801325d165442544498c53e5", "score": "0.62646043", "text": "def draw(self, screen):\n st_pos = self.start_gate.wire_begin_pos()\n gate_index = self.end_gate.inputs.index(self.start_gate)%2\n end_pos = (self.end_gate.wire_end_pos())[gate_index]\n pygame.draw.line(screen, self.BLUE, st_pos, end_pos, 4)", "title": "" }, { "docid": "acba4a4dcb1274387377c57624d6ea7c", "score": "0.62636876", "text": "def redraw(self):\r\n self.clear_terminal()\r\n self._writer.write(self._screen)", "title": "" }, { "docid": "3d921ccce1c12a0d1869c9459c8c4323", "score": "0.6258237", "text": "def draw(self, screen):\n screen.blit(Player.head_img, (self.x, self.y))\n\n temp = self.dirty_rects\n self.dirty_rects = []\n return temp", "title": "" }, { "docid": "5f6ec530a13394e7c5c95d08909b3ab4", "score": "0.62445426", "text": "def draw(self):\r\n tcod.console_set_default_foreground(con, self._color)\r\n tcod.console_put_char(con, self._x, self._y, self._char, tcod.BKGND_NONE)", "title": "" }, { "docid": "ad43bdf3bc0f29857e105e82775a5280", "score": "0.6238737", "text": "def draw(self, object, position=(0, 0)):\n u.draw(object, self.display, position)", "title": "" }, { "docid": "9abc8a409ce08644d1057834c3d13560", "score": "0.6235929", "text": "def set_cursor(self, x, y):\n self.move(x, y, None)\n self.update()", "title": "" }, { "docid": "757266e69a03e2b4ceb7bf7a3f1e5788", "score": "0.62358004", "text": "def on_draw(self):\n self.clear() # Clear the screen\n self.game.draw()\n self.fps_display.draw()", "title": "" }, { "docid": "0a673bd4b6d9b9da2ea24d571bb34b43", "score": "0.6216203", "text": "def update(self):\n self._state.update()\n self.draw_cursor()", "title": "" }, { "docid": 
"fb4b3c498b989e1a0db7e111892b3028", "score": "0.62126803", "text": "def draw(self):\n measure = (self.x, self.y, self.width, self.height)\n self.rect = pygame.draw.rect(self.surface, self.color, measure)", "title": "" }, { "docid": "3a4380e381753d5d132eef1947d51299", "score": "0.6206217", "text": "def draw(self):\n self.actor.x = self.positionX\n self.actor.y = World.HEIGHT - self.positionY\n self.actor.draw()", "title": "" }, { "docid": "e2b96236879eafb64b3cad8a0a8f01e3", "score": "0.61946625", "text": "def draw(self,character,coords,color):\n\t\t#self.display.draw(character,coords,color)\n\t\tself.draw_buffer.append((character,coords,color))", "title": "" }, { "docid": "f10345bc9cca23f64a2146aefca0ad53", "score": "0.6189489", "text": "def complete_drawing(self):\n if self.print_scores: self.draw_scores()\n pygame.display.flip()\n if self.auto_draw: FPS_CLOCK.tick(QFPS)\n else: FPS_CLOCK.tick(FPS)", "title": "" }, { "docid": "726e5955a3e673fe2cb80b31de3502ff", "score": "0.6186354", "text": "def set_cursor( self, line_col ):\n\t\tself.__cursor.x = line_col[1]*8\n\t\tself.__cursor.y = line_col[0]*LINE_HEIGHT", "title": "" }, { "docid": "aa2c2d6092c79b8d91117efd76d52b1e", "score": "0.6184902", "text": "def draw(self):\n coord_pixels = self.__coordinate.get_pixel_tuple()\n res = coord_pixels[0] + 8, coord_pixels[1]\n self.__game_display.blit(self.__image, res)", "title": "" }, { "docid": "21f9619742bfb6cda1d097c0eab7c458", "score": "0.6184875", "text": "def on_render(self, screen: pygame.display) -> None:\r\n #super().on_render(screen)\r\n\r\n x, y = self.engine.mouse_location\r\n\r\n # Draw a rectangle around the targeted area, so the player\r\n # can see the affected tiles\r\n\r\n render_bar(screen, self.engine)\r\n #Need to add square code\r", "title": "" }, { "docid": "7cd277db40d3c947bcd823aed9f86719", "score": "0.61785483", "text": "def draw_selection(self) -> None:\n pygame.draw.rect(self.window, HIGHLIGHT, \n (self.grid_x + (self.selected[0] * CELL_WIDTH) + self.selected[0],\n self.grid_y + (self.selected[1] * CELL_HEIGHT) + self.selected[1],\n CELL_WIDTH, CELL_HEIGHT))", "title": "" }, { "docid": "f5d3363481c89155e670e0c9e5fd8f61", "score": "0.6174886", "text": "def draw(self):\r\n if not self.mouseIsOverMe:\r\n pygame.draw.rect(self.screen, self.secondaryColour, self.rect)\r\n self.screen.blit(self.defaultText, self.textRect)\r\n else:\r\n pygame.draw.rect(self.screen, self.primaryColour, self.rect)\r\n self.screen.blit(self.secondaryText, self.textRect)", "title": "" }, { "docid": "a16a9420e7cb44f3e1b59d2d6a4ee204", "score": "0.61707073", "text": "def draw(self):\n self.image.draw(self.s, (self.x, 0))", "title": "" }, { "docid": "14bc9fb850bb16b232b56a33de629ff7", "score": "0.61691976", "text": "def draw(self, screen):\n\t\traise NotImplemented(\"You have to implement method draw.\")", "title": "" }, { "docid": "34dbc21cfa7c0cfa7bdc0351485c8571", "score": "0.61666644", "text": "def on_render(self, screen: pygame.display) -> None:\r\n #super().on_render(screen)\r\n\r\n x, y = self.engine.mouse_location\r\n\r\n # Draw a rectangle around the targeted area, so the player\r\n # can see the affected tiles\r\n\r\n render_bar(screen, self.engine)\r\n #Need to add rectangle/circle code\r", "title": "" }, { "docid": "fb26766c01ddd41716db98d4a6485159", "score": "0.61607116", "text": "def draw(self):\r\n self.__display.blit(self.__surf, self.__rect)", "title": "" }, { "docid": "336a79465dbb18253c2c3da851329025", "score": "0.616021", "text": "def draw(self):\n self._screen.blit(self._image, 
self._rect)", "title": "" }, { "docid": "f328a23d03d7266cc0ca6dcd79edac25", "score": "0.61564505", "text": "def drawPygame(self,screen,color,width):\n destPoint=self.getPt(1000)\n pygame.draw.line(screen,color,(self.mOrigin[0],self.mOrigin[1]),(destPoint[0],destPoint[1]),width)", "title": "" }, { "docid": "eb2b9800f45b2f2274c99d8b85325ffd", "score": "0.6152978", "text": "def move_cursor(self, inc):\n inc = max(0, min(len(self.buffer), self.pos+inc)) - self.pos\n\n old_y, old_x = self.stdscr.getyx()\n h, w = self.stdscr.getmaxyx()\n\n old_pos = old_y * w + old_x\n new_pos = old_pos + inc\n new_pos_x, new_pos_y = new_pos % w, new_pos // w\n\n self.stdscr.move(new_pos_y, new_pos_x)\n self.pos = self.pos + inc", "title": "" }, { "docid": "6443da240aba4f4a284e377835d62224", "score": "0.61503834", "text": "def draw_ball(self):\n pygame.draw.rect(self.screen, self.color, self.rect)", "title": "" }, { "docid": "fee61d440e9be06a82f00d770ceb4fe7", "score": "0.6136681", "text": "def draw(self, screen):\n if self.draw_type == 'maze_node': \n pygame.draw.rect(screen, self.color, pygame.Rect(self.data[0]*40 + 1, self.data[1]*40 + 1, 40,40))\n\n if self.draw_type == 'path':\n pygame.draw.line(screen, (255,255,0), (self.data[0]*40 +20, self.data[1]*40+20), (self.root.data[0]*40+20, self.root.data[1]*40+20))\n\n if self.draw_type == 'search': \n pygame.draw.rect(screen, (153,0,76) , pygame.Rect(self.data[0]*40+15, self.data[1]*40 + 15, 10,10))", "title": "" }, { "docid": "af8e0faeb3badd1f424d613a740e76ba", "score": "0.613539", "text": "def set_cursor_position(self, x, y):\n # type: (int,int) -> None\n\n self._cursor_position = (x, y)\n sys.stdout.write('\\x1b[{y};{x}H'.format(x=x + 1, y=y + 1))\n sys.stdout.flush()", "title": "" }, { "docid": "cb60cbed422b4fff49756a59cb04e323", "score": "0.61317146", "text": "def display_frame(self, screen):\n\n # Limpa a tela\n screen.fill(BGCOLOR)\n\n # Desenha os blocos de sprites\n self.all_sprites.draw(screen)\n\n # Desenha a grade\n draw_grid(screen)\n\n # --- Go ahead and update the screen with what we've drawn.\n pygame.display.flip()", "title": "" }, { "docid": "7270253e47affb735e0e7382c0fa0819", "score": "0.6130689", "text": "def draw(self, screen):\n self.zone.fill(Colors.DARKGREEN)\n for row_index, row in enumerate(self.rows):\n left, top = self.get_coord_zero(row_index)\n pygame.draw.rect(self.zone, self.color, (left, top, self.card_size[0], self.card_size[1]), 1)\n for index, card in enumerate(row):\n offset = self.get_coord_card(row_index, index)\n card.draw(self.zone, offset)\n screen.blit(self.zone, self.offset_zone)", "title": "" }, { "docid": "7270253e47affb735e0e7382c0fa0819", "score": "0.6130689", "text": "def draw(self, screen):\n self.zone.fill(Colors.DARKGREEN)\n for row_index, row in enumerate(self.rows):\n left, top = self.get_coord_zero(row_index)\n pygame.draw.rect(self.zone, self.color, (left, top, self.card_size[0], self.card_size[1]), 1)\n for index, card in enumerate(row):\n offset = self.get_coord_card(row_index, index)\n card.draw(self.zone, offset)\n screen.blit(self.zone, self.offset_zone)", "title": "" }, { "docid": "8d846efd6d755c8cbbef7bd421a50700", "score": "0.6118672", "text": "def draw(self, screen):\r\n text=self._getImage()\r\n textrect=self._getRect()\r\n screen.blit(text, textrect)", "title": "" }, { "docid": "a73bb2e47df00d2301941ffc0c8af669", "score": "0.6113599", "text": "def draw(self, game_display, color):\n\n pygame.draw.rect(game_display, color, (self.x, self.y, self.size, self.size))", "title": "" }, { "docid": 
"a987b01b0e669ecb4ae59b9734bb7115", "score": "0.61111623", "text": "def draw(self,win):\r\n pygame.draw.circle(win, self.color, (self.x,self.y), self.radius)\r\n self.hitbox = (self.x + -10, self.y + -10, 20, 20)\r\n #pygame.draw.rect(win, (255,0,0), self.hitbox,2)\r", "title": "" }, { "docid": "5a03ecd17b0b3d11933efdfec96c62fa", "score": "0.6106057", "text": "def _move_cursor(self, rows=0, cols=0):\n\n self.y += rows\n self.x += cols", "title": "" }, { "docid": "7453bfa7334e973d96ab786d5c7c20be", "score": "0.6101775", "text": "def draw_player(self):\r\n self.screen.blit(self.image, self.rect)", "title": "" }, { "docid": "4851838242feb5f288db91c8d9d78bc2", "score": "0.6098468", "text": "def draw(self, screen):\n y = 0\n x = 0\n for row in self.table:\n for col in row:\n if col == 0:\n screen.addstr(y, x, \".\")\n else:\n screen.addstr(y, x, \"o\")\n x = x + 1\n y = y + 1\n x = 0", "title": "" }, { "docid": "607ebc7c2e503f73e8f2308a97c1b2ab", "score": "0.6093654", "text": "def draw(self):\n pygame.draw.rect(SURFACE, BLACK, self.rect, 7)\n if self.hovered:\n pygame.draw.rect(SURFACE, GRAY, self.rect, 7)\n if self.active:\n pygame.draw.rect(SURFACE, BLACK, self.rect)", "title": "" }, { "docid": "eb3d317bb7e9eeb90ae1cbfadd9fe010", "score": "0.60856175", "text": "def __display_loop(self):\n\n while self.running:\n # Overlay flash cursor if enabled\n paper_image_plus_cursor = self.paper_image.copy()\n if self.__cursor_flash and self.cursor_enabled:\n # Recolour cursor image to pen colour\n cursor_image = recolour(self, self.CURSOR_IMAGE, (0, 0, 0), self.COLOUR_TABLE[self.runtime_colours[self.screen_mode][self.pen_colour]], has_alpha=True)\n # Paste cursor image on paper_image_plus_cursor at x, y\n # Flip y\n x, y = fix_coord(self.SCREEN_MODES[self.screen_mode], colrows_to_xy(self.SCREEN_MODES[self.screen_mode], self.cursor_position))\n # Change point-of-reference to bottom-left corner position\n y -= 2\n # And paste cursor\n paper_image_plus_cursor.paste(cursor_image, (x, y))\n # Normalized paper image\n normalized_paper_image = paper_image_plus_cursor.resize(self.NORMALIZED_PAPER_SIZE, resample=Image.NEAREST)\n # Create background image with border colour\n background_image = Image.new(\n 'RGB',\n self.BACKGROUND_SIZE,\n self.__get_rgb(self.border_colour)\n )\n # Overlay normalized paper image on background\n background_image.paste(normalized_paper_image, (self.border_size, self.border_size))\n # Handle full screen\n if self.full_screen:\n # Calculate new dimensions, resize and calculate x offset\n scale = self.__full_screen_display_size[1] / self.BACKGROUND_SIZE[1]\n new_size = (int(self.BACKGROUND_SIZE[0] * scale), self.__full_screen_display_size[1])\n background_image = background_image.resize(new_size, resample=Image.BICUBIC)\n display_x_offset = int((self.__full_screen_display_size[0] - new_size[0]) / 2)\n else:\n # Windows, so no x offset or resizing\n display_x_offset = 0\n # Create the video image, blit it and flip it\n video_image = pil_to_pygame_image(background_image)\n self.__pygame_display.blit(video_image, (display_x_offset, 0))\n pygame.display.flip()", "title": "" }, { "docid": "69255a3aa6620c67a7920c3984fe5d05", "score": "0.60832447", "text": "def draw(self):\r\n cur = self.current\r\n self.current = 0\r\n for _ in xrange(cur):\r\n self.poke()", "title": "" }, { "docid": "2b865cf199e433fc4e0b7bf0bfdd2bbc", "score": "0.60819167", "text": "def draw(self, rect: Rect):\n pass", "title": "" }, { "docid": "50117c145b9735e67e26239a68055a8c", "score": "0.6075259", "text": "def 
draw(self):\n if not self._move:\n pos = self.pos * self.size\n destrect = Rect(pos.x, pos.y, self.size.x, self.size.y)\n self.tile.draw(self.frame, destrect)\n else:\n self._move.draw()", "title": "" }, { "docid": "12f3cfc0ba15b9a2e262396d5f24cef9", "score": "0.6069377", "text": "def begin_draw():\n \n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n enable2D( (0, SCREEN_SIZE[0], 0, SCREEN_SIZE[1]) )", "title": "" }, { "docid": "e4ff391ab6f8245e941161131230a12a", "score": "0.60684013", "text": "def update_mouse(self):\r\n\r\n updates = self.draw_mouse(True)\r\n\r\n if updates:\r\n pygame.display.update(updates)", "title": "" }, { "docid": "1835d65327c816acd93334ee4f823c9a", "score": "0.6067981", "text": "def draw(self, win):\n win.fill(self.color)", "title": "" }, { "docid": "7b27ecd9cbbb65d10008741a0360472c", "score": "0.6064163", "text": "def draw(self, screen):\n\n # Get PyGame surface of box size\n self.rend = pygame.Surface((self.size, self.size))\n\n # Assign box location to surface\n self.rect = self.rend.get_rect(center=self.pos)\n\n # Set box color\n self.rend.fill(self.get_color())\n\n # Display box\n screen.blit(self.rend, self.rect)", "title": "" }, { "docid": "f4afce2f9842d25de68798a8c03f598f", "score": "0.60578436", "text": "def on_draw(self):\n self.screen.clear()\n self.main_batch.draw()", "title": "" }, { "docid": "f0d4863392a385e3f5d4bd97ef875c2b", "score": "0.6053945", "text": "def draw(self):\n\t\tglobal visible_tiles\n\n\t\tif (self.x, self.y) in visible_tiles or \\\n\t\t\t(self.always_visible and gmap.is_explored(self.x, self.y)):\n\t\t\tset_colour(self.rgb, 255)\n\t\t\tt.put(self.x, self.y, self.char)\n\t\t\t#con.draw_char(self.x, self.y, self.char, self.colour, bg=None)", "title": "" }, { "docid": "2dd7c5ea80c347eccf2403612619b8b0", "score": "0.6053287", "text": "def _move_cursor(self, rows=0, cols=0):\n\n if rows:\n self.out.write(\"{}{}{}\".format(self.escape_seq, abs(rows), \n 'B' if rows > 0 else 'A'))\n self.y += rows\n if cols:\n self.out.write(\"{}{}{}\".format(self.escape_seq, abs(cols), \n 'C' if cols > 0 else 'D'))\n self.x += cols\n self.out.flush()", "title": "" }, { "docid": "834c7bcd63a11e63168c5f7ae7adc9f0", "score": "0.60445887", "text": "def draw(self):\n\t\tfor i in range(9):\n\t\t\tpygame.draw.line(screen, WHITE, [i * WIDTH / 8, 0], [i * WIDTH / 8, HEIGHT], 5)\n\t\t\tpygame.draw.line(screen, WHITE, [0, i * HEIGHT / 8], [WIDTH, i * HEIGHT / 8], 5)\n\t\tfont = pygame.font.SysFont('Calibri', MARK_SIZE, False, False)\n\t\tfor r in range(len(self.game_board)):\n\t\t\tfor c in range(len(self.game_board[r])):\n\t\t\t\tmark = self.game_board[r][c]\n\t\t\t\tif self.players[self.turn % 2] == mark.lower():\n\t\t\t\t\tcolor = YELLOW\n\t\t\t\telse:\n\t\t\t\t\tcolor = WHITE\n\t\t\t\tif self.selected_token:\n\t\t\t\t\tif self.selected_token[0] == r and self.selected_token[1] == c:\n\t\t\t\t\t\tcolor = RED\n\t\t\t\tif mark != '-':\n\t\t\t\t\tmark_text = font.render(self.game_board[r][c], True, color)\n\t\t\t\t\tx = WIDTH / 8 * c + WIDTH / 16\n\t\t\t\t\ty = HEIGHT / 8 * r + HEIGHT / 16\n\t\t\t\t\tscreen.blit(mark_text, [x - mark_text.get_width() / 2, y - mark_text.get_height() / 2])", "title": "" }, { "docid": "4a733518b5e32b6d13e95ffdf056a1f5", "score": "0.6033196", "text": "def render(self):\n ul= ( max(0,int(self.pos[0]-self.wsize[0]/2.))\n , max(0,int(self.pos[1]-self.wsize[1]/2.))\n )\n\n self.drawscreen.fill(self.bgcolor)\n \n self.drawscreen.blit(self.screen, (0,0), area = pygame.Rect(ul,self.wsize))\n\n for t in self.things:\n 
t.draw_ui(self.drawscreen)\n\n if self.usemini:\n self.minimap.draw(self.screen, self.drawscreen)", "title": "" }, { "docid": "10e4e93b235ac006370d665cff647755", "score": "0.60316455", "text": "def draw(self, screen):\n self.zone.fill(Colors.DARKGREEN)\n for index, card in enumerate(self.cards):\n offset = self.get_coord_card(0, index)\n card.draw(self.zone, offset)\n screen.blit(self.zone, self.offset_zone)", "title": "" }, { "docid": "a07b07fe88b7b4bea786b3f92ce353f2", "score": "0.6028885", "text": "def update(self, screen):\r\n mouse_pos = pygame.mouse.get_pos()\r\n self.centerx = mouse_pos[0] - 75/2\r\n self.centery = mouse_pos[1] - 75/2\r\n screen.blit(self.orb, (self.centerx, self.centery))", "title": "" }, { "docid": "320de4a4dcfdb579d20c122a14510798", "score": "0.6028455", "text": "def draw(self):\n # Draws first --> last\n self.screen.fill(BGCOLOR)\n self.draw_grid()\n self.draw_circles()\n self.draw_ghost_plates()\n for sprite in self.g.all_sprites:\n self.screen.blit(sprite.image, self.camera.apply(sprite))\n self.draw_resources()\n self.draw_gui()\n pg.display.flip()", "title": "" }, { "docid": "548ea3c867049baa36549cba1a841ef4", "score": "0.6014035", "text": "def draw(self, screen):\n left, top = self.get_coord_zero(0)\n self.zone.fill(Colors.DARKGREEN)\n pygame.draw.rect(self.zone, self.color, (left, top, self.card_size[0], self.card_size[1]), 1)\n for index, card in enumerate(self.cards):\n offset = self.get_coord_card(0, index)\n card.draw(self.zone, offset)\n screen.blit(self.zone, self.offset_zone)", "title": "" }, { "docid": "548ea3c867049baa36549cba1a841ef4", "score": "0.6014035", "text": "def draw(self, screen):\n left, top = self.get_coord_zero(0)\n self.zone.fill(Colors.DARKGREEN)\n pygame.draw.rect(self.zone, self.color, (left, top, self.card_size[0], self.card_size[1]), 1)\n for index, card in enumerate(self.cards):\n offset = self.get_coord_card(0, index)\n card.draw(self.zone, offset)\n screen.blit(self.zone, self.offset_zone)", "title": "" }, { "docid": "4d47a8f795ce510ec2d5198144c6b4ac", "score": "0.6009235", "text": "def draw(self):\n self.background.draw()\n self.batch.draw()\n if self.batch_key != \"\": # if batch_key isn't empty\n self.batch_key.draw()", "title": "" }, { "docid": "d1aa81dde5a82dfc1a472f42e3d9efa3", "score": "0.60089314", "text": "def draw(self):\n self.screen.fill((125, 100, 158))\n self.draw_text(f\"Delay: {self.TIMESTEP} ms\", 40, (51, 16, 97), self.scr_width//2, 50)\n # self.all_sprites.draw(self.screen)\n for sprite in self.all_sprites:\n self.screen.blit(sprite.image, (sprite.rect.x, sprite.rect.y))\n\n pygame.display.flip()", "title": "" }, { "docid": "eff668ce3ee35d12998725f73c4251e7", "score": "0.6006791", "text": "def render(self):\n\n self.screen.fill(BLACK)\n\n self.__drawBase__()\n self.__drawSides__()\n self.__drawVertices__()\n self.__drawCellNumbers__()\n self.__drawFps__()\n self.__drawMoveMsg__()\n self.__drawClickedCellCoords__()\n\n # Update the screen\n pygame.display.update()", "title": "" }, { "docid": "d1108fa23996a9b91ee913845d374bfe", "score": "0.6005639", "text": "def redraw():\n canvas.clear()\n canvas.markit(start_x, start_y, r=SMALLSTEP)\n canvas.markit( tx, ty, r=SMALLSTEP )\n drawGraph(G)\n for o in obstacles: canvas.showRect(o, outline='blue', fill='blue')\n canvas.delete(\"debug\")", "title": "" }, { "docid": "0268f6a931303cb853f138f585854bb7", "score": "0.5988769", "text": "def draw(self):\n\t\tfor i,cell in enumerate(self.draw_cells):\n\t\t\tscreen.draw.filled_rect(cell.rect, cell.color)", "title": "" }, { 
"docid": "52df75ec3d4daaa1aa0cd9ad08434a79", "score": "0.59816337", "text": "def draw(self):\n if self.show is True:\n self.background.draw()\n self.batch.draw()", "title": "" }, { "docid": "03aabd97ee51180d6ada1f984302dd80", "score": "0.5981509", "text": "def draw(self, screen, cell_size):\n\n first_row, first_col = 3, 5\n x, y = self.pos\n x, y = ((SIDE+x-first_row)*cell_size, (y-first_col)*cell_size)\n for off_y, row in enumerate(self.shape):\n for off_x, _ in filter(lambda x: x[1], enumerate(row)):\n correction = lambda x, off_x: x+off_x*cell_size+(cell_size*(1-BLOCK_SIZE))/2\n width = cell_size*BLOCK_SIZE\n r = pygame.Rect(correction(x, off_x), correction(y, off_y), width, width)\n pygame.draw.rect(screen, self.color, r)", "title": "" }, { "docid": "aab025ba212b74433fa00ac1d62d1700", "score": "0.5980518", "text": "def draw(self):\n pass", "title": "" }, { "docid": "aab025ba212b74433fa00ac1d62d1700", "score": "0.5980518", "text": "def draw(self):\n pass", "title": "" }, { "docid": "aab025ba212b74433fa00ac1d62d1700", "score": "0.5980518", "text": "def draw(self):\n pass", "title": "" } ]
41ddf87b8aee80fd7cf604541f22294a
Helper to create a Variable stored on CPU memory.
[ { "docid": "ac97d0bd00f3797ebc78ca2edbc129b9", "score": "0.66969126", "text": "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n dtype = tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "title": "" } ]
[ { "docid": "be3d6a4f1ce7cad92376233912acba95", "score": "0.70131457", "text": "def create_variable(self, name, alloc, type_, max_stack_depth, batch_size):\n del name\n if alloc is instructions.VariableAllocation.NULL:\n return instructions.NullVariable()\n elif alloc is instructions.VariableAllocation.TEMPORARY:\n return instructions.TemporaryVariable.empty()\n else:\n dtype, event_shape = type_\n value = np.zeros([batch_size] + list(event_shape), dtype=dtype)\n if alloc is instructions.VariableAllocation.REGISTER:\n return RegisterNumpyVariable(value)\n else:\n return FullNumpyVariable(value, _create_stack(max_stack_depth, value))", "title": "" }, { "docid": "c010a95d15febcbef3b8a6077c689d7b", "score": "0.6733512", "text": "def create_variable(x: ArrayLike, volatile=False, requires_grad=False) -> Variable:\n if not isinstance(x, Variable):\n x = Variable(T(x), volatile=volatile, requires_grad=requires_grad)\n # return to_gpu(x, async=True)\n return x", "title": "" }, { "docid": "91c5bdb48c07d745eecc5be5dcb19345", "score": "0.669595", "text": "def _variable_on_cpu(name, shape, initializer, use_fp16=False, trainable=True):\n with tf.device('/cpu:0'):\n dtype = tf.float16 if use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype, trainable=trainable)\n return var", "title": "" }, { "docid": "2b4ec7def871234f1e01841a367e33ee", "score": "0.66828537", "text": "def _variable_on_cpu(self, name, shape, initializer):\n with tf.device('/cpu:0'):\n dtype = tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "title": "" }, { "docid": "2970fecb7d68dc0dd6cb4835312ad7c5", "score": "0.66779804", "text": "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n dtype = tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "title": "" }, { "docid": "b0776ff07bb51aa7e16fa3dc6ea76e12", "score": "0.6656224", "text": "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n var = tf.get_variable(name, shape, initializer=initializer)\n return var", "title": "" }, { "docid": "2c93ae4f33bedba604263ade9f2c38cd", "score": "0.6656081", "text": "def make_variable(tensor, volatile=False):\n if torch.cuda.is_available():\n tensor = tensor.cuda()\n return Variable(tensor, volatile=volatile)", "title": "" }, { "docid": "ca9eb7fe5d741007b4122d6475fe5426", "score": "0.66533124", "text": "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n var = tf.get_variable(name, shape, initializer=initializer)\n return var", "title": "" }, { "docid": "ca9eb7fe5d741007b4122d6475fe5426", "score": "0.66533124", "text": "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n var = tf.get_variable(name, shape, initializer=initializer)\n return var", "title": "" }, { "docid": "9c3f5d3c1fcef21048d4c384b6615866", "score": "0.66387063", "text": "def _variable_on_cpu(name, shape, initializer):\n\n with tf.device('/cpu:0'):\n dtype = tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "title": "" }, { "docid": "6d97f0cc2d03a4f69a50bd45b4461765", "score": "0.6638313", "text": "def make_variable(tensor):\n tensor = tensor.to(DEVICE)\n return tensor", "title": "" }, { "docid": "bde7432948c623eaeb78f18abebee2cf", "score": "0.6622528", "text": "def _variable_on_cpu(name, \n shape, \n initializer=WEIGHT_INITIALIZER,\n trainable=True):\n with tf.device('/cpu:0'):\n var = 
tf.get_variable(name, shape, initializer=initializer, dtype=tf.float32, trainable=trainable)\n return var", "title": "" }, { "docid": "03a50f315b4e4357e04afc76871e5533", "score": "0.6581512", "text": "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/gpu:0'):\n dtype = tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "title": "" }, { "docid": "0d0f8eb094247358046e6bb6a6179beb", "score": "0.65773016", "text": "def _variable_on_cpu(name, shape, initializer, use_fp16=False):\n with tf.device(\"/cpu:0\"):\n dtype = tf.float16 if use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "title": "" }, { "docid": "3173b5d1a159bd7e9d2d7dc2240b60de", "score": "0.65444493", "text": "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n dtype = tf.float32# if FLAGS.use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "title": "" }, { "docid": "cd3b901390c87b363ba6caffa65e274c", "score": "0.6515185", "text": "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "title": "" }, { "docid": "cd3b901390c87b363ba6caffa65e274c", "score": "0.6515185", "text": "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "title": "" }, { "docid": "cd3b901390c87b363ba6caffa65e274c", "score": "0.6515185", "text": "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "title": "" }, { "docid": "cd3b901390c87b363ba6caffa65e274c", "score": "0.6515185", "text": "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "title": "" }, { "docid": "79c590e21fa61aa6dd04aa3596bc5aee", "score": "0.64998305", "text": "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "title": "" }, { "docid": "3b3485b235576004036adef616815696", "score": "0.646847", "text": "def _create_local_variable(self, address, dest_offset, update_symbols_table=True):\n try:\n mir_inst_builder = \\\n self.mir_function.get_instruction_builder_by_address(\n address, False)\n\n var_type_preffix = \"i\"\n var_name = \"%(var_type_preffix)s_0x%(dest_offset)X\" % vars()\n #int_ptr = MiddleIrTypePointer(MiddleIrTypeInt())\n int_ptr = MiddleIrTypeInt()\n\n mir_var = mir_inst_builder.alloca(\n int_ptr, None, var_name)\n\n if update_symbols_table:\n #print \"---> Adding local variable offset %d (%s) : %s\" % \\\n # (address, var_name, mir_var)\n self.current_symbols_table.add_local_variable(\n dest_offset, var_name, mir_var)\n\n return mir_var\n\n except MiddleIrException, err:\n print format_exc() + '\\n'\n raise PowerPc32GccIdiomAnalyzerException(err)", "title": "" }, { "docid": "d4c610bb2e21bd8461211d4f37913d50", "score": "0.64653546", 
"text": "def _variable_on_cpu(name, shape, initializer):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "title": "" }, { "docid": "8d5713c21827189a321ee81fb14430d2", "score": "0.6463294", "text": "def to_var(x):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x)", "title": "" }, { "docid": "8d5713c21827189a321ee81fb14430d2", "score": "0.6463294", "text": "def to_var(x):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x)", "title": "" }, { "docid": "977f585a1478a4e01545bba263f6ec91", "score": "0.6461661", "text": "def variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n dtype = tf.float16 if USE_FP16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "title": "" }, { "docid": "aa36d7b61fc4dc6f61343458c8a6aebd", "score": "0.640581", "text": "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n # We instantiate all variables using tf.get_variable() instead of\n # tf.Variable() in order to share variables across multiple GPU training runs.\n # If we only ran this model on a single GPU, we could simplify this function\n # by replacing all instances of tf.get_variable() with tf.Variable().\n var = tf.get_variable(name, shape, initializer=initializer)\n return var", "title": "" }, { "docid": "f70ae1fe2af9bd8c1c8689560c188b97", "score": "0.6378275", "text": "def to_var(self, x):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x)", "title": "" }, { "docid": "f70ae1fe2af9bd8c1c8689560c188b97", "score": "0.6378275", "text": "def to_var(self, x):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x)", "title": "" }, { "docid": "00be042a0451e2293c8549969a597026", "score": "0.63716286", "text": "def _variable_on_cpu(name, shape, initializer, dtype=tf.float32, master_weight_type=None, trainable=True):\n # with tf.device('/cpu:0'):\n # dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n # dtype = tf.float32\n if not master_weight_type==None:\n var = tf.get_variable(name, shape, initializer=initializer, dtype=master_weight_type, trainable=trainable)\n var = tf.cast(var, dtype=dtype)\n else:\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype, trainable=trainable)\n return var", "title": "" }, { "docid": "48bf3f6fc1aa6fe910058903bf9c6b39", "score": "0.62224287", "text": "def to_var(x):\n if torch.cuda.is_available():\n x = x.cuda()\n # return Variable(x)\n return x", "title": "" }, { "docid": "f79e9ede4eb26f4b5190a850601752d5", "score": "0.6191811", "text": "def get_variable(x):\n tensor = torch.cuda.LongTensor(x) if CUDA else torch.LongTensor(x)\n return Variable(tensor)", "title": "" }, { "docid": "e038465f7eb744034d1a16657726d41a", "score": "0.61829364", "text": "def to_var(x, requires_grad=False, volatile=False):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x, requires_grad=requires_grad, volatile=volatile)", "title": "" }, { "docid": "d80aecb82cb9ceaa75afa3316173e40e", "score": "0.61770004", "text": "def make_var(self, name=\"var\"):\n\t\tnew_var = DriverVariable(name)\n\t\tself.variables.append(new_var)\n\t\treturn new_var", "title": "" }, { "docid": "66ce6085fc03eced25aba55597ce2d48", "score": "0.5987547", "text": "def create_variable(self, name: str=None, value: Any=None, is_parameter: bool=False, dest_ops: List[OperationBase]=None, source_op: OperationBase=None, **kwargs) ->Variable:\n if name is 
None:\n name = f'PPQ_Variable_{self._num_of_generated_var}'\n self._num_of_generated_var += 1\n created = Variable(name=name, value=value, is_parameter=is_parameter, dest_ops=dest_ops, source_op=source_op)\n self.append_variable(created)\n return created", "title": "" }, { "docid": "2fa0eec9359c9aeb767be8347f3650c7", "score": "0.59829736", "text": "def assign_variable(self, name, value):\n\n # if isinstance(value, ObjectInTime):\n # self._tl_interpreter.assign_var(name, value)\n # elif isinstance(value, SpatialInterface):\n # self._tl_interpreter.assign_var(name, StaticObject(value))\n # else:\n self._spatial_interpreter.assign_var(name, value)", "title": "" }, { "docid": "f0648e250b2ad8bf6ccd5dfd64b143b7", "score": "0.59413004", "text": "def get_variable_impl(self, address):\n arch = self._arch\n\n seg_ref = [None]\n address, backup_var_type = _invent_var_type(address, seg_ref)\n if not backup_var_type:\n raise InvalidVariableException(\n \"No variable defined at or containing address {:x}\".format(address)\n )\n\n assert not isinstance(backup_var_type, VoidType)\n\n tif = ida_typeinf.tinfo_t()\n if not ida_nalt.get_tinfo(tif, address):\n if ida_typeinf.GUESS_FUNC_OK != ida_typeinf.guess_tinfo(tif, address):\n tif = backup_var_type\n\n # Try to handle a variable type, otherwise make it big and empty.\n try:\n var_type = get_type(tif, TYPE_CONTEXT_GLOBAL_VAR)\n if isinstance(var_type, VoidType):\n var_type = backup_var_type\n\n except UnhandledTypeException as e:\n print(\n \"Could not assign type to variable at address {:x}: {}\".format(\n address, str(e)\n )\n )\n var_type = backup_var_type\n\n assert not isinstance(var_type, VoidType)\n assert not isinstance(var_type, FunctionType)\n\n self.add_symbol(address, _variable_name(address))\n var = IDAVariable(\n arch, address, var_type, _find_segment_containing_ea(address, seg_ref)\n )\n return var", "title": "" }, { "docid": "8393373c48f08f5de12f2140c9fba821", "score": "0.58655536", "text": "def tensor2variable(self, x):\n if self.use_gpu: x = x.cuda()\n return Variable(x)", "title": "" }, { "docid": "aa342716bbffbb8ab21ff92ff79ebd85", "score": "0.58381855", "text": "def create_memory(name, bitwidth, size):\n idx_width = bits_needed(size)\n return (\n f'{name} = prim std_mem_d1({bitwidth}, {size}, {idx_width});',\n idx_width\n )", "title": "" }, { "docid": "664e7174cf5b8df8bc095d6634f0d3d5", "score": "0.58286804", "text": "def alloc_variable(self):\n self.implicit_variable_cnt += 1\n return \"implicit_\" + str(self.implicit_variable_cnt)", "title": "" }, { "docid": "e803659ff238cf2357d5b175ffec8445", "score": "0.58251035", "text": "def get_variable(x):\n\tif use_cuda:\n\t\treturn x.cuda()\n\treturn x", "title": "" }, { "docid": "890101b59bcb55726ca4456196f95130", "score": "0.5809216", "text": "def assign_variable(self, u, var):\n if isinstance(var, pc.PhysicalConstant):\n u.vector()[:] = var.real\n\n elif isinstance(var, float) or isinstance(var, int):\n u.vector()[:] = var\n \n elif isinstance(var, np.ndarray):\n u.vector().set_local(var)\n u.vector().apply('insert')\n \n elif isinstance(var, Expression) or isinstance(var, Constant) \\\n or isinstance(var, GenericVector) or isinstance(var, Function):\n u.interpolate(var)\n\n elif isinstance(var, str):\n File(var) >> u\n\n else:\n s = \"*************************************************************\\n\" + \\\n \"assign_variable() function requires a Function, array, float,\\n\" + \\\n \" int, Vector, Expression, or string path to .xml, not \\n\" + \\\n \"%s. 
Replacing object entirely\\n\" + \\\n \"*************************************************************\"\n print_text(s % type(var) , 'red')\n u = var", "title": "" }, { "docid": "09aeb7ce490c1c7a97484773273f57a9", "score": "0.579234", "text": "def variable(self, name, numpyVar,**kwargs):\n checkload = kwargs.get('checkload', broadcast_x_like_y)\n #if iscomplex(numpyVar):\n # if self.loaded_state.has_key(name):\n # bx = checkload(self.loaded_state[name],numpyVar,name=name)\n # else:\n # bx = numpyVar\n # tfVarRe = tf.Variable(np.real(bx) , name=name+'_re')\n # tfVarIm = tf.Variable(np.imag(bx) , name=name+'_im')\n # tfVar = tf.complex(tfVarRe,tfVarIm)\n # self.splitcpx[tfVar] = (tfVarRe,tfVarIm)\n\n if self.loaded_state.has_key(name):\n print 'initalizing %s from %s ' % (name, self.load)\n numpyVar = checkload(self.loaded_state[name],numpyVar,name=name)\n else:\n print 'default initialization for %s' % (name)\n\n assert not iscomplex(numpyVar),'TODO'\n\n if name in self.frozen:\n print 'FROZEN AS CONSTANT:%s' % name\n tfVar = tf.constant( numpyVar )\n self.saveconstants[name] = numpyVar\n else:\n tfVar = tf.Variable( numpyVar )\n self.variables[name] = tfVar\n return tfVar", "title": "" }, { "docid": "fde9346b2ddb76a719fc059496317a7e", "score": "0.5741717", "text": "def variable(self, value: BaseNode):\n unwrapped = _unwrap(None, value)\n self._dotnet_instance.Variable = next(unwrapped)", "title": "" }, { "docid": "fde9346b2ddb76a719fc059496317a7e", "score": "0.5741717", "text": "def variable(self, value: BaseNode):\n unwrapped = _unwrap(None, value)\n self._dotnet_instance.Variable = next(unwrapped)", "title": "" }, { "docid": "fc95d8028a78c143be6bcae720b5d2aa", "score": "0.57317764", "text": "def __init__(__self__, *,\n cpu: float,\n memory: float):\n pulumi.set(__self__, \"cpu\", cpu)\n pulumi.set(__self__, \"memory\", memory)", "title": "" }, { "docid": "aaca4ca16d5c0f5e5cdfbcdd3d808eeb", "score": "0.5716985", "text": "def _create_storage_var(lvalue, rvalue):\n rvalue_type = type(rvalue)\n\n # Check for allowed types.\n if rvalue_type in _MYPY_PY_TYPE_MAPPING:\n py_type, attr = _MYPY_PY_TYPE_MAPPING[rvalue_type]\n return Variable(lvalue.name, py_type, getattr(rvalue, attr))\n\n # Check for forbidden types\n if rvalue_type in _STORAGE_FORBIDDEN_TYPES_MAPPINGS:\n attr, err_fmt = _STORAGE_FORBIDDEN_TYPES_MAPPINGS[rvalue_type]\n raise ValueError(err_fmt.format(getattr(rvalue, attr)))\n\n # Unhandled cases\n raise NotImplementedError(\n f\"line {lvalue.line}: error: Storage value {rvalue} is not allowed yet\"\n f\" by the Pikcio contract compiler.\"\n )", "title": "" }, { "docid": "41cf8435396bda307023d875cc60c043", "score": "0.57054734", "text": "def create_var(self, name):\n if self.local_vars != None:\n self.local_vars.append(name)\n else:\n self.global_vars.append(name)", "title": "" }, { "docid": "8568aa39ea06648158ca686307a5878a", "score": "0.5684103", "text": "def assign_variable(self, u, var):\n if isinstance(u, Indexed):\n u = project(u, self.Q)\n \n if isinstance(var, PhysicalConstant):\n u.vector()[:] = var.real\n\n elif isinstance(var, float) or isinstance(var, int):\n u.vector()[:] = var\n \n elif isinstance(var, np.ndarray):\n u.vector().set_local(var)\n u.vector().apply('insert')\n \n elif isinstance(var, Expression):\n u.interpolate(var)\n\n elif isinstance(var, GenericVector):\n u.vector().set_local(var.array())\n u.vector().apply('insert')\n\n elif isinstance(var, Function):\n u.vector().set_local(var.vector().array())\n u.vector().apply('insert')\n \n elif isinstance(var, 
Indexed):\n u.vector().set_local(project(var, self.Q).vector().array())\n u.vector().apply('insert')\n\n elif isinstance(var, str):\n File(var) >> u\n\n else:\n print \"*************************************************************\"\n print \"assign_variable() function requires a Function, array, float,\" + \\\n \" int, \\nVector, Expression, Indexed, or string path to .xml, \" + \\\n \"not \\n%s\" % type(var)\n print \"*************************************************************\"\n exit(1)", "title": "" }, { "docid": "37d6293e00d1ebda3a04e56a763ffc51", "score": "0.5653959", "text": "def new_var():\n self.var_count += 1\n return ast.Variable(\"::{}\".format(self.var_count))", "title": "" }, { "docid": "75c9780f208ff051f0fc38fc45b8a28c", "score": "0.5641634", "text": "def get_variable(x):\n if torch.cuda.is_available():\n return x.cuda()\n return x", "title": "" }, { "docid": "0b4dc3c97a9d834b1a631b7c846655f9", "score": "0.5629483", "text": "def put_on_gpu_like(cpu_var, gpu_var):\n gpu_no = gpu_no_of_var(gpu_var)\n if type(gpu_no) == int:\n cpu_var = cpu_var.cuda(gpu_no)\n return cpu_var", "title": "" }, { "docid": "7afa34a44fd40eaf9592ffe8fa9a7ee6", "score": "0.56251585", "text": "def _variable(name, shape, initializer, trainable=True):\n\tif trainable:\n\t\tvar = tf.get_variable(name, shape, initializer=initializer, dtype=tf.float32)\n\telse:\n\t\tvar = tf.get_variable(name, shape, initializer=initializer, dtype=tf.float32, trainable=trainable)\n\treturn var", "title": "" }, { "docid": "6129a3ea7eb274946f585183695089a5", "score": "0.56216323", "text": "def variable(self) -> BaseNode:\n dotnet_result = self._dotnet_instance.Variable\n return _wrap(dotnet_result)", "title": "" }, { "docid": "6129a3ea7eb274946f585183695089a5", "score": "0.56216323", "text": "def variable(self) -> BaseNode:\n dotnet_result = self._dotnet_instance.Variable\n return _wrap(dotnet_result)", "title": "" }, { "docid": "67d26df56b43b150111489834402da09", "score": "0.55796593", "text": "def var(name, attr=None, shape=None, lr_mult=None, wd_mult=None, dtype=None,\n init=None, stype=None, profiler_scope=None, **kwargs):\n if not isinstance(name, string_types):\n raise TypeError('Expect a string for variable `name`')\n handle = SymbolHandle()\n check_call(_LIB.MXSymbolCreateVariable(c_str(name), ctypes.byref(handle)))\n ret = Symbol(handle)\n attr = attribute.current().get(attr)\n attr = {} if attr is None else attr\n if shape is not None:\n attr['__shape__'] = str(shape)\n if lr_mult is not None:\n attr['__lr_mult__'] = str(lr_mult)\n if wd_mult is not None:\n attr['__wd_mult__'] = str(wd_mult)\n if dtype is not None:\n np_dtype = _numpy.dtype(dtype)\n if np_dtype == _numpy.dtype([('bfloat16', _numpy.uint16)]):\n attr['__dtype__'] = str(_DTYPE_NP_TO_MX[np_dtype])\n else:\n attr['__dtype__'] = str(_DTYPE_NP_TO_MX[_numpy.dtype(dtype).type])\n if init is not None:\n if not isinstance(init, string_types):\n init = init.dumps()\n attr['__init__'] = init\n if stype is not None:\n attr['__storage_type__'] = str(_STORAGE_TYPE_STR_TO_ID[stype])\n if profiler_scope is not None:\n attr['__profiler_scope__'] = profiler_scope\n else:\n attr['__profiler_scope__'] = _current_profiler_scope.get()\n for k, v in kwargs.items():\n if k.startswith('__') and k.endswith('__'):\n attr[k] = str(v)\n else:\n raise ValueError('Attribute name=%s is not supported.'\n ' Additional attributes must start and end with double underscores,'\n ' e.g, __yourattr__' % k)\n ret._set_attr(**attr)\n return ret", "title": "" }, { "docid": 
"bfe607910f5ad24f8527c40ed2b3c6bb", "score": "0.5551135", "text": "def dwordp_assign(*args):\n return _pymaxwell.dwordp_assign(*args)", "title": "" }, { "docid": "ac779074365f24660f93b291f495ae85", "score": "0.5531068", "text": "def make_xpu(hyper):\n xpu = device.XPU.cast(hyper.xpu)\n return xpu", "title": "" }, { "docid": "072350968b12ad3ae98b31eb89beaa05", "score": "0.5524365", "text": "def make_32bit_var(module, var):\n value = getattr(module, var)\n dname = var + '64'\n if not hasattr(module, dname):\n # Save the 64bit value before overwriting the variable\n setattr(module, dname, value)\n # Overwrite with 32bit value\n setattr(module, var, np.float32(value))", "title": "" }, { "docid": "139cff3a8f0c1606638ceef3bfa6a096", "score": "0.5517015", "text": "def variable(value: T):\n\n return dataclasses.field(\n default_factory=lambda: value,\n init=False,\n repr=False,\n )", "title": "" }, { "docid": "7310d1f17dd7c35eb6ced189a5a9a91c", "score": "0.5501858", "text": "def variable_device(device, name):\n if callable(device):\n var_name = tf.get_variable_scope().name + '/' + name\n var_def = tf.NodeDef(name=var_name, op='Variable')\n device = device(var_def)\n if device is None:\n device = ''\n return device", "title": "" }, { "docid": "9b0f003fe8e40642dbaa5da0d24d8b4d", "score": "0.54497427", "text": "def put_value_in_temp(controller: VMCController, operand: Operand):\n if operand.is_register():\n return operand.value, ''\n temp = controller.temp_allocator.alloc_temp()\n return temp, controller.set_num(temp, operand.value)", "title": "" }, { "docid": "7be8e0dd61b18c0b164a873b36e57fdc", "score": "0.54471534", "text": "def assign(self, value, use_locking=False, name=None, read_value=True):\n value = tf.expand_dims(value, 0)\n value = self._to_auto(value)\n res = self._var.assign(value, use_locking, name, read_value)\n if read_value:\n res = self._to_manual(res)\n res = tf.squeeze(res, 0)\n return res", "title": "" }, { "docid": "20d97f8483d258e9d8a0baed53839236", "score": "0.5442426", "text": "def memory(self, value):\n self._memory = value", "title": "" }, { "docid": "20d97f8483d258e9d8a0baed53839236", "score": "0.5442426", "text": "def memory(self, value):\n self._memory = value", "title": "" }, { "docid": "20d97f8483d258e9d8a0baed53839236", "score": "0.5442426", "text": "def memory(self, value):\n self._memory = value", "title": "" }, { "docid": "d4c009bb269007a313cf24c6b0185e97", "score": "0.5408398", "text": "def to_variable(given):\n if isinstance(given, Variable):\n return given\n return Variable(given)", "title": "" }, { "docid": "28c9fc9b91ea2177e079cb03633265e6", "score": "0.53873235", "text": "def runtime(self, _from='0'):\n instructions = []\n instructions.append(comment('XPMEM'))\n instructions.append(copy(_from=_from, src=self.prefix,\n dest=self.prefix))\n if self.ldconfig:\n instructions.append(shell(\n commands=[self.ldcache_step(\n directory=os.path.join(self.prefix, 'lib'))]))\n if self.__environment_variables:\n instructions.append(\n environment(variables=self.__environment_variables))\n return '\\n'.join(str(x) for x in instructions)", "title": "" }, { "docid": "3b11acfa28b3887f4b68ea258b57cd09", "score": "0.53755385", "text": "def saved_variable(shape, name):\n return tf.Variable(tf.zeros(shape=shape, dtype=tf.float32),\n trainable=False, name=name)", "title": "" }, { "docid": "204bf8f563b30131d194646cc62fa1ae", "score": "0.53690404", "text": "def create_variable(self, name, data_type, headers, **kwargs):\n var = self.dataset.createVariable(name, data_type, 
(\"time\",\"index\"), fill_value=-1e+20)\n\n # get the values\n values = np.transpose(np.array([self.df[headers[n]] for n in range(self.index_length)]))\n\n # convert any nan values to fill values\n values[np.isnan(values)] = self.fill_value\n var[:] = values\n\n # mask the data according to the qc\n var_masked = np.transpose(np.array([self.df_masked[headers[n]].astype(data_type) for n in range(self.index_length)]))\n \n # Set variable attributes\n var.valid_min = np.nanmin(var_masked) # get from valid values\n var.valid_max = np.nanmax(var_masked)\n\n for k, v in kwargs.items():\n setattr(var, k, v)", "title": "" }, { "docid": "e3c3c997dbbfb06403cd547ede6fb9e3", "score": "0.53440964", "text": "def cpu(self):\n module = self._apply(lambda t: t.cpu())\n _del_attributes(module, MVN_LAZY_PROPERTIES)\n return module", "title": "" }, { "docid": "37babbb9960a3b1f3f17eda07a529b60", "score": "0.5335138", "text": "def _to_var(x):\n shape = x.shape\n dtype = x.dtype\n name = getattr(x, \"name\", None) or NG.name(\"feed\")\n return paddle.static.data(shape=shape, dtype=dtype, name=name)", "title": "" }, { "docid": "8d456f88fbe0ab13f6414b28f9f0296f", "score": "0.53199196", "text": "def visit_Assign(self, node):\n # Currently, we only allow one output target\n target = node.targets[0]\n index = 0\n content = None\n is_tvm = False\n dtype = \"float32\"\n\n\n # Analyze right hand side first\n if isinstance(node.value, ast.Call):\n call = node.value\n call_type = self.check_call_type(call)\n if len(call_type) == 1:\n # External function call. We do not support it right now\n content = self.visit(call)\n else:\n args = call.args\n keywords = call.keywords\n # Currently we only support tvm calls\n if call_type[0] == \"tvm\":\n is_tvm = True\n if call_type[1] == \"var\": # tvm.var\n assert isinstance(target, ast.Name), \"target of tvm.var must be a name\"\n for keyword in keywords: # check every keyword in tvm.var\n if keyword.arg == \"dtype\":\n dtype = keyword.value.s\n elif keyword.arg == \"name\":\n pass\n else:\n raise ValueError(\"Unknown/Unsupported keyowrds to tvm.var: \" + str(keyword[0]))\n name = target.id\n tvm_var = tvm.var(name, dtype = dtype)\n var = {'var': tvm_var, 'type': 'tvm', 'allocated': False}\n if name in self.arg_list: # check whether this var belongs to io\n self.io_dict[name] = {'arg': tvm_var}\n var['allocated'] = True\n self.insert_var(name, var)\n content = None\n elif call_type[1] == \"placeholder\": # tvm.placeholder\n assert isinstance(target, ast.Name), \"target of tvm.placeholder must be a name\"\n for keyword in keywords: # check every keyword in tvm.var\n if keyword.arg == \"dtype\":\n dtype = keyword.value.s\n elif keyword.arg == \"name\":\n pass\n else:\n raise ValueError(\"Unknown/Unsupported keyowrds to tvm.placeholder: \" + str(keyword[0]))\n name = target.id\n shape = self.get_shape(call.args[0])\n placeholder = tvm.placeholder(shape, name = name, dtype = dtype)\n buff = tvm.decl_buffer(placeholder.shape, placeholder.dtype, placeholder.name)\n buffer = {'tensor': placeholder, 'buffer': buff, 'type': 'input', 'ast': node, 'shape': shape, 'allocated': False}\n if name in self.arg_list:\n self.io_dict[name] = {'arg': buff}\n buffer['allocated'] = True\n self.insert_buffer(name, buffer)\n content = None\n elif call_type[1] == \"compute\":\n name = target.id\n shape = self.get_shape(call.args[0])\n placeholder = tvm.placeholder(shape, name = name, dtype = dtype)\n buff = tvm.decl_buffer(placeholder.shape, placeholder.dtype, placeholder.name)\n buffer = {'tensor': 
placeholder, 'buffer': buff, 'type': 'compute', 'ast': node, 'shape': shape, 'allocated': False}\n if name in self.arg_list:\n self.io_dict[name] = {'arg': buff}\n buffer['allocated'] = True\n self.insert_buffer(name, buffer)\n lamb = call.args[1]\n assert isinstance(lamb, ast.Lambda), \"The second argument to tvm.compute must be a lambda function\"\n self.scope += 1\n ret = self.visit(lamb)\n args = lamb.args.args\n if len(shape) == 1:\n var_name = args[0].id\n var = tvm.var(var_name, \"int32\")\n st = tvm.make.Store(buff.data, ret, var, self.true)\n if not isinstance(ret, tuple):\n ret = self.ReplaceVar(var_name, var).mutate(ret)\n st = tvm.make.Store(buff.data, ret, var, self.true)\n content = tvm.make.For(var, 0, shape[0], 0, 0, st)\n else:\n ret[0] = self.ReplaceVar(var_name, var).mutate(ret[0])\n ret[1] = self.ReplaceVar(var_name, var).mutate(ret[1])\n st = tvm.make.Store(buff.data, ret[1], var, self.true)\n content = tvm.make.For(var, 0, shape[0], 0, 0, tvm.make.Block(ret[0], st))\n else:\n var_name1 = args[0].id\n var_name2 = args[1].id\n var1 = tvm.var(var_name1, \"int32\")\n var2 = tvm.var(var_name2, \"int32\")\n if not isinstance(ret, tuple):\n ret = self.ReplaceVar(var_name1, var1).mutate(ret)\n ret = self.ReplaceVar(var_name2, var2).mutate(ret)\n st = tvm.make.Store(buff.data, ret, (var1 * shape[1] + var2), self.true)\n expr = tvm.make.For(var2, 0, shape[1], 0, 0, st)\n else:\n if ret[0] is not None:\n ret0 = self.ReplaceVar(var_name1, var1).mutate(ret[0])\n ret0 = self.ReplaceVar(var_name2, var2).mutate(ret0)\n ret1 = self.ReplaceVar(var_name1, var1).mutate(ret[1])\n ret1 = self.ReplaceVar(var_name2, var2).mutate(ret1)\n st = tvm.make.Store(buff.data, ret1, (var1 * shape[1] + var2), self.true)\n if ret[0] is not None:\n expr = tvm.make.For(var2, 0, shape[1], 0, 0, tvm.make.Block(ret0, st))\n else:\n expr = tvm.make.For(var2, 0, shape[1], 0, 0, st)\n content = tvm.make.For(var1, 0, shape[0], 0, 0, expr)\n self.scope -= 1\n else:\n raise ValueError(\"Unkown/Unsupported tvm function: tvm.\" + call_type[1])\n return content\n else: # if call_type[1] == \"tvm\"\n raise ValueError(\"Currently we only support tvm functions\")\n else: # if isinstance(node.value, ast.Call)\n content = self.visit(node.value)\n # left hand side\n var, name, _type = self.get_target(target)\n if _type == 'name':\n if var is None:\n var = tvm.var(name, \"float32\")\n self.insert_var(name, {'var': var, 'type': 'intermediate', 'allocated': False})\n else:\n var = var['var']\n else:\n index = self.visit(target)\n var = var['buffer'].data\n\n assert (not is_tvm)\n if isinstance(node.value, ast.IfExp):\n then = tvm.make.Store(var, content[1], index)\n orelse = tvm.make.Store(var, content[2], index)\n return tvm.make.IfThenElse(content[0], then, orelse)\n else:\n return tvm.make.Store(var, content, index)", "title": "" }, { "docid": "bf23fd2f15bf6eb6fd5d50e838657ba6", "score": "0.5293648", "text": "def NotTyping():\n return Memory(0x68C144, Exactly, 0)", "title": "" }, { "docid": "c11feb13ada9ecad3670b4f5e4720b85", "score": "0.52833045", "text": "def new_variable(self, input: InputNode) -> Symbol:\n base_name = self.base_name(input)\n loc = self.make_location(input)\n sym = self.gen(base_name)\n self.vtrack[base_name] = sym\n return sym", "title": "" }, { "docid": "92b24e382d574d4c7948b428264537b8", "score": "0.528025", "text": "def fresh_variable(self) -> str:\n prefix = 'noodler_var_'\n self.next_variable_id += 1\n new_var = f'{prefix}{self.next_variable_id-1}'\n self.variables.add(new_var)\n return new_var", 
"title": "" }, { "docid": "203198770a6346c624425ff33744e6ec", "score": "0.5274049", "text": "def _create_storage(spec, slot_name):\n shape = [self.length] + spec.shape.as_list()\n new_storage = tf.Variable(\n name=slot_name,\n initial_value=tf.zeros(shape, dtype=spec.dtype),\n shape=None,\n dtype=spec.dtype)\n return new_storage", "title": "" }, { "docid": "69f538484877229727e379dac945fd74", "score": "0.5272556", "text": "def VV_(x: ArrayLike) -> Variable:\n return create_variable(x, volatile=True)", "title": "" }, { "docid": "b45bf5d0806674476f045d773371b050", "score": "0.5269404", "text": "def tpu(self):\n array_ndim, array_type, array_shape, array_handle = \\\n _pyEMsoft.f90wrap_stackingfaulttype__array__tpu(self._handle)\n if array_handle in self._arrays:\n tpu = self._arrays[array_handle]\n else:\n tpu = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t,\n self._handle,\n _pyEMsoft.f90wrap_stackingfaulttype__array__tpu)\n self._arrays[array_handle] = tpu\n return tpu", "title": "" }, { "docid": "737a49dd40f9dee82486bcd811af1a5b", "score": "0.52637935", "text": "def __init__(__self__, *,\n cpu: Optional[pulumi.Input[float]] = None,\n memory: Optional[pulumi.Input[str]] = None):\n if cpu is not None:\n pulumi.set(__self__, \"cpu\", cpu)\n if memory is not None:\n pulumi.set(__self__, \"memory\", memory)", "title": "" }, { "docid": "fdd3c2a74a14103f8753cfba339aea20", "score": "0.5246389", "text": "def make_variables():\n variables = make_grid()\n for i in range(9):\n for j in range(9):\n variables[i][j] = Terms.new_uninterpreted_term(int_t)\n return variables", "title": "" }, { "docid": "9ca488ff504e56d1ddd0fcc3bbc761e9", "score": "0.52339935", "text": "def _create_local(name, shape, collections=None, validate_shape=True,\n dtype=dtypes.float32):\n # Make sure local variables are added to tf.GraphKeys.LOCAL_VARIABLES\n collections = list(collections or [])\n collections += [ops.GraphKeys.LOCAL_VARIABLES]\n return variables.Variable(\n initial_value=array_ops.zeros(shape, dtype=dtype),\n name=name,\n trainable=False,\n collections=collections,\n validate_shape=validate_shape)", "title": "" }, { "docid": "a861100a18851e35e98b922ff0b7028b", "score": "0.5230129", "text": "def create_z3_var(ctx: z3.Context, type_py: Type, name: str):\n logger.debug(\"creating Z3 variable for [%s] with type [%s]\",\n name, type_py)\n py_to_z3 = {float: z3.Real,\n bool: z3.Bool,\n str: z3.String,\n int: z3.Int}\n try:\n type_z3 = py_to_z3[type_py]\n except KeyError:\n raise exceptions.UnsupportedVariableType(type_py)\n var = type_z3(name, ctx=ctx)\n logger.debug(\"created Z3 variable for [%s]: %s\", name, var)\n return var", "title": "" }, { "docid": "8e4f8c54626b334f6faaa65ef7db1934", "score": "0.5207599", "text": "def variable(name, shape=None, dtype=tf.float32, initializer=None,\n regularizer=None, trainable=True, collections=None, device='',\n restore=True):\n\n if isinstance(initializer, str):\n initializer = tflearn.initializations.get(initializer)()\n\n if isinstance(regularizer, str):\n regularizer = tflearn.losses.get(regularizer)\n\n with tf.device(device):\n\n try:\n var = tf.get_variable(name, shape=shape, dtype=dtype,\n initializer=initializer,\n regularizer=regularizer,\n trainable=trainable,\n collections=collections)\n # Fix for old TF versions\n except Exception as e:\n var = tf.get_variable(name, shape=shape, dtype=dtype,\n initializer=initializer,\n trainable=trainable,\n collections=collections)\n if regularizer is not None:\n tflearn.add_weights_regularizer(var, regularizer)\n\n if 
not restore:\n tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS, var)\n\n return var", "title": "" }, { "docid": "e69fcf49c4a1e4aa1994870e9a3f01f7", "score": "0.5206628", "text": "def force_value_in_temp(controller: VMCController, operand: Operand):\n temp = controller.temp_allocator.alloc_temp()\n if operand.is_immediate():\n code = controller.set_num(temp, operand.value)\n else:\n code = controller.copy_num(operand.value, temp)\n return temp, code", "title": "" }, { "docid": "8f03327591c90eeb11e08cb453b6e8a3", "score": "0.5203709", "text": "def init_variable(self, queue=False):", "title": "" }, { "docid": "44ed90037db453a1908a384e45150558", "score": "0.51973623", "text": "def __init__(self, init_value, dtype, name):\n super(TFScalarVariableWrapper, self).__init__()\n self.variable = tf.get_variable(name,\n shape=[],\n trainable=False,\n dtype=dtype,\n initializer=tf.constant_initializer(init_value))\n self.placeholder = tf.placeholder(dtype, shape=[], name='{}_pl'.format(name))\n self.assign_op = tf.assign(self.variable, self.placeholder)", "title": "" }, { "docid": "793ef3c232b68a734477d96650e8085b", "score": "0.5186565", "text": "def get_variable_value(self, name):\n return load_variable(self._model_dir, name)", "title": "" }, { "docid": "40cc561d3b63fac52365a89da2333928", "score": "0.51765966", "text": "def _create_variables(self, dev_str):\n return {}", "title": "" }, { "docid": "0b2c43239c8776054159386b26df3c76", "score": "0.5172301", "text": "def var(self, name: str) -> ObjectInTime:\n try:\n return self.vars[name.lower()]\n except KeyError:\n raise Exception(\"Variable not found: %s\" % name)", "title": "" }, { "docid": "8fa03d33b8a887677fc1834bb7860f78", "score": "0.5156962", "text": "def _get_cpu(self):\n return self.__cpu", "title": "" }, { "docid": "e226d4d893a4287955e854c4031291bc", "score": "0.5143029", "text": "def createVariableName(self):\n self.globalData.numberForEqualElementsNames += 1\n variableName = \"var_\" + str(self.globalData.numberForEqualElementsNames)\n while True:\n if variableName in self.globalData.currentElementsNamesDict or variableName in self.globalData.variables:\n self.globalData.numberForEqualElementsNames += 1\n variableName = \"var_\" + str(self.globalData.numberForEqualElementsNames)\n else: \n break\n return variableName", "title": "" }, { "docid": "c5dcb8e4687e51fd4d9bef96cce46d3c", "score": "0.5141984", "text": "def regvar(self, name,val):\n self._vars[name] = tf.Variable(val,dtype=self.dtype)\n return self._vars[name]", "title": "" }, { "docid": "50155b18e807456757088ac704b6ad4b", "score": "0.5141112", "text": "def getMemory():\n return tracemalloc.take_snapshot()", "title": "" }, { "docid": "50155b18e807456757088ac704b6ad4b", "score": "0.5141112", "text": "def getMemory():\n return tracemalloc.take_snapshot()", "title": "" }, { "docid": "859eea3d83e590053e7dd1c02b05c1e7", "score": "0.5128848", "text": "def cpu(self):\n print('call cpu')\n return self._type(torch.FloatTensor)", "title": "" }, { "docid": "61bbfff9177ef014559c2009ebe38d4c", "score": "0.5120552", "text": "def __init__(self):\n # create 256 bites of memory\n self.ram = [0] * 256\n # 8 bit register\n self.reg = [0] * 8\n # program counter PC\n self.pc = 0", "title": "" }, { "docid": "d3a66b0f83bd20b01a32bb4f77e74ca7", "score": "0.5114127", "text": "def assign_var(self, name: str, value: ObjectInTime):\n if isinstance(value, (int, float)):\n self._spatial_interpreter.assign_var(name, value)\n else:\n\n assert isinstance(value,\n ObjectInTime), '<SpatialInterpreter>: value must 
be of type ' \\\n 'ObjectInTime or int/float! Got {}'.format(\n value)\n assert isinstance(name, str), '<SpatialInterpreter>: name must be of type string! Got {}'.format(name)\n\n self.vars[name.lower()] = value\n return value", "title": "" }, { "docid": "af8f95c6fd30fe90a4e7be98ca3717fb", "score": "0.5110003", "text": "def add_variable(self, varname, value, unit):\r\n if varname not in self.units:\r\n self.units[varname] = unit\r\n setattr(self, varname, value)", "title": "" }, { "docid": "c5cd10c0450b12506f78de9192511ce0", "score": "0.51089317", "text": "def v(self, name, *args, **kwargs):\n return self.get_variable(name, *args, **kwargs)", "title": "" }, { "docid": "40736f01d8116ef798b2cb08b8f489a8", "score": "0.51028377", "text": "def __init__(self):\n self.pc = 0 \n self.registers = [0] * 8\n # self.ram =[[0] * 8] * 256 #256 bytes of ram\n self.SP = 7\n self.ram = [0] * 256\n self.FL = 0", "title": "" } ]
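Several of the near-miss passages above are PyTorch `to_var` helpers built on the old `Variable` wrapper, which modern PyTorch has merged into `Tensor`. A hedged sketch of the same device-placement idiom in current PyTorch (the function name is invented for illustration):

```python
# Device-placement idiom from the torch passages, minus the deprecated
# Variable wrapper; `to_device` is an illustrative name, not from any passage.
import torch

def to_device(x: torch.Tensor, use_gpu: bool = torch.cuda.is_available()) -> torch.Tensor:
    return x.cuda() if use_gpu else x.cpu()

t = to_device(torch.zeros(3, 3), use_gpu=False)  # explicitly kept in host memory
```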
f94c54ea292a2de26628b579b34fc3eb
Return a new scenario tree with some scenarios replaced by their average value.
[ { "docid": "5cda6255fc665cb4292de1dd123a9e91", "score": "0.6675452", "text": "def average(scenario_tree: ScenarioTree, \n map_stage_to_rvar_names: Optional[Dict[int, List[str]]] = None) -> ScenarioTree:\n scen_tree = copy.deepcopy(scenario_tree)\n scen_tree.average(map_stage_to_rvar_names)\n return scen_tree", "title": "" } ]
[ { "docid": "05091a307b7093f5a2a0aaf3c23828af", "score": "0.60327834", "text": "def _average_across_tree(self, map_stage_to_rvar_names: Optional[Dict[int, List[str]]] = None):\n if map_stage_to_rvar_names is None:\n map_stage_to_rvar_names = self.map_stage_to_rvar_names\n for stage in map_stage_to_rvar_names.keys():\n for var_name in map_stage_to_rvar_names[stage]:\n avg_scen = np.mean(self.to_numpy({stage: [var_name]}), axis=0)\n for node in self.nodes_at_level(stage):\n node.data['scenario'][var_name] = avg_scen", "title": "" }, { "docid": "8d0d491f48d27048769e4323bd603565", "score": "0.5706734", "text": "def _average_across_children(self, map_stage_to_rvar_names: Optional[Dict[int, List[str]]] = None):\n if map_stage_to_rvar_names is None:\n map_stage_to_rvar_names = self.map_stage_to_rvar_names\n for stage in map_stage_to_rvar_names.keys():\n for node in self.nodes_at_level(stage - 1):\n for var_name in map_stage_to_rvar_names[stage]: \n avg_scen = np.mean([child.data['scenario'][var_name] for child in node.children], axis=0)\n for child in node.children:\n child.data['scenario'][var_name] = avg_scen", "title": "" }, { "docid": "2ce9a992a2a8cb5f5ef978fde9edefaf", "score": "0.53555316", "text": "def _fill_scenario(self, scenario_process):\n for node in self.nodes:\n node.data[\"W\"] = node.parent.data[\"W\"] * node.data[\"w\"] if not node.is_root else 1\n root_scenario = scenario_process.get_node_scenario(node, path=False)\n if root_scenario is not None:\n node.data[\"scenario\"] = root_scenario", "title": "" }, { "docid": "80ef061246d0d6df36caceba0061b242", "score": "0.51369506", "text": "def buildingTree(values, attributes):\n\t#print(len(attributes))\n\t#print(attributes)\n\t#counts of the different classes available\n\tGreyhoundCount = 0\n\tWhippetCount = 0\n\tfor temp in range(len(values)):\n\t\tif(values[temp][len(attributes)-1] == \"A\"):\n\t\t\tGreyhoundCount += 1\n\t\telse:\n\t\t\tWhippetCount += 1\n\t#print(GreyhoundCount)\n\t#print(WhippetCount)\n\t#If the class contains 60 percent or more similar instances then returning \n\tif((GreyhoundCount/len(values)) >= 0.65):\n\t\tprint(\"leafnode\" + \" A\")\n\t\treturn\n\telif((WhippetCount/len(values)) >= 0.65):\n\t\tprint(\"leafnode\" + \" B\")\n\t\treturn\n\t#Initializing the best attribute, best splot Attribute, best Impurity\t\n\tbestAtrribute = 0\n\tbestSplitAttribute = 0\n\tbestImpurity = math.inf\n\t#for each value in the attributes except the class name running a loop\n\tfor temp in range(len(attributes)-1):\n\t\t#sorting the data according to the selected attribute\n\t\tvalues = sortValues(values, temp)\n\t\t#print(values)\n\t\t#initializing best gini, best split value\n\t\tbestGini = math.inf\n\t\tbestSplitValue = 0\n\t\t#for each value in the attribute running a loop\n\t\tfor index in range(len(values)):\n\t\t\t#assigning the splitvalue\n\t\t\tsplitValue = values[index][temp]\n\t\t\t#print(values[index])\n\t\t\tcountGreyhoundLeft = 0\n\t\t\tcountWhippetLeft = 0\n\t\t\tcountGreyhoundRight = 0\n\t\t\tcountWhippetRight = 0\n\t\t\tcountInstanceLeft = 0\n\t\t\tcountInstanceRight = 0\n\t\t\tginiRight = 0\n\t\t\tginiLeft = 0\n\t\t\tprobabilityGreyhoundRight = 0\n\t\t\tprobabilityGreyhoundLeft = 0\n\t\t\tprobabilityWhippetRight = 0\n\t\t\tprobabilityWhippetLeft = 0\n\t\t\t# for each value in the range of values present in the attribute counting the left and the right\n\t\t\tfor index1 in range(len(values)):\n\t\t\t\tif values[index1][temp] <= splitValue:\n\t\t\t\t\tif values[index1][len(attributes)-1] == 
\"A\":\n\t\t\t\t\t\tcountGreyhoundLeft += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tcountWhippetLeft += 1\n\t\t\t\t\tcountInstanceLeft += 1\n\t\t\t\telse:\n\t\t\t\t\tif values[index1][len(attributes)-1] == \"A\":\n\t\t\t\t\t\tcountGreyhoundRight += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tcountWhippetRight += 1\n\t\t\t\t\tcountInstanceRight += 1\n\t\t\t#calculating the probabilities of the left and the right\n\t\t\tif countInstanceRight != 0:\n\t\t\t\tprobabilityGreyhoundRight = countGreyhoundRight/ countInstanceRight\n\t\t\t\tprobabilityWhippetRight = countWhippetRight/ countInstanceRight\n\t\t\t#calculating the gini index for right\n\t\t\tginiRight = 1 - (probabilityGreyhoundRight**2 + probabilityWhippetRight**2)\n\t\t\t#print(giniRight)\n\t\t\tif countInstanceLeft != 0:\n\t\t\t\tprobabilityGreyhoundLeft = countGreyhoundLeft/ countInstanceLeft\n\t\t\t\tprobabilityWhippetLeft = countWhippetLeft/ countInstanceLeft\n\t\t\t#calculating the gini index for left\n\t\t\tginiLeft = 1 - (probabilityGreyhoundLeft**2 + probabilityWhippetLeft**2)\n\t\t\t#print(giniLeft)\n\t\t\t#calculating the mixed gini index\n\t\t\tmixedGini = (((countInstanceRight/(countInstanceRight+countInstanceLeft))*giniRight) + ((countInstanceLeft/(countInstanceRight+countInstanceLeft))*giniLeft))\n\t\t\t#print(mixedGini)\n\t\t\t#updating the best Gini index if mixed Gini is better than the bestGini\n\t\t\tif mixedGini < bestGini:\n\t\t\t\tbestGini = mixedGini\n\t\t\t\tbestSplitValue = splitValue\n\t\t#print(bestGini)\n\t\t#print(bestSplitValue)\n\t\t#updating the best Impurity index if best Gini is better than the bestImpurity\n\t\tif bestGini < bestImpurity:\n\t\t\tbestImpurity = bestGini\n\t\t\tbestAtrribute = temp\n\t\t\tbestSplitAttribute = bestSplitValue\n\tprint(\"attribute =\" +str(bestAtrribute))\n\tprint(\"best split = \" + str(bestSplitAttribute))\n\t#initializing two list left and right\n\tvalueLeft = []\n\tvalueRight = []\n\t#assigning the values to the left and right lists based on the split value\n\tfor temp in range(len(values)):\n\t\tif values[temp][bestAtrribute] <= bestSplitAttribute:\n\t\t\tvalueLeft.append(values[temp])\n\t\telse:\n\t\t\tvalueRight.append(values[temp])\n\t#recursively calling the buildingTree function with left and right values\n\tbuildingTree(valueLeft,attributes)\n\tbuildingTree(valueRight,attributes)", "title": "" }, { "docid": "f2518f494707348ca74264aed3ddb954", "score": "0.5110703", "text": "def collapse_twostage(tree: ScenarioTree) -> ScenarioTree:\n assert tree.depth == 2, \"Scenario tree should be two-stage\"\n unique_scenarios, inverse_indices = np.unique(tree.to_numpy(), axis=0, return_inverse=True)\n weights = np.array([child.data[\"W\"] for child in tree.children])\n new_weights = [np.sum(weights[inverse_indices == index]) for index in range(len(unique_scenarios))]\n return twostage_from_scenarios(unique_scenarios, tree.map_stage_to_rvar_nb[1], new_weights)", "title": "" }, { "docid": "064f6d8b26807cd60880f42d72530ae6", "score": "0.5032071", "text": "def _fill_epsilon(self, scenario_process):\n self.data[\"w\"] = 1 # root\n for node in self.nodes:\n if not node.is_leaf:\n weights, epsilons = scenario_process.get_children_sample(node)\n random_indices = np.random.permutation(range(len(node.children)))\n for i, child in zip(random_indices, node.children):\n child.data[\"w\"] = weights[i]\n if epsilons is not None:\n child.data[\"eps\"] = epsilons[i]", "title": "" }, { "docid": "5baaee671e3670e2e8b8438ee9479218", "score": "0.49823263", "text": "def decision_tree_learning(\n examples,\n 
attributes,\n categorical,\n unique,\n parent_examples):\n if examples.empty:\n return plurality_value(parent_examples)\n elif all_the_same(examples):\n return examples[\"Survived\"].iloc[0]\n elif attributes.empty:\n return plurality_value(examples)\n else:\n A, split = importance(attributes, categorical, examples)\n root = Tree(A)\n new_attributes = copy.deepcopy(attributes)\n if split is None:\n # this means we have chosen a categorical attribute\n new_attributes = new_attributes.drop(A)\n for value in unique[A]:\n exs = examples.loc[examples[A] == value]\n # i call the function recursicely on the new shortened examples.\n subtree = decision_tree_learning(\n exs, new_attributes, categorical, unique, examples)\n root.add_child(value, subtree)\n else:\n # this means we have chosen a continuous\n examples = examples.sort_values(by=[A])\n head = examples.loc[examples[A] < split]\n tail = examples.loc[examples[A] > split]\n new_attributes = new_attributes.drop(A)\n # her kaller jeg funksjonen rekursivt på hver av split-eksemplene.\n subtree = decision_tree_learning(\n head, new_attributes, categorical, unique, examples)\n subtree2 = decision_tree_learning(\n tail, new_attributes, categorical, unique, examples)\n #denne split-verdien får jeg bruk for ved senere andledninger.\n root.split_value = split\n root.add_child(\"Less than\", subtree)\n root.add_child(\"Greater than\", subtree2)\n return root", "title": "" }, { "docid": "f1506cba5397749e9a832d2684679d2b", "score": "0.4913963", "text": "def getAverageFeature(func):\n def _func(series, tid):\n \"\"\"Analyzes a statistic and gives the average of it\n\n Arguments:\n series: An ordered list of games (in dict form)\n tid: The team which is the targeted for this series\n \"\"\"\n def _generator():\n \"\"\"The returned new generator. For each call yields the\n targeted team's value ONLY.\n \"\"\"\n # begin with zero since there are no priors for the first\n # game of the season. If predicting on post-season, then\n # possibly manually insert?\n yield 0\n avg = 0\n for i, game in enumerate(series):\n avg = (i * avg + func(game, tid)) / (i + 1.)\n if len(series) > i + 1:\n yield avg\n return _generator()\n return _func", "title": "" }, { "docid": "bc36f4a729b4aea5d6e30839374a74ed", "score": "0.4907241", "text": "def create_all_scenarios(self):\r\n for tree in self.trees:\r\n self.parse_tree(tree)", "title": "" }, { "docid": "28d375f7ad51de08a2e6d2d9892210b3", "score": "0.48580444", "text": "def tree_mean(pytrees_and_weights: Iterable[Tuple[T, float]]) -> T:\n sum_weighted_pytree = None\n sum_weight = 0.\n for pytree, weight in pytrees_and_weights:\n weighted_pytree = tree_weight(pytree, weight)\n if sum_weighted_pytree is None:\n sum_weighted_pytree = weighted_pytree\n else:\n sum_weighted_pytree = tree_sum(sum_weighted_pytree, weighted_pytree)\n sum_weight += weight\n inverse_weight = (1. / sum_weight) if sum_weight > 0. 
else 0.\n return tree_weight(sum_weighted_pytree, inverse_weight)", "title": "" }, { "docid": "2482c332d3688e5e71e9ebb04de39a8a", "score": "0.48532408", "text": "def test_2():\n test_graph = test_1()\n temp_test_graph = Sigmoid()\n test_graph.insert_tree(temp_test_graph, 'max', 'square')\n return test_graph", "title": "" }, { "docid": "387c98b439b42ea8c880e4d327f11bfc", "score": "0.48441073", "text": "def tree_mean(pytrees_and_weights: Iterable[Tuple[PyTree, float]]) -> PyTree:\n sum_weighted_pytree = None\n sum_weight = 0.\n for pytree, weight in pytrees_and_weights:\n weighted_pytree = tree_weight(pytree, weight)\n if sum_weighted_pytree is None:\n sum_weighted_pytree = weighted_pytree\n else:\n sum_weighted_pytree = tree_add(sum_weighted_pytree, weighted_pytree)\n sum_weight += weight\n return tree_inverse_weight(sum_weighted_pytree, sum_weight)", "title": "" }, { "docid": "32af3adf7fbb8764fdde003494fa678f", "score": "0.48401564", "text": "def merge(self, **kwargs):\n for node in self.nodes:\n if node.is_leaf:\n continue\n to_be_removed = []\n for k, child1 in enumerate(node.children):\n for child2 in node.children[k+1:]:\n # test if same scenario at child node\n is_equal = True\n for var_name in self.map_stage_to_rvar_names[node.level + 1]:\n if not np.isclose(child1.data['scenario'][var_name], \n child2.data['scenario'][var_name], \n **kwargs).all():\n is_equal = False\n break\n if is_equal:\n weight_coef = (child2.data[\"W\"] + child1.data[\"W\"]) / child2.data[\"W\"]\n for n in child2.nodes:\n n.data[\"W\"] *= weight_coef\n to_be_removed.append(child1)\n break\n for child in to_be_removed:\n child.remove()", "title": "" }, { "docid": "28fa02df3e092e906034265695fc22a4", "score": "0.48262548", "text": "def make_small_test_forest():\n # Sample forest, roughtly as a result of predicting 'Y = X_1'.\n _tree1 = make_tree('X_1', [.5], [Leaf(0.25), Leaf(0.75)])\n _tree2 = make_tree('X_1', [.5], [Leaf(0.30), Leaf(0.90)])\n _tree3 = make_tree('X_2', [.5], [\n make_tree('X_3', [.5], [Leaf(0.35), Leaf(0.75)]),\n Leaf(0.45)])\n _tree4 = Leaf(0.45)\n _tree5 = make_tree('X_3', [.5], [Leaf(0.30), Leaf(0.90)])\n _tree6 = make_tree('X_3', [.4, .6], [_tree1, _tree2, _tree3])\n _tree7 = make_tree('X_3', [.4, .6], [_tree6, _tree1, _tree6])\n _forest = Forest([_tree1,\n _tree2,\n _tree3,\n _tree4,\n _tree5,\n _tree5,\n _tree6,\n _tree7,\n make_tree('X_1', [.2, .3], [Leaf(.1), Leaf(.25), Leaf(.65)])])\n return _forest", "title": "" }, { "docid": "28f3ba1e2c16e66add7b5c92f1eb3f77", "score": "0.48241034", "text": "def __replacement__(self, _descendants: list[Board]) -> None:\n\n idx: int = 0 # Index for _descendants\n for _ in range(self.size_poblat):\n if self.individuals[_].get_fn() > self.average:\n self.individuals[_] = _descendants[idx]\n idx += 1\n if idx >= self.size_parents: # len(_descendants)\n break\n self.__list_average__()", "title": "" }, { "docid": "65f693fbf5137a56c36a98320937982f", "score": "0.47893366", "text": "def avg_surveys(survey, depth_of_interest=None):\n if isinstance(depth_of_interest, float) is False:\n depth_of_interest = survey[0][-1]\n\n delta_md, avg_inc, avg_azi, dls = list(), list(), list(), list()\n i = 1\n while survey[0][i] <= depth_of_interest:\n delta_md.append(np.abs(survey[0][i] - survey[0][i - 1]))\n dls.append(dogleg_severity([survey[1][i - 1], survey[1][i]], [survey[2][i - 1], survey[2][i]]))\n avg_inc.append(np.average(np.array([survey[1][i], survey[1][i - 1]])))\n avg_azi.append(np.average(np.array([survey[2][i], survey[2][i - 1]])))\n i += 1\n if i == 
len(survey[0]):\n break\n\n return delta_md, avg_inc, avg_azi, dls", "title": "" }, { "docid": "0db3d2f65d983c5f0f568fbc60a61100", "score": "0.47884336", "text": "def make_forest(self, trees, amount):\n self.classifiers = []\n #main loop for trees\n for tree in range(0,trees):\n victim = decisionTree.decisionTree(len(self.attributes), True)\n\n for att in self.attributes:\n victim.add_attribute(att)\n victim.add_labels(self.add_labels)\n \n #pick random training amount times\n for trn in range(0,amount):\n rng = random.randint(0,len(self.training))\n victim.add_training(self.training[rng])\n \n #make the individual tree and add it to the list\n victim.make_random('ID3', -1)\n self.classifiers.append(victim)", "title": "" }, { "docid": "f103f1ab85616009c16823ee21b5fcbc", "score": "0.4759136", "text": "def get_mean_yngve(treestrings):\n\tcount = 0\n\ttotal = 0\n\tfor treestring in treestrings:\n\t\tresults = yngve_redux(treestring)\n\t\ttotal += results[0]\n\t\tcount += results[1]\n\treturn float(total / count)", "title": "" }, { "docid": "3ae28156cf64f32cc8424c54d6ca5e4a", "score": "0.47372058", "text": "def grow(self, examples, attributes, branchCount):\n\t\ttree = list()\n\t\tappend = tree.append\n\n\t\t\"\"\" intialize node components to False/None \"\"\"\n\t\tisNode = False\n\t\tbest = None\n\t\tnode = None\n\t\tnumVal = None\n\n\t\t\"\"\" if set of examples is homogenous in class, then make a leaf else make a node and grow again\"\"\"\n\t\tif self.homogenous(examples):\n\t\t\tappend((isNode, best, Leaf.Leaf(examples), numVal))\n\n\t\telse:\n\t\t\tbest = self.bestAttribute(examples, attributes, branchCount)\n\t\t\tsplit = self.split(examples, attributes, best, branchCount)\n\n\t\t\tif attributes.get(best).isNumeric():\n\t\t\t\tnumVal = self.getNumericBranches(examples, best, branchCount)\n\n\t\t\tfor e in split:\n\t\t\t\t\"\"\" only grow sets containing examples (i.e., non-empty sets) \"\"\"\n\t\t\t\tif len(e) > 0:\n\t\t\t\t\tisNode = True\n\t\t\t\t\tnode = Node(e, attributes, branchCount)\n\t\t\t\t\n\t\t\t\t\"\"\" make node \"\"\"\n\t\t\t\tappend((isNode, best, node, numVal))\n\n\t\treturn tree", "title": "" }, { "docid": "7c4de1b7e413c72a2000f5aba426cd17", "score": "0.47266185", "text": "def node_avg():\n node_raw = [\"average\", 0, 0, 0]\n for node in node_stats():\n node_raw[1] += float(node[1])\n node_raw[2] += float(node[2])\n node_raw[3] += float(node[3])\n\n num = len(node_stats())\n node_avg = [\"average\",\n \"{:.2f}\".format(node_raw[1]/num),\n \"{:.2f}\".format(node_raw[2]/num),\n \"{:.2f}\".format(node_raw[3]/num)]\n return node_avg", "title": "" }, { "docid": "f009f991e37e5cd2e55f3b3f9c349831", "score": "0.4703033", "text": "def _calculate_step_average(self, child_1_data, child_2_data):\n # Get Trues (1) where child_2 (TCap) is greater than child_1 (CS)\n # Otherwise False (0)\n bool_series = child_1_data < child_2_data\n # Treat those bools as 1 and 0 and get mean\n bool_scalar_average = bool_series.mean()\n # Create a long array of that mean\n vuln_data = np.full(\n len(bool_series),\n bool_scalar_average\n )\n # And put it in a series\n vuln = pd.Series(\n data=vuln_data,\n index=bool_series.index\n )\n return vuln", "title": "" }, { "docid": "b46c7475b9ebe7f14d0190482d6c2bb5", "score": "0.46987978", "text": "def getRandomAST(self, max_depth):\r\n root = self.getRandomOperationNode()\r\n root.randomGrowth(max_depth, self)\r\n root.updateDepth(max_depth)\r\n return copy.deepcopy(root)", "title": "" }, { "docid": "c0bc47d0b44aa260221601733157de1f", "score": "0.46824756", 
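# --- Illustrative sketch; not a passage from the corpus above ---------------
# Nearby passages (make_forest, getRandomAST) grow ensembles of random trees
# but stop short of showing how their predictions are combined. A minimal
# majority-vote aggregation is sketched below; `forest` and `predict_one`
# are hypothetical stand-ins, not identifiers taken from those passages.
from collections import Counter

def majority_vote(forest, instance, predict_one):
    """Classify `instance` by letting every tree in `forest` cast one vote."""
    votes = Counter(predict_one(tree, instance) for tree in forest)
    return votes.most_common(1)[0][0]
# ---------------------------------------------------------------------------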
"text": "def mutate_scenario(self, old_scen, new_scen, startwith=None,\n exclude_solvers=[], ext=None):\n\n if new_scen in self.data:\n raise NameError(\"{} already used as scenario-name\".format(new_scen))\n\n # Copy scenario\n self.data[new_scen] = self.data[old_scen].deepcopy()\n\n # Exclude solvers\n for solver in exclude_solvers:\n for inst in self.data[old_scen]:\n del self.data[new_scen][inst]['solvers'][solver]\n\n if startwith:\n for inst in self.data[old_scen]:\n if not inst.startswith(startwith):\n self.data[new_scen].pop(inst)\n\n if ext:\n raise NotImplementedError()\n for inst in self.data[old_scen]:\n if os.path.splitext(inst)[1] != ext:\n self.data[new_scen][os.path.splitext(inst)[0]+\".\"+ext] = self.data[new_scen].pop(inst)\n\n return self.get_instances(new_scen), self.local_paths(new_scen, self.get_instances(new_scen))", "title": "" }, { "docid": "d5a70caea8fdfcdda3f3ee0a764cc188", "score": "0.46444592", "text": "def _make_leaf(self, training_set):\n return _mean(training_set[1])", "title": "" }, { "docid": "48d00b1f7bc7654e0fbf5400741c2104", "score": "0.4640776", "text": "def testDivisions_golden(self):\n filename = six.text_type(\n os.path.join(resource_loader.get_data_files_path(),\n '../testdata/IMSLP00747.golden.xml'))\n score = etree.fromstring(open(filename, 'rb').read())\n part_staves = musicxml.PartStaves(score)\n self.assertEqual(part_staves.num_partstaves(), 2)\n self.assertEqual(part_staves.num_measures(0), 22)\n self.assertEqual(part_staves.num_measures(1), 22)\n for i in moves.xrange(2):\n for j in moves.xrange(22):\n measure = part_staves.get_measure(i, j)\n self.assertEqual(measure.find('attributes/divisions').text, '8')", "title": "" }, { "docid": "859f0737c0807d2b0d936c465a4dbb0b", "score": "0.46385273", "text": "def make_avg_ratings():\n def get_title_close_match(title):\n \"\"\" Return the closest match for movie title \"\"\"\n matches = dl.get_close_matches(title, possible_titles)\n if len(matches) > 0:\n # get the best match (the valid title from possible_titles)\n return matches[0]\n else:\n return ''\n\n get_title_close_match = np.vectorize(\n get_title_close_match)\n global df_ratings\n df_ratings['title'] = get_title_close_match(\n df_ratings['title']) # get valid titles\n df_ratings = df_ratings[df_ratings['title'] != ''] # remove invalid titles\n df_ratings = df_ratings.sort_values(\n by=['title']).groupby(df_ratings['title']).mean().round(2)", "title": "" }, { "docid": "3a295b60a990f250b2c25a6f6dab99db", "score": "0.46081585", "text": "def update_state_mean(self):\n self.state_mean = np.mean(self.state_ensemble, axis=1)\n self.vanilla_state_mean = np.mean(self.vanilla_state_ensemble, axis=1)", "title": "" }, { "docid": "c21c03fc8801313d88a1b49a756150b8", "score": "0.4592287", "text": "def estimate_random_forest():\n dict_full = get_feature_percentage(\n \"training_sets/kz/training_data_full.json\",\n \"training_sets/kz/paradigm_lengths.json\",\n \"category_description/kazakh.json\"\n )\n dict_thr = get_feature_percentage(\n \"training_sets/kz/training_data_thresholded.json\",\n \"training_sets/kz/paradigm_lengths.json\",\n \"category_description/kazakh.json\"\n )\n print 'Feature\\tin full\\tin thresholded'\n for feature_name in dict_full:\n print '%s\\t%f\\t%f' % (feature_name, dict_full[feature_name], dict_thr[feature_name])", "title": "" }, { "docid": "64cdf84077c67c655f5c75bbe15e03c2", "score": "0.4591497", "text": "def get_average(self):\n return self.standard_goals / self.games", "title": "" }, { "docid": 
"7fe733e8d5a8c1bdc3f52b55bcb13160", "score": "0.45646083", "text": "def apply_scenario(scenario, test):\n name, parameters = scenario\n scenario_suffix = '(' + name + ')'\n newtest = clone_test_with_new_id(test,\n test.id() + scenario_suffix)\n test_desc = test.shortDescription()\n if test_desc is not None:\n newtest_desc = \"%(test_desc)s %(scenario_suffix)s\" % vars()\n newtest.shortDescription = (lambda: newtest_desc)\n for key, value in parameters.items():\n setattr(newtest, key, value)\n return newtest", "title": "" }, { "docid": "69b4c887ede2c57aedf9aa9c44368f9b", "score": "0.45303738", "text": "def get_weighted_mean(self):\n # Mean physical parameters:\n self.g_no2_vmr = np.divide(self.g_no2_vmr, self.g_gaus_wgt, where=self.g_cnt != 0)\n self.g_cld_fr = np.divide(self.g_cld_fr, self.g_cnt, where=self.g_cnt != 0)\n self.true_no2 = np.divide(self.true_no2, self.g_gaus_wgt, where=self.g_cnt != 0)\n self.g_cld_p = np.divide(self.g_cld_p, self.g_cnt, where=self.g_cnt != 0)\n self.true_o3 = np.divide(self.true_o3, self.g_gaus_wgt, where=self.g_cnt != 0)\n self.g_askut_no2 = np.divide(self.g_askut_no2, self.g_ask_gaus_wgt, where=self.g_as_cnt != 0)\n # Mean cloud-sliced UT NO2 error:\n self.g_slope_err = np.divide(self.g_slope_err, self.g_gaus_wgt, where=self.g_cnt != 0)\n # Mean weights:\n self.g_gaus_wgt = np.divide(self.g_gaus_wgt, self.g_cnt, where=self.g_cnt != 0)\n # No data (nan):\n self.true_no2[self.g_cnt == 0] = np.nan\n self.true_o3[self.g_cnt == 0] = np.nan\n self.g_slope_err[self.g_cnt == 0] = np.nan\n self.g_gaus_wgt[self.g_cnt == 0] = np.nan\n self.g_askut_no2[self.g_as_cnt == 0] = np.nan\n self.g_no2_vmr[self.g_cnt == 0] = np.nan\n self.g_cld_fr[self.g_cnt == 0] = np.nan\n self.g_cld_p[self.g_cnt == 0] = np.nan", "title": "" }, { "docid": "abe1d5b432c3c454ef60ca7ca4ae3292", "score": "0.45260942", "text": "def make_full(self, trees, amount):\n self.classifiers = []\n #main loop for trees\n for tree in range(0,trees):\n victim = decisionTree.decisionTree(len(self.attributes), True)\n\n for att in self.attributes:\n victim.add_attribute(att)\n\n victim.add_labels(self.labels)\n \n #pick random training amount times\n for trn in range(0,amount):\n rng = random.randint(0,len(self.training)-1)\n victim.add_training(self.training[rng])\n \n #make the individual tree and add it to the list\n victim.make_tree('ID3', -1)\n self.classifiers.append(victim)", "title": "" }, { "docid": "a7df01a1bc7515ff6af6adf2addf96a7", "score": "0.45184442", "text": "def summarize(self):\n # To change the file name of stopped trees\n if self.percent_threshold > 0:\n stopped_file_name = 'stopped_'\n else:\n stopped_file_name = 'full_'\n\n # Initial Variables\n all_test_results = pd.DataFrame()\n average_mse = 0\n mse = 0\n\n # Combine the test results into one single data set and average out the MSE\n for index in range(5):\n # Append results\n test_results = self.test_results[index]\n all_test_results = all_test_results.append(test_results)\n\n # Add MSE for this index\n mse += ((test_results.iloc[:, -2] - test_results.iloc[:, -1]) ** 2).sum()\n mse = mse / len(test_results)\n\n # Average for this index\n average_mse += mse\n\n # CSV output\n all_test_results.to_csv(f'output_{self.data_name}\\\\{self.data_name}_{stopped_file_name}test_results.csv')\n\n # Calculate average MSE\n average_mse = average_mse / 5\n\n # Save\n self.summary = {\n 'test': {\n 'threshold': self.percent_threshold,\n 'mse': average_mse\n }\n }\n\n # Output JSON\n with 
open(f'output_{self.data_name}\\\\{self.data_name}_{stopped_file_name}summary.json', 'w') as file:\n json.dump(self.summary, file)\n\n # If a tune was performed we'll also average over each of the thresholds\n if self.perform_tune:\n # Initial Variables\n self.tune_summary = {}\n\n # Loop through each percent threshold\n for percent_threshold in self.tune_results.keys():\n mse = 0\n\n # Add up MSE\n for index in range(5):\n mse += self.tune_results[percent_threshold][index]\n\n # Average\n mse = mse / 5\n\n # Save\n self.tune_summary.update({percent_threshold: mse})\n\n # Output JSON\n with open(f'output_{self.data_name}\\\\{self.data_name}_tune_summary.json', 'w') as file:\n json.dump(self.tune_summary, file)", "title": "" }, { "docid": "c419c770d4d9fa88a0bd5329a8bf3863", "score": "0.45136347", "text": "def average(self, \n map_stage_to_rvar_names: Optional[Dict[int, List[str]]] = None,\n across_tree: bool = True):\n if across_tree:\n self._average_across_tree(map_stage_to_rvar_names)\n else:\n self._average_across_children(map_stage_to_rvar_names)", "title": "" }, { "docid": "b3429e8da58532fb1b5c434e5bfa241c", "score": "0.45087326", "text": "def sample(self):\n def recurse(cat, depth):\n cur_rules = self.cat_to_rules[cat]\n total_weight = float(sum(r.weight for r in cur_rules))\n rule = numpy.random.choice(\n cur_rules, p=[r.weight / total_weight for r in cur_rules])\n child_values = [recurse(c, depth+1) for c in rule.rhs]\n return rule.apply_to(child_values, )\n root_deriv = recurse(self.ROOT, 0)\n root_deriv.sem = VariableGenerator.normalize(root_deriv.sem_fn(),\n self.create_generator())\n return root_deriv", "title": "" }, { "docid": "9384179172d23333ee54e833c9247303", "score": "0.45001078", "text": "def calc_mean_tree_depths(self) -> float:\n return numpy.mean([tree.depth for tree in self.trees])", "title": "" }, { "docid": "f6c8f793b40b8ddd6f3a48ae9a3d042e", "score": "0.44978374", "text": "def calculate_scenario_ranks(self):\n tree = sputils._ScenTree(self.branching_factors, self.all_scenario_names)\n\n self.scenario_names_to_rank, self._rank_slices, self._scenario_slices =\\\n tree.scen_names_to_ranks(self.n_proc)\n self._scenario_tree = tree", "title": "" }, { "docid": "066525e67f84ea7b6831b2b44e781948", "score": "0.44943357", "text": "def Average(self, *others):\n others_nodes = []\n for other in others:\n if isinstance(other, Node):\n if other._use_dual != self._use_dual:\n raise NotImplementedError(\"Not Implemented!\")\n x = self._tonode(other)\n x._use_dual = self._use_dual\n others_nodes.append(x)\n\n return n_ary(\n self,\n others_nodes,\n \"Average({},{})\".format(\n self._name_no_id, \",\".join(other._name_no_id for other in others_nodes)\n ),\n (\n lambda *args: sum(x for x in args) / len(args)\n if not self._use_dual\n else (\n (\n sum([x[0] for x in args]) / len(args),\n sum(x[1] for x in args) / len(args),\n )\n )\n ),\n )", "title": "" }, { "docid": "2d1d788d715553296c347efa1222c699", "score": "0.44924015", "text": "def feature_average(data: pd.DataFrame, feature: str):\n values = data[feature]\n temp = (values != 0)\n average = values[temp].to_numpy().mean()\n return values.replace(0, average).replace(np.nan, average)", "title": "" }, { "docid": "9f5dbad116169f688e90559fa9ace56b", "score": "0.44912198", "text": "def create_scenario( self, goal_mult = 1 ):\n scenario = copy.copy( self.scenario )\n scenario.restart()\n playfield = Playfield()\n playfield.load( Level.get_filename( self.get_current_level_nr() ) )\n scenario.playfield = playfield\n return scenario", 
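# --- Illustrative usage sketch; not a passage from the corpus above ---------
# feature_average above imputes zero entries with the mean of the non-zero
# entries. The core pandas idiom it relies on, shown on toy data:
import pandas as pd

values = pd.Series([2.0, 0.0, 4.0])
nonzero_mean = values[values != 0].mean()  # (2.0 + 4.0) / 2 == 3.0
imputed = values.replace(0, nonzero_mean)  # 2.0, 3.0, 4.0
assert list(imputed) == [2.0, 3.0, 4.0]
# ---------------------------------------------------------------------------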
"title": "" }, { "docid": "407e6b0cde157f9600946c057a8c81c3", "score": "0.44879463", "text": "def generate_scenarios(test_or_suite):\n for test in iterate_tests(test_or_suite):\n scenarios = getattr(test, 'scenarios', None)\n if scenarios:\n for newtest in apply_scenarios(scenarios, test):\n newtest.scenarios = None\n yield newtest\n else:\n yield test", "title": "" }, { "docid": "f3573621c93cd99661227b61d2fc1809", "score": "0.44783476", "text": "def average(*attrs):\n if len(attrs) == 1:\n attrs = attrs[0]\n\n return _create_operation_node(\"average\", attrs)", "title": "" }, { "docid": "6f6266e1f6922a5e6ea8f25e02bca6eb", "score": "0.4471035", "text": "def resolve_by_meaningful_avg(table, attribute):\n resolvedTable = copy.deepcopy(table)\n for row in resolvedTable:\n for index in range(len(row)):\n if row[index] == 'NA':\n newTable = build_yr_attr_table(table, row[6], row[attribute], attribute)\n row[index] = average(newTable, index)\n return resolvedTable", "title": "" }, { "docid": "237e89f1892f1e0e14cc802627740734", "score": "0.4464263", "text": "def test_cognitive_average_default_options(\n assert_errors,\n parse_ast_tree,\n template,\n code,\n default_options,\n mode,\n):\n tree = parse_ast_tree(mode(template.format(code)))\n\n visitor = CognitiveComplexityVisitor(default_options, tree=tree)\n visitor.run()\n\n assert_errors(visitor, [])", "title": "" }, { "docid": "bb33131d920b21af8fc00f22bc536846", "score": "0.44583166", "text": "def get_average(self):\n # TODO: implement me", "title": "" }, { "docid": "98236bd532eec1a5a0f7b2856f8a0504", "score": "0.44561368", "text": "def forward_generation_from_given_width(cls, \n width_vector: List[int], \n scenario_process: ScenarioProcess, \n variability_process: VariabilityProcess, \n alpha: float):\n last_stage = len(width_vector)\n tree = cls.from_data_dict({(): {\"M\": width_vector[0], \"W\": 1, \"w\": 1}}) \n root_scenario = scenario_process.get_node_scenario(tree, path=False)\n if root_scenario is not None:\n tree.data[\"scenario\"] = root_scenario \n tree.data[\"g\"] = variability_process.node_lookback(tree)\n \n # difference between the actual width of the leaves and the target one\n node_gap = lambda tree: sum(leaf.data[\"M\"] for leaf in tree.leaves) - width_vector[tree.depth-1]\n \n for stage in range(1, last_stage):\n # 1. Extend and fill the structure\n tree._extend_tree_by_one_stage(scenario_process)\n for leaf in tree.leaves:\n leaf.data[\"g\"] = variability_process.node_lookback(leaf)\n # 2. Compute the optimal number of child nodes\n if width_vector[stage] == width_vector[stage-1]: \n for leaf in tree.leaves:\n leaf.data[\"M\"] = 1\n else:\n normalization = sum((leaf.data[\"W\"] * leaf.data[\"g\"])**(1/(alpha+1)) for leaf in tree.leaves)\n for leaf in tree.leaves:\n leaf.data[\"m\"] = (width_vector[stage] / normalization) \\\n * (leaf.data[\"W\"] * leaf.data[\"g\"])**(1/(alpha+1))\n leaf.data[\"M\"] = int(max(1, round(leaf.data[\"m\"])))\n\n # 3. 
Correct the rounding off of the number of child nodes (if necessary) so that the actual width \n # equal the target\n while node_gap(tree) > 0:\n leaf = min([leaf for leaf in tree.leaves if leaf.data[\"M\"] >= 2], \n key = lambda leaf: abs(leaf.data[\"m\"] - (leaf.data[\"M\"] - 1)))\n leaf.data[\"M\"] = leaf.data[\"M\"] - 1 # remove one child\n \n while node_gap(tree) < 0:\n leaf = min(tree.leaves, \n key = lambda leaf: abs(leaf.data[\"m\"] - (leaf.data[\"M\"] + 1)))\n leaf.data[\"M\"] = leaf.data[\"M\"] + 1 # add one child\n \n # extend and fill the last stage\n tree._extend_tree_by_one_stage(scenario_process)\n # delete temporary data\n tree.delete_data([\"M\", \"m\", \"g\"])\n assert tree.width == list(width_vector), (\"Missmatch between the actual tree width and the target one \"\n f\"actual width: {tree.width}, target width: {list(width_vector)}\")\n return tree", "title": "" }, { "docid": "818b0b0eeed4ac3c46ae926c1a7102dc", "score": "0.4450179", "text": "def sub_gr_forest(self, nodes, weights):\n return self._sub_forest(nodes, weights, 1.0, [tree.sub_gr_tree for tree in self._trees])", "title": "" }, { "docid": "167b2615cf7c8bd6a8e2f072ad013bb7", "score": "0.44391316", "text": "def __init__(self, trees):\r\n self.trees = trees\r\n self.scenarios = []", "title": "" }, { "docid": "1a0d78eb2bb20dcb17fc6e47d5f93ab9", "score": "0.44364217", "text": "def division_random_spanning_tree(graph, division_tuples=[(\"COUNTYFP10\", 1)]):\n weights = {edge:1 for edge in graph.edges}\n for edge in graph.edges:\n for (division_col, penalty) in division_tuples:\n if graph.nodes[edge[0]][division_col] != graph.nodes[edge[1]][division_col]:\n weights[edge] += penalty\n graph.edges[edge][\"weight\"] = weights[edge] + random.random()\n\n spanning_tree = tree.minimum_spanning_tree(\n graph, algorithm=\"kruskal\", weight=\"weight\"\n )\n return spanning_tree", "title": "" }, { "docid": "478cb7d50ed225df03b851893383c890", "score": "0.44316638", "text": "def grow_tree(self):\n # Making a df from the data \n df = self.X.copy()\n df['Y'] = self.Y\n #print(self.depth, self.max_depth)\n #print(self.n, self.min_samples_split)\n # If there is GINI to be gained, we split further \n if (self.depth < self.max_depth) and (self.n >= self.min_samples_split):\n\n # Getting the best split \n best_feature, best_value = self.best_split()\n #print(best_feature, best_value)\n if best_feature is not None:\n # Saving the best split to the current node \n self.best_feature = best_feature\n self.best_value = best_value\n\n # Getting the left and right nodes\n left_df, right_df = df[df[best_feature]<=best_value].copy(), df[df[best_feature]>best_value].copy()\n\n # Creating the left and right nodes\n left = Node(\n left_df['Y'].values.tolist(), \n left_df[self.features], \n depth=self.depth + 1, \n max_depth=self.max_depth, \n min_samples_split=self.min_samples_split, \n node_type='left_node',\n rule=f\"{best_feature} <= {round(best_value, 3)}\",\n category_num=self.category_num\n )\n\n self.left = left \n self.left.grow_tree()\n\n right = Node(\n right_df['Y'].values.tolist(), \n right_df[self.features], \n depth=self.depth + 1, \n max_depth=self.max_depth, \n min_samples_split=self.min_samples_split,\n node_type='right_node',\n rule=f\"{best_feature} > {round(best_value, 3)}\",\n category_num=self.category_num\n )\n\n self.right = right\n self.right.grow_tree()", "title": "" }, { "docid": "350a14d559d541ffdb1999256b697067", "score": "0.44269994", "text": "def simulate_tree_depth(general_settings, tree_depth_settings, model):\n # 
MSE + Variance + Bias + Error = 4\n size_mse_decomp = 4\n # Create an array that describes minimal leaf sizes.\n # As we want to start from high to low, we turn the array around with\n # [::-1].\n # We add the step size again to the maximal value as we want it to be\n # included.\n min_split_array = (\n np.arange(\n tree_depth_settings['min_split'],\n tree_depth_settings['max_split'] +\n tree_depth_settings[\"steps_split\"],\n tree_depth_settings[\"steps_split\"]\n )[::-1]\n )\n\n # Create arrays to save the MSE, Bias, Variance + Noise for each split\n # specification.\n output_array_bagging = (\n np.ones((min_split_array.size, size_mse_decomp)) * np.nan\n )\n output_array_tree = (\n np.ones((min_split_array.size, size_mse_decomp)) * np.nan\n )\n\n # Create a MonteCarloSimulation instance that defines the attributes For\n # the data generating process and will be constant for the tree and\n # bagging simulation.\n simulation_basis = (\n MonteCarloSimulation(\n n_repeat=general_settings['n_repeat'],\n noise=general_settings['noise'],\n data_process=model,\n n_test_train=general_settings['n_test_train'],\n random_seeds=general_settings['random_seeds']\n )\n )\n # We simulate the MSE for Bagging and Trees for the different splits, while\n # keeping the data generating process constant.\n for index, split in enumerate(min_split_array):\n output_bagging = (\n simulation_basis.calc_mse(\n ratio=general_settings['bagging_ratio'],\n bootstrap=True,\n min_split_tree=split,\n b_iterations=general_settings[\"b_iterations\"]\n )\n )\n # Note: Subagging(bootstrap=False) with ratio = 1 -> Tree\n output_tree = (\n simulation_basis.calc_mse(\n ratio=general_settings['bagging_ratio'],\n bootstrap=False,\n min_split_tree=split,\n b_iterations=general_settings[\"b_iterations\"]\n )\n )\n\n output_array_bagging[index, :] = output_bagging\n output_array_tree[index, :] = output_tree\n\n return output_array_bagging, output_array_tree", "title": "" }, { "docid": "e82ee075067b0e518060df87525ea900", "score": "0.44201848", "text": "def create_default_tree() -> SkillDecisionTree:\n # TODO: Return a SkillDecisionTree that matches the one in a2.pdf.\n\n t2 = SkillDecisionTree(MageAttack(), checker2, 3,\n [SkillDecisionTree(RogueSpecial(),\n checker3,\n 4,\n [SkillDecisionTree\n (RogueAttack(),\n general, 6)])])\n t3 = SkillDecisionTree\\\n (MageSpecial(), checker4, 2,\n [SkillDecisionTree(RogueAttack(),\n general, 8)])\n t4 = SkillDecisionTree(\n RogueAttack(), checker5, 1,\n [SkillDecisionTree(RogueSpecial(),\n general, 7)])\n\n t1 = SkillDecisionTree(MageAttack(), checker1, 5, [t2, t3, t4])\n return t1", "title": "" }, { "docid": "c38e43bc4953939a9850e4bf83b4c923", "score": "0.44186708", "text": "def grow_tree(self, node, X, y, depth):\n\n if isinstance(self, RegressionTree):\n node.mean_dist = np.mean(y)\n\n else:\n node.mean_dist = common = stats.mode(y)[0][0]\n if y.size < 2:\n return node\n if isinstance(self, ClassificationTree) and self.test_purity(y):\n return node\n\n data_left, data_right = self.split(X, y, node)\n\n if self.not_worth_splitting(data_left, data_right, depth):\n return node\n\n left = DecisionNode()\n right = DecisionNode()\n node.left = self.grow_tree(left, data_left[0], data_left[1], depth + 1)\n node.right = self.grow_tree(right, data_right[0], data_right[1], depth + 1)\n\n return node", "title": "" }, { "docid": "d3e2257a139e81dbb4f9ad2d3b48c585", "score": "0.43899414", "text": "def average_score(self):\n return self.score / self.instances", "title": "" }, { "docid": 
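# --- Illustrative sketch; not a passage from the corpus above ---------------
# The build_tree passage just below calls gini_impurity and divide_data
# without defining them. One implementation consistent with how they are
# used (class labels in the last column; impurity of a two-way split is the
# size-weighted sum) might look like this. It is an assumption about those
# helpers, not their actual source.
from collections import Counter

def gini_impurity(group_a, group_b):
    """Size-weighted Gini impurity of two row groups (label = last column)."""
    total = len(group_a) + len(group_b)
    if total == 0:
        return 0.0
    impurity = 0.0
    for group in (group_a, group_b):
        if not group:
            continue
        counts = Counter(row[-1] for row in group)
        p_squared = sum((c / len(group)) ** 2 for c in counts.values())
        impurity += (len(group) / total) * (1.0 - p_squared)
    return impurity

def divide_data(data, column, value):
    """Split rows on equality with `value` in `column` (categorical split)."""
    matches = [row for row in data if row[column] == value]
    rest = [row for row in data if row[column] != value]
    return matches, rest
# ---------------------------------------------------------------------------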
"d3e2257a139e81dbb4f9ad2d3b48c585", "score": "0.43899414", "text": "def average_score(self):\n return self.score / self.instances", "title": "" }, { "docid": "8f4022b2b2a0e04860712fe14c46df29", "score": "0.43862313", "text": "def build_tree(data, current_depth=0, max_depth=1e10):\n if len(data) == 0:\n return DecisionNode(is_leaf=True)\n\n if(current_depth == max_depth):\n return DecisionNode(current_results=dict_of_values(data))\n\n if(len(dict_of_values(data)) == 1):\n return DecisionNode(current_results=dict_of_values(data), is_leaf=True)\n\n #This calculates gini number for the data before dividing \n self_gini = gini_impurity(data, [])\n\n #Below are the attributes of the best division that you need to find. \n #You need to update these when you find a division which is better\n best_gini = 1e10\n best_column = None\n best_value = None\n #best_split is tuple (data1,data2) which shows the two datas for the best divison so far\n best_split = None\n\n #You need to find the best feature to divide the data\n #For each feature and each possible value of the feature compute the \n # gini number for that division. You need to find the feature that minimizes\n # gini number. Remember that last column of data is Y\n # Think how you can use the divide_data and gini_impurity functions you wrote \n # above\n\n for i in range(len(data[0]) -1):\n results = defaultdict(int)\n for row in data:\n r = row[i]\n results[r] += 1\n unique_val = dict(results)\n unique_val = unique_val.keys()\n\n for values in unique_val:\n t1, t2 = divide_data(data, i, values)\n g = gini_impurity(t1, t2)\n if g < best_gini:\n best_gini = g\n best_column = i\n best_value = values\n best_split = [t1, t2]\n\n\n if abs(self_gini - best_gini) < 1e-10:\n return DecisionNode(current_results=dict_of_values(data), is_leaf=True)\n else:\n current_depth = current_depth + 1\n d1 =build_tree(best_split[0], current_depth, max_depth = max_depth)\n d2 =build_tree(best_split[1], current_depth, max_depth = max_depth)\n return DecisionNode(column=best_column, value=best_value,\n false_branch=d2, true_branch=d1,\n current_results=dict_of_values(data),\n is_leaf=False,\n results=None)", "title": "" }, { "docid": "cf92c2e327542e01dedf3eaeb3723966", "score": "0.43749413", "text": "def map_as_tree(discussions: list, with_tqdm=True) -> None:\n loop_wrap = tqdm if with_tqdm else lambda x: x\n METADATA[votes] = {}\n METADATA[parents] = {}\n METADATA[children] = {}\n\n discussions = useful_discussions_from(discussions)\n\n for discussion in loop_wrap(discussions):\n claims = discussion['arguments']['claims']\n claims[:] = [merge_location_data(claim, discussion) for claim in claims]\n claims[:] = useful_claims_from(claims)\n \n for claim in claims:\n claim['votes'] = votes_for(claim, discussion['votes'])\n \n \n thesis = next(claim for claim in claims if is_thesis(claim['id']))\n total_votes = discussion['statistics']['voteCount']\n set_tree_metadata(thesis, total_votes)\n \n claims[:] = [claim for claim in claims if 'level' in claim]\n max_tree_level = max([claim['level'] for claim in claims])\n for claim in claims:\n if claim['level'] > 0:\n claim['weight'] = claim['avg_impact'] + (max_tree_level / claim['level'])\n \n discussion['thesis_robustness'] = traverse_robustness(thesis, discussion, assign_values=True)\n discussion['avg_veracity'] = sum(thesis['votes'].values()) / 5\n \n METADATA[votes][discussion['id']] = discussion['votes']\n del discussion['votes']", "title": "" }, { "docid": "ac5d8c5a3a196f7e85faa506b4c5293a", "score": "0.43700767", 
"text": "def createTree(dataSet,min_sample_split,n_features):\n\n feaLen = len(dataSet[0])-1\n dataLen = len(dataSet)\n dataGini = giniCnt(dataSet)\n bestGain = 0.0\n bestFea = None\n bestValue = None\n \n treeSummary = {'impurity': '%.3f' % dataGini, 'samples': '%d' % dataLen}\n if len(dataSet)<min_sample_split:\n return Tree(results = vote(dataSet), summary = treeSummary, data = dataSet)\n \n # Randomly select candidate features\n feaList = []\n while len(feaList)<feaLen:\n index = randrange(feaLen)\n if index not in feaList:\n feaList.append(index)\n \n # Select the best feature among the candidate features\n for i in feaList:\n feaSet = set([data[i] for data in dataSet])\n for feaType in feaSet:\n left, right = split_data(dataSet,i,feaType)\n prob = len(left)/dataLen\n gain = dataGini-prob*giniCnt(left)-(1-prob)*giniCnt(right)\n if gain > bestGain:\n bestGain = gain\n bestFea = i\n bestValue = feaType\n bestData = (left, right)\n \n if bestGain > 0:\n \n trueBranch = createTree(bestData[0],min_sample_split,n_features)\n falseBranch = createTree(bestData[1],min_sample_split,n_features)\n return Tree(feaValue = bestValue, \\\n trueBranch = trueBranch,\\\n falseBranch = falseBranch,\\\n feaIndex = bestFea,\\\n summary = treeSummary)\n else:\n return Tree(results = vote(dataSet), summary = treeSummary, data = dataSet)", "title": "" }, { "docid": "9f593d7399f1ed36ebe15dc356868dea", "score": "0.4363409", "text": "def get_team_averages(session,team):\n \n # Get league and team info and grab current_week information\n league_df, team_df = get_league_info(session)\n current_week = league_df.loc['current_week','Value']\n \n # Grab team_key and team_name\n team_key = match_team_keys(team_df, [team])[0]\n team_name = team_df.columns.values[int(team_key[-1])-1]\n \n # Get standings information\n standings_df = get_league_standings(session)\n \n # Create list of weeks\n weeks = range(1,current_week)\n\n # Get matchup information\n matchup_df = get_league_matchup(session,team,weeks)\n\n # Predefine stat_dict\n stat_dict = {'H/AB*': [],\n 'R': [],\n 'HR': [],\n 'RBI': [],\n 'SB': [],\n 'AVG': [],\n 'IP*': [],\n 'W': [],\n 'SV': [],\n 'K': [],\n 'ERA': [],\n 'WHIP': [],\n 'Points':[]}\n stat_key = list(stat_dict.keys())\n\n # Preallocate win counter\n wins_per_category = [0]*len(stat_key)\n\n # Loop through each weeks results and record stats. 
\n # Count the number of wins per category\n for i in range(0,len(matchup_df)):\n for j in range(0,len(stat_key)):\n team_stat = matchup_df[i][stat_key[j]].iloc[0]\n if '*' in stat_key[j]:\n wins_per_category[j] = '-'\n else:\n opponent_stat = matchup_df[i][stat_key[j]].iloc[1]\n if team_stat > opponent_stat:\n wins_per_category[j] += 1\n stat_dict[stat_key[j]].append(matchup_df[i][stat_key[j]].iloc[0]) \n\n # Loop through each stat and assign an average and win percentage\n for i in range(0,len(stat_key)):\n if 'H/AB*' in stat_key[i]:\n total_avg = np.average(stat_dict['AVG'])\n total_at_bats = [int(x.split('/')[1]) for x in stat_dict[stat_key[i]]]\n average_at_bats = np.average(total_at_bats)\n average_hits = int(total_avg*average_at_bats)\n stat_dict[stat_key[i]].append(str(average_hits)+'/'+str(int(average_at_bats)))\n stat_dict[stat_key[i]].append(wins_per_category[i])\n continue\n elif '*' in stat_key[i]:\n stat_dict[stat_key[i]].append(np.average(stat_dict[stat_key[i]]))\n stat_dict[stat_key[i]].append(wins_per_category[i])\n continue\n elif 'Points' in stat_key[i]:\n stat_dict[stat_key[i]].append(np.average(stat_dict[stat_key[i]]))\n stat_dict[stat_key[i]].append(standings_df.loc[team_name,'win_rate'])\n continue\n stat_dict[stat_key[i]].append(np.average(stat_dict[stat_key[i]]))\n stat_dict[stat_key[i]].append(wins_per_category[i]/float(len(weeks)))\n\n # Create row names from weeks\n indeces = []\n for i in weeks:\n indeces.append('Week '+str(i))\n \n # Add Average and Win %\n indeces.append('Average')\n indeces.append('Win %')\n stat_dict['Week'] = indeces\n stat_key.append('Week')\n\n # Combine dictionary into dataframe\n average_df = pd.DataFrame(stat_dict,columns=stat_key)\n average_df = average_df.set_index('Week')\n \n return average_df", "title": "" }, { "docid": "90c6698a33fe1d408d7c88321ad930f1", "score": "0.4361905", "text": "def get_forest_distr(self, instance):\n # combine the distributions predicted by each tree\n # use a simple average to combine distributions\n # TODO: allow other combination options of distributions\n distr = [0 for _ in range(self.numclasses)] # create the distribution container\n tot_trees = len(self.trees)\n for tree in self.trees:\n tree_distr = tree.get_instance_distr(instance)\n for i in range(len(distr)):\n distr[i] += tree_distr[i]\n return [prob/tot_trees for prob in distr] # this gives avg prob dist for trees", "title": "" }, { "docid": "a7e89c53a035b44bca8ab655def988e2", "score": "0.43618605", "text": "def scenario(scale):\n # This is the only variable that depends on scale\n MarginalCostScenario = MarginalCostPT * scale\n MarginalCostPT_scaled = MarginalCostScenario / 10\n\n # The rest of the model is the same for all scenarios\n ASC_CAR = Beta('ASC_CAR', 0, None, None, 0)\n ASC_PT = Beta('ASC_PT', 0, None, None, 1)\n ASC_SM = Beta('ASC_SM', 0, None, None, 0)\n BETA_TIME_FULLTIME = Beta('BETA_TIME_FULLTIME', 0, None, None, 0)\n BETA_TIME_OTHER = Beta('BETA_TIME_OTHER', 0, None, None, 0)\n BETA_DIST_MALE = Beta('BETA_DIST_MALE', 0, None, None, 0)\n BETA_DIST_FEMALE = Beta('BETA_DIST_FEMALE', 0, None, None, 0)\n BETA_DIST_UNREPORTED = Beta('BETA_DIST_UNREPORTED', 0, None, None, 0)\n BETA_COST = Beta('BETA_COST', 0, None, None, 0)\n # Utility functions\n V_PT = (\n ASC_PT\n + BETA_TIME_FULLTIME * TimePT_scaled * fulltime\n + BETA_TIME_OTHER * TimePT_scaled * notfulltime\n + BETA_COST * MarginalCostPT_scaled\n )\n V_CAR = (\n ASC_CAR\n + BETA_TIME_FULLTIME * TimeCar_scaled * fulltime\n + BETA_TIME_OTHER * TimeCar_scaled * notfulltime\n + 
BETA_COST * CostCarCHF_scaled\n )\n V_SM = (\n ASC_SM\n + BETA_DIST_MALE * distance_km_scaled * male\n + BETA_DIST_FEMALE * distance_km_scaled * female\n + BETA_DIST_UNREPORTED * distance_km_scaled * unreportedGender\n )\n V = {0: V_PT, 1: V_CAR, 2: V_SM}\n MU_NOCAR = Beta('MU_NOCAR', 1.0, 1.0, None, 0)\n CAR_NEST = 1.0, [1]\n NO_CAR_NEST = MU_NOCAR, [0, 2]\n nests = CAR_NEST, NO_CAR_NEST\n prob_pt = models.nested(V, None, nests, 0)\n simulate = {\n 'weight': normalizedWeight,\n 'Revenue public transportation': prob_pt * MarginalCostScenario,\n }\n\n biogeme = bio.BIOGEME(database, simulate)\n biogeme.modelName = '02nestedPlot'\n\n # Read the estimation results from the file\n try:\n results = res.bioResults(pickleFile='01nestedEstimation.pickle')\n except FileNotFoundError:\n sys.exit(\n 'Run first the script 01nestedEstimation.py in order to generate '\n 'the file 01nestedEstimation.pickle.'\n )\n # Simulation\n simulatedValues = biogeme.simulate(results.getBetaValues())\n\n # We calculate the sum for all individuals of the generated revenues.\n revenues_pt = (\n simulatedValues['Revenue public transportation']\n * simulatedValues['weight']\n ).sum()\n return revenues_pt", "title": "" }, { "docid": "1f69c50003d7786d0df953330a142d3d", "score": "0.43616834", "text": "def test_weighted_average(self):\n self.assertEqual(16, weighted_average(self.test_activities, feature='average_speed', weight_feature='distance'))", "title": "" }, { "docid": "2ecc2ec6961ca1121990f565579dfdcb", "score": "0.4360936", "text": "def calculate_mean_ranking(experiment):\n ranking = calculate_ranking(experiment)\n return ranking.groupby(['Classifier', 'Metric'], as_index=False).mean()", "title": "" }, { "docid": "330954b91b295370e38b2c7db9fc3ad8", "score": "0.43604696", "text": "def product(tree1: ScenarioTree, tree2: ScenarioTree):\n assert tree1.depth == 2 and tree2.depth == 2, \"Scenario trees should be two-stage\"\n new_tree = from_bushiness([tree1.width[-1] * tree2.width[-1]])\n new_tree.data[\"W\"] = 1\n for k, child in enumerate(new_tree.children):\n k2, k1 = k // tree1.width[-1], k % tree1.width[-1]\n data1 = tree1.node_at_address((k1,)).data\n data2 = tree2.node_at_address((k2,)).data\n child.data[\"W\"] = data1[\"W\"] * data2[\"W\"]\n child.data[\"scenario\"] = {**data2[\"scenario\"], **data1[\"scenario\"]}\n return new_tree", "title": "" }, { "docid": "109749e12eda609f588b776e16f23aeb", "score": "0.4359431", "text": "def get_average(dataframe):\n ProcessedData = dataframe.groupBy('School Unit Name', 'Gender').agg(\n format_number(mean(\"Elementary School Cases\"), 2).alias(\"ElementarySchoolAverage\"),\n format_number(mean(\"Middle School Cases\"), 2).alias(\"MiddleSchoolAverage\"),\n format_number(mean(\"High School Cases\"), 2).alias(\"HighSchoolAverage\"))\n ProcessedData = ProcessedData.withColumnRenamed(\"School Unit Name\", \"SchoolUnitName\")\n ProcessedData = ProcessedData.fillna(0)\n ProcessedData = ProcessedData.na.drop(subset=['Gender'])\n return ProcessedData", "title": "" }, { "docid": "753fde2e0db114c4e7a8e0a9cd709482", "score": "0.43547153", "text": "def codisp_average(self, forest: list) -> pd.Series:\n\n # Compute average CoDisp\n avg_codisp = pd.Series(0.0, index=np.arange(self.number_of_rows))\n index = np.zeros(self.number_of_rows)\n for tree in forest:\n codisp = pd.Series({leaf: tree.codisp(leaf)\n for leaf in tree.leaves})\n avg_codisp[codisp.index] += codisp\n np.add.at(index, codisp.index.values, 1)\n avg_codisp /= index\n return avg_codisp", "title": "" }, { "docid": 
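# --- Illustrative sketch; not a passage from the corpus above ---------------
# test_weighted_average above asserts a distance-weighted mean of a feature.
# A minimal implementation matching that call signature could be the
# following; the dict-based `activities` layout is an assumption, not the
# tested library's actual data model.
def weighted_average(activities, feature, weight_feature):
    """Mean of `feature` over `activities`, weighted by `weight_feature`."""
    total_weight = sum(a[weight_feature] for a in activities)
    if total_weight == 0:
        return 0.0
    weighted_sum = sum(a[feature] * a[weight_feature] for a in activities)
    return weighted_sum / total_weight
# ---------------------------------------------------------------------------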
"7cf84ca51642eb59356391e48afe0bae", "score": "0.434813", "text": "def partial_average_weights(base_model_list):\n w = {}\n\n for one in base_model_list:\n w[one] = base_model_list[one].state_dict()\n\n keys_list = {}\n key_set = set()\n tep_dict = {}\n for one in w:\n keys_list[one] = w[one].keys()\n key_set = set.union(key_set, list(w[one].keys()))\n\n for i in key_set:\n tep_dict[i] = []\n\n for i in key_set:\n for j in w:\n if i in w[j]:\n tep_dict[i].append(w[j][i])\n\n for i in tep_dict:\n tep_tep_dict = {}\n for tensor in tep_dict[i]:\n if tensor.shape not in tep_tep_dict:\n tep_tep_dict[tensor.shape] = [tensor.float()]\n else:\n tep_tep_dict[tensor.shape].append(tensor.float())\n for shape in tep_tep_dict:\n tep_tep_dict[shape] = torch.mean(torch.stack(tep_tep_dict[shape]),0)\n tep_dict[i] = tep_tep_dict\n\n for user in w:\n for key in w[user]:\n shape = w[user][key].shape\n w[user][key] = tep_dict[key][shape]\n\n for user in base_model_list:\n base_model_list[one].load_state_dict(w[user])\n\n return base_model_list", "title": "" }, { "docid": "2c9f6c144e159bfb7996877e18edbacd", "score": "0.43412182", "text": "def augment_tree(treedata, base, is_gal=False):\n \n dtype_new_quantities = [('np', '<i4'), ('id', '<i4'), ('m', '<f4'), ('mvir', '<f4'),\n ('r', '<f4'), ('rvir', '<f4'), ('tvir', '<f4'), ('cvel', '<f4'),\n ('x', '<f4'), ('y', '<f4'), ('z', '<f4'),\n ('vx', '<f4'), ('vy', '<f4'), ('vz', '<f4'),\n ('ax', '<f4'), ('ay', '<f4'), ('az', '<f4'),\n ('sp', '<f4')]\n if is_gal:\n [dtype_new_quantities.append(i) for i in [('sig', '<f4'), ('sigbulge', '<f4'), ('mbulge', '<f4')]]\n \n New_arr = np.zeros(len(treedata), dtype=dtype_new_quantities)\n import tree.halomodule as hmo\n for nout in np.unique(treedata['nout']):\n # nout and Orig_halo_id are required.\n try:\n gal_org = hmo.Halo(base=base, nout=nout, halofinder='HM', load=True, is_gal=is_gal)\n except:\n print(\"Can't load halo catalog\\n Original data is not modified.\")\n return treedata\n \n # Before we start, remove unnecessary coulmns\n dtype_names = [field[0] for field in dtype_new_quantities]\n gal_org = gal_org.data[dtype_names]\n \n ind_tree_this_nout = np.where(treedata['nout'] == nout)[0]\n ok_gals = treedata['Orig_halo_id'][ind_tree_this_nout]\n \n # Galaxies are from a snapshot. Galaxy ID list must be a unique set.\n assert len(ok_gals) == len(np.unique(ok_gals))\n \n ind_org_gals = [np.where(gal_org['id'] == gal)[0] for gal in ok_gals]\n \n for i, ind in enumerate(ind_org_gals):\n assert sum(New_arr[ind_tree_this_nout[i]]) == 0. 
# array must be empty\n New_arr[ind_tree_this_nout[i]] = gal_org[ind].copy()\n \n # Drop duplicate fields\n #[\"id\", \"mvir\", \"rvir\", \"x\", \"y\", \"z\", \"vx\", \"vy\", \"vz\"]\n keep_fields = [\"np\", \"r\", \"tvir\", \"cvel\"]\n if is_gal:\n [keep_fields.append(i) for i in ['sig', 'sigbulge', 'mbulge', \"m\"]]\n else:\n keep_fields.append(\"m\") \n # in case of DM halo, mvir is already passed to Ctrees when building it.\n \n \n return join_struct_arrays([treedata, New_arr[keep_fields]])", "title": "" }, { "docid": "a7b0e848d66ece7fda2b9b1456d26634", "score": "0.4338166", "text": "def _initial_state_mean(self):\n alpha = self.alpha_prior_().mean() * np.ones([self.features])\n tau = tf.constant(1., dtype=tf.float64)\n sigma = tf.math.sqrt(tau)\n tau = tf.reshape(tau, [1])\n one_over_sqrt_alpha = self.convert_alpha(alpha)\n w = self.w_prior_(sigma, one_over_sqrt_alpha).mean()\n return tf.concat([w, tau, alpha],0)", "title": "" }, { "docid": "ec13b2f0839731cea799d0c8e9d351ef", "score": "0.43299946", "text": "def getAverage(args, mg):\n x = []\n ex = []\n y = []\n ey = []\n title = \"Average of \"\n graphs = mg.GetListOfGraphs()\n graphs.ls()\n ngr = graphs.GetSize()\n first = True\n for gr in graphs:\n N = gr.GetN()\n title = title + \" \" + gr.GetTitle()\n for i in range(N):\n if first:\n x.append(gr.GetX()[i])\n ex.append(0.0)\n y.append(gr.GetY()[i]/ngr)\n ey.append(pow(gr.GetEY()[i], 2))\n else:\n y[i] = y[i] + gr.GetY()[i]/ngr\n ey[i] = ey[i] + pow(gr.GetEY()[i], 2)/ngr\n first = False\n ey = map(sqrt, ey)\n\n gr = ROOT.TGraphErrors(len(x), array('f', x), array('f', y), array('f', ex), array('f', ey))\n gr.SetNameTitle(\"grAverage\", \"%s;%s;%s\" % (title, args.xtitle, args.ytitle))\n# gr.SetNameTitle(\"grAverage\", \"%s;%s;%s\" % (\"\", args.xtitle, args.ytitle))\n gr.SetMarkerStyle(ROOT.kOpenSquare)\n gr.SetLineStyle(ROOT.kSolid)\n gr.SetMarkerColor(ROOT.kBlack)\n return gr", "title": "" }, { "docid": "fa204e7d6ba14501892a0ac8b3f38010", "score": "0.43279922", "text": "def boosting(features, labels, rounds):\r\n trees = []\r\n tree_weights = []\r\n num_of_sampels = len(labels)\r\n sample_weights = [1.0 / num_of_sampels] * num_of_sampels\r\n\r\n for i in range(rounds):\r\n sampled_features, sampled_labels = sampling_by_weights(features, labels, sample_weights)\r\n tree = dt.build_decision_tree(sampled_features, sampled_labels)\r\n classified_labels = dt.classify_testing_dataset(tree, features)\r\n # calculate the error\r\n classifier_error = sum([sample_weights[i] if classified_labels[i] != labels[i] else 0\\\r\n for i in range(num_of_sampels)]) / sum(sample_weights)\r\n\r\n # print(\"classifier error rate is: %f\" % classifier_error)\r\n # print(min(sample_weights), max(sample_weights))\r\n\r\n # if the error > 50%, start over\r\n if classifier_error > 0.5:\r\n continue\r\n\r\n # calculate the classifier's importance\r\n tree_weight = 1.0 / 2.0 * np.log((1.0 - classifier_error) / classifier_error)\r\n\r\n # update the weights of each record\r\n sample_weights = [sample_weights[i] * np.exp(-classifier_error) \\\r\n if classified_labels[i] == labels[i]\\\r\n else sample_weights[i] * np.exp(classifier_error)\\\r\n for i in range(num_of_sampels)]\r\n sample_weights = sample_weights / sum(sample_weights)\r\n\r\n trees.append(tree)\r\n tree_weights.append(tree_weight)\r\n return trees, tree_weights", "title": "" }, { "docid": "575633836aebde2b77bc9ad43172c6c0", "score": "0.4324237", "text": "def prune_by_flat_tags(self, expr):\n l.ttrace(f\"pruning '{self.name}'\")\n pruned_scenarios: 
List[Scenario] = []\n for scenario in self.scenarios:\n if expr.evaluate(_flatten_tags(scenario.tags)):\n pruned_scenarios.append(scenario)\n else:\n l.ttrace(f\"pruning scenario with tags {scenario.tags}\")\n scenario.overall_result = STATUS_SKIPPED\n self.results['skipped_scenarios'].append(scenario)\n self.scenarios = pruned_scenarios\n\n pruned_scenario_outlines: List[ScenarioOutline] = []\n for scenario_outline in self.scenario_outlines:\n pruned_scenarios: List[Scenario] = []\n for scenario in scenario_outline.scenarios:\n if expr.evaluate(_flatten_tags(scenario.tags)):\n pruned_scenarios.append(scenario)\n else:\n l.ttrace(\n f\"pruning scenario '{scenario.name}' from examples table '{scenario.examples_name}' with\"\n f\"tags {scenario.tags} in scenario outline '{scenario_outline.raw_name}'\"\n )\n scenario.overall_result = STATUS_SKIPPED\n scenario_outline.results['skipped_scenarios'].append(scenario)\n if len(scenario_outline.scenarios) == len(scenario_outline.results['skipped_scenarios']):\n self.results['skipped_scenario_outlines'].append(scenario_outline)\n scenario_outline.overall_result = STATUS_SKIPPED\n else:\n pruned_scenario_outlines.append(scenario_outline)\n scenario_outline.scenarios = pruned_scenarios\n\n self.scenario_outlines = pruned_scenario_outlines\n\n if len(self.scenarios) == 0 and len(self.scenario_outlines) == 0:\n self.overall_result = STATUS_SKIPPED", "title": "" }, { "docid": "64449d87e4acf090b7e3f0a945e02533", "score": "0.43188074", "text": "def min_forest_mae(leaf_nodes, train_X, val_X, train_y, val_y):\n dict_mae = {}\n for node in leaf_nodes:\n model = DecisionTreeRegressor(max_leaf_nodes=node, random_state=1)\n model.fit(train_X, train_y)\n preds_val = model.predict(val_X)\n mae = mean_absolute_error(val_y, preds_val)\n dict_mae[node] = mae\n node = min(dict_mae, key=dict_mae.get)\n print(f'Best Fit Node modelTree: {node} - MAE: {dict_mae[node]:.0f}')\n return node", "title": "" }, { "docid": "aa8b1553dec63da4948d45d719180237", "score": "0.43089774", "text": "def _avg_subsamples(self, sample_grouping: param_typing.GroupedColumns,\n function: Literal['mean', 'median', 'geometric_mean'] = 'mean',\n new_column_names: Union[Literal['auto'], List[str]] = 'auto'):\n assert isinstance(function, str), \"'function' must be a string!\"\n function = function.lower()\n assert function in {'mean', 'median', 'geometric_mean'}, \\\n \"'function' must be 'mean', 'median', or 'geometric_mean'!\"\n\n averaged_df = pd.DataFrame(index=self.df.index)\n if new_column_names == 'auto':\n new_column_names = []\n for i, group in enumerate(sample_grouping):\n if isinstance(group, str):\n new_column_names.append(group)\n elif validation.isiterable(group):\n new_column_names.append(\",\".join(group))\n else:\n assert validation.isiterable(new_column_names) and validation.isinstanceiter(new_column_names, str), \\\n \"'new_column_names' must be either 'auto' or a list of strings!\"\n assert len(new_column_names) == len(sample_grouping), \\\n f\"The number of new column names {len(new_column_names)} \" \\\n f\"does not match the number of sample groups {len(sample_grouping)}!\"\n\n for group, new_name in zip(sample_grouping, new_column_names):\n if isinstance(group, str):\n assert group in self.columns, f\"Column '{group}' does not exist in the original table!\"\n averaged_df[new_name] = self.df[group].values\n elif isinstance(group, (list, tuple, set)):\n group = parsing.data_to_list(group)\n for item in group:\n assert item in self.columns, f\"Column '{item}' does not exist in 
the original table!\"\n if function == 'mean':\n averaged_df[new_name] = self.df[group].mean(axis=1).values\n elif function == 'median':\n averaged_df[new_name] = self.df[group].median(axis=1).values\n else:\n averaged_df[new_name] = gmean(self.df[group].values, axis=1)\n else:\n raise TypeError(f\"'sample_list' cannot contain objects of type {type(group)}.\")\n\n return averaged_df", "title": "" }, { "docid": "886318e987340e95f86a7bb53231dd05", "score": "0.43033874", "text": "def avg_value(self):\n return {self._name: np.ravel(self._matches).mean()}", "title": "" }, { "docid": "5c06dd17b4d894d3b008bef0a5a803bd", "score": "0.4299559", "text": "def calculateGain(examples, feature, e):\n children = split(examples, feature)\n total = 0\n\n for c in children:\n labelled_examples = children[c]\n total += (len(labelled_examples) / len(examples)) * getEntropy(labelled_examples)\n\n gain = e - total\n return gain, children", "title": "" }, { "docid": "6bb247e6d20a77b3a2569127e50a5b97", "score": "0.42900342", "text": "def group_link_avg(self, new_cluster, other_cluster):\n nc_num = self.active_clusters[new_cluster]\n oc_num = self.active_clusters[other_cluster]\n if isinstance(other_cluster, basestring):\n ocluster = [(other_cluster)]\n else:\n ocluster = self.cluster_info[other_cluster]\n before_avg = 0\n for y in ocluster:\n for x in self.cluster_info[new_cluster]:\n if self.sim_matrix[y].has_key(x):\n before_avg += self.sim_matrix[y][x]\n else:\n before_avg += self.sim_matrix[x][y]\n avg = before_avg / (nc_num + oc_num)\n return avg", "title": "" }, { "docid": "66c76f374b301279b8eb9855a9eaeeca", "score": "0.42837098", "text": "def decisionTreeLearning(attributeValues,attributes,rootNode,currentNode,nodeLeaves=list(),nodes=1,totalCount=0):\n val = 0\n for value in attributeValues:\n if attributeValues[0][-1] != value[-1]:\n val = 1\n \n \n if val == 0:\n currentNode.data=str(attributeValues[0][-1])\n currentNode.leaf=True\n currentNode.split=\"BASE CASE\"\n nodeLeaves.append(totalCount)\n \n return None,nodeLeaves,nodes\n \n #print(\"ROWS \",rows)\n totalEntropy = entropyCalculation(attributeValues)\n informationGain= informationGainCalculation(attributeValues,attributes,totalEntropy)\n #for i in range(attributes):\n # maximumList = [max(informationGain[i], key = operator.itemgetter(1))]\n \n \n maximumList=[max(informationGain[i],key=operator.itemgetter(1)) for i in range(attributes)]\n if maximumList[0][1] < maximumList[1][1]:\n splitAttribute=1\n splitValue=maximumList[1][0]\n else:\n splitAttribute=0\n splitValue=maximumList[0][0]\n \n #Split by value\n greaterList = list()\n lesserList = list()\n for value in range(len(attributeValues)):\n if attributeValues[value][splitAttribute] <= splitValue:\n lesserList.append(attributeValues[value])\n else:\n greaterList.append(attributeValues[value])\n \n currentNode.split=(splitAttribute+1,splitValue)\n \n for childValue in [lesserList,greaterList]:\n childnodeTree=Tree()\n if childValue == greaterList:\n childnodeTree.classCounter=countClasses(greaterList)\n currentNode.greater=childnodeTree\n \n else:\n childnodeTree.classCounter=countClasses(lesserList)\n currentNode.lesser=childnodeTree\n \n _,leaves,nodes=decisionTreeLearning(childValue,attributes,attributeValues,childnodeTree,nodeLeaves,nodes+1,totalCount+1)\n \n return currentNode,nodeLeaves,nodes", "title": "" }, { "docid": "802b0b7e28ae4ea7375e96cda6704b62", "score": "0.42821705", "text": "def __build_tree__(self, features, classes, depth=0):\n \n # TODO: finish this.\n 
m=np.size(features,axis=0)\n n=np.size(features,axis=1)\n c=Counter(classes)\n if len(c)==1:\n return DecisionNode(None,None,None,classes[0])\n if depth==self.depth_limit:\n if (c[0]>c[1]):\n return DecisionNode(None,None,None,0)\n else:\n return DecisionNode(None,None,None,1)\n # try to split the features using median\n # 0 is vertical axis, 1 is horizontal axis \n \n mean=np.mean(features,axis=0)\n left=[]\n right=[]\n max_gain=0\n max_gain_index=0\n for i in range(n):\n left_class=classes[features[:,i]<mean[i]]\n right_class=classes[features[:,i]>=mean[i]]\n if np.size(left_class)!=0 and np.size(right_class)!=0:\n gain=gini_gain(classes,[left_class.tolist(),right_class.tolist()])\n else:\n gain=0 \n if max_gain<gain:\n max_gain=gain\n max_gain_index=i\n split_feature=features[:,max_gain_index]\n # you should never compare your feature with the max gini gain\n left_features=features[split_feature<mean[max_gain_index]]\n right_features=features[split_feature>=mean[max_gain_index]]\n left_classes=classes[split_feature<mean[max_gain_index]]\n right_classes=classes[split_feature>=mean[max_gain_index]]\n if np.size(left_classes)==0 or np.size(right_classes)==0:\n if (c[0]>c[1]):\n return DecisionNode(None,None,None,0)\n else:\n return DecisionNode(None,None,None,1)\n left=self.__build_tree__(left_features,left_classes,depth+1)\n right=self.__build_tree__(right_features,right_classes,depth+1)\n return DecisionNode(left,right,lambda features:features[max_gain_index]<mean[max_gain_index])\n \"\"\"\n maxf=np.amax(features,axis=0)\n minf=np.amin(features,axis=0)\n max_gain=0\n max_gain_index=0\n max_gain_attr=0\n max_gain_attr_k=0\n max_gain_k=0\n for i in range(n):\n diff=(maxf[i]-minf[i])/10\n for k in range(1,10):\n left_class=classes[features[:,i]<(minf[i]+diff*k)]\n right_class=classes[features[:,i]>=(minf[i]+diff*k)]\n if np.size(left_class)!=0 and np.size(right_class)!=0:\n gain=gini_gain(classes,[left_class.tolist(),right_class.tolist()])\n else:\n gain=0 \n if max_gain_attr<gain:\n max_gain_attr=gain\n max_gain_attr_k=k\n if max_gain<max_gain_attr:\n max_gain=max_gain_attr\n max_gain_index=i\n max_gain_k=max_gain_attr_k\n split_feature=features[:,max_gain_index]\n diff=(maxf[max_gain_index]-minf[max_gain_index])/10\n # you should never compare your feature with the max gini gain\n left_features=features[split_feature<(minf[max_gain_index]+diff*max_gain_k)]\n right_features=features[split_feature>=(minf[max_gain_index]+diff*max_gain_k)]\n left_classes=classes[split_feature<(minf[max_gain_index]+diff*max_gain_k)]\n right_classes=classes[split_feature>=(minf[max_gain_index]+diff*max_gain_k)]\n left=self.__build_tree__(left_features,left_classes,depth+1)\n right=self.__build_tree__(right_features,right_classes,depth+1)\n return DecisionNode(left,right,lambda features:features[max_gain_index]<(minf[max_gain_index]+diff*max_gain_k))\n \"\"\"", "title": "" }, { "docid": "45300b9a3164cd61d7c6829d3c612ded", "score": "0.42688072", "text": "def transform_forest_fires(self):\n # We'll make a deep copy of our data set\n temp_df = pd.DataFrame.copy(self.data, deep=True)\n\n # Set attributes for ETL object\n self.transformed_data = temp_df\n\n # Feature name/type\n self.feature_names = {feature_name: 'numerical' for feature_name in temp_df.keys()[:-1]}\n self.feature_names.update({'month': 'categorical', 'day': 'categorical'})\n\n # Squared Average Target for percent_threshold\n self.squared_average_target = temp_df.iloc[:, -1].mean() ** 2", "title": "" }, { "docid": "4b891aaecf2620cc9511f6282d7305b3", 
"score": "0.42645758", "text": "def apply_scenarios(scenarios, test):\n for scenario in scenarios:\n yield apply_scenario(scenario, test)", "title": "" }, { "docid": "c61e38756b1785a3876862029e32088b", "score": "0.42629874", "text": "def calcaverage(original_document):\n document = copy.deepcopy(original_document)\n\n document[\"average\"] = 1.0 * document[\"total\"] / document[\"count\"]\n\n return document", "title": "" }, { "docid": "d18e2a265435583e71856795cc803af2", "score": "0.42607558", "text": "def get_average(self):\n\t\treturn self.goals / self.games", "title": "" }, { "docid": "c2f72c983dc5ca8d46ec09a8a9e21d9c", "score": "0.42532307", "text": "def test_AVL_comprehensive(self):\n\n # visualize some of test in this testcase with https://www.cs.usfca.edu/~galles/visualization/AVLtree.html\n # ensure empty tree is properly handled\n\n def check_node_properties(current: Node, value: int = 0, height: int = 0, balance: int = 0):\n if value is None:\n self.assertIsNone(current)\n return\n self.assertEqual(value, current.value)\n self.assertEqual(height, current.height)\n self.assertEqual(balance, avl.balance_factor(current))\n\n avl = AVLTree()\n avl.insert(avl.origin, 5)\n avl.insert(avl.origin, 1)\n avl.insert(avl.origin, 10)\n self.assertEqual(3, avl.size)\n self.assertEqual(1, avl.min(avl.origin).value)\n self.assertEqual(10, avl.max(avl.origin).value)\n # Properties of all nodes\n check_node_properties(avl.origin, value=5, height=1, balance=0)\n check_node_properties(avl.origin.left, value=1, height=0, balance=0)\n check_node_properties(avl.origin.right, value=10, height=0, balance=0)\n \"\"\"\n Current AVL tree:\n 5\n / \\\n 1 10\n After Removing 5:\n 1\n \\\n 10\n \"\"\"\n avl.remove(avl.origin, 5)\n self.assertEqual(2, avl.size)\n self.assertEqual(1, avl.min(avl.origin).value)\n self.assertEqual(10, avl.max(avl.origin).value)\n # Properties of all nodes\n check_node_properties(avl.origin, value=1, height=1, balance=-1)\n check_node_properties(avl.origin.left, value=None)\n check_node_properties(avl.origin.right, value=10, height=0, balance=0)\n \"\"\"\n Current AVL tree:\n 1\n \\\n 10\n After inserting 0, 20:\n 1\n / \\\n 0 10\n \\\n 20\n \"\"\"\n avl.insert(avl.origin, 0)\n avl.insert(avl.origin, 20)\n self.assertEqual(4, avl.size)\n self.assertEqual(0, avl.min(avl.origin).value)\n self.assertEqual(20, avl.max(avl.origin).value)\n # Properties of all nodes\n check_node_properties(avl.origin, value=1, height=2, balance=-1)\n check_node_properties(avl.origin.left, value=0, height=0, balance=0)\n check_node_properties(avl.origin.right, value=10, height=1, balance=-1)\n check_node_properties(avl.origin.right.right, value=20, height=0, balance=0)\n\n \"\"\"\n Current AVL tree:\n 1\n / \\\n 0 10\n \\\n 20\n After removing 20, inserting -20 and inserting 5\n 1\n / \\\n 0 10\n / /\n -20 5\n \"\"\"\n avl.remove(avl.origin, 20)\n avl.insert(avl.origin, -20)\n avl.insert(avl.origin, 5)\n self.assertEqual(5, avl.size)\n self.assertEqual(-20, avl.min(avl.origin).value)\n self.assertEqual(10, avl.max(avl.origin).value)\n # Properties of all nodes\n check_node_properties(avl.origin, value=1, height=2, balance=0)\n check_node_properties(avl.origin.left, value=0, height=1, balance=1)\n check_node_properties(avl.origin.left.left, value=-20, height=0, balance=0)\n check_node_properties(avl.origin.right, value=10, height=1, balance=1)\n check_node_properties(avl.origin.right.left, value=5, height=0, balance=0)\n\n \"\"\"\n Second part, inserting and removing with rotation\n\n inserting 5, 10:\n 
5\n \\\n 10\n \"\"\"\n avl = AVLTree()\n avl.insert(avl.origin, 5)\n avl.insert(avl.origin, 10)\n self.assertEqual(2, avl.size)\n self.assertEqual(5, avl.min(avl.origin).value)\n self.assertEqual(10, avl.max(avl.origin).value)\n # Properties of all nodes\n check_node_properties(avl.origin, value=5, height=1, balance=-1)\n check_node_properties(avl.origin.right, value=10, height=0, balance=0)\n \"\"\"\n Current AVL tree:\n 5\n \\\n 10\n After inserting 8, 9, 12\n 8\n / \\\n 5 10\n / \\\n 9 12\n \"\"\"\n avl.insert(avl.origin, 8)\n avl.insert(avl.origin, 9)\n avl.insert(avl.origin, 12)\n self.assertEqual(5, avl.size)\n self.assertEqual(5, avl.min(avl.origin).value)\n self.assertEqual(12, avl.max(avl.origin).value)\n # Properties of all nodes\n check_node_properties(avl.origin, value=8, height=2, balance=-1)\n check_node_properties(avl.origin.right, value=10, height=1, balance=0)\n check_node_properties(avl.origin.right.left, value=9, height=0, balance=0)\n check_node_properties(avl.origin.right.right, value=12, height=0, balance=0)\n check_node_properties(avl.origin.left, value=5, height=0, balance=0)\n\n \"\"\"\n Current AVL tree:\n 8\n / \\\n 5 10\n / \\\n 9 12\n After inserting 3, 1, 2\n 8\n / \\\n 3 10\n / \\ / \\\n 1 5 9 12\n \\\n 2\n \"\"\"\n avl.insert(avl.origin, 3)\n avl.insert(avl.origin, 1)\n avl.insert(avl.origin, 2)\n self.assertEqual(8, avl.size)\n self.assertEqual(1, avl.min(avl.origin).value)\n self.assertEqual(12, avl.max(avl.origin).value)\n # Properties of all nodes\n check_node_properties(avl.origin, value=8, height=3, balance=1)\n check_node_properties(avl.origin.right, value=10, height=1, balance=0)\n check_node_properties(avl.origin.right.left, value=9, height=0, balance=0)\n check_node_properties(avl.origin.right.right, value=12, height=0, balance=0)\n check_node_properties(avl.origin.left, value=3, height=2, balance=1)\n check_node_properties(avl.origin.left.left, value=1, height=1, balance=-1)\n check_node_properties(avl.origin.left.left.right, value=2, height=0, balance=0)\n check_node_properties(avl.origin.left.right, value=5, height=0, balance=0)\n \"\"\"\n Current AVL tree:\n 8\n / \\\n 3 10\n / \\ / \\\n 1 5 9 12\n \\\n 2\n After removing 5\n 8\n / \\\n 2 10\n / \\ / \\\n 1 3 9 12\n \"\"\"\n avl.remove(avl.origin, 5)\n self.assertEqual(7, avl.size)\n self.assertEqual(1, avl.min(avl.origin).value)\n self.assertEqual(12, avl.max(avl.origin).value)\n # Properties of all nodes\n check_node_properties(avl.origin, value=8, height=2, balance=0)\n check_node_properties(avl.origin.right, value=10, height=1, balance=0)\n check_node_properties(avl.origin.right.left, value=9, height=0, balance=0)\n check_node_properties(avl.origin.right.right, value=12, height=0, balance=0)\n check_node_properties(avl.origin.left, value=2, height=1, balance=0)\n check_node_properties(avl.origin.left.left, value=1, height=0, balance=0)\n check_node_properties(avl.origin.left.right, value=3, height=0, balance=0)\n \"\"\"\n Current AVL tree:\n 8\n / \\\n 2 10\n / \\ / \\\n 1 3 9 12\n After inserting 5, 13, 0, 7, 20\n 8\n / \\\n 2 10\n / \\ / \\\n 1 5 9 13\n / / \\ / \\\n 0 3 7 12 20\n \"\"\"\n avl.insert(avl.origin, 5)\n avl.insert(avl.origin, 13)\n avl.insert(avl.origin, 0)\n avl.insert(avl.origin, 7)\n avl.insert(avl.origin, 20)\n self.assertEqual(12, avl.size)\n self.assertEqual(0, avl.min(avl.origin).value)\n self.assertEqual(20, avl.max(avl.origin).value)\n # Properties of all nodes\n check_node_properties(avl.origin, value=8, height=3, balance=0)\n\n 
check_node_properties(avl.origin.right, value=10, height=2, balance=-1)\n check_node_properties(avl.origin.right.left, value=9, height=0, balance=0)\n check_node_properties(avl.origin.right.right, value=13, height=1, balance=0)\n check_node_properties(avl.origin.right.right.right, value=20, height=0, balance=0)\n check_node_properties(avl.origin.right.right.left, value=12, height=0, balance=0)\n\n check_node_properties(avl.origin.left, value=2, height=2, balance=0)\n check_node_properties(avl.origin.left.left, value=1, height=1, balance=1)\n check_node_properties(avl.origin.left.left.left, value=0, height=0, balance=0)\n check_node_properties(avl.origin.left.right, value=5, height=1, balance=-0)\n check_node_properties(avl.origin.left.right.right, value=7, height=0, balance=0)\n check_node_properties(avl.origin.left.right.left, value=3, height=0, balance=0)\n\n \"\"\"\n Current AVL tree:\n 8\n / \\\n 2 10\n / \\ / \\\n 1 5 9 13\n / / \\ / \\\n 0 3 7 12 20\n After removing 1, 9\n 8\n / \\\n 2 13\n / \\ / \\\n 0 5 10 20\n / \\ \\ \n 3 7 12\n \"\"\"\n avl.remove(avl.origin, 1)\n avl.remove(avl.origin, 9)\n self.assertEqual(10, avl.size)\n self.assertEqual(0, avl.min(avl.origin).value)\n self.assertEqual(20, avl.max(avl.origin).value)\n # Properties of all nodes\n check_node_properties(avl.origin, value=8, height=3, balance=0)\n\n check_node_properties(avl.origin.right, value=13, height=2, balance=1)\n check_node_properties(avl.origin.right.left, value=10, height=1, balance=-1)\n check_node_properties(avl.origin.right.left.right, value=12, height=0, balance=0)\n check_node_properties(avl.origin.right.right, value=20, height=0, balance=0)\n\n check_node_properties(avl.origin.left, value=2, height=2, balance=-1)\n check_node_properties(avl.origin.left.left, value=0, height=0, balance=0)\n check_node_properties(avl.origin.left.right, value=5, height=1, balance=-0)\n check_node_properties(avl.origin.left.right.right, value=7, height=0, balance=0)\n check_node_properties(avl.origin.left.right.left, value=3, height=0, balance=0)\n\n \"\"\"\n Part Three\n Everything but random, checking properties of tree only\n \"\"\"\n random.seed(331)\n \"\"\"\n randomly insert, and remove alphabet to avl tree\n \"\"\"\n\n def random_order_1(character= True):\n order = random.randint(0, 2)\n if not len(existing_value) or (order and (not character or avl.size < 26)):\n if character:\n inserted = chr(ord('a') + random.randint(0, 25))\n while inserted in existing_value:\n inserted = chr(ord('a') + random.randint(0, 25))\n else:\n inserted = random.randint(0, 100000)\n if inserted == 218:\n print(\"weeeee\")\n avl.insert(avl.origin, inserted)\n existing_value[inserted] = 1\n else:\n removed = random.choice(list(existing_value.keys()))\n avl.remove(avl.origin, removed)\n existing_value.pop(removed)\n\n existing_value = {}\n avl = AVLTree()\n for _ in range(30):\n random_order_1()\n self.assertEqual('a', avl.min(avl.origin).value)\n self.assertEqual('y', avl.max(avl.origin).value)\n # inorder test\n expected = ['a', 'b', 'd', 'f', 'g', 'i', 'k', 'o', 'p', 'q', 'r', 's', 't', 'v', 'w', 'y']\n generator = avl.inorder(avl.origin)\n self.assertIsInstance(generator, types.GeneratorType)\n for num in expected:\n node = next(generator)\n self.assertIsInstance(node, Node)\n self.assertEqual(num, node.value)\n with self.assertRaises(StopIteration):\n next(generator)\n\n expected = ['p', 'f', 'b', 'a', 'd', 'k', 'i', 'g', 'o', 't', 'r', 'q', 's', 'w', 'v', 'y']\n generator = avl.preorder(avl.origin)\n 
self.assertIsInstance(generator, types.GeneratorType)\n for num in expected:\n node = next(generator)\n self.assertIsInstance(node, Node)\n self.assertEqual(num, node.value)\n with self.assertRaises(StopIteration):\n next(generator)\n\n expected = ['a', 'd', 'b', 'g', 'i', 'o', 'k', 'f', 'q', 's', 'r', 'v', 'y', 'w', 't', 'p']\n generator = avl.postorder(avl.origin)\n self.assertIsInstance(generator, types.GeneratorType)\n for num in expected:\n node = next(generator)\n self.assertIsInstance(node, Node)\n self.assertEqual(num, node.value)\n with self.assertRaises(StopIteration):\n next(generator)\n\n existing_value.clear()\n avl = AVLTree()\n for _ in range(1200):\n if _ > 920:\n print(\"EZ\")\n random_order_1(character=False)\n self.assertEqual(218, avl.min(avl.origin).value)\n self.assertEqual(99893, avl.max(avl.origin).value)\n # inorder test\n expected = [218, 433, 640, 927, 992, 1069, 1175, 1362, 1790, 2011,\n 2118, 2180, 3231, 3592, 3883, 4079, 4322, 4398, 4696, 4861, 5383, 5403, 5690, 5965, 6089, 6266, 6628, 6949, 7135, 7195, 7396, 7691, 7699, 8121, 8187, 8749, 8773, 8785, 8885, 9497, 9691, 10039, 10441, 10524, 11628, 11888, 12640, 12842, 12879, 13053, 13417, 13887, 13968, 14023, 14846, 14913, 15311, 15424, 16168, 16395, 16422, 16706, 17086, 17564, 17596, 17711, 17910, 18704, 19072, 19179, 19484, 19543, 19548, 19555, 19628, 19906, 19971, 20419, 20521, 20618, 20860, 21205, 21353, 21379, 21419, 21723, 22253, 22748, 22752, 23065, 23179, 23220, 23359, 23673, 23710, 24332, 25504, 26494, 26581, 26708, 26717, 26945, 27073, 27161, 27793, 27972, 28018, 28198, 28483, 28682, 28775, 29108, 29184, 29385, 29509, 29822, 30063, 30210, 30478, 30621, 30800, 30807, 31135, 31164, 31189, 31423, 31689, 32398, 32645, 33284, 33747, 34045, 34216, 34242, 34251, 34271, 34508, 34781, 34794, 35325, 35355, 35466, 35481, 35693, 35927, 35943, 36069, 36747, 37629, 37663, 37679, 38153, 39136, 39211, 39265, 39769, 40178, 40298, 40453, 40864, 41360, 41578, 41756, 41973, 42051, 42476, 43057, 43279, 43379, 43601, 44079, 44105, 44302, 44525, 44904, 45373, 45396, 45775, 46027, 46070, 46204, 46376, 46463, 46534, 46557, 46666, 46832, 46961, 47170, 47287, 47856, 47935, 48073, 48581, 48585, 48947, 49040, 49089, 49422, 49441, 49612, 49624, 49782, 49943, 50506, 50724, 50766, 51097, 51568, 51995, 52068, 52283, 52422, 52436, 52553, 52915, 53140, 53224, 53356, 53644, 53851, 53863, 54054, 54683, 55077, 55196, 55289, 55295, 55339, 55579, 55596, 55611, 55822, 55832, 56254, 56785, 56800, 57043, 57066, 57147, 57167, 57199, 57904, 58278, 58299, 58311, 58613, 58751, 58783, 58838, 59125, 59664, 60049, 60226, 61108, 61353, 61504, 61543, 61590, 61730, 61888, 61979, 62257, 62426, 62995, 63102, 63626, 64606, 65089, 65386, 65636, 66156, 66181, 66337, 66837, 67217, 67686, 67763, 68681, 68899, 69351, 69402, 69695, 69725, 70733, 70736, 71258, 71265, 71417, 71473, 71487, 71668, 71884, 71913, 71920, 72171, 72409, 72677, 72927, 73195, 73491, 73865, 74096, 74272, 74291, 74305, 74756, 75150, 75174, 75848, 75892, 76270, 76517, 76796, 76821, 76936, 77062, 77426, 77451, 77662, 77703, 77762, 77836, 78162, 78189, 78241, 78684, 78814, 78885, 79507, 79825, 80142, 80208, 80652, 80817, 80878, 81769, 81967, 82239, 82245, 82330, 82611, 82809, 82999, 83000, 83284, 83626, 83723, 85053, 85418, 85746, 85779, 85846, 85887, 85912, 85968, 86127, 86260, 86846, 86970, 87081, 87137, 87667, 87671, 87790, 88099, 88172, 88778, 88854, 88859, 89546, 89796, 90043, 90174, 90500, 90793, 90837, 91228, 91287, 91819, 91938, 92031, 92133, 92248, 92443, 92636, 93344, 93547, 93585, 
93674, 93687, 93710, 94027, 94257, 94374, 94585, 94874, 95337, 96088, 96194, 96889, 97325, 97357, 97454, 97610, 97683, 97685, 98006, 98076, 98268, 99697, 99771, 99888, 99893]\n generator = avl.inorder(avl.origin)\n self.assertIsInstance(generator, types.GeneratorType)\n for num in expected:\n node = next(generator)\n self.assertIsInstance(node, Node)\n self.assertEqual(num, node.value)\n with self.assertRaises(StopIteration):\n next(generator)\n\n expected = [49040, 19628, 9691, 4398, 2180, 992, 640, 218, 433, 927, 1790, 1175,\n 1069, 1362, 2118, 2011, 3592, 3231, 4079, 3883, 4322, 6628, 5965, 5383, 4861, 4696, 5690, 5403, 6266, 6089, 8187, 7195, 7135, 6949, 7691, 7396, 8121, 7699, 8773, 8749, 8885, 8785, 9497, 16395, 13417, 11888, 10524, 10039, 10441, 11628, 12842, 12640, 13053, 12879, 14846, 13968, 13887, 14023, 15424, 15311, 14913, 16168, 17711, 17564, 16706, 16422, 17086, 17596, 19179, 18704, 17910, 19072, 19543, 19484, 19548, 19555, 32645, 23673, 21379, 20419, 19971, 19906, 21205, 20618, 20521, 20860, 21353, 22253, 21723, 21419, 23179, 22752, 22748, 23065, 23359, 23220, 29509, 27793, 26717, 26494, 24332, 23710, 25504, 26708, 26581, 27073, 26945, 27161, 28682, 28018, 27972, 28198, 28483, 29108, 28775, 29385, 29184, 30800, 30210, 29822, 30063, 30478, 30621, 31164, 31135, 30807, 31689, 31423, 31189, 32398, 41360, 35693, 34271, 34216, 33747, 33284, 34045, 34242, 34251, 35325, 34781, 34508, 34794, 35466, 35355, 35481, 37679, 36069, 35943, 35927, 37629, 36747, 37663, 39769, 39211, 38153, 39136, 39265, 40298, 40178, 40864, 40453, 46376, 44904, 43379, 42051, 41756, 41578, 41973, 43057, 42476, 43279, 44302, 44079, 43601, 44105, 44525, 45775, 45373, 45396, 46070, 46027, 46204, 47170, 46666, 46534, 46463, 46557, 46832, 46961, 47935, 47287, 47856, 48581, 48073, 48585, 48947, 69402, 55832, 52915, 50506, 49624, 49441, 49422, 49089, 49612, 49943, 49782, 51568, 50766, 50724, 51097, 52283, 52068, 51995, 52436, 52422, 52553, 53863, 53644, 53224, 53140, 53356, 53851, 55289, 55077, 54683, 54054, 55196, 55579, 55339, 55295, 55611, 55596, 55822, 61590, 58783, 57167, 56800, 56254, 56785, 57066, 57043, 57147, 58278, 57904, 57199, 58613, 58299, 58311, 58751, 60049, 59125, 58838, 59664, 61504, 61108, 60226, 61353, 61543, 64606, 61979, 61730, 61888, 62995, 62426, 62257, 63626, 63102, 67217, 66156, 65386, 65089, 65636, 66337, 66181, 66837, 67763, 67686, 68899, 68681, 69351, 80878, 75848, 72927, 71417, 70733, 69725, 69695, 71258, 70736, 71265, 71913, 71668, 71473, 71487, 71884, 72409, 72171, 71920, 72677, 74291, 74096, 73491, 73195, 73865, 74272, 74756, 74305, 75150, 75174, 78162, 77426, 76796, 76270, 75892, 76517, 76936, 76821, 77062, 77662, 77451, 77762, 77703, 77836, 79825, 78814, 78241, 78189, 78684, 78885, 79507, 80208, 80142, 80652, 80817, 88778, 83723, 82330, 82239, 81769, 81967, 82245, 83000, 82809, 82611, 82999, 83284, 83626, 86970, 85887, 85746, 85418, 85053, 85846, 85779, 85968, 85912, 86260, 86127, 86846, 87790, 87137, 87081, 87667, 87671, 88099, 88172, 94374, 91938, 90174, 89546, 88859, 88854, 90043, 89796, 91228, 90793, 90500, 90837, 91287, 91819, 93547, 92248, 92031, 92133, 92636, 92443, 93344, 93687, 93674, 93585, 94027, 93710, 94257, 97325, 95337, 94874,\n 94585, 96194, 96088, 96889, 98006, 97454, 97357, 97683, 97610, 97685, 99697, 98268, 98076, 99888, 99771, 99893]\n generator = avl.preorder(avl.origin)\n self.assertIsInstance(generator, types.GeneratorType)\n for num in expected:\n node = next(generator)\n self.assertIsInstance(node, Node)\n self.assertEqual(num, node.value)\n with 
self.assertRaises(StopIteration):\n next(generator)\n\n expected = [433, 218, 927, 640, 1069, 1362, 1175, 2011,\n 2118, 1790, 992, 3231, 3883, 4322, 4079, 3592, 2180, 4696, 4861, 5403, 5690, 5383, 6089, 6266, 5965, 6949, 7135, 7396, 7699, 8121, 7691, 7195, 8749, 8785, 9497, 8885, 8773, 8187, 6628, 4398, 10441, 10039, 11628, 10524, 12640, 12879, 13053, 12842, 11888, 13887, 14023, 13968, 14913, 15311, 16168, 15424, 14846, 13417, 16422, 17086, 16706, 17596, 17564, 17910, 19072, 18704, 19484, 19555, 19548, 19543, 19179, 17711, 16395, 9691, 19906, 19971, 20521, 20860, 20618, 21353, 21205, 20419, 21419, 21723, 22748, 23065, 22752, 23220, 23359, 23179, 22253, 21379, 23710, 25504, 24332, 26581, 26708, 26494, 26945, 27161, 27073, 26717, 27972, 28483, 28198, 28018, 28775, 29184, 29385, 29108, 28682, 27793, 30063, 29822, 30621, 30478, 30210, 30807, 31135, 31189, 31423, 32398, 31689, 31164, 30800, 29509, 23673, 33284, 34045, 33747, 34251, 34242, 34216, 34508, 34794, 34781, 35355, 35481, 35466, 35325, 34271, 35927, 35943, 36747, 37663, 37629, 36069, 39136, 38153, 39265, 39211, 40178, 40453, 40864, 40298, 39769, 37679, 35693, 41578, 41973, 41756, 42476, 43279, 43057, 42051, 43601, 44105, 44079, 44525, 44302, 43379, 45396, 45373, 46027, 46204, 46070, 45775, 44904, 46463, 46557, 46534, 46961, 46832, 46666, 47856, 47287, 48073, 48947, 48585, 48581, 47935, 47170, 46376, 41360, 32645, 19628, 49089, 49422, 49612, 49441, 49782, 49943, 49624, 50724, 51097, 50766, 51995, 52068, 52422, 52553, 52436, 52283, 51568, 50506, 53140, 53356, 53224, 53851, 53644, 54054, 54683, 55196, 55077, 55295, 55339, 55596, 55822, 55611, 55579, 55289, 53863, 52915, 56785, 56254, 57043, 57147, 57066, 56800, 57199, 57904, 58311, 58299, 58751, 58613, 58278, 57167, 58838, 59664, 59125, 60226, 61353, 61108, 61543, 61504, 60049, 58783, 61888, 61730, 62257, 62426, 63102, 63626, 62995, 61979, 65089, 65636, 65386, 66181, 66837, 66337, 66156, 67686, 68681, 69351, 68899, 67763, 67217, 64606, 61590, 55832, 69695, 69725, 70736, 71265, 71258, 70733, 71487, 71473, 71884, 71668, 71920, 72171, 72677, 72409, 71913, 71417, 73195, 73865, 73491, 74272, 74096, 74305, 75174, 75150, 74756, 74291, 72927, 75892, 76517, 76270, 76821, 77062, 76936, 76796, 77451, 77703, 77836, 77762, 77662, 77426, 78189, 78684, 78241, 79507, 78885, 78814, 80142, 80817, 80652, 80208, 79825, 78162, 75848, 81967, 81769, 82245, 82239, 82611, 82999, 82809, 83626, 83284, 83000, 82330, 85053, 85418, 85779, 85846, 85746, 85912, 86127, 86846, 86260, 85968, 85887, 87081, 87671, 87667, 87137, 88172, 88099, 87790, 86970, 83723, 88854, 88859, 89796, 90043, 89546, 90500, 90837, 90793, 91819, 91287, 91228, 90174, 92133, 92031, 92443, 93344, 92636, 92248, 93585, 93674, 93710, 94257, 94027, 93687, 93547, 91938, 94585, 94874, 96088, 96889, 96194, 95337, 97357,\n 97610, 97685, 97683, 97454, 98076, 98268, 99771, 99893, 99888, 99697, 98006, 97325, 94374, 88778, 80878, 69402, 49040]\n generator = avl.postorder(avl.origin)\n self.assertIsInstance(generator, types.GeneratorType)\n for num in expected:\n node = next(generator)\n self.assertIsInstance(node, Node)\n self.assertEqual(num, node.value)\n with self.assertRaises(StopIteration):\n next(generator)", "title": "" }, { "docid": "c8eaaede6d1c1d0ecf71b041f825838a", "score": "0.42484716", "text": "def build_tree(data, disorder_function=\"entropy\"):\n if disorder_function==\"entropy\":\n disorder_estimator = DecisionTree.entropy\n elif disorder_function==\"gini_impurity\":\n disorder_estimator = DecisionTree.gini_impurity\n elif 
disorder_function==\"variance\":\n disorder_estimator = DecisionTree.variance\n len_data = len(data)\n if len_data==0: return Node()\n current_disorder_level = disorder_estimator(data)\n # track enhancement of disorer's level\n best_enhancement = 0.0\n best_split = None\n best_split_sets = None\n #number columns\n nbr_cols = len(data[0]) - 1 #the last column is reserved for results\n for col in xrange(nbr_cols):\n #get unique values of the current column\n col_values = {}\n for row in data:\n col_values[row[col]] = 1\n for col_value in col_values.iterkeys():\n set1, set2 = DecisionTree.divide_data(data, col, col_value)\n p1 = float(len(set1))/len_data\n p2 = (1 - p1)\n enhancement = current_disorder_level - (p1*disorder_estimator(set1)) - (p2*disorder_estimator(set2))\n if (enhancement>best_enhancement) and (len(set1)>0 and len(set2)>0):\n best_enhancement = enhancement\n best_split = (col, col_value)\n best_split_sets = (set1, set2)\n if best_enhancement > 0:\n t_node = DecisionTree.build_tree(best_split_sets[0])\n f_node = DecisionTree.build_tree(best_split_sets[1])\n return Node(col=best_split[0],value=best_split[1],\n t_node=t_node,f_node=f_node)\n else:\n return Node(results=DecisionTree.count_results(data))", "title": "" }, { "docid": "2c5f91ab27165318df84f26f3a204328", "score": "0.4248302", "text": "def createMeans(sim, solution):\n means = {}\n means['mY'] = []\n means['mM'] = []\n means['mC'] = []\n means['mA'] = []\n \n for i in range(len(solution)):\n #Income\n means['mY'].append(np.mean(sim[i]['Y'], axis=0))\n means['mM'].append(np.mean(sim[i]['M'], axis=0))\n means['mC'].append(np.mean(sim[i]['C'], axis=0))\n means['mA'].append(np.mean(sim[i]['A'], axis=0))\n return means", "title": "" }, { "docid": "03894bcba5783719ba86e49be83908c2", "score": "0.42466655", "text": "def uct(self):\n # each created node is visited while backproping the final score\n # this value should then automically be positive\n assert self.num_visits > 0\n exploration = np.sqrt(np.log(self.parent.num_visits) / self.num_visits)\n return self.score_avg + float(self.c_exp) * exploration", "title": "" }, { "docid": "5ec19d7bc5bf7424d0545bf74a89d752", "score": "0.42446172", "text": "def copy_normalization(self, mean, variance, steps):\n update_mean = tf.assign(self.running_mean, mean)\n update_variance = tf.assign(self.running_variance, variance)\n update_norm_step = tf.assign(self.normalization_steps, steps)\n return tf.group([update_mean, update_variance, update_norm_step])", "title": "" }, { "docid": "f63c30ba9a97102cae253d4507d06cb9", "score": "0.42413425", "text": "def calc_expectation(node):\n # check if node is a leaf (where the house might be)\n if len(node.children) == 0:\n # It is a leaf\n node.expectation = 0\n node.wasted_steps = 0\n node.num_leaves = 1\n return\n\n # This is NOT a leaf\n # We calculate the expectation and wasted_steps here\n\n # After this loop, every child should have node info ready\n for child in node.children:\n calc_expectation(child)\n # print child.nodeid\n\n node.num_leaves = sum([child.num_leaves for child in node.children])\n\n # We try all orders of child nodes and find the order that minimizes\n # the current node's expectation\n min_exp = None\n for strategy in itertools.permutations(node.children,\n len(node.children)):\n # strategy is a list: [Node, Node, Node ..]\n e_sum = 0.0 # e is the sum of expectations\n wasted = 0 # wasted steps for this strategy so far\n for i, child in enumerate(strategy):\n if i == 0:\n wasted = 1\n else:\n if strategy[i-1].has_worm == 
True:\n # previous child has a worm, we don't need to go into\n # the subtree\n wasted += 0 + 2 # 2 is for going back from previous child and\n # then into the currennt child\n else:\n # previous child has a worm, we need to travel into it\n wasted += strategy[i-1].wasted_steps + 2\n e_sum += (child.expectation + wasted) * child.num_leaves\n\n if i == len(strategy) - 1:\n # this is the last child\n if strategy[i].has_worm == True:\n wasted += 0 + 1 # 1 is for going back from previous child\n # to root node of the subtree\n else:\n wasted += strategy[i].wasted_steps + 1\n\n e_avg = float(e_sum) / node.num_leaves\n if node.expectation == None or e_avg < node.expectation:\n node.expectation = e_avg\n node.wasted_steps = wasted", "title": "" }, { "docid": "7f7493be18fa3a9e3fdb3651e1ece8e7", "score": "0.42391226", "text": "def all_mean(self):\n return {case: round(self.mean(case), 2) for case in self.cases}", "title": "" }, { "docid": "bf7fca99d65e67fe72baf72378147e49", "score": "0.423832", "text": "def average_weights(self):\n ...", "title": "" }, { "docid": "c23af4b6ccaaed621391d3a57ecf76b1", "score": "0.42370525", "text": "def resolve_by_average(table):\n resolvedTable = copy.deepcopy(table)\n for row in resolvedTable:\n for index in range(len(row)):\n if row[index] == 'NA':\n row[index] = average(table, index)\n return resolvedTable", "title": "" }, { "docid": "c8589cce66cf770e0d13617b8ca0f7c0", "score": "0.4235785", "text": "def mutate(individual, minimum_value, maximum_value, mutation_rate, mutation_step):\n for x in range(0,len(individual.weights)):\n for i in range(0, len(individual.weights[x])):\n for j in range(0, len(individual.weights[x][i])):\n if(random.randint(0,100)<mutation_rate):\n alter = random.uniform(0.1,mutation_step)\n add_or_remove = random.randrange(0,2) #0 or 1\n if(add_or_remove == 0): #decide if to add or remove from the weight\n individual.weights[x][i][j] += alter\n else: \n individual.weights[x][i][j] -= alter\n individual.weights[x][i][j] = normalize_gene(individual.weights[x][i][j], minimum_value, maximum_value)\n return individual", "title": "" }, { "docid": "1b4c4acd154c828fa1662c15a071707e", "score": "0.423159", "text": "def specify_scenario(scenario):\n \n P_dict = {} \n \n if scenario == 'A':\n P_dict[\"Nuclear\"] = 0\n P_dict[\"Hydro\"] = 65\n P_dict[\"Other\"] = 15\n P_dict[\"Wind\"] = 100\n P_dict[\"Solar\"] = 95\n P_dict[\"Consumption\"] = 275\n \n elif scenario == 'B':\n P_dict[\"Nuclear\"] = 60\n P_dict[\"Hydro\"] = 65\n P_dict[\"Other\"] = 15\n P_dict[\"Wind\"] = 20\n P_dict[\"Solar\"] = 0\n P_dict[\"Consumption\"] = 160\n \n elif scenario == 'C':\n \n P_dict[\"Nuclear\"] = 0\n P_dict[\"Hydro\"] = 65\n P_dict[\"Other\"] = 15\n P_dict[\"Wind\"] = 64\n P_dict[\"Solar\"] = 0\n P_dict[\"Consumption\"] = 144 \n \n elif scenario == 'D':\n \n P_dict[\"Nuclear\"] = 0\n P_dict[\"Hydro\"] = 65\n P_dict[\"Other\"] = 15\n P_dict[\"Wind\"] = 64\n P_dict[\"Solar\"] = 50\n P_dict[\"Consumption\"] = 194 \n \n return P_dict", "title": "" }, { "docid": "d0ebee17ffb0687818b925dd06d9d9df", "score": "0.423092", "text": "def remove_mean(self):\n return self - self.mean()", "title": "" }, { "docid": "df1b71dc3d47b6bb97810585ee20b47e", "score": "0.42268556", "text": "def test_cal_medium(self):\n tree1 = GrammarTree(\"(x-0.7*2)(3.1/x)(x^5+3)\")\n tree1.build_tree()\n tree1.calculate_value_tree(tree1.root, 3)\n\n tree2 = GrammarTree(\"cos(tan(ln(sin(x^2-2*x))))\")\n tree2.build_tree()\n tree2.calculate_value_tree(tree2.root, 3)\n\n tree3 = 
GrammarTree(\"2((x-1.1)(x^1.5))(5^x)\")\n tree3.build_tree()\n tree3.calculate_value_tree(tree3.root, 3)\n\n tree4 = GrammarTree(\"2(cos(x-1)sin(x+1))*(pi+e)\")\n tree4.build_tree()\n tree4.calculate_value_tree(tree4.root, 3)\n\n self.assertEqual(tree1.root.value, (3 - 0.7 * 2) * (3.1 / 3) * (3 ** 5 + 3))\n self.assertEqual(tree2.root.value, math.cos(math.tan(math.log(math.sin(3 ** 2 - 2 * 3)))))\n self.assertEqual(tree3.root.value, 2 * ((3 - 1.1) * (3 ** 1.5)) * (5 ** 3))\n self.assertEqual(tree4.root.value, 2 * (math.cos(3 - 1) * math.sin(3 + 1)) * (math.pi + math.e))", "title": "" }, { "docid": "a64324a105455a2f27e977cd3c989452", "score": "0.42262074", "text": "def computeTree(self):\n self._computeEdges()\n self._computeTags()\n tree = self._getTree()\n self._correctTree(tree)\n return tree", "title": "" }, { "docid": "18caf4a6e31d60070d55338ac60dc3e1", "score": "0.4222822", "text": "def sub_fg_forest(self, nodes, weights, scale):\n return self._sub_forest(nodes, weights, scale, [tree.sub_fg_tree for tree in self._trees])", "title": "" }, { "docid": "1a2a6347e49b8e378060e8d8db784692", "score": "0.42213315", "text": "def create_scenario( self, skill = 1 ):\n scenario = copy.copy( self.scenarios[ self.progress ] )\n scenario.restart()\n playfield = Playfield()\n playfield.load( Level.get_filename( self.get_current_level_nr() ) )\n scenario.playfield = playfield\n\n if not scenario.ontime:\n if scenario.timeout is not None:\n scenario.timeout = int(scenario.timeout / skill)\n elif isinstance( scenario, ScenarioDiamondCollect ):\n if scenario.goal is not None:\n scenario.goal = int(scenario.goal * skill / 10) * 10\n else:\n scenario.goal = int(scenario.goal * skill)\n \n return scenario", "title": "" }, { "docid": "fe5e74b2cdb33101e0fca30bd1242c5c", "score": "0.42201412", "text": "def average_data(self):\r\n\t\tself.happiness = float(sum(self.happiness))/(len(self.happiness)-1) # subtract one from denominator to get remove influence of initial State object set to 0 (added 0 happiness, safety to State's record)\r\n\t\tself.safety = float(sum(self.safety))/(len(self.safety)-1)", "title": "" } ]
f15e5784845dfd893a9a0e9205b03fac
HDF5 export method for the block.
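For orientation, a minimal runnable sketch of such an export method, assuming the h5py library. The Block class, the group name 'layer/0', and the file name 'network.h5' are illustrative assumptions, not drawn from any passage; only the create_dataset call pattern (a fixed-length 'S10' string for the layer type plus an integer 'shape' array) mirrors the passages below.

import h5py
import numpy as np

class Block:
    """Hypothetical network block carrying only shape metadata."""
    def __init__(self, shape):
        self.shape = shape

    def export_hdf5(self, handle):
        # Record the layer type as a fixed-length ASCII string.
        handle.create_dataset(
            'type', (1, ), 'S10', ['block'.encode('ascii', 'ignore')]
        )
        # Record the block's output shape as an integer array.
        handle.create_dataset('shape', data=np.array(self.shape))

# Usage: each block exports itself into its own HDF5 group.
with h5py.File('network.h5', 'w') as f:
    Block((1, 28, 28)).export_hdf5(f.create_group('layer/0'))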
[ { "docid": "38ffe91035bc6cfbc02ef4285e6fecb3", "score": "0.6027526", "text": "def export_hdf5(self, handle):\n handle.create_dataset(\n 'type', (1, ), 'S10', ['flatten'.encode('ascii', 'ignore')]\n )\n handle.create_dataset('shape', data=np.array(self.shape))", "title": "" } ]
[ { "docid": "97eea5b6ccf830da77780d2f683a11af", "score": "0.696666", "text": "def export_hdf5(self, handle):\n pass", "title": "" }, { "docid": "97eea5b6ccf830da77780d2f683a11af", "score": "0.696666", "text": "def export_hdf5(self, handle):\n pass", "title": "" }, { "docid": "9cfad81190d2cc4196addaa86885f6a1", "score": "0.6411993", "text": "def _export_hdf5(self, h5group, metadata_dict=None):\n # set the software and its version\n if metadata_dict is None:\n metadata_dict = {}\n h5group.attrs[\"software\"] = \"afmformats\"\n h5group.attrs[\"software version\"] = version\n enum_key = str(self.enum)\n if enum_key in h5group:\n # random fill-mode (get the next free enum key)\n ii = 0\n while True:\n enum_key = str(ii)\n if enum_key not in h5group:\n break\n ii += 1\n metadata_dict[\"enum\"] = int(enum_key)\n subgroup = h5group.create_group(enum_key)\n for col in self.columns:\n if col == \"segment\":\n ds = subgroup.create_dataset(name=col,\n data=np.asarray(self[col],\n dtype=np.uint8),\n compression=\"gzip\",\n fletcher32=True)\n elif col == \"index\":\n # do not store index column\n continue\n else:\n ds = subgroup.create_dataset(name=col,\n data=self[col],\n compression=\"gzip\",\n fletcher32=True)\n ds.attrs[\"unit\"] = column_units[col]\n for kk in metadata_dict:\n if kk == \"path\":\n subgroup.attrs[\"path\"] = str(metadata_dict[\"path\"])\n else:\n subgroup.attrs[kk] = metadata_dict[kk]", "title": "" }, { "docid": "b4e9936a9c55c89abb19546badb30d64", "score": "0.6303559", "text": "def write_to_h5(self):\n print(\"advancer: write_to_h5: not implemented yet\")\n return", "title": "" }, { "docid": "8523f4dbe0a1bea8932fed4a0e56e0f5", "score": "0.62254363", "text": "def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):\n raise NotImplementedError", "title": "" }, { "docid": "d192c3a5cb78c2c7e89db4ed9ea76ba8", "score": "0.60697734", "text": "def _to_hdf5(self, x5_root):\n raise NotImplementedError", "title": "" }, { "docid": "f34a8571166d2f3665b2ca188bb9f2e1", "score": "0.5935826", "text": "def output_hdf5(array, array_name, halos, npoints, block):\n if not isinstance(array, list):\n array = [array]\n if not isinstance(array_name, list):\n array_name = [array_name]\n assert len(array) == len(array_name)\n with h5py.File('data.h5', 'w') as hf:\n # Create a group\n if (isinstance(block, MultiBlock)):\n all_blocks = block.blocks\n else:\n all_blocks = [block]\n for b in all_blocks:\n g1 = hf.create_group(b.blockname)\n # Loop over all the dataset inputs and write to the hdf5 file\n for ar, name in zip(array, array_name):\n g1.attrs.create(\"dims\", [b.ndim], dtype=\"int32\")\n g1.attrs.create(\"ops_type\", u\"ops_block\",dtype=\"S9\")\n g1.attrs.create(\"index\", [b.blocknumber], dtype=\"int32\")\n block_dset_name = b.location_dataset(name).base\n dset = g1.create_dataset('%s' % (block_dset_name), data=ar)\n set_hdf5_metadata(dset, halos, npoints, b)\n return", "title": "" }, { "docid": "94108bcdf2d9c12a0bfeab4486f085a4", "score": "0.5926845", "text": "def to_hdf5(self, filename):\n\n f = h5py.File(filename, 'w')\n f['xyz'] = self.xyz\n f.close()\n\n return", "title": "" }, { "docid": "9d20c0ff831d80c1c675952e132bbe02", "score": "0.5882384", "text": "def export_hdf5(self, handle):\n def weight(w):\n return self.pre_hook_fx(\n w, descale=True\n ).reshape(w.shape[:2]).cpu().data.numpy()\n\n handle.create_dataset(\n 'type', (1, ), 'S10', ['input'.encode('ascii', 'ignore')]\n )\n handle.create_dataset('shape', data=np.array(self.shape))\n\n if self.neuron is not None:\n if self.weight 
is not None:\n handle.create_dataset('weight', data=weight(self.weight))\n if self.bias is not None:\n handle.create_dataset('bias', data=weight(self.bias))\n\n for key, value in self.neuron.device_params.items():\n handle.create_dataset(f'neuron/{key}', data=value)", "title": "" }, { "docid": "7ffe75a9b57de50d035ea275566d5820", "score": "0.58606523", "text": "def export(self):\n pass", "title": "" }, { "docid": "9528c8f49b700de1cebbd534a79727df", "score": "0.58387154", "text": "def to_hdf(self, hdf=None, group_name=None):\n super(Gpaw, self).to_hdf(hdf=hdf, group_name=group_name)\n with self.project_hdf5.open(\"input\") as hdf5_input:\n self.input.to_hdf(hdf5_input)", "title": "" }, { "docid": "48421179574008ca66ffd07e712acf0e", "score": "0.5793877", "text": "def export_hdf5(self, handle):\n handle.create_dataset(\n 'type', (1, ), 'S10', ['average'.encode('ascii', 'ignore')]\n )\n handle.create_dataset('shape', data=np.array(self.neuron.shape))", "title": "" }, { "docid": "48421179574008ca66ffd07e712acf0e", "score": "0.5793877", "text": "def export_hdf5(self, handle):\n handle.create_dataset(\n 'type', (1, ), 'S10', ['average'.encode('ascii', 'ignore')]\n )\n handle.create_dataset('shape', data=np.array(self.neuron.shape))", "title": "" }, { "docid": "1f3fca3e4b98cd47105ee5a2aaf53c76", "score": "0.57423747", "text": "def export_hdf5(self, handle):\n def weight(s):\n return s.pre_hook_fx(\n s.weight, descale=True\n ).reshape(s.weight.shape[:2]).cpu().data.numpy()\n\n def delay(d):\n return torch.floor(d.delay).flatten().cpu().data.numpy()\n\n handle.create_dataset(\n 'type', (1, ), 'S10', ['dense'.encode('ascii', 'ignore')]\n )\n handle.create_dataset('shape', data=np.array(self.neuron.shape))\n handle.create_dataset('inFeatures', data=self.synapse.in_channels)\n handle.create_dataset('outFeatures', data=self.synapse.out_channels)\n\n if hasattr(self.synapse, 'imag'): # complex synapse\n handle.create_dataset(\n 'weight/real',\n data=weight(self.synapse.real)\n )\n handle.create_dataset(\n 'weight/imag',\n data=weight(self.synapse.imag)\n )\n else:\n handle.create_dataset('weight', data=weight(self.synapse))\n\n if self.delay is not None:\n handle.create_dataset('delay', data=delay(self.delay))\n\n # for key, value in self.neuron.device_params.items():\n # handle.create_dataset(f'neuron/{key}', data=value)", "title": "" }, { "docid": "939cb88526ae60964f34e995cca9e88e", "score": "0.57145286", "text": "def _save(self, h5_file_name, time=0.0): # noqa: C901\n # 1. Create hdf5 file.\n # 2. Initialize groups for Eulerian and Lagrangian grids.\n # 3. For Eulerian group, initialize groups for scalar and\n # vector fields (grid is already defined).\n # 4. For Lagrangian group, initialize individual groups for different lagrangian grids.\n # In each of the lagrangian grid group, store the grid information and initialize\n # groups for scalar and vector field.\n # 5. 
Go over the fields in the dictionary and save them in their corresponding location.\n\n with h5py.File(h5_file_name, \"w\", driver=\"mpio\", comm=MPI.COMM_WORLD) as f:\n # Save time stamp\n f.attrs[\"time\"] = time\n\n # Eulerian save\n if self.eulerian_grid_defined and self.eulerian_fields:\n eulerian_grp = f.create_group(\"Eulerian\")\n # 'Scalar' and 'Vector' fields\n eulerian_scalar_grp = eulerian_grp.create_group(\"Scalar\")\n eulerian_vector_grp = eulerian_grp.create_group(\"Vector\")\n # Go over and save all fields that lie on the common eulerian grid\n # Note : Paraview renders 2D eulerian field on the YZ plane, inconsistently with\n # lagrangian fields rendered on XY plane. This could be a bug in Paraview and\n # I have opened up a question on Paraview's official forum\n # https://discourse.paraview.org/t/2dcorectmesh-displaying-on-yz-plane-instead-of-xy-plane/9535\n # As a workaround, here we extend the dimension of the 2D field, so that\n # it appears as a slice in a 3D space, and Paraview can\n # correctly render the field on the XY plane. We can remove the\n # reshaping when Paraview resolve the issue.\n for field_name in self.eulerian_fields:\n field = self.eulerian_fields[field_name]\n field_type = self.eulerian_fields_type[field_name]\n if field_type == \"Scalar\":\n # Set up dataset with global shape\n dset = eulerian_scalar_grp.create_dataset(\n field_name,\n shape=(1, *self.eulerian_grid_size),\n dtype=self.real_dtype,\n )\n # Write the local chunk of data\n dset[self.local_eulerian_index] = field[\n self.eulerian_field_inner_index\n ].reshape(1, *self.local_eulerian_grid_size)\n elif field_type == \"Vector\":\n # Decompose vector fields into individual component as scalar fields\n for idx_dim in range(self.dim):\n # Set up dataset with global shape\n dset = eulerian_vector_grp.create_dataset(\n f\"{field_name}_{idx_dim}\",\n shape=(1, *self.eulerian_grid_size),\n dtype=self.real_dtype,\n )\n # Write the local chunk of data\n dset[self.local_eulerian_index] = field[idx_dim][\n self.eulerian_field_inner_index\n ].reshape(1, *self.local_eulerian_grid_size)\n else:\n raise ValueError(\n \"Unsupported eulerian_field_type ('Scalar' and 'Vector' only)\"\n )\n # Save eulerian simulation parameters\n eulerian_params_grp = eulerian_grp.create_group(\"Parameters\")\n eulerian_params_grp.attrs[\"origin\"] = self.eulerian_origin\n eulerian_params_grp.attrs[\"dx\"] = self.eulerian_dx\n eulerian_params_grp.attrs[\"grid_size\"] = self.eulerian_grid_size\n\n # Only a single rank generates xdmf. Here we use rank 0.\n if self.mpi_construct.rank == 0:\n self.generate_xdmf_eulerian(h5_file_name=h5_file_name, time=time)\n\n # Lagrangian save\n # Note: We need to reverse the order from (dim, ...) -> (..., dim) for Paraview.\n # For eulerian fields, we mitigate this by splitting each vector\n # component into scalar fields. For lagrangian fields, since N is small\n # compared to the N in Eulerian grid, I have decided to stick with the\n # tranpose/moveaxis approach for now. 
This pays off later as convenience\n # during post-processing and visualizing these lagrangian points in Paraview.\n lagrangian_grp = f.create_group(\"Lagrangian\")\n # Go over all lagrangian grids\n for lagrangian_grid_name in self.lagrangian_grids:\n lagrangian_grid_grp = lagrangian_grp.create_group(lagrangian_grid_name)\n lagrangian_grid = self.lagrangian_grids[lagrangian_grid_name]\n is_master_rank = (\n self.mpi_construct.rank\n == self.lagrangian_grid_master_rank[lagrangian_grid_name]\n )\n\n # Dataset for lagrangian grid\n dset_grid = lagrangian_grid_grp.create_dataset(\n \"Grid\",\n shape=(\n self.lagrangian_grid_num_node[lagrangian_grid_name],\n self.dim,\n ),\n dtype=self.real_dtype,\n )\n # write only on master_rank where lagrangian grid resides\n if is_master_rank:\n dset_grid[...] = np.transpose(lagrangian_grid)\n # Dataset for lagrangian grid connection, if any\n if lagrangian_grid_name in self.lagrangian_grid_connection:\n dset_connection = lagrangian_grid_grp.create_dataset(\n \"Connection\",\n shape=(self.lagrangian_grid_num_node[lagrangian_grid_name],),\n dtype=np.int64,\n )\n # write only on master_rank\n if is_master_rank:\n dset_connection[...] = self.lagrangian_grid_connection[\n lagrangian_grid_name\n ]\n # Dataset for 'Scalar' and 'Vector' fields\n lagrangian_scalar_grp = lagrangian_grid_grp.create_group(\"Scalar\")\n lagrangian_vector_grp = lagrangian_grid_grp.create_group(\"Vector\")\n # Go over and save all fields that lie on the current lagrangian grid\n for field_name in self.lagrangian_fields_with_grid_name[\n lagrangian_grid_name\n ]:\n field = self.lagrangian_fields[field_name]\n field_type = self.lagrangian_fields_type[field_name]\n if field_type == \"Scalar\":\n dset = lagrangian_scalar_grp.create_dataset(\n field_name,\n shape=(\n self.lagrangian_grid_num_node[lagrangian_grid_name],\n ),\n dtype=self.real_dtype,\n )\n # write only on master_rank\n if is_master_rank:\n dset[...] = field\n elif field_type == \"Vector\":\n dset = lagrangian_vector_grp.create_dataset(\n field_name,\n shape=(\n self.lagrangian_grid_num_node[lagrangian_grid_name],\n self.dim,\n ),\n dtype=self.real_dtype,\n )\n # write only on master_rank\n if is_master_rank:\n dset[...] = np.moveaxis(field, 0, -1)\n else:\n raise ValueError(\n \"Unsupported lagrangian_field_type ('Scalar' and 'Vector' only)\"\n )\n\n # only master_rank owning the lagrangian grid generates the xdmf file\n if is_master_rank:\n self.generate_xdmf_lagrangian(h5_file_name=h5_file_name, time=time)", "title": "" }, { "docid": "49892e961ea730ac0325a8ecb18fe321", "score": "0.5701092", "text": "def Write(self):\n if os.path.exists(self.path):\n print(\"The file %s already exists.\" % self.path)\n check = input(\"Do you want to overwrite this file? 
[y, n] \")\n if check == 'y':\n os.remove(self.path)\n else:\n print(\"File is not saved\")\n return\n f = h5.File(self.path, 'a')\n\n positions = f.create_group('Positions')\n\n for i, pos in enumerate(self.positions):\n hdf5_position(positions, pos, i)\n\n if self.analysis:\n ana = f.create_group('Analysis')\n for ana_type in self.analysis.keys():\n hdf5_analysis(ana, self.analysis[ana_type], ana_type)\n\n if self.type == 'tiles':\n tiles = f.create_group('Tiles')\n hdf5_tile(tiles, self, 0)\n\n f.flush()\n f.close()", "title": "" }, { "docid": "30692b04c7fd3daa72c75aac607fd6a7", "score": "0.5675069", "text": "def export(self, file_obj):\n file_obj.write(f'{NL}')\n file_obj.write(f'{self.__HEADER__}{NL}')\n file_obj.write(f'stride={self.stride}{NL}')\n file_obj.write(NL)", "title": "" }, { "docid": "23babb001e45625ec462fed59275d1e8", "score": "0.5667675", "text": "def _write(self, h5_group, memo) -> None:\n self.data._write(h5_group, memo)", "title": "" }, { "docid": "2d2607fef3c5bb8922a844de216420a1", "score": "0.5649634", "text": "def write_bed (self, fhd):\n pass", "title": "" }, { "docid": "0c70c19acc54a2d3cb111e9e4c62d955", "score": "0.56444055", "text": "def export_hdf5(self, handle):\n def weight(s):\n return s.pre_hook_fx(\n s.weight, descale=True\n ).reshape(s.weight.shape[:-1]).cpu().data.numpy()\n\n def delay(d):\n return torch.floor(d.delay).flatten().cpu().data.numpy()\n\n # descriptors\n handle.create_dataset(\n 'type', (1, ), 'S10', ['pool'.encode('ascii', 'ignore')]\n )\n handle.create_dataset('shape', data=np.array(self.neuron.shape))\n handle.create_dataset('kernelSize', data=self.synapse.kernel_size[:-1])\n handle.create_dataset('stride', data=self.synapse.stride[:-1])\n handle.create_dataset('padding', data=self.synapse.padding[:-1])\n handle.create_dataset('dilation', data=self.synapse.dilation[:-1])\n\n # weight\n if self.synapse.weight_norm_enabled:\n self.synapse.disable_weight_norm()\n if hasattr(self.synapse, 'imag'): # complex synapse\n handle.create_dataset(\n 'weight/real',\n data=weight(self.synapse.real)\n )\n handle.create_dataset(\n 'weight/imag',\n data=weight(self.synapse.imag)\n )\n else:\n handle.create_dataset('weight', data=weight(self.synapse))\n\n # delay\n if self.delay is not None:\n handle.create_dataset('delay', data=delay(self.delay))\n\n # neuron\n for key, value in self.neuron.device_params.items():\n handle.create_dataset(f'neuron/{key}', data=value)", "title": "" }, { "docid": "b39b900a9234805bbfc323ef82176e67", "score": "0.5626131", "text": "def to_hdf(self, hdf=None, group_name=None):\n super(RandSpg, self).to_hdf(hdf=hdf, group_name=group_name)\n with self.project_hdf5.open(\"input\") as hdf5_input:\n self.input.to_hdf(hdf5_input)", "title": "" }, { "docid": "9aed1bc782570d2458a7f258f0bfbd5b", "score": "0.5624918", "text": "def export(self) -> bytes:\n raise NotImplementedError()", "title": "" }, { "docid": "35abe3cf5f48e637906d4dfe583d520c", "score": "0.5620825", "text": "def write_hdf5(self, filepath):\n gridlines = self.get_gridlines()\n write_grid_hdf5(filepath, 'vertex', *gridlines)", "title": "" }, { "docid": "e2cdc164ea691bc96b68c64ab89ba5e5", "score": "0.56072533", "text": "def save_to_hdf5(*args, **kwargs):\n return _GWMetaFile.save_to_hdf5(*args, _class=_TGRMetaFile, **kwargs)", "title": "" }, { "docid": "87d90512f59c9d720b3d1c6cfdb9eda8", "score": "0.55859685", "text": "def export(self, file_obj):\n file_obj.write(f'{NL}')\n file_obj.write(f'{self.__HEADER__}{NL}')\n file_obj.write(f'size={self.size}{NL}')\n 
file_obj.write(f'stride={self.stride}{NL}')\n file_obj.write(f'padding={self.padding}{NL}')\n file_obj.write(NL)", "title": "" }, { "docid": "b11cc6b78c47fabe3201f4fec30fe1c9", "score": "0.558212", "text": "def save(self, hdf5):\n if isinstance(hdf5, str):\n hdf5 = HDF5File(hdf5, \"w\")\n hdf5.attrs[\"file_version\"] = \"1.0\"\n hdf5.attrs[\"writer_class\"] = str(self.__class__)\n hdf5[\"n_gaussians\"] = self.n_gaussians\n hdf5[\"n_features\"] = self.n_features\n hdf5[\"log_likelihood\"] = float(self.log_likelihood)\n hdf5[\"T\"] = int(self.t)\n hdf5[\"n\"] = np.array(self.n)\n hdf5[\"sumPx\"] = np.array(self.sum_px)\n hdf5[\"sumPxx\"] = np.array(self.sum_pxx)", "title": "" }, { "docid": "8970ef2cf2270e8d0d4753e84a9179b0", "score": "0.5578731", "text": "def _save_sample_to_h5(filename, idx, vol_data, vol_labels, points, e_r, e_theta, e_phi):\n with h5py.File(filename, 'a') as hf:\n hf['vol_data'][idx] = vol_data\n hf['vol_labels'][idx] = vol_labels\n hf['points'][idx] = points.flatten()\n hf['e_r'][idx] = e_r.flatten()\n hf['e_theta'][idx] = e_theta.flatten()\n hf['e_phi'][idx] = e_phi.flatten()", "title": "" }, { "docid": "f62f013f4731d7a7f39b50d0bcf67e9e", "score": "0.55752623", "text": "def export_multiple_hdf5(CEs, out_folder):\n for CE in CEs:\n if CE.input_file_name == None:\n raise Exception(\"This function only works when count estimator were formed from files (i.e. CE.input_filename != None\")\n\n for CE in CEs:\n CE.export(os.path.join(out_folder, os.path.basename(CE.input_file_name) + \".CE.h5\"))\n\n return", "title": "" }, { "docid": "480f1bfa8cfc6d208d74cb2370bcf87d", "score": "0.55728954", "text": "def to_hdf(self, hdf, group_name=None):\n if group_name is not None:\n hdf5_server = hdf.open(group_name)\n else:\n hdf5_server = hdf\n\n hdf5_server[\"TYPE\"] = str(type(self))\n hdf5_server[\"possiblevertexstates\"] = self.possible_vertex_states\n hdf5_server[\"vertexstate\"] = self.vertex_state\n hdf5_server[\"vertexname\"] = self.vertex_name\n hdf5_server[\"nhistory\"] = self.n_history\n self.input.to_hdf(hdf=hdf5_server, group_name=\"input\")\n self.output.to_hdf(hdf=hdf5_server, group_name=\"output\")\n self.archive.to_hdf(hdf=hdf5_server, group_name=\"archive\")", "title": "" }, { "docid": "8e53682428b9652a3e0e7a9473ace7e5", "score": "0.55637497", "text": "def Write(self):\n if os.path.exists(self.path):\n print(\"The file %s already exists.\" % self.path)\n check = input(\"Do you want to overwrite this file? 
[y, n] \")\n if check == 'y':\n os.remove(self.path)\n else:\n print(\"File is not saved\")\n return\n f = h5.File(self.path, 'a')\n if len(self.scans):\n scans = f.create_group('Scans')\n for i, scan in enumerate(self.scans):\n hdf5_scan(scans, scan, i)\n\n if len(self.force_curves):\n curves = f.create_group('ForceCurves')\n for i, fc in enumerate(self.force_curves):\n hdf5_curve(curves, fc, i)\n\n if len(self.images):\n imgs = f.create_group('Images')\n for i, img in enumerate(self.images):\n name = 'image%s' % str(i).zfill(2)\n hdf5_image(imgs, img, name)\n\n f.flush()\n f.close()", "title": "" }, { "docid": "d8435cf4e04670c65308d7030dea1c41", "score": "0.55119187", "text": "def export(self, file_obj):\n file_obj.write(f'{NL}')\n file_obj.write(f'{self.__HEADER__}{NL}')\n file_obj.write(f'probability={self.dropout_prob}{NL}')\n file_obj.write(NL)", "title": "" }, { "docid": "9a67d15264324bea2cdec56ff25b1a6e", "score": "0.55112594", "text": "def export(self, file_obj):\n file_obj.write(f'{NL}')\n file_obj.write(f'{self.__HEADER__}{NL}')\n if self.batch_normalize:\n file_obj.write(f'batch_normalize={int(self.batch_normalize)}{NL}')\n file_obj.write(f'size={self.size}{NL}')\n file_obj.write(f'stride={self.stride}{NL}')\n file_obj.write(f'pad={self.pad}{NL}')\n file_obj.write(f'filters={self.filters}{NL}')\n file_obj.write(f'activation={self.activation}{NL}')\n file_obj.write(NL)", "title": "" }, { "docid": "bd51aa45e0ccd8cada3b7489e3bd78e8", "score": "0.5490735", "text": "def save(self, hdf5):\n if isinstance(hdf5, str):\n hdf5 = HDF5File(hdf5, \"w\")\n hdf5.attrs[\"file_version\"] = \"1.0\"\n hdf5.attrs[\"writer_class\"] = str(self.__class__)\n hdf5[\"n_gaussians\"] = self.n_gaussians\n hdf5[\"trainer\"] = self.trainer\n hdf5[\"convergence_threshold\"] = self.convergence_threshold\n hdf5[\"max_fitting_steps\"] = self.max_fitting_steps\n hdf5[\"weights\"] = self.weights\n hdf5[\"update_means\"] = self.update_means\n hdf5[\"update_variances\"] = self.update_variances\n hdf5[\"update_weights\"] = self.update_weights\n gaussians_group = hdf5.create_group(\"gaussians\")\n gaussians_group[\"means\"] = self.means\n gaussians_group[\"variances\"] = self.variances\n gaussians_group[\"variance_thresholds\"] = self.variance_thresholds", "title": "" }, { "docid": "b276140a7092d991bc78eea979c76e54", "score": "0.54846096", "text": "def to_hdf(self, hdf=None, group_name=None):\n if hdf is None:\n hdf = self.project_hdf5\n super(CompoundVertex, self).to_hdf(hdf=hdf, group_name=group_name)", "title": "" }, { "docid": "6e1e80cc74a97d4637223271cd423e51", "score": "0.5483706", "text": "def write_block_hdr(self, f):\n hh = np.float64(self.hh)\n hv = np.float64(self.hv)\n z0 = np.float64(self.z0)\n nc = np.int32(self.nc)\n ni = np.int32(self.ni)\n nj = np.int32(self.nj)\n nk = np.int32(self.nk)\n \n block_hdr = [hh,hv,z0,nc,ni,nj,nk]\n for val in block_hdr:\n f.write(val)\n return", "title": "" }, { "docid": "76f4a0713e554c955fec22475db89aae", "score": "0.5473676", "text": "def export(self) -> bytes:\n self._update()\n data = self.ivt.export()\n data += self.bdt.export()\n if self.dcd:\n data += self.dcd.export()\n data += self.app.export()\n data += self.csf.export()\n return data", "title": "" }, { "docid": "76f4a0713e554c955fec22475db89aae", "score": "0.5473676", "text": "def export(self) -> bytes:\n self._update()\n data = self.ivt.export()\n data += self.bdt.export()\n if self.dcd:\n data += self.dcd.export()\n data += self.app.export()\n data += self.csf.export()\n return data", "title": "" }, { 
"docid": "e3397339f1df089ee633836529e14122", "score": "0.54395705", "text": "def export_hdf5(self, handle):\n def weight(s):\n return s.pre_hook_fx(\n s.weight, descale=True\n ).reshape(s.weight.shape[:2]).cpu().data.numpy()\n\n def delay(d):\n return torch.floor(d.delay).flatten().cpu().data.numpy()\n\n # dense descriptors\n handle.create_dataset(\n 'type', (1, ), 'S10', ['dense'.encode('ascii', 'ignore')]\n )\n handle.create_dataset('shape', data=np.array(self.neuron.shape))\n handle.create_dataset('inFeatures', data=self.synapse.in_channels)\n handle.create_dataset('outFeatures', data=self.synapse.out_channels)\n\n if self.synapse.weight_norm_enabled:\n self.synapse.disable_weight_norm()\n if hasattr(self.synapse, 'imag'): # complex synapse\n handle.create_dataset(\n 'weight/real',\n data=weight(self.synapse.real)\n )\n handle.create_dataset(\n 'weight/imag',\n data=weight(self.synapse.imag)\n )\n else:\n handle.create_dataset('weight', data=weight(self.synapse))\n\n # bias\n has_norm = False\n if hasattr(self.neuron, 'norm'):\n if self.neuron.norm is not None:\n has_norm = True\n if has_norm is True:\n handle.create_dataset(\n 'bias',\n data=self.neuron.norm.bias.cpu().data.numpy().flatten()\n )\n\n # delay\n if self.delay is not None:\n handle.create_dataset('delay', data=delay(self.delay))\n\n # neuron\n for key, value in self.neuron.device_params.items():\n handle.create_dataset(f'neuron/{key}', data=value)\n if has_norm is True:\n if hasattr(self.neuron.norm, 'weight_exp'):\n handle.create_dataset(\n 'neuron/weight_exp',\n data=self.neuron.norm.weight_exp\n )", "title": "" }, { "docid": "cf73067390006a713edb92f660b23555", "score": "0.54159707", "text": "def h5_data(request):\n dummy_file = h5.File('test_load_builtins.hdf5','w')\n dummy_file = h5.File('load_numpy_{}.hdf5'.format(request.function.__name__),'w')\n filename = dummy_file.filename\n test_data = dummy_file.create_group(\"root_group\")\n yield test_data\n dummy_file.close()", "title": "" }, { "docid": "83504a8ac1c3a854e2830a6d2a1d466b", "score": "0.54139847", "text": "def save(self):\n path = self.outfile\n if not path.endswith('.hdf5'):\n path += '.hdf5'\n with h5py.File(path, 'w') as hdf:\n # Store the original configuration data in json form\n config_group = hdf.create_group('config')\n config_group.attrs.update({\n 'outfile': self.outfile,\n 'copies': self.copies\n })\n if isinstance(self.initial, dict):\n initial_group = config_group.create_group('initial')\n initial_group.attrs.update(self.initial)\n else:\n config_group.attrs['initial'] = self.initial\n material_group = config_group.create_group('material')\n if self.material is not None:\n material_group.attrs.update(self.material)\n # Store each EnsembleTask as its own dataset\n ensemble_group = hdf.create_group('ensembles')\n for ensemble_task in self.ensemble_tasks:\n if ensemble_task.result is None:\n # TODO: the fact that this sometimes happens suggests\n # something is horribly wrong\n continue\n dataset = ensemble_group.create_dataset(\n str(ensemble_task.task_id),\n ensemble_task.result.shape,\n dtype=ensemble_task.result.dtype\n )\n # Store data\n dataset[:] = ensemble_task.result[:]\n # Store attributes\n dataset.attrs['task_id'] = ensemble_task.task_id\n dataset.attrs.update(ensemble_task.params)\n dataset.attrs.update(ensemble_task.material.info())\n # Store data for the first initial particle\n particle = ensemble_task.initial[0].initial_state[0]\n # TODO: store better event data\n dataset.attrs.update({\n 'shortname': particle.shortname,\n 'energy': 
particle.energy,\n 'momentum': particle.momentum,\n 'cos_theta': particle.cos_theta,\n })\n if self.statistics is not None:\n for i, stat in enumerate(self.statistics):\n dataset.attrs['s%d' % i] = stat", "title": "" }, { "docid": "444a3e8511bbebe59b5de58025227963", "score": "0.53963566", "text": "def save_to_h5(self, file):\n with self.file_lock:\n if str(self.idx) in file:\n del(file[str(self.idx)])\n grp = file.create_group(str(self.idx))\n lst_attr = [\n \"map_raw\", \"mask\", \"map_norm\", \"map_q\", \"xyz\", \"tcr\", \"qchi\",\n \"scan_info\", \"ai_args\"\n ]\n pawstools.attributes_to_h5(self, grp, lst_attr)\n grp.create_group('int_1d')\n pawstools.attributes_to_h5(self.int_1d, grp['int_1d'])\n grp.create_group('int_2d')\n pawstools.attributes_to_h5(self.int_2d, grp['int_2d'])\n grp.create_group('poni')\n pawstools.dict_to_h5(self.poni.to_dict(), grp['poni'])", "title": "" }, { "docid": "584cc924a604c4550cec30030baa743e", "score": "0.53915465", "text": "def write_hdf5(self, file_name = None, mode = 'a'):\n\n if self.uuid is None: self.uuid = bu.new_uuid()\n h5_reg = rwh5.H5Register(self.model)\n h5_reg.register_dataset(self.uuid, 'points_patch0', self.coordinates)\n h5_reg.write(file_name, mode = mode)", "title": "" }, { "docid": "669973590464486fd744a644193f8cdb", "score": "0.5379617", "text": "def export(self) -> bytes:\n self._update()\n data = bytes()\n data += self.ivt[0].export()\n data += self.ivt[1].export()\n data += self.bdt[0].export()\n data += self.bdt[1].export()\n if self.dcd:\n data += self.dcd.export()\n data += self.csf.export()\n data += bytes([self.PADDING_VAL] * self._compute_padding(len(data), self.APP_ALIGN - self.offset))\n\n for container in range(self.COUNT_OF_CONTAINERS):\n for image in range(self.bdt[container].images_count):\n data += self.app[container][image].export()\n\n return data", "title": "" }, { "docid": "601dc2641a4dc71b6e0fc9b9b415bce7", "score": "0.53776956", "text": "def export(self, filename, type):\n raise NotImplementedError()", "title": "" }, { "docid": "92a5e33680f9e80fe4215d9dd2b1525d", "score": "0.53645635", "text": "def export(self, filename): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "5397fba22e8e72f34c0d51efc8a5173c", "score": "0.5357771", "text": "def write_hdf5(self,file_name,array,ti,t_len,t_dim_out,dataset_name):\n with h5py_cache.File(self.temp_path + file_name + \".hdf5\",\"r+\",chunk_cache_mem_size=self.chunk_cache_mem_size) as h5pyfile: \n indices = list(range(ti,min(ti+self.t_chunk,t_len)))\n self.reassign_idx(h5pyfile[dataset_name],array,indices,t_dim_out)", "title": "" }, { "docid": "651641c4500d9a5b11ad65bd2d12988b", "score": "0.53499836", "text": "def export(self):\n self.bme280.export()\n self.ccs811.export()", "title": "" }, { "docid": "fbac169c2b86f9fe4415042d706e091a", "score": "0.533923", "text": "def ExportSmoke(self):", "title": "" }, { "docid": "2eed0a4e73c2ff0f7ebc59d00796b046", "score": "0.5338209", "text": "def Save(self):\n self._h5_data.flush()\n self._h5_data.close()", "title": "" }, { "docid": "d9d5857c453f2a616d4b6804f35b0142", "score": "0.53336847", "text": "def _init_h5file(self):\n self.h5file = h5py.File(self.outfile, 'w')\n self._dsdata = self.h5file.create_dataset('data', shape=(0,), maxshape=(None,), chunks=self._chunksize, dtype='f4')\n self._dsrowi = self.h5file.create_dataset('i', shape=(0,), maxshape=(None,), chunks=self._chunksize, dtype='u4')\n self._dscoli = self.h5file.create_dataset('j', shape=(0,), maxshape=(None,), chunks=self._chunksize, 
dtype='u4')\n self._dsnrows = self.h5file.create_dataset('nrows', shape=(), dtype='u4')\n self._dsncols = self.h5file.create_dataset('ncols', shape=(), dtype='u4')\n self.initialized = True", "title": "" }, { "docid": "73c2769d1be3621dfa1b29c4dd60f540", "score": "0.533127", "text": "def make_fake_h5_chunks(td):\n\n features = ['windspeed_100m', 'winddirection_100m']\n model_meta_data = {'foo': 'bar'}\n shape = (50, 50, 96, 1)\n ws_true = np.random.uniform(0, 20, shape)\n wd_true = np.random.uniform(0, 360, shape)\n data = np.concatenate((ws_true, wd_true), axis=3)\n lat = np.linspace(90, 0, 10)\n lon = np.linspace(-180, 0, 10)\n lon, lat = np.meshgrid(lon, lat)\n low_res_lat_lon = np.dstack((lat, lon))\n\n gids = np.arange(np.product(shape[:2]))\n gids = gids.reshape(shape[:2])\n\n low_res_times = pd_date_range(\n '20220101', '20220103', freq='3600s', inclusive='left'\n )\n\n t_slices_lr = [slice(0, 24), slice(24, None)]\n t_slices_hr = [slice(0, 48), slice(48, None)]\n\n s_slices_lr = [slice(0, 5), slice(5, 10)]\n s_slices_hr = [slice(0, 25), slice(25, 50)]\n\n out_pattern = os.path.join(td, 'fp_out_{t}_{i}_{j}.h5')\n out_files = []\n for t, (slice_lr, slice_hr) in enumerate(zip(t_slices_lr, t_slices_hr)):\n for i, (s1_lr, s1_hr) in enumerate(zip(s_slices_lr, s_slices_hr)):\n for j, (s2_lr, s2_hr) in enumerate(zip(s_slices_lr, s_slices_hr)):\n out_file = out_pattern.format(\n t=str(t).zfill(3),\n i=str(i).zfill(3),\n j=str(j).zfill(3),\n )\n out_files.append(out_file)\n OutputHandlerH5.write_output(\n data[s1_hr, s2_hr, slice_hr, :],\n features,\n low_res_lat_lon[s1_lr, s2_lr],\n low_res_times[slice_lr],\n out_file,\n meta_data=model_meta_data,\n max_workers=1,\n gids=gids[s1_hr, s2_hr],\n )\n\n out = (\n out_files,\n data,\n ws_true,\n wd_true,\n features,\n t_slices_lr,\n t_slices_hr,\n s_slices_lr,\n s_slices_hr,\n low_res_lat_lon,\n low_res_times,\n )\n\n return out", "title": "" }, { "docid": "78eb8f9a1623527b92d9bd9fe8cb35fd", "score": "0.5331078", "text": "def export_route_v5(self, route_id, output_format='csv', all_custom_fields=True, columns=None):\n params = {\n 'api_key': self.params['api_key'],\n }\n data = {'all_custom_fields': all_custom_fields, 'format': output_format}\n if columns is not None:\n data[\"columns\"] = columns\n self.response = self.api._make_request(EXPORTER_V5.format(route_id=route_id), params, data,\n self.api._request_post)\n return self.response.content", "title": "" }, { "docid": "c6c7868ff318cb5a2be0238ff6f821d9", "score": "0.5297244", "text": "def to_hdf(self, hdf, group_name=None):\n super(CompoundVertex, self).to_hdf(hdf=hdf, group_name=group_name)\n self.graph.to_hdf(hdf=hdf, group_name=\"graph\")", "title": "" }, { "docid": "f1d11d57d2f970e94c2ae42b3f43f9ae", "score": "0.52827436", "text": "def export(self, path=None, verbose=None):\n\n self.log.info('Exporting results.')\n\n # Determine export destination.\n path = path or g.Cluster().public_dir\n export_path = path + self.paths['export']\n\n with h5py.File(export_path, 'w') as f:\n # save injected h\n f.create_dataset('hinj', data=self.hinj)\n # save recovered h and s\n for m in self.search_methods:\n grp = f.create_group(m)\n grp.create_dataset('hrec', data=self.hrec[m])\n grp.create_dataset('srec', data=self.srec[m])\n # grp.create_dataset('arec', data=self.arec[m])\n self.log.info('Results exported: ' + export_path)", "title": "" }, { "docid": "bfb32e507a213b03db1daf48e543b907", "score": "0.52794844", "text": "def export_to_h5(checkpoint_dir, export_path, images, end_points, num_samples,\n 
batch_size, sact):\n output_file = h5py.File(export_path, 'w')\n\n output_file.attrs['block_scopes'] = end_points['block_scopes']\n keys_to_tensors = {}\n for block_scope in end_points['block_scopes']:\n for k in ('{}/ponder_cost'.format(block_scope),\n '{}/num_units'.format(block_scope),\n '{}/halting_distribution'.format(block_scope),\n '{}/flops'.format(block_scope)):\n keys_to_tensors[k] = end_points[k]\n keys_to_tensors['images'] = images\n keys_to_tensors['flops'] = end_points['flops']\n\n if sact:\n keys_to_tensors['ponder_cost_map'] = sact_map(end_points, 'ponder_cost')\n keys_to_tensors['num_units_map'] = sact_map(end_points, 'num_units')\n\n keys_to_datasets = {}\n for key, tensor in keys_to_tensors.iteritems():\n sh = tensor.get_shape().as_list()\n sh[0] = num_samples\n print(key, sh)\n keys_to_datasets[key] = output_file.create_dataset(\n key, sh, compression='lzf')\n\n variables_to_restore = slim.get_model_variables()\n checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)\n assert checkpoint_path is not None\n init_fn = slim.assign_from_checkpoint_fn(checkpoint_path,\n variables_to_restore)\n\n sv = tf.train.Supervisor(\n graph=tf.get_default_graph(),\n logdir=None,\n summary_op=None,\n summary_writer=None,\n global_step=None,\n saver=None)\n\n assert num_samples % batch_size == 0\n num_batches = num_samples // batch_size\n\n with sv.managed_session('', start_standard_services=False) as sess:\n init_fn(sess)\n sv.start_queue_runners(sess)\n\n for i in range(num_batches):\n tf.logging.info('Evaluating batch %d/%d', i + 1, num_batches)\n end_points_out = sess.run(keys_to_tensors)\n for key, dataset in keys_to_datasets.iteritems():\n dataset[i * batch_size:(i + 1) * batch_size, ...] = end_points_out[key]", "title": "" }, { "docid": "8ba8b4043edc9db4e560a76a1e578ecc", "score": "0.52502644", "text": "def save_block(self, block, overwrite=True):\n\n # Store None values as strings\n or_none = lambda val: val if val is not None else \"none\"\n\n # Store all annotations, as well as top-level attributes as attributes on the group for the block\n with h5py.File(self.filename, mode=\"a\") as hf:\n # File is structured /bird_name\n group_name = self._group_name(block)\n g = self._create_group_recursive(hf, group_name, overwrite)\n\n g.attrs[\"name\"] = block.name\n g.attrs[\"date\"] = block.date.strftime(\"%d%m%Y\")\n g.attrs[\"start\"] = block.start.strftime(\"%H%M%S\")\n for key, val in block.annotations.items():\n g.attrs[key] = or_none(val)\n\n # Store the data using pandas built-in to_hdf method\n block.data.to_hdf(self.filename, group_name + \"/data\")\n\n # Records of which data is where in the file are kept in a table called \"values\" at the root of the file\n # Load that table if it exists\n try:\n values = pd.read_hdf(self.filename, \"/values\")\n except KeyError:\n values = None\n\n # Add the table entry if it doesn't yet exist\n if (values is None) or (str(group_name) not in values[\"Path\"].values):\n df = pd.DataFrame({\"Name\": block.name,\n \"Timestamp\": pd.Timestamp(dt.combine(block.date, block.start)),\n \"Path\": str(group_name)},\n index=[0])\n df = df.set_index(\"Timestamp\")\n df.to_hdf(self.filename, \"/values\", format=\"table\", append=True)\n\n return True", "title": "" }, { "docid": "996210bb75b9f9ec14f159c88912b5c3", "score": "0.52384263", "text": "def write(self, group_object):\n logging.debug(\"Writing\" + \" \" + str(self))\n\n self.write_simple_attributes(group_object)\n\n # iterate through all the key/values in _attributes and write to hdf5\n # if 
a value is numpy.array then convert to h5py.Dataset\n # if a value is a S1xxObject instance then create a group and call it's write function\n # if a value is a S1xxWritesGroupObjects instance then let it create the group and tell it to write into the current group_object\n for key, val in self._attributes.items():\n if isinstance(val, s1xx_sequence_types): # this looks inside the typing.Union to see what arrays should be treated like this\n logging.debug(key + \" array: \" + str(val.shape))\n # convert any strings to bytes or h5py will fail to write the unicode\n # converted_vals = [v if not isinstance(v, str) else v.encode(\"utf-8\") for v in val]\n converted_vals = [v if not isinstance(v, bytes) else v.decode() for v in val]\n try:\n del group_object[key]\n except KeyError:\n pass # didn't exist, no error\n # We are only supporting single column data, check the dtype that the convert function found\n revised_vals = convert_numpy_strings_to_h5py([converted_vals])\n # Now re-package so it shows up as a single column in hdf5 - not sure why the revised_vals array would have\n # to be accessed with two dimensions otherwise -- like val[0][0] instead of val[0]\n revised_2 = numpy.array(converted_vals, dtype=revised_vals.dtype[0])\n try:\n new_dataset = group_object.create_dataset(key, data=revised_2)\n except Exception as e:\n raise e\n elif isinstance(val, S1xxWritesGroupObjects):\n # things that either create a dataset and have to combine data into it or make multiple sub groups that the parent can't predict\n logging.debug(\"{} S100 object - writing itself now...\".format(key))\n val.write(group_object)\n elif isinstance(val, S1xxObject):\n logging.debug(key + \" S100 object - writing itself now...\")\n new_group = group_object.require_group(key)\n val.write(new_group)", "title": "" }, { "docid": "ed4bd7f7799c9fefd6dbe7cbe8e96766", "score": "0.52311367", "text": "def export(self, export_file_name):\n fid = h5py.File(export_file_name, 'w')\n grp = fid.create_group(\"CountEstimator\")\n mins_data = grp.create_dataset(\"mins\", data=self._mins)\n counts_data = grp.create_dataset(\"counts\", data=self._counts)\n if self._kmers:\n kmer_data = grp.create_dataset(\"kmers\", data=[np.string_(kmer) for kmer in self._kmers])\n\n grp.attrs['class'] = np.string_(\"CountEstimator\")\n grp.attrs['filename'] = np.string_(self.input_file_name)\n grp.attrs['ksize'] = self.ksize\n grp.attrs['prime'] = self.p\n grp.attrs['true_num_kmers'] = self._true_num_kmers\n fid.close()", "title": "" }, { "docid": "ad2aed05e531202326b6760a88a3aefe", "score": "0.5228419", "text": "def create_export(self, context, volume, connector):\n pass", "title": "" }, { "docid": "1c1fc26b2481b3d9d460aec363372d56", "score": "0.5220691", "text": "def export_hdf5(self, handle):\n def weight(s):\n return s.pre_hook_fx(\n s.weight, descale=True\n ).reshape(s.weight.shape[:-1]).cpu().data.numpy()\n\n def delay(d):\n return torch.floor(d.delay).flatten().cpu().data.numpy()\n\n # descriptors\n handle.create_dataset(\n 'type', (1, ), 'S10', ['conv'.encode('ascii', 'ignore')]\n )\n handle.create_dataset('shape', data=np.array(self.neuron.shape))\n handle.create_dataset('inChannels', data=self.synapse.in_channels)\n handle.create_dataset('outChannels', data=self.synapse.out_channels)\n handle.create_dataset('kernelSize', data=self.synapse.kernel_size[:-1])\n handle.create_dataset('stride', data=self.synapse.stride[:-1])\n handle.create_dataset('padding', data=self.synapse.padding[:-1])\n handle.create_dataset('dilation', 
data=self.synapse.dilation[:-1])\n handle.create_dataset('groups', data=self.synapse.groups)\n\n # weights\n if self.synapse.weight_norm_enabled:\n self.synapse.disable_weight_norm()\n if hasattr(self.synapse, 'imag'): # complex synapse\n handle.create_dataset(\n 'weight/real',\n data=weight(self.synapse.real)\n )\n handle.create_dataset(\n 'weight/imag',\n data=weight(self.synapse.imag)\n )\n else:\n handle.create_dataset('weight', data=weight(self.synapse))\n\n # bias\n has_norm = False\n if hasattr(self.neuron, 'norm'):\n if self.neuron.norm is not None:\n has_norm = True\n if has_norm is True:\n handle.create_dataset(\n 'bias',\n data=self.neuron.norm.bias.cpu().data.numpy().flatten()\n )\n\n # delay\n if self.delay is not None:\n handle.create_dataset('delay', data=delay(self.delay))\n\n # neuron\n for key, value in self.neuron.device_params.items():\n handle.create_dataset(f'neuron/{key}', data=value)\n if has_norm is True:\n if hasattr(self.neuron.norm, 'weight_exp'):\n handle.create_dataset(\n 'neuron/weight_exp',\n data=self.neuron.norm.weight_exp\n )", "title": "" }, { "docid": "609fab376bccf49defbfabda912d1f4c", "score": "0.52091414", "text": "def serializeToHdf5(self, hdf5File, projectFilePath):\n # Check the overall file version\n ilastikVersion = hdf5File[\"ilastikVersion\"].value\n\n # Make sure we can find our way around the project tree\n if not VersionManager.isProjectFileVersionCompatible(ilastikVersion):\n return\n\n self.progressSignal.emit(0)\n \n topGroup = self.getOrCreateGroup(hdf5File, self.topGroupName)\n \n # Set the version\n if 'StorageVersion' not in topGroup.keys():\n topGroup.create_dataset('StorageVersion', data=self._version)\n else:\n topGroup['StorageVersion'][()] = self._version\n\n try:\n # Call the subclass to do the actual work\n self._serializeToHdf5(topGroup, hdf5File, projectFilePath)\n finally:\n self.progressSignal.emit(100)", "title": "" }, { "docid": "8542a21f79fae4d07b43dd121af3b3c1", "score": "0.52041376", "text": "def save_plotdata(self):\n #Combine all the data into a single dictionary\n comb_dict = {'Obs':self.gmst,'Anthrop':self.paleo_data,'CMIP5':self.cmip5_data,'AR5-proj':self.ar5_proj}\n \n #Write the data to a .h5 file\n now = datetime.datetime.now()\n dd.io.save('./spmfig/output/fig1.1_plotdata_'+now.strftime('%d%m%Y_%H:%M:%S')+'.h5', comb_dict, compression=None)\n \n return", "title": "" }, { "docid": "a9449610c1c959e356e79ff4e8a07bf8", "score": "0.51630265", "text": "def write(self, group_object):\n # @todo - is there a bug here if some instances are missing attributes leading to a mismatched array?\n # First determine the write order of the keys\n logging.debug(\"Writing\" + \" \" + str(self))\n dataset = None\n if len(self) > 0:\n val = self[0]\n write_keys = []\n if val.get_write_order(): # @todo I think bathycoverage and trackingcoverage in the feature information may want to be ordered\n write_keys.extend(val.get_write_order())\n\n # to preserve order of other keys - iterate instead of using set logic\n for key in val._attributes:\n if key not in write_keys:\n write_keys.append(key)\n # write_keys.extend(set(self._attributes.keys()).difference(write_keys))\n write_array = []\n for val in self:\n list_vals = []\n for key in write_keys:\n try:\n v = val._attributes[key]\n except KeyError as key_err:\n raise KeyError(\n \"{} in {} is missing data, this would give a mismatched array \\n please fill all data {} for all items in the list/dataset\".format(\n key_err.args[0], self.metadata_name, str(write_keys)))\n\n if 
isinstance(v, bytes):\n # no longer doing this--\n # convert unicode strings into ascii since HDF5 doesn't like the unicode strings that numpy will produce\n # v = v.encode(\"utf-8\")\n\n # convert bytes strings to strings which will be encoded as utf8 later\n v = v.decode()\n elif isinstance(v, Enum): # convert Enums to integars\n v = v.value\n list_vals.append(v)\n\n # list_vals = [v if not isinstance(v, str) else v.encode(\"utf-8\") for v in list_vals]\n write_array.append(list_vals)\n\n # hdf5 needs names to the columns which is done in a record array or structured array.\n # but to create that without specifying type we need to transpose first then call 'fromarrays'\n transposed_array = list(map(list, zip(*write_array)))\n if write_keys:\n rec_array_revised = convert_numpy_strings_to_h5py(transposed_array, write_keys)\n else:\n rec_array_revised = h5py.Empty(\"\")\n raise ValueError(self.metadata_name + \" had no data fields defined to write - this would create an h5py.Empty dataset\")\n try:\n del group_object[self.metadata_name]\n except KeyError:\n pass # didn't exist, no error\n dataset = group_object.create_dataset(self.metadata_name, data=rec_array_revised)\n self.write_simple_attributes(dataset)\n return dataset", "title": "" }, { "docid": "3d09b1c17fbfe7035af7642054a23873", "score": "0.51565284", "text": "def write(self, filename, verbose=False ):\n self.filename = filename\n with open(filename, 'wb') as f:\n self.write_hdr(f)\n if verbose:\n print self\n flush\n for b in np.arange(self.nb):\n rblock = self.rblocks[b]\n if b==0 and ( rblock.nc!=1 or rblock.nk!=1):\n raise ValueError('Error: First block must be topography!')\n rblock.write_block_hdr(f)\n if verbose:\n print block\n flush\n for b in np.arange(self.nb): \n z=self.rblocks[b].data.reshape(self.rblocks[b].data.size)\n z.tofile(f)\n return", "title": "" }, { "docid": "211e81234ea65f09fa45b2c40f77b51f", "score": "0.51397437", "text": "def export(self, category, measure_type, data):", "title": "" }, { "docid": "95652f6b37265cc39e75a9b2207bc06c", "score": "0.51397324", "text": "def export(exp_data: ExportData) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "7e9e3f17a31161db98caaf863976cd82", "score": "0.51339686", "text": "def export(self):\n\n # check if the drawing area object and the path are set\n if self._drawing_area != None and self._path != None and self._path != \"\":\n # instantiate an object written to export the content of the drawing area\n exp = drawing_area_export.DrawingAreaExport()\n # assign the properties\n exp.drawing_area = self._drawing_area\n exp.path = self._path\n \n # check the export format and call the exportation method\n if self._format == self.E_PDF:\n exp.export_as_pdf()\n if self._format == self.E_PS:\n exp.export_as_ps()\n if self._format == self.E_SVG:\n exp.export_as_svg()\n if self._format == self.E_PNG:\n exp.export_as_png()\n if self._format == self.E_JPG:\n exp.export_as_jpg()\n\n return True\n return False", "title": "" }, { "docid": "ba59e00b5f26d233452881653a46031e", "score": "0.5130298", "text": "def to_hdf5(\n self,\n path_or_group: Union[str, h5py.File, h5py.Group],\n save_mesh: bool = True,\n ) -> None:\n if isinstance(path_or_group, str):\n path = path_or_group\n if not path.endswith(\".h5\"):\n path = path + \".h5\"\n if os.path.exists(path):\n raise IOError(f\"Path already exists: {path}.\")\n os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True)\n save_context = h5py.File(path, \"w-\", libver=\"latest\")\n else:\n h5_group = 
path_or_group\n save_context = nullcontext(h5_group)\n with save_context as f:\n f.attrs[\"name\"] = self.name\n f.attrs[\"length_units\"] = self.length_units\n self.layer.to_hdf5(f.create_group(\"layer\"))\n self.film.to_hdf5(f.create_group(\"film\"))\n for terminal in self.terminals:\n terminals_grp = f.require_group(\"terminals\")\n terminal.to_hdf5(terminals_grp.create_group(terminal.name))\n if self.probe_points is not None:\n f[\"probe_points\"] = self.probe_points\n for hole in sorted(self.holes, key=attrgetter(\"name\")):\n group = f.require_group(\"holes\")\n hole.to_hdf5(group.create_group(hole.name))\n if save_mesh and self.mesh is not None:\n self.mesh.to_hdf5(f.create_group(\"mesh\"))", "title": "" }, { "docid": "45c4f159702ff8a2cfa3b94a10356998", "score": "0.51227605", "text": "def embed_data(self, h5_file: h5py.File, embedder: EmbeddingModel, save_states: bool = False):\n raise NotImplementedError(\"embed_data function has not been properly overridden by a child class\")", "title": "" }, { "docid": "b392942082589ef936e8549b5436dc36", "score": "0.51221097", "text": "def output_signature(self):\n return _my_lte_swig.phich_grouping_sptr_output_signature(self)", "title": "" }, { "docid": "e390e7aa40309f96b66915dc6d1811cf", "score": "0.51070523", "text": "def export_bands(self, filename, separator):\n\n energies = self.bands_energies\n positions = self.bands_positions\n header = 'Positions' + separator + separator.join(['Band %s' % f for f in range(1, len(energies) + 1)])\n data = np.transpose(np.insert(energies, 0, positions, axis=0))\n np.savetxt(filename, data, header=header, delimiter=separator, comments='')\n print(self.treetitle + ' Bands exported successfully!')", "title": "" }, { "docid": "68ce9c693db45795ba4789f4522ee968", "score": "0.50945544", "text": "def write_h5_file(self, specfile, overwrite=False):\n if os.path.exists(specfile) and not overwrite:\n raise IOError(\"File %s exists and overwrite=False!\" % specfile)\n f = h5py.File(specfile, \"w\")\n f.create_dataset(\"emin\", data=self.ebins[0].value)\n f.create_dataset(\"emax\", data=self.ebins[-1].value)\n f.create_dataset(\"spectrum\", data=self.flux.value)\n f.close()", "title": "" }, { "docid": "70bad878a1faba0e23dcb496885e3844", "score": "0.508384", "text": "def makeBedFile(self,bedout,name,description,column_names):\n # Write the output BED file header\n fo = open(bedout,'w')\n fo.write('track name=\"%s\" description=\"%s\" visibility=pack itemRgb=\"On\"\\n' %\n (name,description))\n # Write data\n for line in self:\n # Check there's data\n if str(line).strip() == '':\n print \"No data items on line %d, ignoring\" % line.lineno()\n continue\n # Write out the line\n fo.write(\"%s\\n\" % str(line.subset(*column_names)))\n # Finished\n fo.close()", "title": "" }, { "docid": "e0382ad8d060d1b560f3b1b6e3759f9d", "score": "0.5075444", "text": "def h5(ctx, h5_file, dsets, group, process_size, max_workers):\n SummarizeH5.run(h5_file, ctx.obj['OUT_DIR'], group=group, dsets=dsets,\n process_size=process_size, max_workers=max_workers)", "title": "" }, { "docid": "2e3d36c0465160bf5749d420233a8745", "score": "0.50657487", "text": "def export(self, raw_data_dir, **kwargs):", "title": "" }, { "docid": "8cd050c5734c463fa005a3e9c8f8552b", "score": "0.5051442", "text": "def export_h5m(\n self,\n filename: Optional[str] = 'dagmc.h5m',\n skip_graveyard: Optional[bool] = False,\n tolerance: Optional[float] = 0.001,\n graveyard_offset: Optional[float] = 100) -> str:\n\n path_filename = Path(filename)\n\n if path_filename.suffix != 
\".h5m\":\n path_filename = path_filename.with_suffix(\".h5m\")\n\n path_filename.parents[0].mkdir(parents=True, exist_ok=True)\n\n moab_core, moab_tags = define_moab_core_and_tags()\n\n surface_id = 1\n volume_id = 1\n\n for item in self.shapes_and_components:\n\n item.export_stl(item.stl_filename, tolerance=tolerance)\n moab_core = add_stl_to_moab_core(\n moab_core,\n surface_id,\n volume_id,\n item.material_tag,\n moab_tags,\n item.stl_filename)\n volume_id += 1\n surface_id += 1\n\n if skip_graveyard is False:\n self.make_graveyard(graveyard_offset=graveyard_offset)\n self.graveyard.export_stl(self.graveyard.stl_filename)\n volume_id = 2\n surface_id = 2\n moab_core = add_stl_to_moab_core(\n moab_core,\n surface_id,\n volume_id,\n self.graveyard.material_tag,\n moab_tags,\n self.graveyard.stl_filename\n )\n\n all_sets = moab_core.get_entities_by_handle(0)\n\n file_set = moab_core.create_meshset()\n\n moab_core.add_entities(file_set, all_sets)\n\n moab_core.write_file(str(path_filename))\n\n return str(path_filename)", "title": "" }, { "docid": "4e0a634a0dad14699e7f58f012fd9680", "score": "0.50480694", "text": "def export(self, file_obj):\n file_obj.write(f'{NL}')\n file_obj.write(f'{self.__HEADER__}{NL}')\n file_obj.write(f'mask={list_to_str(self.masks, space=False)}{NL}')\n file_obj.write(f'anchors={anchors_to_str(self.anchors)}{NL}')\n file_obj.write(f'classes={self.classes}{NL}')\n file_obj.write(f'num={self.num_anchors}{NL}')\n file_obj.write(f'jitter={self.jitter}{NL}')\n file_obj.write(f'ignore_thresh={self.ignore_thresh}{NL}')\n file_obj.write(f'truth_thresh={self.truth_thresh}{NL}')\n file_obj.write(f'random={self.random}{NL}')", "title": "" }, { "docid": "ec1fea702dca2736ff85d518e2dd5bfe", "score": "0.5047143", "text": "def save(self):\n path = self.outfile\n if not path.endswith('.hdf5'):\n path += '.hdf5'\n with h5py.File(path, 'w') as hdf:\n # Store the original configuration data in json form\n config_group = hdf.create_group('config')\n config_group.attrs.update({\n 'outfile': self.outfile\n })\n material_group = config_group.create_group('material')\n if self.material is not None:\n material_group.attrs.update(self.material)\n # Store each InitialTask as its own dataset\n ensemble_group = hdf.create_group('initials')\n for initial_task in self.initial_tasks:\n try:\n shape = initial_task.result.shape\n dtype = initial_task.result.dtype\n except AttributeError:\n shape = ()\n dtype = None\n dataset = ensemble_group.create_dataset(\n str(initial_task.task_id),\n shape=shape,\n dtype=dtype\n )\n # Store data\n if initial_task.result is not None:\n dataset[:] = initial_task.result[:]\n dataset.attrs['failed'] = False\n else:\n dataset.attrs['failed'] = True\n # Store attributes\n dataset.attrs['task_id'] = initial_task.task_id\n dataset.attrs.update(initial_task.params)\n dataset.attrs.update(initial_task.material.info())\n dataset.attrs.update({\n 'm_med': initial_task.mmed,\n 'm_dm': initial_task.m1,\n 'n_samples': initial_task.n_samples,\n 'cf_sign': initial_task.cf_sign,\n 'vdf': initial_task.vdf\n })\n if initial_task.omega_max is not None:\n dataset.attrs.update({\n 'omega_max': initial_task.omega_max\n })", "title": "" }, { "docid": "c47fad560b057c7e24ffe84bebf084f6", "score": "0.5040003", "text": "def writeToHDF5(self, group):\n group.create_dataset('delays', data=np.array(self.delays, dtype=int))\n group.create_dataset('flights', data=np.array(self.flights, dtype=int))\n group.create_dataset('timeLimits', data=np.array(self.timeLimits, dtype=int))\n 
group.create_dataset('conflicts', data=np.array(self.conflicts, dtype=int))\n group.attrs['Number of flights'] = len(self.flights)\n group.attrs['Number of conflicts'] = len(self.conflicts)", "title": "" }, { "docid": "76d11d5c7932a662fe1f16c1b4070f6e", "score": "0.5033836", "text": "def export_fits(self, filename):", "title": "" }, { "docid": "24cc70e84534b9c627d6638cc61bdd65", "score": "0.5032314", "text": "def Output(self):\n self.Open()\n self.Header()\n self.Body()\n self.Footer()", "title": "" }, { "docid": "6a622e1d035110c3dca6e93f1c35eb2e", "score": "0.502898", "text": "def save_as_h5_dataset(\n self,\n out_group,\n acq,\n product,\n compression=H5CompressionFilter.LZF,\n filter_opts=None,\n ):\n if filter_opts is None:\n fopts = {}\n else:\n fopts = filter_opts.copy()\n\n fopts[\"chunks\"] = acq.tile_size\n attrs = self.aux_data.copy()\n attrs[\"crs_wkt\"] = self.geobox.crs.ExportToWkt()\n attrs[\"geotransform\"] = self.geobox.transform.to_gdal()\n dname = DatasetName.PQ_FMT.value.format(product=product.value)\n write_h5_image(self.array, dname, out_group, compression, attrs, fopts)", "title": "" }, { "docid": "5868127ac3c710ada9bb9a1e91710c0d", "score": "0.5018919", "text": "def fig5(self, **kwargs):\n model = self._initModel()\n model = self._checkKwargs(model, **kwargs)\n model.unscattered_spectrum = model.loaded_spectra[0]\n model.scattering_medium.setPressure(4)\n model.scattering_medium.scatterer.norm_factor = 1\n model.scattering_medium.scatterer.inelastic_xsect = 0.0038\n model.algorithm_id = 0\n model.algorithms[0] = Algorithm4\n model.scatterSpectrum()\n \n Iy = np.array(model.simulation.I) - np.min(model.simulation.I)\n Ix = model.loaded_spectra[-1].x\n inel_probs = model.simulation.poiss_inel\n \n data5 = []\n for i in range(len(Iy[1:10,0])):\n data5 += [{'x':Ix,\n 'y':Iy[i,:] * inel_probs[i],\n 'color':'',\n 'limit0':0,\n 'limit1':-1,\n 'x_label':'Kinetic Energy [eV]',\n 'y_label':'Intensity [Cts./Sec.]',\n 'title':'',\n 'linestyle':'solid',\n 'markersize':0,\n 'linewidth':1.5,\n 'marker':'o',\n 'text':''}] \n \n data5 += [{'x':Ix,\n 'y':(np.sum(model.simulation.inel, axis=1) \n - np.min(model.simulation.inel)),\n 'color':'grey',\n 'limit0':0,\n 'limit1':-1,\n 'x_label':'Kinetic Energy [eV]',\n 'y_label':'Intensity [Cts./Sec.]',\n 'title':'Scattered portion of test spectrum in 4 mbar He',\n 'linestyle':'dashed',\n 'markersize':0,\n 'linewidth':1.5,\n 'marker':'o',\n 'text':''}] \n \n data5 += [{'x':Ix,\n 'y':(np.array(model.simulated_spectrum.lineshape) \n - np.min(model.simulated_spectrum.lineshape)),\n 'color':'',\n 'limit0':0,\n 'limit1':-1,\n 'x_label':'Kinetic Energy [eV]',\n 'y_label':'Intensity [Cts./Sec.]',\n 'title':'Test spectrum in 4 mbar He',\n 'linestyle':'solid',\n 'markersize':0,\n 'linewidth':1.5,\n 'marker':'o',\n 'text':''}] \n self.data = data5 \n self.draw_fig(self.data, **kwargs)", "title": "" }, { "docid": "c49dd3368ba644074fb3c3740b84da0f", "score": "0.50178045", "text": "def to_hdf5(self, group):\n group.attrs['type'] = np.string_('uncorrelated')\n if self.angle is not None:\n angle_group = group.create_group('angle')\n self.angle.to_hdf5(angle_group)\n\n if self.energy is not None:\n energy_group = group.create_group('energy')\n self.energy.to_hdf5(energy_group)", "title": "" }, { "docid": "3c16f2e14e7b51390a3091a92ccea2dd", "score": "0.5012811", "text": "def _write_header(self, out_file):\n out_file.write(\"# Process data:\\n\")\n for each_key in self.process_data.keys():\n out_file.write(f\"# {each_key.ljust(30)}: 
{str(self.process_data[each_key])}\\n\")\n out_file.write(\"#\" * 100 + \"\\n\")\n out_file.write(\"# Calibration data:\\n\")\n for each_key in self.calibration_data.keys():\n out_file.write(f\"# {each_key.ljust(30)}: {str(self.calibration_data[each_key])}\\n\")\n out_file.write(\"#\" * 100 + \"\\n\")\n out_file.write(\"# stage data:\\n\")\n stage_data, rmbc_data = self.gather_stage_process_data()\n for each_key in stage_data:\n out_file.write(f\"# {each_key.ljust(30)}: {str(stage_data[each_key])}\\n\")\n out_file.write(\"#\" * 100 + \"\\n\")\n\n out_file.write(\"# rigid body motion compensation:\\n\")\n for each_key in rmbc_data.keys():\n out_file.write(f\"# {each_key.ljust(30)}: {str(rmbc_data[each_key])}\\n\")\n out_file.write(\"#\" * 100 + \"\\n\")\n\n # get input signals\n out_file.write(\"# SIGNALS:\\n\")\n gom_value_elements = self.project.actual_elements.filter('type', 'value_element')\n for gom_value_element in gom_value_elements:\n out_file.write(\n f\"# {str(gom_value_element.get('name')).ljust(30)}: \"\n f\"{str(gom_value_element.get('type')).ljust(20)}: \"\n f\"{str(gom_value_element.get('input_value'))}\\n\")\n out_file.write('#\\n')\n gom_value_elements = self.project.actual_elements.filter('type', 'analog_input')\n for gom_value_element in gom_value_elements:\n out_file.write(\n f\"# {str(gom_value_element.get('name')).ljust(30)}: \"\n f\"{str(gom_value_element.get('type')).ljust(20)}: \"\n f\"{str(gom_value_element.get('dimension'))}\\n\")\n out_file.write(\"#\" * 100 + \"\\n\")", "title": "" }, { "docid": "22a1b34bc784382eb6f53a84b4023948", "score": "0.5010802", "text": "def hdf5_writer_init(fname: str, pipe, econ, approx, solver_cfg=None, version: int=0):\n f = h5py.File(fname, 'w')\n\n f.attrs[\"pipe_json\"] = json.dumps(pipe._asdict())\n f.attrs[\"econ_json\"] = json.dumps(econ._asdict())\n f.attrs[\"approx_json\"] = json.dumps(approx._asdict())\n if solver_cfg:\n f.attrs[\"solver_json\"] = json.dumps(solver_cfg._asdict())\n f.attrs[\"version\"] = version\n\n return f", "title": "" }, { "docid": "32bae2ec6e0ff5779fe509e85eb72162", "score": "0.50078815", "text": "def to_h5(self, h5_path: Union[str, Path], file_ids: Iterable[int] = None):\n if isinstance(h5_path, str):\n h5_path = Path(h5_path)\n\n if not h5_path.parent.exists():\n h5_path.parent.mkdir(parents=True, exist_ok=True)\n\n if not self.padded:\n self.pad()\n\n if self.h5_path:\n try:\n shutil.copy(self.h5_path, h5_path)\n self.h5_path = h5_path\n except OSError:\n logging.exception(\"Error copying existing h5 file %s to %s\", self.h5_path, h5_path)\n return\n\n h5_file = h5py.File(h5_path, \"w\")\n\n keys = [\n \"inputs\",\n \"targets\",\n \"input_lengths\",\n \"target_lengths\",\n \"piece_lengths\",\n \"key_change_replacements\",\n \"target_pitch_type\",\n ]\n for key in keys:\n if hasattr(self, key) and getattr(self, key) is not None:\n h5_file.create_dataset(key, data=np.array(getattr(self, key)), compression=\"gzip\")\n if file_ids is not None:\n h5_file.create_dataset(\"file_ids\", data=np.array(file_ids), compression=\"gzip\")\n h5_file.close()", "title": "" }, { "docid": "d83d458fbe28c44c3e745445271dd8a3", "score": "0.5005937", "text": "def export_python(self, filename):", "title": "" }, { "docid": "1daefc9bc2cac5223b5b5805f97465db", "score": "0.5005553", "text": "def _write_subset(self,path,sub_type,idxs):\n with tqdm(total=10, desc=f\"Writing {sub_type}\") as pbar:\n with h5py.File(path,'a') as file:\n for key in self.attrs.keys():\n file.attrs[key] = self.attrs[key]\n file.attrs['length'] = 
len(idxs)\n file.attrs['compile_date'] = str(datetime.datetime.now())\n file.attrs['compile_time'] = 0\n file.attrs['type'] = sub_type\n file.create_dataset('angles', data = self.ang_arr[idxs], compression=\"gzip\",compression_opts=self.compression_level)\n pbar.update(1)\n file.create_dataset('positions', data = self.pos_arr[idxs], compression=\"gzip\",compression_opts=self.compression_level)\n pbar.update(1)\n coord_grop = file.create_group('coordinates')\n dm = coord_grop.create_dataset('depthmaps', data = self.depthmap_arr[idxs], compression=\"gzip\",compression_opts=self.compression_level)\n pbar.update(1)\n dm.attrs['depth_scale'] = self.depth_scale\n coord_grop.create_dataset('pointmaps', data = self.pointmap[idxs], compression=\"gzip\",compression_opts=self.compression_level)\n pbar.update(1)\n img_grp = file.create_group('images')\n img_grp.create_dataset('original', data = self.orig_img_arr[idxs], compression=\"gzip\",compression_opts=self.compression_level)\n pbar.update(1)\n img_grp.create_dataset('segmented', data = self.segmented_img_arr[idxs], compression=\"gzip\",compression_opts=self.compression_level)\n pbar.update(1)\n img_grp.create_dataset('rois', data = self.rois[idxs], compression=\"gzip\",compression_opts=self.compression_level)\n img_grp.create_dataset('camera_poses', data = self.camera_poses[idxs], compression=\"gzip\",compression_opts=self.compression_level)\n pbar.update(1)\n path_grp = file.create_group('paths')\n path_grp.create_dataset('jsons', data = np.array(self.jsons[idxs], dtype=h5py.string_dtype()), compression=\"gzip\",compression_opts=self.compression_level)\n pbar.update(1)\n path_grp.create_dataset('depthmaps', data = np.array(self.maps[idxs], dtype=h5py.string_dtype()), compression=\"gzip\",compression_opts=self.compression_level)\n pbar.update(1)\n path_grp.create_dataset('images', data = np.array(self.imgs[idxs],dtype=h5py.string_dtype()), compression=\"gzip\",compression_opts=self.compression_level)\n pbar.update(1)", "title": "" }, { "docid": "e2a43d24e5a63a7682ba4ef211260a80", "score": "0.5003265", "text": "def write_grid_hdf5(filepath, name, *grid):\n labels = ('x', 'y', 'z')\n f = h5py.File(str(filepath), 'w')\n group = f.create_group(name)\n for i, gridline in enumerate(grid):\n group.create_dataset(labels[i], data=gridline)\n f.close()\n return", "title": "" }, { "docid": "26bc1f3e2fdfc8c486c06dd0a85e33b4", "score": "0.49943414", "text": "def abq_export(fn,nodes,elems,eltype,header=\"Exported by stl_examples.py\"):\n fil = file(fn,'w')\n fe_abq.writeHeading(fil,header)\n fe_abq.writeNodes(fil,nodes)\n fe_abq.writeElems(fil,elems,eltype,nofs=1)\n fil.close()\n print \"Abaqus file %s written.\" % fn", "title": "" }, { "docid": "1a986f1a0ba282093f4152489cbffcfe", "score": "0.49868178", "text": "def exportXMLTree(self, simple_bin=False):\n pretty_ident = getPrettyStrFromRsrcType(self.ident)\n\n elem = ET.Element(pretty_ident)\n if len(self.full_name) > 0:\n comment_elem = ET.Comment(\" {:s} \".format(self.full_name))\n elem.append(comment_elem)\n for snum, section in self.sections.items():\n section_elem = ET.SubElement(elem,\"Section\")\n section_elem.set(\"Index\", str(snum))\n\n if self.vi.ftype == LVrsrcontainer.FILE_FMT_TYPE.LLB:\n block_int5 = section.start.int5\n else:\n block_int5 = None\n\n fname_base = self.exportFilesBase(snum, section)\n\n if section.name_obj is not None:\n subelem = ET.SubElement(section_elem,\"NameObject\")\n\n section.name_obj.exportXML(subelem, fname_base)\n\n elif section.name_text is not None:\n 
section_elem.set(\"Name\", section.name_text.decode(self.vi.textEncoding))\n if block_int5 is not None:\n section_elem.set(\"Int5\", \"0x{:08X}\".format(block_int5))\n\n if not simple_bin:\n # The rest of the data may be set by a block-specific (overloaded) method\n self.exportXMLSection(section_elem, snum, section, fname_base)\n else:\n # Call base function, not the overloaded version for specific block\n # And _really_ use the base Block, not super() - that doesn't guarantee\n # we get raw form, plus Block itself doesn't have this in superclass.\n Block.exportXMLSection(self, section_elem, snum, section, fname_base)\n\n return elem", "title": "" }, { "docid": "b028006cfc48eabdfc1f90987cae6ef0", "score": "0.49850935", "text": "def output_excel(self):\n\n self.logger.info(\"writing excel\")\n self.logger.debug(self)\n\n #client = self.data_bucket.client\n isin = self.data_bucket.shareclass_isin\n date = self.data_bucket.date\n sc_name = self.data_bucket.get_shareclass_infos(\"shareclass_name\")\n\n # open template excel\n template_file_name = 'AO_TPT_V5.0_Template.xlsx'\n output_file_name = f\"AO_TPT_V5.0_{sc_name}_{isin}_{date}.xlsx\"\n template = openpyxl.load_workbook(self.source_dir / template_file_name)\n report = template.get_sheet_by_name('Report')\n rows = dataframe_to_rows(self.report, index=False)\n\n # map dataframe columns to excel columns \n column_map = {}\n for row_idx, row in enumerate(rows):\n if row_idx == 0:\n assert report.max_column == len(row), \"Number of columns in report and template are different.\"\n for col_idx_pd, column_name in enumerate(row):\n for i in range(len(row)):\n if report.cell(row=1, column=i+1).value == column_name:\n #print(column_name, report.cell(row=1, column=i+1).value)\n column_map[col_idx_pd] = i+1\n assert col_idx_pd in column_map.keys(), f\"Missing {column_name} in template\"\n assert report.cell(row=1, column=column_map[col_idx_pd]).value == row[col_idx_pd]\n \n else:\n for col_idx, value in enumerate(row):\n if value == \"nan\":\n report.cell(row=row_idx+1, column=column_map[col_idx], value=\"\")\n report.cell(row=row_idx+1, column=column_map[col_idx]).alignment = Alignment(horizontal='center')\n else:\n report.cell(row=row_idx+1, column=column_map[col_idx], value=value)\n report.cell(row=row_idx+1, column=column_map[col_idx]).alignment = Alignment(horizontal='center')\n \n # fill SCR sheet\n scr_sheet = template.get_sheet_by_name('SCR')\n scr_sheet.cell(row=2, column=2, value=self.data_bucket.get_shareclass_infos(\"shareclass_name\"))\n \n # shareclass' infos\n scr_sheet.cell(row=6, column=3, value=self.data_bucket.date)\n scr_sheet.cell(row=7, column=3, value=self.data_bucket.get_shareclass_infos(\"shareclass_name\"))\n scr_sheet.cell(row=8, column=3, value=self.data_bucket.get_shareclass_infos().name)\n scr_sheet.cell(row=9, column=3, value=self.data_bucket.get_shareclass_infos(\"shareclass_currency\"))\n scr_sheet.cell(row=10, column=3, value=self.data_bucket.get_shareclass_nav(\"shareclass_total_net_asset_sc_ccy\"))\n\n # sub-module detail\n submodules = [\"interest_rate_risk\", \n \"equity_risk\",\n \"property_risk\",\n \"spread_risk\",\n \"currency_risk\"]\n\n for i, submodule in enumerate(submodules):\n weight_capreq = getattr(self.data_bucket.scr_module, f\"compute_{submodule}_submodule\")()\n scr_sheet.cell(row=16 + i, column=3, value=weight_capreq * self.data_bucket.get_shareclass_nav(\"shareclass_total_net_asset_sc_ccy\"))\n scr_sheet.cell(row=16 + i, column=3).number_format = '#,##0.00'\n scr_sheet.cell(row=16 + i, column=4, 
value=weight_capreq)\n scr_sheet.cell(row=16 + i, column=4).number_format = '#,##0.000 %'\n \n categories = [\"Interest_rate_risk_Up\",\n \"Interest_rate_risk_Down\",\n \"Equity_Risk_Type_1\",\n \"Equity_Risk_Type_2\",\n \"Property\",\n \"Spread_risk_of_bonds\",\n \"Credit_risk_Structured_Products\",\n \"Credit_risk_Derivatives_Up\",\n \"Credit_risk_Derivatives_Down\",\n \"Currency_risk_Up\",\n \"Currency_risk_Down\"]\n\n # risks specific detail\n for i, category in enumerate(categories):\n weight_capreq = getattr(self.data_bucket.scr_module, category)\n print(category)\n print(weight_capreq)\n scr_sheet.cell(row=23 + i, column=3, value=weight_capreq * self.data_bucket.get_shareclass_nav(\"shareclass_total_net_asset_sc_ccy\"))\n scr_sheet.cell(row=23 + i, column=3).number_format = '#,##0.00'\n scr_sheet.cell(row=23 + i, column=4, value=weight_capreq)\n scr_sheet.cell(row=23 + i, column=4).number_format = '#,##0.000 %'\n\n # total SCR market risk\n scr_sheet.cell(row=13, column=3, value= self.data_bucket.scr_module.compute_total_scr_market_risk())\n scr_sheet.cell(row=13, column=3).number_format = '#,##0.000 %'\n # save produced report\n template.save(self.output_dir / output_file_name)", "title": "" }, { "docid": "2f5a8ef28ec7d06f43b315aec7ce29b8", "score": "0.49828202", "text": "def export(self, private=False):\r\n \r\n pass", "title": "" }, { "docid": "84b20a6ff476a53a9fd5095c6e3d288b", "score": "0.49727985", "text": "def save_plotdata(self,type_name='te'):\n #Combine all the data into a single dictionary\n comb_dict = {'years':self.years,'temp_zero':self.temp_zero,'llems_zero':self.llems_zero,'slcp_zero':self.slcp_zero}\n \n #Write the data to a .h5 file\n now = datetime.datetime.now()\n dd.io.save('./spmfig/output/spmfig_plotdata_'+type_name+'_'+now.strftime('%d%m%Y_%H:%M:%S')+'.h5', comb_dict, compression=None)\n \n return", "title": "" }, { "docid": "1ee3245ea652e1e891486d39a50785bb", "score": "0.49683088", "text": "def exportErp(self, extension_file=None, savepath =None, **kwargs ):\r\n \r\n filename = kwargs.pop('filename', None)\r\n if filename is not None : \r\n self.filename =filename\r\n if extension_file is not None : \r\n self.export_fex = extension_file \r\n if savepath is not None :\r\n self.savepath = savepath \r\n \r\n if self.export_fex.find('csv') <0 and self.export_fex.find('xlsx') <0: \r\n self.export_fex ='.csv'\r\n self.export_fex= self.export_fex.replace('.', '')\r\n \r\n erp_time = '{0}_{1}'.format(datetime.datetime.now().date(), \r\n datetime.datetime.now().time())\r\n \r\n # check whether `savepath` and `filename` attributes are set.\r\n for addf in ['savepath', 'filename']: \r\n if not hasattr(self, addf): \r\n setattr(self, addf, None)\r\n \r\n if self.filename is None : \r\n self.filename = 'erpdf-{0}'.format(\r\n erp_time + '.'+ self.export_fex).replace(':','-')\r\n elif self.filename is not None :\r\n self.filename += '.'+ self.export_fex\r\n \r\n # add name into the workbooks\r\n exportdf = self.erpdf.copy() \r\n \r\n exportdf.insert(loc=1, column='name', value =self.fnames )\r\n exportdf.reset_index(inplace =True)\r\n exportdf.insert(loc=0, column='num', value =exportdf['index']+1 )\r\n exportdf.drop(['id', 'index'], axis =1 , inplace=True)\r\n \r\n if self.export_fex =='xlsx':\r\n\r\n # with pd.ExcelWriter(self.filename ) as writer: \r\n # exportdf.to_excel(writer, index=False, sheet_name='data')\r\n \r\n exportdf.to_excel(self.filename , sheet_name='data',\r\n index=False) \r\n \r\n elif self.export_fex =='csv': \r\n \r\n exportdf.to_csv(self.filename, 
header=True,\r\n index =False)\r\n\r\n if self.savepath is None :\r\n self.savepath = savepath_('_erpData_')\r\n \r\n if self.savepath is not None :\r\n if not os.path.isdir(self.savepath): \r\n self.savepath = savepath_('_erpData_')\r\n try : \r\n shutil.move(os.path.join(os.getcwd(),self.filename) ,\r\n os.path.join(self.savepath , self.filename))\r\n except : \r\n self._logging.debug(\"We don't find any path to save ERP data.\")\r\n else: \r\n print('--> ERP features file <{0}> is well exported to {1}'.\r\n format(self.filename, self.savepath))", "title": "" }, { "docid": "8ddbe3d52185af5fc3cd3efae25c0b03", "score": "0.49634224", "text": "def write_file(self):\n # open file for writing\n f_fbob = open(self.fn_path, 'w')\n\n # write header\n f_fbob.write('{}\\n'.format(self.heading))\n\n # write sections 1 and 2 : NOTE- what about NOPRINT?\n line = '{:10d}'.format(self.nqfb)\n line += '{:10d}'.format(self.nqcfb)\n line += '{:10d}'.format(self.nqtfb)\n line += '{:10d}'.format(self.iufbobsv)\n if self.no_print or 'NOPRINT' in self.options:\n line += '{: >10}'.format('NOPRINT')\n line += '\\n'\n f_fbob.write(line)\n f_fbob.write('{:10e}\\n'.format(self.tomultfb))\n\n # write sections 3-5 looping through observations groups\n c = 0\n for i in range(self.nqfb):\n # while (i < self.nqfb):\n # write section 3\n f_fbob.write('{:10d}{:10d}\\n'.format(self.nqobfb[i],\n self.nqclfb[i]))\n\n # Loop through observation times for the groups\n for j in range(self.nqobfb[i]):\n # write section 4\n line = '{}{:10d}{:10.4g} {:10.4g}\\n'.format(self.obsnam[c],\n self.irefsp[c],\n self.toffset[c],\n self.flwobs[c])\n f_fbob.write(line)\n c += 1 # index variable\n\n # write section 5 - NOTE- need to adjust factor for multiple\n # observations in the same cell\n for j in range(abs(self.nqclfb[i])):\n # set factor to 1.0 for all cells in group\n if self.nqclfb[i] < 0:\n self.factor[i, :] = 1.0\n line = '{:10d}'.format(self.layer[i, j])\n line += '{:10d}'.format(self.row[i, j])\n line += '{:10d}'.format(self.column[i, j])\n line += ' '.format(self.factor[i, j])\n # note is 10f good enough here?\n line += '{:10f}\\n'.format(self.factor[i, j])\n f_fbob.write(line)\n\n f_fbob.close()\n\n #\n # swm: BEGIN hack for writing standard file\n sfname = self.fn_path\n sfname += '_ins'\n\n # write header\n f_ins = open(sfname, 'w')\n f_ins.write('jif @\\n')\n f_ins.write('StandardFile 0 1 {}\\n'.format(self.nqtfb))\n for i in range(0, self.nqtfb):\n f_ins.write('{}\\n'.format(self.obsnam[i]))\n\n f_ins.close()\n # swm: END hack for writing standard file\n\n return", "title": "" }, { "docid": "b1d469f5b24e28085d9fd23b139942a0", "score": "0.4960736", "text": "def export_csv(self, request, *args, **kwargs):\n resource = HouseHoldSizeResource()\n queryset = HouseHoldSize.objects.filter(is_active=settings.IS_ACTIVE)\n dataset = resource.export(queryset)\n response = HttpResponse(dataset.csv, content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"household_sizes.csv\"'\n return response", "title": "" }, { "docid": "248e3ac2ffa931220e87c1f28ca46914", "score": "0.49560457", "text": "def export_cube(self):\r\n export.export_cube(self.filename, self.data, self.dataview)", "title": "" }, { "docid": "3e62aed618ff3ce38883a69c9aa81dbe", "score": "0.4936128", "text": "def filename(self) -> str:\n\n return f'{self.name}.h5'", "title": "" } ]
81045398f6bf9043f0bdfafd927600a6
Validates a token with a given secret.
[ { "docid": "64b94e0d0462089561d740ee3f3aa6f1", "score": "0.75841236", "text": "def is_token_valid(token: str, secret: str) -> bool:\n try:\n jwt.decode(\n str.encode(token),\n secret,\n algorithms=[config.JWT_ALG],\n )\n except jwt.exceptions.PyJWTError: # type: ignore\n return False\n\n return True", "title": "" } ]
[ { "docid": "e495750d8ead23368478d2c478c57f9a", "score": "0.66990536", "text": "def valid_token(token, config):\n try:\n decode(token, config)\n except Exception:\n warning_log(\"Invalid token\")\n return True", "title": "" }, { "docid": "2104e0e41d18f133dcc033abd75ab3d4", "score": "0.6507088", "text": "def validate_token(token):\n if token is not None and not hasattr(token, \"key\"):\n raise ValueError(\"Invalid token.\")\n return token", "title": "" }, { "docid": "81ea7f8cae990f40234a7b0d9d17fcea", "score": "0.6438988", "text": "def is_valid_token(request):\n if request.form['token'] == conf.TOKEN:\n return True\n return False", "title": "" }, { "docid": "03f490fdb43f7bc011e1a1bf32eb27d6", "score": "0.6371356", "text": "def validate_auth_token(secret, form_data, user, model):\n try:\n expiry = int(form_data.get('expiry', ''))\n except ValueError:\n return False # TODO better logging\n\n now = int(datetime.datetime.now().timestamp())\n if expiry < now:\n return False\n\n req_auth_token = create_auth_token(\n secret, auth_token_data_from_form(form_data, user, model),\n )\n\n return hmac.compare_digest(req_auth_token,\n form_data.get('auth_token', None))", "title": "" }, { "docid": "8c57a73aa4f875da1fe95231f7040725", "score": "0.6369298", "text": "def verify_auth_token(token, keys):\n return (Fernet(keys.private.encode()).decrypt(token.encode()) ==\n keys.secret.encode())", "title": "" }, { "docid": "7985adcbed2e6821855e2ea0f1580f3f", "score": "0.63633364", "text": "def verify_token(request):\n if request.form.get(\"token\") in config.tokens.values():\n return True\n return False", "title": "" }, { "docid": "f364b57d3541d6bfdac497519a7249bc", "score": "0.6351632", "text": "def validate_token(self, request, consumer, token):\n oauth_server, oauth_request = oauth_provider.utils.initialize_server_request(request)\n oauth_server.verify_request(oauth_request, consumer, token)", "title": "" }, { "docid": "9352d627d38818d68fdc99d00aee9578", "score": "0.633496", "text": "def test_if_token_provided_is_valid(self):\n self.assertTrue(self.scraper.check_valid_token())", "title": "" }, { "docid": "7bb2253d370c2434117ea162767b29b3", "score": "0.6328179", "text": "def validate(self, token, **validation_context):\n return token == self.generate()", "title": "" }, { "docid": "67d66b4b593bd01a12b8b5278ff2f957", "score": "0.62814873", "text": "def validate_token(self, token):\n try:\n username, password = self.__decode(token)\n # call db and check that shit\n except:\n return False", "title": "" }, { "docid": "cb5864c76d7d69fe769d4dc59bc5ea76", "score": "0.61509943", "text": "def verify_token_401(token):\n if not token or not verify_auth_token(app.secret_key, token.encode(encoding=\"utf-8\")):\n flask.abort(401)", "title": "" }, { "docid": "fef2af55d9cb278cc1754d571d63dacf", "score": "0.6123444", "text": "def verify_token(token):\n if token in tokens:\n return tokens[token]", "title": "" }, { "docid": "65fe6be426368085ef3173df0642cbb8", "score": "0.61187184", "text": "def ValidateToken(key, user, token, action='*', max_age=DEFAULT_TIMEOUT_):\n if not token or not user:\n return False\n try:\n (timestamp, digest) = token.split(DELIMITER_)\n except ValueError:\n return False\n expected = GenerateToken(key, user, action, timestamp)\n (_, expected_digest) = expected.split(DELIMITER_)\n now = int(time.time())\n if _Compare(expected_digest, digest) and now < int(timestamp) + max_age:\n return True\n return False", "title": "" }, { "docid": "7160d08d3e00b4246278a2f3d02df8c3", "score": "0.6114311", "text": "def 
validate_token(parsed_token, debug):\r\n if not parsed_token:\r\n dbg_print(\"Empty token!\", debug=debug)\r\n return False\r\n return True", "title": "" }, { "docid": "2280b114301692393d3685d43a9ec83f", "score": "0.6106333", "text": "def validate_token(self, token, scopes_required=None):\n valid = self._validate_token(token, scopes_required)\n if valid is True:\n return True\n else:\n return ErrStr(valid)", "title": "" }, { "docid": "3ee8e3755513f32301b0cdf50e15244f", "score": "0.6085364", "text": "def token_is_valid(token):\n if token is None:\n return False\n if isinstance(token, UUID):\n return True\n try:\n UUID(token)\n except ValueError:\n return False\n return True", "title": "" }, { "docid": "39480e23f79b44779052775d67e66330", "score": "0.60710996", "text": "def checkToken():", "title": "" }, { "docid": "136de14b10b98ea9c3f60738960192d6", "score": "0.60500795", "text": "def is_valid_token(token):\n auth = Auth(token)\n\n try:\n # Make request\n result = auth.test()\n except Error as err:\n # Check for auth errors\n report_event(str(err), {})\n return False\n\n # Check for further errors\n if not result.successful:\n report_event('token_invalid', {'result': result.__dict__})\n return False\n\n # Return successful\n return True", "title": "" }, { "docid": "4ace365f02f3281a8692accbbb38945f", "score": "0.6014292", "text": "def handleInvalidToken(self, token: str) -> None:\n dateprint(\"Invalid token: \" + token)\n print(\"\")\n t = dateinput(\"Please enter your bot token: \")\n vt = self.checkToken(t)\n if vt is None:\n return self.handleInvalidToken(t)\n self.setToken(t)", "title": "" }, { "docid": "f8551d7b92cfd547ab46b505cae03624", "score": "0.5945276", "text": "def validate_secret_identifier(namespace):\n from azure.keyvault.key_vault_id import KeyVaultIdentifier\n\n identifier = getattr(namespace, 'secret_identifier', None)\n try:\n # this throws an exception for invalid format of secret identifier\n KeyVaultIdentifier(uri=identifier)\n except Exception as e:\n raise CLIError(\"Received an exception while validating the format of secret identifier.\\n{0}\".format(str(e)))", "title": "" }, { "docid": "93d5b27dd18871ae098fe45b23d07acd", "score": "0.59292275", "text": "def access_token_validate(self, access_token):\n print(access_token)\n try:\n return FirebaseAPI.verify_id_token(access_token)\n except serializers.ValidationError as e:\n raise serializers.ValidationError(\n \"Invalid Firebase token!\") from e", "title": "" }, { "docid": "4c7b56efbc15363335f003918aed6bb8", "score": "0.59082645", "text": "def check_token(self, instance, token):\n # Parse the token\n try:\n nd_b36, hash = token.split(\"-\")\n except ValueError:\n return False\n\n try:\n nd = base36_to_int(nd_b36)\n except ValueError:\n return False\n\n # Check that the num_days/uid has not been tampered with\n if not constant_time_compare(self._make_token_with_timestamp(instance, nd), token):\n return False\n\n # Check the num_days is within limit\n if (self._num_days(self._today()) - nd) > self._timeout_days():\n return False\n\n return True", "title": "" }, { "docid": "852db8d8d687d05c7e509bf339e054e6", "score": "0.59021974", "text": "def validate_token(self, token):\n try:\n token_data = self.decode_token(token)\n now = datetime.datetime.utcnow()\n return token_data.expiration > now\n except:\n return False", "title": "" }, { "docid": "c146198579fb786bfe4542a8b6cf082a", "score": "0.58616483", "text": "def is_valid_token(req,name):\n # checks the token passed in the header is equal to the cached token\n try:\n 
bearer_token = req.headers.get('Authorization')\n auth_token = bearer_token.split(\" \")[1]\n cached_token = verify_token(token_cache.get(\"access_token_\" + str(name)))\n token_json = ast.literal_eval(cached_token)\n token = verify_token(token_json)\n return bool(token is not None and token_json['access_token'] == auth_token)\n except RuntimeError as err:\n logger.error(str(err))\n return jsonify({\"status\" : 500 , \"message\": str(err)}) , 500", "title": "" }, { "docid": "c7324513bac9b148c5023bb9d9ff71be", "score": "0.58389634", "text": "def test_validate_test_token(test_token, pem):\n\n # Having a test token\n assert test_token\n\n # Decoding it\n claims = jwt.decode(test_token, pem)\n claims.validate()", "title": "" }, { "docid": "22bb1303982742f9f5437aca4fe08543", "score": "0.58244956", "text": "def valid_token(token):\n try:\n valid = token and unhexlify(token)\n except Exception:\n valid = False\n\n return valid", "title": "" }, { "docid": "96c3d0f2d4b45ff76ded4c463e2f655d", "score": "0.5822029", "text": "def verify_auth_token(eppn, token, nonce, timestamp, generator=sha256):\n current_app.logger.debug('Trying to authenticate user {} with auth token {}'.format(eppn, token))\n shared_key = current_app.config.get('TOKEN_LOGIN_SHARED_KEY')\n\n # check timestamp to make sure it is within -300..900 seconds from now\n now = int(time.time())\n ts = int(timestamp, 16)\n if (ts < now - 300) or (ts > now + 900):\n current_app.logger.debug('Auth token timestamp {} out of bounds ({} seconds from {})'.format(\n timestamp, ts - now, now))\n return False\n\n # verify there is a long enough nonce\n if len(nonce) < 16:\n current_app.logger.warning('Auth token nonce {} too short'.format(nonce))\n return False\n\n # verify token format\n expected = generator('{0}|{1}|{2}|{3}'.format(\n shared_key, eppn, nonce, timestamp)).hexdigest()\n if len(expected) != len(token):\n current_app.logger.warning('Auth token bad length')\n return False\n\n # constant time comparision of the hash, courtesy of\n # http://rdist.root.org/2009/05/28/timing-attack-in-google-keyczar-library/\n result = 0\n for x, y in zip(expected, token):\n result |= ord(x) ^ ord(y)\n current_app.logger.debug('Auth token match result: {}'.format(result == 0))\n return result == 0", "title": "" }, { "docid": "bf7ffbdc1a596b18120a6ba0f196f3bb", "score": "0.5812472", "text": "async def verify_secret_middleware(request, handler):\n if request.headers.get(SECRET_HEADER) != appscale_info.get_secret():\n logger.warn(\"Received bad secret from {client}\"\n .format(client=request.remote))\n return web.Response(status=http.HTTPStatus.FORBIDDEN,\n reason=\"Bad secret\")\n return await handler(request)", "title": "" }, { "docid": "726c0633add1c7397d4d75020459f41e", "score": "0.580409", "text": "def check_secret(self, request):\n secret = request.headers[\"X-Gitlab-Token\"]\n return secret == self.secret", "title": "" }, { "docid": "e2b8e4734603fab44b056b846989191a", "score": "0.5800609", "text": "def require_auth(view: HTTPHandler):\n @wraps(view)\n async def middleware(request: web.Request):\n token = request['token']\n\n is_valid = (\n is_token_valid(token, config.APP_SECRET) or\n is_token_valid(token, config.SOCKET_SECRET)\n )\n if not is_valid:\n raise web.HTTPForbidden()\n\n return await view(request)\n return middleware", "title": "" }, { "docid": "46932044d319e44ca15542207462a37e", "score": "0.5786265", "text": "def token_valid():\n\tcheck_for_active_session()\n\tif not hasattr(session,\"token\") or not session.token:\n\t\treturn False\n\treturn 
session.token[\"expires_at\"] > time.time()", "title": "" }, { "docid": "1ba7ed23b04d6a33a07cf3236d097853", "score": "0.5765701", "text": "def _validate_token(self, token, scopes_required=None):\n if scopes_required is None:\n scopes_required = []\n scopes_required = set(scopes_required)\n\n token_info = None\n valid_token = False\n has_required_scopes = False\n if token:\n try:\n token_info = self._get_token_info(token)\n except Exception as ex:\n token_info = {'active': False}\n logger.error('ERROR: Unable to get token info')\n logger.error(str(ex))\n\n valid_token = token_info.get('active', False)\n\n if 'aud' in token_info and \\\n current_app.config['OIDC_RESOURCE_CHECK_AUD']:\n valid_audience = False\n aud = token_info['aud']\n clid = current_app.config.get('OIDC_CLIENT_ID')\n if not clid:\n clid = self.client_secrets.get('client_id')\n\n if not clid:\n raise Exception('No \\'client_id\\' defined in client_secrets or OIDC_CLIENT_ID set.')\n\n if isinstance(aud, list):\n valid_audience = clid in aud\n else:\n valid_audience = clid == aud\n\n if not valid_audience:\n logger.error('Refused token because of invalid '\n 'audience')\n valid_token = False\n\n if valid_token:\n token_scopes = token_info.get('scope', '').split(' ')\n else:\n token_scopes = []\n has_required_scopes = scopes_required.issubset(\n set(token_scopes))\n\n if not has_required_scopes:\n logger.debug('Token missed required scopes')\n\n if (valid_token and has_required_scopes):\n g.oidc_token_info = token_info\n return True\n\n if not valid_token:\n return 'Token required but invalid'\n elif not has_required_scopes:\n return 'Token does not have required scopes'\n else:\n return 'Something went wrong checking your token'", "title": "" }, { "docid": "975f3d60dd8f2edefd495d04eb96b512", "score": "0.57433593", "text": "def _decode_token(token, secret_key):\n try:\n payload = jwt.decode(\n jwt=token,\n key=secret_key,\n algorithms=['HS512'],\n verify=True,\n options={'require_exp': True},\n )\n except jwt.ExpiredSignature:\n raise JWTError('JWT is expired')\n except jwt.DecodeError:\n raise JWTError(\"JWT can't be decoded. Invalid signature\")\n\n return payload", "title": "" }, { "docid": "ca8c1570e9a870bcc6c62e889de7109b", "score": "0.57310337", "text": "def validate_tokens(tokens):\n invalid = invalid_tokens(tokens)\n\n if invalid:\n raise APNSInvalidTokenError('Invalid token format. 
'\n 'Expected hex string: {0}'\n .format(', '.join(invalid)))", "title": "" }, { "docid": "02959fe7146416530264daeefd2de4e1", "score": "0.5705735", "text": "def test_validate_token(self):\n username = \"******\"\n password = \"******\"\n auth = authentication.authentication.Authentication()\n api_token = auth.get_token(username, password)\n print(api_token)\n self.assertTrue(auth.validate_token(api_token), \"Token is invalid or expired!\")", "title": "" }, { "docid": "07844683c353896e04291d82e55bf941", "score": "0.57014024", "text": "def check_token(self, user, token):\n # Parse the token\n try:\n ts_b36, hash = token.split(\"-\")\n except ValueError:\n return False\n\n try:\n ts = base36_to_int(ts_b36)\n except ValueError:\n return False\n\n # Check that the timestamp/uid has not been tampered with\n if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):\n return False\n\n # Check the timestamp is within limit\n if (self._num_seconds(self._now()) - ts) > REGISTRATION_TIMEOUT_DAYS * 86400:\n return False\n\n return True", "title": "" }, { "docid": "16fba600f9b16fd03479372bb1abd24c", "score": "0.5685615", "text": "def _validate_post(self) -> None:\n ct_header = self.request.headers.get(\"Content-Type\", None)\n if ct_header != \"application/json\":\n raise tornado.web.HTTPError(HTTPStatus.FORBIDDEN)\n # verifying that the secret token is the one the user set when the user set one\n if self.secret_token is not None:\n token = self.request.headers.get(\"X-Telegram-Bot-Api-Secret-Token\")\n if not token:\n _LOGGER.debug(\"Request did not include the secret token\")\n raise tornado.web.HTTPError(\n HTTPStatus.FORBIDDEN, reason=\"Request did not include the secret token\"\n )\n if token != self.secret_token:\n _LOGGER.debug(\"Request had the wrong secret token: %s\", token)\n raise tornado.web.HTTPError(\n HTTPStatus.FORBIDDEN, reason=\"Request had the wrong secret token\"\n )", "title": "" }, { "docid": "1c02b2cc49106fc01f93ac0236c916b9", "score": "0.56550205", "text": "def verify_auth_token(self, auth_token):", "title": "" }, { "docid": "dc8d34d0d0a1d22513a01e105834a2fe", "score": "0.56467104", "text": "def token_check():\n\n token = request.forms.get(\"token\")\n\n try:\n res = Token.get(Token.token == token)\n except:\n resp = routing.base.generate_error_response(code=409)\n resp[\"message\"] = \"Invalid authentication token.\"\n return json.dumps(resp) + \"\\n\"\n\n resp = routing.base.generate_bare_response()\n\n if res.has_expired:\n resp[\"expired\"] = True\n\n resp[\"auth\"] = {\n \"username\": res.for_user.username,\n \"expires_at\": str(res.expire_time),\n }\n\n return json.dumps(resp) + \"\\n\"", "title": "" }, { "docid": "89aec4c553c7858d6ee5f06a55583df4", "score": "0.5645321", "text": "def valid_token(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n s = URLSafeSerializer(current_app.config['SECRET_KEY'],\n salt=kwargs['facility'])\n\n if 'token' not in request.args:\n raise InvalidUsage('no token', status_code=401)\n\n try:\n s.loads(request.args.get('token'))\n\n except:\n raise InvalidUsage('bad token', status_code=401)\n\n return f(*args, **kwargs)\n\n return decorated_function", "title": "" }, { "docid": "15a22011a6cc36ca9a53ca90facfa8ca", "score": "0.5641392", "text": "def verify_auth_token(token):\n s = Serializer(secret_key)\n try:\n data = s.loads(token)\n except SignatureExpired:\n return None # valid token, but expired\n except BadSignature:\n return None # invalid token\n user = data['userid']\n return user", "title": "" }, { "docid": 
"5ce2409e59a548056e7998b2505f3cd8", "score": "0.5638519", "text": "def _validate(token):\n if len(token)==1:\n return True\n else:\n # If the token begins or ends with a stop word, we remove the token\n if token[0] in Vocabulary.STOP_WORDS or token[-1] in Vocabulary.STOP_WORDS:\n return False\n return True", "title": "" }, { "docid": "f3b11d0902ed1192ee3aab4f77c69c3b", "score": "0.5635294", "text": "def verify_token(self, token):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except Exception:\n return None\n return User.query.get(data['id'])", "title": "" }, { "docid": "b4b968a68ff5e14323fc6dbfb96470da", "score": "0.563211", "text": "def is_auth_valid_for_token(token: Optional[SSOToken]) -> bool:\n if token is None:\n logger.debug(\"token is not valid\")\n return False\n\n return token.is_valid", "title": "" }, { "docid": "0d6ed2a41ef4c9b47f69eb148b5a65de", "score": "0.5588379", "text": "def client_access_token_valid(iv, access_token, tag, name):\n\n try:\n raw_token = krypt.decrypt(b\"123456desire9000\", name, iv, access_token, tag)\n except (TypeError, ValueError, Exception) as err:\n raw_token = \"\"\n\n raw_split = raw_token.split(settings.DEFAULT_SEP)\n\n if len(raw_split) == 3:\n key, secret, expires_on = raw_split\n tdata = conn.get('%s%s%s' % (key, settings.DEFAULT_SEP, secret))\n\n if tdata:\n return tdata.get('token') == access_token\n\n return False", "title": "" }, { "docid": "565e88ad02f32c1a71ba8fb4f5cb280a", "score": "0.5578654", "text": "def validate_authtoken(cls, token, ip_address):\n now = datetime.datetime.now()\n auth_tokens = AuthToken.query_authtoken(token)\n if len(auth_tokens) > 0:\n auth_token = auth_tokens[0]\n if auth_token.expires > now:\n if auth_token.ip_address == ip_address:\n return True\n return False", "title": "" }, { "docid": "21a60c8bd5fae191307b741cda29b59b", "score": "0.5572581", "text": "def isValidIdentifier(token: str):\n return bool(re.search('^[a-zA-Z](?:_?[a-zA-Z0-9]+)*$', token)) and token not in params_config", "title": "" }, { "docid": "8bff856fb7eebcb61e6ce71bbbf75c44", "score": "0.55550987", "text": "def token_protected_endpoint(function):\n @wraps(function)\n def decorated(*args, **kwargs):\n auth_token = request.form.get('auth_token')\n if not auth_token:\n return json.dumps({\n 'status': 'fail',\n 'reason': 'You must provide an auth_token',\n })\n\n data = dict(request.form)\n del data['auth_token']\n correct_token = create_token(current_app.config['SECRET_KEY'], data)\n\n if _compare_digest(auth_token, correct_token):\n return function(*args, **kwargs)\n\n else:\n return json.dumps({\n 'status': 'fail',\n 'reason': 'Incorrect auth_token',\n })\n\n return decorated", "title": "" }, { "docid": "0145e74a03c03381fc51e6b33e8974d5", "score": "0.5538127", "text": "def verify_token(token):\n # verifying that the token is not none\n logger.info('verifying the token ..')\n if token is None:\n return jsonify({'status' : 201, 'message' : 'user not authorized to call endpoint '}), 201\n else:\n return token", "title": "" }, { "docid": "078b4a71c39c0fc9cfe0bcd3df660059", "score": "0.5532595", "text": "def verify_jwt(request, secret_key):\n from ..models import User\n\n header = request.headers.get('Authorization', '')\n\n if not header:\n raise JWTError(\"Missing 'Authorization' HTTP header\")\n\n parts = header.split()\n if parts[0].lower() != 'bearer':\n raise JWTError('Unsupported authorization type')\n elif len(parts) == 1:\n raise JWTError(\"JWT missing from 'Authorization' HTTP header\")\n elif len(parts) 
> 2:\n raise JWTError(\"Token contains spaces or is not JWT\")\n\n payload = _decode_token(parts[1], secret_key)\n\n return _find_user(payload)", "title": "" }, { "docid": "095f80c53142a51b489600864c03785e", "score": "0.54911196", "text": "def validate_google_token(token):\n if not token:\n api.logger.info(\"Empty token, denying access\")\n return False\n\n # In non-prod environments, remove the call google to ease\n # testing.\n resp = None\n if app.config[\"ENV\"] == \"production\":\n resp = requests.get(api.config[\"GOOGLE_OAUTH_URI\"], params={\"id_token\": token})\n else:\n from collections import namedtuple\n\n now = datetime.datetime.now()\n exp = now + datetime.timedelta(days=1)\n resp = namedtuple(\"response\", [\"ok\", \"status_code\", \"json\"])(\n ok=True,\n status_code=200,\n json=lambda: {\n \"iss\": \"accounts.google.com\",\n \"exp\": exp.timestamp(),\n \"aud\": api.config[\"GOOGLE_CLIENT_ID\"],\n },\n )\n\n if not resp.ok:\n api.logger.info(\"Request to %s failed: %s\", resp.url, resp.status_code)\n return False\n\n if resp.status_code != 200:\n api.logger.info(\"Unexpected status code: %s\", resp.status_code)\n return False\n\n resp = resp.json()\n\n # Make sure the ISS claim is as expected\n if resp.get(\"iss\") not in [\"accounts.google.com\", \"https://accounts.google.com\"]:\n api.logger.info(\"Unexpected value in iss field: %s\", resp.get(\"iss\"))\n return False\n\n # Make sure the token is not expired\n exp = resp.get(\"exp\")\n if not exp:\n api.logger.info(\"Expected exp field but it does not exist in %s\", str(resp))\n return False\n\n if datetime.fromtimestamp(exp) >= datetime.timestamp():\n api.logger.info(\"Token has expired (exp %s)\", exp)\n return False\n\n # Make sure the aud claim is as expected\n if resp.get(\"aud\") != api.config[\"GOOGLE_CLIENT_ID\"]:\n api.logger.info(\n \"Expected client ID %s but found %s\",\n api.config[\"GOOGLE_CLIENT_ID\"],\n resp.get(\"aud\"),\n )\n return False\n\n return resp", "title": "" }, { "docid": "9a5da13f54abfc90a1965cd3a8a31adb", "score": "0.5482909", "text": "def token_required(func):\n\n @wraps(func)\n def decorated_function(*args, **kwargs):\n\n token = get_token()\n try:\n flask_env = app_config.FLASK_ENV\n secret_key = app_config.JWT_SECRET_KEY\n decoded_token = jwt.decode(\n token,\n secret_key,\n algorithms=['HS256'],\n options={\n 'verify_signature': True,\n 'verify_exp': True})\n except (\n ValueError,\n TypeError,\n jwt.ExpiredSignatureError,\n jwt.DecodeError,\n jwt.InvalidSignatureError,\n jwt.InvalidAlgorithmError,\n jwt.InvalidIssuerError\n ) as error:\n exception_mapper = {\n ValueError: (jwt_errors['server_error'], 500),\n TypeError: (jwt_errors['server_error'], 500),\n jwt.ExpiredSignatureError: (jwt_errors[\n 'expired_token'], 401),\n jwt.DecodeError: (jwt_errors['invalid_token'], 401),\n jwt.InvalidIssuerError: (jwt_errors['issuer_error'], 401),\n jwt.InvalidAlgorithmError: (jwt_errors['algorithm_error'],\n 401),\n jwt.InvalidSignatureError: (jwt_errors[\n 'signature_error'], 500)\n }\n message, status_code = exception_mapper.get(\n type(error), (jwt_errors['server_error'], 500))\n request_error_message('error', message, status_code)\n # setting the payload to the request object and can be accessed with \\\n # request.decoded_token from the view\n setattr(request, 'decoded_token', decoded_token)\n return func(*args, **kwargs)\n\n return decorated_function", "title": "" }, { "docid": "3397fc3d0a0488b21642562633bc7eac", "score": "0.5480472", "text": "def valid_token(token):\n return 
len(token.split(':')) == 2", "title": "" }, { "docid": "c8081ef7bb8d8f258f68a97d45675374", "score": "0.5476462", "text": "def validate(self, token):\n info = jwt.decode(token, self.secret, algorithm='HS256')\n if time() - info['time'] > self.expire:\n raise SignatureExpired(\"The token has been expired\")\n return info", "title": "" }, { "docid": "b790c63c756d7e00cf9152abff00992e", "score": "0.5473212", "text": "def check_signature(cls, key, secret, request):\n validator = cls(key, secret)\n validator.is_valid(request)", "title": "" }, { "docid": "66704b9964f3128d369dd992bb0ab2a6", "score": "0.5471105", "text": "def valid_token(self):\n raise NotImplementedError()", "title": "" }, { "docid": "788afd0f183ac56cac1fa78f05de8e6b", "score": "0.5464936", "text": "def verify_auth_token(cls, token: str):\n s = Serializer(app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except SignatureExpired:\n return None # valid token, but expired\n except BadSignature:\n return None # invalid token\n user = User.get_user_by_id(data['id'])\n return user", "title": "" }, { "docid": "e5ee9e9ea7fa1347522bf435a38aebb8", "score": "0.5460015", "text": "def chk_reg_token(self, token):\n jws_key = JWS(current_app.config['SECRET_KEY'])\n try:\n data = jws_key.loads(token)\n except BadSignature:\n return False\n if data.get('confirm') != self.kname:\n return False\n return True", "title": "" }, { "docid": "d8bcb555c5584e8db5e0590e556080a4", "score": "0.5456378", "text": "def authorize():\r\n token = request.get_json()['token']\r\n try:\r\n token_data = jwt.decode(token, JWT_SECRET, JWT_ALGORITHM)\r\n for user in authorized:\r\n for username, psw in user.items():\r\n if username == token_data['username'] and psw == token_data['password']:\r\n app_log('Used a valid token')\r\n return 'valid'\r\n except jwt.DecodeError:\r\n app_log('Used an invalid token')\r\n return 'not a token'\r\n except jwt.ExpiredSignatureError:\r\n delete_user = jwt.decode(request.get_json()['token'], JWT_SECRET, JWT_ALGORITHM, options={'verify_exp': False})\r\n for user in authorized:\r\n for username, psw in user.items():\r\n if username == delete_user['username']:\r\n authorized.remove(user)\r\n app_log('Used an expired token')\r\n return 'expired token'\r\n return 'error'", "title": "" }, { "docid": "a797b89a1a8aa6062394b44987318028", "score": "0.5451108", "text": "def check_valid_token(token):\n\n server_data = get_data()\n\n # Is this token currently active? 
If not raise an error\n if not server_data[\"tokens\"].get(token, False):\n raise Value_Error(description=\"Invalid token\")\n\n token_payload = jwt.decode(token, get_secret(), algorithms=['HS256'])\n u_id = token_payload[\"u_id\"]\n\n # if the u_id exists return it, otherwise raise an error\n if u_id in server_data[\"users\"]:\n return u_id\n\n raise Value_Error(description=\"User does not exist\")", "title": "" }, { "docid": "c152cae6d9adbb25a7b0db568a76a86f", "score": "0.5447904", "text": "def verify(self, auth_secret):\n session = OAuth2Session(\n self.client_id,\n state=self.auth_state,\n scope=self.scope,\n redirect_uri=self.callback_uri,\n )\n try:\n token = session.fetch_token(\n XERO_OAUTH2_TOKEN_URL,\n client_secret=self.client_secret,\n authorization_response=auth_secret,\n headers=self.headers,\n )\n # Various different exceptions may be raised, so pass the exception\n # through as XeroAccessDenied\n except Exception as e:\n # oauthlib raises a warning when returned token scope\n # is different from the client scope\n if self.relax_token_scope and isinstance(e, Warning):\n session.token = e.token\n else:\n raise XeroAccessDenied(e)\n self._init_oauth(token)", "title": "" }, { "docid": "d0817b366091781cc698a0877a09aae0", "score": "0.54393435", "text": "async def check_token(self, token):\n now = int(time.time())\n return token['expires_at'] - now < 60", "title": "" }, { "docid": "e76b6b0bfe992969c8b983e12e40020c", "score": "0.5429224", "text": "def validate_claims(self, id_token):", "title": "" }, { "docid": "821f81a0a66f9a48bda8a17bf803884f", "score": "0.5415788", "text": "def _check_token(self, tok, etok):\n if tok is not etok:\n self._raise_unexpected_token(etok, tok)", "title": "" }, { "docid": "2ed9821011ded6b521bcddd527b350f0", "score": "0.5404824", "text": "def verify(token, public_key, validate_nonce=None, algorithms=[DEFAULT_ALGORITHM]):\n try:\n token_data = jwt.decode(token, public_key, algorithms=algorithms)\n except jwt.InvalidTokenError as e:\n logger.info('JWT failed verification', exc_info=e)\n return False\n\n claimed_username = token_data.get('username')\n claimed_time = token_data.get('time', 0)\n claimed_nonce = token_data.get('nonce')\n\n # Ensure time is within acceptable bounds\n current_time = time.time()\n min_time, max_time = (current_time - TIMESTAMP_TOLERANCE, current_time + TIMESTAMP_TOLERANCE)\n if claimed_time < min_time or claimed_time > max_time:\n logger.info('Claimed time is outside of allowable tolerances')\n return False\n\n # Ensure nonce is unique\n if validate_nonce:\n if not validate_nonce(claimed_username, claimed_time, claimed_nonce):\n logger.info('Claimed nonce failed to validate')\n return False\n else:\n logger.warning('validate_nonce function was not supplied!')\n\n # If we've gotten this far, the token is valid\n return token_data", "title": "" }, { "docid": "f0cfbafd1233b86fe867c8970b3d4f19", "score": "0.53860617", "text": "def validate(self, scope, jwt_token):\n try:\n token = self.decode(jwt_token)\n except JWTError:\n return self._env.logger.error('unable to decode token \"{}\"'.format(jwt_token))\n #\n # Make sure the token hasn't expired on us ...\n #\n now = datetime.now().timestamp()\n if now >= token.get('expires_at', now):\n return self._env.logger.error('token has expired')\n #\n # See if there is an intersection between the scopes required for this endpoint\n # end and the scopes available in the token.\n #\n if not set(scope).intersection(token.get('scope', [])):\n return self._env.logger.error('unable to validate 
scope for \"{}\"'.format(token))\n self._env.logger.debug('validated scope for \"{}\"'.format(token))\n return True", "title": "" }, { "docid": "c1954ab667dfd98d6d4bcd40b0275d0b", "score": "0.5380081", "text": "def valid(self, user, token):\n\n token_generator = PasswordResetTokenGenerator()\n return user is not None and token_generator.check_token(user, token)", "title": "" }, { "docid": "df792d9c7912c54c72782478901699a6", "score": "0.5374647", "text": "def validate_token(self) -> Dict:\n\n return {}", "title": "" }, { "docid": "0c7b38e8b28e66038d7d78912707cf77", "score": "0.53676873", "text": "def test_secret(self):\n # create a new access token\n access_token = get_secret_value(self.service_client,\n self.secret_config,\n 'AWSPENDING',\n token=self.token)\n\n get_user_info_using_access_token(self.login_username, self.otp_seed, access_token)\n self.logger.info('testSecret: Successfully tested secret')", "title": "" }, { "docid": "60d9c0f461741ceae35c68f71548cf6c", "score": "0.53603894", "text": "def validate_user_token(current_user):\n user = User.query.filter_by(public_id=current_user.public_id).first()\n if not user:\n return make_response({\n \"message\":\"This user does not exist, try again or register\",\n \"error\": True }, 404)\n\n\n return make_response({\n \"message\":\"User Verification Sucessfuly\",\n \"error\": False,\n \"username\": user.username}, 200)", "title": "" }, { "docid": "90dee7c21b7d2932180dced853d7fccf", "score": "0.534649", "text": "def token_required(f):\n\n @wraps(f)\n def decorated(*args, **kwargs):\n token = None\n\n if \"x-access-token\" in request.headers:\n token = request.headers[\"x-access-token\"]\n\n if not token:\n return jsonify({\"message\": \"Missing Token\"}), 401\n\n # Using JWT to decode token and if it is invalid, it will raise exception\n try:\n print(application.config[\"SECRET_KEY\"])\n data = jwt.decode(\n token, application.config[\"SECRET_KEY\"], algorithms=[\"HS256\"]\n )\n\n current_user = User.query.filter_by(public_id=data[\"public_id\"]).first()\n except:\n return jsonify({\"message\": \"Invalid Token\"}), 401\n\n # args\n return f(current_user, *args, **kwargs)\n\n return decorated", "title": "" }, { "docid": "9ed361d8627d82b98a0b7202551bd245", "score": "0.53325474", "text": "def _is_valid_token(self, auth_token):\n # Make sure that auth token is hex. 
If it's None, or something other\n        # than hex, this will raise a ValueError.\n        try:\n            int(auth_token, 16)\n        except (TypeError, ValueError):\n            return False\n\n        # First check if the given token is in our session table; if so it's a\n        # salt-api token and we need to get the Salt token from there.\n        orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))\n        # If it's not in the session table, assume it's a regular Salt token.\n        salt_token = orig_session.get(\"token\", auth_token)\n\n        # The eauth system does not currently support perms for the event\n        # stream, so we're just checking if the token exists not if the token\n        # allows access.\n        if salt_token:\n            # We want to at least make sure that the token isn't expired yet.\n            resolved_tkn = self.resolver.get_token(salt_token)\n            if resolved_tkn and resolved_tkn.get(\"expire\", 0) > time.time():\n                return True\n\n        return False", "title": "" }, { "docid": "e0dea8bb54ecb04c24ba8a5a1944caca", "score": "0.53135717", "text": "def test_valid_blacklisted_token_user(self):\n        with self.client:\n            resp_reguser = RegisterUserApiAction.run(self, user_values.USER_1)\n            # blacklist a valid token\n            blacklist_token = BlacklistToken(\n                token=json.loads(resp_reguser.data.decode())['auth_token'])\n            db.session.add(blacklist_token)\n            db.session.commit()\n            resp_userauth = AuthUserApiAction.run(self, resp_reguser)\n            ChkBlackListTokenUserAuth.run(self, user_values.USER_1,\n                                          resp_userauth)", "title": "" }, { "docid": "ab50ace9d1caf119d0e6909891008d81", "score": "0.53058666", "text": "def get_valid_token():\n    token = Token.objects.order_by('-id').first()\n    if not token:\n        return\n\n    if not token.is_expired():\n        return token.access_token\n\n    try:\n        new_token = refresh_token(token.refresh_token)\n    except TokenException:\n        return\n\n    Token.objects.create(user=token.user, access_token=new_token['access_token'],\n                         refresh_token=new_token['refresh_token'], expires_in=new_token['expires_in'])\n\n    return new_token['access_token']", "title": "" }, { "docid": "d97f87ebd925450c2f42aaab650068a9", "score": "0.5276442", "text": "def verify_reset_token(token):", "title": "" }, { "docid": "b21e3cd5d98cf9f83d5bd1fe5af7cd10", "score": "0.5261587", "text": "def decode_token(token):\r\n    try:\r\n        # try to decode the token using our SECRET variable\r\n        payload = jwt.decode(token, current_app.config['SECRET_KEY'])\r\n        is_blacklisted_token = BlackListToken.check_blacklist(auth_token=payload)\r\n        if is_blacklisted_token:\r\n            return 'Token blacklisted, Please log in again'\r\n        else:\r\n            return payload['sub']\r\n    except jwt.ExpiredSignatureError:\r\n        # the token is expired, return an error string\r\n        return \"Expired token. Please login to get a new token\"\r\n    except jwt.InvalidTokenError:\r\n        # the token is invalid, return an error string\r\n        return \"Invalid token. 
Please register or login\"", "title": "" }, { "docid": "0de51beec5edc8c18e3a1ebb1d6e1dce", "score": "0.52423245", "text": "def token_required(func):\n\n @wraps(func)\n def decorator(request, *args, **kwargs):\n params = QueryDict(request.body)\n token = params.get('token')\n openid = None\n\n if token:\n openid = confirm_validate_token(token)\n guide = models.Guide.objects.filter(openid=openid)\n if len(guide) > 0:\n guide = guide[0]\n if not guide.telephone:\n return HttpResponse(\n json.dumps({'code': 2, 'data': u'Token illegal'}),\n content_type='application/json')\n else:\n return HttpResponse(\n json.dumps({'code': 2, 'data': u'Token illegal'}),\n content_type='application/json')\n if not openid or openid == '':\n return HttpResponse(\n json.dumps({'code': 2, 'data': u'Token illegal'}),\n content_type='application/json')\n return func(request, *args, **kwargs)\n\n return decorator", "title": "" }, { "docid": "6c94e42ee66a9e5b6f1d0b908fc87ede", "score": "0.524185", "text": "def verify_twitter_token(api_key, api_secret_key):\n consumer_key = config('TWITTER_CONSUMER_API_KEY')\n consumer_secret = config('TWITTER_CONSUMER_SECRET_KEY')\n\n try:\n api=twitter.Api(\n consumer_key=consumer_key,\n consumer_secret=consumer_secret,\n access_token_key=api_key,\n access_token_secret=api_secret_key\n )\n twitter_profile = api.VerifyCredentials(include_email=True)\n return twitter_profile\n except Exception as e:\n pass", "title": "" }, { "docid": "9e93cebdda4a8a7d54ba8f5e40810123", "score": "0.52363545", "text": "def token_required(f):\n @wraps(f)\n def decorated(self, *args, **kwargs):\n token = None\n if 'token' in request.headers:\n token = request.headers['token']\n if not token:\n return jsonify({'message':'Token is required to perform this action!'}), 401\n try:\n data = jwt.decode(token, secret_key, algorithm='HS256' )\n cur = connection.cursor()\n cur.execute(\"SELECT used_token FROM tokensblacklisted WHERE used_token=%s\",(token,))\n used_token = cur.fetchone()\n if used_token:\n return jsonify({'message':'Token black listed'}), 401\n cur.execute(\"SELECT * FROM users WHERE user_id=%s\",(data['user_id'], ))\n current_user = cur.fetchall()\n except:\n return jsonify({'message':'Token has expired,please login again'}), 401\n return f(self, current_user, *args, **kwargs)\n return decorated", "title": "" }, { "docid": "826e0f47833c7a562b9b52940defcd96", "score": "0.5221062", "text": "def check_auth():\n if get_oauth_token():\n auth_info = get_auth_info()\n if not auth_info:\n raise MissingTokenInfoException(\"Could not get token info\")\n if datetime.now() >= auth_info['expires_at']:\n msg = (\n 'Token is expired. 
Now %s but valid until %s',\n datetime.now(), auth_info['expires_at']\n )\n\n log.warn(\n msg[0], *msg[1:]\n )\n raise TokenIsExpired(msg[0] % msg[1:])\n else:\n raise MissingTokenException()", "title": "" }, { "docid": "0440a839b138287562f668b4d5e7e39b", "score": "0.5215083", "text": "def _ValidateSecrets(secrets_dict):\n mount_path_to_secret = collections.defaultdict(list)\n for key, value in six.iteritems(secrets_dict):\n if _SECRET_PATH_PATTERN.search(key):\n mount_path = key.split(':')[0]\n secret_res1 = _SECRET_VERSION_SECRET_RESOURCE_PATTERN.search(value).group(\n 'secret_resource'\n )\n\n if mount_path in mount_path_to_secret:\n secret_res_match1 = _SECRET_RESOURCE_PATTERN.search(secret_res1)\n project1 = secret_res_match1.group('project')\n secret1 = secret_res_match1.group('secret')\n\n for secret_res2 in mount_path_to_secret[mount_path]:\n secret_res_match2 = _SECRET_RESOURCE_PATTERN.search(secret_res2)\n project2 = secret_res_match2.group('project')\n secret2 = secret_res_match2.group('secret')\n\n if _SecretsDiffer(project1, secret1, project2, secret2):\n raise ArgumentTypeError(\n 'More than one secret is configured for the mount path '\n \"'{mount_path}' [violating secrets: {secret1},{secret2}].\"\n .format(\n mount_path=mount_path,\n secret1=secret1\n if project1 == _DEFAULT_PROJECT_IDENTIFIER\n else secret_res1,\n secret2=secret2\n if project2 == _DEFAULT_PROJECT_IDENTIFIER\n else secret_res2,\n )\n )\n else:\n mount_path_to_secret[mount_path].append(secret_res1)", "title": "" }, { "docid": "a7d061ec775ec38d8b44cba4a46c1925", "score": "0.52138233", "text": "def verify_token(self, token, **kwargs):\n\n token = force_bytes(token)\n jws = JWS.from_compact(token)\n header = json.loads(jws.signature.protected)\n\n try:\n header.get(\"alg\")\n except KeyError:\n msg = \"No alg value found in header\"\n raise SuspiciousOperation(msg)\n\n jwk_json = self.retrieve_matching_jwk(header)\n jwk = JWK.from_json(jwk_json)\n\n if not jws.verify(jwk):\n msg = \"JWS token verification failed.\"\n raise SuspiciousOperation(msg)\n\n # The 'token' will always be a byte string since it's\n # the result of base64.urlsafe_b64decode().\n # The payload is always the result of base64.urlsafe_b64decode().\n # In Python 3 and 2, that's always a byte string.\n # In Python3.6, the json.loads() function can accept a byte string\n # as it will automagically decode it to a unicode string before\n # deserializing https://bugs.python.org/issue17909\n return json.loads(jws.payload.decode(\"utf-8\"))", "title": "" }, { "docid": "5b30c8bb2c07d31e646cf901d98cee0d", "score": "0.52116907", "text": "def test_user_validate_token_fail(self):\n # Arrange:\n data = {'username': self.username, 'password': self.password}\n response = self.client.post(self.login_url, data, format='json')\n token = Token.objects.filter(user__username=self.username).first()\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key + 'fake')\n\n # Act:\n response = self.client.get(\n self.validate_token_url, data, format='json'\n )\n\n # Assert after request:\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertNotEqual(\n response.data,\n {'detail': 'Token is valid'},\n 'The response indicates the validation was correct'\n )", "title": "" }, { "docid": "228cc8cc41ba0f51a63cd70f9fa14e86", "score": "0.5211504", "text": "def parse_token(token_value, debug):\r\n token = None\r\n try:\r\n token = jwt.decode(str(token_value).rstrip(), options={\"verify_signature\": False})\r\n except 
jwt.exceptions.DecodeError as errMsg:\r\n err_print(\"Invalid Access Token!\")\r\n dbg_print(errMsg, debug=debug)\r\n return None\r\n success = validate_token(token, debug=debug)\r\n if not success:\r\n dbg_print(\"Invalid EdgeKV Access Token!\", debug=debug)\r\n token = None\r\n return token", "title": "" }, { "docid": "ef056f74edbe643f43161a4616156dc6", "score": "0.5202083", "text": "def is_token_valid(self):\n if not self.auth_token:\n return False\n\n if not self.auth_token_expires:\n return False\n\n expires = self.auth_token_expires - datetime.timedelta(\n seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS\n )\n\n time_tuple_expires = expires.utctimetuple()\n time_tuple_now = datetime.datetime.utcnow().utctimetuple()\n\n if time_tuple_now < time_tuple_expires:\n return True\n\n return False", "title": "" }, { "docid": "88bfa98a4617ebd3f89faf9d4bb727f3", "score": "0.5201931", "text": "def test_desrialize_token_tampered(self, token):\n user = User.deserialize_token(f\"{token}xyz\")\n assert user is None", "title": "" }, { "docid": "9a99da8a7d5e1869f8903d433ce1da15", "score": "0.51994294", "text": "def __init__(self, secret: str) -> None:\n self.secret = secret", "title": "" }, { "docid": "d4108d3ccb969d6085ea6ff34b449c9e", "score": "0.5192846", "text": "async def validate_access_token(\n self,\n token: str\n ) -> Optional[RefreshToken]:\n try:\n decoded_token = jwt.decode(token, verify=False)\n except jwt.InvalidTokenError:\n return\n\n refresh_token = self.refresh_tokens.get(decoded_token.get(\"iss\"), None)\n if not refresh_token:\n return\n\n # Check if token is expired\n if time.time() > decoded_token.get(\"exp\"):\n return\n\n try:\n jwt.decode(\n token,\n refresh_token.jwt_key,\n issuer=refresh_token.id,\n algorithms=[\"HS256\"])\n except jwt.InvalidTokenError:\n return\n\n return refresh_token", "title": "" }, { "docid": "b65b3b0e0ab0f55581798fd268b95fad", "score": "0.51797545", "text": "def test_validate_expired_token(expired_token, pem):\n\n # When a token is expired\n assert expired_token\n claims = jwt.decode(expired_token, pem)\n\n # Validate token should raise specific\n with pytest.raises(ExpiredTokenError) as excinfo:\n claims.validate()\n assert excinfo.match(r\"The token is expired\")", "title": "" }, { "docid": "dccfdfaabacb469c73950615cf019a35", "score": "0.517305", "text": "def get_secret():\n secret_name = \"BNBKey\"\n region_name = \"us-east-2\"\n # Create a Secrets Manager client\n session = boto3.session.Session()\n client = session.client(\n service_name='secretsmanager',\n region_name=region_name\n )\n try:\n get_secret_value_response = client.get_secret_value(\n SecretId=secret_name\n )\n except ClientError as e:\n if e.response['Error']['Code'] == 'DecryptionFailureException':\n raise e\n elif e.response['Error']['Code'] == 'InternalServiceErrorException':\n raise e\n elif e.response['Error']['Code'] == 'InvalidParameterException':\n raise e\n elif e.response['Error']['Code'] == 'InvalidRequestException':\n raise e\n elif e.response['Error']['Code'] == 'ResourceNotFoundException':\n raise e\n else:\n if 'SecretString' in get_secret_value_response:\n secret = get_secret_value_response['SecretString']\n tokenResponseUtf8 = (secret.encode('utf8'))\n jsonNewToken = json.loads(tokenResponseUtf8)\n authToken = jsonNewToken['authToken']\n return authToken\n\n else:\n decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])", "title": "" }, { "docid": "2d902e79863e3d43ac7aa16edfb40edc", "score": "0.5159176", "text": "def verify_token():\n path = 
'config/bot_token.yaml'\n with open(path, 'r') as file:\n config = yaml.load(file, Loader=yaml.Loader)\n VERIFY_TOKEN = config['MESSENGER_VERIFY_TOKEN']\n return VERIFY_TOKEN", "title": "" }, { "docid": "5938035509667faefea1b161f88bc3e4", "score": "0.51480913", "text": "def token_expired(self) -> bool:", "title": "" }, { "docid": "8115d0681b7870448cf2aee587a3a8f2", "score": "0.51457304", "text": "def verify_auth_token(token):\n user = User.verify_auth_token(token)\n\n return user is not None", "title": "" }, { "docid": "2722748fc7d20e4800ce49bf9a8a7f71", "score": "0.514296", "text": "def test_record_validity_secret(self):\n registry = getUtility(IRegistry)\n record_secretkey = registry.records[\n 'niteoweb.ipn.core.validity.secret']\n self.assertEquals(record_secretkey.value, 'secret')", "title": "" }, { "docid": "4b2d6f501ff7a7faeccd7ee8ee3f52e4", "score": "0.51418793", "text": "async def verify_jwt_data_type(token_data: dict, token_type: str) -> None:\n if token_data[\"type\"] != token_type:\n raise WrongTokenError(\"Only {} tokens are allowed\".format(token_type))", "title": "" }, { "docid": "3905087df4ec1077c7867ea643f0699f", "score": "0.5140014", "text": "def __init__(self, token, token_secret, consumer_key, consumer_secret):\r\n self.token = token\r\n self.token_secret = token_secret\r\n self.consumer_key = consumer_key\r\n self.consumer_secret = consumer_secret", "title": "" }, { "docid": "49edd584f01382418e64c7fe116f8dd3", "score": "0.51365477", "text": "def test_desrialize_token(self, token):\n user = User.deserialize_token(token)\n assert user.email == \"admin@localhost\"", "title": "" }, { "docid": "12e5dbe3cbf31fcf206ea0b492569ebd", "score": "0.51347136", "text": "def validate(token):\n return executeSQL('select uid,username, email from users where otp=\"%s\"', True, str(token))", "title": "" }, { "docid": "b0bd1ae5f9cf7d529181e89ed3a6707a", "score": "0.5132976", "text": "def __init__(self, token, token_secret, consumer_key, consumer_secret):\n self.token = token\n self.token_secret = token_secret\n self.consumer_key = consumer_key\n self.consumer_secret = consumer_secret", "title": "" }, { "docid": "67c5fd6e44f1553aee724aaef8a56bb9", "score": "0.51318085", "text": "def test_valid_blacklisted_token_logout(self):\n with self.client:\n RegisterUserApiAction.run(self, user_values.USER_1)\n resp_loginuser = LoginUserApiAction.run(self, user_values.USER_1)\n ChkLoginUserRespAction.run(self, resp_loginuser)\n # blacklist a valid token\n blacklist_token = BlacklistToken(\n token=json.loads(resp_loginuser.data.decode())['auth_token'])\n db.session.add(blacklist_token)\n db.session.commit()\n # blacklisted valid token logout\n resp_logoutuser = LogoutUserApiAction.run(self, resp_loginuser)\n ChkBlacklistLogoutRespAction.run(self, resp_logoutuser)", "title": "" } ]
78810dbe1d7a972670d163c504d04198
Returns whether the protocol of the url is http or https. Returns None if another protocol or no protocol is present in the url.
[ { "docid": "9e0eb7302df8c884ce41107550c7b028", "score": "0.6897188", "text": "def get_protocol(url):\n result = re.search(r\"^https?://\", url)\n return result.group(0) if result else None", "title": "" } ]
[ { "docid": "fb4ea7a5904658bcad37a1c1ff29503a", "score": "0.78023887", "text": "def https(url):\n if url[:8] == 'https://':\n return url\n if url[:7] != 'http://':\n return False\n return 'https://' + url[7:]", "title": "" }, { "docid": "7d9f7a68784ca4580941add2adbfa1d5", "score": "0.74507", "text": "def is_https_url(value):\n url = urlparse(value)\n\n if url.scheme != \"https\":\n raise ValidationError(\"The schema must be HTTPS\")\n\n if url.netloc is None or url.netloc == \"\":\n raise ValidationError(\"The URL is missing the net location\")\n\n return True", "title": "" }, { "docid": "402c39ca736eae6ac2be7058fe2296aa", "score": "0.73608685", "text": "def https_in_url(url):\n return True if url.startswith('https://') else False", "title": "" }, { "docid": "33d7bf593893a30a2bffaa147302c724", "score": "0.70108795", "text": "def validate_url(self, v):\n u = urlparse.urlparse(v)\n if u.scheme.lower() not in ('http', 'https'):\n raise ValueError('URL scheme must be either http:// or https://')\n if not u.netloc:\n raise ValueError('URL must specify a network location.')\n return u.scheme.lower() == 'https'", "title": "" }, { "docid": "7b103a6d2d9018776e20e2153daa865f", "score": "0.6842307", "text": "def protocol(self):\n return 'https' if self.allow_https and self.is_secure else 'http'", "title": "" }, { "docid": "3f3bdda2e49d6e42427404b7eda87643", "score": "0.67717904", "text": "def scheme(self):\n return self.use_ssl and \"https\" or \"http\"", "title": "" }, { "docid": "7f8272f396e675723a4aae00f952256e", "score": "0.6766271", "text": "def url_check(url):\n \n url_tuple = urlparse.urlparse(url)\n if url_tuple[0] == 'http' or url_tuple[0] == 'https' and url_tuple[1] != \"\":\n return url\n else:\n raise Exception('bad url')", "title": "" }, { "docid": "3493cc4bb460980497f4cedda4067a53", "score": "0.66228306", "text": "def has_compatible_scheme(url):\n return url.startswith(('http://', 'https://'))", "title": "" }, { "docid": "6fc50e094b9cdee7d0840b74e31e5bcc", "score": "0.6514369", "text": "def _is_url(s: str) -> bool:\n\n return urlparse(s).netloc != \"\"", "title": "" }, { "docid": "057db4c72b32253eb09f909047aececb", "score": "0.6502479", "text": "def __isUrl(self, url):\n if type(url)==str:\n return url.startswith('http://') or url.startswith('https://')\n return False", "title": "" }, { "docid": "6fc8e96c70f440500e9940b4ba10e415", "score": "0.6501477", "text": "def url_has_netloc(self, url):\n\n parsed_url = urllib.parse.urlparse(url)\n\n if parsed_url.scheme == \"http\":\n port = 80\n elif parsed_url.scheme == \"https\":\n port = 443\n if parsed_url.port:\n port = parsed_url.port\n\n domain = parsed_url.hostname\n if domain:\n if domain != self.domain or port != self.port:\n return False\n return True\n return False", "title": "" }, { "docid": "f85ca1db989c201d5c74d7c0d4ebdf46", "score": "0.64940155", "text": "def is_http_url(string: str) -> bool:\n from urllib.parse import urlparse\n\n parsed_url = urlparse(string)\n return parsed_url.scheme in _http_url_schemes", "title": "" }, { "docid": "a11e03f5e24121d3efb91d72674b5079", "score": "0.63930607", "text": "def is_http(line):\n return line.startswith('http://') or line.startswith('https://')", "title": "" }, { "docid": "e73b48d664952695e10a333d7f757426", "score": "0.6383189", "text": "def get_protocol():\n if https():\n protocol = 'https'\n else:\n protocol = 'http'\n return protocol", "title": "" }, { "docid": "c21d1189a827a71d3efe36b63c61c36b", "score": "0.6334173", "text": "def _isurl(self, path):\n\n # We do this here to reduce 
the 'import numpy' initial import time.\n from urllib.parse import urlparse\n\n # BUG : URLs require a scheme string ('http://') to be used.\n # www.google.com will fail.\n # Should we prepend the scheme for those that don't have it and\n # test that also? Similar to the way we append .gz and test for\n # for compressed versions of files.\n\n scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)\n return bool(scheme and netloc)", "title": "" }, { "docid": "169d8e26ece14f5d212ef87ef01f66d1", "score": "0.63323957", "text": "def get_protocol(self):\n if self.ssl:\n return \"https\"\n else:\n return \"http\"", "title": "" }, { "docid": "ac5640a8d3d3042b7f6214acddefa36d", "score": "0.63268197", "text": "def _is_url(string):\n return \"http\" in string", "title": "" }, { "docid": "e3218f036ba3fa369a7a46ea4eba950c", "score": "0.62968403", "text": "def url_prepare(url):\n if 'http://' in url or 'https://' in url:\n return url\n try:\n if requests.get('https://' + url):\n return 'https://' + url\n except Exception as ex:\n pprint(ex)\n return 'http://' + url", "title": "" }, { "docid": "45cd8478965fd284301d06c4d9f1141f", "score": "0.62713385", "text": "def is_web_url(string):\n assert isinstance(string, basestring)\n parsed_url = urllib.parse.urlparse(string)\n return ((parsed_url.scheme.lower() == 'http' or\n parsed_url.scheme.lower() == 'https') and\n parsed_url.netloc)", "title": "" }, { "docid": "e1e4b7b57bcd342466a0a90e8dd08362", "score": "0.6229657", "text": "def is_url(val):\n res = urlparse(val)\n return bool(res.scheme and res.netloc and res.params == \"\")", "title": "" }, { "docid": "1275b16f8b7db33fdab8a386921fa8be", "score": "0.6213703", "text": "def is_url(url: str) -> bool:\n logger.info(url)\n result = urlparse(url)\n return all([result.scheme, result.netloc])", "title": "" }, { "docid": "2e45ac686a0031b274cbb9c2b7c2adcc", "score": "0.6183041", "text": "def validate_url(path):\n parsed = urlparse(path)\n return bool(parsed.scheme) and bool(parsed.netloc)", "title": "" }, { "docid": "2ad638cb47565f533ed64dbf7554c8d5", "score": "0.6173243", "text": "def filter_ssl(request):\n if request.scheme == 'https':\n return True\n else:\n return False", "title": "" }, { "docid": "893887d3a040c2f057e55a2538a12c46", "score": "0.6144087", "text": "def is_http_url(form, value):\n scheme, netloc = urlparse.urlparse(value)[:2]\n if scheme not in ('http', 'https') or not netloc:\n raise forms.ValidationError(_(u'A valid HTTP URL is required.'))", "title": "" }, { "docid": "c325f44fa8fae489b872d9a876998095", "score": "0.61421967", "text": "def is_url(url):\n if '://' not in url:\n return False\n proto, addr = url.split('://', 1)\n if proto.lower() not in ['tcp','pgm','epgm','ipc','inproc']:\n return False\n return True", "title": "" }, { "docid": "b9ee3fa937df3924a50ea143d8c81b5c", "score": "0.6127574", "text": "def remote(self):\r\n return self._url.scheme in ('http', 'https')", "title": "" }, { "docid": "420218927be02575f9b098d27799d902", "score": "0.6118695", "text": "def get_http_protocol(self):\n if self.cfg.ssl:\n return \"https\"\n else:\n return \"http\"", "title": "" }, { "docid": "0e152a6c41fed2c61a7d212f18005272", "score": "0.606771", "text": "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return parse_url.netloc.endswith('slideshare.net')", "title": "" }, { "docid": "560e6d32cb9c3448d2010d8f6e6439c8", "score": "0.6063335", "text": "def filter_nossl(request):\n if request.scheme == 'http':\n return True\n else:\n return 
False", "title": "" }, { "docid": "75b4ba54176cc0bcd3a86c182db724ee", "score": "0.6025173", "text": "def query_scheme(self):\n\n return 'https'", "title": "" }, { "docid": "2b77713448a1d72cbd64981d75fc17b9", "score": "0.60149956", "text": "def is_url(self, location):\n\n return bool(urlparse.urlparse(location).netloc)", "title": "" }, { "docid": "7866d7c561ddb34272108977fb9b43e3", "score": "0.60031843", "text": "def _is_checksum_url(checksum):\n if (checksum.startswith('http://') or checksum.startswith('https://')):\n return True\n else:\n return False", "title": "" }, { "docid": "ef6d67486c9492f0fd46c5f23089fe2e", "score": "0.5991925", "text": "def is_url(url: str):\n # https://stackoverflow.com/a/17773849/8314159\n return re.search(r\"(https?://(?:www\\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|www\\.[a-zA-Z0-9]\"\n r\"[a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|https?://(?:www\\.|(?!www))[a-zA-Z0-9]+\\.[^\\s]{2,}\"\n r\"|www\\.[a-zA-Z0-9]+\\.[^\\s]{2,})\", url)", "title": "" }, { "docid": "9427df5a3236dc9d0d764f6281e4a89d", "score": "0.59629", "text": "def is_url(obj):\n try:\n result = urlparse(obj)\n return all([result.scheme, result.netloc, result.path])\n except Exception:\n return False", "title": "" }, { "docid": "a88fc7c1306be939b7887ad70cdfb918", "score": "0.59568524", "text": "def is_purl(val):\n res = urlparse(val)\n purl_netlocs = [\n \"purl.org\",\n \"purl.oclc.org\",\n \"purl.net\",\n \"purl.com\",\n \"purl.fdlp.gov\",\n ]\n return (\n res.scheme in [\"http\", \"https\"]\n and res.netloc in purl_netlocs\n and res.path != \"\"\n )", "title": "" }, { "docid": "31a3345cf49ddc0f34ffe55f0bc4f564", "score": "0.59468013", "text": "def __find_protocol(self, url):\n match = self.__REGEX_SCHEMA.search(url)\n if match:\n protocol = match.group(0).split(':')[0]\n return protocol\n return None", "title": "" }, { "docid": "05416d40636f7bd698c653c706c04a17", "score": "0.5937591", "text": "def verify_url(url: str) -> bool:\n parsed_url = urlparse(url)\n return all([parsed_url.scheme, parsed_url.netloc])", "title": "" }, { "docid": "3e8f9189b2b45f510e344202da1ea330", "score": "0.5918264", "text": "def https_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"https_only\")", "title": "" }, { "docid": "3e8f9189b2b45f510e344202da1ea330", "score": "0.5918264", "text": "def https_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"https_only\")", "title": "" }, { "docid": "3e8f9189b2b45f510e344202da1ea330", "score": "0.5918264", "text": "def https_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"https_only\")", "title": "" }, { "docid": "5a813ed4fc43ae86c4a09ab60c488f5a", "score": "0.58861464", "text": "def url_checker(url):\n if url.startswith(http_req):\n url_name = url[7:]\n # print('URL check passed. Using http')\n return url_name\n if url.startswith(https_req):\n url_name = url[8:]\n # print('URL check passed. Using https')\n return url_name\n else:\n print('URL check failed. 
not valid http or https URL')\n print(f'Bad URL:{url}')\n sys.exit()\n # return False", "title": "" }, { "docid": "f37a5e36efc896b7bd2732f17c489eab", "score": "0.58830005", "text": "def _valid_protocol_type(protocol):\n\n if protocol == 'ssh' or protocol == 'https':\n return True\n\n return False", "title": "" }, { "docid": "3497b57708c5230b19a5993b0d3474cb", "score": "0.5864818", "text": "def isRelURL(self, url):\n (scheme, netloc) = urlparse(url)[0:2]\n return not scheme and not netloc", "title": "" }, { "docid": "41ef470f1db0d800ee9e42a4ed089fc2", "score": "0.58482736", "text": "def sanitize_url(url: str, protocol: str = 'https://') -> str:\n sanitized = url[0:-1] if url[-1] == '/' else url\n with_protocol = sanitized if sanitized.startswith('http') else f'{protocol}{sanitized}'\n return with_protocol", "title": "" }, { "docid": "99a4a25964ddfece74292a7eda8410a7", "score": "0.58227414", "text": "def is_web_url(text):\r\n return re.match(r'(http://|https://|www.)(www\\.)?([a-zA-Z0-9-_.]+)(\\.[a-zA-Z0-9]{2,4})(\\S+)', text)", "title": "" }, { "docid": "39d054d8ef9f6e90ba4699c6ab4cfabe", "score": "0.57922304", "text": "def validate_url (url):\n # Parse URL provided\n v = urlparse(url)\n\n # Verify if protocol (http, https, ftp) and hostname are present \n # in the URL provided.\n if v.scheme and v.hostname:\n \n # Get URL base and hostname to form the correct URL base\n u = v.scheme + '://' + v.hostname + '/'\n return u\n\n else:\n # Not a valid URL\n return False", "title": "" }, { "docid": "4a5cf630055acedbde70714d4e1daffc", "score": "0.5768368", "text": "def test_url_add_missing_protocol(self):\n assert ct.url_add_missing_protocol(\"https://www.bad-actor.services/\") == \"https://www.bad-actor.services/\"\n assert ct.url_add_missing_protocol(\"www.bad-actor.services/\") == \"http://www.bad-actor.services/\"\n assert ct.url_add_missing_protocol(\"http://www.bad-actor.services/\") == \"http://www.bad-actor.services/\"\n assert ct.url_add_missing_protocol(\n \"www.bad-actor.services/\",\n default=\"https\") == \"https://www.bad-actor.services/\"", "title": "" }, { "docid": "935d27eba29bdc7994c1d5d31621acd4", "score": "0.5706432", "text": "def is_secure_transport(request: Request) -> bool:\n if request.settings.INSECURE_TRANSPORT:\n return True\n return request.url.lower().startswith(\"https://\")", "title": "" }, { "docid": "202d0475c4160e537768f6682e567add", "score": "0.5691122", "text": "def check_url(url=None, parse_url=None):\n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n unsupported = ['twitcam.', 'new.']\n return parse_url.netloc.endswith('livestream.com')\\\n and not any(x in parse_url.netloc for x in unsupported)\\\n and len(parse_url.path.split('/')) > 2", "title": "" }, { "docid": "629c61ffa3fa33e39261ca9fa66d5fef", "score": "0.5684117", "text": "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n if url:\n parse_url = urlparse.urlparse(url)\n \n #yt_domains = ['youtube.com', 'youtube-nocookie.com', 'youtu.be', 'youtube.googleapis.com']\n #return any(parse_url.netloc.endswith(yt) for yt in yt_domains)\n return re.search('^(.+\\.)*(youtube(-nocookie|\\.googleapis)?.com|youtu.be)+$', parse_url.netloc)", "title": "" }, { "docid": "2ffe7febb9149c4a0dd1841ae27e9186", "score": "0.566871", "text": "def test_url():\n assert is_url(None) is None\n assert is_url('https://t.c') is None\n assert is_url('http://t.c/t.html?t=b#s') is None\n assert is_url('//t.c') is None\n assert is_url('//t.c/t.html?t=b#s') is None\n assert is_url('https://t')\n assert 
is_url('http://t.')\n assert is_url('//t')\n assert is_url('//t.')", "title": "" }, { "docid": "5ca4c27339b168a6960a987f85c4e162", "score": "0.56645155", "text": "def test_http_url_redirects_to_https(self,pagename):\n\n url = '%s/%s' % (self.http_authority,pagename)\n\n po = self.catalog.load_pageobject('GenericPage')\n po.goto_page(url)\n\n loc = urlparse.urlsplit(po.current_url())\n assert loc.scheme == 'https', \\\n \"After loading %s, scheme is: %s, expected: 'https'\" \\\n % (url, loc.scheme)", "title": "" }, { "docid": "8c02dc34a1f7ed4eba716b2789a158b2", "score": "0.56589687", "text": "def app_protocol(self):\n if settings.INAPP_REQUIRE_HTTPS:\n return 'https'\n else:\n return 'https' if self.is_https else 'http'", "title": "" }, { "docid": "334f00c0e7e0728ece80b45e48078af8", "score": "0.5657184", "text": "def ensure_scheme(url: str, default: str = 'http') -> str:\n\n if not url:\n return url\n\n # purl (or to be precise urlparse) will parse empty host names ('abc.xyz')\n # wrongly, assuming the abc.xyz is a path. by adding a double slash if\n # there isn't one already, we can circumvent that problem\n if '//' not in url:\n url = '//' + url\n\n _url = URL(url)\n\n if _url.scheme():\n return url\n\n return _url.scheme(default).as_string()", "title": "" }, { "docid": "b69607a03703cd51d3e46a896f97e8fb", "score": "0.5650579", "text": "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return parse_url.netloc == 'dailymotion.com' or parse_url.netloc.endswith('.dailymotion.com')", "title": "" }, { "docid": "4d341f3b529873ad64b99428df8207b8", "score": "0.56201154", "text": "def parseTOURL(url:str, verify:bool) -> typing.Tuple[bool, str, int]:\n\turl = url.rstrip('/')\n\n\tuseSSL, host, port = True, None, 443\n\n\ttry:\n\t\t_ = requests.head(url, verify=verify)\n\texcept requests.exceptions.RequestException as e:\n\t\traise ValueError(\"Cannot contact any server at '%s' (%s)\" % (url, e)) from e\n\n\tif url.lower().startswith(\"http://\"):\n\t\tport = 80\n\t\tuseSSL = False\n\t\turl = url[7:]\n\telif url.lower().startswith(\"https://\"):\n\t\turl = url[8:]\n\n\t# I'm assuming here that a valid FQDN won't include ':' - and it shouldn't\n\tportpoint = url.find(':')\n\tif portpoint > 0:\n\t\thost = url[:portpoint]\n\t\tport = int(url[portpoint+1:])\n\telse:\n\t\thost = url\n\n\treturn useSSL, host, port", "title": "" }, { "docid": "16c2aba1b231d920fbe37f8584e82f98", "score": "0.560501", "text": "def use_https(url, timeout=60):\n \n try:\n response = requests.get(url, timeout=timeout, verify=True)\n if 'https://' in response.url: return True\n\n return False\n except:\n # Defaulting to 'False', probably the web server don't know what to do\n print('Warning: The HTTPS request to {url} failed, assuming a \\'False\\''.format(url=url))\n return False", "title": "" }, { "docid": "ba61659066a54d77f821f4913bbf39a6", "score": "0.5600158", "text": "def _match(cls, url, **kwargs):\n return url.scheme.startswith('http')", "title": "" }, { "docid": "755db18d23e18fa8533f2e7fd5c5f19b", "score": "0.5592997", "text": "def extract_scheme(url):\n return urlsplit(url, \"http\").scheme", "title": "" }, { "docid": "5764fcf04872996367277d786373f74f", "score": "0.5587054", "text": "def ssl(self) -> Optional[bool]:\n return pulumi.get(self, \"ssl\")", "title": "" }, { "docid": "15e7064a0ee0f0604ee6b15f10771bdf", "score": "0.55801696", "text": "def is_ssl(self):\n return self._is_ssl", "title": "" }, { "docid": "11f97272d36eb9bf4b1f78919ebc5dc3", "score": 
"0.55553764", "text": "def protocol(self, code: str) -> str:\n return 'https'", "title": "" }, { "docid": "183ac7507d4714daebe58062a7901df7", "score": "0.5549035", "text": "def is_url(string):\n try:\n urlparse(string)\n return True\n except:\n return False", "title": "" }, { "docid": "093d5469f1460ecb7b66435f25bfc800", "score": "0.5548476", "text": "def test_host_ssl(self):\n url = create_url(host=\"www.example.com\", ssl=True, scheme_ssl=\"https\")\n self.assertEqual(url, \"https://www.example.com\")", "title": "" }, { "docid": "d98a25de5dd4a38790e128003d3dda1b", "score": "0.55439645", "text": "def url_validator(url: str) -> bool:\n import re\n regex = re.compile(\n r'^(?:http|ftp)s?://'\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'\n r'localhost|'\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})'\n r'(?::\\d+)?'\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n return re.match(regex, url) is not None", "title": "" }, { "docid": "67cb6883b30f41bd20ea96b83296baa1", "score": "0.55429816", "text": "def ssl(self):\n return self.protocol != \"SASL_PLAINTEXT\"", "title": "" }, { "docid": "0ce6281bd16181aac7dd98cf418e615a", "score": "0.55422425", "text": "def validate_url(url):\n\n # Minimal URL validation with urlparse. This is extremely lenient, we might\n # want to use something like https://github.com/kvesteri/validators instead.\n parsed_url = urlparse(url)\n\n if not parsed_url.scheme:\n parsed_url = urlparse(\"http://\" + url)\n\n if not re.match(\"https?\", parsed_url.scheme):\n raise ValueError('Links must have an \"http\" or \"https\" prefix')\n\n if not parsed_url.netloc:\n raise ValueError(\"Links must include a domain name\")\n\n return parsed_url.geturl()", "title": "" }, { "docid": "c310910ba2f61c120b613be89a685d25", "score": "0.55385435", "text": "def word_is_url(word):\n match = URL_REGEX.search(word)\n return True if match is not None else False", "title": "" }, { "docid": "c2a0c87ff5b6a942764e99e5c14a9377", "score": "0.5532408", "text": "def is_valid(url):\n parsed = urlparse(url)\n return bool(parsed.netloc) and bool(parsed.scheme)", "title": "" }, { "docid": "c2a0c87ff5b6a942764e99e5c14a9377", "score": "0.5532408", "text": "def is_valid(url):\n parsed = urlparse(url)\n return bool(parsed.netloc) and bool(parsed.scheme)", "title": "" }, { "docid": "9343371e558fc7fdcc107bf5f50b783c", "score": "0.5513516", "text": "def check_url(url=None, parse_url=None):\n return False", "title": "" }, { "docid": "789ef0a74e8371fb7421a607659dc7ae", "score": "0.55131584", "text": "def isUrlScheme(urlScheme):\n if not urlScheme:\n return False\n \n #an urlscheme can be anything that starts with alfanumeric charaters followed by a colon. 
\n pattern = re.compile('^[a-zA-Z][+a-zA-Z0-9.-]*:$')\n return bool(pattern.search(str(urlScheme)))", "title": "" }, { "docid": "1e2edd3035812203132029c1d99a32b6", "score": "0.5507019", "text": "def is_url(url):\n return re.search(r\"^[a-zA-Z][-+\\.\\w]*://[^\\s]+$\", url) is not None and url[:4] != 'uuid'", "title": "" }, { "docid": "b4f5d5a801e2157b54369cea1f54463b", "score": "0.5503125", "text": "def _validate_url(url):\n if urlparse.urlparse(url).scheme not in VALID_SCHEMES:\n _fail(url, \"Invalid URL\")", "title": "" }, { "docid": "cc36788467c5d639a72268f09984233c", "score": "0.55029553", "text": "def is_valid_url(url: str) -> bool:\n try:\n result = urlparse(url)\n return all([result.scheme, result.netloc])\n except ValueError:\n return False", "title": "" }, { "docid": "f2c8b47825e4f98174eed51a868d083a", "score": "0.54815024", "text": "def insecure_ssl(self):\n # type: () -> bool\n return self._insecure_ssl", "title": "" }, { "docid": "79e071695cce234761a0502821aea46f", "score": "0.5446007", "text": "def get_url_components(self, url):\n if 'http://' not in url and 'https://' not in url:\n print(\"Protocol not found, skipping: \" + url)\n return False\n if url[:7] == 'http://':\n protocol = url[:7]\n file_path = url[7:]\n elif url[:8] == 'https://':\n protocol = url[:8]\n file_path = url[8:]\n else:\n print(\"Error when parsing protocol. Skipping: \" + url)\n return False\n # Split the string from the last '/'.\n # To do this, we reverse the string, split from the first '/' and\n # then reverse them both back.\n filename, root_and_directory = [x[::-1] for x in file_path[::-1].split('/', 1)]\n # Replace the lost '/'\n root_and_directory = root_and_directory + '/'\n root, directory = root_and_directory.split('/', 1)\n directory = '/' + directory\n return [protocol, root, directory, filename]", "title": "" }, { "docid": "b3714f7373959b816603d09ab961a683", "score": "0.54393023", "text": "def is_url(url):\n\n return bool(re.match(re_url, url))", "title": "" }, { "docid": "02143b05d9a37228a6fc7ef42fc9caec", "score": "0.5428469", "text": "def validate_url(url, allow_invalid=False):\n val = URLValidator(schemes=[\"http\", \"https\"])\n\n try:\n val(url)\n return url\n except:\n pass\n\n if allow_invalid and \"://\" in url:\n parts = url.split(\"://\")\n # In case of \"http://https://example.com\" this will take the\n # \"https://\" part and not the \"http://\" part.\n if parts[-2] == \"http\" or parts[-2] == \"https\":\n return \"%s://%s\" % (parts[-2], parts[-1])\n\n try:\n val(\"http://%s\" % url)\n return \"http://%s\" % url\n except:\n pass", "title": "" }, { "docid": "9c6c83abc63c3bd7256d902489e4ecb4", "score": "0.54167426", "text": "def endpoint_checker(url):\r\n if \"/arcgis/rest/services/\" and \"http\" in url:\r\n return True\r\n return False", "title": "" }, { "docid": "eea80f2e629213d889748cd7de1efc5e", "score": "0.5413371", "text": "def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):\n if url is not None:\n url = url.strip()\n if not url:\n return False\n if allowed_hosts is None:\n allowed_hosts = set()\n elif isinstance(allowed_hosts, str):\n allowed_hosts = {allowed_hosts}\n # Chrome treats \\ completely as / in paths but it could be part of some\n # basic auth credentials so we need to check both URLs.\n return _url_has_allowed_host_and_scheme(\n url, allowed_hosts, require_https=require_https\n ) and _url_has_allowed_host_and_scheme(\n url.replace(\"\\\\\", \"/\"), allowed_hosts, require_https=require_https\n )", "title": "" }, { "docid": 
"eea80f2e629213d889748cd7de1efc5e", "score": "0.5413371", "text": "def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):\n if url is not None:\n url = url.strip()\n if not url:\n return False\n if allowed_hosts is None:\n allowed_hosts = set()\n elif isinstance(allowed_hosts, str):\n allowed_hosts = {allowed_hosts}\n # Chrome treats \\ completely as / in paths but it could be part of some\n # basic auth credentials so we need to check both URLs.\n return _url_has_allowed_host_and_scheme(\n url, allowed_hosts, require_https=require_https\n ) and _url_has_allowed_host_and_scheme(\n url.replace(\"\\\\\", \"/\"), allowed_hosts, require_https=require_https\n )", "title": "" }, { "docid": "e9abeeeaf0a08b187e255f62ec01b7dc", "score": "0.54085594", "text": "def is_server_address(\n address: str, additional_schemes: Iterable[str] = ()) -> bool:\n schemes = {\"http\", \"https\"}\n if additional_schemes:\n schemes.update(additional_schemes)\n try:\n pieces = urlparse(address)\n scheme = pieces.scheme.lower()\n return scheme in schemes and pieces.netloc is not None\n except Exception: # pylint: disable=broad-except\n return False", "title": "" }, { "docid": "84740074a1d272ad502ab31033eb0205", "score": "0.5405233", "text": "def is_secure(self):\n return self._is_ssl or self._is_socket", "title": "" }, { "docid": "d8b19be5cc578aef7bbdc8b7c5c9fb1b", "score": "0.5380352", "text": "def is_url(self, url):\n return self.is_regex_url(url, self.is_url_regex)", "title": "" }, { "docid": "7e7118fdbb940db23c9bea2dce011da0", "score": "0.53755367", "text": "def same_origin(url1, url2):\n p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)\n return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)", "title": "" }, { "docid": "5b26ecfc6f5b207baa9f48a2015ae565", "score": "0.5371835", "text": "def _detect_upgrade_http_to_https(self, old_domains, new_domain):\n is_upgrade = False\n for old_domain in old_domains:\n if old_domain.domain == new_domain.domain:\n if (\n old_domain.protocol == 'http' and\n new_domain.protocol == 'https'\n ):\n is_upgrade = True\n break\n return is_upgrade", "title": "" }, { "docid": "953d7d3dc98bdbc75493deace2c3377a", "score": "0.5364087", "text": "def validate_url(path: str):\n regex = re.compile(\n r\"^(?:http|ftp)s?://\" # http:// or https://\n r\"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|\" # domain...\n r\"localhost|\" # localhost...\n r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\" # ...or ip\n r\"(?::\\d+)?\" # optional port\n r\"(?:/?|[/?]\\S+)$\",\n re.IGNORECASE,\n )\n return re.match(regex, path) is not None", "title": "" }, { "docid": "93bf185d843cb3a91e9fa813a798b7f8", "score": "0.53475755", "text": "def _validate_url(server):\n parsed_url = urlparse(server)\n if not all((parsed_url.scheme, parsed_url.netloc)):\n raise ValueError('invalid server url')\n return server", "title": "" }, { "docid": "62df434ee27a222a283ad6965b664d28", "score": "0.5275046", "text": "def test_no_url_or_appid_passed_in_and_is_ssl(self):\n gae_req = AppEngineRequest(use_ssl=True)\n\n url = gae_req.build_url()\n\n self.assertEqual(url, \"https://localhost/\")", "title": "" }, { "docid": "c736a3e846294376ed14ce556aeb12da", "score": "0.52667576", "text": "def check_url(url=None, parse_url=None):\n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return ('vzaar.com' in parse_url.netloc or 'vzaar.tv' in parse_url.netloc)", "title": "" }, { "docid": "42aa654ee707b1a469d035ebad7f2526", "score": "0.5265246", 
"text": "def is_url_requirement(ireq):\n return bool(ireq.original_link)", "title": "" }, { "docid": "f827f09f952167a1a3575d2227b4cddf", "score": "0.5259399", "text": "def valid_url(url):\n url_regex = re.compile(r\"https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|www\\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9]\\.[^\\s]{2,}|www\\.[a-zA-Z0-9]\\.[^\\s]{2,}\")\n return url and url_regex.match(url)", "title": "" }, { "docid": "31192eb62f13bfe25adf53c6645a3cee", "score": "0.5257317", "text": "def parse_url(url):\n loc = urlparse(url)\n\n # if the scheme (http, https ...) is not available urlparse wont work\n if loc.scheme == \"\":\n url = \"http://\" + url\n loc = urlparse(url)\n return loc", "title": "" }, { "docid": "d07fe4dd497071e6d3c5638277538ab0", "score": "0.52384025", "text": "def can_https(tls_ver):\n output = True\n\n # check python version\n if sys.version_info < (3, 6): #modify from 3, 7 to 3, 6\n _LOGGER.error(\"PyISY cannot use HTTPS: Invalid Python version. See docs.\")\n output = False\n\n # check that Python was compiled against correct OpenSSL lib\n if \"PROTOCOL_TLSv1_1\" not in dir(ssl):\n _LOGGER.error(\n \"PyISY cannot use HTTPS: Compiled against old OpenSSL \"\n + \"library. See docs.\"\n )\n output = False\n\n # check the requested TLS version\n if tls_ver not in [1.1, 1.2]:\n _LOGGER.error(\n \"PyISY cannot use HTTPS: Only TLS 1.1 and 1.2 are supported \"\n + \"by the ISY controller.\"\n )\n output = False\n\n return output", "title": "" }, { "docid": "c4cfa82e2b0172fcd8422b617e4afe92", "score": "0.52169234", "text": "def test_is_url(self):\n\n url = \"https://shadowrun.needs.management\"\n self.assertTrue(run(verification.is_url(url)))\n\n url = \"https:// www.google.com\"\n self.assertFalse(run(verification.is_url(url)))", "title": "" }, { "docid": "0643d3a141e348d14464c68f7e6177a3", "score": "0.5214985", "text": "def same_domain(url1, url2):\n return urlparse.urlparse(url1).netloc == urlparse.urlparse(url2).netloc", "title": "" }, { "docid": "78cbfc772815b00464ada7d93683d820", "score": "0.52100354", "text": "def validate_base_url(base_url):\n parsed_url = urllib.parse.urlparse(base_url)\n if parsed_url.scheme and parsed_url.netloc:\n return parsed_url.geturl()\n else:\n error_message = \"base_url must contain a valid scheme (protocol \" \\\n \"specifier) and network location (hostname)\"\n raise ciscosparkapiException(error_message)", "title": "" }, { "docid": "1a5fbe016e0ff3b0b083cda604de33af", "score": "0.52058184", "text": "def check_url_kwarg_for_http(function):\n @functools.wraps(function)\n def replacement(*args, **kwargs):\n url = kwargs.get('url')\n if url and url.startswith(\"http://\"):\n logging.warn('SECURITY : fetching non-HTTPS url %s' % url)\n return function(*args, **kwargs)\n return replacement", "title": "" }, { "docid": "aa12382e6f6313fe6a7d911c720dc453", "score": "0.5200095", "text": "def url_fix_common_typos(url):\n if url.startswith(\"http//\"):\n url = \"http://\" + url[6:]\n elif url.startswith(\"https//\"):\n url = \"https://\" + url[7:]\n return url", "title": "" } ]
5a142a1cebc1ed0adef91f854e0c3ce4
the actual registering of an endpoint is done here.
[ { "docid": "f9048ac479685c39a2c61886bed642a8", "score": "0.65512407", "text": "def _register_endpoint(self, name, **config):\n\n if self.type == self.REDIRECT_ENDPOINT:\n raise ImproperlyConfigured, 'redirect Endpoint \"%s\" can not register endpoints' % self.name\n\n if name in self._endpoints:\n if config.get('type') not in [self.FILTER_ENDPOINT, self.SELECTOR_ENDPOINT]:\n raise ImproperlyConfigured, 'endpoint \"%s\" found twice' % name\n if (any(endpoint.pattern == config.get('pattern') for endpoint in self._endpoints[name])):\n raise ImproperlyConfigured, 'filtered Endpoint \"%s\" with pattern \"%s\" found twice' % (self.name, config.get('pattern'))\n\n EndpointClass = config.pop('endpoint_class', ApiEndpoint)\n\n if type(config.get('view')) == type(ViewClass):\n config['view_class'] = config.pop('view')\n\n elif config.get('view_config'):\n view_config = config.get('view_config')\n config['view_class'] = EndpointClass.generate_auto_view(\n serializer_cls=view_config.get('serializer_class', None),\n serializer_model=config.get('serializer_model', config.get('model', None)),\n api_root=self.root,\n view_config=view_config,\n endpoint_links=self.links\n )\n\n if 'view_class' in config and getattr(config['view_class'], 'use_endpoint_filter', False):\n if not hasattr(config['view_class'], 'filter_backends'):\n config['view_class'].filter_backends = ()\n config['view_class'].filter_backends += (EndpointFilterBackend, )\n\n endpoint = EndpointClass(\n name=name,\n **config\n )\n\n self.register(endpoint, **config)\n\n return endpoint", "title": "" } ]
[ { "docid": "11ed93be0cd90e54073d90d0a452e2c0", "score": "0.7848222", "text": "def register(endpoint, procedure = None, options = None):", "title": "" }, { "docid": "d57ff7b5b76c5233c71b72af4561a897", "score": "0.717257", "text": "def register(self, endpoint, **config):\n name = endpoint.name\n\n if endpoint.root:\n endpoint=copy.deepcopy(endpoint)\n\n\n endpoint.initialize(self.root, self if self.root is not self else None)\n\n\n self.root.register_view_name(endpoint.view_name, identifier=endpoint.get_complete_namespace(regular=True))\n\n # this way two endpoints can be named the same way,\n # but e.g. be different in their filter-pattern\n if not name in self._endpoints:\n self._endpoints[name] = []\n\n self._endpoints[name].append(endpoint)\n\n self._endpoint_registry[name] = self._endpoint_registry.get(name, config.pop('active', True))\n\n return self", "title": "" }, { "docid": "87da325bbbf102bca4c2f24c3faf662a", "score": "0.68275744", "text": "def register(self):\n pass", "title": "" }, { "docid": "2164afc8819e379ae16ae399ae3829d9", "score": "0.666469", "text": "def add_endpoint(self, ip):\n pass", "title": "" }, { "docid": "864449f0bc61a65070b0d82c44126704", "score": "0.6614792", "text": "def add_endpoint(self, endpoint, callback):\n if not endpoint in self.endpoints:\n self.endpoints[endpoint] = []\n self.app.logger.debug(\"Add callback to endpoint %s\" % endpoint)\n self.endpoints[endpoint].append(callback)", "title": "" }, { "docid": "824d682aaf1a994d663dfd6f2f7aeb7a", "score": "0.65829116", "text": "def register_endpoint(self, name: str, kind: str = \"\") -> HelicsEndpoint:\n ep = helicsFederateRegisterEndpoint(self, name, kind)\n self.endpoints[ep.name] = ep\n return ep", "title": "" }, { "docid": "a2fc32440e3a32e7398cd32369ef84d4", "score": "0.65212566", "text": "def register_endpoint(self, name, **config):\n #if not (\"view\" in config):\n # # e.g. 
@register_endpoint.***('somename') or @register_endpoint.***(name='somename',..)\n # def dec(view):\n # if not view:\n # raise ImproperlyConfigured, 'view argument missing'\n # return getattr(self, 'register_endpoint')(name, view=view, **kwargs)\n # return dec\n link = config.pop('link_as', None)\n\n endpoint = self._register_endpoint(name, **config)\n \n if link:\n link_name, link_endpoint = link, endpoint\n self.links[link_name] = link_endpoint\n #raise Exception(self.links, link, endpoint.as_url())\n #if self.parent and self.parent.links:\n # self.parent.links[link_name] = link_endpoint\n\n return endpoint", "title": "" }, { "docid": "e7383995b283fd1f69dc6029f171eea5", "score": "0.6491289", "text": "def endpoint(self):", "title": "" }, { "docid": "6aa2125eb3a0ee3154b1a991188dba38", "score": "0.6477436", "text": "def Register():", "title": "" }, { "docid": "ac6f935ae0cec5a3b99222df4e1aa2b3", "score": "0.6435647", "text": "def helicsFederateRegisterGlobalEndpoint(fed: HelicsFederate, name: str, type: str = \"\") -> HelicsEndpoint:\n f = loadSym(\"helicsFederateRegisterGlobalEndpoint\")\n err = helicsErrorInitialize()\n result = f(fed.handle, cstring(name), cstring(type), err)\n if err.error_code != 0:\n raise HelicsException(\"[\" + str(err.error_code) + \"] \" + ffi.string(err.message).decode())\n else:\n return HelicsEndpoint(result, cleanup=False)", "title": "" }, { "docid": "e8090a8dcac3aa851a1b7a2d5a7cb864", "score": "0.642549", "text": "def register(self):\n raise NotImplementedError()", "title": "" }, { "docid": "3423f4e9ee19cecca9a333d52ff0a686", "score": "0.63670987", "text": "def register_endpoint(self, app_name, service_name, host, port):\n zk = self._connect()\n try:\n zk.ensure_path('/'.join([\n self.NAMESPACE,\n app_name,\n service_name,\n self.ENDPOINTS,\n host + ':' + str(port)\n ]))\n finally:\n self._disconnect(zk)", "title": "" }, { "docid": "2b55fd78924e17e7e542cce35edee770", "score": "0.63623315", "text": "def activate(self, name):\n self._endpoint_registry[name] = True", "title": "" }, { "docid": "215fd42a5952d89a0318ea54b8c01d7c", "score": "0.63400316", "text": "def helicsFederateRegisterEndpoint(fed: HelicsFederate, name: str, type: str) -> HelicsEndpoint:\n f = loadSym(\"helicsFederateRegisterEndpoint\")\n err = helicsErrorInitialize()\n result = f(fed.handle, cstring(name), cstring(type), err)\n if err.error_code != 0:\n raise HelicsException(\"[\" + str(err.error_code) + \"] \" + ffi.string(err.message).decode())\n else:\n return HelicsEndpoint(result, cleanup=False)", "title": "" }, { "docid": "089c1f182b92878c29fb68de9ef9b9f5", "score": "0.63294774", "text": "def add_endpoint(self, endpoint):\n self._endpoints.append(endpoint)", "title": "" }, { "docid": "2ddc6b4aac651821ec0aed537b651f86", "score": "0.630076", "text": "def register_endpoint(self, name, endpoint_uuid, description=None):\n data = {\"endpoint_name\": name, \"endpoint_uuid\": endpoint_uuid, \"description\": description}\n\n r = self.post(self.ep_registration_path, json_body=data)\n if r.http_status is not 200:\n raise Exception(r)\n\n # Return the result\n return r.data", "title": "" }, { "docid": "b50ba9e479b478019156d61b67ba5b58", "score": "0.6291955", "text": "def _register(app, router, base_path, **config):\n predicates = config['predicates']\n# _signin_resource(app, router, base_path, predicates)\n# _base_resource(app, router, base_path, predicates)\n #_listing_resource(app, router, base_path, predicates)\n #_item_resource(app, router, base_path, predicates)\n# _add_item_resource(app, router, 
base_path, predicates)\n _static_resources(app, router, base_path, predicates)", "title": "" }, { "docid": "5d49328cc47e60baea247b428eb8580b", "score": "0.62733793", "text": "def helicsFederateRegisterGlobalTargetedEndpoint(fed: HelicsFederate, name: str, type: str):\n f = loadSym(\"helicsFederateGlobalRegisterTargetedEndpoint\")\n err = helicsErrorInitialize()\n result = f(fed.handle, cstring(name), cstring(type), err)\n if err.error_code != 0:\n raise HelicsException(\"[\" + str(err.error_code) + \"] \" + ffi.string(err.message).decode())\n else:\n return HelicsEndpoint(result, cleanup=False)", "title": "" }, { "docid": "bca19b7bc4d53f91b30bc8c23f964f04", "score": "0.6273021", "text": "async def set_endpoint(\n endpoint: Endpoint, current_user: User = Depends(auth.get_current_user),\n):\n mongo_processor.add_endpoints(\n endpoint.dict(), current_user.get_bot(), current_user.get_user()\n )\n return {\"message\": \"Endpoint saved successfully!\"}", "title": "" }, { "docid": "da6827a87d7ad2841e983af08933b6a8", "score": "0.6269947", "text": "def helicsFederateRegisterTargetedEndpoint(fed: HelicsFederate, name: str, type: str):\n f = loadSym(\"helicsFederateRegisterTargetedEndpoint\")\n err = helicsErrorInitialize()\n result = f(fed.handle, cstring(name), cstring(type), err)\n if err.error_code != 0:\n raise HelicsException(\"[\" + str(err.error_code) + \"] \" + ffi.string(err.message).decode())\n else:\n return HelicsEndpoint(result, cleanup=False)", "title": "" }, { "docid": "897cdf164adef633b43fcfeb0e4eb65f", "score": "0.62337583", "text": "def register(self, *args, **kwargs):\n raise NotImplementedError()", "title": "" }, { "docid": "cf9192dc52dbbd7e012b888ac0ada551", "score": "0.622512", "text": "def register(self, name):\n pass", "title": "" }, { "docid": "6305a819f331f87a36a6c694bbe1bf21", "score": "0.62247133", "text": "def POST_register(self, *args, **kwargs):\r\n return self._handle_register(*args, **kwargs)", "title": "" }, { "docid": "a08668ede3334114a619717557e1246d", "score": "0.61964715", "text": "async def create_endpoint(self, endpoint: str,\n traffic_dict: Dict[str, float], route,\n methods) -> None:\n async with self.write_lock:\n # If this is a headless endpoint with no route, key the endpoint\n # based on its name.\n # TODO(edoakes): we should probably just store routes and endpoints\n # separately.\n if route is None:\n route = endpoint\n\n # TODO(edoakes): move this to client side.\n err_prefix = \"Cannot create endpoint.\"\n if route in self.current_state.routes:\n\n # Ensures this method is idempotent\n if self.current_state.routes[route] == (endpoint, methods):\n return\n\n else:\n raise ValueError(\n \"{} Route '{}' is already registered.\".format(\n err_prefix, route))\n\n if endpoint in self.current_state.get_endpoints():\n raise ValueError(\n \"{} Endpoint '{}' is already registered.\".format(\n err_prefix, endpoint))\n\n logger.info(\n \"Registering route '{}' to endpoint '{}' with methods '{}'.\".\n format(route, endpoint, methods))\n\n self.current_state.routes[route] = (endpoint, methods)\n\n # NOTE(edoakes): checkpoint is written in self._set_traffic.\n await self._set_traffic(endpoint, traffic_dict)\n await asyncio.gather(*[\n router.set_route_table.remote(self.current_state.routes)\n for router in self.actor_reconciler.router_handles()\n ])", "title": "" }, { "docid": "1a62e079e8d370a6c61d65144aa56773", "score": "0.61904293", "text": "def add_endpoint(self, endpoint, local_site):\n try:\n epg = endpoint.get_parent()\n app = epg.get_parent()\n tenant = 
app.get_parent()\n except AttributeError as e:\n return\n logging.info('endpoint: %s epg: %s app: %s tenant: %s', endpoint.name, epg.name, app.name, tenant.name)\n\n # Ignore events without IP addresses\n if endpoint.ip == '0.0.0.0':\n return\n\n # Ignore MAC moves i.e. Same IP address appears on different MAC address.\n # This is the case in situations such as loadbalancer failover.\n if (tenant.name, app.name, epg.name, endpoint.name) in self.mac_tracker:\n expected_mac = self.mac_tracker[(tenant.name, app.name, epg.name, endpoint.name)]\n if endpoint.mac != expected_mac and endpoint.is_deleted():\n # Ignore this event since it is the old MAC being deleted on a MAC move\n return\n if endpoint.is_deleted():\n if (tenant.name, app.name, epg.name, endpoint.name) in self.mac_tracker:\n del self.mac_tracker[(tenant.name, app.name, epg.name, endpoint.name)]\n else:\n self.mac_tracker[(tenant.name, app.name, epg.name, endpoint.name)] = endpoint.mac\n\n # Track the IP to (Tenant, App, EPG)\n # This is in case the IPs are moving from 1 EPG to another EPG then we want to\n # send the currently queued endpoints before handling this endpoint to avoid\n # a subnet already present error\n if endpoint.name in self.addresses:\n if self.addresses[endpoint.name] != (tenant.name, app.name, epg.name):\n self.push_to_remote_sites(self._monitor._my_collector)\n else:\n self.addresses[endpoint.name] = (tenant.name, app.name, epg.name)\n\n # Get the policy for the EPG\n policy = local_site.get_policy_for_epg(tenant.name, app.name, epg.name)\n if policy is None:\n logging.info('Ignoring endpoint as there is no policy defined for its EPG (epg: %s app: %s tenant: %s)',\n epg.name, app.name, tenant.name)\n return\n\n logging.info('Need to process endpoint %s', endpoint.name)\n # Track the number of endpoint events\n if endpoint.is_deleted():\n self.endpoint_del_events += 1\n else:\n self.endpoint_add_events += 1\n\n # Process the endpoint policy\n for remote_site_policy in policy.get_site_policies():\n for l3out_policy in remote_site_policy.get_interfaces():\n # Remove existing JSON for the endpoint if any already queued since this\n # update will override that\n self._remove_queued_endpoint(remote_site_policy.name, l3out_policy, endpoint)\n\n # Create the JSON\n tag = IntersiteTag(tenant.name, app.name, epg.name, local_site.name)\n remote_tenant = Tenant(l3out_policy.tenant)\n remote_l3out = OutsideL3(l3out_policy.name, remote_tenant)\n remote_epg = OutsideEPG(policy.remote_epg, remote_l3out)\n remote_ep = OutsideNetwork(endpoint.name, remote_epg)\n if ':' in endpoint.name:\n remote_ep.ip = endpoint.name + '/128'\n else:\n remote_ep.ip = endpoint.name + '/32'\n if endpoint.is_deleted():\n remote_ep.mark_as_deleted()\n tenant_json = remote_tenant.get_json()\n\n # Add to the database\n self._merge_tenant_json(remote_site_policy.name, tenant_json)", "title": "" }, { "docid": "c99c07c548590ae3a43555c44cb41a4c", "score": "0.6184247", "text": "def register(self):\n config = self.get_config()\n frontend = find_or_create(config, \"frontend\")\n # Now we're sure frontend exists, so we can add sub elements to it.\n if self.override:\n frontend = self._add_override(frontend)\n else:\n frontend = self._add_route(frontend)\n self.put_config(config)", "title": "" }, { "docid": "135c8c9a66591c143004cde63a1c780b", "score": "0.60937953", "text": "def register_common_endpoint(self, service_name, host, port):\n self.register_endpoint(self.COMMON_APP_NAME, service_name, host, port)", "title": "" }, { "docid": 
"e7521d092f557c8c48a1e10e41cbd9ba", "score": "0.60863376", "text": "def register(process_class, url, endpoint_prefix):\n module, rst = process_class.rsplit('.', 1)\n python_module = importlib.import_module(module)\n rst_class = getattr(python_module, rst)\n api.add_resource(rst_class, url, endpoint=endpoint_prefix + rst_class.__name__.lower())", "title": "" }, { "docid": "53cc997512e7b33495a72927599eedd6", "score": "0.60809916", "text": "def register_url_response(url_endpoint: str) -> Response:\n\n return Get(url_endpoint + _register).response()", "title": "" }, { "docid": "6ce70ad2e95f5273a7e1b4efb8acae26", "score": "0.6069662", "text": "def __init__(self, endpoint):\n self.endpoint = endpoint", "title": "" }, { "docid": "6ce70ad2e95f5273a7e1b4efb8acae26", "score": "0.6069662", "text": "def __init__(self, endpoint):\n self.endpoint = endpoint", "title": "" }, { "docid": "3c2c233e6247effdc0eb1b52531fd352", "score": "0.59832716", "text": "def register(func):\n return gossip.register('slash.{0}'.format(func.__name__))(func)", "title": "" }, { "docid": "58ab8b0c0b3d5ae54bd8107f4a037688", "score": "0.5975165", "text": "def endpoint(self, endpoint):\n self._endpoint = endpoint", "title": "" }, { "docid": "1b42e29a2f0898a02cf88e14460464c6", "score": "0.5938735", "text": "def register(self, task):\n ...", "title": "" }, { "docid": "80fd792fd14d804b19b8d674808e0aa6", "score": "0.5937014", "text": "def _attach_endpoints(self):\n for name, endpoint in inspect.getmembers(self):\n if inspect.isclass(endpoint) and issubclass(endpoint, self._Endpoint) and (endpoint is not self._Endpoint):\n endpoint_instance = endpoint(self.base_requester)\n setattr(self, endpoint_instance.endpoint, endpoint_instance)", "title": "" }, { "docid": "03d6613ad488dbabbddea05c742b6d04", "score": "0.59368765", "text": "def register(self, blueprint):\n self.flask.register_blueprint(blueprint)", "title": "" }, { "docid": "6582bb4ba1fed03e4a7c2d03d15857f3", "score": "0.5907025", "text": "def register(self):\n print \"ABC - RegisterAgent.register()\"", "title": "" }, { "docid": "89db9e063f71e87b0fde84f8a9f1b97c", "score": "0.588705", "text": "def __init__(self, endpoint):\n pass", "title": "" }, { "docid": "f823a9902c5fe482bd213fbeaa6492d7", "score": "0.5886298", "text": "def register_global_endpoint(self, name: str, kind: str = \"\") -> HelicsEndpoint:\n ep = helicsFederateRegisterGlobalEndpoint(self, name, kind)\n self.endpoints[ep.name] = ep\n return ep", "title": "" }, { "docid": "76e07701d41791f8c36998fff99de2d2", "score": "0.5881645", "text": "def register(self):\n self.manager.register(self._bus_name, self.capabilities)", "title": "" }, { "docid": "52b89b995df59d97575b032a65d6f4fd", "score": "0.58793294", "text": "def register(self, api, routes, parent_route=\"/\"):\n for route in routes:\n path = os.path.join(parent_route, route['path'])\n if \"children\" in route:\n self.register(api, route['children'], parent_route=path)\n else:\n if \"payload\" in route:\n api.add_resource(route['component'], path,\n \t\t\t\tendpoint=route['endpoint'],\n \t\t\t\tresource_class_kwargs=route['payload'])\n else:\n api.add_resource(route['component'], path, endpoint=route['endpoint'])", "title": "" }, { "docid": "4287f8ca4f7f3f346a43dd571a53d399", "score": "0.58635914", "text": "def add_endpoint(self, endpoint=None, endpoint_name=None, handler=None, methods=[]):\n methods = [x.upper() for x in methods]\n self.__app.add_url_rule(endpoint, endpoint_name, handler, None, methods=methods)", "title": "" }, { "docid": 
"b2a8b939b5e8f61cbef5eec69979203e", "score": "0.5857651", "text": "def register_api(application):\n\n application.register_blueprint(v1.BLUEPRINT, url_prefix=\"/v1\")", "title": "" }, { "docid": "f863e6321c5f3b24918e412284fb8e56", "score": "0.5852659", "text": "def register_instance(self, instance, name = None):\n ##########################################################################\n #for method in instance.__dict__:\n for method in dir(instance):\n if method.startswith('_') is False:\n print \"Json-RPC service registering: \" + method\n if name is None:\n self.register_method(getattr(instance, method))\n else:\n self.register_method(getattr(instance, method),\n name = \"{0}, {1}\".format(name, method))", "title": "" }, { "docid": "aeab5350569e5d725ed8dd756177aeb0", "score": "0.5839903", "text": "def __registerRoutes(self):\n self.logger.debug(\"Registering Routes for %s\" % self.__class__ )\n \n for _,func in self.__class__.__dict__.iteritems():\n if type(func) == FunctionType and hasattr(func,'route'): \n i = 0\n for route in func.route:\n route_kwargs = func.route_kwargs[i]\n self.logger.debug(\"%s - %s - %s\" %(func, route, route_kwargs))\n self.__mapper.connect(route, route, handler=func, **route_kwargs)\n i +=1", "title": "" }, { "docid": "86dc56f01cc3147e15191bf97b8887cf", "score": "0.5819888", "text": "def registration_started(self):\n pass", "title": "" }, { "docid": "69b9d241ef2e4c443aa388a824e9cc71", "score": "0.5792701", "text": "def register(self, app: Sanic, options: Dict[Text, Any]) -> None:\n self.ctx.sio.attach(app, self.ctx.socketio_path)\n super().register(app, options)", "title": "" }, { "docid": "858092d43793d0c0aa997114e92edc11", "score": "0.57840204", "text": "def register_for_new_locations(self):\n pass", "title": "" }, { "docid": "de3f7c79e66088a77bfc15fc255b5330", "score": "0.57787573", "text": "def registerView(target,name,handler):", "title": "" }, { "docid": "c83ed011cd625757ebf72ef43c46bca9", "score": "0.57768196", "text": "def _register_url(self, func, path, methods):\n self.routes[path] = {\"func\": func, \"methods\": methods}", "title": "" }, { "docid": "ecbd985359ba2784b983956c0919e084", "score": "0.57762307", "text": "def register(self, dashboard):\n pass", "title": "" }, { "docid": "0417be16421fe8a6b4c4c8cccfc21c9a", "score": "0.5751978", "text": "def register_endpoint(self, port):\n hostname = sysinfo.hostname()\n self.zkclient.ensure_path(z.TICKET_LOCKER)\n\n node_path = z.path.ticket_locker('%s:%s' % (hostname, port))\n _LOGGER.info('registering locker: %s', node_path)\n if self.zkclient.exists(node_path):\n _LOGGER.info('removing previous node %s', node_path)\n zkutils.ensure_deleted(self.zkclient, node_path)\n\n zkutils.put(self.zkclient, node_path, {}, acl=None, ephemeral=True)", "title": "" }, { "docid": "323942e827b9a6edc9f6525a601175b0", "score": "0.57383835", "text": "def register(name):\n return services.add(name)", "title": "" }, { "docid": "70a74c3a5db07147acd21acf8a410b1b", "score": "0.5736594", "text": "def register(app):\n oauth = OAuth(app)\n # registering flask blueprint with /b2access/login and /b2access/auth endpoints supporting oauth workflow\n B2ACCESS_API_URL=os.environ.get('B2ACCESS_API_URL',default='https://unity.eudat-aai.fz-juelich.de/')\n B2Access = create_b2access_backend('b2access',B2ACCESS_API_URL)\n\n b2accessbp = create_flask_blueprint(B2Access, oauth, handle_authorize_b2access)\n app.register_blueprint(b2accessbp, url_prefix='/b2access')\n\n # registering flask blueprint with /google/login and /google/auth 
endpoints supporting oauth workflow\n googlebp = create_flask_blueprint(Google, oauth, handle_authorize_google)\n app.register_blueprint(googlebp, url_prefix='/google')\n\n # registering flask endpoint to logout from any of the idp\n @app.route('/b2access/logout')\n @app.route('/google/logout')\n def logout():\n session.pop(AUTH_TOKEN_KEY, None)\n session.pop(AUTH_REFRESH_TOKEN, None)\n session.pop(USER_INFO, None)\n return redirect(BASE_URI,code=302)\n\n # register flask endpoint for getting user information\n @app.route('/userinfo')\n def index():\n if is_logged_in():\n user_info = get_user_info()\n return jsonify(user_info)\n #(token == 'test' and request.host == 'localhost:5000')\n\n # return test user info in case of local deployment\n if (request.headers.get('authorization') == 'Basic dGVzdDp0ZXN0' and request.host=='localhost:5000'):\n users = app.data.driver.db['userprofile']\n a = users.find_one({'id': '0001'})\n ui= {\"name\":\"Test\",\"id\":\"0001\"}\n if a != None:\n #a['token'] = ui['token']\n ui = a\n ui['_id']=str(ui['_id'])\n return jsonify(ui)\n # return guest user info\n return jsonify({\"name\":\"Guest\",\"status\":\"not logged in.\"})\n\n def get_user_info():\n \"\"\"\"function to return user information stored in current session\"\"\"\n print('get_user_info()')\n ui = session[USER_INFO]\n ui['token']=session[AUTH_TOKEN_KEY]\n #if (ui['token'] == 'test' and request.host == 'localhost:5000'):\n # ui['id']='0001'\n #check whether user exists in DB, and update fields accordingly\n users = app.data.driver.db['userprofile']\n a = users.find_one({'id': ui['id']})\n\n if a !=None:\n #print(a)\n a['token']=ui['token']\n ui = a\n ui['_id'] = str(ui['_id'])\n else:\n # do migration\n b = migrateuser(ui)\n if b!=None:\n b['token']=ui['token']\n ui = b\n ui['_id']= str(ui['_id'])\n #print('get_user_info',ui['name'])\n return ui\n\n def migrateuser(ui):\n #do migration\n #users = app.data.driver.db['userprofile']\n print('migrateuser()',ui['email'])\n userstomigrate = app.data.driver.db['userstomigrate']\n usertomigrate = userstomigrate.find_one({'email':ui['email']})\n\n if usertomigrate != None:\n print('migrating user profile from old db records')\n print('id',ui['id'])\n print('email',ui['email'])\n experience: str = 'beginner'\n if (usertomigrate['annotator_exp']=='i'):\n experience='intermediate'\n else:\n if (usertomigrate['annotator_exp']=='a'):\n experience='advanced'\n\n userprofile = app.data.driver.db['userprofile']\n #insert profile\n up = {\n 'id':str(ui['id']),\n 'pseudo':usertomigrate['nickname'],\n 'email':usertomigrate['email'],\n 'firstname':usertomigrate['first_name'],\n 'lastname':usertomigrate['last_name'],\n 'experience':experience,\n 'jobtitle':usertomigrate['job_title'],\n 'org':usertomigrate['organization'],\n 'country':usertomigrate['country']\n }\n userprofile.insert_one(up)\n #remove from lists to migrate\n userstomigrate.delete_one({'email':ui['email']})\n #list all annotations\n annotations = app.data.driver.db['annotations']\n #sets creator.id to all user's annotation\n #creator is in old mongodb array, push to the first entry - creator.0.id\n annotations.update_many({'creator.nickname':usertomigrate['nickname']},{ '$set' :{'creator.0.id':up['id']}})\n return up;\n\n\n @app.route('/interface_main', methods=['GET','POST'])\n def compatibility_redirect():\n #\"\"\" redirects old POST request with target id and source to GET request to new UI with params in url\"\"\"\n targetsource = request.form.get('subject_tofeed','') # source - direct link to file\n 
# request.form['recordurl_tofeed'] this is ignored in b2note v 1.0\n targetid = request.form.get('pid_tofeed','') # id - landingpage\n redirecturl = ('' if not BASE_URI else BASE_URI) + '/#/b2note_home/id=' + str(targetid) + '&source=' + str(targetsource)\n # /#/b2note_home/id=https:/someurl/sdf&source=http://someurl\n return redirect(redirecturl, code=303)", "title": "" }, { "docid": "c8744bd0afa715cef206cd4a8659064d", "score": "0.5735148", "text": "def initialize(self, **kwargs):\n super().initialize()\n self.api_endpoint = kwargs['api_endpoint'] # type: APIEndpoint", "title": "" }, { "docid": "a201a03395abb516415ac52ff761d0e0", "score": "0.5731288", "text": "def __set_endpoint__(self, endpoint):\n util.assert_valid(endpoint, error_message=\"Cannot proceed with an empty endpoint\")\n self.endpoint = endpoint", "title": "" }, { "docid": "d4e8624a0b5419267241730f7ca437a1", "score": "0.5729765", "text": "def register(self, http_server):\n if hasattr(self, \"PATTERN\"):\n pattern = self.PATTERN\n\n for method in (\"GET\", \"PUT\", \"POST\", \"OPTIONS\", \"DELETE\"):\n if hasattr(self, \"on_%s\" % (method)):\n method_handler = getattr(self, \"on_%s\" % (method))\n http_server.register_path(method, pattern, method_handler)\n else:\n raise NotImplementedError(\"RestServlet must register something.\")", "title": "" }, { "docid": "004192f15d85735776f0cd3ec3ebea43", "score": "0.57276326", "text": "def _on_add(self, source, endpoint):\n self._on_update()", "title": "" }, { "docid": "782f118711e6cc1f85acf36198ceac05", "score": "0.5723538", "text": "def test_rpc_registration(self):\n # Lookup\n svc = RpcReqSink(42)\n with self.assertRaises(OAGraphRetrieveError):\n status = svc.status\n print status[0]._rawdata\n\n # Register\n svc.start()\n self.assertEqual(svc.status[0].role, 'rep')\n self.assertEqual(svc.status[0].owner_id, 42)\n\n # Deregister\n svc.stop()\n with self.assertRaises(OAGraphRetrieveError):\n status = svc.status", "title": "" }, { "docid": "b718a722623f040f2afb7f78822fa4d3", "score": "0.57151735", "text": "def register(app, **kwargs):\n app.add_url_rule('/health', view_func=HealthApi.as_view('health'),\n methods=['GET'])", "title": "" }, { "docid": "6aa6c59265da94a03a938d66a59e327f", "score": "0.57150364", "text": "def initialize(self, **kwargs):\n super().initialize(**kwargs)\n self.api_endpoint = kwargs['api_endpoint'] # type: APIEndpoint", "title": "" }, { "docid": 
"6aa6c59265da94a03a938d66a59e327f", "score": "0.57150364", "text": "def initialize(self, **kwargs):\n super().initialize(**kwargs)\n self.api_endpoint = kwargs['api_endpoint'] # type: APIEndpoint", "title": "" }, { "docid": "6aa6c59265da94a03a938d66a59e327f", "score": "0.57150364", "text": "def initialize(self, **kwargs):\n super().initialize(**kwargs)\n self.api_endpoint = kwargs['api_endpoint'] # type: APIEndpoint", "title": "" }, { "docid": "51a554c9f8153906ce76dd44aba1b06a", "score": "0.56985795", "text": "def register(restorator):\n global _global_registry\n _global_registry.register(restorator)\n return restorator", "title": "" }, { "docid": "ad8bb6e6b49187e5edeb5e88bba88e37", "score": "0.56964874", "text": "def _add_routes(self):\n pass", "title": "" }, { "docid": "b23869b9f14a7630bc45f4568d189dd0", "score": "0.56915873", "text": "def __init__(__self__, *,\n endpoint: str,\n name: str,\n routing_type: Optional[str] = None):\n pulumi.set(__self__, \"endpoint\", endpoint)\n pulumi.set(__self__, \"name\", name)\n if routing_type is not None:\n pulumi.set(__self__, \"routing_type\", routing_type)", "title": "" }, { "docid": "b23869b9f14a7630bc45f4568d189dd0", "score": "0.56915873", "text": "def __init__(__self__, *,\n endpoint: str,\n name: str,\n routing_type: Optional[str] = None):\n pulumi.set(__self__, \"endpoint\", endpoint)\n pulumi.set(__self__, \"name\", name)\n if routing_type is not None:\n pulumi.set(__self__, \"routing_type\", routing_type)", "title": "" }, { "docid": "91a3d12b92e4d84628c5ed59a0d66f54", "score": "0.5685638", "text": "def endpoint(self, endpoint):\r\n def decorator(f):\r\n self.view_functions[endpoint] = f\r\n return f\r\n return decorator", "title": "" }, { "docid": "fd2473b980570faa3f98ff961711caa7", "score": "0.5676165", "text": "def register(self, scheme: str, url_handler: T):\n if scheme not in self.registry:\n logger.info(f\"registering url scheme {scheme}\")\n self.registry[scheme] = url_handler", "title": "" }, { "docid": "e47dd111bf94cfac296d26fbacd2c579", "score": "0.5669734", "text": "def default_setup_endpoint_connection(keystone):\n with charm.provide_charm_instance() as instance:\n keystone.register_endpoints(instance.service_type,\n instance.region,\n instance.public_url,\n instance.internal_url,\n instance.admin_url)\n instance.assess_status()", "title": "" }, { "docid": "0c0dbb0d9ce4dfa06db943740d9cbdb2", "score": "0.5664982", "text": "def on_endpoint_event(self, event_type, fn, *args, **kwargs):\n return self.on_object_event(event_type, fn, Endpoint, 'Endpoint',\n *args, **kwargs)", "title": "" }, { "docid": "888fd0fe3db7cddb37ddef524197956f", "score": "0.56606", "text": "def register(app):\n # register converters\n #converters.register(app)\n\n # registerting additional routes\n #register_additional_routes(app)\n\n # API resources registration\n directory = os.path.dirname(os.path.realpath(__file__))\n resources = [os.path.basename(os.path.normpath(i)) \\\n for i in glob.glob(os.path.join(directory, '*/'))]\n for resource in resources:\n module = \"{0}.{1}.api\".format(__name__, resource)\n class_name = \"{0}Resource\".format(inflection.camelize(resource))\n try:\n class_ref = getattr(__import__(module, fromlist=[class_name]),\n class_name)\n class_ref.register(app, route_prefix='api/1.0')\n except ImportError as e:\n print(\"Resource '{0}' does not exists\".format(module))\n except AttributeError as e:\n print(\"Resource class '{0}' does not exists\".format(class_name))", "title": "" }, { "docid": "def97ada1b1c0906190b154a6273887b", 
"score": "0.5656919", "text": "def register_module(app):\n register_routes(app)\n register_cors(app)", "title": "" }, { "docid": "0a4717ce6d230ba8b5f805f6e0245149", "score": "0.5651032", "text": "def register_blueprints(app):\n print(\"registering blueprints\")\n from app.user_endpoint import bp as user_endpoint_bp\n from app.main import bp as main_bp\n app.register_blueprint(user_endpoint_bp)\n app.register_blueprint(main_bp)", "title": "" }, { "docid": "e38b710604500cd89e0c82ef9fde4e94", "score": "0.5646261", "text": "def setup(self, endpoint):\n self.endpoint = endpoint\n if self.__class__._plugin_type_ == self.__class__.SEND: self.apply(endpoint.send)", "title": "" }, { "docid": "90eba1d910a11278cb05d7b29db5de6a", "score": "0.5630799", "text": "def _authorization_register_handler_cb(self, decoded, payload):\n _log.debug(\"_authorization_register_handler_cb:\\ndecoded={}\\npayload={}\".format(decoded, payload))\n self.node.authorization.pdp.register_node(decoded[\"iss\"], decoded[\"attributes\"])\n reply = response.CalvinResponse(response.OK)\n # Send reply\n msg = {'cmd': 'REPLY', 'msg_uuid': payload['msg_uuid'], 'value': reply.encode()}\n self.network.link_request(payload['from_rt_uuid'], callback=CalvinCB(send_message, msg=msg))", "title": "" }, { "docid": "13e9f569b30d753bed54dfaeb281035c", "score": "0.5629414", "text": "def router(self):", "title": "" }, { "docid": "e6dd2486b3e75d4cc65b2ef5a46a8d12", "score": "0.56026524", "text": "def registrationResol(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "94dbfddcafa5fc1612e8b86982dfb059", "score": "0.55919707", "text": "def add_endpoint_to_sipserver(self, endpoint: str, password: str) -> None:\n password = self.user_password if not password else password\n if isinstance(endpoint, str):\n endpoint = [endpoint]\n for i in endpoint:\n self.sendline(f\"kamctl add {i} {password}\")\n index = self.expect([\"MySQL password for user\"] + self.prompt)\n if index == 0:\n self.sendline(self.mysql.password)\n self.expect(self.prompt)", "title": "" }, { "docid": "086d2b716fb9bde0616fc47d73fae3a0", "score": "0.5587379", "text": "def test_register(self):\n response = register(\"testService\", 12311)\n self.assertEqual(response, \"OK\")", "title": "" }, { "docid": "2d5e609d226432c2df52e610125e15cf", "score": "0.55836266", "text": "def register(self, segments_endpoints: Iterable[SegmentEndpoints],\n *,\n from_test: bool) -> None:", "title": "" }, { "docid": "4ccdea831696255a1c332b33074320db", "score": "0.5577877", "text": "def start(self):\n\n self.skeleton.start()\n self.id, self.hash = self.name_service.register(self.type,\n self.address)", "title": "" }, { "docid": "203f87e2889435a33c60ca50e47f5f80", "score": "0.55703866", "text": "def prepare_endpoint(self):\n self.build_get_by_key()\n self.set_dynamic_model_search()", "title": "" }, { "docid": "ca961e0a73e1b99d7c8bb3b21953cf78", "score": "0.55613714", "text": "def __init__(self, endpoint_1, endpoint_2):\n self.endpoint_1 = endpoint_1\n self.endpoint_2 = endpoint_2", "title": "" }, { "docid": "dfa7cf972e2b2a5dcf270af7b122bb34", "score": "0.5556141", "text": "async def _register_websocket(self):\n try:\n data = {\n \"event\": self.registerEvent,\n \"uuid\": self.pluginUUID\n }\n\n logging.info(\"Registering websocket...\")\n await self.websocket.send(json.dumps(data))\n except Exception as err:\n logging.critical(err)", "title": "" }, { "docid": "0ea945e78d91c3c2143f09d7d891b77e", "score": "0.5552751", "text": "def register_routes(app):\n 
app.register_blueprint(account_api)\n app.register_blueprint(profile_api)", "title": "" }, { "docid": "d207d8a04313d16685a7f936d0b0ee62", "score": "0.5551401", "text": "def configure(self, *args, **kwargs):\n\n raise NotImplementedError(\n \"configure is not a supported method for REST. \"\n \"post is probably what you are looking for\"\n )", "title": "" }, { "docid": "a71cbeae25a8d2c3bb0163a98a232d00", "score": "0.553261", "text": "def register(self):\n self.session.event_hub.subscribe(\n 'topic=ftrack.action.discover',\n self.discover\n )\n\n self.session.event_hub.subscribe(\n 'topic=ftrack.action.launch and data.actionIdentifier={0}'.format(\n self.identifier\n ),\n self.launch\n )", "title": "" }, { "docid": "5cbd43aca52b1ccd869227454e92efb6", "score": "0.55275494", "text": "def register_location(self, location: Location) -> None:", "title": "" }, { "docid": "8a605fff82a75b0cc8020a113a2e6226", "score": "0.5526663", "text": "def register_identify(self,host):", "title": "" }, { "docid": "db4baf58756307b2eec927f48b2f5d2f", "score": "0.55193263", "text": "def register(self):\n conn = client.HTTPConnection(\"localhost:8000\")\n if path.exists(self.REGISTRATION_KEY_FILE):\n with open(self.REGISTRATION_KEY_FILE) as fp:\n registration_key = fp.readline().strip()\n conn.request(\n \"POST\", \"/api/host/\", urlencode({'registration_key': registration_key}))\n else:\n conn.request(\n \"POST\", \"/api/host/\")\n guid = json.loads(\n conn.getresponse().read().decode('utf-8'))['guid']\n conn.close()\n with open(self.GUID_FILE, 'x') as fp:\n fp.write(guid)\n return True", "title": "" }, { "docid": "23b0812902ac20157940aad21ce7547a", "score": "0.5513938", "text": "def registerService(self, p_str): # real signature unknown; restored from __doc__\n return False", "title": "" }, { "docid": "c3799e395aaf2695a333035cb82fc761", "score": "0.55129963", "text": "def register(cls):\n registry_instance.register(cls)", "title": "" }, { "docid": "530040f95c1ef986fd072c96c997b1bc", "score": "0.5512729", "text": "def register(cls, app: Flask):\n view = cls.as_view(\"health\")\n app.add_url_rule(\"/health\", view_func=view, methods=[\"GET\"])", "title": "" }, { "docid": "6b66508edd6c522ff53021c8d925909f", "score": "0.55016243", "text": "def __init__(self, endpoint_a, endpoint_b):\n self.endpoint_a = endpoint_a\n self.endpoint_b = endpoint_b\n self._uuid = uuid4()\n super().__init__()", "title": "" } ]
e9ef466346bc02a0f0c94016b424fbbd
user_enable() Allow all mechanisms that are ON before `PK11Slot.user_disable()` was called to be available again. Sets disable reason to PK11_DIS_NONE.
[ { "docid": "fe3d49d2d76a409740151d11d523087a", "score": "0.6470902", "text": "def user_enable(self): # real signature unknown; restored from __doc__\n pass", "title": "" } ]
[ { "docid": "e0fe69d17b21cbb553f9c9ea3ca6fabf", "score": "0.66574407", "text": "def user_disable(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "e06e1cc2b999c023e3fc44bcf4ccfaac", "score": "0.631137", "text": "def disable_user(self, manager_user_id: int, user_id: int):\n self.enable_user(manager_user_id, user_id, False)", "title": "" }, { "docid": "cb5196e297e609c5abfb8c23d0e065c9", "score": "0.61563957", "text": "def enable_user(self):\n pass", "title": "" }, { "docid": "cb5196e297e609c5abfb8c23d0e065c9", "score": "0.61563957", "text": "def enable_user(self):\n pass", "title": "" }, { "docid": "52592eb92d0938e8947fffee49c29aa5", "score": "0.60649985", "text": "def set_enabled(cls, course_key: CourseKey, enabled: bool, user: 'User') -> bool:", "title": "" }, { "docid": "82733a47e0cde8b7d965cf774f60b8f6", "score": "0.5836599", "text": "def enable() -> None:\n global _disabled\n _disabled = False", "title": "" }, { "docid": "ad83e8aef6dfe8dbd42a38465bbb194e", "score": "0.58157736", "text": "def disable():", "title": "" }, { "docid": "de656022745ce113b06b2deda3dfa9d1", "score": "0.579795", "text": "def test_user_disabled_true(self):\n self.fake_user.userAccountControl.value = 2\n disabled = recycle.user_disabled(username='alice', ldap_conn=self.fake_ldap_conn)\n\n self.assertTrue(disabled is True)", "title": "" }, { "docid": "5fcedfe81d89f6261f818f93df64a7c5", "score": "0.5696463", "text": "def disable():\n return", "title": "" }, { "docid": "11c0571f7dcf1fcf69ab4f8b38a437b4", "score": "0.5630311", "text": "def SetEnable(self, value):\r\n\r\n self._SetProperty(VACATION_RESPONDER_ENABLE, value)", "title": "" }, { "docid": "33356d70cac5e8db07dcfc7491e340b9", "score": "0.5626627", "text": "def update_disabled_status(user_id, disabled):\n user = get_user_with_id(user_id)\n if user is None:\n raise RuntimeError('No user associated with this id.')\n else:\n user.disabled = disabled\n db.session.commit()", "title": "" }, { "docid": "1dee26018747f408bbb8f1273c95b787", "score": "0.5625135", "text": "def reject_user(user_email):\n if user_email:\n DEFAULT_PASSWORD = \"\"\n update_id = db.unverify_user(user_email, DEFAULT_PASSWORD)\n return json.jsonify(success=\"The user was successfully disable!\", update_id=update_id)\n else:\n return json.jsonify(error=\"There was an error disabling that user!\")", "title": "" }, { "docid": "4ea60404e3a4fb4e4aeed04e238dc821", "score": "0.56088376", "text": "def SetEnable(self, value):\n\n self._SetProperty(VACATION_RESPONDER_ENABLE, value)", "title": "" }, { "docid": "c7e443d0e516571a05f1f59c12d97000", "score": "0.5605296", "text": "def _disable(self):\n self._disabled = True", "title": "" }, { "docid": "0e218a7dc0ce6be36843f4ce0a3cb55d", "score": "0.5604215", "text": "def ipn_disable_transaction(self, user, group):\n # If user just completed an upgrade, refund is part of the upgrade\n # and should not disable user\n if user.get_property('upgrade_completed', False):\n user.set_property('upgrade_completed', False)\n return\n\n if group.addon:\n user.groups.remove(group)\n user.set_property(\n 'addon_{}_valid_to'.format(group.product_id), date.today())\n action = u'Addon \"{}\" disabled'.format(group.name)\n removed_groups = [group, ]\n else:\n user.valid_to = date.today()\n removed_groups = deepcopy(user.groups)\n user.groups = []\n action = u'Disabled'\n\n comment = COMMENT.format(\n action,\n self.provider,\n self.params.trans_id,\n self.params.trans_type,\n 'removed from groups: {}'.format(\n ', '.join([g.name for g in 
removed_groups])),\n )\n self.request.registry.notify(\n UserDisabled(self.request, user, comment))", "title": "" }, { "docid": "865dbec14426e9418edf3d58c8a1b8f5", "score": "0.5588258", "text": "def enablebypass(self, enablebypass) :\n\t\ttry :\n\t\t\tself._enablebypass = enablebypass\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "3d8dacd550900271ba0067b7021f7ecb", "score": "0.5586075", "text": "def enable_user(self, manager_user_id: int, user_id: int, is_active=True):\n self.update_user(manager_user_id, user_id, {\"is_active\": is_active})", "title": "" }, { "docid": "a5067de6e44a030aa8c67eb6dd3f806f", "score": "0.55383843", "text": "def disable() -> None:\n global _disabled\n _disabled = True", "title": "" }, { "docid": "7db46eafe33ddc34a5002b97e79cd366", "score": "0.5531881", "text": "def disable(self, disable):\n\n self._disable = disable", "title": "" }, { "docid": "1f6e715b6819e55f0c700384ea141698", "score": "0.5517072", "text": "def tf_disable(user):\n tf_clean_session()\n _datastore.tf_reset(user)\n tf_disabled.send(app._get_current_object(), user=user)", "title": "" }, { "docid": "ac978a6a306027fa8252169f60995497", "score": "0.54915273", "text": "def suspend_spam_user(self, user):\n if user.is_ham:\n return False\n self.confirm_spam(save=True, train_spam_services=False)\n self.set_privacy('private', log=False, save=True)\n\n # Suspend the flagged user for spam.\n user.flag_spam()\n if not user.is_disabled:\n user.deactivate_account()\n user.is_registered = False\n mails.send_mail(\n to_addr=user.username,\n mail=mails.SPAM_USER_BANNED,\n user=user,\n osf_support_email=settings.OSF_SUPPORT_EMAIL,\n can_change_preferences=False,\n )\n user.save()\n\n # Make public nodes private from this contributor\n for node in user.all_nodes:\n if self._id != node._id and len(node.contributors) == 1 and node.is_public and not node.is_quickfiles:\n node.confirm_spam(save=True, train_spam_services=False)\n node.set_privacy('private', log=False, save=True, force=True)\n\n # Make preprints private from this contributor\n for preprint in user.preprints.all():\n if self._id != preprint._id and len(preprint.contributors) == 1 and preprint.is_public:\n preprint.confirm_spam(save=True, train_spam_services=False)\n preprint.set_privacy('private', log=False, save=True)", "title": "" }, { "docid": "0eab0bfbcfb8db1bcf12872eb899700b", "score": "0.5460134", "text": "def disable(exaile):\n print('XLCB Disabled')", "title": "" }, { "docid": "112e5a901817a3024cc82fbe07d4902d", "score": "0.54582864", "text": "def Disable(self):\n self._rx.DisableEdid()", "title": "" }, { "docid": "758ab9b97104df8a4e469e709926d1cc", "score": "0.54542345", "text": "def enable_disable_intent(self, hermes, intent_message, action):\n intent = intent_message.slots.intent.first().value\n intent_id = self.intent_id_from_name(intent)\n\n function = getattr(DialogueConfiguration(), action + '_intent')\n dialogue_conf = function(intent_id)\n hermes.configure_dialogue(dialogue_conf)\n\n hermes.publish_end_session(intent_message.session_id,\n getattr(i18n, 'RESULT_' + action.upper() + '_INTENT').format(intent))", "title": "" }, { "docid": "44e89dbb2d80c70b703bd3cd19892576", "score": "0.5445512", "text": "def disable(self):\n self._enabled = False\n self._inform_changes_in_enabled_status()", "title": "" }, { "docid": "bec81c8a0f51cd6cba7cac1b67b193d3", "score": "0.54251945", "text": "def slotTabEnable(self, enable):\r\n self.tuning.setEnabled(bool(enable))\r\n if enable:\r\n 
self.slotCustomTuningEnable(self.tuning.currentIndex())\r\n else:\r\n self.customTuning.setEnabled(False)", "title": "" }, { "docid": "05b2aa672484963df170049b8140cadc", "score": "0.5424905", "text": "def test_user_disabled_false(self):\n disabled = recycle.user_disabled(username='alice', ldap_conn=self.fake_ldap_conn)\n\n self.assertTrue(disabled is False)", "title": "" }, { "docid": "c2c281431c0dd00cf026ea636a5a385d", "score": "0.54226136", "text": "def deactivate_user(self, user):\n raise NotImplementedError()", "title": "" }, { "docid": "a4da16482cc50fe52199fe6fe1375c4b", "score": "0.5421653", "text": "def enable_user(client, profile, user):\n try:\n response = client.admin_enable_user(\n UserPoolId=profile[\"user_pool_id\"], Username=user.email\n )\n if response[\"ResponseMetadata\"][\"HTTPStatusCode\"] == 200:\n print(f\"User {user.email} was enabled successfully\")\n return response\n except client.exceptions.UserNotFoundException as error:\n print(f\"User {user.email} does not exist\")\n return error.response\n except client.exceptions.ClientError as error:\n print(f\"Fail to disable user {user.email}\")\n return error.response", "title": "" }, { "docid": "cd530cfaed40f2ce7696f2fd889f7b28", "score": "0.5414407", "text": "def disable_user(client, profile, user):\n try:\n response = client.admin_disable_user(\n UserPoolId=profile[\"user_pool_id\"], Username=user.email\n )\n if response[\"ResponseMetadata\"][\"HTTPStatusCode\"] == 200:\n print(f\"User {user.email} was disabled successfully\")\n return response\n except client.exceptions.UserNotFoundException as error:\n print(f\"User {user.email} does not exist\")\n return error.response\n except client.exceptions.ClientError as error:\n print(f\"Fail to disable user {user.email}\")\n return error.response", "title": "" }, { "docid": "8dccf3234337b77dd7443b9197820df0", "score": "0.53824896", "text": "def disable(self):\n self.bus.write_byte_data(self.address, MODE1, self.bus.read_byte_data(self.address, MODE1) | 0x10)", "title": "" }, { "docid": "83325427c7058aeb971b0c55c31dad3e", "score": "0.5368594", "text": "def disable(self, ):\n\t\tpass", "title": "" }, { "docid": "58b5d8f9a9247bfe2c66233f5ea73655", "score": "0.53642154", "text": "def enable():", "title": "" }, { "docid": "516d030f1315aee502a04afe09bd90d7", "score": "0.5341141", "text": "def disable(self):\n\n return self.enable(False)", "title": "" }, { "docid": "7839f3fb2b370be5229bd21552a32186", "score": "0.5334939", "text": "def enable(self, onOff):\n self._nsObject.setEnabled_(onOff)", "title": "" }, { "docid": "1b103189bd98026cecefdfc79e7dd0bf", "score": "0.5322253", "text": "def x4driver_set_enable(self, value):\n return _moduleconnectorwrapper.PyXEP_x4driver_set_enable(self, value)", "title": "" }, { "docid": "b84aafb7b159663ec49ef4574c4d13f6", "score": "0.53081024", "text": "def setEnabled(self, enabled):\n raise NotImplementedError", "title": "" }, { "docid": "3a593bcb74e58ebc0caf43600c61446e", "score": "0.5307341", "text": "def _disable(self):\n raise NotImplementedError", "title": "" }, { "docid": "c45131c59233409817703e971187b1be", "score": "0.5306311", "text": "def Disable(self):\n self._SwitchRamToMain()", "title": "" }, { "docid": "4c36e5878e52f05830227720f3d47d68", "score": "0.52981395", "text": "def disable(*args,**kw):\n pass", "title": "" }, { "docid": "9d37f7c6523cb5040c7c334ec4a65357", "score": "0.5295421", "text": "def disable_prompts(self):\n data = {}\n return self.post('/concord_commercial_prompts_disabled', data, dotnet_server=False)", "title": "" }, { 
"docid": "fae03486b3dac8d03e9f59c650c4774d", "score": "0.52914613", "text": "async def disallowuser(self, ctx, user : discord.Member):\n allowed_users = await self.config.guild(ctx.guild).allowed_users()\n if user.id in allowed_users:\n allowed_users.append(user.id)\n await self.config.guild(ctx.guild).allowed_users.set(allowed_users)\n await ctx.send(f\"{user} cannot run `!snail`\")", "title": "" }, { "docid": "656feb76737be4fa0e10c6f9aaa8dc74", "score": "0.5284368", "text": "def disable(self):\n byteArr = self._encodeByteArr([UNIT,SET_MODE,POTENTIOM_OFF,\n NULL,NULL,NULL])\n if not self.connected:\n self._open_port()\n self._sendByteArr(byteArr,self.default_timeout)\n byteArr = self._readBytes(timeout=self.default_timeout)\n self._close_port()\n self.log.info('manual adjustment disabled')\n return byteArr", "title": "" }, { "docid": "b4faaa5820bcb92a8aceb00ec9dd9dea", "score": "0.5278378", "text": "async def disable_users(self, request, resource=None):\n import asyncio\n\n ids = request.query.getall(\"ids\")\n await User.update(is_active=False).where(User.id << ids)\n await asyncio.sleep(1)\n return {\"status\": True, \"ids\": ids, \"message\": \"Users is disabled\"}", "title": "" }, { "docid": "fd996c1b46191095f5242263bd93b5c3", "score": "0.5259077", "text": "def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Runtime.disable\", {})", "title": "" }, { "docid": "00791f7fb1d298eba0d039d13e38dcc2", "score": "0.5254029", "text": "def on_enable(self, obj, val):\n self.set_state(enabled=(val != self.disabled_value))", "title": "" }, { "docid": "65fd0d73a0a6f8058c720e0778f4e325", "score": "0.524181", "text": "def test_user_disabled_not_found(self):\n self.fake_ldap_conn.entries = []\n answer = recycle.user_disabled(username='alice', ldap_conn=self.fake_ldap_conn)\n self.assertTrue(answer)", "title": "" }, { "docid": "03f0fdb399cc549a2a60b12c5802f82f", "score": "0.5239046", "text": "def get_disabled_reason(self): # real signature unknown; restored from __doc__\n return 0", "title": "" }, { "docid": "58a279c2ff8c3a535809be779085d3f9", "score": "0.5234166", "text": "def set_enable(self, enable: bool) -> None:\n if self._parent.sleeping:\n raise RuntimeError(\"Tried to modify a PWM while sleeping.\")\n\n self._full_off = not enable\n self._write_state()", "title": "" }, { "docid": "5afc1efc32521f397a7709b4a6beb2a0", "score": "0.52165496", "text": "def enable(self):\n self._enabled = True", "title": "" }, { "docid": "06207b8ab05c36f252fde180d41da3f5", "score": "0.52146286", "text": "async def adminallow(self, ctx, *, yes_no = None):\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\tawait ctx.send(Utils.yes_no_setting(ctx,\"Admin disabled command access\",\"AdminDisabledAccess\",yes_no))", "title": "" }, { "docid": "ad0b9a5b19dff69f30d30431d2ceb5c4", "score": "0.5211381", "text": "async def allowsoak(self, ctx, enable: bool):\n if ctx.message.guild is not None:\n await ctx.message.delete()\n\n mysql.set_soak(ctx.message.guild, int(enable))\n if enable:\n await ctx.send(\"Ok! Soaking is now enabled! :white_check_mark:\")\n else:\n await ctx.send(\"Ok! Soaking is now disabled! 
:no_entry_sign:\")", "title": "" }, { "docid": "5fe1fbd4db1e03bbf9d354d88ca15a7a", "score": "0.5210443", "text": "async def allowuser(self, ctx, user : discord.Member):\n allowed_users = await self.config.guild(ctx.guild).allowed_users()\n allowed_users.append(user.id)\n await self.config.guild(ctx.guild).allowed_users.set(allowed_users)\n await ctx.send(f\"{user} can now run `!snail`\")", "title": "" }, { "docid": "647bb7589545006770bbef49ed6a6744", "score": "0.52075684", "text": "def enable(self):\n try:\n self.disabled = False\n except AttributeError:\n raise AttributeError(\"The disabled property is read-only.\")", "title": "" }, { "docid": "ad456cf8fb62afc8a6198de34791c1af", "score": "0.52033883", "text": "def disable(cls):\n return (\n cls.build_send_payload(\"disable\", {\n }),\n None\n )", "title": "" }, { "docid": "8cf6ee3be31e230bdf62b786deb2ca82", "score": "0.519465", "text": "def disable(self):\n self._enabled = False", "title": "" }, { "docid": "8cf6ee3be31e230bdf62b786deb2ca82", "score": "0.519465", "text": "def disable(self):\n self._enabled = False", "title": "" }, { "docid": "908ebc103a01bfcd94aca8d7e9a02ba0", "score": "0.5187865", "text": "def setEnabled(self, is_enabled):\n self.solenoid.set(is_enabled)", "title": "" }, { "docid": "a9a81454d92f4efa85266ee549455603", "score": "0.5177367", "text": "def _set_enabled(self):\n with self.lock:\n self._enabled = True\n\n for service in self._services:\n service.alert()", "title": "" }, { "docid": "b84aecab97581819d554b07a994eb371", "score": "0.51740056", "text": "def disable(self) -> None:\n self._is_enabled = False", "title": "" }, { "docid": "c5c04d13c2a4f6804217ce2e25abd37d", "score": "0.5172719", "text": "def enable_connect_for_user(self, enable_connect_for_user):\n\n self._enable_connect_for_user = enable_connect_for_user", "title": "" }, { "docid": "3724b7ecddd5b36acb22527f7ded1a7e", "score": "0.5162432", "text": "def disabledPeriodic(self):\n # replace \"pass\" with your code\n pass", "title": "" }, { "docid": "958ccdabbd9a1b88b36101c6809dbacc", "score": "0.5158099", "text": "def allow_privilege_escalation(self, value: bool):\n self._properties[\"allowPrivilegeEscalation\"] = value", "title": "" }, { "docid": "941a164ed76aa91be537c67832d17901", "score": "0.515633", "text": "def enable(self) -> None:\n self._is_enabled = True", "title": "" }, { "docid": "546fd23503def38dec8e53e3e5e26dfa", "score": "0.51517797", "text": "def enable(self):\n self._enabled = True\n self._inform_changes_in_enabled_status()", "title": "" }, { "docid": "c085470086fd22b7ffbae4d5197303b3", "score": "0.5151044", "text": "def disableLockFree(self):\n key = 'lock_free'\n _check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),\n c_str(key), ctypes.c_bool(False)))", "title": "" }, { "docid": "651c2241aa0cb16ea73c6755a037169f", "score": "0.5148358", "text": "def disable(self):\n self.ipcon.send_request(self, BrickDC.FUNCTION_DISABLE, (), '', '')", "title": "" }, { "docid": "1b71a35f94526e463a2f0d3161a7b591", "score": "0.51438016", "text": "def disabled(self):\n while self.isDisabled():\n wpilib.Timer.delay(0.01)", "title": "" }, { "docid": "eff2271ce19658b41dbc5bd39d47b7a4", "score": "0.51352316", "text": "def set_disable(self, flag):\n self.disable = flag\n logger.debug('Set disable: {}'.format(self.disable))", "title": "" }, { "docid": "f1caf64c6502a793adbc8303b3aa6de4", "score": "0.5132103", "text": "def enablebypass(self) :\n\t\ttry :\n\t\t\treturn self._enablebypass\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": 
"3b9fc9ece36ca5afd94c3a697c8c0a11", "score": "0.5122351", "text": "def set_disabled(self):\n self.reset()\n self.lcd.message(\"\\x02 Disabled\\nEnter code:\")\n self.lcd.show_cursor(True)\n self.lcd.blink(True)\n self.lcd.set_color(*WHITE)", "title": "" }, { "docid": "792596e1f988f0352648a17f58d421fc", "score": "0.51212525", "text": "def disable(name):", "title": "" }, { "docid": "8cd27752da946fe6b0805eb45e6b24f6", "score": "0.51193357", "text": "def enabled():", "title": "" }, { "docid": "12177c8196589e6c6956ff6eb9341bbb", "score": "0.51106584", "text": "def disable(self):\n self._activator.disable()", "title": "" }, { "docid": "991b0ddb92324688a37cdd2e25ea1acd", "score": "0.50940293", "text": "def enable(self):\n self.set_enabled(True)", "title": "" }, { "docid": "8fc7541bc23954ab745603301df89546", "score": "0.5087181", "text": "def drivenShieldUpdateEnabled(self,symbol,event):\n component= symbol.getComponent()\n currentVal = bool(event['symbol'].getValue())\n maxVal = component.getSymbolByID(\"NUM_ACQUISITION_GROUPS\").getValue()\n minVal = component.getSymbolByID(\"NUM_ACQUISITION_GROUPS\").getMin()\n for x in range(minVal,maxVal+1):\n if(x == minVal):\n grpId = \"DRIVEN_SHIELD_MENU\"\n else:\n grpId = \"DRIVEN_SHIELD_MENU_\" +str(x)\n component.getSymbolByID(grpId).setVisible(currentVal)", "title": "" }, { "docid": "2a8382513aa2229419ddcaa2a01bd877", "score": "0.5082812", "text": "def user_eligible_p(self, user):\n # registration closed, then eligibility doesn't come into play\n if not self.openreg:\n return False\n \n if self.eligibility is None:\n return True\n \n # is the user eligible for one of these cases?\n for eligibility_case in self.eligibility:\n if user.is_eligible_for(eligibility_case):\n return True\n \n return False", "title": "" }, { "docid": "c3f5d42ecc094a292a19fb6e58245955", "score": "0.507657", "text": "def disabled(self):\n self.leftMotor.set(0)\n self.rightMotor.set(0)\n while self.isDisabled():\n wpilib.Timer.delay(0.01)", "title": "" }, { "docid": "c835fcc46880a934993fa19b78b314de", "score": "0.5072085", "text": "async def badminallow(self, ctx, *, yes_no = None):\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\tawait ctx.send(Utils.yes_no_setting(ctx,\"Bot-admin disabled command access\",\"BAdminDisabledAccess\",yes_no))", "title": "" }, { "docid": "9ebbdc9ce539c848c6edba46a02ac383", "score": "0.50703067", "text": "def EnableUserBreak(self):\n if self.force_auto_sync:\n self.get('EnableUserBreak')\n return self._EnableUserBreak", "title": "" }, { "docid": "7e7b2e4659f423dc2ef26e921190683b", "score": "0.506943", "text": "def set_enabled(self, value):\n self.enabled = value\n DB_SESSION.commit()", "title": "" }, { "docid": "42986c6d00dd2eba7fd028dabd7c2c5a", "score": "0.5064483", "text": "def Enable(self):\n self._rx.EnableEdid()", "title": "" }, { "docid": "c3d02a61fc53be094c3a06e37fb24c04", "score": "0.50466347", "text": "def set_suspend_out(self, enable: bool) -> None:\n if self._handle is not None:\n set_suspend_out(self._handle, enable)\n else:\n raise Ft4222Exception(\n Ft4222Status.DEVICE_NOT_OPENED,\n \"This handle is closed!\"\n )", "title": "" }, { "docid": "62d757c209eb1caef5acfbbf75985329", "score": "0.5043777", "text": "def disable(self):\n self.data.update(\n enabled=False)", "title": "" }, { "docid": "d61ba66206673940eef931385e330e10", "score": "0.5034206", "text": "def SetEnable(self, value):\r\n\r\n self._SetProperty(POP_ENABLE, value)", "title": "" }, { "docid": "2195a89b275c5334804d7442bbc32e84", "score": "0.5031635", "text": "def 
disable_restricted(self, ):\n todisable = [(self.reftrack.duplicate, self.duplicate_tb),\n (self.reftrack.delete, self.delete_tb),\n (self.reftrack.reference, self.reference_tb),\n (self.reftrack.replace, self.replace_tb),]\n for action, btn in todisable:\n res = self.reftrack.is_restricted(action)\n btn.setDisabled(res)", "title": "" }, { "docid": "19fb5a2f39d87c15dc1b92bd88106bc9", "score": "0.5030987", "text": "def schema_locking_enabled(self, value):\n value = self._editor.value_to_boolean(value)\n self._editor.set_element_value(self._schema_locking_enabled_element, value)", "title": "" }, { "docid": "c97021aa3d35fd99c0b56eb28f32db57", "score": "0.5028897", "text": "def disable_mod(name):\r\n return change_mod_status(name, 'disable')", "title": "" }, { "docid": "6bae016ce11f1ec2ccb0960142ec741b", "score": "0.50228876", "text": "def SetEnable(self, value):\n\n self._SetProperty(POP_ENABLE, value)", "title": "" }, { "docid": "77f5ef8888a2d101ed66847d3a14da65", "score": "0.50129926", "text": "def upframe_disable(self): \n rd_data = self.__axi4lite.read(0x10,1)\n wr_data = (rd_data[0] & 0xFD) | 0x00;\n self.__axi4lite.write(0x10, [wr_data],1) \n return None", "title": "" }, { "docid": "0b22860d262ad9849962edecf2021e83", "score": "0.5006902", "text": "def set_x4_io_pin_disable(self, pin):\n return _moduleconnectorwrapper.NotSupported_set_x4_io_pin_disable(self, pin)", "title": "" }, { "docid": "9024fab29089a04e364c21f8de289730", "score": "0.500578", "text": "def set_vehicle_control_manual_override(self, enable):\n self.hud.notification('Set vehicle control manual override to: {}'.format(enable))\n self.vehicle_control_manual_override_publisher.publish((Bool(data=enable)))", "title": "" }, { "docid": "6d42047d11c26bc8f9be4913cd57eea8", "score": "0.50055", "text": "def disableTokenTransfers(self, _disable: bool) -> None:\n self.require_owner_only()\n flexible_token = self.create_interface_score(self._token.get(), FlexibleToken)\n flexible_token.disableTransfer(_disable)", "title": "" }, { "docid": "b78762e27145e9175af25d4c9f90003c", "score": "0.50042635", "text": "def user_disabled(username, ldap_conn):\n search_filter = '(&(objectclass=User)(sAMAccountName=%s))' % username\n ldap_conn.search(search_base=const.AUTH_SEARCH_BASE,\n search_filter=search_filter,\n attributes=['userAccountControl'])\n if ldap_conn.entries:\n user = ldap_conn.entries[0]\n disabled = user.userAccountControl.value >> 1 & 1\n return bool(disabled)\n else:\n # It's a rouge lab or the user has been deleted from AD\n # either way, nuke it.\n return True", "title": "" }, { "docid": "fe974a457b77846c8ccd7228af1ce128", "score": "0.5002861", "text": "def set_wakeup_interrupt(self, enable: bool) -> None:\n if self._handle is not None:\n set_wakeup_interrupt(self._handle, enable)\n else:\n raise Ft4222Exception(\n Ft4222Status.DEVICE_NOT_OPENED,\n \"This handle is closed!\"\n )", "title": "" }, { "docid": "0178acd06f3e3c45614c1ff7a4e377d0", "score": "0.5000888", "text": "def is_disabled(self): # real signature unknown; restored from __doc__\n return False", "title": "" }, { "docid": "533985646c9a005eaf380a3b1e5df046", "score": "0.49995568", "text": "def set_enabled(obj, value):\n if value:\n obj.Enable()\n else:\n obj.Disable()", "title": "" }, { "docid": "79603f9570202388ec700cb768a7d89b", "score": "0.49938062", "text": "def suspend_user(self, username):\n return self(f\"UPDATE users SET suspended=1 WHERE username='{username}'\")", "title": "" }, { "docid": "6b072757ac17588a84a42b7592a07eaa", "score": "0.4987657", "text": 
"def revoke_on_unenroll_disabled(self):\n if \"revokeOnUnenrollDisabled\" in self._prop_dict:\n return self._prop_dict[\"revokeOnUnenrollDisabled\"]\n else:\n return None", "title": "" }, { "docid": "2559c91b8615674be6c7d1449ed0093a", "score": "0.49859273", "text": "def set_enabled(self, enabled):\n self._enabled = enabled\n # Reset values\n self.reset()", "title": "" } ]
a50c599aad9213465e9e1baf10f6f5ac
Column vector of requirement weightings. Each requirement contributes to the overall model according to its weight.
[ { "docid": "89df50998c31df86946bc1ba30e971fb", "score": "0.8679849", "text": "def weight(self):\n vec = np.array([[reqt.weight for reqt in self.requirements]])\n return vec.T # Return as column vector", "title": "" } ]
[ { "docid": "8ef7c41c0a6c88cc1bdd1fb886ab57b7", "score": "0.688155", "text": "def weights(self):\n pass", "title": "" }, { "docid": "3edddcd5c994c6d7de7defb22a244e47", "score": "0.65906084", "text": "def get_weights(self) -> List[float]:\n return self.weight", "title": "" }, { "docid": "3edddcd5c994c6d7de7defb22a244e47", "score": "0.65906084", "text": "def get_weights(self) -> List[float]:\n return self.weight", "title": "" }, { "docid": "401a8759d49ab334142a538f008b0cd2", "score": "0.6568331", "text": "def weights ( self ) :\n N = len ( self ) \n return array ( 'd' , ( self.weight ( i ) for i in range ( N ) ) )", "title": "" }, { "docid": "627e8a144d634b0aac3feb26b4bae94c", "score": "0.65408933", "text": "def get_weights(self):\r\n return self.weights # returning the weight matrix\r", "title": "" }, { "docid": "83851abc9f287ef3acb56f58c0beb2bd", "score": "0.6531422", "text": "def weights(self):\r\n\t\treturn None", "title": "" }, { "docid": "52036bc33c76dfd5cb963d253f4d13d0", "score": "0.6527212", "text": "def weights(self):\n return self._weights", "title": "" }, { "docid": "45b75d6c663569240d63dbc2d4a55753", "score": "0.6508479", "text": "def get_weights(self):\n return [self._W, self._b]", "title": "" }, { "docid": "d506e384829a3d5e6d7759f8091eeca0", "score": "0.6501087", "text": "def weights(self):\n return self.__weights", "title": "" }, { "docid": "261be54a5dbd546a65e51e45466ef3ec", "score": "0.6481984", "text": "def weights(self) -> List[Param]:\n return []", "title": "" }, { "docid": "41cce492d565df89f812e72d047378c7", "score": "0.6468878", "text": "def weights ( self ) :\n return self.__weights", "title": "" }, { "docid": "41cce492d565df89f812e72d047378c7", "score": "0.6468878", "text": "def weights ( self ) :\n return self.__weights", "title": "" }, { "docid": "405d04b8da777ec785f34ebab340c0c8", "score": "0.64530957", "text": "def weights(self):\n return self._weights", "title": "" }, { "docid": "675c934712ac30f8cba4750244f19614", "score": "0.64354753", "text": "def get_weights(self, ):\n return [w for l in self.weights for w in l.flat]", "title": "" }, { "docid": "dbd3b0e4a782cc5de3e280b0b5a5bd02", "score": "0.6392153", "text": "def getWeights(self):\n if self.weight0 is not None and self.weights is not None:\n return [self.weight0] + list(self.weights)\n else:\n return None", "title": "" }, { "docid": "04406e34ca7745acc17d940328e6d4a3", "score": "0.63585645", "text": "def getWeights(self):\n return self.W, self.b", "title": "" }, { "docid": "32a65fe16826daf00b52bca1e2b5a93e", "score": "0.6340119", "text": "def _b12_weights_ ( self ) :\n N = len ( self ) \n return array ( 'd' , ( self.weight ( i ) for i in range ( N ) ) )", "title": "" }, { "docid": "717de18d8624c4f96480867f3fa538d1", "score": "0.6333302", "text": "def weight_values(self):\n return self.get_weights().data", "title": "" }, { "docid": "7884ea413d33c89c46423e1e9cdb069a", "score": "0.62887454", "text": "def extract_weights(self):", "title": "" }, { "docid": "acb9c9b0d1103b97e61a4c6aae739f47", "score": "0.6286838", "text": "def weights(self, params):\n return np.array([1.0])", "title": "" }, { "docid": "8c54527c50267b8d0a407045f852b78e", "score": "0.62785", "text": "def get_weight(self):", "title": "" }, { "docid": "d428406e661dbacd1bffc418ce048bec", "score": "0.6268145", "text": "def get_weights(self):\n wval = {}\n for q in self.qcomponent:\n for midx, w1 in zip(q.i, q.w):\n if tuple(midx) in wval:\n wval[tuple(midx)] += w1\n else:\n wval[tuple(midx)] = w1\n return wval", "title": "" }, { "docid": 
"0a5da784b188d90dbd73857fe65e5042", "score": "0.6255061", "text": "def getWeights(self) -> retval:\n ...", "title": "" }, { "docid": "0a5da784b188d90dbd73857fe65e5042", "score": "0.6255061", "text": "def getWeights(self) -> retval:\n ...", "title": "" }, { "docid": "5f2b021e736974c18fd20b4210993ae1", "score": "0.62464136", "text": "def get_weights(self):\r\n return self.w", "title": "" }, { "docid": "65b01108830ff52f6c036203bfed027f", "score": "0.62196124", "text": "def weight_params(self) -> Sequence[Parameter]:\n return self._weight_params", "title": "" }, { "docid": "37a317d44c9f70316df4d65d52b64cc8", "score": "0.62164265", "text": "def weight(self) -> Tensor:\n return self._weights", "title": "" }, { "docid": "8408441b4c581c7866c51b715b3419a6", "score": "0.62142205", "text": "def get_we_weightunit():\n \n return ['lb', 'kg', 'bodyweight']", "title": "" }, { "docid": "134c0591ccf70e5ed484557569a2142f", "score": "0.6203213", "text": "def weight(self):\n return self.specs['weight'] / 1000.0", "title": "" }, { "docid": "6b969afc8d4e45aa753fed1a4b88f97a", "score": "0.6189891", "text": "def add_weight(self, w=1.0):\n # Check if the input weight is valid\n if w <= 0:\n raise ValueError(\"Weight value must be bigger than 0.\")\n\n # Check if we have already added weights\n if len(self._grid_points[0][0]) == self._dimension:\n warnings.warn(\"Weight has already been added. Please use modify_weight() to change weight value\")\n return\n\n # Start adding weights\n weighted_gp = []\n for cols in self._grid_points:\n weighted_gp_row = []\n for row in cols:\n temp = row\n temp[:] = [tmp / w for tmp in temp]\n temp.append(w)\n weighted_gp_row.append(temp)\n weighted_gp.append(weighted_gp_row)\n\n # Update class variables\n self._no_change = True\n self._grid_points = weighted_gp", "title": "" }, { "docid": "f39873edb3a19629dcdbaf8cf048b8b1", "score": "0.6186213", "text": "def gen_weight(self):\n\t\treturn self.w_scale / 2.0", "title": "" }, { "docid": "638249b81683c013f01924ec57d19042", "score": "0.61856574", "text": "def get_weights(self):\n return self.w", "title": "" }, { "docid": "638249b81683c013f01924ec57d19042", "score": "0.61856574", "text": "def get_weights(self):\n return self.w", "title": "" }, { "docid": "638249b81683c013f01924ec57d19042", "score": "0.61856574", "text": "def get_weights(self):\n return self.w", "title": "" }, { "docid": "00cbda8dffa32392a8f76c5dac4ae4ee", "score": "0.61766076", "text": "def get_weights(self):\r\n return self.weights", "title": "" }, { "docid": "764637b263ef7546014494a1602eeee8", "score": "0.6176294", "text": "def calc_weight(self):\r\n coeffs = [8.79055, 4.2928] # the coeffs of the linear eauation (found according UR5 and motoman)\r\n weights = [0] # the wieght of each link\r\n acc_length = 0 # accumelated length\r\n acc_weight = 0 # accumelated weight\r\n for link in self.links[1:]:\r\n acc_length = acc_length + float(link)\r\n weights.append(round(acc_length * coeffs[0] + coeffs[1] - acc_weight, 2))\r\n acc_weight = acc_weight + weights[-1]\r\n while len(weights) < 7:\r\n weights.append(1)\r\n return [str(weight) for weight in weights]", "title": "" }, { "docid": "c61457191aace038dc0e587ceab5b6bf", "score": "0.61598444", "text": "def weight(self):\n return (self.galleons * 31.103) + (self.sickles * 11.34) + (self.knuts * 5.0)", "title": "" }, { "docid": "5cb8d1955c1ee5e3f53bf4a2fa57b365", "score": "0.6122", "text": "def weight ( self , index ) :\n\n return self.__weigths[index]", "title": "" }, { "docid": "0de5b06f63872e977dcabb22219f0434", "score": 
"0.61213756", "text": "def modify_weight(self, w=-1.0):\n # Check if the input weight is valid\n if w <= 0:\n raise ValueError(\"Weight value must be bigger than 0.\")\n\n # Check if we have already added weights\n if len(self._grid_points[0][0]) != self._dimension:\n warnings.warn(\"Need to add weights first.\")\n return\n\n # Start modifying weights\n weighted_gp = []\n for cols in self._grid_points:\n weighted_gp_row = []\n for row in cols:\n temp = row[0:self._dimension - 1]\n temp[:] = [tmp * row[-1] for tmp in temp]\n temp[:] = [tmp / w for tmp in temp]\n temp.append(w)\n weighted_gp_row.append(temp)\n weighted_gp.append(weighted_gp_row)\n\n # Update grid points\n self._grid_points = weighted_gp", "title": "" }, { "docid": "694f0940bda36b8fb4168cbcb6315d02", "score": "0.6116951", "text": "def weights(self):\n return self._ir_weights", "title": "" }, { "docid": "ce48af58d99a77b916ee912e41a522ca", "score": "0.61153924", "text": "def calculate_weight(self):\n\n\t\tweight = 0\n\t\tfor item in self.items:\n\t\t\tif item == \"Health Potions\" or item == \"Magic Potions\":\n\t\t\t\tweight += self.items[item]\n\n\t\tself.weight = weight", "title": "" }, { "docid": "12c8fc6742d311f4add69a2464765714", "score": "0.609058", "text": "def weights(self):\n var = self.var\n return var / var.sum()", "title": "" }, { "docid": "72e4479b7f0851339479eb5a55b1656e", "score": "0.6063868", "text": "def get_weight(self) -> float:\n raise NotImplementedError", "title": "" }, { "docid": "0a8e370c64655d712fd963527091e1aa", "score": "0.60604787", "text": "def test_weight(self):\n\n t = np.array([[[2, 1, 0], [0, 0, 1]]], dtype='float32')\n\n W = np.array([[[2, 3, 4], [1, 2, 3]]], dtype='float32')\n\n b = np.full(shape=(3,), fill_value=0.0, dtype='float32')\n\n expected_p = np.array([[4, 3, 3]])\n\n with tf.Session() as sess:\n p = sess.run(\n self.comp_model.weight(\n reg_uv=t,\n W=W,\n b=b))\n\n np.testing.assert_allclose(p, expected_p)", "title": "" }, { "docid": "5d3ce60b06ab6b06effa0cb7e9f7d66b", "score": "0.60549206", "text": "def available_weights(self):\n return list(\n {p for entry in self._entries for p in entry.data[\"weights\"].keys()}\n )", "title": "" }, { "docid": "8e06cba9d33267e3e32c51492a9a81b9", "score": "0.6037166", "text": "def get_weights(self):\n return self._weights", "title": "" }, { "docid": "b860aa5cc37fd2b230987a36dc7aac51", "score": "0.60367763", "text": "def merit(self):\n # FIXME: Ignore requirements without relationships! 
They will\n # result in nan and break this.\n return np.multiply(self.weight, self.satisfaction).sum()", "title": "" }, { "docid": "c207053c55d87834b98f1a160b81baa5", "score": "0.6028998", "text": "def weight(self):\n for r_idx in range(self.matrix.rows):\n row = self.matrix.row(r_idx)\n r_sum = sum(row)\n for c_idx in range(len(row)):\n self.matrix.set(r_idx, c_idx, row[c_idx] / r_sum)", "title": "" }, { "docid": "99976f957ac08c0020e75cd5bf71b0b1", "score": "0.6022437", "text": "def get_weight(self):\n return self.graph_weights.reshape(self.size_graph_rows, self.size_graph_cols)", "title": "" }, { "docid": "b58c500562a6d3a6bbe1447776a4c8da", "score": "0.59908664", "text": "def calcW(self):\n weightsDict = {}\n for k in self.dataSetDict.keys():\n X = np.array([np.ones(2), self.dataSetDict[k][:,0]]).transpose()\n Y = self.dataSetDict[k][:,1]\n weightsDict.update({k:np.dot(np.linalg.pinv(X),Y)})\n return weightsDict", "title": "" }, { "docid": "ba5ec29f1466b1e1dab704052e914527", "score": "0.5986893", "text": "def test_weight(self):\n\n t = np.array([[[2, 1, 0], [0, 0, 1]]], dtype='float32')\n W = np.array([[2], [3]], dtype='float32')\n\n b = np.full(shape=(3,), fill_value=0.0, dtype='float32')\n\n expected_p = np.array([[4, 2, 3]])\n\n with tf.Session() as sess:\n p = sess.run(\n self.comp_model.weight(\n reg_uv=t,\n W=W,\n b=b))\n\n np.testing.assert_allclose(p, expected_p)", "title": "" }, { "docid": "e7faef4501116137dd52b31b5f1cf722", "score": "0.59841883", "text": "def weight_lbs(self):\n return self._weight_lbs", "title": "" }, { "docid": "f122d3347ec8717a7d31e9f272d2d7e5", "score": "0.59819996", "text": "def weights(self):\n return checkpoint_utils.load_variable(\n self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT)", "title": "" }, { "docid": "35728910e2479d96d04cad5aa95af81b", "score": "0.59654886", "text": "def get_weight_variables(self):\n var_names = [pv['name'] for pv in self.data['reference']]\n var_weight = {var: weight for var, weight in zip(var_names, self.data['feat_weights'])}\n return var_weight", "title": "" }, { "docid": "6b15b8d4e0592a34591fc4f2de9cec2d", "score": "0.59642845", "text": "def weight(self):\n return self.impl.weight", "title": "" }, { "docid": "33ca5d5ca381eaae3bfc09d349c604f3", "score": "0.5957571", "text": "def get_weight_info(self):\n\t\ttrainable_weights = tf.keras.backend.get_session().run(self.model.trainable_weights)\n\t\tfor layer_weight in trainable_weights:\n\t\t\tlayer_shape = layer_weight.shape\n\t\t\tself.weight_shapes.append(layer_shape)", "title": "" }, { "docid": "44b09360b305b1e5a9e76547d638c1b6", "score": "0.59521526", "text": "def weights(self):\n if self.is_lookup:\n return self.E.as_array()\n else:\n return self.parameters[\"E\"].as_array()", "title": "" }, { "docid": "4de0786d251703df121a6f74d014c589", "score": "0.5950351", "text": "def _compute_weights(self):\n\n n = self.n\n lambda_ = self.alpha**2 * (n +self.kappa) - n\n\n c = .5 / (n + lambda_)\n self.Wc = np.full(2*n + 1, c)\n self.Wm = np.full(2*n + 1, c)\n self.Wc[0] = lambda_ / (n + lambda_) + (1 - self.alpha**2 + self.beta)\n self.Wm[0] = lambda_ / (n + lambda_)", "title": "" }, { "docid": "13e149f3b5f5b7be75098cd43057dc0e", "score": "0.5945066", "text": "def weight(self):\n\n return self._weight", "title": "" }, { "docid": "f908dda3f03eeac43700b750f472dc0b", "score": "0.5944574", "text": "def _weights_for_terms(self, terms):\n raise NotImplementedError", "title": "" }, { "docid": "25af87e4c14311f2c77b6af14586ed11", "score": "0.5935008", "text": "def get_weights(self): 
#返回权值\n return self._weights", "title": "" }, { "docid": "7ca3969d8e0e9a13ecd0df304e954965", "score": "0.5934948", "text": "def easyWeighting(self, weights, values):\n summedVal = 0 \n for k, weight in enumerate(weights): \n summedVal += weight * values[k] \n \n return summedVal", "title": "" }, { "docid": "35829117ea9de1c6f05bcbab7a98106a", "score": "0.5931859", "text": "def weight(self) -> float:", "title": "" }, { "docid": "e1387d6cc7ea6cb2dc4f67df2d4c5559", "score": "0.5927202", "text": "def get_weights(self, weights):\n return self.model.get_weights(weights)", "title": "" }, { "docid": "782dd4e41663cda6b8a7e99c1ff34c3a", "score": "0.59211224", "text": "def weights_column(self):\n return self._parms.get(\"weights_column\")", "title": "" }, { "docid": "782dd4e41663cda6b8a7e99c1ff34c3a", "score": "0.59211224", "text": "def weights_column(self):\n return self._parms.get(\"weights_column\")", "title": "" }, { "docid": "2dbf6e38ac5516c10b559a248ea0abc4", "score": "0.59201044", "text": "def compute_weights(self):\n weights = [sin(pi/( 2+(0.04*day)**4 ))**8 for day in range(-30,1)]\n # Last days are more important\n weights[30] = weights[30]+3\n weights[29] = weights[29]+2\n weights[28] = weights[28]+1\n self.weights = weights", "title": "" }, { "docid": "c6f4942c02fb85d9c0f5f6cea66b8514", "score": "0.59144", "text": "def weights_(self):\n weights = []\n for layer in range(len(self.hidden_units)):\n weights.append(self.get_tensor_value(\n \"encoder/dnn/layer%d/Linear/Matrix:0\" % layer))\n for layer in range(len(self.hidden_units)):\n weights.append(self.get_tensor_value(\n \"decoder/dnn/layer%d/Linear/Matrix:0\" % layer))\n weights.append(self.get_tensor_value(\"linear_regression/weights:0\"))\n return weights", "title": "" }, { "docid": "bf0908ec76539c31c276b411ff394aa8", "score": "0.5890549", "text": "def calculate_weight(self):\r\n weight = np.sum((self.mean_data * self.agreement) / np.sum(self.agreement))\r\n return weight", "title": "" }, { "docid": "d1f5885fe0123758894b3fc7a613398f", "score": "0.58813876", "text": "def assign_weights(self):\n if not self.scaled:\n warnings.warn('Assigning interaction weights without scaling dataset!')\n\n pruned_regulon, sub_expr = self._prune_regulon(self.expr, self.regulon, self.regulon_size)\n self.expr = sub_expr\n # noinspection PyTypeChecker\n r, p = regulon_utils.spearmanr(self.expr)\n\n r_frame = pd.DataFrame(r, columns=self.expr.columns, index=self.expr.columns)\n p_frame = pd.DataFrame(p, columns=self.expr.columns, index=self.expr.columns)\n\n F_statistics = {regulator: regulon_utils.f_regression(\n self.expr.reindex(frame.DownGene, axis=1),\n self.expr.reindex([regulator], axis=1).values.ravel())\n for regulator, frame in pruned_regulon.groupby('UpGene')}\n\n weights = pd.concat([self._structure_weights(regulator,\n pruned_regulon,\n F_statistics,\n r_frame,\n p_frame)\n for regulator in F_statistics])\n\n self.regulon_weights = weights[~np.isinf(weights.MoA)]", "title": "" }, { "docid": "bc7621035845f24d407cfbf224060918", "score": "0.5881271", "text": "def _collect_weights_descriptions(self) -> List[WeightDescription]:", "title": "" }, { "docid": "0070393d6615f7169e85031b609c61b4", "score": "0.5873552", "text": "def weights(self) -> np.ndarray:\n self._check_fitted()\n return np.asarray(self._fit_result.x)", "title": "" }, { "docid": "11cf9d778eabd1cecb8bf06329b9135b", "score": "0.58692986", "text": "def weight(self):\n return self._weight", "title": "" }, { "docid": "11cf9d778eabd1cecb8bf06329b9135b", "score": "0.58692986", "text": "def 
weight(self):\n return self._weight", "title": "" }, { "docid": "11cf9d778eabd1cecb8bf06329b9135b", "score": "0.58692986", "text": "def weight(self):\n return self._weight", "title": "" }, { "docid": "11cf9d778eabd1cecb8bf06329b9135b", "score": "0.58692986", "text": "def weight(self):\n return self._weight", "title": "" }, { "docid": "11cf9d778eabd1cecb8bf06329b9135b", "score": "0.58692986", "text": "def weight(self):\n return self._weight", "title": "" }, { "docid": "11cf9d778eabd1cecb8bf06329b9135b", "score": "0.58692986", "text": "def weight(self):\n return self._weight", "title": "" }, { "docid": "99d2ea428f27e1423dd1977c896497a3", "score": "0.5841913", "text": "def calculate_weight(self,x,y):\n x1,x2 = x\n y1,y2 = y\n size = np.sqrt(np.power(x2- x1,2)+np.power(y2-y1,2))\n self.w = np.array([size] * 2)", "title": "" }, { "docid": "f18930971d3d29722c0313edceec15dd", "score": "0.5833139", "text": "def get_weight_shapes(self):\n\t\treturn self.weight_shapes", "title": "" }, { "docid": "71bbe38e4a0f3f01f75a98e6ba2e1d10", "score": "0.5831377", "text": "def weight(self):\n return self.__weight", "title": "" }, { "docid": "71bbe38e4a0f3f01f75a98e6ba2e1d10", "score": "0.5831377", "text": "def weight(self):\n return self.__weight", "title": "" }, { "docid": "71bbe38e4a0f3f01f75a98e6ba2e1d10", "score": "0.5831377", "text": "def weight(self):\n return self.__weight", "title": "" }, { "docid": "a5bdf4ef613ca9d6f6bd0b61e1b131a0", "score": "0.5826416", "text": "def get_weights(self):\n return self.model.get_weights()", "title": "" }, { "docid": "7a71139cad8721a433e904684444a289", "score": "0.5826219", "text": "def get_weight(self):\n\t\treturn self.weight", "title": "" }, { "docid": "d7023545e8d0e0c64ec9ed11ec35e621", "score": "0.5825", "text": "def getWeight():\n\t\treturn weight", "title": "" }, { "docid": "1a70199bfa1bad057889da69c7c0f2a6", "score": "0.582442", "text": "def total_weight(self):\n return self.F[0][0] + self.F[0][1]", "title": "" }, { "docid": "6a4e204571eabac38a1b6149b17930fa", "score": "0.58240217", "text": "def weight(self):\n return self.container['weight']", "title": "" }, { "docid": "a65ec057eac5c7f8051ae9160fef1c60", "score": "0.58146673", "text": "def get_weights(self):\n return (\n self._w_forget_gete, \n self._w_update_gate,\n self._w_tanh,\n self._w_output_gate,\n self._w_out,\n self._b_forget_gate,\n self._b_update_gate,\n self._b_tanh,\n self._b_output_gate,\n self._b_out\n )", "title": "" }, { "docid": "339a032e04676d0d883106a06942c4a5", "score": "0.58130157", "text": "def weight ( self , index ) :\n\n return self.__weights[index]", "title": "" }, { "docid": "d98ff7d74be682b23f2c94545ed37205", "score": "0.58125216", "text": "def get_weights(self):\n params = self.weights\n return backend.batch_get_value(params)", "title": "" }, { "docid": "6ff038d0f9570da5d39916eca80203bc", "score": "0.58109665", "text": "def puppy_weights():\n\tfor puppy in session.query(Puppy).order_by(Puppy.weight.asc()).all():\n\t\tprint puppy.name, puppy.weight", "title": "" }, { "docid": "6c26885a9dc841146fbab0ca7ae5612b", "score": "0.5799563", "text": "def my_assign_weights(context, data):\n pass", "title": "" }, { "docid": "7feac4484055b8e5d38d7f0dbbff675b", "score": "0.5797229", "text": "def generate_input_weights(self):\n\n input_weights = np.random.uniform(self.input_weight_bounds[0], self.input_weight_bounds[1], \\\n size=(self.N, self.sequence_dimension + 2)) # 2 added for the distractor and cue\n \n if isinstance(self.input_gain, Sequence):\n for i in input_weights.shape[1]:\n 
input_weights[:,i] *= self.input_gain[i]\n else:\n input_weights *= self.input_gain\n\n fraction_mask = np.zeros(self.N * (self.sequence_dimension + 2))\n fraction_mask[:int(self.input_fraction * self.N)] = 1.0\n np.random.shuffle(fraction_mask)\n fraction_mask = fraction_mask.reshape((self.N, self.sequence_dimension + 2))\n input_weights = input_weights * fraction_mask\n\n return input_weights", "title": "" }, { "docid": "a22ca975fcbbae1ae45a80ceaea64eed", "score": "0.57971466", "text": "def weights(self) :\n\t\treturn sign(self.L) #1/(self.L + 0.00001) ", "title": "" }, { "docid": "ff0e71e1291cba87dc919c92fd9dbfb4", "score": "0.57965213", "text": "def get_weights(self):\n return WeightMap(self)", "title": "" }, { "docid": "d3f5f83fdec49ccadaf050fef2d99ea5", "score": "0.5795752", "text": "def variables(self):\n return self._weights", "title": "" }, { "docid": "a904fbda0f5e5efe8e0dcce2ae23a2cc", "score": "0.5791861", "text": "def get_weight(self):\r\n return self.weight", "title": "" }, { "docid": "9d5a6f76a9f62b62bee9e4cdaafa4141", "score": "0.578856", "text": "def weight(self):\n\n return self._weight", "title": "" }, { "docid": "21e30e22bd73c9ffc401b69b26fe3dcd", "score": "0.57801604", "text": "def weights_parameters(fields: List[str], weights: List[float]) -> List[str]:\n fields_and_weights: List[List[str]] = [[f\"{field}^{weight}\" for weight in weights] for field in fields]\n return [\" \".join(combination) for combination in itertools.product(*fields_and_weights)]", "title": "" } ]
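The positive passage in this record returns the per-requirement weights as an N×1 column vector so that an element-wise product with an equally shaped satisfaction vector collapses to a single weighted score (compare the `merit` passage earlier in the same record). A minimal runnable sketch of that pattern; the `Requirement` class and the sample numbers are hypothetical, not taken from the dataset:

```python
import numpy as np

class Requirement:
    """Hypothetical stand-in for the requirement objects in the passage."""
    def __init__(self, weight, satisfaction):
        self.weight = weight
        self.satisfaction = satisfaction

requirements = [Requirement(0.5, 0.9), Requirement(0.3, 0.4), Requirement(0.2, 1.0)]

# Build the weights as a 1xN row, then transpose to an Nx1 column vector,
# exactly as the positive passage does.
weight = np.array([[r.weight for r in requirements]]).T
satisfaction = np.array([[r.satisfaction for r in requirements]]).T

# Weighted merit: element-wise product summed over all requirements.
merit = np.multiply(weight, satisfaction).sum()
print(merit)  # 0.5*0.9 + 0.3*0.4 + 0.2*1.0 ≈ 0.77
```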
f270e502ea2e3a9b37363537d159d82c
Return True if cls_name is a message object based on info in unified
[ { "docid": "6bbf76d43d154cf986de127c27fdb77c", "score": "0.62210727", "text": "def class_is_stats_message(cls):\n\n return \"stats_type\" in of_g.unified[cls][\"union\"]", "title": "" } ]
[ { "docid": "289d5549feaa3be809320a36ef7358a7", "score": "0.75476646", "text": "def class_is_message(cls):\n return \"xid\" in of_g.unified[cls][\"union\"] and cls != \"of_header\"", "title": "" }, { "docid": "a21cb78dfc53864e804c668d44d734ad", "score": "0.6970383", "text": "def is_instance_of_message(instance):\n return isinstance(instance, Message)", "title": "" }, { "docid": "2645a62e62322e2e283c39524edbcf07", "score": "0.6921256", "text": "def _isClass(self, **kwargs): # {{{\n if not self._isKeyDict(_l=['_cn', '_o'], **kwargs):\n kwargs['msg'] = \"you must define the '_o' and '_cn' keys, stupid!\"\n self.error(**kwargs)\n return False\n return kwargs['_o'].getClassname() == kwargs['_cn']", "title": "" }, { "docid": "dc3889e796ee3e68367cac108200358b", "score": "0.64902514", "text": "def _is_ms_class(obj):\n return hasattr(obj, '__ms_class__')", "title": "" }, { "docid": "1e6216c19823f148dc16c467c2b1c8f1", "score": "0.635012", "text": "def is_message(self):\n return self.type == \"message\" or self.type == \"pmessage\"", "title": "" }, { "docid": "964a66745cd1dba3fa021c3723fd7721", "score": "0.6301203", "text": "def has_channel_class(self, name):\n return name in self._channelcls", "title": "" }, { "docid": "a17f496d3487194e5816ddb95f8623fd", "score": "0.6271375", "text": "def _check_class(self, obj, classes):\n return obj.type in classes", "title": "" }, { "docid": "d563655d1cef14824f517f79c987955f", "score": "0.6204748", "text": "def has_class(self, cls):\r\n\r\n ns = cls.get_namespace()\r\n tn = cls.get_type_name()\r\n\r\n c = self.classes.get('{%s}%s' % (ns, tn))\r\n if c is None:\r\n return False\r\n\r\n if issubclass(c, ComplexModelBase) and \\\r\n issubclass(cls, ComplexModelBase):\r\n o1 = getattr(cls, '__orig__', None) or cls\r\n o2 = getattr(c, '__orig__', None) or c\r\n\r\n if o1 is o2:\r\n return True\r\n\r\n # So that \"Array\"s and \"Iterable\"s don't conflict.\r\n if o1 is Array or o2 is Array:\r\n return True\r\n\r\n raise ValueError(\"classes %r and %r have conflicting names.\" %\r\n (cls, c))\r\n return True", "title": "" }, { "docid": "ac1a2767a4ae50fde32df66c6a2aea41", "score": "0.61693656", "text": "def has_target_class(self, name):\n return name in self._targetcls", "title": "" }, { "docid": "52286a0861f9865d85e2d78165ae2f95", "score": "0.61616254", "text": "def _check_input_class(self, name, obj, obj_class):\n is_valid = isinstance(obj, obj_class)\n if not is_valid:\n msg = 'The %s must be a %s object (obtained: %s).' 
\\\n % (name, obj_class.__name__, obj.__class__.__name__)\n self.add_error(msg)\n return is_valid", "title": "" }, { "docid": "ee79aa624489f0fcb4e2d0490c328659", "score": "0.60700214", "text": "def has_name(self, name):\n return name in self.classes", "title": "" }, { "docid": "ee79aa624489f0fcb4e2d0490c328659", "score": "0.60700214", "text": "def has_name(self, name):\n return name in self.classes", "title": "" }, { "docid": "ee79aa624489f0fcb4e2d0490c328659", "score": "0.60700214", "text": "def has_name(self, name):\n return name in self.classes", "title": "" }, { "docid": "054f684fe2a2bf1c0c0061975ca3e040", "score": "0.60479605", "text": "def IsKindOf(self, info):", "title": "" }, { "docid": "4d655931fec4397b3ba8f9e9b5dd4a96", "score": "0.6020335", "text": "def type_is_of_object(m_type):\n # Remove _t from the type id and see if key for unified class\n if m_type[-2:] == \"_t\":\n m_type = m_type[:-2]\n return m_type in of_g.unified", "title": "" }, { "docid": "11bd078e35433e270055e2dfd5adcfff", "score": "0.5968425", "text": "def is_topic(self, obj):", "title": "" }, { "docid": "d2c941e47901f11b49607099abdb2b88", "score": "0.5878854", "text": "def lowlevel_isinstance(obj, cls):\n mro = record_get(LOAD(CLS_OF(obj)), LITERAL(\"mro\"))\n length = sequence_length(mro)\n index = LITERAL(0)\n while index < length:\n if sequence_get(mro, index) is cls:\n return True\n index = number_add(index, LITERAL(1))\n return False", "title": "" }, { "docid": "03046e0b26268bc1d04220ad204ef0ae", "score": "0.5868786", "text": "def supported_class(cls, classname):\r\n try:\r\n cls = get_class(classname)\r\n return True\r\n except exceptions.IncorrectClassError:\r\n return False", "title": "" }, { "docid": "88a947b4eb4a4189f3258dfcee7eabe1", "score": "0.5832544", "text": "def _is_class_instance(obj):\n return isinstance(obj, (nn.Cell, ops.Primitive)) or _is_ms_class(obj) or hasattr(obj, '__parse_method__')", "title": "" }, { "docid": "d640375c49ed849e291af9b67259a720", "score": "0.578951", "text": "def is_class(self) -> bool:\n return inspect.isclass(self.obj)", "title": "" }, { "docid": "b3e4e5169ff691bb82f2c3141ae229c1", "score": "0.5784708", "text": "def is_msg(self):\n try:\n self.data.index('text/plain')\n return True\n except:\n return False", "title": "" }, { "docid": "d8f29a60167f22d8ee6852fce4f9ce78", "score": "0.5751296", "text": "def looks_like_issubclass(obj, classname):\r\n t = obj\r\n if t.__name__ == classname:\r\n return True\r\n for klass in t.__mro__:\r\n if klass.__name__ == classname:\r\n return True\r\n return False", "title": "" }, { "docid": "7ffec9fe61181e7d1db013d4f68cdc6d", "score": "0.57466406", "text": "def is_instance(self, instance, cls):\n if self.strict:\n return type(instance) == cls\n else:\n return isinstance(instance, cls)", "title": "" }, { "docid": "8923e48bfe231cac3ed6e2127108f68c", "score": "0.57437843", "text": "def is_name(self, obj):", "title": "" }, { "docid": "4bc0ba8b97be3ea18c97d58e836fcfb1", "score": "0.57288307", "text": "def is_word_in_class_name(obj, word):\n return word in obj.__class__.__name__", "title": "" }, { "docid": "fcde08520626502a53f245f41aba4505", "score": "0.5721245", "text": "def isTypeOf(self, typeName):\n\n for cls in type.mro(type(self)):\n if cls.__name__ == typeName:\n return True\n\n return False", "title": "" }, { "docid": "a979e98b7459e3e70a79fddea563f6f6", "score": "0.57165504", "text": "def class_is_oxm(cls):\n if cls.find(\"of_oxm\") == 0:\n return True\n return False", "title": "" }, { "docid": "a6960601839e294a3cc768103371ecb8", 
"score": "0.57017785", "text": "def _is_class_trait(name, cls):\n return isinstance(cls, MetaHasTraits) and name in cls.__class_traits__", "title": "" }, { "docid": "30a70ce9e482ab3ce9337bd127dc7303", "score": "0.56879354", "text": "def validate(self):\r\n\t\treturn self._message_type is not None", "title": "" }, { "docid": "4a1cf9640fdfe59fa82a31b1f10a334d", "score": "0.5673441", "text": "def test_class_has_name(self):\n self.assertTrue(hasattr(self.cls, 'name'))", "title": "" }, { "docid": "413af7f4ec2842afbaf672a61b248d6f", "score": "0.5672162", "text": "def is_a(self, name):\n return self.name == name", "title": "" }, { "docid": "3fdd59fb14da4afcee4f7f5912e3212d", "score": "0.56604606", "text": "def _is_correct_class(self, cls):\r\n return (inspect.isclass(cls) and\r\n not cls.__name__.startswith('_') and\r\n issubclass(cls, self.modifier_class_type))", "title": "" }, { "docid": "42a0f04385e2875e31cc9b2512f37131", "score": "0.5660232", "text": "def _is_msg_from_primary(self, msg, sender: str) -> bool:\n if self._is_msg_for_current_view(msg):\n return self.primary_name == sender\n try:\n return self.primary_names[msg.viewNo] == sender\n except KeyError:\n return False", "title": "" }, { "docid": "d9f6ba31e281c1f42036f2c438cb33d3", "score": "0.5646124", "text": "def matches(cls, iobject):\n return iobject.__class__ == cls.__class__", "title": "" }, { "docid": "88fd7fac8af8eb3028e106234773a9e6", "score": "0.5643419", "text": "def has_class(self, _class):\n\n\t\treturn _class in self.attrs.get(\"class\", \"\").split()", "title": "" }, { "docid": "2c40e6777dd1f3afdc4070404b3f9e66", "score": "0.5626277", "text": "def is_creator(self):\n return isinstance(self.participant, (\n types.ChannelParticipantCreator,\n types.ChatParticipantCreator\n ))", "title": "" }, { "docid": "dc7f45073704b70c6ccf416123e9b6f3", "score": "0.56064636", "text": "def test_message_type(self):\n self.assertEqual(ResourceForecastStateDispatchMessage.CLASS_MESSAGE_TYPE, \n \"ResourceForecastState.Dispatch\")\n self.assertEqual(ResourceForecastStateDispatchMessage.MESSAGE_TYPE_CHECK, True)", "title": "" }, { "docid": "afa32f7ae49dd705c790c074acd737fa", "score": "0.56054586", "text": "def is_instance_of(self, model):\n return model.class_iri in self._types", "title": "" }, { "docid": "b6390ef4737d886a6b4f5afcdfa2a2e9", "score": "0.559013", "text": "def isKindOf(self, class_: Type[Element]):\n return isinstance(self, class_)", "title": "" }, { "docid": "9a9a72130810ee33b863499664560311", "score": "0.55865496", "text": "def class_exists(class_name):\n classes = Actions.__classes.keys()\n return class_name in set(classes)", "title": "" }, { "docid": "75cda3e3a12d61333d0e0da05b4d1ed1", "score": "0.5579693", "text": "def _is_correct_class(self, obj):\n return (inspect.isclass(obj) and\n (not obj.__name__.startswith('_')) and\n issubclass(obj, self.loadable_cls_type))", "title": "" }, { "docid": "75cda3e3a12d61333d0e0da05b4d1ed1", "score": "0.5579693", "text": "def _is_correct_class(self, obj):\n return (inspect.isclass(obj) and\n (not obj.__name__.startswith('_')) and\n issubclass(obj, self.loadable_cls_type))", "title": "" }, { "docid": "c004965eb6b24ab69c98e0f1f652462b", "score": "0.5573834", "text": "def is_a(self, class_uri):\n return ui_is_a(self.ui, class_uri.node)", "title": "" }, { "docid": "dff5914c5f23e42821df50103ed99ceb", "score": "0.55602926", "text": "def test_class_exists(self):\n self.assertEqual(str(type(self.u)), \"<class 'models.user.User'>\")", "title": "" }, { "docid": "f755f6dec34fefb147854a78a83a5270", 
"score": "0.5557248", "text": "def ec_isinstance (object, klass):\n\n if type(klass) is ClassType:\n return isinstance(object, klass)\n elif hasattr(object, '__class__'):\n return ec_issubclass(object.__class__, klass)\n else:\n return 0", "title": "" }, { "docid": "7f57ff4651a5227227a64d4069e73dfc", "score": "0.55514663", "text": "def match(msg_obj):\n if msg_obj.type == events.TYPE_PRIVMSG:\n return True\n elif msg_obj.type == events.NICK:\n return True\n elif msg_obj.type == events.JOIN:\n return True\n elif msg_obj.type == events.PART:\n return True\n elif msg_obj.type == events.QUIT:\n return True", "title": "" }, { "docid": "a88c703615eeedb1582e585a55a4ad94", "score": "0.55402756", "text": "def is_supported(self, message):\n return isinstance(message, MailBag)", "title": "" }, { "docid": "4f2cb4a2d7c1b6c2e2299daf1bf9a127", "score": "0.55351746", "text": "def _message_matches(self, msg):\n return ((self._current_msg_id is not None and\n msg.mid == self._current_msg_id)\n or\n (self._current_msg_id is None and\n msg.name == self._current_name))", "title": "" }, { "docid": "5eb7d0869ed3c8879bb5676346a7c689", "score": "0.55333745", "text": "def is_a(self, klass):\n return isinstance(self, klass)", "title": "" }, { "docid": "7c6e790bc8e49dd06434c947dd94f508", "score": "0.5519795", "text": "def test_implemented_message_class(self):\r\n msg = messages.StringMessage()\r\n self.assertIsInstance(msg, messages.Message)", "title": "" }, { "docid": "065bd6b6c67f7db0f6a1a2640122564f", "score": "0.5519081", "text": "def has_class(self, csscl):\n return csscl in self.attrs[\"klass\"]", "title": "" }, { "docid": "64edfcec6c9e0fefbc12ca8701d03109", "score": "0.55163807", "text": "def messagePresent (\r\n\r\n self,\r\n attribute = None\r\n ) :\r\n\r\n if utilities.isEmpty( attribute ) : return False\r\n\r\n if not attribute.endswith( \"Message\" ) : attribute = attribute + \"Message\"\r\n \r\n try :\r\n\r\n x = getattr( self, attribute )\r\n \r\n return True\r\n\r\n except Exception, exception :\r\n\r\n return False", "title": "" }, { "docid": "128b7a56bdade9715cbc0e6e6d8e85a9", "score": "0.5513057", "text": "def valid_class(obj):\n return inspect.isclass(obj) \\\n and proj_mod.__name__ == obj.__module__ \\\n and issubclass(obj, CodeJamBase)", "title": "" }, { "docid": "83c6c630e94f92a9ada1bc4addb0dbd4", "score": "0.5501432", "text": "def has_topic_classification(self):\n return topic_picker.is_classified(self.namespacestripped)", "title": "" }, { "docid": "b4493d62b73a578555d1e4c49a474584", "score": "0.5495578", "text": "def test_class_type(self):\n\n self.assertIs(type(self.obj5[\"__class__\"]), str)", "title": "" }, { "docid": "7b7e58e266fe6f0d7816ccc08830ad2c", "score": "0.54944795", "text": "def is_object(self):\n return True", "title": "" }, { "docid": "bd1d1d7bd3999599bfa8f060b01184a4", "score": "0.548347", "text": "def nm_message(self):\n return bool(self._get_attribute('NmMessage'))", "title": "" }, { "docid": "e1d4b9a4fbc83ab85f5b948b61d40849", "score": "0.5480231", "text": "def is_class_test(self):\n try:\n return self.cls_obj.__test__\n except AttributeError:\n return False", "title": "" }, { "docid": "bd73b652842ee3d83aadec0a91a4c71a", "score": "0.5479167", "text": "def isclass(obj):\n return isinstance(obj, type)", "title": "" }, { "docid": "f78b1a2f0679ea227aab2bd929ef21e5", "score": "0.5476879", "text": "def is_class(cls):\n return isclass(cls)", "title": "" }, { "docid": "d1a11dc251bc1ac8722d35273939b7ce", "score": "0.54762065", "text": "def isMsgFromPrimary(self, msg, sender: str) -> 
bool:\n if self.isMsgForLaterView(msg):\n logger.error(\"{} cannot get primary for a request for a later \"\n \"view. Request is {}\".format(self, msg))\n else:\n return self.primaryName == sender if self.isMsgForCurrentView(\n msg) else self.primaryNames[msg.viewNo] == sender", "title": "" }, { "docid": "76576c4deb7160b4267e1f13d7f4cc87", "score": "0.5467373", "text": "def is_known_msg_type(msg_type):\n return msg_type in _get_all_message_types()", "title": "" }, { "docid": "207a795cda206d69b05bcc8b2a6a7a2d", "score": "0.546686", "text": "def is_instance(obj, s):\n return s in str(type(obj))", "title": "" }, { "docid": "4d27d36726d14e839fbf79c109c95fc3", "score": "0.54632235", "text": "def what_class(self, instance):\n\n is_class = 'unknown'\n try:\n tmp = instance.__class__\n is_class = tmp\n except exceptions.AttributeError:\n #\n # The instance is not a python class. Maybe one of the\n # standard python data types?\n #\n is_class = type(instance)\n\n return is_class", "title": "" }, { "docid": "70534b0321101add96842ae9acbc974c", "score": "0.5453364", "text": "def _match_object(self, _obj: NamedObject) -> bool:\n return True", "title": "" }, { "docid": "658f1bc843490158228899b7e2aee4e3", "score": "0.54503334", "text": "def _check_for_message_composer(self):\n return bool(self.partner_id and self.partner_id.email)", "title": "" }, { "docid": "4bf9ec21da878729e2ba89dbcf58b284", "score": "0.5449764", "text": "def _isinstance2(obj, cls):\n if isinstance(obj, cls):\n return True\n try:\n return _issubclass2(obj.__class__, cls)\n except Exception:\n return False\n return False", "title": "" }, { "docid": "690704f190006090190993ed521828f4", "score": "0.5447413", "text": "def is_actor_type(obj):\n try:\n return issubclass(obj, Actor)\n except TypeError:\n return False", "title": "" }, { "docid": "cc2021082352e2eca8b5a7fcfc582a74", "score": "0.54448503", "text": "def class_is_queue_prop(cls):\n if cls.find(\"of_queue_prop\") == 0:\n return True\n\n # For each vendor, check for vendor specific action\n for exp in of_g.experimenter_name_to_id:\n if cls.find(\"of_queue_prop_\" + exp) == 0:\n return True\n\n return False", "title": "" }, { "docid": "eab3b76c65147755f7833eda23b4d9cf", "score": "0.54388547", "text": "def is_msg_cmd(self):\n try:\n self.data.index('MSG')\n return True\n except:\n return False", "title": "" }, { "docid": "4a92d543ce1a2ce975e97fcd55fd7ad8", "score": "0.54382485", "text": "def _is_testcase_class(self, obj):\n for parent in obj.__bases__:\n if parent.__module__ + \".\" + parent.__name__ == \"seleniumwebtests.testcase.TestCase\":\n return True\n return False", "title": "" }, { "docid": "2b245a80a12d0714870412c1ccec7635", "score": "0.5436717", "text": "def get_msg_cls(cls, jobj):\n if cls in cls.TYPES.itervalues():\n # cls is already registered Message type, force to use it\n # so that, e.g Revocation.from_json(jobj) fails if\n # jobj[\"type\"] != \"revocation\".\n return cls\n\n if not isinstance(jobj, dict):\n raise errors.ValidationError(\n \"{0} is not a dictionary object\".format(jobj))\n try:\n msg_type = jobj[\"type\"]\n except KeyError:\n raise errors.ValidationError(\"missing type field\")\n\n try:\n msg_cls = cls.TYPES[msg_type]\n except KeyError:\n raise errors.UnrecognizedMessageTypeError(msg_type)\n\n return msg_cls", "title": "" }, { "docid": "034f814e98acadf1244c83fa9959f0a8", "score": "0.5431403", "text": "def is_obj(name):\n non_obj_names = ['World', 'Camera', 'Lamp', 'RenderLayers', 'scene']\n for non_obj_name in non_obj_names:\n if non_obj_name in name:\n 
return False\n return True", "title": "" }, { "docid": "eccaa309bd44286eac6743c2f8d79ff7", "score": "0.5429227", "text": "def isBase(cls):\n return cls.__dict__.get('_label', None) is None", "title": "" }, { "docid": "99fb4df8c8adeb0e5758d23277df23d7", "score": "0.54229337", "text": "def hasObjectWithName(self, name):\n return name in self._mst.object_lut", "title": "" }, { "docid": "b3f5e0aeca67268df4bc945e5331ecb1", "score": "0.5419785", "text": "def _check_class_name(self, device, class_name):\n\n out_2 = device.interface.class_map_get_details(class_map_name=class_name)\n if out_2 is not None:\n if out_2['access_group'] is not None or out_2['vlan'] is not None or\\\n out_2['bridge_domain'] is not None:\n self.logger.info(\"Class Map %s is pre-existing with a match criterion\", class_name)\n return False\n\n return True", "title": "" }, { "docid": "8453e7bfa35f53d5e7e22d883c7af8f6", "score": "0.54099536", "text": "def is_same_class(obj, a_class):\n if type(obj) == a_class:\n return (True)\n return(False)", "title": "" }, { "docid": "a411660e8844692ca37c0f0ed5d3119b", "score": "0.53864175", "text": "def lowlevel_issubclass(cls, other):\n if not lowlevel_isinstance(cls, type):\n raise TypeError()\n mro = record_get(LOAD(cls), LITERAL(\"mro\"))\n length = sequence_length(mro)\n index = LITERAL(0)\n while index < length:\n if sequence_get(mro, index) is other:\n return True\n index = number_add(index, LITERAL(1))\n return False", "title": "" }, { "docid": "8ecd5475e625b3737eea473e686cf73b", "score": "0.53852814", "text": "def class_is_hello_elem(cls):\n if cls.find(\"of_hello_elem\") == 0:\n return True\n return False", "title": "" }, { "docid": "7d511c9e92bd3a1f81c4921f3aba0143", "score": "0.53835154", "text": "def _issubclass2(child, parent):\n # String comparison\n if child.__name__ == parent.__name__:\n if child.__module__ == parent.__module__:\n return True\n # Recurse through classes of obj\n return any(_issubclass2(base, parent) for base in child.__bases__)", "title": "" }, { "docid": "5071a5e75501139543e541de394d7149", "score": "0.53740054", "text": "def hasMessages(self) -> bool:\n ...", "title": "" }, { "docid": "5071a5e75501139543e541de394d7149", "score": "0.53740054", "text": "def hasMessages(self) -> bool:\n ...", "title": "" }, { "docid": "b21f73d6645050a7beef9842a9954513", "score": "0.5373803", "text": "def is_pmp_obj(obj):\n return isinstance(obj, dict) and obj.get('class') == 'PMP'", "title": "" }, { "docid": "25c66a77e936142c072c29141df5fdc4", "score": "0.536825", "text": "def subclassof(obj, classinfo):\n try:\n return issubclass(obj, classinfo)\n except TypeError:\n return False", "title": "" }, { "docid": "3ae7d5dab8fcbba7a566fcdc74be43e0", "score": "0.53664863", "text": "def supported_elem(cls, elemname, parent=None):\r\n try:\r\n cls = get_class(elemname)\r\n return True\r\n except exceptions.IncorrectClassError:\r\n return False", "title": "" }, { "docid": "cb512a22c390505ccde32397f23d434f", "score": "0.53645927", "text": "def _is_msg_for_current_view(self, msg):\n viewNo = getattr(msg, \"viewNo\", None)\n return viewNo == self.view_no", "title": "" }, { "docid": "bb55cc61848fd1d6071d82e93850951e", "score": "0.53633976", "text": "def is_container(self) -> bool:\n\n return self._contained_messages is not None", "title": "" }, { "docid": "fd4bb680a67c44a2d577fb105c936447", "score": "0.53628063", "text": "def is_same_class(obj, a_class):\n if type(obj) is a_class:\n return True\n else:\n return False", "title": "" }, { "docid": "fd4bb680a67c44a2d577fb105c936447", 
"score": "0.53628063", "text": "def is_same_class(obj, a_class):\n if type(obj) is a_class:\n return True\n else:\n return False", "title": "" }, { "docid": "f52887fa0002b11b635b6ad321531611", "score": "0.5361323", "text": "def isClass(obj):\n from types import ClassType\n return isinstance(object, (type, ClassType))", "title": "" }, { "docid": "c725f8bbbd44a6352267ca7e43a1a85b", "score": "0.53605324", "text": "def is_same_class(obj, a_class):\n return(type(obj) is a_class)", "title": "" }, { "docid": "7be015878313eb86f87bbd73ff9a246c", "score": "0.53466547", "text": "def is_type(self, typeck):\r\n res = self.name.lower() == typeck.lower()\r\n return res", "title": "" }, { "docid": "af2fd688f08cd4128c5cc82ebf79f7ad", "score": "0.53462315", "text": "def hasMessage(self):\n\t\tif(self.message is not None):\n\t\t\treturn True;\n\t\treturn False;", "title": "" }, { "docid": "175b0bccf2567f5fd7db9a54bed69a03", "score": "0.5337358", "text": "def is_same_class(obj, a_class):\n if type(obj) is a_class:\n return True\n return False", "title": "" }, { "docid": "0c11b6071a84fc6714c1b6b3a557d7f1", "score": "0.5337089", "text": "def is_topicmap(self, obj):", "title": "" }, { "docid": "d8e6ce3c0737d659b3012680e381db7f", "score": "0.5336731", "text": "def is_a(self, type_):\n return issubclass(self.model_class, type_)", "title": "" }, { "docid": "0b501e932a0a936410158a632817f981", "score": "0.53356653", "text": "def inherits_from(obj, a_class):\n\n if isinstance(obj, a_class) and type(obj) is a_class:\n return(False)\n else:\n return(True)", "title": "" }, { "docid": "d546a61c550e211be000f4f89cbdf84f", "score": "0.5331959", "text": "def is_struc(o: Any) -> bool:\r\n return '.GeneratedStructureData' in str(type(o))", "title": "" }, { "docid": "6acc4ef1e86a8e260bebcb570e67813d", "score": "0.5331268", "text": "def supported_class(cls, classname):\r\n if classname==\"ConfigurationProxy\":\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "c31afa0024de479b1abf826c05f48d30", "score": "0.53231347", "text": "def is_kind_of_class(obj, a_class):\n return(isinstance(obj, a_class))", "title": "" } ]
cc2bd32ddcea8a8d7dbd16a413b490a0
Execute a shell command and capture its output (expected to be a single line). This is a thin wrapper around system_to_string().
[ { "docid": "a036753f66663e66c6f0767e0436ad49", "score": "0.6720015", "text": "def system_to_one_line(cmd: str, *args: Any, **kwargs: Any) -> Tuple[int, str]:\n rc, output = system_to_string(cmd, *args, **kwargs)\n output = get_first_line(output)\n return rc, output", "title": "" } ]
[ { "docid": "3426284eb7b0b3ad1a0d0cf6a75517ad", "score": "0.7754001", "text": "def shell_output(command: str):\n process = subprocess.run(\n args=[command],\n capture_output=True,\n shell=True,\n text=True # capture STDOUT as text\n )\n if process.returncode == 0:\n return process.stdout\n else:\n raise Exception(\n f'''\n Command {command} exited with status code {process.returncode}\n - STDOUT: {process.stdout if process.stdout != '' else '\"\"'}\n - STDERR: {process.stderr if process.stderr != '' else '\"\"'}\n '''\n )", "title": "" }, { "docid": "b53b1d12016efb65e7c4672d787476c3", "score": "0.74621505", "text": "def _execute_shell_command(command: List[str]) -> str:\n stdout, _ = subprocess.Popen( # nosec\n command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n ).communicate()\n\n return stdout.decode(\"ascii\")", "title": "" }, { "docid": "a2bc6a76ac5ffc9e6392ddca9cc5d3ad", "score": "0.7335533", "text": "def shell_exec(self, command, **kwargs):\n kwargs.setdefault(\"stdout\", subprocess.PIPE)\n kwargs.setdefault(\"stderr\", subprocess.STDOUT)\n p = subprocess.Popen(command, shell=True, **kwargs)\n return p.communicate()[0]", "title": "" }, { "docid": "89e6379027ba26bebd373740a57035b6", "score": "0.7306982", "text": "def shellExecute(command, showOutErr = False):\n print command\n (out,err) = subprocess.Popen(command, shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()\n r = '\\n'.join((out,err))\n if showOutErr:\n print r\n return r", "title": "" }, { "docid": "0d1f6f83be5f5a600da29e21fb131f53", "score": "0.7281206", "text": "def run(command, shell=True):\n result = subprocess.check_output(command, shell=shell)\n return result.decode(\"utf-8\")", "title": "" }, { "docid": "70d6d44a4ae42b2b092eb04bf85eece2", "score": "0.726305", "text": "def run_command_and_get_output(command):\n proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n (stdoutdata, _) = proc.communicate()\n return stdoutdata", "title": "" }, { "docid": "6aa53f2277ef2576008b65e70544e626", "score": "0.72130674", "text": "def shell_command(self, command):\n import subprocess\n process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)\n output = process.communicate()[0]\n print output", "title": "" }, { "docid": "7e24c60d576f3e6f6ba7f5d707e81399", "score": "0.7195416", "text": "def get_output(command: Union[str, List[str]], **kwargs) -> str:\n if isinstance(command, str) and not kwargs.get('shell'):\n command = command.split()\n bytes_output = subprocess.check_output(command, **kwargs)\n return bytes_output.decode('UTF-8')", "title": "" }, { "docid": "b0891b2d87cb87655cbd6cdb4c3c7d92", "score": "0.7046614", "text": "def run_shell(command: List[str]) -> Tuple[str, str]:\n catched = run(args=command, shell=False, capture_output=True, text=True)\n return catched.stdout, catched.stderr", "title": "" }, { "docid": "fe850f90f0817d8301cc385892df45df", "score": "0.704169", "text": "def shell(command):\n\n if options['dry_run']:\n print command\n return\n\n try:\n output = subprocess.check_output(command,\n stderr=subprocess.STDOUT,\n shell=True)\n if output:\n print output.rstrip()\n except subprocess.CalledProcessError as e:\n sys.stderr.write(e.output)\n sys.exit(e.returncode)", "title": "" }, { "docid": "fd86ef3033db9aec42cb9862b13fc000", "score": "0.7018849", "text": "def exec_to_string ( cmd ):\n if args.verbose:\n\tmsg('Running command: %s' % cmd) \n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd='/tmp/')\n stdout, stderr = p.communicate()\n 
return [p.returncode, stdout, stderr ]", "title": "" }, { "docid": "065c423721ea78b3da2b0000f1f9ac3b", "score": "0.69875747", "text": "def get_command_output(command):\n cmd = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n cmd_value = str(cmd.stdout.readlines()[0]).strip() # Read command output\n sys.stdout.flush()\n return cmd_value", "title": "" }, { "docid": "ae26b0fa7c72a0460224498ee289f668", "score": "0.6977622", "text": "def execute_get_text(command: str) -> str:\n try:\n result = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)\n except subprocess.CalledProcessError as err:\n raise\n\n return result", "title": "" }, { "docid": "b896eaaa2a3cc31da5f5ed177c820585", "score": "0.69642603", "text": "def _get_subprocess_output(command):\r\n proc = sp.Popen(command, stdout=sp.PIPE, close_fds=True)\r\n return proc.stdout.read()", "title": "" }, { "docid": "8523e809c112e378f64745cb2323a904", "score": "0.69434756", "text": "def do_cmd(command, shell=False, timeout=None):\n doit = command\n if shell is True:\n\n for thing in command:\n doit + \" \" + str(thing)\n # command = \" \".join(command)\n\n logging.debug(\"do_cmd, Timeout: %s\\n%s\" % (timeout, command))\n\n res = subprocess.run(\n doit,\n shell=shell,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n timeout=timeout,\n )\n\n stderr = res.stderr.decode(\"utf-8\")\n stdout = res.stdout.decode(\"utf-8\")\n\n logger.debug(\"Return Code: %d\" % res.returncode)\n logger.info(stdout)\n logger.error(stderr)\n\n if res.returncode:\n raise Exception(\"stdout: %s\\n stderr: %s\\n\" % (stdout, stderr))\n\n return stdout", "title": "" }, { "docid": "17ee2523011e416a24aff469d78bcbc4", "score": "0.69429404", "text": "def execute(self, command):\n\t\treturn subprocess.check_output(command, shell=True)", "title": "" }, { "docid": "ee915c0bab7283334a052147a366f62b", "score": "0.6938192", "text": "def execute_and_print_result(command):\n\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for line in p.stdout.readlines():\n print(str(line, \"utf-8\"))", "title": "" }, { "docid": "5f504fe17520e6f2b2982522d66f7b87", "score": "0.68849814", "text": "def command_run(command):\n print(\"==>\", command)\n run = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n print(\">\", run.stdout.decode())\n return run.stdout.decode()", "title": "" }, { "docid": "3f8e59645ffb17a981b31132e2cb1705", "score": "0.68812937", "text": "def shell_exec(command):\n\n log.debug('Executing: %s', repr(command))\n try:\n output = subprocess.check_output(command,\n shell=True,\n ).decode('utf-8').strip()\n except OSError as oserr:\n log.error(\"OS error\")\n log.error(oserr)\n return oserr\n except subprocess.CalledProcessError as ex:\n log.error(\"CalledProcessError caught\")\n log.error(ex)\n return ex\n except BaseException as ex:\n log.error(\"Exception caught\")\n log.error(ex)\n return ex\n # log.debug('Output: %s', output) ## this creates a mess\n if output:\n return output\n\n return True", "title": "" }, { "docid": "b543b07c194709133c9856756ac8ecba", "score": "0.6880262", "text": "def system_output(command, timeout=None, ignore_status=False,\n retain_output=False, args=()):\n if retain_output:\n out = run(command, timeout=timeout, ignore_status=ignore_status,\n stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS,\n args=args).stdout\n else:\n out = run(command, timeout=timeout, ignore_status=ignore_status,\n args=args).stdout\n if 
out[-1:] == '\\n':\n out = out[:-1]\n return out", "title": "" }, { "docid": "07db34d128057f489154a0c634b8387d", "score": "0.6861454", "text": "def run_local_commandline( command , collect_stdout = False ):\n # get the output\n print '\\n'+ '='*80 + '\\nPerforming system call:\\n' + command + '\\n' + '='*80 +'\\n'\n if not collect_stdout:\n subprocess.call( command , shell = True ) # just the command, no output piping\n\n # older call that pipes the output into Python for manipulation\n else:\n #stdout = subprocess.Popen( command , shell = True , stdout = subprocess.PIPE , stdin = subprocess.PIPE , stderr = subprocess.STDOUT ).communicate()[0].strip()\n # shell = True, do NOT .split the command, = False, DO .split the command\n stdout = subprocess.Popen( command , shell = True , stdout = subprocess.PIPE , stdin = subprocess.PIPE , stderr = subprocess.STDOUT ).communicate()[0]#.strip() # for consistency\n return stdout", "title": "" }, { "docid": "e65b5d7eabe3cfd46033c264384b2c40", "score": "0.6857376", "text": "def run_command(cmd, shell=True):\n logging.debug('Executing command: %s', cmd)\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, shell=shell)\n\n exit_code = None\n line = ''\n stdout = ''\n while exit_code is None or line:\n exit_code = p.poll()\n line = p.stdout.readline().decode('utf-8')\n stdout += line\n logging.debug(line)\n\n return exit_code, stdout", "title": "" }, { "docid": "ccc103b89a64d80009a0a760b7fcd70e", "score": "0.68423927", "text": "def capture_command(command):\n arg_list = [\"bash\", \"-lc\", command]\n\n proc = subprocess.Popen(arg_list, stdout=subprocess.PIPE)\n out = proc.communicate()[0]\n return out", "title": "" }, { "docid": "13d2912e9872f3656fa71952cee2727e", "score": "0.68184894", "text": "def exec_command_stdout(*command_args, **kwargs):\n\n # Value of the passed \"encoding\" parameter, defaulting to None.\n encoding = kwargs.pop('encoding', None)\n\n # If no encoding was specified, the current locale is defaulted to. Else, an encoding was specified. To ensure this\n # encoding is respected, the \"universal_newlines\" option is disabled if also passed. 
Nice, eh?\n kwargs['universal_newlines'] = encoding is None\n\n # Standard output captured from this command as a decoded Unicode string if \"universal_newlines\" is enabled or an\n # encoded byte array otherwise.\n stdout = subprocess.check_output(command_args, **kwargs)\n\n # Return a Unicode string, decoded from this encoded byte array if needed.\n return stdout if encoding is None else stdout.decode(encoding)", "title": "" }, { "docid": "6f6c8cd8833e179855dd32b4cf5a2f8c", "score": "0.68182313", "text": "def check_output(command):\r\n try:\r\n return sp.check_output(command, shell=True, stderr=sp.DEVNULL).decode()\r\n except sp.CalledProcessError:\r\n return ''", "title": "" }, { "docid": "4c44dee44c8b802824c1c43b74df038a", "score": "0.68112665", "text": "def run(command):\n p = subprocess.Popen(command, stdout=subprocess.PIPE)\n output, err = p.communicate()\n return output", "title": "" }, { "docid": "f1d52c38ed1d5c7fee9192a22a6281ae", "score": "0.6804467", "text": "def shell(logger, cmdline, raise_error=False): \n\n logger.debug(\"Running command line: %s\" % cmdline)\n\n process = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\n # XXX: Support stderr interleaving\n out, err = process.communicate()\n\n # :E1103: *%s %r has no %r member (but some types could not be inferred)*\n # pylint: disable=E1103 \n out = out.decode(\"utf-8\")\n err = err.decode(\"utf-8\")\n\n if raise_error and process.returncode != 0:\n logger.error(\"Command output:\")\n logger.error(out + err)\n raise ShellCommandFailed(\"The following command did not succeed: %s\" % cmdline)\n\n return (process.returncode, out + err)", "title": "" }, { "docid": "5f6765f50abd2d31647fc7d802156f3a", "score": "0.6802057", "text": "def shell(self, cmd, *args, **kwargs):\n stdout = StringIO.StringIO()\n stderr = StringIO.StringIO()\n kwargs['_out'] = stdout\n kwargs['_err'] = stderr\n ret = cmd(*args, **kwargs)\n stdout.seek(0)\n stdout.seek(0)\n return ret.exit_code, stdout.read(), stderr.read()", "title": "" }, { "docid": "b4c643d1263233a7ea6b135eccbe0924", "score": "0.6800792", "text": "def _process_command(self, command, stdout=None, supress_dry_run=False):\n logging.debug('Executing shell command: %s', command)\n if not self._dry_run or supress_dry_run:\n prc = Popen(command, shell=True, stdout=stdout)\n std = prc.communicate()\n return prc.returncode, std\n return 0, ('', '')", "title": "" }, { "docid": "8aab2648a61bfe685fd222a70f68e881", "score": "0.6786787", "text": "def execute(self):\n\n process = Popen(self.command, stdout=PIPE, stderr=PIPE, shell=True)\n (output, error) = process.communicate()\n\n if process.returncode != 0:\n return self.decode_output(error)\n\n return self.decode_output(output)", "title": "" }, { "docid": "4c89c72ce5e9bef68b426f31b89b3513", "score": "0.6784844", "text": "def sh(*cmd, **kwargs) -> str:\n logger.info(cmd[0])\n stdout = (\n subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs\n )\n .communicate()[0]\n .decode(\"utf-8\")\n )\n logger.info(stdout)\n return stdout", "title": "" }, { "docid": "4b85a04d09fb844c15f56c919c8f0f37", "score": "0.6771135", "text": "def system_to_string(\n cmd: str,\n abort_on_error: bool = True,\n wrapper: Optional[Any] = None,\n dry_run: bool = False,\n log_level: Union[int, str] = logging.DEBUG,\n) -> Tuple[int, str]:\n rc, output = _system(\n cmd,\n abort_on_error=abort_on_error,\n suppress_error=None,\n suppress_output=True,\n # If we want to see the output the system call must be 
blocking.\n blocking=True,\n wrapper=wrapper,\n output_file=None,\n tee=False,\n dry_run=dry_run,\n log_level=log_level,\n )\n output = output.rstrip(\"\\n\")\n return rc, output", "title": "" }, { "docid": "989291c3f3853e0d709e5be75709bb82", "score": "0.6736064", "text": "def shell(cmd):\n\n\tcmd = [str(x) for x in cmd]\n\n\tprocess = Popen(cmd, stdout=PIPE, stderr=PIPE)\n\n\tout, err = process.communicate()\n\n\treturn process.returncode, out, err", "title": "" }, { "docid": "6a45f28dc0fc8c605d0bc4d97d41c36e", "score": "0.6734544", "text": "def get_simple_cmd_output(self,cmd, stderr=STDOUT):\n args = shlex.split(cmd)\n return Popen(args, stdout=PIPE, stderr=stderr).communicate()[0]", "title": "" }, { "docid": "e2a6ec272554d8375d022a9042785706", "score": "0.67235434", "text": "def execute(command: str, cwd=None):\n result = subprocess.run(command.split(), stdout=subprocess.PIPE, check=True, cwd=cwd)\n return result.stdout.decode(\"utf-8\")", "title": "" }, { "docid": "43fd425f0706e9b0de66c7c1c7f5c1f9", "score": "0.6698841", "text": "def run_shell_command(args):\n\n popen = subprocess.Popen(args,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n for line in iter(popen.stdout.readline, \"\"):\n print(line)\n popen.stdout.close()\n ret = popen.wait()\n\n if ret:\n raise subprocess.CalledProcessError(ret, args)", "title": "" }, { "docid": "ef54f9fd34375f0a34a7d786705bb2a9", "score": "0.66964376", "text": "def sh(cmd):\n result = \"\"\n pipe = None\n try:\n pipe = popen(cmd)\n result = pipe.read()\n finally:\n if pipe: pipe.close()\n return result", "title": "" }, { "docid": "007f5d71eaa80c919526a690d5df5434", "score": "0.6682073", "text": "def get_result(shell):\n try:\n return shell.stdout.read()\n except:\n return 'command complete'", "title": "" }, { "docid": "75519ef6cc52344e70ed7ed963d8888f", "score": "0.668091", "text": "def shell(shell_command):\n location = str(DOWNLOAD_PATH) + os.path.sep\n beautified_command = shell_command.replace(location, '')\n print(beautified_command)\n\n commands = [cmd.strip() for cmd in shell_command.split('|')]\n result = run_piped_commands(commands)\n\n if result.stdout and result.stdout.strip():\n print(result.stdout.strip(), file=sys.stdout)\n\n if result.stderr and result.stderr.strip():\n print(result.stderr.strip(), file=sys.stderr)\n\n return result", "title": "" }, { "docid": "56901ee9beed1f0dc54da290722ceddd", "score": "0.668072", "text": "def run_command(command: List[str], shell=False, timeout=60) -> List[bytes]:\n result = subprocess.run(command, timeout=timeout, capture_output=True)\n\n if result.returncode != 0:\n cmdstr = \" \".join(command)\n log.error(\n f\"pdftk shell-out returned exit code {result.returncode}\",\n extra={\"cmd\": cmdstr, \"stdout\": result.stdout, \"stderr\": result.stderr},\n )\n\n # Raise an error\n result.check_returncode()\n\n return result.stdout.split(b\"\\n\")", "title": "" }, { "docid": "0cac968472ea1c064f31f17b7465fca7", "score": "0.6677521", "text": "def RunShell(command, print_output=False):\n return RunShellWithReturnCode(command, print_output)[0]", "title": "" }, { "docid": "738068c03a3d59d4bcbb505480070455", "score": "0.66537553", "text": "def run_cmd(cmd, *args, silent=False) -> str:\n try:\n raw_output = sp.check_output([cmd] + list(args), stderr=sp.STDOUT)\n return raw_output.decode('utf-8')\n except sp.CalledProcessError as e:\n if not silent:\n print('\\nCould not execute command\\n')\n print(e)\n return ''", "title": "" }, { "docid": "0c6c18b695897a956aa3a4b87d5c46dc", 
"score": "0.6650272", "text": "def exec_command(cmd, **kwargs):\n import shlex, subprocess\n try:\n args = shlex.split(cmd, None)\n out = subprocess.check_output(args)\n return out.strip()\n except:\n return None", "title": "" }, { "docid": "475bbd49c49d05563cb2906f17d4ee9d", "score": "0.6641433", "text": "def run_command(cmd, redirect_output=True, check_exit_code=True):\r\n if redirect_output:\r\n stdout = subprocess.PIPE\r\n else:\r\n stdout = None\r\n proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)\r\n output = proc.communicate()[0]\r\n if check_exit_code and proc.returncode != 0:\r\n raise Exception('Command \"%s\" failed.\\n%s' % (' '.join(cmd), output))\r\n return output", "title": "" }, { "docid": "470a588eea9747a7a43d46121aae692a", "score": "0.66397053", "text": "def get_output(args: Sequence[str], verbose: bool = False, **kwargs) -> str:\n if verbose:\n cmd = \" \".join(args)\n print(f\"cmd: {cmd}\")\n return subprocess.run(args,\n check=True,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n **kwargs).stdout.strip()", "title": "" }, { "docid": "b27a67fb6fa7cf50374149176dd622ae", "score": "0.6630197", "text": "def run_command(cmd):\n \n output = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]\n return output.decode(\"ascii\")", "title": "" }, { "docid": "267ddb84758f94d40056f22afb67161e", "score": "0.6625331", "text": "def command_output(cmd, directory):\n p = subprocess.Popen(cmd,\n cwd=directory,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n (stdout, _) = p.communicate()\n if p.returncode != 0:\n raise RuntimeError('Failed to run %s in %s' % (cmd, directory))\n return stdout", "title": "" }, { "docid": "5628f94d1517c0f175c8ac15f2c0760a", "score": "0.66193825", "text": "def exec_shell_cmd(args, path):\n # print \" \".join(args)\n process = subprocess.Popen(args, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, cwd=path, shell=True)\n return decode_fs_string(process.stdout.read())", "title": "" }, { "docid": "e6f1e5d68846b0da6986af15f91d05ae", "score": "0.66073394", "text": "def call(command, **kwargs):\n\n kwargs['stdout'] = PIPE\n kwargs['stderr'] = PIPE\n command_split = shlex.split(command)\n\n p = Popen(command_split, **kwargs)\n stdout = p.communicate()[0]\n\n if stdout:\n for line in stdout.decode('utf-8').split(\"\\n\"):\n print(line)\n\n return p.returncode", "title": "" }, { "docid": "dbfb1d3f7761d08c6b71c6d37980887b", "score": "0.6584171", "text": "def _run_system_command(self, cmd, sleep=None):\n try:\n self.logger.debug(\"CMD='{0}'\".format(cmd))\n output = subprocess.check_output(cmd, shell=True).strip()\n if output:\n self.logger.debug(\"CMD Output='{0}'\".format(output))\n if sleep:\n time.sleep(sleep)\n return str(output)\n except subprocess.CalledProcessError as ex:\n raise # raise all subprocess errors so that they can be handled appropriately by calling code ", "title": "" }, { "docid": "4651b02d16ac65df158cd52d2dee2552", "score": "0.6580554", "text": "def run_command(command, in_string):\n # Open process\n p = subprocess.Popen(command, shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n # Write to process stdin and get its stdout.\n return p.communicate(input=in_string)[0].strip()", "title": "" }, { "docid": "5e7e2e24b455b92977be7660f2219a79", "score": "0.6577151", "text": "def run_command(command):\n p = os.popen(command, \"r\")\n out = []\n while 1:\n line = p.readline()\n if not line:\n break\n out.append(line.rstrip())\n return out", "title": "" }, { "docid": 
"ad6dbb300c8e0359ea34e76e07844c96", "score": "0.6571824", "text": "def capture_output(argv):\n result = subprocess.run(\n argv,\n stdout = subprocess.PIPE,\n stderr = subprocess.DEVNULL,\n check = True)\n\n return result.stdout.decode(\"utf-8\").splitlines()", "title": "" }, { "docid": "98a954f65394f55eec032279126807e6", "score": "0.6563765", "text": "def _run(cmd):\n\n p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True, close_fds=True)\n return p.communicate()", "title": "" }, { "docid": "8e69eab8b4803a9a829cbe4492d935d1", "score": "0.65594834", "text": "def subprocess_output_to_string(output):\n return output", "title": "" }, { "docid": "80c4c69f02ac0ee463330f1d7895e2bc", "score": "0.65564156", "text": "def shellcmd(self, cmd, echo=False):\n if not echo: print('[cmd] {0}'.format(cmd))\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n out = p.communicate()[0]\n if not echo: print(out)\n return out", "title": "" }, { "docid": "45679fadc6c650f3a8151b0a7056c5ab", "score": "0.6555413", "text": "def call_raw_command(raw_command, text):\n process = subprocess.Popen(raw_command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n logging.info('%s: %s...' % (' '.join(raw_command), text))\n return process.communicate()", "title": "" }, { "docid": "8039e5db6c5ab903ae929ec78e457a40", "score": "0.6553943", "text": "def execute_sys_command(args, stdout):\n return subprocess.Popen(args, stdout=stdout, shell=True)", "title": "" }, { "docid": "fdf419c9ef3188dead3bd0fc6ca3960f", "score": "0.6551828", "text": "def shellCommand(command, log):\n \n # did we get something for command?\n if command == \"\":\n raise RuntimeError, \"No shell command specified\"\n\n # run the command and get the 'pipes' back\n process = popen2.Popen3(command, 1)\n process.tochild.close()\n stdout = process.fromchild\n stderr = process.childerr\n stdoutfd = stdout\n stderrfd = stderr\n makeNonBlocking(stdout.fileno())\n makeNonBlocking(stderr.fileno())\n stdouteof = stderreof = 0\n\n # get output of command\n while 1:\n # wait for output from command\n ready = select.select([stdoutfd, stderrfd], [], [])\n\n # is stdout ready with output?\n if stdoutfd in ready[0]:\n data = stdout.read()\n log.msg(data)\n if data == '':\n stdouteof = 1\n\n # is stderr ready with output\n if stderrfd in ready[0]:\n data = stderr.read()\n log.msg(data)\n if data == '':\n stderreof = 1\n\n # are both processes empty?\n if stdouteof and stderreof:\n break\n\n # wait for process to end\n return process.wait()", "title": "" }, { "docid": "b77cecfce7da2454a1aa4574f83eee79", "score": "0.65455675", "text": "def run(command):\n p = Popen(command.split(), stdout=PIPE, stderr=PIPE)\n (stdout, stderr) = p.communicate()\n return (p.returncode, [line.strip() for line in stdout.splitlines()],\n [line.strip() for line in stderr.splitlines()])", "title": "" }, { "docid": "610913b48cc326db1f22f4738881dea8", "score": "0.65386397", "text": "def RunShellWithReturnCode(command, print_output=False):\n p = subprocess2.Popen(\n command,\n cwd=GetRepositoryRoot(),\n stdout=subprocess2.PIPE,\n stderr=subprocess2.STDOUT,\n universal_newlines=True)\n if print_output:\n output_array = []\n while True:\n line = p.stdout.readline()\n if not line:\n break\n if print_output:\n print line.strip('\\n')\n output_array.append(line)\n output = \"\".join(output_array)\n else:\n output = p.stdout.read()\n p.wait()\n p.stdout.close()\n return output, p.returncode", "title": "" }, { "docid": "0cd02cb98a566bdcd9c7470e89c157d3", "score": 
"0.6529944", "text": "def _run(cmd):\n out = \"\"\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) # nosec\n # print(\"Running \", cmd)\n # print(\"*\" * 80)\n for line in proc.stdout:\n line_str = line.decode()\n sys.stdout.write(line_str)\n out += line_str\n for line in proc.stderr:\n line_str = line.decode()\n sys.stderr.write(line_str)\n out += line_str\n proc.wait()\n proc.stdout.close()\n proc.stderr.close()\n # print(\"Finished \", cmd)\n # print(\"*\" * 80)\n return out", "title": "" }, { "docid": "83ae1ef7d0005df82feeafbdf716ea17", "score": "0.6522932", "text": "def _execute_shell_cmd(command, working_dir=None):\n actual_working_dir = os.getcwd() if working_dir is None else working_dir\n process = Popen(command, stdout=PIPE, stderr=PIPE, cwd=actual_working_dir, shell=True)\n\n output, error = process.communicate()\n return_code = process.returncode\n\n return return_code, output.decode('utf-8'), error.decode('utf-8')", "title": "" }, { "docid": "ace94ef85ebd4fba1a3e2308bf63559a", "score": "0.652041", "text": "def getoutput(*cmd):\n\n try:\n proc = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n except OSError:\n # ignore errors like program not found\n return str(\"\")\n\n stdout = proc.communicate()[0]\n return encodeutils.safe_decode(stdout)", "title": "" }, { "docid": "90db884693e1c25112f9ae71fd464390", "score": "0.6518024", "text": "def runCommand(cmd):\n p = Popen(cmd.split(' '), stdout=PIPE)\n return p.communicate()", "title": "" }, { "docid": "90db884693e1c25112f9ae71fd464390", "score": "0.6518024", "text": "def runCommand(cmd):\n p = Popen(cmd.split(' '), stdout=PIPE)\n return p.communicate()", "title": "" }, { "docid": "4f6ee196412be81c840cd56eb2d473ed", "score": "0.6517902", "text": "def execute_subprocess(external_command):\n logger = logging.getLogger(__file__)\n try:\n logger.info('Executing $ %s', external_command)\n subprocess_output = subprocess.check_output(shlex.split(external_command), stderr=subprocess.STDOUT)\n #except subprocess.CalledProcessError as error:\n #print(error.output.decode('utf-8'))\n except FileNotFoundError:\n logger.error('Command does not exist within the system!')\n return subprocess_output.decode('utf-8')", "title": "" }, { "docid": "0f3d3590cbed9ebb0d77589e4b593985", "score": "0.65110654", "text": "def bash(command):\n proc = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)\n\n out, err = proc.communicate()\n return out.strip(), err.strip(), proc.returncode", "title": "" }, { "docid": "7db800b597654b5b4974299e7122c2af", "score": "0.6482036", "text": "def runShellCommand( cmd, stdout_filename):\n\n stdout_f = open(stdout_filename, \"w\" )\n result = subprocess.call(cmd, stdout=stdout_f, shell=True)\n stdout_f.close()\n\n f = open(stdout_filename, \"r\")\n stdout_text = f.read()\n f.close()\n return (result, stdout_text)", "title": "" }, { "docid": "96d3238814ba91ab44a5db4c2636cdc6", "score": "0.64727604", "text": "def runCommand(cmd):\n print cmd\n args = shlex.split(cmd)\n p = subprocess.Popen(args)\n return p.communicate()", "title": "" }, { "docid": "b69fb2941a981cf0e922f3e4efebbf07", "score": "0.6470732", "text": "def run(cmd):\n cmd = [pipes.quote(c) for c in cmd]\n cmd = \" \".join(cmd)\n cmd += \"; exit 0\"\n # print(\"Running {} in {}\".format(cmd, os.getcwd()))\n try:\n output = subprocess.check_output(cmd,\n stderr=subprocess.STDOUT,\n shell=True)\n except subprocess.CalledProcessError as e:\n output = e.output\n\n output = output.decode('utf-8')\n output 
= output.strip()\n return output", "title": "" }, { "docid": "b13e57476717c70d5e6e7e2b6c534ccf", "score": "0.64595115", "text": "def run_command(command):\n process = subprocess.Popen(shlex.split(command),\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n while True:\n output = process.stdout.readline()\n outerr = process.stderr.readline()\n if output == '' and process.poll() is not None:\n break\n if output:\n if use_print_future:\n print(output)\n else:\n # print output.strip()\n if output[-1]=='\\n':\n print output[:-1]\n else:\n print output\n if outerr:\n print (outerr.strip())\n rc = process.poll()\n return rc", "title": "" }, { "docid": "3ad98f2feab3bbedadc83e66099c6f4a", "score": "0.6459143", "text": "def run(command, logger=no_logging):\n process = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n while True:\n try:\n sleep(0.1)\n if process.poll() is not None:\n break\n\n # Write new lines in case the command prompts the us for some\n # input.\n process.stdin.write(b'\\n')\n except IOError:\n pass\n\n stdout = process.stdout.read().decode('utf-8')\n stderr = process.stderr.read().decode('utf-8')\n\n # it seems that in py3k if you try to write to a windows process that\n # doesn't read its stdin, and then close that stdin, you get 1 free OSError\n try:\n process.stdin.close()\n except OSError:\n pass\n\n process.stdout.close()\n process.stderr.close()\n\n if process.returncode == RETURN_CODE_SUCCESS:\n return stdout, stderr\n else:\n raise ShellCommandError(command, process.returncode)", "title": "" }, { "docid": "293cfa1552dc903c6d00fda3ffe53f9c", "score": "0.64573693", "text": "def run(command, shell=None):\n out_stream = subprocess.PIPE\n err_stream = subprocess.PIPE\n\n if shell is not None:\n p = subprocess.Popen(command, shell=True, stdout=out_stream,\n stderr=err_stream, executable=shell)\n else:\n p = subprocess.Popen(command, shell=True, stdout=out_stream,\n stderr=err_stream)\n (stdout, stderr) = p.communicate()\n\n return stdout, stderr", "title": "" }, { "docid": "513c5162f80c5a14e2ba908c7400f851", "score": "0.6454457", "text": "async def run_command_shell(command: str) -> CommandResult:\n # Create subprocess\n process = await asyncio.create_subprocess_shell(\n command,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE)\n\n # Status\n logging.info(\n f\"Started:{command}, (pid = {str(process.pid)})\") # , flush=True)\n\n # Wait for the subprocess to finish\n stdout, stderr = await process.communicate()\n\n # Progress\n if process.returncode == 0:\n logging.info(\n f\"Done:{command}, pid = {str(process.pid)}\") # , flush=True)\n result = CommandResult(cmd=command,\n returncode=process.returncode,\n output=stdout.decode().strip())\n else:\n logging.error(\n f\"Failed:{command}, pid = {str(process.pid)}\") # , flush=True)\n result = CommandResult(cmd=command,\n returncode=process.returncode,\n output=stderr.decode().strip())\n\n # Result\n # result = stdout.decode().strip()\n\n # Return stdout\n return result", "title": "" }, { "docid": "07cb35357d218a6a22953eb0996cb831", "score": "0.64541817", "text": "def run_local(command: str, str_output: Optional[bool] = False) -> Union[int, str]:\n print(command)\n\n if str_output:\n process = subprocess.Popen(\n command,\n shell=True,\n stdout=subprocess.PIPE,\n universal_newlines=True\n )\n\n return process.communicate()[0].replace(\"\\n\", \" \")\n\n return subprocess.call(command, shell=True)", "title": "" }, { "docid": "284c78430ce682e88828735d9d705b19", "score": "0.64522797", "text": "def 
run_command(\n command: str, stdin: Optional[bytes] = None, stdout=None\n) -> Optional[str]:\n if stdout is None:\n stdout = subprocess.PIPE\n command_line = shlex.split(command)\n result: subprocess.CompletedProcess = subprocess.run(\n command_line, input=stdin, stdout=stdout, stderr=subprocess.PIPE\n )\n try:\n result.check_returncode()\n except subprocess.CalledProcessError:\n print(result.stderr)\n print(stdout)\n exit()\n if stdout == subprocess.PIPE:\n output = result.stdout.decode()\n return output", "title": "" }, { "docid": "1ee1961776e184fc8dc8fc3239b66fcb", "score": "0.64502394", "text": "def get_output(args):\n args = shlex.split(args)\n\n return subprocess.check_output(args)", "title": "" }, { "docid": "96f1bdaafda342a3a4998a2228a14a98", "score": "0.6446819", "text": "def subprocess_output(cmd, env=None):\n err = None\n out = None\n try:\n p = subprocess.Popen(\n cmd,\n env=env,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n out, err = p.communicate()\n if p.wait() != 0:\n raise subprocess.CalledProcessError(returncode=p.returncode, cmd=cmd)\n except subprocess.CalledProcessError as e:\n pkdlog(\"{}: exit={} err={}\", cmd, e.returncode, err)\n return None\n if out:\n out = pkcompat.from_bytes(out)\n return out.strip()\n return \"\"", "title": "" }, { "docid": "e05631f2fb55f917b7f263dca8ebb096", "score": "0.64323854", "text": "def run_cmd(cmd):\n return subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode(\"utf-8\")", "title": "" }, { "docid": "e1a6352fd0e4cd8dedf400e61fa72d53", "score": "0.64306027", "text": "def run_shell_command(command_string):\n command = shlex.split(command_string)\n proc = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n stdout = proc.stdout.decode(\"utf-8\")\n stderr = proc.stderr.decode(\"utf-8\")\n\n return_code = proc.returncode\n\n return stdout, stderr, return_code", "title": "" }, { "docid": "55a90f857783cfa9d02d82f31d0cc091", "score": "0.64275473", "text": "def _systemCall(self, cmd, retry=True):\n self._printStep('Executing command: %s' % cmd)\n p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, close_fds=True)\n (child_stdin, child_stdout) = (p.stdin, p.stdout)\n child_stdin = child_stdin\n stdout = []\n while True:\n out = child_stdout.readlines(1)\n if not out:\n break\n stdout.extend(out)\n sys.stdout.writelines(out)\n returnCode = p.wait()\n if returnCode:\n if retry:\n return self._systemCall(cmd, False)\n else:\n raise ClientError(\"Error executing command '%s', with error \"\n \"code: %s\" % (cmd, returnCode))\n return stdout", "title": "" }, { "docid": "36daacbd02dba036662b91af24be1baa", "score": "0.6427535", "text": "def shell_exec(client, cmdline, shell=None, env=None):\n res=\"\"\n try:\n if client.is_android():\n if shell is None:\n shell=\"/system/bin/sh\"\n if shell is None:\n res=client.conn.modules.subprocess.check_output(\n cmdline,\n stderr=subprocess.STDOUT,\n stdin=subprocess.PIPE,\n shell=True,\n universal_newlines=True,\n env=env\n )\n else:\n if client.is_windows():\n command=[shell, '/c', cmdline]\n else:\n command=[shell, '-c', cmdline]\n\n res=client.conn.modules.subprocess.check_output(\n command,\n stderr=subprocess.STDOUT,\n stdin=subprocess.PIPE,\n universal_newlines=True,\n env=env\n )\n\n except Exception as e:\n if hasattr(e,'output') and e.output:\n res=e.output\n else:\n res=str(e)\n\n if client.is_windows():\n try:\n res=res.decode('cp437')\n except Exception:\n pass\n\n return res", "title": "" }, { "docid": 
"811c377c05fe17f9655291c2100ccac4", "score": "0.6416116", "text": "def __run_command(command):\n output = []\n return_code = 0\n try:\n p = subprocess.Popen(command,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n output = p.communicate()\n return_code = p.poll()\n except subprocess.CalledProcessError, error:\n print \"subprocess CalledProcessError.output = \" + error.output\n\n return output, return_code", "title": "" }, { "docid": "d26075670ea94a92d68e82ad824f79b7", "score": "0.6410121", "text": "def get_output_command(command_to_execute):\n print_verbose('Executing:')\n print_verbose(command_to_execute)\n\n p = subprocess.Popen(command_to_execute, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = p.communicate()\n ret_code = p.returncode\n\n if ret_code != 0:\n print_verbose(ret_code)\n print_verbose('Error:')\n print_verbose(err)\n\n return out, err, ret_code", "title": "" }, { "docid": "522e97ca9e08d28b6675c305046e9faf", "score": "0.64088047", "text": "def exec_command(cmnd, stdout=subprocess.PIPE, stderr=subprocess.PIPE):\n proc = subprocess.Popen(cmnd, shell=True, stdout=stdout, stderr=stderr)\n out, err = proc.communicate()\n if proc.returncode != 0:\n msg = err\n sys.stderr.writelines(f\"FAILED: {cmnd}\\n{msg}\")\n sys.exit(proc.returncode)\n return out.decode(\"utf8\") if out is not None else None", "title": "" }, { "docid": "b5f3788eefe95535470a78997e52b8c2", "score": "0.64079964", "text": "def _exec(command, check):\n complete = subprocess.run(command, stdout=subprocess.PIPE, encoding=\"utf-8\")\n if complete.returncode == 0:\n return complete.stdout[:-1]\n elif check:\n sys.exit(complete.returncode)\n else:\n return None", "title": "" }, { "docid": "4702f5a811922f5304a82b962c3140d7", "score": "0.6407", "text": "def cmd(in_cmd: Union[str, Iterable[str]], check: bool = True, err_text: bool = False) -> str: # run command and return its output\n print('$', in_cmd)\n if isinstance(in_cmd, str):\n in_cmd = in_cmd.split()\n result = run(in_cmd, capture_output=True, text=True)\n if result.stdout:\n print(result.stdout.rstrip())\n if result.stderr:\n print(result.stderr.rstrip())\n if check:\n result.check_returncode() # will raise subprocess.CalledProcessError()\n # return '\\n'.join(result.stdout.splitlines())\n return result.stdout + (result.stderr if err_text else '')", "title": "" }, { "docid": "6c7566a26a196140ce74ad0f10e1d0f0", "score": "0.64036757", "text": "def get_command_output(self, cmd):\n # workaround for pexpect echoing the command back\n shell = self.shell\n shell.sendline(cmd) # run a command\n shell.prompt()\n result = shell.before\n result = [res.strip() for res in shell.before.split(\"\\n\")]\n if result[0] == cmd:\n# First line is echo, return the next line\n return result[1]", "title": "" }, { "docid": "8d7d278a37c9893268b5d75c502098bb", "score": "0.6402119", "text": "def communicate(command):\n return subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True).communicate()", "title": "" }, { "docid": "2272795867d39e1796b911e412f5506b", "score": "0.63808566", "text": "def sh( cmd_arg_str, errout=sys.stderr ):\n # crashes after pyqt QApplication() with mac py 2.5.1, pyqt 4.4.2 \n # subprocess.py _communicate select.error: (4, 'Interrupted system call')\n # see http://bugs.python.org/issue1068268 subprocess is not EINTR-safe\n # QProcess instead of Popen works\n\n (lines, err) = subprocess.Popen( cmd_arg_str,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, 
shell=True ) \\\n .communicate() # wait til process ends\n if errout and err:\n print >>errout, err\n # trim the last \\n so sh( \"ls xx\" ) -> \"xx\" not \"xx\\n\"\n # and split( \"\\n\" ) -> no extra \"\"\n return lines[:-1] if (lines and lines[-1] == \"\\n\") \\\n else lines", "title": "" }, { "docid": "e753f0bf0a3cf745da80f893eabf499c", "score": "0.63728327", "text": "def runCommand(args):\n process = Popen(args, stdout=PIPE, stderr=STDOUT)\n stdout = process.stdout.read()\n exitCode = process.wait()\n if exitCode < 0:\n raise CommandFailed(None, -exitCode, stdout)\n elif exitCode > 0:\n raise CommandFailed(exitCode, None, stdout)\n return stdout", "title": "" }, { "docid": "3292ea93912dd8cb7e65f8b57139844c", "score": "0.6369483", "text": "def _run_command(self, cmd):\r\n out = call(cmd, shell=True)\r\n return out", "title": "" }, { "docid": "dcfdcbaa988b3a3032a871ffd30d16af", "score": "0.63572645", "text": "def test_translate_command_correctly_appends_stdout_redirect(self):\n r3 = commandRunner(tmp_id=self.id_string, tmp_path=self.tmp_path,\n out_globs=self.out_glob,\n command=\"ls $P1 $P2 $P3 $P4 /tmp\",\n params=self.flags_with_options,\n param_values=self.param_values,\n std_out_str=\"str.stdout\")\n test_string = \"ls -l -ah 12 b1 /tmp > str.stdout\"\n self.assertEqual(r3.command, test_string)", "title": "" }, { "docid": "1aeee91bfbca62830b63d2e53fdb6703", "score": "0.63550955", "text": "def _execute_command(command, module, additional_rc=0, shell=True):\n\n proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)\n\n try:\n stdout, stderr = proc.communicate()\n except OSError:\n proc.kill()\n stdout, stderr = proc.communicate()\n message = \"Command {0} failed.\".format(command)\n module.fail_json(msg=message, command_stderr=stderr)\n\n if proc.returncode != 0 and proc.returncode != additional_rc:\n message = \"Command {0} failed.\".format(command)\n module.fail_json(msg=message, command_stderr=stderr)\n\n return stdout.decode('utf-8')", "title": "" }, { "docid": "9dbea294b06685f49c0d6b5365c3f452", "score": "0.635227", "text": "def check_output(command):\n return subprocess.check_output(command).decode('utf-8')", "title": "" }, { "docid": "4fa0c66f011690c700f2bd392be1e8cc", "score": "0.6352048", "text": "def run_command(self, command):\n\n command_str = tuple( map( str, command ) )\n process = subprocess.Popen(command_str,\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE)\n output, error = process.communicate()\n output_str = output.decode()\n error_str = error.decode()\n\n return (output_str, error_str)", "title": "" }, { "docid": "f597422de1fe902aa416bdb18623954c", "score": "0.63490653", "text": "def capture(\n args: Sequence[Union[Path, str]],\n *,\n cwd: Optional[Path] = None,\n env: Optional[Dict[str, str]] = None,\n stdin: Union[None, int, IO[bytes], str] = None,\n stderr: Union[None, int, IO[bytes]] = None,\n) -> str:\n input = None\n if isinstance(stdin, str):\n input = stdin\n stdin = None\n\n return subprocess.check_output(\n args, cwd=cwd, env=env, input=input, stdin=stdin, stderr=stderr, text=True\n )", "title": "" }, { "docid": "43010185d1430961b0ea25de1a23ae30", "score": "0.63423526", "text": "def _run_command(cmd, env = None):\n\n\ttokens = shlex.split(cmd)\n\n\tsub_p = subprocess.Popen(\n\t\ttokens,\n\t\tenv = env,\n\t\tstdout = subprocess.PIPE,\n\t\tstderr = subprocess.PIPE)\n\n\tstdout, stderr = sub_p.communicate()\n\n\tstdout = stdout.strip()\n\tstderr = stderr.strip()\n\n\treturn stdout, stderr", 
"title": "" }, { "docid": "0e52b8f27e652e4688f40b6e9e020d3d", "score": "0.6341394", "text": "def run_cmd(cmd, shell, ignore_err=False, print_output=False):\n try:\n output = subprocess.check_output(cmd, shell=shell)\n if print_output:\n print output\n return output\n except subprocess.CalledProcessError as error:\n print >> sys.stderr, \"Error: {0}\".format(error)\n if not ignore_err:\n sys.exit(1)\n print \"error ignored\"\n return", "title": "" } ]
278e7ae4349320b7f95151fb8856a03b
Contains(self, long pos) -> bool Returns true if the given position is within this range. Allow for the possibility of an empty range: assume the position is within this empty range.
[ { "docid": "c0e75285b3986e56a0efafa16a793303", "score": "0.58059067", "text": "def Contains(*args, **kwargs):\n return _richtext.RichTextRange_Contains(*args, **kwargs)", "title": "" } ]
[ { "docid": "95343c548c87a7abe9748f2a672de283", "score": "0.8172459", "text": "def contains(self, pos: tuple[int, int]) -> bool:\n\n start = self._start\n end = self._end\n\n return start[0] <= pos[0] <= end[0] and start[1] <= pos[1] <= end[1]", "title": "" }, { "docid": "4d09d61785adeef6acbfeb71e847eda3", "score": "0.72117996", "text": "def contains_position_within_bounding_box(self, pos):\n if pos[0] < self.position[0] + self.nzis_min[0]:\n return False\n if pos[0] > self.position[0] + self.nzis_max[0]:\n return False\n if pos[1] < self.position[1] + self.nzis_min[1]:\n return False\n if pos[1] > self.position[1] + self.nzis_max[1]:\n return False\n return True", "title": "" }, { "docid": "26ce47b170a17fddc268d246fb541b9b", "score": "0.7065128", "text": "def in_bounds(self, pos):\n return ((0 <= pos[0] < self.cols)\n and (0 <= pos[1] < self.rows))", "title": "" }, { "docid": "3849f530ead551dab065d3b862437e37", "score": "0.6801321", "text": "def contains_position(self, pos):\n if self.is_rectangular:\n return self.contains_position_within_bounding_box(pos)\n else:\n return pos in self.offset_nzis", "title": "" }, { "docid": "12ea49bdd16f88efe64a5cc7b7739920", "score": "0.6777384", "text": "def isPositionInRoom(self, pos):\r\n return 0 <= pos.getX() < self.width and 0 <= pos.getY() < self.height", "title": "" }, { "docid": "2964f0ec3c2a2a8439fb67f9abe36c59", "score": "0.6745137", "text": "def in_bounds(self, pos):\n for p, d_max in zip(pos, self.dimension_max):\n if not 0 < p < d_max:\n return False\n return True", "title": "" }, { "docid": "432d6882b28a109e8839a1aa3bd20c93", "score": "0.67009354", "text": "def in_map_bounds(self, pos: Union[Point2, tuple, list]) -> bool:\n return (\n self.game_info.playable_area.x <= pos[0] <\n self.game_info.playable_area.x + self.game_info.playable_area.width and self.game_info.playable_area.y <=\n pos[1] < self.game_info.playable_area.y + self.game_info.playable_area.height\n )", "title": "" }, { "docid": "f3c14bdc91902dbf7b0875a8ecd1c790", "score": "0.65870935", "text": "def __contains__(self: IVL, other: Union[int, PositionTuple, IVL]) -> bool:\n if isinstance(other, GenomeInterval):\n cmp = self.compare(other)\n return cmp[0] == 0 and abs(cmp[2]) > 0\n\n if isinstance(other, tuple):\n contig, pos = other\n if self.contig != contig:\n return False\n else:\n pos = cast(int, other)\n\n return self.start <= pos < self.end", "title": "" }, { "docid": "982029c888e8f92eef4bb37f2c16cc63", "score": "0.65348476", "text": "def isWithinRegion(self, pos):\n return self.south < pos[1] < self.north and self.west < pos[0] < self.east", "title": "" }, { "docid": "647f4931b3cf7049fc1b3f8766ec3ecf", "score": "0.647095", "text": "def in_range(self, position):\n pos_x, pos_y = position\n dx = abs(pos_x - self.position[0])\n dy = abs(pos_y - self.position[1])\n result = False\n if dx + dy <= self.radius:\n result = True\n if dx > self.radius:\n result = False\n if dy > self.radius:\n result = False\n if pow(dx, 2) + pow(dy, 2) <= pow(self.radius, 2):\n result = True\n else:\n result = False\n return result", "title": "" }, { "docid": "81a0191f5e3fe4790253b14ce1c88f0a", "score": "0.635347", "text": "def is_position_in_room(self, pos):\n #\n # Look to see if the X and Y components of a position object\n # lies within the room's overall height and width constraints.\n # If both components within room, return True. 
Otherwise, return\n # default of False\n #\n inRoom = False\n posX = pos.get_x()\n if (posX < self.width) and (posX >= 0.0):\n posY = pos.get_y()\n if (posY < self.height) and (posY >= 0.0):\n inRoom = True\n\n return inRoom", "title": "" }, { "docid": "578ff46229f6092025b91562bf89086e", "score": "0.6320117", "text": "def __contains__(self, val):\r\n for begin,end,step in self._ranges:\r\n if val>=begin and (end is None or val<=end) and (val-begin)%step==0:\r\n return True\r\n return False", "title": "" }, { "docid": "a9393f78dea43ebae0b35e844c7f5e9e", "score": "0.6310855", "text": "def __contains__(self,point):\n return point in self.absolute", "title": "" }, { "docid": "5999fa5981336c92dc7a278f12b29eb9", "score": "0.62853277", "text": "def is_containing(self, another: 'Range') -> bool:\n return self.start <= another.start and self.end >= another.end", "title": "" }, { "docid": "c7462bc15071c554bde45befef412163", "score": "0.6280187", "text": "def contains(self, x):\n\n return self._lbound <= x and self._rbound >= x", "title": "" }, { "docid": "7340fb9bf010c4204d22e54df1c662b8", "score": "0.6256635", "text": "def __contains__(self, address):\n return self.startaddress <= address <= self.endaddress", "title": "" }, { "docid": "3e04daf28e62b2a66b278de80d7c1464", "score": "0.62473416", "text": "def __contains__(self, item):\n for r in self._boundaries_and_ranges.values():\n if item in r:\n return True\n else:\n continue\n return False", "title": "" }, { "docid": "153b33e6ab377683ce005475fe236e50", "score": "0.61978984", "text": "def __contains__(self, key):\n if isinstance(key, int):\n if not self._in_range(key):\n return False\n try:\n self._find_end(key)\n return True\n except IndexError:\n return False\n return False", "title": "" }, { "docid": "46259b1bc6f9f8b170dc4da1dd90e0c6", "score": "0.6180543", "text": "def __contains__(self, interval: GenomeInterval) -> bool:\n contig = interval.contig\n if contig not in self.interlaps:\n return False\n return interval in self.interlaps[contig]", "title": "" }, { "docid": "67cf48cf4af0977d70f07004f5bd0ebe", "score": "0.6155519", "text": "def containsPoint(self, point):\n return self.begin < point and self.end >= point", "title": "" }, { "docid": "de23ea9835b9372a23fee44ddff52e06", "score": "0.61298823", "text": "def has_pos(self, pos):\n isp = Token.is_pos_func(pos)\n return any(map(isp, self.tokens))", "title": "" }, { "docid": "5845dc035f657a5ea75d2fbd8f90ba4b", "score": "0.61113787", "text": "def isOver(self, pos):\n if pos[0] > self.x and pos[0] < self.x + self.width:\n if pos[1] > self.y and pos[1] < self.y + self.height:\n return True\n\n return False", "title": "" }, { "docid": "0d1e83b2bc979e126749196f1b2862fc", "score": "0.60679144", "text": "def _isFree(self, pos:ndarray):\n map = self._state.getMap()\n return map.isInside(pos) and map.get(pos) != \"*\" \\\n and (self._parameters['allow_robot_overlap'] or not(self._isRobot(pos)))", "title": "" }, { "docid": "a1bd22c77441c816e4a7b2b60a90f4c4", "score": "0.6060213", "text": "def is_colliding(self, pos, radius):\n if self.overlaps(pos, radius + self.r):\n return True\n else:\n\n return False", "title": "" }, { "docid": "051b2696e4a52afbe8d5c33784dfb7c6", "score": "0.6026054", "text": "def contains(self, x):\n return bounds_contains(self.bounds, x)", "title": "" }, { "docid": "822ae64acef693d1e9d736186ce50377", "score": "0.60177416", "text": "def position_occupied(self, position):\n\n return self.grid[position] != '.'", "title": "" }, { "docid": "ad1e704b3eafe3b552996829539e7c12", "score": 
"0.59813607", "text": "def in_mouse_area(self, pos):\n y = pos[1]\n # Since the the length of the mouse area is the length of the window,\n # the x value doesn't matter.\n return 0 < y <= self.MOUSE_AREA_HEIGHT", "title": "" }, { "docid": "2614c4db2b69798b94e40c3debf1b789", "score": "0.5972361", "text": "def in_placement_grid(self, pos: Union[Point2, Unit]) -> bool:\n assert isinstance(pos, (Point2, Unit)), \"pos is not of type Point2 or Unit\"\n pos = pos.position.rounded\n return self.game_info.placement_grid[pos] == 1", "title": "" }, { "docid": "811fbf9f581fc37bd6b21785ccd30f90", "score": "0.5960338", "text": "def __contains__(self, point: Point) -> bool:\n return bool(self.locate(point))", "title": "" }, { "docid": "e9d298807ed290968f7b446ccfe6d6b9", "score": "0.59505975", "text": "def isOccupied(self, position):\n\n return position in self._occupiedpositions", "title": "" }, { "docid": "95fa590fa4eb123972182c41e24142ac", "score": "0.59491247", "text": "def __contains__(self, addr):\n numeric_addr = addr.parse_ipaddr(addr, self.is_ipv6)\n return self.numeric_range[0] <= numeric_addr <= self.numeric_range[1]", "title": "" }, { "docid": "0d49a2a7921c8fa2ee38e8a40f24d88a", "score": "0.5949023", "text": "def position_on_grid(self, position: list):\n x, y = position\n return (0 <= x < self.size) and (0 <= y < self.size)", "title": "" }, { "docid": "cc2024aee6618df9982e439b5a21915c", "score": "0.59455234", "text": "def IsWithin(*args, **kwargs):\n return _richtext.RichTextRange_IsWithin(*args, **kwargs)", "title": "" }, { "docid": "953a1f7cbad538fccea09bca9c4b734d", "score": "0.593418", "text": "def within_block(self, block_order, pos):\n ref_s, ref_e, qry_s, qry_e = self.block_infos[block_order]\n if pos >= ref_s and pos <= ref_e:\n return True\n elif pos >= ref_e and pos <= ref_s:\n return True\n else:\n return False", "title": "" }, { "docid": "365d58633ea7c957a3dc02a26ffef582", "score": "0.5934124", "text": "def hit_me(self, pos):\n\t\tif pos[1] > self.configuration['height'] or pos[1] < 0:\n\t\t\t# print \"too high: \", self.position, self.configuration['height']\n\t\t\treturn False\n\t\tdis = Util.vector2_distance([pos[0], pos[2]], [self.position[0], self.position[2]])\n\t\tif dis <= self.configuration['body_radius']:\n\t\t\treturn True\n\t\t# print \"dis: \", dis, self.configuration['body_radius']\n\t\treturn False", "title": "" }, { "docid": "65aa2fca8c5777711fefb1ad736697da", "score": "0.59187245", "text": "def is_position_valid(self, pos):\n if self.is_position_in_room(pos):\n return not self.is_position_furnished(pos)\n else:\n return False\n # raise NotImplementedError", "title": "" }, { "docid": "a0b5dba0c4de77616541757ea9921f3d", "score": "0.58818054", "text": "def within_bounds(self, x: int, y: int) -> bool:\n return 0 <= x <= self.size and 0 <= y <= self.size", "title": "" }, { "docid": "6b0b418e088a3910896c2546fa91bbd8", "score": "0.5872159", "text": "def contains(self, other: Union[Vec2, Rect]) -> bool:\n if isinstance(other, Vec2):\n return (self.x <= other.x < self.right) and (\n self.y <= other.y < self.bottom\n )\n else:\n return (\n self.x <= other.x\n and self.right >= other.right\n and self.y >= other.y\n and self.bottom <= other.bottom\n )", "title": "" }, { "docid": "d359a55022c5cfe10f5d9f2d0bc19d04", "score": "0.58679396", "text": "def __contains__(self, other: GenomeInterval) -> bool:\n left = InterLap.binsearch_left_start(\n self._iset, other.start - self._maxlen, 0, len(self._iset)\n )\n # Use a shortcut, since often the found interval will overlap.\n 
max_search = 8\n if left == len(self._iset):\n return False\n for left_ivl in self._iset[left:(left + max_search)]:\n if left_ivl in other:\n return True\n if left_ivl.start > other.end:\n return False\n\n r = InterLap.binsearch_right_end(self._iset, other.end, 0, len(self._iset))\n return any(s in other for s in self._iset[(left + max_search):r])", "title": "" }, { "docid": "75cdef75fe39b1a8bd15fef8ef3e9898", "score": "0.5858227", "text": "def eh_posicao(pos):\n if not isinstance(pos, int):\n return False\n elif isinstance(pos, bool):\n return False\n elif not (1 <= pos and pos <= 9):\n return False\n return True", "title": "" }, { "docid": "a579620945ac2b37877d80ef599fcb49", "score": "0.5838917", "text": "def in_bounds(self, move: Tuple[int, int]) -> bool:\r\n return 0 <= move[0] <= self.n - 1 and 0 <= move[1] <= self.n - 1", "title": "" }, { "docid": "558271df7b3cd27e79ff56dae8096c7e", "score": "0.5832424", "text": "def in_bounds(self, point: Point) -> bool:\n return 0 <= point.x < self.width and 0 <= point.y < self.height", "title": "" }, { "docid": "c9d4e229f3fed0b9ca19197ff4c974d5", "score": "0.58246696", "text": "def __gt__(self, addr):\n if isinstance(addr, AddressRange):\n addr = addr.end\n return self._start > addr + 1", "title": "" }, { "docid": "ccd3038ba1af6a930f1185033e8660dd", "score": "0.58172864", "text": "def contains(self, addr: ghidra.program.model.address.Address) -> bool:\n ...", "title": "" }, { "docid": "500586cde291beba4925c6b6a97480e8", "score": "0.5816224", "text": "def isItem(self, pos):\n (l, c) = pos\n return self.tab[l][c] < -1 or pos in self.items", "title": "" }, { "docid": "9e173d4954a737f46aaa366549b825f6", "score": "0.5810307", "text": "def contains(self, x, y):\n dx = x - self.origin.x\n if dx < 0 or dx > CELL_SIZE:\n return False\n dy = y - self.origin.y\n if dy < 0 or dy > CELL_SIZE:\n return False\n return True", "title": "" }, { "docid": "ddafb2d6816c10d6de8828eca1752322", "score": "0.5808291", "text": "def __contains__(self, address):\n return self.startaddress <= address <= self.endaddress \\\n or self.FCTL1 <= address <= self.FCTL3+1", "title": "" }, { "docid": "8e2426d23d8477defc7e5e42c5ce6961", "score": "0.58038", "text": "def pos_in_rect(rect, pos):\n pos_x, pos_y = pos\n x, y, width, height = rect\n return (x <= pos_x <= x + width\n and y <= pos_y <= y + height)", "title": "" }, { "docid": "931c60a1844320c928f7a6c279351476", "score": "0.5800669", "text": "def click(self, pos):\n return self.x <= pos[0] <= self.x + self.width and self.y <= pos[0] <= self.y + self.height", "title": "" }, { "docid": "57e6ecf30d12e9519e514920a2880c8e", "score": "0.5796292", "text": "def _in_bounds(v, lb, ub):\n\n return np.logical_and(v >= lb, v <= ub).all()", "title": "" }, { "docid": "975c295098ca1ee9e2b7987e1a0a2077", "score": "0.5768362", "text": "def isInside(self, position, box):\r\n ## en 2D on a collision si xa2>xb1 And xa1<xb2 And ya2>yb1 And ya1<yb2\r\n if box[0].x <= position.x <= box[1].x and box[0].y <= position.y <= box[1].y:\r\n return True\r\n return False", "title": "" }, { "docid": "a6db30e42ac0251115d6c9b02557d34d", "score": "0.5761346", "text": "def Contains(*args, **kwargs):\n return _core_.Rect_Contains(*args, **kwargs)", "title": "" }, { "docid": "09ac898d2802b52fa9f2b0138651fd38", "score": "0.5745336", "text": "def in_bounds(bounds: Tuple[float, float], coord: xarray.Coordinate) -> bool:\n bn, bx = bounds\n return (coord >= bn) & (coord <= bx)", "title": "" }, { "docid": "09ac898d2802b52fa9f2b0138651fd38", "score": "0.5745336", "text": 
"def in_bounds(bounds: Tuple[float, float], coord: xarray.Coordinate) -> bool:\n bn, bx = bounds\n return (coord >= bn) & (coord <= bx)", "title": "" }, { "docid": "fedd436ec43f58286a5c9f839210ee4f", "score": "0.57371914", "text": "def __contains__(self, key: Any) -> bool:\n hash(key)\n if not isinstance(key, Interval):\n if is_valid_na_for_dtype(key, self.dtype):\n return self.hasnans\n return False\n\n try:\n self.get_loc(key)\n return True\n except KeyError:\n return False", "title": "" }, { "docid": "f7100040b18eb3c47f5ae15ed7095538", "score": "0.5730351", "text": "def valid_pos(self, pos, obj_id):\n if pos.x not in range(0, self.width):\n return False\n if pos.y not in range(0, self.height):\n return False\n\n if self.collisions:\n for idx, obj in self.objects.items():\n if idx == obj_id:\n continue\n\n if pos == obj.pos:\n return False\n\n return True", "title": "" }, { "docid": "52ef8356dd532b54601a0450118fcc99", "score": "0.5716064", "text": "def contains(self, other):\n return self.begin <= other.begin and self.end >= other.end", "title": "" }, { "docid": "eace2518f41e88c20e5e8e5dc510a32e", "score": "0.5683846", "text": "def is_position_occupied(self, position: Position) -> Optional[bool]:\n if position not in self.__visibility_map.get_visible_tiles(self.__game_history.active_player):\n return None\n return position in self.__game_object_positions", "title": "" }, { "docid": "5666e4fa15b69da6ffc1f7b5604d937a", "score": "0.5678689", "text": "def _within_boundary(point, boundary):\n if (0 <= point[0] <= boundary[0]) and (0 <= point[1] <= boundary[1]):\n return True\n else:\n raise ValueError('The position value is not within the boundary of the slider widget')", "title": "" }, { "docid": "a177417b5598ff20ab5eafeca9391bd8", "score": "0.56679475", "text": "def in_range(self, bounds):\n if len(bounds) != self.dim:\n raise Exception(\"DIMENSIONAL INCONSISTENCY WHILE CALLING IN_RANGE\")\n for i in range(self.dim):\n if(not(bounds[i][0] <= self.coordinate[i] <= bounds[i][1])):\n return False\n return True", "title": "" }, { "docid": "c55aea7688e28da96a9ad2941b6560e3", "score": "0.56626403", "text": "def contains(self, lat, lng):\n return self.min_lat <= lat <= self.max_lat and self.min_lng <= lng <= self.max_lng", "title": "" }, { "docid": "df20d81b496c36e98629bfbfbdf80f5d", "score": "0.5654975", "text": "def includes(self, *args):\n if len(args) == 1:\n if isinstance(args[0], Bounds):\n b = args[0]\n return (self.isDefined() and b.isDefined() and\n self.xmin <= b.xmin and\n self.xmax >= b.xmax and\n self.ymin <= b.ymin and\n self.ymax >= b.ymax)\n elif isinstance(args[0], Position):\n p = args[0]\n return (self.isDefined() and\n self.xmin <= p.x <= self.xmax and\n self.ymin <= p.y <= self.ymax)\n else:\n raise TypeError(\"Invalid argument %s\"%args[0])\n elif len(args) == 2:\n x, y = args\n return (self.isDefined() and\n self.xmin <= float(x) <= self.xmax and\n self.ymin <= float(y) <= self.ymax)\n elif len(args) == 0:\n raise TypeError(\"include takes at least 1 argument (0 given)\")\n else:\n raise TypeError(\"include takes at most 2 arguments (%d given)\"%len(args))", "title": "" }, { "docid": "1b7c44077d3687eff63731c0e90d9c61", "score": "0.5646777", "text": "def is_visible(self, pos: Union[Point2, Unit]) -> bool:\n # more info: https://github.com/Blizzard/s2client-proto/blob/9906df71d6909511907d8419b33acc1a3bd51ec0/s2clientprotocol/spatial.proto#L19\n assert isinstance(pos, (Point2, Unit)), \"pos is not of type Point2 or Unit\"\n pos = pos.position.rounded\n return 
self.state.visibility[pos] == 2", "title": "" }, { "docid": "6392378a3addb436431339115e2c337a", "score": "0.5643023", "text": "def __in_range(self, wanted_cor):\n if wanted_cor in self.cell_list():\n return True\n return False", "title": "" }, { "docid": "33fa2a1b9f0051dffe21b9edb936b31c", "score": "0.5637544", "text": "def __contains__(self, other: 'Segment'):\n return (self.start <= other.start) and (self.end >= other.end)", "title": "" }, { "docid": "35b22f776501a8e6f96e003eb515128c", "score": "0.56350636", "text": "def contains(self, offset):\n nmin = self.getoffset()\n nmax = nmin + self.blocksize()\n return (offset >= nmin) and (offset < nmax)", "title": "" }, { "docid": "b97c0b44f3400f1eb215d3c7009484f1", "score": "0.56319535", "text": "def in_bounds(self, point):\n (x, y) = point\n return 0 <= x < self.width and 0 <= y < self.height", "title": "" }, { "docid": "33a8452e67a3821c2e7100abf34e2b34", "score": "0.56269264", "text": "def rect_contains(rect: Tuple, point: Tuple) -> bool:\n\n if point[0] < rect[0]:\n return False\n elif point[1] < rect[1]:\n return False\n elif point[0] > rect[2]:\n return False\n elif point[1] > rect[3]:\n return False\n return True", "title": "" }, { "docid": "ec31c43b1d510990db7198f029f5fa86", "score": "0.5626034", "text": "def contains(self, index):\n return self.first <= index <= self.last", "title": "" }, { "docid": "082dc1356cc18d96a8964194d450d354", "score": "0.56213504", "text": "def within_range(self, x, y, r=50):\n if self.X > x-r and self.X < x+r and self.Y > y-r and self.Y < y+r:\n return True\n return False", "title": "" }, { "docid": "301f2d76fb769f24573dc8b32f0cffb8", "score": "0.562047", "text": "def valid_index(size, pos):\n return True if -1 < pos[0] < size and -1 < pos[1] < size else False", "title": "" }, { "docid": "9b6614ce6dae830ec8dc5f9c48dbf73b", "score": "0.5618637", "text": "def contains(self, time, inclusive=True):\n if inclusive:\n return (self.start <= time and time <= self.end)\n else:\n return (self.start < time and time < self.end)", "title": "" }, { "docid": "606fa267e4f658a9e9c2b5b866f695c2", "score": "0.56099606", "text": "def check_pos(self):\n pos = pygame.mouse.get_pos()\n if pos[0] >= self.rect.left and pos[0] <= self.rect.right:\n if pos[1] >= self.rect.top and pos[1] <= self.rect.bottom:\n return True\n else:\n return False\n else:\n return False", "title": "" }, { "docid": "f894b4780728578894389bdbb1a75f7d", "score": "0.5606576", "text": "def valid_pos(solution, pos):\n for queen in solution:\n if queen[1] == pos[1]:\n return False\n if (queen[0] + queen[1]) == (pos[0] + pos[1]):\n return False\n if (queen[0] - queen[1]) == (pos[0] - pos[1]):\n return False\n return True", "title": "" }, { "docid": "43d0f972a44ddd9234b7cb017c9bd5a1", "score": "0.56035584", "text": "def contains(self, rect: WindowType | Dnd) -> bool:\n if rect.x + rect.width < self.x:\n return False\n if rect.y + rect.height < self.y:\n return False\n\n ow, oh = self.wlr_output.effective_resolution()\n if self.x + ow < rect.x:\n return False\n if self.y + oh < rect.y:\n return False\n\n return True", "title": "" }, { "docid": "9160c431e26beedb46f50b9897c0d8e5", "score": "0.55993026", "text": "def in_bounds(self, x: int, y: int) -> bool:\n return 0 <= x < self.width and 0 <= y < self.height", "title": "" }, { "docid": "b14e30faefead116ce475deb2e8abb91", "score": "0.5581267", "text": "def is_contain(self, x, y):\n if x < 0 or x >= self.width or y < 0 or y >= self.height:\n return False\n return True", "title": "" }, { "docid": 
"fbe2926b1969f06b7dd2bd219debbdde", "score": "0.55777085", "text": "def _is_position(self, global_step, pos):\n if pos == \"start\":\n step = global_step\n elif pos == \"end\":\n step = global_step - self._start_end_difference\n else:\n raise ValueError(f\"Invalid position '{pos}'. Expect {self._positions}.\")\n\n return self._track_schedule(step)", "title": "" }, { "docid": "26fe647d935bdaef07a3f72c589827d9", "score": "0.5570947", "text": "def check(self, pos):\n return bool(self.buf[self._bytepos(pos)] & (1 << self._bitpos(pos)))", "title": "" }, { "docid": "932b5ecc79978a55d006a66f2d2ad96e", "score": "0.55670625", "text": "def is_colliding(self, pos, radius):\n if self.fallen:\n # can't collide with fallen trees - they're gone\n return False\n if self.overlaps(pos, radius + self.r):\n return True\n else:\n return False", "title": "" }, { "docid": "932b5ecc79978a55d006a66f2d2ad96e", "score": "0.55670625", "text": "def is_colliding(self, pos, radius):\n if self.fallen:\n # can't collide with fallen trees - they're gone\n return False\n if self.overlaps(pos, radius + self.r):\n return True\n else:\n return False", "title": "" }, { "docid": "47d3f14d4f69a4a14430abe32001c15a", "score": "0.5564439", "text": "def contains(self, point):\n x_upper_bound = self.corner.x + self.width\n y_upper_bound = self.corner.y + self.height\n if (self.corner.x <= point.x < x_upper_bound) and (self.corner.y <= point.y < y_upper_bound):\n return True\n else:\n return False", "title": "" }, { "docid": "bb4b35aa3343fc14fbc750c830e05c56", "score": "0.5560723", "text": "def isWithin(self, rect):\n return rect.contains(self.r.toPointF())", "title": "" }, { "docid": "905ef7b8e167a5018d84924661b186d0", "score": "0.5554967", "text": "def ContainsXY(*args, **kwargs):\n return _core_.Rect_ContainsXY(*args, **kwargs)", "title": "" }, { "docid": "4b5785e3b5252b968c11f89d1e9e7fb1", "score": "0.55446434", "text": "def in_bounds(self, x: int, y: int) -> bool:\n return (0 <= x < self.width) and (0 <= y < self.height)", "title": "" }, { "docid": "dbc4027927b8dde5498cfc51d053497f", "score": "0.55408806", "text": "def __contains__(self, point: Point[Scalar]) -> bool:", "title": "" }, { "docid": "5e8c089474f0e310b58e542ed2fb399a", "score": "0.55386317", "text": "def check_coordinates(chromosome, pos, coordinates):\n chrom_match = CHR_PATTERN.match(chromosome)\n chrom = chrom_match.group(2)\n\n if chrom != coordinates[\"chrom\"]:\n return False\n\n if pos >= coordinates[\"start\"] and pos <= coordinates[\"end\"]:\n return True\n\n return False", "title": "" }, { "docid": "cc574ffbadaccda11b9a1af9ec5bbf9b", "score": "0.55378103", "text": "def contains(self, ele, relX, relY):\n return ((ele.getAbsoluteLeft() <= relX) and\n (ele.getAbsoluteRight() >= relX) and\n (ele.getAbsoluteTop() <= relY) and\n (ele.getAbsoluteBottom() >= relY))", "title": "" }, { "docid": "aab469c46bb3125680c3e7f1dfdbe51a", "score": "0.5534204", "text": "def is_reachable_from(self, position: np.ndarray) -> bool:\n if self.forbidden:\n return False\n longitudinal, lateral = self.local_coordinates(position)\n is_close = math.fabs(lateral) <= 2 * self.width_at(longitudinal) and \\\n 0 <= longitudinal < self.length + self.VEHICLE_LENGTH\n return is_close", "title": "" }, { "docid": "bb5699862c8cc9950ed2a9597da3c72e", "score": "0.5532316", "text": "def has_range(self):\n return self.definition.has_range", "title": "" }, { "docid": "13e9a1b9ba0c37817194a7bb1546f83d", "score": "0.5531086", "text": "def contains(self, coordinate: ShapeCoo) -> bool:\n if 
rect_contains(self.rect, coordinate):\n return bool(\n self.part[coordinate.idx[0] - self.rect[0], coordinate.idx[1] - self.rect[2]]\n )\n else:\n return False", "title": "" }, { "docid": "3f002a04cf74ef1f7d7a4564c2559f46", "score": "0.5521698", "text": "def contains(self, other):\n\n if isinstance(other, self.__class__):\n if not self:\n return bool(other)\n elif not other or other.startsafter(self) and other.endsbefore(self):\n return True\n else:\n return False\n elif isinstance(other, self.type):\n if self.lower_inc and self.upper_inc:\n return self.lower <= other <= self.upper\n elif self.lower_inc:\n return self.lower <= other < self.upper\n elif self.upper_inc:\n return self.lower < other <= self.upper\n else:\n return self.lower < other < self.upper\n else:\n raise TypeError(\n \"Unsupported type to test for inclusion '{0.__class__.__name__}'\".format(\n other))", "title": "" }, { "docid": "e008088a5ba2f0f65d3040248787cb68", "score": "0.55104876", "text": "def contains(self, other_entity) -> bool:\n return (\n other_entity.char_span.start >= self.char_span.start and other_entity.char_span.stop <= self.char_span.stop\n )", "title": "" }, { "docid": "ba365f18a622e222ed1a53ee9deb5509", "score": "0.55025333", "text": "def __contains__(self, other):\n return self[0] >= other[0] and self[1] >= other[1] and self[2] >= other[2]", "title": "" }, { "docid": "7083f8606594a205a0c8a5c9faf98f3b", "score": "0.5475389", "text": "def __contains__(self, item):\n return self[item] >= self.cutoff", "title": "" }, { "docid": "412f5464b3b76c4f5e3fe7149062e688", "score": "0.54750246", "text": "def is_inside(self, point):\n point = npw.asrealarray(point)\n return ((point - self.origin) >= 0.).all() and ((self.end\n - point) >= 0.).all()", "title": "" }, { "docid": "6c2feac3a9308627974651dd5f401624", "score": "0.54674876", "text": "def _within_bounds(self, x):\n for i, xi in enumerate(x):\n if not self.bounds[i][0] <= xi <= self.bounds[i][1]:\n return False\n return True", "title": "" }, { "docid": "0baa1cb74ec3a418ee3b67acdde70ca2", "score": "0.54583395", "text": "def __is_valid(self, pos: Tuple[int, int], value: int):\n for x in range(9):\n for y in range(9):\n # If the tile being checked is in a position where it needs to be checked, and isnt the same as pos\n if (x // 3 == pos[0] // 3 and y // 3 == pos[1] // 3) or (x == pos[0] or y == pos[1]) and ([x, y] != pos):\n if self.__board[x][y] == value:\n return False\n return True", "title": "" }, { "docid": "1e4d47fa8b3772e2dbbaf3f0b981bc18", "score": "0.5457573", "text": "def contains(self, x, y, precision=100):\n bx, by, bw, bh = self.bounds\n if bx <= x <= bx+bw and \\\n by <= y <= by+bh:\n if self._polygon is None \\\n or self._polygon[1] != precision:\n self._polygon = [(pt.x,pt.y) for pt in self.points(precision)], precision\n # Ray casting algorithm:\n return geometry.point_in_polygon(self._polygon[0], x, y)\n return False", "title": "" }, { "docid": "5717cb88144f259078994f19260dbe82", "score": "0.54464066", "text": "def is_in_bounds(self, x=None):\n if x is None:\n x = self.parent.val\n # check whether bounds are satisfied for all variables\n return True if np.all(np.logical_and(x > self.bounds[0], x < self.bounds[1])) else False", "title": "" } ]
a654cc135a8083e07cfc3aa0eafe3c1c
Defines a convolution / transposed-convolution net
[ { "docid": "e2e1af57c24c4e88823752d69fb91bad", "score": "0.6090154", "text": "def conv_pool_transconv_one_hot(img_bands = 4, img_rows = 64, img_cols = 64,nb_blocks=4, in_blocks=[3,4,6,3], filter_depth=[64,128,256,512], dense_layers=1 ,categorical=False,nb_classes=-1, droprate=0.9):\n assert len(filter_depth) == nb_blocks, \"filter_depth should have nb_blocks elements\"\n input_img = Input(shape=(img_bands, img_rows, img_cols))\n net = input_img\n for b in range(nb_blocks):\n \n if len(in_blocks) == 1:#same number of residual blocks\n blocks = in_blocks[0]\n else:\n assert len(in_blocks)==nb_blocks, \"res_blocks should have either 1 or nb_blocks elements\"\n blocks = in_blocks[b] \n \n for rb in range(blocks):\n net = Conv2D(filter_depth[b], (3, 3), activation='relu', padding='same', data_format=\"channels_first\", name='block'+str(b)+'_conv'+str(rb) , trainable=True)(net)\n net = Dropout(droprate)(net)\n net = Conv2D(filter_depth[b],(3, 3), strides=2, padding='same', data_format=\"channels_first\", name='block'+str(b)+'_pool')(net) \n \n _,_,_,w = Model(input_img, net).output_shape\n i = 0\n while w != img_rows:\n net = Conv2DTranspose(64, (3, 3), strides=2, activation='relu', padding='same', data_format=\"channels_first\", name='deconv'+str(i) , trainable=True)(net)\n i += 1\n _,_,_,w = Model(input_img, net).output_shape\n if categorical:\n assert nb_classes != -1, 'parameter nb_classes should be defined' \n output = Flatten()(net)\n output = Dense(nb_classes,activation='softmax', name=\"output_layer\")(output)\n else:\n output = Conv2D(nb_classes, (1, 1), activation='softmax', padding='same', data_format=\"channels_first\", name='output' , trainable=True)(net)\n return Model(input_img, output)", "title": "" } ]
[ { "docid": "39880de3ececd47957d4c19d44597456", "score": "0.6619897", "text": "def Conv_Layer(self,inputs,filter_num=64,ks1=3,ks2=3,s1=1,s2=1,\r\n Transpose=False,use_bias=False,only_conv=False):\r\n initializer = tf.random_normal_initializer(0., 0.02)\r\n if Transpose:\r\n x1 = tf.keras.layers.Conv2DTranspose(filters=filter_num,kernel_size=(ks1,ks2),strides=(s1,s2),\r\n padding='same',use_bias=use_bias,kernel_initializer=initializer)(inputs)\r\n else:\r\n x1 = tf.keras.layers.Conv2D(filters=filter_num,kernel_size=(ks1,ks2),strides=(s1,s2),\r\n padding='same',use_bias=use_bias,kernel_initializer=initializer)(inputs)\r\n if only_conv:\r\n return x1\r\n else: \r\n return self.Activation(x1)", "title": "" }, { "docid": "9ed1066a6b65e0ca5b0b99f935c4cc38", "score": "0.6511969", "text": "def __init__(self, out_filters, strides=(1, 1, 1), filter_shape=None, use_bias=False, name='conv_transposed'):\n self.in_shape = None\n self.in_filters = None\n self.out_filters = out_filters\n self.out_shape = None\n self.strides = strides\n self.use_bias = use_bias\n self.filter_shape = filter_shape\n self.full_strides =[1,] + list(self.strides) + [1,]\n\n self._rank = len(list(self.strides))\n assert 1 < self._rank < 4, 'Transposed convolutions are only supported in 2D and 3D'\n\n super(TransposedConvolution, self).__init__(name=name)", "title": "" }, { "docid": "bfb6e6d6c42b70b61657b64f45b6e920", "score": "0.65014595", "text": "def deconv(c_in, c_out, k_size, stride=2, pad=1, bn=True):\n layers = []\n layers.append(nn.ConvTranspose2d(c_in, c_out, k_size, stride, pad))\n if bn:\n layers.append(nn.InstanceNorm2d(c_out))\n return nn.Sequential(*layers)", "title": "" }, { "docid": "3e299ffb40f52fe0fe41bac5530960af", "score": "0.6491198", "text": "def deconv(c_in, c_out, k_size, stride=2, pad=1, bn=True):\n layers = []\n layers.append(nn.ConvTranspose2d(c_in, c_out, k_size, stride, pad, bias=False))\n if bn:\n layers.append(nn.BatchNorm2d(c_out))\n return nn.Sequential(*layers)", "title": "" }, { "docid": "8771ed7e60006184c438ee452294039a", "score": "0.644876", "text": "def tuned_conv(\n x_shape,\n w_shape,\n x_stride,\n w_stride,\n stride,\n padding,\n dilation,\n transposed,\n output_padding,\n groups,\n device,\n dtype,\n adjust_triton=0.95,\n):\n\n sizevars = V.graph.sizevars\n x_shape = [sizevars.size_hint(s) for s in x_shape]\n w_shape = [sizevars.size_hint(s) for s in w_shape]\n x_stride = [sizevars.size_hint(s) for s in x_stride]\n w_stride = [sizevars.size_hint(s) for s in w_stride]\n x = rand_strided(x_shape, x_stride, device=device, dtype=dtype)\n w = rand_strided(w_shape, w_stride, device=device, dtype=dtype)\n # the identifiable args for the layers\n id_args = [\n *x_shape,\n *w_shape,\n stride,\n padding,\n dilation,\n transposed,\n output_padding,\n groups,\n # *x_stride,\n # *w_stride,\n ]\n use_cuda = x.is_cuda\n\n # gen_key\n key = tuple(id_args)\n key = (\"conv\",) + key\n\n # candidate kernels\n kernels = [\"aten.convolution\"]\n if use_cuda:\n kernels += [\"triton_ops.conv\"]\n\n # filter kernels that args/kwargs does not meet requirements\n remove_kernels = []\n if groups > 1 or transposed:\n remove_kernels += [\"triton_ops.conv\"]\n kernels = [k for k in kernels if k not in remove_kernels]\n\n # if only one choice, return that kernel\n if len(kernels) == 1:\n kernel = kernels[0]\n # return kernel(\n # x, w, stride, padding, dilation, transposed, output_padding, groups\n # )\n return kernel\n timings = {}\n if key not in autotune.cache:\n for kernel in kernels:\n runnable_kernel = 
str2func(kernel)\n if \"triton_ops\" in kernel:\n # because we use nhwc layout by default for triton conv\n x = x.to(memory_format=torch.channels_last)\n run_args = (\n x,\n w,\n None,\n stride,\n padding,\n dilation,\n transposed,\n output_padding,\n groups,\n )\n timing, _, _ = autotune._bench(runnable_kernel, *run_args)\n if \"triton_ops\" in kernel:\n timing = timing * adjust_triton\n timings[kernel] = timing\n autotune.cache[key] = builtins.min(timings, key=timings.get)\n if config.debug:\n print(\"for key = \", key)\n print(\"timing\", timings)\n print(\"best_kernel\", autotune.cache[key])\n best_kernel = autotune.cache[key]\n # if best_kernel == \"triton_ops.conv\":\n # print(key, best_kernel)\n return best_kernel", "title": "" }, { "docid": "c7843300b1c02682ad98ee8dd474e445", "score": "0.6444139", "text": "def _deconv_layer(self, name, kernel_size, output_channels, stride=[1, 1],\n padding='VALID', activation=tf.nn.relu, std=None, mean=0,\n bias=1e-3, wd_w=1e-3, wd_b=1e-3, add_bias=True,\n trainable=True):\n if std is None:\n init_w = tf.initializers.glorot_normal()\n else:\n init_w = tf.keras.initializers.truncated_normal(stddev=std,\n mean=mean)\n init_b = tf.constant_initializer(bias)\n\n lay = tf.keras.layers.Conv2DTranspose(\n filters=output_channels, kernel_size=kernel_size,\n strides=stride, padding=padding, activation=activation,\n use_bias=add_bias,\n kernel_initializer=init_w, bias_initializer=init_b,\n kernel_regularizer=tf.keras.regularizers.l2(l=wd_w),\n bias_regularizer=tf.keras.regularizers.l2(l=wd_b),\n name=name, trainable=trainable)\n\n return lay", "title": "" }, { "docid": "6a2a71659aa8c79e2094163673c62fb0", "score": "0.639423", "text": "def conv_layer(self, inputs, num_filters, filter_size, strides, activation=None, transpose=False):\n self.conv_layer_num += 1\n if transpose:\n outputs = tf.layers.conv2d_transpose(inputs, num_filters, filter_size, strides=strides,\n padding=\"SAME\", activation=activation)\n elif not transpose:\n outputs = tf.layers.conv2d(inputs, num_filters, filter_size, strides=strides,\n padding=\"SAME\", activation=activation)\n return outputs", "title": "" }, { "docid": "36cee1b72ffb101c674acf7cc7e98185", "score": "0.6389438", "text": "def test_conv():\n x = np.array(\n [\n [\n [\n [0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 5, 5) input tensor\n [5.0, 6.0, 7.0, 8.0, 9.0],\n [10.0, 11.0, 12.0, 13.0, 14.0],\n [15.0, 16.0, 17.0, 18.0, 19.0],\n [20.0, 21.0, 22.0, 23.0, 24.0],\n ]\n ]\n ]\n ).astype(np.float32)\n W = np.array(\n [\n [\n [\n [1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for convolution weights\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n ]\n ]\n ]\n ).astype(np.float32)\n\n out = onnx_ops.conv(x, W)", "title": "" }, { "docid": "09b2b49479b25553a998ac5b4dedd50a", "score": "0.6382329", "text": "def build_convolution(self):\n raise NotImplementedError(\"Build Convolution Should Be implemented\")", "title": "" }, { "docid": "bb50c28aaa4a6f5e33cee9b6472aa7b4", "score": "0.63804114", "text": "def Conv(onnx_node, ng_inputs): # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode\n return make_convolution_op(onnx_node, ng_inputs)", "title": "" }, { "docid": "e6c51dce0524eb0476e7b907c6b32110", "score": "0.63610744", "text": "def __init__(self,\n window, \n inp_ch_modes, \n out_ch_modes, \\\n ranks, \\\n strides=[1, 1],\\\n padding='SAME',\\\n filters_initializer=torch.nn.init.xavier_uniform_,\\\n cores_initializer=torch.nn.init.xavier_uniform_,\\\n biases_initializer=torch.nn.init.zeros_\n ):\n super(tt_conv, self).__init__()\n self.ranks = ranks \n 
self.out_modes = out_ch_modes \n self.in_modes = inp_ch_modes\n self.window = window\n self.strides = strides \n if padding=='SAME' and strides==[1,1]:\n self.padding = ((window[0]-1)//2,)#stride=1\n print(type(self.padding))\n # self.padding = padding \n \n filter_shape = [window[0], window[1], ranks[0]]\n if (window[0] * window[1] * 1 * ranks[0] == 1):\n self.filters = torch.empty(*filter_shape, device=device)\n torch.nn.init.ones_(self.filters)\n else:\n self.filters = torch.empty(*filter_shape, device=device)\n filters_initializer(self.filters)\n self.dim = inp_ch_modes.size\n \n self.mat_cores = []\n for i in range(self.dim):\n if type(cores_initializer) == list:\n cinit = cores_initializer[i]\n else:\n cinit = cores_initializer\n w = torch.empty(out_ch_modes[i] * ranks[i + 1], ranks[i] * inp_ch_modes[i], device=device)\n cinit(w)\n self.mat_cores.append(w)\n \n self.fshape = [window[0], window[1]]\n self.order = [0, 1]\n inord = []\n outord = []\n for i in range(self.dim):\n self.fshape.append(inp_ch_modes[i])\n inord.append(2 + 2 * i)\n self.fshape.append(out_ch_modes[i])\n outord.append(2 + 2 * i + 1)\n self.order += inord + outord\n\n if biases_initializer is not None:\n self.biases = torch.empty(np.prod(out_ch_modes), device=device)\n biases_initializer(self.biases)\n else:\n self.biases = torch.empty(np.prod(out_ch_modes), device=device)\n torch.nn.init.zeros_(self.biases)", "title": "" }, { "docid": "fb31a1a5474320ea6367ae6a5ba3b9a3", "score": "0.6344706", "text": "def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True):\n layers = []\n transposed_conv_layer = nn.ConvTranspose2d(in_channels, out_channels, \n kernel_size, stride, padding, bias=False)\n \n # append conv layer\n layers.append(transposed_conv_layer)\n\n if batch_norm:\n # append batchnorm layer\n layers.append(nn.BatchNorm2d(out_channels))\n \n # using Sequential container\n return nn.Sequential(*layers)", "title": "" }, { "docid": "0f1eb0714708247bcbc73af8a2b7c030", "score": "0.63394564", "text": "def conv_net(inputs, hparams):\n with slim.arg_scope(\n [slim.conv2d, slim.fully_connected],\n activation_fn=tf.nn.relu,\n weights_initializer=slim.variance_scaling_initializer(\n factor=2.0, mode='FAN_AVG', uniform=True)):\n\n net = inputs\n i = 0\n for (conv_temporal_size, conv_freq_size,\n num_filters, freq_pool_size, dropout_amt) in zip(\n hparams.temporal_sizes, hparams.freq_sizes, hparams.num_filters,\n hparams.pool_sizes, hparams.dropout_keep_amts):\n net = slim.conv2d(\n net,\n num_filters, [conv_temporal_size, conv_freq_size],\n scope='conv' + str(i),\n normalizer_fn=slim.batch_norm)\n if freq_pool_size > 1:\n net = slim.max_pool2d(\n net, [1, freq_pool_size],\n stride=[1, freq_pool_size],\n scope='pool' + str(i))\n if dropout_amt < 1:\n net = slim.dropout(net, dropout_amt, scope='dropout' + str(i))\n i += 1\n\n # Flatten while preserving batch and time dimensions.\n dims = tf.shape(net)\n net = tf.reshape(\n net, (dims[0], dims[1], net.shape[2] * net.shape[3]),\n 'flatten_end')\n\n net = slim.fully_connected(net, hparams.fc_size, scope='fc_end')\n net = slim.dropout(net, hparams.fc_dropout_keep_amt, scope='dropout_end')\n\n return net", "title": "" }, { "docid": "6168c55a0edf9ad92d21385c2be65e27", "score": "0.6303291", "text": "def convolution(network,shape, layer, activation = tf.nn.relu, std = 1.0, bias = 0.1, stride_conv = [1, 1, 1, 1] , k_size=[1, 2, 2, 1],stride_pool= [1, 2, 2, 1], Pool = True, batch_norm = True, batch_renorm = False,keep = 0.7, drop_out = True):\n #assert 
(batch_norm and not batch_renorm) or (not batch_norm and batch_renorm), \"Can't have batch norm both batch renorm layers\"\n\n \n with tf.variable_scope(layer):\n network = tl.layers.Conv2dLayer(network,\n act = activation,\n shape = shape, \n strides=stride_conv,\n padding='SAME',\n name ='cnn',\n W_init = tf.truncated_normal_initializer(stddev=std*np.sqrt(2./(shape[2] + shape[3]))),\n b_init =tf.constant_initializer(value=bias) )\n \n \n if Pool :\n network = tl.layers.PoolLayer(network,\n ksize=k_size,\n strides=stride_pool,\n padding='SAME',\n pool = tf.nn.max_pool,\n name ='pool')\n if batch_norm :\n network = tl.layers.BatchNormLayer(network, name = \"batch_norm\") \n if batch_renorm :\n network = tl.layers.BatchReNormLayer(network,name = \"batch_renorm\")\n \n if drop_out :\n network = tl.layers.DropoutLayer(network, keep=keep, name='drop')\n \n network.n_units = shape[3]\n \n return(network)", "title": "" }, { "docid": "5d72a49e2a53739609427859ae3e03ae", "score": "0.6293454", "text": "def __convolutional_neural_network(self, x):\r\n # # 5 x 5 x 5 patches, 1 channel, 32 features to compute.\r\n weights = self.__get_weights()\r\n biases = self.__get_biases()\r\n\r\n # reshaping the image to the provided pixels (see init)\r\n x = tf.reshape(x, shape=[-1, self.img_size_px, self.img_size_px, self.slice_count, 1]) #image x, y, z\r\n conv1 = self.__maxpool3d(tf.nn.relu(self.__conv3d(x, weights['W_conv1']) + biases['b_conv1']))\r\n conv2 = self.__maxpool3d(tf.nn.relu(self.__conv3d(conv1, weights['W_conv2']) + biases['b_conv2']))\r\n\r\n fc = tf.reshape(conv2, [-1, 54080])\r\n fc = tf.nn.relu(tf.matmul(fc, weights['W_fc']) + biases['b_fc'])\r\n fc = tf.nn.dropout(fc, self.keep_rate)\r\n\r\n output = tf.matmul(fc, weights['out']) + biases['out']\r\n return output", "title": "" }, { "docid": "0bfef545419496d113029143fcb9ee88", "score": "0.6282218", "text": "def _conv_block(inputs, filters, alpha, kernel=3, stride=1):\n filters = int(filters * alpha)\n x = Conv2d(filters, kernel, act = 'identity', include_bias = False, stride = stride, name = 'conv1')(inputs)\n x = BN(name = 'conv1_bn', act='relu')(x)\n return x, filters", "title": "" }, { "docid": "cd2b9eeefe9515b92417ddb45795b8a3", "score": "0.6250454", "text": "def tconv_nac(inputs, filters=None, kernel_size=1, activation='relu', stride=1,\n l2_scale=0, dropout=0, conv_type='standard', norm_type=None, bn_momentum=0.99,\n norm_gamma=None, kernel_initializer='he_normal', padding='same'):\n\n # flow through variable current\n current = inputs\n\n if filters is None:\n filters = inputs.shape[-1]\n\n # normalize\n if norm_type == 'batch-sync':\n current = tf.keras.layers.experimental.SyncBatchNormalization(\n momentum=bn_momentum)(current)\n elif norm_type == 'batch':\n current = tf.keras.layers.BatchNormalization(\n momentum=bn_momentum)(current)\n elif norm_type == 'layer':\n current = tf.keras.layers.LayerNormalization()(current)\n\n # activation\n current = layers.activate(current, activation)\n\n # convolution\n current = tf.keras.layers.Conv1DTranspose(\n filters=filters,\n kernel_size=kernel_size,\n strides=stride,\n padding='same',\n use_bias=True,\n kernel_initializer=kernel_initializer,\n kernel_regularizer=tf.keras.regularizers.l2(l2_scale))(current)\n\n # dropout\n if dropout > 0:\n current = tf.keras.layers.Dropout(rate=dropout)(current)\n \n return current", "title": "" }, { "docid": "1bf343e2c7c5978ddbdd7e9069d07e39", "score": "0.6249423", "text": "def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, 
batch_norm=True):\n layers = []\n layers.append(layer_init(nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False)))\n if batch_norm:\n layers.append(nn.BatchNorm2d(out_channels))\n return nn.Sequential(*layers)", "title": "" }, { "docid": "784fa4ea3990f4013d777729faa7f355", "score": "0.6225246", "text": "def conv_layer(self, inputs, num_filters, filter_size, strides, activation=None,\n transpose=False, w_size=None, h_size=None):\n self.conv_layer_num += 1\n if transpose:\n outputs = self.upscale(inputs, h_size=h_size, w_size=w_size)\n outputs = tf.layers.conv2d_transpose(outputs, num_filters, filter_size,\n strides=strides,\n padding=\"SAME\", activation=activation)\n elif not transpose:\n outputs = tf.layers.conv2d(inputs, num_filters, filter_size, strides=strides,\n padding=\"SAME\", activation=activation)\n return outputs", "title": "" }, { "docid": "9364a157c27c76c7dda9f1ca4ee7bb9f", "score": "0.61943835", "text": "def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True):\n layers = []\n layers.append(nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False))\n if batch_norm:\n layers.append(nn.BatchNorm2d(out_channels))\n return nn.Sequential(*layers)", "title": "" }, { "docid": "ee4c18f70eca1027aac1cc13a2922f06", "score": "0.618634", "text": "def deconv(in_planes, out_planes):\n return nn.Sequential(\n nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=False),\n nn.LeakyReLU(0.1,inplace=True)\n )", "title": "" }, { "docid": "73a3e82853bc394872bb9c17f01e960b", "score": "0.61853814", "text": "def Conv(self, c2_op, inputs):\n X, W, bias = inputs\n\n order = [val.s for val in c2_op.arg if val.name == \"order\"]\n if 1 != len(order):\n raise ValueError(\"Multiple order values in convolution\")\n order = order[0]\n\n if order not in (\"NHWC\", \"NCHW\"):\n raise NotImplementedError(\"Unsupported order in convolution: {}\", order)\n\n # set input axes shape\n ax_N = ng.make_axis(name='N')\n ax_C = ng.make_axis()\n ax_D = ng.make_axis(length=1)\n ax_H = ng.make_axis()\n ax_W = ng.make_axis()\n\n # set kernel axes shape\n ax_kernel_D = ng.make_axis(length=1)\n ax_kernel_H = ng.make_axis()\n ax_kernel_W = ng.make_axis()\n ax_kernel_ofm = ng.make_axis()\n\n # create placeholders for output axes\n oC = ng.make_axis(name='C')\n oD = ng.make_axis(name='D', length=1)\n oH = ng.make_axis(name='H')\n oW = ng.make_axis(name='W')\n\n axes_order = {\n 'NCHW': {'X': [ax_N, ax_C, ax_H, ax_W],\n 'W': [ax_kernel_ofm, ax_C, ax_kernel_H, ax_kernel_W]},\n 'NHWC': {'X': [ax_N, ax_H, ax_W, ax_C],\n 'W': [ax_kernel_ofm, ax_kernel_H, ax_kernel_W, ax_C]},\n }\n\n ng.make_axes(axes_order[order]['X']).set_shape(X.axes.lengths)\n ng.make_axes(axes_order[order]['W']).set_shape(W.axes.lengths)\n\n if 1 != len(bias.axes):\n raise ValueError(\"Bias's must be 1D.\")\n if ax_kernel_ofm.length != bias.axes.lengths[0]:\n raise ValueError(\"Bias's length must equal to number of output feature maps.\")\n\n # strides params\n stride_size = [int(val.i) for val in c2_op.arg if val.name == \"stride\"]\n if len(stride_size) != 1:\n raise ValueError(\"Stride size must be scalar value\")\n str_h = str_w = stride_size[0]\n\n # padding params\n pad_t, pad_b, pad_l, pad_r = \\\n _c2_padding(c2_op,\n in_NHWC=[ax_N.length, ax_H.length, ax_W.length, ax_C.length],\n kernel_HWIO=[ax_kernel_H.length, ax_kernel_W.length,\n ax_C.length, ax_kernel_ofm.length],\n stride_NHWC=[1, str_h, str_w, 1])\n\n if pad_t != pad_b or 
pad_l != pad_r:\n raise NotImplementedError(\"Requires symmetric padding in ngraph:\"\n \"pad_t(%s) == pad_b(%s) and\"\n \"pad_l(%s) == pad_r(%s)\" %\n (pad_t, pad_b, pad_l, pad_r))\n\n # conv params\n params = dict(pad_d=0, pad_h=pad_t, pad_w=pad_l,\n str_d=1, str_h=str_h, str_w=str_w,\n dil_d=1, dil_h=1, dil_w=1)\n\n # input, weight, output axes\n internal_ax_dict = {\n 'X': ng.make_axes([ax_C, ax_D, ax_H, ax_W, ax_N]),\n 'W': ng.make_axes([ax_C, ax_kernel_D, ax_kernel_H, ax_kernel_W, ax_kernel_ofm])\n }\n\n oC.length = ax_kernel_ofm.length\n oH.length = output_dim(ax_H.length, ax_kernel_H.length, params['pad_h'], params['str_h'])\n oW.length = output_dim(ax_W.length, ax_kernel_W.length, params['pad_w'], params['str_w'])\n internal_ax_dict['Y'] = ng.make_axes([oC, oD, oH, oW, ax_N])\n\n # broadcast input / filter axes\n # flow for NHWC order: | flow for NCHW order:\n # input: | input:\n # expand dims: NHWC -> NDHWC | expand dims: NCHW -> NDCHW\n # reorder: NDHWC -> CDHWN | reorder: NDCHW -> CDHWN\n # weights: | weights:\n # expand dims: (ofm)HWC -> D(ofm)HWC | expand dims: (ofm)CHWC -> D(ofm)CHW\n # reorder: D(ofm)HWC -> CDHW(ofm) | reorder: D(ofm)CHW -> CDHW(ofm)\n\n X = ng.cast_axes(X, ng.make_axes(axes_order[order]['X']))\n X = ng.expand_dims(X, ax_D, 1)\n X = ng.axes_with_order(X, axes=internal_ax_dict['X'])\n W = ng.cast_axes(W, ng.make_axes(axes_order[order]['W']))\n W = ng.expand_dims(W, ax_kernel_D, 0)\n W = ng.axes_with_order(W, axes=internal_ax_dict['W'])\n\n # convolution\n Y = ng.convolution(params, X, W, axes=internal_ax_dict['Y'])\n\n # cast back to proper format\n Y = ng.broadcast(Y, ng.make_axes([ax_N, oD, oH, oW, oC])) if \"NHWC\" == order \\\n else ng.broadcast(Y, ng.make_axes([ax_N, oD, oC, oH, oW])) # NCHW\n\n # slice away the oD\n out_slicing = [slice(None), 0, slice(None), slice(None), slice(None)]\n Y = ng.tensor_slice(Y, out_slicing)\n\n def _conv_bias_add(c2_op, inputs):\n X, bias = inputs\n bias = ng.cast_axes(bias, axes=ng.make_axes([X.axes[1 if 'NCHW' == order else 3]]))\n Y = ng.Add(X, bias)\n return Y\n\n return _conv_bias_add(c2_op, [Y, bias])", "title": "" }, { "docid": "fba1fe5d8b0cf84121ec80ddda35e83f", "score": "0.6184872", "text": "def deconv2d_bn_act(inputs, filters, kernel_size, kernel_init, activation, strides, padding=\"SAME\"):\n _tmp = tf.layers.conv2d_transpose(inputs=inputs, filters=filters, kernel_size=kernel_size,\n kernel_initializer=kernel_init, activation=None, strides=strides, padding=padding)\n _tmp = tf.contrib.layers.batch_norm(_tmp, center=True, scale=True, is_training=phase)\n _tmp = activation(_tmp)\n\n return _tmp", "title": "" }, { "docid": "5244396d6182d9b674b312864b9db7a8", "score": "0.6181952", "text": "def _build(self, inp):\n assert (len(inp.get_shape().as_list()) - 2) == self._rank, \\\n 'The input has {} dimensions but this is a {}D convolution'.format(\n len(inp.get_shape().as_list()), self._rank)\n\n self.in_shape = tuple(inp.get_shape().as_list())\n if self.in_filters is None:\n self.in_filters = self.in_shape[-1]\n assert self.in_filters == self.in_shape[-1], 'Convolution was built for different number of channels'\n\n inp_shape = tf.shape(inp)\n\n if self.filter_shape is None:\n self.up_spatial_shape = [2 * s if s > 1 else 1 for s in self.strides]\n else:\n self.up_spatial_shape = self.filter_shape\n\n self.out_shape = [inp_shape[i] * self.full_strides[i] for i in range(len(self.in_shape) - 1)] + [self.out_filters,]\n\n\n\n self._k = self._get_kernel()\n\n self.variables.append(self._k)\n\n conv_op = 
tf.nn.conv3d_transpose\n if self._rank == 2:\n conv_op = tf.nn.conv2d_transpose\n\n outp = conv_op(inp, self._k, output_shape=self.out_shape, strides=self.full_strides, padding='SAME',\n name='conv_tranposed')\n\n if self.use_bias:\n self._b = tf.get_variable(\"b\", shape=(self.out_filters,), initializer=tf.constant_initializer())\n self.variables.append(self._b)\n outp += self._b\n outp.set_shape([self.in_shape[i] * self.full_strides[i] if isinstance(self.in_shape[i], int) else None\n for i in range(len(self.in_shape) - 1)] + [self.out_filters,])\n\n return outp", "title": "" }, { "docid": "252ddd2d39c43df293bb4a1ace6f308b", "score": "0.6177555", "text": "def conv_encoder(self, conv_input,number_conv_layers,is_training, kernel_size = (5,), channels = 512,activation = tf.nn.relu ):\n conv_output = conv_input\n for i in range(number_conv_layers):\n conv_output = tf.layers.conv1d(conv_output,filters=channels,kernel_size=kernel_size,activation=None,padding='same')\n batched_output = tf.layers.batch_normalization(conv_output,training=is_training)\n activated_output = activation(batched_output)\n conv_output = activated_output\n # conv_shape = conv_output.get_shape().as_list()\n # if conv_shape[1] is None:\n # conv_output = tf.reshape(conv_output,[-1,-1,conv_shape[2]])\n # else:\n # conv_output = tf.reshape(conv_output,[-1,conv_shape[1],conv_shape[2]])\n return conv_output", "title": "" }, { "docid": "af1254699b20b5d045975c8adccb3dc7", "score": "0.6169582", "text": "def conv_pool_transconv_net(img_bands = 4, img_rows = 64, img_cols = 64,nb_blocks=4, in_blocks=[3,4,6,3], filter_depth=[64,128,256,512], dense_layers=1 ,categorical=False,nb_classes=-1, droprate=0.9):\n assert len(filter_depth) == nb_blocks, \"filter_depth should have nb_blocks elements\"\n input_img = Input(shape=(img_bands, img_rows, img_cols))\n net = input_img\n for b in range(nb_blocks):\n \n if len(in_blocks) == 1:#same number of residual blocks\n blocks = in_blocks[0]\n else:\n assert len(in_blocks)==nb_blocks, \"res_blocks should have either 1 or nb_blocks elements\"\n blocks = in_blocks[b] \n \n for rb in range(blocks):\n net = Conv2D(filter_depth[b], (3, 3), activation='relu', padding='same', data_format=\"channels_first\", name='block'+str(b)+'_conv'+str(rb) , trainable=True)(net)\n net = Dropout(droprate)(net)\n net = Conv2D(filter_depth[b],(3, 3), strides=2, padding='same', data_format=\"channels_first\", name='block'+str(b)+'_pool')(net) \n \n _,_,_,w = Model(input_img, net).output_shape\n i = 0\n while w != img_rows:\n net = Conv2DTranspose(64, (3, 3), strides=2, activation='relu', padding='same', data_format=\"channels_first\", name='deconv'+str(i) , trainable=True)(net)\n i += 1\n _,_,_,w = Model(input_img, net).output_shape\n if categorical:\n assert nb_classes != -1, 'parameter nb_classes should be defined' \n output = Flatten()(net)\n output = Dense(nb_classes,activation='softmax', name=\"output_layer\")(output)\n else:\n output = Conv2D(1, (1, 1), activation='relu', padding='same', data_format=\"channels_first\", name='output' , trainable=True)(net)\n return Model(input_img, output)", "title": "" }, { "docid": "f10db6a41649035dbfec82dfadaaffe0", "score": "0.6153932", "text": "def dwconv3x3_block(\n in_channels: int,\n out_channels: int,\n stride: int = 1,\n padding: int = 1,\n dilation: int = 1,\n bias: bool = False,\n bn_eps: float = 1e-5,\n activation=(lambda: nn.ReLU(inplace=True))\n):\n return dwconv_block(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=3,\n stride=stride,\n 
padding=padding,\n dilation=dilation,\n bias=bias,\n bn_eps=bn_eps,\n activation=activation\n )", "title": "" }, { "docid": "4081d9fe1e9ee4429619220b139a9d82", "score": "0.6148065", "text": "def __init__(self, dim, in_channels, out_channels, stride=1):\r\n super(conv_block, self).__init__()\r\n\r\n conv_fn = getattr(nn, \"Conv{0}d\".format(dim))\r\n\r\n if stride == 1:\r\n ksize = 3\r\n elif stride == 2:\r\n ksize = 4\r\n else:\r\n raise Exception('stride must be 1 or 2')\r\n\r\n self.main = conv_fn(in_channels, out_channels, ksize, stride, 1)\r\n self.activation = nn.LeakyReLU(0.2)", "title": "" }, { "docid": "39df7339ca6f33aba5f747a7b79959e7", "score": "0.61444396", "text": "def __conv3d(self, x, W):\r\n return tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "598216fceed5cc619f90946eea9a99ea", "score": "0.6143972", "text": "def _conv2d_impl(self, input_layer, num_channels_in, filters, kernel_size,\n strides, padding, kernel_initializer):\n if self.use_tf_layers:\n assert not (self.params.tanh_weight_transform or self.params.quant_weight)\n return conv_layers.conv2d(input_layer, filters, kernel_size, strides,\n padding, self.channel_pos,\n kernel_initializer=kernel_initializer,\n use_bias=False)\n else:\n weights_shape = [kernel_size[0], kernel_size[1], num_channels_in, filters]\n # We use the name 'conv2d/kernel' so the variable has the same name as its\n # tf.layers equivalent. This way, if a checkpoint is written when\n # self.use_tf_layers == True, it can be loaded when\n # self.use_tf_layers == False, and vice versa.\n weights = self.get_variable('conv2d/kernel', weights_shape,\n self.variable_dtype, self.dtype,\n initializer=kernel_initializer)\n if self.params.tanh_weight_transform:\n if not (self.params.first_weight_name in weights.name\n or self.params.last_weight_name in weights.name):\n print('Dorefa quantizing weight %s' % weights.name)\n weights = self.dorefa_weight_quantize(\n weights,\n self.params.quant_weight,\n self.params.quant_weight_bits,\n self.params.quant_weight_per_channel,\n self.params.quant_weight_delay)\n elif self.params.quant_weight:\n if not (self.params.first_weight_name in weights.name\n or self.params.last_weight_name in weights.name):\n print('Quantizing weight %s' % weights.name)\n weights = self.last_value_quantize(\n weights,\n per_channel=self.params.quant_weight_per_channel,\n is_training=self.phase_train,\n num_bits=self.params.quant_weight_bits,\n narrow_range=self.params.quant_weight_narrow_range,\n relative_quantile=self.params.quant_weight_relative_quantile,\n freeze=self.params.freeze_weight_range,\n quant_delay=self.params.quant_weight_delay)\n\n if self.data_format == 'NHWC':\n strides = [1] + strides + [1]\n else:\n strides = [1, 1] + strides\n return tf.nn.conv2d(input_layer, weights, strides, padding,\n data_format=self.data_format)", "title": "" }, { "docid": "802af15669b3c6f78cc3cf7bcfe7eb57", "score": "0.61404926", "text": "def alter_op_layout_conv2d(attrs, inputs, tinfos, out_type):\n return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)", "title": "" }, { "docid": "59b11c1035a4b48c2bfc50dd9525de38", "score": "0.613453", "text": "def conv_conv128(DEVICE=torch.device('cpu')):\n encoder = BlockNet(ConvBlock, \n channel_sequence=[1,64,64,128,128], \n size_sequence=[64,32,16,8,1], \n block_count=5,\n kernel_size=3,\n use_block_for_last=True).to(DEVICE)\n\n encp = nn.utils.parameters_to_vector(encoder.parameters()).shape[0]\n print(f\"Encoder has {encp} params\")\n \n decoder = 
BlockNet(ConvBlock, \n channel_sequence=[128,128,128,64,1], \n size_sequence=[1,8,16,32,64], \n block_count=5,\n kernel_size=3,\n use_block_for_last=True).to(DEVICE)\n\n decp = nn.utils.parameters_to_vector(decoder.parameters()).shape[0]\n print(f\"Decoder has {decp} params\")\n \n autoencoder = nn.Sequential(encoder, decoder, nn.LeakyReLU()).to(DEVICE)\n \n return encoder, nn.Sequential(decoder, nn.LeakyReLU()), autoencoder", "title": "" }, { "docid": "9d3c607767283ab7b84200354a64eb4c", "score": "0.6126814", "text": "def conv3d(x, W):\n return tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "95fa2df6ddf48acd7e8fa702d0c998ff", "score": "0.61205804", "text": "def __init__(self, in_channels=3, num_actions=5):\n super(DQN, self).__init__()\n self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=3, stride=1)\n self.fc1 = nn.Linear(32, 16)\n self.fc2 = nn.Linear(16, num_actions)", "title": "" }, { "docid": "3af87049b5c425a2701fb4d44fb25417", "score": "0.61178863", "text": "def base_net(self):\n inputs = Input((self.num_chns, self.patch_size, \n self.patch_size, self.patch_size))\n \n \"\"\" 1st convolution\"\"\" \n conv1 = Conv3D(filters=self.feature_depth[0], \n kernel_size=[4, 4, 4],\n padding='valid', \n data_format='channels_first')(inputs)\n if self.with_bn:\n conv1 = BatchNormalization(axis=1)(conv1)\n conv1 = Activation('relu')(conv1)\n \n \"\"\" 2nd convolution\"\"\"\n conv2 = Conv3D(filters=self.feature_depth[1], \n kernel_size=[3, 3, 3],\n padding='valid', \n data_format='channels_first')(conv1)\n if self.with_bn:\n conv2 = BatchNormalization(axis=1)(conv2)\n conv2 = Activation('relu')(conv2)\n \n \"\"\" pooling 1\"\"\"\n pool1 = MaxPooling3D(pool_size=(2, 2, 2), \n data_format='channels_first')(conv2)\n \n \"\"\" 3rd convolution\"\"\"\n conv3 = Conv3D(filters=self.feature_depth[2], \n kernel_size=[3, 3, 3],\n padding='valid', \n data_format='channels_first')(pool1)\n if self.with_bn:\n conv3 = BatchNormalization(axis=1)(conv3)\n conv3 = Activation('relu')(conv3)\n \n \"\"\" 4th convolution\"\"\"\n conv4 = Conv3D(filters=self.feature_depth[3], \n kernel_size=[3, 3, 3],\n padding='valid', \n data_format='channels_first')(conv3)\n if self.with_bn:\n conv4 = BatchNormalization(axis=1)(conv4)\n conv4 = Activation('relu')(conv4)\n \n \"\"\" pooling 2\"\"\"\n pool2 = MaxPooling3D(pool_size=(2, 2, 2), \n data_format='channels_first')(conv4)\n \n \"\"\" 5th convolution\"\"\"\n conv5 = Conv3D(filters=self.feature_depth[4], \n kernel_size=[3, 3, 3],\n padding='valid', \n data_format='channels_first')(pool2)\n if self.with_bn:\n conv5 = BatchNormalization(axis=1)(conv5)\n conv5 = Activation('relu')(conv5)\n \n \"\"\" 6th convolution\"\"\"\n conv6 = Conv3D(filters=self.feature_depth[5], \n kernel_size=[1, 1, 1],\n padding='valid', \n data_format='channels_first')(conv5)\n if self.with_bn:\n conv6 = BatchNormalization(axis=1)(conv6)\n conv6 = Activation('relu')(conv6)\n if self.with_dropout:\n conv6 = Dropout(self.keep_prob)(conv6)\n \n \n feature_map = Reshape((self.feature_depth[-1], 1),\n name='patch_features')(conv6)\n \n class_prob = Dense(units=1, activation='sigmoid',\n name='patch_prob')(Flatten()(feature_map))\n\n model = Model(inputs=inputs, \n outputs=[feature_map, class_prob], \n name='base_net')\n\n model.summary()\n \n return model", "title": "" }, { "docid": "8b4ae60f0f355870264a4166d2345a92", "score": "0.61169124", "text": "def _embed_conv1d(inputs, out_channels, activation=tf.tanh, bias=True):\n \n #inputs = tf.squeeze(inputs, axis=-2, 
name='remove_channel')\n \n with tf.variable_scope('pre') as scope:\n ww = _variable_with_weight_decay('kernel',\n shape=[256,out_channels,2],\n stddev=0.707,\n wd=None)\n \n embedded_kernel = tf.nn.embedding_lookup(ww, inputs, name='embedded_kernel')\n \n output = tf.add(embedded_kernel[:,0:-1,:,0], embedded_kernel[:,1:,:,1], name='shift_and_add')\n \n #transform into \"channel first\"\n output = tf.transpose(output, perm=[0,2,1], name='channel_first')\n \n if bias:\n len_in = inputs.get_shape().as_list()[1]\n b = _variable_on_cpu('bias', [len_in-1], tf.constant_initializer(0.0))\n output = tf.nn.bias_add(output, b, name='add_bias')\n if activation:\n output = activation(output, name='tanh')\n \n return output", "title": "" }, { "docid": "ffaef5cdd6d69f5f2d4dd9a5e52d5d20", "score": "0.6105015", "text": "def conv_layer(input_array, filter_size, kp, postfix, is_training):\n # filter weights\n W = tf.get_variable(\"Wc\" + postfix, filter_size, tf.float32, initializer=tf.contrib.layers.xavier_initializer())\n # convolution\n x = tf.nn.conv2d(input_array, W, [1, 1, 1, 1], padding=\"SAME\")\n # batch-norm before activation\n x_norm = tf.layers.batch_normalization(x, training=is_training, axis=3)\n # dropout (activation is applied after dropout)\n output = tf.nn.dropout(x_norm, kp)\n\n return output, W", "title": "" }, { "docid": "546dd312d648f9aeb20ec0b9ec73e4a1", "score": "0.61045855", "text": "def create_conv_net(x, keep_prob, channels, n_class, layers=3, features_root=16, filter_size=3, pool_size=2, summaries=True):\n \n logger.info(\"Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}\".format(layers=layers,\n features=features_root,\n filter_size=filter_size,\n pool_size=pool_size))\n \n nx = tf.shape(x)[1]\n ny = tf.shape(x)[2]\n x_image = tf.reshape(x, tf.stack([-1,nx,ny,channels]))\n in_node = x_image\n batch_size = tf.shape(x_image)[0]\n \n weights = []\n biases = []\n convs = []\n pools = OrderedDict()\n deconv = OrderedDict()\n dw_convs = OrderedDict()\n up_convs = OrderedDict()\n \n # Record the size difference \n in_size = 1000\n size = in_size\n\n # Encode\n for layer in range(0, layers):\n features = 2**layer*features_root\n stddev = np.sqrt(2 / (filter_size**2 * features))\n if layer == 0:\n w1 = weight_variable([filter_size, filter_size, channels, features], stddev)\n else:\n w1 = weight_variable([filter_size, filter_size, features//2, features], stddev)\n \n w2 = weight_variable([filter_size, filter_size, features, features], stddev)\n b1 = bias_variable([features])\n b2 = bias_variable([features])\n \n conv1 = conv2d(in_node, w1, keep_prob)\n tmp_h_conv = tf.nn.relu(conv1 + b1)\n conv2 = conv2d(tmp_h_conv, w2, keep_prob)\n dw_convs[layer] = tf.nn.relu(conv2 + b2)\n \n weights.append((w1, w2))\n biases.append((b1, b2))\n convs.append((conv1, conv2))\n \n size -= 4\n if layer < layers-1:\n pools[layer] = max_pool(dw_convs[layer], pool_size)\n in_node = pools[layer]\n size /= 2\n \n in_node = dw_convs[layers-1]\n \n # Decode\n for layer in range(layers-2, -1, -1):\n features = 2**(layer+1)*features_root\n stddev = np.sqrt(2 / (filter_size**2 * features))\n \n wd = weight_variable_devonc([pool_size, pool_size, features//2, features], stddev)\n bd = bias_variable([features//2])\n h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)\n h_deconv_concat = crop_and_concat(dw_convs[layer], h_deconv)\n deconv[layer] = h_deconv_concat\n \n w1 = weight_variable([filter_size, filter_size, features, features//2], stddev)\n 
w2 = weight_variable([filter_size, filter_size, features//2, features//2], stddev)\n b1 = bias_variable([features//2])\n b2 = bias_variable([features//2])\n \n conv1 = conv2d(h_deconv_concat, w1, keep_prob)\n h_conv = tf.nn.relu(conv1 + b1)\n conv2 = conv2d(h_conv, w2, keep_prob)\n in_node = tf.nn.relu(conv2 + b2)\n up_convs[layer] = in_node\n\n weights.append((w1, w2))\n biases.append((b1, b2))\n convs.append((conv1, conv2))\n \n size *= 2\n size -= 4\n\n # Output Map\n weight = weight_variable([1, 1, features_root, n_class], stddev)\n bias = bias_variable([n_class])\n conv = conv2d(in_node, weight, tf.constant(1.0))\n output_map = tf.nn.relu(conv + bias)\n up_convs[\"out\"] = output_map\n \n # Summary the results of convolution and pooling\n if summaries:\n with tf.name_scope(\"summary_conv\"):\n for i, (c1, c2) in enumerate(convs):\n tf.summary.image('layer_%02d_01'%i, get_image_summary(c1))\n tf.summary.image('layer_%02d_02'%i, get_image_summary(c2))\n \n with tf.name_scope(\"summary_max_pooling\"):\n for k in pools.keys():\n tf.summary.image('pool_%02d'%k, get_image_summary(pools[k]))\n \n with tf.name_scope(\"summary_deconv\"):\n for k in deconv.keys():\n tf.summary.image('deconv_concat_%02d'%k, get_image_summary(deconv[k]))\n\n with tf.name_scope(\"down_convolution\"):\n for k in dw_convs.keys():\n tf.summary.histogram(\"layer_%02d\"%k + '/activations', dw_convs[k])\n\n with tf.name_scope(\"up_convolution\"):\n for k in up_convs.keys():\n tf.summary.histogram(\"layer_%s\"%k + '/activations', up_convs[k])\n \n # Record all the variables which can be used in L2 regularization\n variables = []\n for w1,w2 in weights:\n variables.append(w1)\n variables.append(w2)\n \n for b1,b2 in biases:\n variables.append(b1)\n variables.append(b2)\n\n \n return output_map, variables, int(in_size - size)", "title": "" }, { "docid": "95bb5023162802e7a4c6a1754787c7a1", "score": "0.6103816", "text": "def _simple_conv_3d(x, k):\n y = tf.nn.conv3d(x, k, [1, 1, 1, 1, 1], padding='SAME')\n return y", "title": "" }, { "docid": "c28da5968a39883f33471ff0af58f763", "score": "0.6099747", "text": "def __init__(self, input_dim=(3, 32, 32), num_filters=[16,16], filter_size = 3,\n hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,\n dtype=np.float32):\n self.params = {}\n self.reg = reg\n self.dtype = dtype\n (C, H, W) = input_dim\n F1 = num_filters[0]\n F2 = num_filters[1]\n HH = filter_size\n WW = filter_size\n\n # padding\n P1 = (filter_size - 1)/2\n # stride\n S = 1\n ############################################################################\n # TODO: Initialize weights and biases for the three-layer convolutional #\n # network. Weights should be initialized from a Gaussian with standard #\n # deviation equal to weight_scale; biases should be initialized to zero. #\n # All weights and biases should be stored in the dictionary self.params. #\n # Store weights and biases for the convolutional layer using the keys 'W1' #\n # and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #\n # hidden affine layer, and keys 'W3' and 'b3' for the weights and biases #\n # of the output affine layer. 
#\n ############################################################################\n H_out1 = 1 + (H - HH + 2*P1)/S\n W_out1 = 1 + (W - WW + 2*P1)/S\n H_out2 = 1 + (H_out1/2 - HH + 2*P1)/S\n W_out2 = 1 + (W_out1/2 - WW + 2*P1)/S\n P2 = 2\n S2 = 2\n HHH = 4\n WWW = 4\n self.params['W1'] = weight_scale * np.random.randn(F1,C,HH,WW)\n self.params['b1'] = np.zeros(F1)\n self.params['W2'] = weight_scale * np.random.randn(F2,F1,HH,WW)\n self.params['b2'] = np.zeros(F2)\n self.params['W2_2'] = weight_scale * np.random.randn(F2,C,HHH,WWW)\n self.params['b2_2'] = np.zeros(F2)\n self.params['W3'] = weight_scale * np.random.randn(H_out2*W_out2*F2/4,hidden_dim)\n self.params['b3'] = np.zeros(hidden_dim)\n self.params['W4'] = weight_scale * np.random.randn(hidden_dim,num_classes)\n self.params['b4'] = np.zeros(num_classes)\n self.params['gamma1'] = np.random.rand(F1)\n self.params['beta1'] = np.random.rand(F1)\n self.params['gamma2'] = np.random.rand(F2)\n self.params['beta2'] = np.random.rand(F2)\n self.sbn_params = [{'mode':'train'},{'mode':'train'}]\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "title": "" }, { "docid": "333583016ea90bb973b48d1887674fde", "score": "0.60991526", "text": "def deconv(in_planes, out_planes):\n return nn.Sequential(\n nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=False),\n nn.LeakyReLU(0.1, inplace=True)\n )", "title": "" }, { "docid": "aa7740b5f7c49a7e512410a1792fba3b", "score": "0.60864276", "text": "def conv3x3(in_planes, out_planes, stride=1,dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation,bias=False, dilation=dilation)", "title": "" }, { "docid": "955a8873ab561d97fd30501a7494813e", "score": "0.60832274", "text": "def conv_layer(self, n_filters, filter_size, stride, activation, name, padding='VALID', init='torch',\n incoming=None):\n _, kh, kw, input_channels = incoming.get_shape().as_list()\n with tf.variable_scope(name):\n bias_init, weight_init = self._get_initializers(init, input_channels, filter_size)\n W = tf.get_variable('W', shape=[filter_size, filter_size, input_channels, n_filters],\n initializer=weight_init)\n b = tf.get_variable('b', shape=[n_filters], initializer=bias_init)\n\n self._add_theta(W, b)\n out = activation(tf.nn.bias_add(tf.nn.conv2d(incoming, W, [1, stride, stride, 1], padding), b))\n\n return out", "title": "" }, { "docid": "e2331c88ee7b2bc3a03020fb39ca1852", "score": "0.60764295", "text": "def conv_layer(\n in_ch, out_ch, kernel, activation=nn.LeakyReLU(0.2), stride=1, padding=\"same\"\n):\n if padding == \"same\":\n padding = kernel // 2\n return nn.Sequential(\n nn.Conv2d(in_ch, out_ch, kernel, stride=stride, padding=padding),\n nn.BatchNorm2d(out_ch),\n activation,\n )", "title": "" }, { "docid": "f3660797126c0993df4f5f8959801990", "score": "0.6074785", "text": "def convolution(image, kernel):\n return image", "title": "" }, { "docid": "e16c84714cbaa23738e27a24aecab428", "score": "0.60744464", "text": "def construct_convolution_layer(input_tensor, name, kernel_shape, n_output_channels, padding_mode='SAME', strides=(1, 1, 1, 1)):\n\n with tf.variable_scope(name):\n # define weights\n n_input_channels = input_tensor.shape[-1]\n weights_shape = list(kernel_shape) + [n_input_channels, n_output_channels]\n weights = 
tf.get_variable(name='_weights', shape=weights_shape)\n print(weights)\n\n # define biases\n biases = tf.get_variable(name='_biases', initializer=tf.zeros(shape=[n_output_channels]))\n print(biases)\n\n # compute convolution\n convolution = tf.nn.conv2d(input=input_tensor, filter=weights, strides=strides, padding=padding_mode)\n print(convolution)\n convolution = tf.nn.bias_add(convolution, biases, name='pre_activation')\n print(convolution)\n convolution = tf.nn.relu(convolution, name='activation')\n print(convolution)\n\n return convolution", "title": "" }, { "docid": "d3ddcb8e489d921c6c7e55fbaa54f869", "score": "0.6069102", "text": "def _conv2d(inputs, filters, kernel_size):\n return tf.layers.conv2d(\n inputs=inputs, filters=filters, kernel_size=kernel_size,\n padding='same')", "title": "" }, { "docid": "9ee0aab6414a78bc4505831afced58f9", "score": "0.6064005", "text": "def trans_conv_2d_pad(in_channels, out_channels,\n kernel_size=3, stride=1, padding=1, bias=False):\n if stride == 1:\n return nn.ConvTranspose2d(\n in_channels, out_channels, kernel_size,\n stride=1, padding=padding, bias=bias\n )\n elif stride == 2:\n return nn.ConvTranspose2d(\n in_channels, out_channels, kernel_size,\n stride=2, padding=padding, output_padding=1, bias=bias\n )", "title": "" }, { "docid": "56aec71d736d2a7b1ad9a9a6ea72a80e", "score": "0.6055386", "text": "def conv2d_block(inputs, filters, kernel_size=3, num_convolutions=2):\n \n x = inputs\n for _ in range(num_convolutions):\n \n # Convolve inputs twice, keeping same dimensions\n x = tf.keras.layers.Conv2D(filters, kernel_size, \n padding=\"same\", activation='relu')(x)\n \n return x", "title": "" }, { "docid": "ea9d8cc4f6836af00e252bad806e186d", "score": "0.6054399", "text": "def conv_nac(inputs, filters=None, kernel_size=1, activation='relu', stride=1,\n dilation_rate=1, l2_scale=0, dropout=0, conv_type='standard', residual=False,\n pool_size=1, pool_type='max', norm_type=None, bn_momentum=0.99, norm_gamma=None,\n kernel_initializer='he_normal', padding='same', se=False):\n\n # flow through variable current\n current = inputs\n\n # choose convolution type\n if conv_type == 'separable':\n conv_layer = tf.keras.layers.SeparableConv1D\n else:\n conv_layer = tf.keras.layers.Conv1D\n\n if filters is None:\n filters = inputs.shape[-1]\n\n # normalize\n if norm_type == 'batch-sync':\n current = tf.keras.layers.experimental.SyncBatchNormalization(\n momentum=bn_momentum)(current)\n elif norm_type == 'batch':\n current = tf.keras.layers.BatchNormalization(\n momentum=bn_momentum)(current)\n elif norm_type == 'layer':\n current = tf.keras.layers.LayerNormalization()(current)\n\n # activation\n current = layers.activate(current, activation)\n\n # convolution\n current = conv_layer(\n filters=filters,\n kernel_size=kernel_size,\n strides=stride,\n padding=padding,\n use_bias=True,\n dilation_rate=dilation_rate,\n kernel_initializer=kernel_initializer,\n kernel_regularizer=tf.keras.regularizers.l2(l2_scale))(current)\n\n # squeeze-excite\n if se:\n current = squeeze_excite(current)\n \n # dropout\n if dropout > 0:\n current = tf.keras.layers.Dropout(rate=dropout)(current)\n\n # residual add\n if residual:\n current = tf.keras.layers.Add()([inputs,current])\n \n # Pool\n if pool_size > 1:\n if pool_type == 'softmax':\n current = layers.SoftmaxPool1D(\n pool_size=pool_size)(current)\n else:\n current = tf.keras.layers.MaxPool1D(\n pool_size=pool_size,\n padding=padding)(current)\n\n return current", "title": "" }, { "docid": "df27b3f6c5ba376cca4a4a6c5fcfac89", 
"score": "0.6054307", "text": "def conv(c_in, c_out, k_size, stride=2, pad=1, bn=True, dropout=True):\n layers = []\n layers.append(nn.Conv2d(c_in, c_out, k_size, stride, pad))\n if bn:\n layers.append(nn.InstanceNorm2d(c_out))\n return nn.Sequential(*layers)", "title": "" }, { "docid": "6d38b4cba0200464ffa83ce9968a1fcb", "score": "0.6051465", "text": "def conv2d_trans(inputs, outshape, out_dim, kernel, stride, stddev=0.02,\n padding=\"VALID\", name=\"conv2d_trans\", normalize=True,\n activation=True, relu_factor=0):\n if outshape is not None:\n pass\n with tf.variable_scope(name):\n conv = tf.contrib.layers.conv2d_transpose(\n inputs, out_dim, kernel, stride, padding, activation_fn=None,\n weights_initializer=tf.truncated_normal_initializer(stddev=stddev),\n biases_initializer=tf.constant_initializer(0.0))\n\n if normalize:\n conv = instance_norm(conv)\n # conv = tf.contrib.layers.batch_norm(conv, decay=0.9,\n # updates_collections=None, epsilon=1e-5, scale=True,\n # scope=\"batch_norm\")\n\n if activation:\n if(relu_factor == 0):\n conv = tf.nn.relu(conv, \"relu\")\n else:\n conv = lrelu(conv, relu_factor, \"lrelu\")\n\n return conv", "title": "" }, { "docid": "25bf43f8ad7d2c1439faf7b36e273c87", "score": "0.6050088", "text": "def deconv2d(X, size, output_shape, name, strides=2):\n with tf.variable_scope(name):\n init = tf.truncated_normal_initializer(stddev=0.02)\n W = tf.get_variable('W', \n [size, size, output_shape[-1], X.shape[-1]],\n initializer=init)\n \n b = tf.get_variable('b',\n [output_shape[-1]],\n initializer=tf.constant_initializer(0.0))\n # check this !\n deconv = tf.nn.conv2d_transpose(X, W, output_shape,\\\n [1, strides, strides, 1]) + b\n \n return deconv", "title": "" }, { "docid": "aef0844ba5746605afa8584b60c229da", "score": "0.6045833", "text": "def deconv2d(layer_input, filters):\n #u = Conv2D(128, kernel_size=3, strides=1, activation='linear', padding='same')(layer_input)\n #u = LeakyReLU(alpha=0.25)(u)\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=3, strides=1, padding='same')(u)\n u = Activation(\"relu\")(u)\n return u", "title": "" }, { "docid": "0ddfafafb1d0f7d4d1b5b2d8c04e74d9", "score": "0.60416853", "text": "def _conv_block(self, inputs, filters, kernel, strides, nl):\n\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n\n x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)\n x = BatchNormalization(axis=channel_axis)(x)\n\n return self._return_activation(x, nl)", "title": "" }, { "docid": "e4e7e3380fe9913726713de7ad3cc560", "score": "0.60407543", "text": "def conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1):\n if batchNorm:\n # convolution in 2D with Batchnorm and leakyReLU of 0.1\n return nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=False),\n nn.BatchNorm2d(out_planes),\n nn.LeakyReLU(0.1, inplace=True)\n )\n else:\n # Convolution in 2D with LeakyReLU of 0.1 and without Batchnorm\n return nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=False),\n nn.LeakyReLU(0.1, inplace=True)\n )", "title": "" }, { "docid": "51264e8da3ed0f149ca70af5844ad3c7", "score": "0.6016652", "text": "def conv_transpose(inputs, filters, kernel_size, strides, *args, **kwargs):\n dim = inputs.shape.ndims - 2\n if dim == 1:\n output = conv1d_transpose(inputs, filters, kernel_size, strides, *args, **kwargs)\n elif dim == 2:\n output = tf.layers.conv2d_transpose(inputs, filters, 
kernel_size, strides, *args, **kwargs)\n elif dim == 3:\n output = tf.layers.conv3d_transpose(inputs, filters, kernel_size, strides, *args, use_bias=False, **kwargs)\n return output", "title": "" }, { "docid": "71cedbf099b1e688d29227a280d2e53f", "score": "0.6013422", "text": "def conv3x3(in_channels, out_channels, module_name, postfix,\n stride=1, groups=1, kernel_size=3, padding=1):\n return [\n ('{}_{}/conv'.format(module_name, postfix),\n nn.Conv2d(in_channels, out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n groups=groups,\n bias=False)),\n ('{}_{}/norm'.format(module_name, postfix),\n nn.BatchNorm2d(out_channels)),\n ('{}_{}/relu'.format(module_name, postfix),\n nn.ReLU(inplace=True)),\n ]", "title": "" }, { "docid": "c7097e4ed9ef6502b63b9d0b0f62c8c4", "score": "0.60123575", "text": "def convolution_layer(input_image, shape):\n kernel = weight_init(shape=shape)\n bias = bias_init([shape[3]])\n \"\"\"convolution\"\"\"\n layer = conv2d(input_image=input_image, kernel=kernel)+bias\n \"\"\"pooling\"\"\"\n layer = pooling(layer)\n \"\"\"activationfunction relu\"\"\"\n layer = tf.nn.relu(layer)\n\n return layer", "title": "" }, { "docid": "fd52465fb62bca804e006e7d1276da30", "score": "0.60088533", "text": "def __init__(self, num_filters=[32, 32], filter_sizes=[3, 3], input_dim=(3,32,32), hidden_dim=100, \n num_classes=10, reg=0.0, weight_scale=1e-2, dtype=np.float32, \n verbose=False):\n self.reg = reg\n self.num_conv_layers = len(num_filters)\n self.dtype = dtype\n self.params = {}\n CONVout_dims = {}\n maxpool_dims = {} \n\n ############################################################################\n # TODO: Initialize weights and biases for the multi-layer convolutional #\n # network. Weights should be initialized from a Gaussian with standard #\n # deviation equal to weight_scale; biases should be initialized to zero. #\n # All weights and biases should be stored in the dictionary self.params. #\n # Store weights and biases for the 1st convolutional layer using the keys #\n # 'W1' and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #\n # the 2nd convolutional layer, and keys 'W3' and 'b3' for the weights and #\n # of the next convolutional of affine layer, and so on #\n ############################################################################\n num_conv_layers = self.num_conv_layers\n C, H, W = input_dim\n \n # Assign weight and biases for CONV layers\n for layer in range(num_conv_layers):\n if layer is 0:\n filter_depth = C # CONV Layer 1 has same depth as input depth\n # In this configuration, there is 2x2 max pooling after each CONV layer, so there\n # is a 2-to-1 downsampling. In layer 0, it simply downsample the input dimensions.\n CONVout_dims[layer] = (num_filters[layer], H, W)\n maxpool_dims[layer] = (num_filters[layer], H/2, W/2)\n else:\n # Depth of other CONV Layers 1 has the same depth as number of filters in the preceding \n # CONV layer\n filter_depth = num_filters[layer-1]\n # In this configuration, there is 2x2 max pooling after each CONV layer, so there\n # is a 2-to-1 downsampling. 
In subsequent layer, it simply downsample the dimensions\n # of the preceeding CONV layer output\n __, HH, WW = maxpool_dims[layer-1]\n CONVout_dims[layer] = (num_filters[layer], HH, WW)\n maxpool_dims[layer] = (num_filters[layer], HH/2, WW/2)\n \n # Set up weights for the filters of the CONV layer\n self.params[(layer,'W')] = weight_scale * np.random.randn(num_filters[layer], \n filter_depth, filter_sizes[layer], filter_sizes[layer])\n self.params[(layer,'b')] = np.zeros(num_filters[layer])\n \n # Assign weight and biases for FC layers (num_layer and num_layer+1)\n C, H, W = maxpool_dims[num_conv_layers-1]\n self.params[(num_conv_layers,'W')] = weight_scale * np.random.randn(C*H*W, hidden_dim)\n self.params[(num_conv_layers,'b')] = np.zeros(hidden_dim)\n\n self.params[(num_conv_layers+1,'W')] = weight_scale * np.random.randn(hidden_dim, num_classes)\n self.params[(num_conv_layers+1,'b')] = np.zeros(num_classes)\n \n if verbose:\n for layer in range(num_conv_layers):\n print \"W & b in CONV layer %d\" % (layer+1)\n print self.params[(layer,'W')].shape\n print self.params[(layer,'b')].shape\n print \"CONV output dimension: %d x %d x %d\" % CONVout_dims[layer]\n print \"Maxpool dimension: %d x %d x %d\" % maxpool_dims[layer] \n print \"W & b in FC layers:\"\n print self.params[(num_conv_layers,'W')].shape\n print self.params[(num_conv_layers,'b')].shape \n print self.params[(num_conv_layers+1,'W')].shape\n print self.params[(num_conv_layers+1,'b')].shape \n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "title": "" }, { "docid": "bf14df2d3d7dfdedd5c062c96923f896", "score": "0.6006245", "text": "def unet_conv_block(x, filters, kernel_size=3, batch_norm=True, dropout=False,\n name_prefix=\"enc_\", name_suffix=0):\n name_fn = lambda layer, num: '{}{}{}-{}'.format(name_prefix, layer, name_suffix, num)\n\n # First convolution:\n x = Conv2D(filters, kernel_size=kernel_size, activation=None,\n kernel_initializer='he_normal', padding='same',\n name=name_fn('conv', 1))(x)\n if batch_norm:\n x = BatchNormalization(name=name_fn('bn', 1))(x)\n x = LeakyReLU(alpha=0.3, name=name_fn('act', 1))(x)\n if dropout:\n x = Dropout(0.2, name=name_fn('drop', 1))(x)\n\n # Second convolution:\n x = Conv2D(filters, kernel_size=kernel_size, activation=None,\n kernel_initializer='he_normal', padding='same',\n name=name_fn('conv', 2))(x)\n if batch_norm:\n x = BatchNormalization(name=name_fn('bn', 2))(x)\n x = LeakyReLU(alpha=0.3, name=name_fn('act', 2))(x)\n\n return x", "title": "" }, { "docid": "5e0e38ede227a03250eb702b8b2c4a51", "score": "0.6005636", "text": "def vizConvWeights(net):\n \n net_dict = net.state_dict()\n conv_1_wt = net_dict['conv1.0.weight'].numpy()\n conv_2_wt = net_dict['conv2.0.weight'].numpy()\n \n # VISUALIZE Conv_1:\n \n fig1 = plt.figure(figsize=(9, 9))\n plt.title(\"conv_1_weights\")\n num_columns = 5\n num_rows = 2\n \n for i in range(1, num_columns*num_rows + 1 ):\n \n img = conv_1_wt[i - 1][0]\n fig1.add_subplot(num_rows, num_columns, 1)\n plt.axis('off')\n plt.imshow(img, cmap=\"gray\")\n plt.show()\n \n # VISUALIZE Conv_2:\n \n fig2 = plt.figure(figsize=(9, 9))\n plt.title(\"conv_2_weights\")\n \n num_columns2 = 5\n num_rows2 = 4\n for j in range(1, num_columns2*num_rows2 + 1 ):\n \n img2 = conv_2_wt[j - 1][0]\n 
fig1.add_subplot(num_rows2, num_columns2, 1)\n plt.axis('off')\n plt.imshow(img, cmap=\"gray\")\n plt.show()", "title": "" }, { "docid": "0f7071b9ea2d6539a59f1d930f52cc2f", "score": "0.6005177", "text": "def __init__(self, in_channels, out_channels, kernel_size, bias=True, padding_layer=torch.nn.ReflectionPad2d):\n super(Conv2dSame, self).__init__()\n ka = kernel_size // 2\n kb = ka - 1 if kernel_size % 2 == 0 else ka\n self.net = nn.Sequential(\n padding_layer((ka,kb,ka,kb)),\n nn.Conv2d(in_channels, out_channels, kernel_size, bias=bias),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(True)\n )", "title": "" }, { "docid": "439a8088d3eddf38d5f435d9838340d1", "score": "0.6001062", "text": "def build_conv(output_channels = 0,\n kernel_size = 3,\n strides = 1,\n act = \"relu\",\n up_or_down = \"down\",\n name = None):\n layer_cls = dict(\n up=tf.keras.layers.Conv2DTranspose,\n down=tf.keras.layers.Conv2D)[up_or_down]\n return layer_cls(\n filters=output_channels,\n kernel_size=kernel_size,\n strides=strides,\n activation=act,\n use_bias=True,\n padding=\"same\",\n name=name)", "title": "" }, { "docid": "783fe623309abc69ea4dbfd754462d21", "score": "0.5996015", "text": "def subnet_conv1x1(in_ch, out_ch):\n return nn.Sequential(\n nn.Conv2d(in_ch, 2*in_ch, 1), \n nn.LeakyReLU(), \n nn.Conv2d(2*in_ch, 2*in_ch, 1), \n nn.LeakyReLU(),\n nn.Conv2d(2*in_ch, out_ch, 1))", "title": "" }, { "docid": "5c6df9739b26f2fcd966bbeb3cc851a7", "score": "0.5996003", "text": "def nhwc2hwnc (nhwc, name = 'nhwc2hwnc'): \n with tf.variable_scope(name) as scope:\n out = tf.transpose(nhwc, [1,2,0,3])\n return out", "title": "" }, { "docid": "325cfde269f35364e7d0a003dacf3647", "score": "0.5995874", "text": "def build_conv_model():\n if K.image_dim_ordering()=='tf':\n input_layer = Input(shape=(nx, ny, 1))\n else:\n input_layer = Input(shape=(1, nx, ny))\n layer = Convolution2D(8, 11, 11, border_mode='same')(input_layer)\n layer = Activation('tanh')(layer)\n layer = MaxPooling2D(pool_size=(2,2))(layer)\n layer = Convolution2D(8, 3, 3, border_mode='same')(layer)\n layer = Activation('tanh')(layer)\n layer = MaxPooling2D(pool_size=(3,3))(layer)\n layer = Convolution2D(8, 3, 3, border_mode='same')(layer)\n layer = Activation('tanh')(layer)\n layer = MaxPooling2D(pool_size=(3,3))(layer)\n layer = Flatten()(layer)\n layer = Dropout(0.20)(layer)\n layer = Dense(20)(layer)\n layer = Dropout(0.10)(layer)\n output_layer = Dense(1, activation='sigmoid')(layer)\n model = Model(input=input_layer, output=output_layer)\n model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n return model", "title": "" }, { "docid": "dc08bca6e866a262cecaa3d56d814f78", "score": "0.59938544", "text": "def _conv2d(self, x, W):\r\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "24a3749d9972c269cbe05fc21f30d1a6", "score": "0.5991178", "text": "def conv3x3(in_planes: int, out_planes: int, stride: int = 1) -> Callable:\n return nn.Conv2D(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias_attr=False)", "title": "" }, { "docid": "75d8358711e8766a3c4d538e64cfbb6c", "score": "0.5989478", "text": "def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,\n hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,\n dtype=np.float32):\n self.params = {}\n self.reg = reg\n self.dtype = dtype\n \n ############################################################################\n # TODO: Initialize weights and biases for the three-layer convolutional #\n # network. 
Weights should be initialized from a Gaussian with standard #\n # deviation equal to weight_scale; biases should be initialized to zero. #\n # All weights and biases should be stored in the dictionary self.params. #\n # Store weights and biases for the convolutional layer using the keys 'W1' #\n # and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #\n # hidden affine layer, and keys 'W3' and 'b3' for the weights and biases #\n # of the output affine layer. #\n ############################################################################\n #input data dims\n (C, H, W) = input_dim\n #conv params\n conv_stride = 1\n conv_pad = (filter_size - 1) / 2\n #max pool params\n pool_height = 2\n pool_width = 2\n pool_stride = 2\n\n #infer output dims of intermediate layers\n H_output_conv = 1 + (H + 2 * conv_pad - filter_size) / conv_stride\n W_output_conv = 1 + (W + 2 * conv_pad - filter_size) / conv_stride\n H_output_pool = 1 + (H_output_conv - pool_height) / pool_stride\n W_output_pool = 1 + (W_output_conv - pool_width) / pool_stride\n\n #conv param init\n self.params['W1'] = weight_scale * np.random.randn(num_filters, C, filter_size, filter_size)\n self.params['b1'] = np.zeros(num_filters)\n #1st affine init\n self.params['W2'] = weight_scale * np.random.randn(num_filters*H_output_pool*W_output_pool, hidden_dim)\n self.params['b2'] = np.zeros(hidden_dim)\n\n #2nd affine init\n self.params['W3'] = weight_scale * np.random.randn(hidden_dim, num_classes)\n self.params['b3'] = np.zeros(num_classes)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "title": "" }, { "docid": "5c931b882e919d9a032f0d360d11f018", "score": "0.5988879", "text": "def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):\n weight = weight_variable()\n return nn.Conv2d(in_channels, out_channels,\n kernel_size=kernel_size, stride=stride, padding=padding,\n weight_init=weight, has_bias=False, pad_mode=\"valid\")", "title": "" }, { "docid": "60a8d1bbca0fbf877ba0a3cb338f8493", "score": "0.59887457", "text": "def __init__(self, in_channels, out_channels, kernel_size=7, stride=1, padding=1):\n super(GCN, self).__init__()\n self.conv_l1 = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1),\n padding=(padding, 0), stride=(stride, 1))\n self.conv_l2 = nn.Conv2d(out_channels, out_channels, kernel_size=(1, kernel_size),\n padding=(0, padding), stride=(1, stride))\n self.conv_r1 = nn.Conv2d(in_channels, out_channels, kernel_size=(1, kernel_size),\n padding=(0, padding), stride=(1, stride))\n self.conv_r2 = nn.Conv2d(out_channels, out_channels, kernel_size=(kernel_size, 1),\n padding=(padding, 0), stride=(stride, 1))", "title": "" }, { "docid": "6052fd2aca9023992c5d2782c5e2560a", "score": "0.5982769", "text": "def conv(self, kernel=circular_kernel(25)):\n\t\tself.img = convolve(self.img, kernel, mode='constant' ,cval=0.0)", "title": "" }, { "docid": "b22ea4b1a7ff327179a5bf78a5e57af1", "score": "0.5978329", "text": "def conv3x3(in_planes, out_planes, stride=1, output_padding=0):\n return nn.ConvTranspose2d(in_planes, out_planes, kernel_size=3,\n stride=stride,\n padding=1, output_padding=output_padding,\n bias=False)", "title": "" }, { "docid": "6decdc62cb17850fa7e40d523c7f5482", "score": "0.5978323", "text": "def _conv_block(inputs, filters, kernel, strides):\n\n x = 
tf.keras.layers.Conv2D(filters, kernel, padding='same', strides=strides)(inputs)\n x = tf.keras.layers.BatchNormalization()(x)\n return relu6(x)", "title": "" }, { "docid": "0fedb35370ed4986ba8f678a7ae13b0b", "score": "0.5977749", "text": "def _create_conv(cls, onnx_node, inputs, opset_version):\n kernel = tuple(onnx_node.attrs[\"kernel_shape\"])\n padding = tuple(onnx_node.attrs[\"pads\"][0:2])\n stride = tuple(onnx_node.attrs[\"strides\"])\n group = onnx_node.attrs[\"group\"]\n\n bias = len(inputs) == 3\n x = inputs[0]\n x_shape = inputs[0].shape\n in_channels = x_shape[1]\n w_shape = inputs[1].shape\n out_channels = w_shape[0]\n assert w_shape[1] == in_channels // group\n\n if inputs[0].device.id() == -1:\n if group != 1:\n raise NotImplementedError\n else:\n handle = singa.ConvHandle(\n x.data,\n kernel,\n stride,\n padding,\n in_channels,\n out_channels,\n bias,\n group\n )\n else:\n handle = singa.CudnnConvHandle(\n x.data,\n kernel,\n stride,\n padding,\n in_channels,\n out_channels,\n bias,\n group\n )\n\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs, opset_version)\n return handle, forward", "title": "" }, { "docid": "5ee4e6b019ed1a666a458023986ed170", "score": "0.59763414", "text": "def conv3x3(in_planes, out_planes, stride=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation)", "title": "" }, { "docid": "7c9b9b18f046f1a5060c091db4f460ff", "score": "0.5969255", "text": "def conv2d(x, w):\n return tf.nn.conv2d(input=x, filter=w, strides=[1, 1, 1, 1], padding='SAME') # strides: 步伐", "title": "" }, { "docid": "7e463740c07bd8b2380e54f3e0db31d6", "score": "0.59689677", "text": "def subnet_conv3x3(in_ch, out_ch):\n return nn.Sequential(\n nn.Conv2d(in_ch, 2*in_ch, 3, padding=1), \n nn.LeakyReLU(), \n nn.Conv2d(2*in_ch, 2*in_ch, 3, padding=1), \n nn.LeakyReLU(),\n nn.Conv2d(2*in_ch, out_ch, 3, padding=1))", "title": "" }, { "docid": "297025a81193cb5d958c91d4e4afef53", "score": "0.5958034", "text": "def deconv2d(x,\n input_size,\n output_size,\n filter_size = 3,\n stride = 2,\n init=xavier_initializer,\n use_batch_norm=False,\n activation=None,\n reuse = False,\n name=None):\n with tf.name_scope(name, 'deconv2d', [x]) as scope:\n w_name, b_name = weight_name(name), bias_name(name)\n with tf.variable_scope('vars', reuse=reuse):\n K = tf.get_variable(w_name, [filter_size, filter_size, output_size, input_size], initializer=init())\n b = tf.get_variable(b_name, [output_size], initializer=init())\n if not reuse:\n tf.add_to_collection('weights', K)\n tf.add_to_collection('biases', b)\n input_shape = tf.shape(x)\n output_shape = tf.stack([input_shape[0], input_shape[1]*2, input_shape[2]*2, output_size])\n h = tf.nn.conv2d_transpose(x, K, output_shape=output_shape, strides=[1, stride, stride, 1], padding='SAME')\n h = tf.nn.bias_add(h, b)\n h = batch_norm(h) if use_batch_norm else h\n h = activation(h) if activation else h\n if not reuse:\n tf.add_to_collection('conv_layers', h)\n return h", "title": "" }, { "docid": "15a60f473bb1bd94567c0f72783bac26", "score": "0.59566444", "text": "def _get_conv_model(\n shape,\n kernel_h,\n kernel_w,\n padding,\n strides,\n dilation,\n groups,\n dtype,\n channels,\n var,\n has_bias=False,\n has_activation=False,\n has_pad=False,\n):\n a = relay.var(next(iter(var)), shape=shape, dtype=dtype)\n input_arr = var[next(iter(var))]\n if has_pad:\n p = ((0, 0), (padding[0], padding[0]), (padding[1], padding[1]), (0, 0))\n a = relay.nn.pad(a, pad_width=p)\n padding 
= (0, 0, 0, 0)\n else:\n if len(padding) == 2:\n padding = (padding[0], padding[1], padding[0], padding[1])\n shape = (shape[0], shape[1], shape[2] + padding[0] * 2, shape[3] + padding[1] * 2)\n is_depthwise = shape[1] == channels == groups\n\n weight_format = \"OIHW\" if is_depthwise else \"OIHW\"\n if weight_format == \"IOHW\":\n weight_shape = (shape[1] // groups, channels, kernel_h, kernel_w)\n else:\n weight_shape = (channels, shape[1] // groups, kernel_h, kernel_w)\n\n w = tvm.nd.array(np.random.uniform(-1, 1, weight_shape).astype(dtype))\n weights = relay.const(w, dtype)\n out = relay.nn.conv2d(\n a,\n weights,\n kernel_size=(kernel_h, kernel_w),\n data_layout=\"NCHW\",\n kernel_layout=weight_format,\n dilation=dilation,\n strides=strides,\n padding=padding,\n groups=groups,\n channels=channels,\n out_dtype=dtype,\n )\n params = {\"w\": w}\n if has_bias:\n bias_shape = weight_shape[2] if is_depthwise else weight_shape[0]\n b = tvm.nd.array(np.random.uniform(-1, 1, bias_shape).astype(dtype))\n biasc = relay.const(b, dtype)\n out = relay.nn.bias_add(out, biasc, axis=1)\n params[\"b\"] = b\n\n if has_activation:\n out = relay.nn.relu(out)\n\n print(\"Out:\", out)\n\n return out, params", "title": "" }, { "docid": "8eabd67081bec86a30cfdaf2d60805a8", "score": "0.595427", "text": "def conv_layer(self, indata, ksize, padding, name, dilate=1, strides=[1, 1, 1, 1], bias_term=False, active=True,\n BN=True):\n with tf.variable_scope(name):\n W = tf.get_variable(\"weights\", dtype=tf.float32, shape=ksize,\n initializer=tf.contrib.layers.xavier_initializer())\n if bias_term:\n b = tf.get_variable(\"bias\", dtype=tf.float32, shape=[ksize[-1]])\n if dilate > 1:\n if bias_term:\n conv_out = b + tf.nn.atrous_conv2d(indata, W, rate=dilate, padding=padding, name=name)\n else:\n conv_out = tf.nn.atrous_conv2d(indata, W, rate=dilate, padding=padding, name=name)\n else:\n if bias_term:\n conv_out = b + tf.nn.conv2d(indata, W, strides=strides, padding=padding, name=name)\n else:\n conv_out = tf.nn.conv2d(indata, W, strides=strides, padding=padding, name=name)\n if BN:\n with tf.variable_scope(name + '_bn') as scope:\n # conv_out = batchnorm(conv_out,scope=scope,training = training)\n conv_out = self.simple_global_bn(conv_out, name=name + '_bn')\n if active:\n with tf.variable_scope(name + '_relu'):\n conv_out = tf.nn.relu(conv_out, name='relu')\n return conv_out", "title": "" }, { "docid": "ddbda6ead543fa4da42240dac7c349b0", "score": "0.5953443", "text": "def heat_conv(input, kernel):\n input = tf.expand_dims(tf.expand_dims(input, 0), -1)\n\n result = tf.nn.depthwise_conv2d(input, kernel,\n [1, 1, 1, 1],\n padding='SAME')\n\n return result[0, :, :, 0]", "title": "" }, { "docid": "b8f575362841769b58626acc087a2add", "score": "0.59517366", "text": "def conv2d(x, W):\r\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "9186e6d6d17b8ef18c345042383097b7", "score": "0.5944119", "text": "def conv_block_factory(in_channels, out_channels,\n kernel_size=3, stride=1, padding=1,\n conv_type=\"regular\",\n normalization=\"instancenorm\", activation=\"relu\"):\n conv = convolutions[conv_type]\n conv = conv(in_channels, out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding, bias=normalization==\"none\")\n\n normalization = _normalization(normalization, out_channels)\n activation = _activation(activation)\n\n return nn.Sequential(conv, normalization, activation)", "title": "" }, { "docid": "0e1a0b0538b673b74cc2dfb125eefdd3", "score": "0.5941964", "text": "def 
make_conv2d_transpose_tests(options):\n\n test_parameters = [{\n \"input_shape\": [[1, 50, 54, 3]],\n \"filter_shape\": [[1, 1, 8, 3], [1, 2, 8, 3], [1, 3, 8, 3], [1, 4, 8, 3]],\n \"output_shape\": [[1, 100, 108, 8]],\n \"dynamic_output_shape\": [True, False],\n }, {\n \"input_shape\": [[1, 16, 1, 512]],\n \"filter_shape\": [[4, 1, 512, 512]],\n \"output_shape\": [[1, 32, 1, 512]],\n \"dynamic_output_shape\": [True, False],\n }, {\n \"input_shape\": [[1, 128, 128, 1]],\n \"filter_shape\": [[4, 4, 1, 1]],\n \"output_shape\": [[1, 256, 256, 1]],\n \"dynamic_output_shape\": [True, False],\n }]\n\n def build_graph(parameters):\n \"\"\"Build a transpose_conv graph given `parameters`.\"\"\"\n input_tensor = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n\n filter_tensor = tf.placeholder(\n dtype=tf.float32, name=\"filter\", shape=parameters[\"filter_shape\"])\n\n input_tensors = [input_tensor, filter_tensor]\n\n if parameters[\"dynamic_output_shape\"]:\n output_shape = tf.placeholder(dtype=tf.int32, shape=[4])\n input_tensors.append(output_shape)\n else:\n output_shape = parameters[\"output_shape\"]\n\n out = tf.nn.conv2d_transpose(\n input_tensor,\n filter_tensor,\n output_shape=output_shape,\n padding=\"SAME\",\n strides=(1, 2, 2, 1))\n\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(np.float32, parameters[\"input_shape\"]),\n create_tensor_data(np.float32, parameters[\"filter_shape\"])\n ]\n if parameters[\"dynamic_output_shape\"]:\n values.append(np.array(parameters[\"output_shape\"]))\n\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(options, test_parameters, build_graph, build_inputs)", "title": "" }, { "docid": "984bd5c5edc57eefa40ee67bd248766c", "score": "0.5939353", "text": "def _simple_conv_2d(x, k):\n y = tf.nn.conv2d(x, k, [1, 1, 1, 1], padding='VALID')\n return y", "title": "" }, { "docid": "57dd1cd56179039ec36d544182e1560c", "score": "0.5937598", "text": "def conv2d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):\n # first layer\n x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer='he_normal', padding='same')(input_tensor)\n if batchnorm:\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n # second layer\n x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer='he_normal', padding='same')(input_tensor)\n if batchnorm:\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n return x", "title": "" }, { "docid": "6b539e385edeb0d5923cbb266d1f6eb5", "score": "0.593602", "text": "def three_layer_convnet_init():\r\n params = None\r\n ############################################################################\r\n # TODO: Initialize the parameters of the three-layer network. 
#\r\n ############################################################################\r\n conv_w1 = tf.Variable(kaiming_normal((5, 5, 3, 32)))\r\n conv_b1 = tf.Variable(tf.zeros([32]))\r\n conv_w2 = tf.Variable(kaiming_normal((3, 3, 32, 16)))\r\n conv_b2 = tf.Variable(tf.zeros([16]))\r\n fc_w = tf.Variable(kaiming_normal((32 * 32 * 16, 10)))\r\n fc_b = tf.Variable(tf.zeros([10]))\r\n params = [conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b]\r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n return params", "title": "" }, { "docid": "bfad83f13a9573dd7c8f6b2c7e6cb512", "score": "0.59334666", "text": "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "bfad83f13a9573dd7c8f6b2c7e6cb512", "score": "0.59334666", "text": "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "bfad83f13a9573dd7c8f6b2c7e6cb512", "score": "0.59334666", "text": "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "bfad83f13a9573dd7c8f6b2c7e6cb512", "score": "0.59334666", "text": "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "f126269d3b196ff0c4c4c30445d04376", "score": "0.5928313", "text": "def build_conv_block(self, dim):\n conv_block = [\n nn.ReflectionPad2d(1),\n nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=False), \n nn.BatchNorm2d(dim), \n nn.ReLU(True),\n nn.ReflectionPad2d(1),\n nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=False), \n nn.BatchNorm2d(dim)\n ]\n\n return nn.Sequential(*conv_block)", "title": "" }, { "docid": "528e8b5c565248056cbc7a89055d4af6", "score": "0.5925348", "text": "def conv3x3(in_planes, out_planes, stride=1):\n pad=nn.ReplicationPad2d(1)\n padding=0\n conv_mod = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=padding, bias=False)\n return nn.Sequential(pad,conv_mod)", "title": "" }, { "docid": "58b0e53389324cc9eff7e85790c36a34", "score": "0.59249383", "text": "def conv2d_transpose(self, output_shape, filter_):\n return self.add_layer(conv2d_transpose, output_shape, filter_)", "title": "" }, { "docid": "236971586912b7288d213dcdbc21b7ce", "score": "0.591392", "text": "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(int(in_planes), int(out_planes), kernel_size=3, stride=stride,padding=1, bias=False)", "title": "" }, { "docid": "b82aeadc5839ec03d7e0ef708d66dffc", "score": "0.59124964", "text": "def deconv2d(input_map, num_filters, size_kernel=5, stride=2, name=None, reuse=False):\n return tf.keras.layers.Conv2DTranspose(\n filters=num_filters, \n kernel_size=size_kernel, \n strides=stride,\n kernel_initializer=tf.random_normal_initializer(stddev=0.02),\n bias_initializer=tf.constant_initializer(0.0),\n padding=\"same\",\n name=name)(input_map)", "title": "" }, { "docid": "371cd6d14949e1612f4f5c6f383c2a5c", "score": "0.59107673", "text": "def _conv2d(self, x, kernel_size, num_o, stride, name, biased=False):\n\t\tnum_x = x.shape[self.channel_axis].value\n\t\twith tf.variable_scope(name) as scope:\n\t\t\tw = tf.get_variable('weights', shape=[kernel_size, kernel_size, num_x, num_o])\n\t\t\ts = [1, stride, stride, 1]\n\t\t\to = tf.nn.conv2d(x, w, s, padding='SAME')\n\t\t\tif biased:\n\t\t\t\tb = tf.get_variable('biases', shape=[num_o])\n\t\t\t\to = 
tf.nn.bias_add(o, b)\n\t\t\treturn o", "title": "" }, { "docid": "8ad67811f30646b5961db664cd39d844", "score": "0.59090525", "text": "def conv3x3_decoder(in_planes, out_planes, stride=1, output_padding=0):\r\n return nn.ConvTranspose2d(in_planes, out_planes, kernel_size=3, stride=stride,\r\n output_padding=output_padding, padding=1, bias=False)", "title": "" } ]
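A pattern worth calling out in the convolution snippets above: several of the initializer passages hard-code the same spatial-size rule, out = 1 + (in + 2*pad - kernel) / stride, once per layer. The sketch below factors that rule into a helper. It is illustrative only; the helper name is mine and appears in none of the passages, and it uses floor division to match the integer arithmetic the NumPy-style code above relies on.

def conv_output_size(size, kernel, stride=1, pad=0):
    # Spatial output size of a conv or pool layer:
    # out = 1 + (in + 2*pad - kernel) // stride  (floor division).
    return 1 + (size + 2 * pad - kernel) // stride

# Example: a 32x32 input with 7x7 filters and pad 3 stays 32x32;
# a following 2x2, stride-2 max pool halves it to 16x16.
h_conv = conv_output_size(32, kernel=7, stride=1, pad=3)   # 32
h_pool = conv_output_size(h_conv, kernel=2, stride=2)      # 16
print(h_conv, h_pool)

Note that the passages above are Python 2 code (iteritems, bare "/"); under Python 3 their "/" would yield floats for sizes such as H_out1/2, so "//" is the safer spelling when porting them.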
74bd46482e33b59d3bfb90ab88f35618
Returns available information on a given phylome
[ { "docid": "fa7b59e0fe3fe2e63eab76834cf0f64b", "score": "0.6757125", "text": "def get_phylome_info(self, phylome_id):\n\n # Check if the phylome id code is well-constructed\n if not self.__check_input_parameter__(str_number = phylome_id):\n raise NameError(\"get_phylome_info: Check your input data\")\n\n ## Get all available data for the input phylome\n cmd = 'SELECT phylome_id AS id, seed_taxid, s.name AS seed_species, CONCAT'\n cmd += '(code, \".\", seed_version) AS seed_proteome, DATE(ts) AS date, ph.'\n cmd += 'name, comments FROM species AS s, %s AS ph WHERE' % (self._phylomes)\n cmd += '(ph.phylome_id = %s AND ph.seed_taxid = s.taxid)' % (phylome_id)\n\n if self.__execute__(cmd):\n return self._SQL.fetchone()\n return {}", "title": "" } ]
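The positive passage builds its SQL by "%" string interpolation, including the caller-supplied phylome_id. As a hedged alternative, not phylomeDB's actual API, the same lookup can bind the id through the database driver; the table and column names below are taken from the passage, everything else (the cursor object and function name) is assumed.

def get_phylome_info_bound(cursor, phylomes_table, phylome_id):
    # Same query as the passage, but phylome_id is passed as a bound
    # parameter; only the table name (which cannot be bound) is formatted in.
    sql = (
        "SELECT phylome_id AS id, seed_taxid, s.name AS seed_species, "
        "CONCAT(code, '.', seed_version) AS seed_proteome, DATE(ts) AS date, "
        "ph.name, comments "
        "FROM species AS s, {} AS ph "
        "WHERE ph.phylome_id = %s AND ph.seed_taxid = s.taxid"
    ).format(phylomes_table)
    cursor.execute(sql, (phylome_id,))
    row = cursor.fetchone()
    return row if row is not None else {}

Driver-side binding (the "%s" placeholder plus a parameter tuple, as in MySQLdb/PyMySQL) sidesteps the quoting problems of interpolating phylome_id directly, leaving the table name (self._phylomes in the passage) as the only formatted piece.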
[ { "docid": "fa5a9555f5750d8e42f613d44ab8f13e", "score": "0.7154412", "text": "def phylum_info(self):\n pass", "title": "" }, { "docid": "a2a4e825c3e16dc56fbf36b7cf0d2419", "score": "0.63739514", "text": "def subphylum_info(self):\n pass", "title": "" }, { "docid": "8abfae534b4518dc3a0a78a18c466829", "score": "0.596615", "text": "def get_phylomes(self):\n\n ## Recover all the phylomes stored in the database\n cmd = 'SELECT phylome_id, seed_taxid, s.name AS seed_species, CONCAT(code,'\n cmd += ' \".\", seed_version) AS seed_proteome, DATE(ts) AS date, comments, '\n cmd += 'ph.name FROM species AS s, %s AS ph WHERE seed_' % (self._phylomes)\n cmd += 'taxid = s.taxid'\n\n ## Generate a dictionary with all phylomes\n if self.__execute__(cmd):\n return self.__fomat_MySQL_to_dict__(\"phylome_id\", self._SQL.fetchall())\n return {}", "title": "" }, { "docid": "84aeb9f03c98675b669163e88a364017", "score": "0.59232754", "text": "def phydata(self, name):\n return self.model.nameddata(name, self.data)", "title": "" }, { "docid": "7643cc3c6fc956e8983e67ce2d5de544", "score": "0.58625335", "text": "def genus_info(self):\n pass", "title": "" }, { "docid": "d6f5dcd3e155a5b1a605c981f0446f95", "score": "0.5831219", "text": "def getInfo(self):\n ...", "title": "" }, { "docid": "5b724fb1fee0edddbcbe82f17001a58e", "score": "0.5777063", "text": "def test_Phyloxml(self):\n phx = PhyloXMLIO.read(EX_PHYLO)\n self.assertIsInstance(phx, PX.Phyloxml)\n for tree in phx:\n self.assertIsInstance(tree, PX.Phylogeny)\n for otr in phx.other:\n self.assertIsInstance(otr, PX.Other)", "title": "" }, { "docid": "7d5b76eee8a10a0d25c9e76d8d16f4e6", "score": "0.5756927", "text": "def getinfo(node, ntype):\n\tsearch = search_patt.format(ntype, node)\n\tif DEBUG: print(\"in 'getinfo().....\")\n\tif DEBUG: print('search is:')\n\tprint(search)\n\tif DEBUG: print\n\tans = json.loads(getdata(search))\n\tif DEBUG: print('ans is: ')\n\tif DEBUG: print(type(ans))\n\tif DEBUG: print(ans)\n\tif DEBUG: print(len(ans))\n\tif len(ans) == 0 :\n\t\tprint(\"no associted records found\")\n\treturn ans", "title": "" }, { "docid": "d85b28fce1591a6eb69c8579fd98c433", "score": "0.57512945", "text": "def information(self):", "title": "" }, { "docid": "a73e5e50f990e1aff048f28f87ec3b49", "score": "0.5669529", "text": "def extract_uniprot_genome_metadata(upid):\n\n proteome_dict = {}\n\n # namespace prefix # or register a namespace in the ET\n prefix = \"{http://uniprot.org/uniprot}%s\"\n\n response = requests.get(gc.PROTEOME_XML_URL % upid)\n\n # check if we got an OK http reponse\n if response.status_code == httplib.OK:\n prot_tree_root = ET.fromstring(response.content)\n\n proteome = prot_tree_root.find(prefix % \"proteome\")\n\n upid = proteome.find(prefix % \"upid\")\n proteome_dict[\"upid\"] = upid.text\n proteome_dict[\"ncbi_id\"] = int(proteome.find(prefix % \"taxonomy\").text)\n proteome_dict[\"description\"] = \"\"\n proteome_dict[\"scientific_name\"] = proteome.find(prefix % \"name\").text\n\n # initialization to the default values\n is_reference = proteome.find(prefix % \"is_reference_proteome\").text\n proteome_dict[\"is_reference\"] = 1\n\n if is_reference == \"false\":\n proteome_dict[\"is_reference\"] = 0\n else:\n proteome_dict[\"is_reference\"] = 1\n\n # initialization to the default values\n is_representative = is_reference = proteome.find(prefix % \"is_representative_proteome\").text\n proteome_dict[\"is_representative\"] = 0\n\n if is_representative == \"false\":\n proteome_dict[\"is_representative\"] = 0\n else:\n 
proteome_dict[\"is_representative\"] = 1\n\n # get sequence accessions\n acc_dict = {}\n other_accs = []\n accession_nodes = proteome.findall(prefix % \"component\")\n\n for node in accession_nodes:\n if node.get(\"name\").find(\"WGS\") != -1:\n # look for all WGS accessions\n gen_acc_nodes = node.findall(prefix % \"genome_accession\")\n\n # single WGS accession\n if len(gen_acc_nodes) == 1:\n acc_dict[\"WGS\"] = node.find(prefix % \"genome_accession\").text\n else:\n for gen_acc_node in gen_acc_nodes:\n other_accs.append(gen_acc_node.text)\n\n elif node.get(\"name\").find(\"Chloroplast\") != -1:\n acc_dict[\"Chloroplast\"] = node.find(prefix % \"genome_accession\").text\n\n elif node.get(\"name\").find(\"Mitochondrion\") != -1:\n acc_dict[\"Mitochondrion\"] = node.find(prefix % \"genome_accession\").text\n\n elif node.get(\"name\").find(\"Genome\") != -1:\n description =node.find(prefix % \"description\").text\n proteome_dict[\"description\"] = description\n\n else:\n\n other_acc = node.find(prefix % \"genome_accession\")\n if other_acc is not None:\n other_accs.append(node.find(prefix % \"genome_accession\").text)\n\n if len(other_accs) > 0:\n acc_dict[\"other\"] = other_accs\n\n proteome_dict[\"accessions\"] = acc_dict\n\n return proteome_dict", "title": "" }, { "docid": "8dbef851f70407d5741c5d136b560948", "score": "0.56268936", "text": "def get_species_in_phylome(self, phylome_id):\n ## Retrieve taxids associated to a phylome\n cmd = 'SELECT taxid from %s WHERE phylome_id=\"%s\"' % \\\n (self._phy_content, phylome_id)\n if self.__execute__(cmd):\n return [values[\"taxid\"] for values in self._SQL.fetchall()]\n else:\n return []", "title": "" }, { "docid": "93a60399fe4875f3a1ac687bbbe85f51", "score": "0.5578425", "text": "def get_proteomes_in_phylome(self, phylome_id):\n\n # Check if the phylome id code is well-constructed\n if not self.__check_input_parameter__(str_number = phylome_id):\n raise NameError(\"get_proteomes_in_phylome: Check your input data\")\n\n ## Retrieve the proteomes associated to the phylome\n cmd = 'SELECT s.taxid, CONCAT(code, \".\", pc.version) AS proteome, s.name,'\n cmd += ' source, date, pc.version FROM species AS s, %s ' % (self._phylomes)\n cmd += 'AS ph, %s AS pc, genome AS g WHERE ph.phylome' % (self._phy_content)\n cmd += '_id = %s AND ph.phylome_id = pc.phylome_id AND pc.' 
% (phylome_id)\n cmd += 'taxid = s.taxid AND pc.taxid = g.taxid AND pc.version = g.version'\n\n proteomes = {}\n if self.__execute__(cmd):\n for row in self._SQL.fetchall():\n proteomes.setdefault(\"proteomes\", {}).setdefault(row[\"proteome\"], row)\n\n cmd = 'SELECT CONCAT(code, \".\", ph.seed_version) AS proteome FROM '\n cmd += 'species AS s, %s AS ph WHERE (ph.phylome_id = ' % (self._phylomes)\n cmd += '%s AND ph.seed_taxid = s.taxid)' % (phylome_id)\n if self.__execute__(cmd):\n for row in self._SQL.fetchall():\n proteomes[\"seed\"] = row[\"proteome\"]\n\n return proteomes", "title": "" }, { "docid": "9b7333b58a5c78536f6cba8ea1cf07ce", "score": "0.55781037", "text": "def test_Phylogeny(self):\n trees = list(PhyloXMLIO.parse(EX_PHYLO))\n # Monitor lizards\n self.assertEqual(trees[9].name, \"monitor lizards\")\n self.assertEqual(trees[9].description, \"a pylogeny of some monitor lizards\")\n self.assertTrue(trees[9].rooted)\n # Network (unrooted)\n self.assertEqual(\n trees[6].name, \"network, node B is connected to TWO nodes: AB and C\"\n )\n self.assertFalse(trees[6].rooted)", "title": "" }, { "docid": "c12ca2561839f65dd46dd77d8a4cdd06", "score": "0.5531175", "text": "def get_phylome_algs(self, phylome_id):\n\n # Check if the phylome id code is well-constructed\n if not self.__check_input_parameter__(str_number = phylome_id):\n raise NameError(\"get_phylome_tree: Check your input data\")\n\n ## Retrieve all the available alignments for the input phylome\n cmd = 'SELECT CONCAT(\"Phy\", protid, \"_\", code) AS protid, raw_alg, clean_'\n cmd += 'alg FROM %s AS a, %s AS ph, species ' % (self._algs, self._phylomes)\n cmd += 'AS s WHERE (ph.phylome_id = %s AND ph.phylome_id = ' % (phylome_id)\n cmd += 'a.phylome_id AND ph.seed_taxid = s.taxid)'\n\n algs = {}\n if self.__execute__(cmd):\n for row in self._SQL.fetchall():\n algs.setdefault(row[\"protid\"], {})[\"raw_alg\"] = row[\"raw_alg\"]\n algs.setdefault(row[\"protid\"], {})[\"clean_alg\"] = row[\"clean_alg\"]\n return algs", "title": "" }, { "docid": "393fba3cfbd85bf1ca5144632d436052", "score": "0.55092865", "text": "def get_pathway_protein_names(self, pathway):\n self.logging.info(\"Fetching the pathway\")\n # first identify gene from GeneInfo tag\n # this is not XML but HTML\n url = \"http://cgap.nci.nih.gov/Pathways/BioCarta/%s\" % pathway\n html_doc = urllib.urlopen(url).read()\n soup = BeautifulSoup(html_doc, 'html.parser')\n links = soup.find_all('area')\n links = [link for link in links if 'GeneInfo' in link.get('href')]\n\n links = set([link.attrs['href'] for link in links])\n\n self.logging.info(\"Scanning information about %s genes\" % len(links))\n # open each page and get info\n genes = {}\n for link in links:\n html_doc = urllib.urlopen(link).read()\n soup = BeautifulSoup(html_doc, 'html.parser')\n\n table_gene_info = soup.findAll(\"table\")[1]\n\n gene_name = link.rsplit(\"=\", 1)[1]\n self.logging.info(\" - \" + gene_name)\n\n genes[gene_name] = {}\n self.tt = table_gene_info\n for row in table_gene_info.find_all('tr'):\n entry = row.find_all('td')\n try:key = entry[0].text.strip()\n except:continue\n try:value = entry[1].text.strip()\n except:continue\n if \"[Text]\" in key:\n continue\n genes[gene_name][key] = value\n\n\n return genes", "title": "" }, { "docid": "bdb16d9a17109a613a4fd7fa894732d4", "score": "0.55063325", "text": "def test_phylo(self):\n global EX_PHYLO\n orig_fname = EX_PHYLO\n try:\n EX_PHYLO = DUMMY\n self._rewrite_and_call(\n orig_fname,\n (\n (\n ParseTests,\n [\"test_read_phylo\", 
\"test_parse_phylo\", \"test_shape_phylo\"],\n ),\n (\n TreeTests,\n [\n \"test_Phyloxml\",\n \"test_Other\",\n \"test_Phylogeny\",\n \"test_Clade\",\n \"test_Annotation\",\n \"test_CladeRelation\",\n \"test_Date\",\n \"test_Distribution\",\n \"test_Events\",\n \"test_Property\",\n \"test_Sequence\",\n \"test_SequenceRelation\",\n \"test_Taxonomy\",\n \"test_Uri\",\n ],\n ),\n ),\n )\n finally:\n EX_PHYLO = orig_fname", "title": "" }, { "docid": "1e90f3dbfdf55e77b1620311dc119d4e", "score": "0.550601", "text": "def get_general_info(self):\n\t\treturn self.info", "title": "" }, { "docid": "12cebe929edd911edb89c5851597658a", "score": "0.54959923", "text": "def print_phylo(tree):\n with StringIO() as handle:\n Phylo.write(tree, handle, \"newick\")\n print(handle.getvalue())", "title": "" }, { "docid": "f0243f50b407760c5da4b87d6a8632d0", "score": "0.5488854", "text": "def getSpeciesInfo():\n return Gorilla.speciesInfo", "title": "" }, { "docid": "ad2d0c2e8fa6016cdd7f237fd7bc5d2e", "score": "0.5483789", "text": "def info(self):", "title": "" }, { "docid": "e7b67af53c9b97a8c02f424846bc58c1", "score": "0.5481879", "text": "def tree_information(self):\n self.h1_tree_information()\n self.h3_tree_information()\n self.bvic_tree_information()\n self.byam_tree_information()", "title": "" }, { "docid": "0cbd7ab208d2bb180298bb7109ccec1c", "score": "0.54812336", "text": "def print_info(person_name):\n base_url = \"https://udayogra-find-gender-by-name-v2/\"\n name_url = base_url + \"name/\"\n code_url = base_url + \"analysis/\"\n resp = requests.get(name_url + person_name)\n try:\n pname = json.loads(resp.text)[0]\n female = pname['female']\n print(f\"Female: {', '.join([lang['name'] for lang in female])}\")\n male = pname['male']\n border_names = []\n print(f\"Female: {', '.join([lang['name'] for lang in male])}\")\n except KeyError:\n print(\"Unknown name please use first name\")", "title": "" }, { "docid": "73d6639e938ee17e66c69187caa75034", "score": "0.5476215", "text": "def get_basic_info(request, id='2005'):\n logger.info(\"func 'get_basic_info' get a param id -> \" + id)\n connection = ConnectionPool()\n result = connection.executeQuery(\n \"\\\n match(n)-[]-(m {name: '\" + id + \"'})\\\n return n.name\\\n \"\n )\n name = result[0]['n.name']\n result = connection.executeQuery(\n \"\\\n match(n)-[]-(m {name: '\" + id + \"'})\\\n match(n)-[]-(c)\\\n return labels(c) as c, c.name\\\n \"\n )\n detail = {}\n for item in result:\n if item['c'][0] == \"school_type_node\":\n detail['setting'] = item['c.name'].capitalize()\n if item['c'][0] == \"web_node\":\n detail['website'] = item['c.name']\n if item['c'][0] == \"read_sat_node\":\n detail['read_sat'] = item['c.name']\n if item['c'][0] == \"math_sat_node\":\n detail['math_sat'] = item['c.name']\n if item['c'][0] == \"act_node\":\n detail['act'] = item['c.name']\n if item['c'][0] == \"addr_node\":\n detail['address'] = item['c.name']\n if item['c'][0] == \"city_node\":\n detail['city'] = item['c.name']\n if item['c'][0] == \"state_node\":\n detail['state'] = item['c.name']\n if item['c'][0] == \"zip_node\":\n detail['zip'] = item['c.name']\n if item['c'][0] == \"med_earn_6_years_node\":\n detail['med_earn'] = item['c.name']\n if item['c'][0] == \"tuition_node\":\n detail['tuition'] = item['c.name']\n if item['c'][0] == \"average_aid_node\":\n detail['avg_aid'] = item['c.name']\n if item['c'][0] == \"accept_rate_node\":\n if item['c.name'] != 'N/A':\n detail['accept_rate'] = str(int(float(item['c.name']) * 100)) + '%'\n else:\n detail['accept_rate'] = 
item['c.name']\n if item['c'][0] == \"app_dead_node\":\n detail['app_dead'] = item['c.name']\n if item['c'][0] == \"app_fee_node\":\n detail['app_fee'] = item['c.name']\n if item['c'][0] == \"undergrad_pop_node\":\n detail['undergrad_pop'] = item['c.name']\n if item['c'][0] == \"grad_pop_node\":\n detail['grad_pop'] = item['c.name']\n if item['c'][0] == 'grad_rate_node':\n detail['grad_rate'] = item['c.name']\n if item['c'][0] == 'fresh_ret_node':\n detail['fresh_ret_rate'] = item['c.name']\n if item['c'][0] == 'emp_rate_node':\n detail['emp_rate'] = item['c.name']\n\n data = {\n \"title_info\": {\n 'main_title': name,\n 'duration': '4-Years',\n 'school_type': detail.get('setting', '') + \" University\",\n 'location': detail.get('city', '') + ', ' + detail.get('state', ''),\n 'school_link': detail.get('website', '')},\n \"desc_info\": {\n 'title': name,\n 'location': detail.get('address', '') + ' ' + detail.get('city', '') + ', ' + detail.get('state',\n '') + ' ' + detail.get(\n 'zip', ''),\n 'avg_score': {\n 'reading': detail.get('read_sat', ''),\n 'math': detail.get('math_sat', ''),\n 'composite': detail.get('act', '')\n },\n 'app_fee': detail.get('app_fee', ''),\n 'expected_salary': detail.get('med_earn', ''),\n 'cost': {\n 'net_price': detail.get('tuition', ''),\n 'avg_aid_award': detail.get('avg_aid', '')\n },\n 'admission': {\n 'acceptance_rate': detail.get('accept_rate', ''),\n 'application_ddl': detail.get('app_dead', '')\n },\n 'students': {\n 'undergraduate': detail.get('undergrad_pop', ''),\n 'graduate': detail.get('grad_pop', ''),\n },\n 'stat': {\n 'graduation_rate': detail.get('grad_rate', ''),\n 'freshman_retention': detail.get('fresh_ret_rate', ''),\n 'employment_rate': detail.get('emp_rate', ''),\n 'median_salary': detail.get('med_earn', '')\n }\n }\n }\n\n return JsonResponseResult(data=data, code=200, msg='success')", "title": "" }, { "docid": "1a2975b62ff08c131c1c3f0580c0640a", "score": "0.5435776", "text": "def info(self,cec=None):", "title": "" }, { "docid": "a78f4709e87b70183202d94af8584965", "score": "0.5427444", "text": "def _rostopic_info(topic):\n print(get_info_text(topic))", "title": "" }, { "docid": "4bf410916f2aa1da02dd0c1848ac79f7", "score": "0.54245794", "text": "def getInfo(self) -> List[List[str]]:\n\t\treturn []", "title": "" }, { "docid": "154f3e22044727e844c192bce757fea6", "score": "0.542408", "text": "def family_info(self):\n pass", "title": "" }, { "docid": "7f63411fe133fc9148457e946e7c3cc9", "score": "0.5400002", "text": "def getinfo(self):\n return [self.name, self.division, self.hometown, self.school, self.anniversary, self.sande, self.citizen,\n self.military, self.geography, self.bowl, self.seed]", "title": "" }, { "docid": "7aec76ccba995012097a05e22950a1e5", "score": "0.5385669", "text": "async def get_info(self, addr, url):\n\n def getText(nodelist):\n rc = []\n for node in nodelist:\n if node.nodeType == node.TEXT_NODE:\n rc.append(node.data)\n return \"\".join(rc)\n\n try:\n txt = None\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n txt = await response.text()\n if txt:\n data = parseString(txt)\n dev = data.getElementsByTagName(\"device\")[0]\n rdata = {\"ip\": addr}\n rdata[\"brand\"] = getText(\n dev.getElementsByTagName(\"manufacturer\")[0].childNodes\n )\n rdata[\"model\"] = getText(\n dev.getElementsByTagName(\"modelName\")[0].childNodes\n )\n rdata[\"serial\"] = getText(\n dev.getElementsByTagName(\"serialNumber\")[0].childNodes\n )\n rdata[\"name\"] = getText(\n 
dev.getElementsByTagName(\"friendlyName\")[0].childNodes\n )\n logging.debug(f\"Got device: {rdata}\")\n if self.callb:\n self.callb(rdata)\n except Exception as e:\n logging.error(f\"Error: Error when parsing location XML: {e}\")", "title": "" }, { "docid": "f30a95ffe1d8a0a5a79178a501c7e5ac", "score": "0.53511214", "text": "def get_general_information(url):\n pass", "title": "" }, { "docid": "fda5dcb931567a787a8076c37d351664", "score": "0.53484076", "text": "def info():\n return cleos.GetInfo()", "title": "" }, { "docid": "4416d1e4d4e9c9da841d234fea11e26b", "score": "0.5346693", "text": "def get_genome_info(self, genome):\n\n # Check if the input parameter is well-constructed\n if not self.__check_input_parameter__(string = genome):\n raise NameError(\"get_genome_info: Check your input data\")\n\n if genome.count(\".\") != 1:\n raise NameError(\"get_genome_info: Expected input 'SpeciesCode.version'\")\n\n code, version = genome.split(\".\")\n ## If the genome is well-defined, ask for its associated information\n cmd = 'SELECT CONCAT(code, \".\", version) AS genome_id, g.taxid, s.name AS '\n cmd += 'species, version, DATE(date) AS date, source, comments FROM species'\n cmd += ' AS s, genome AS g WHERE(s.taxid = g.taxid AND code = \"%s\"' % (code)\n cmd += ' AND version = %s)' % (version)\n\n ## Return the available information in a dictionary defined for that\n if self.__execute__(cmd):\n return self._SQL.fetchone()\n return {}", "title": "" }, { "docid": "c3ceadd96f2baaf58263f9e5364edc7f", "score": "0.5317228", "text": "def get_info(self,node): \n metadata = node.extra ;\n self.get_and_set_assigned_groupmanager(node)\n \n metadata_request = {\"virtualMachineLocation\": metadata.get(\"virtualMachineLocation\"),\n \"numberOfMonitoringEntries\" : 10\n }\n resp = self.connection_gm.request('/groupmanager?getVirtualMachineMetaData',method='POST',data=json.dumps(metadata_request))\n return json.loads(resp.object)", "title": "" }, { "docid": "ae1d504c5f687b13d9fb9d6f6a7dcc01", "score": "0.5288319", "text": "def get_phylome_trees(self, phylome_id):\n\n # Check if the phylome id code is well-constructed\n if not self.__check_input_parameter__(str_number = phylome_id):\n raise NameError(\"get_phylome_trees: Check your input data\")\n\n ## Get all available trees for a given phylome id\n cmd = 'SELECT CONCAT(\"Phy\", protid, \"_\", code) AS protid, method, lk, '\n cmd += 'newick FROM %s AS t, %s AS ph, ' % (self._trees, self._phylomes)\n cmd += 'species AS s WHERE (ph.phylome_id = %s AND ph.' 
% (phylome_id)\n cmd += 'phylome_id = t.phylome_id AND ph.seed_taxid = s.taxid)'\n\n trees = {}\n ## For each phylomeDB ID used as a seed, return the different evolutionary\n ## model evaluated during the phylogenetic reconstruction as well as the\n ## phylogenetic tree (an ETE object) and its likelihood\n if self.__execute__(cmd):\n for row in self._SQL.fetchall():\n trees.setdefault(row[\"protid\"], {}).setdefault(row[\"method\"], \\\n [row[\"lk\"], PhyloTree(row[\"newick\"], sp_naming_function=extract_species_name)])\n return trees", "title": "" }, { "docid": "95f4c4dc05d8462984cd484e79428666", "score": "0.5277614", "text": "def sub_information(self, data):\n number = 1\n for info in data:\n print(\"___________________________________\")\n print(\"Substitute N°{}: '{}' \".format(number, info[0]))\n print(\"the nutriscore is '{}'\".format(info[1]))\n # if there is no stores register, print a message\n if info[2] == \" \":\n print(\"Sorry, no information about the store is available.\")\n else:\n print(\"You can find it there: {}\".format(info[2]))\n print(\"For more information, clic on the link: {}\".format(info[3]))\n number += 1", "title": "" }, { "docid": "c625e3e764d5ab6bfde639a1dcc277c7", "score": "0.525626", "text": "def get_details(self):", "title": "" }, { "docid": "6b32be359ad58402342162fdd10e8e76", "score": "0.5241022", "text": "def node_information(self):\n path = '/ws/v1/node/info'\n return self.request(path)", "title": "" }, { "docid": "2245b53328a28e13574ec55e782e3762", "score": "0.52354145", "text": "def get_seq_info_in_tree(self, id, phylome_id, method = None):\n\n # Check if the input parameters are well-constructed\n if not self.__check_input_parameter__(single_id = id, string = method, \\\n str_number = phylome_id):\n raise NameError(\"get_seq_info_in_tree: Check your input data\")\n\n ## Depending on the input parameter, retrieve the best tree associated to\n ## the phylomeDB ID in the given phylomeDB or retrieve the tree generated\n ## under the model indicated in the input parameter. Moreover, all the\n ## available information in the database for tree leaves\n if method:\n return self.get_info_homologous_seqs(id, phylome_id, tree = True, \\\n tree_method = method)\n else:\n return self.get_info_homologous_seqs(id, phylome_id, tree = True)", "title": "" }, { "docid": "f6ac995b66c95c84794308945eecb0d8", "score": "0.5213309", "text": "def kegg_info(database):\n # TODO - return a string \n # TODO - chache and validate the organism code / T numbers?\n # TODO - can we parse the somewhat formatted output?\n #\n # http://rest.kegg.jp/info/<database>\n #\n # <database> = kegg | pathway | brite | module | ko | genome | genes | <org> | vg | ag |\n # ligand | compound | glycan | reaction | rclass | enzyme | network |\n # variant | disease | drug | dgroup | environ\n \n # <org> = KEGG organism code or T number\n return _query(\"info\", database)", "title": "" }, { "docid": "14f70816491765846b21437632206a61", "score": "0.5209354", "text": "def get_info_homologous_seqs(self, protid, phylome_id, tree = None, \\\n tree_method = False, sequence = False):\n\n ## Depending on the input parameters, recover the best tree for the\n ## input phylomeDB ID in the input phylomeDB. 
Otherwise, the function\n ## will try to recover the tree reconstructed under the model specific\n data = {}\n\n if tree == None:\n tree_db = self.get_tree(protid, phylome_id, best_tree = True)\n\n elif tree and not tree_method:\n tree_db = self.get_tree(protid, phylome_id, best_tree = True)\n\n elif tree and tree_method:\n tree_db = self.get_tree(protid, phylome_id, method = tree_method)\n\n ## Check whether it has been possible to recover a tree from the database\n if not tree_db:\n return data\n else:\n method = tree_db.keys()[0]\n\n ## If it has been required, store the tree in an appropiate data structure.\n if tree:\n data.setdefault(\"tree\", {})\n data[\"tree\"][\"method\"] = method\n data[\"tree\"][\"lk\"] = tree_db[method][\"lk\"]\n data[\"tree\"][\"tree\"] = tree_db[method][\"tree\"]\n data[\"tree\"][\"best\"] = True if not tree_method else False\n\n ## Recover the leaf names taking into account that there is information\n ## associated to the sequence while there is another information associated\n ## for each copy of the sequence.\n leaves = tree_db[method][\"tree\"].get_leaf_names()\n ids = set([\"_\".join(name.split(\"_\")[:2]) for name in leaves])\n protids = self.__parser_ids__(ids)\n\n ## Establish whether the tree contains more than one copy for each sequence\n ## and if the tree leaf names reflect that situation or not\n copy_var_support = True if len(leaves) == len(set(leaves)) else False\n\n ## Join and retrieve all the information available in the database for the\n ## unique sequences in the set\n cmd = 'SELECT CONCAT(\"Phy\", p.protid, \"_\", s.code) AS protid, s.code, CONC'\n cmd += 'AT(s.code, \".\", p.version) AS proteome, p.taxid, p.version, s.name,'\n cmd += ' MAX(copy) AS copy, count(DISTINCT method) AS trees, count(DISTINCT'\n cmd += ' sf.protid, sf.phylome_id) AS collat FROM (protein AS p, species AS'\n cmd += ' s, %s AS ph, %s AS pc) LEFT ' % (self._phylomes,self._phy_content)\n cmd += 'JOIN %s AS t ON (p.protid = t.protid) LEFT JOIN ' % (self._trees)\n cmd += 'seed_friend AS sf ON (p.protid = sf.friend_id) WHERE (p.protid IN '\n cmd += '(%s) AND p.taxid = s.taxid AND p.taxid = ' % (protids)\n cmd += 'pc.taxid AND pc.phylome_id = %s AND ph.phylome_id = ' % (phylome_id)\n cmd += 'pc.phylome_id AND pc.version = p.version) GROUP BY p.protid'\n\n if self.__execute__(cmd):\n data.setdefault(\"seq\", {})\n for row in self._SQL.fetchall():\n data[\"seq\"].setdefault(row[\"protid\"], {})\n\n data[\"seq\"][row[\"protid\"]][\"copy\"] = row[\"copy\"]\n data[\"seq\"][row[\"protid\"]][\"trees\"] = row[\"trees\"]\n data[\"seq\"][row[\"protid\"]][\"taxid\"] = row[\"taxid\"]\n data[\"seq\"][row[\"protid\"]][\"proteome\"] = row[\"proteome\"]\n data[\"seq\"][row[\"protid\"]][\"collateral\"] = row[\"collat\"]\n data[\"seq\"][row[\"protid\"]][\"species_name\"] = row[\"name\"]\n data[\"seq\"][row[\"protid\"]][\"species_code\"] = row[\"code\"]\n\n ## In some cases, it is necessary as well to recover the protein sequence\n ## for each unique homologous sequence.\n if sequence:\n\n cmd = 'SELECT CONCAT(\"Phy\", p.protid, \"_\", s.code) AS protid, seq FROM '\n cmd += 'protein AS p, species AS s, unique_protein AS u WHERE(p.protid IN'\n cmd += ' (%s) AND p.taxid = s.taxid AND p.protid = u.protid)' % (protids)\n if self.__execute__(cmd):\n for row in self._SQL.fetchall():\n data[\"seq\"][row[\"protid\"]][\"seq\"] = row[\"seq\"]\n\n ## Retrieve the external ids for all the unique sequences in the tree\n for protid, external in self.get_external_ids(ids).iteritems():\n 
data[\"seq\"][protid].setdefault(\"external\", {})\n data[\"seq\"][protid][\"external\"] = external\n\n for protid, external in self.get_go_ids(ids).iteritems():\n data[\"seq\"][protid].setdefault(\"external\", {})\n data[\"seq\"][protid][\"external\"].update(external)\n\n for protid, external in self.get_old_phylomedb_ids(ids).iteritems():\n data[\"seq\"][protid].setdefault(\"external\", {})\n data[\"seq\"][protid][\"external\"].update(external)\n\n for protid, external in self.get_prot_gene_names(ids).iteritems():\n data[\"seq\"][protid].setdefault(\"external\", {})\n data[\"seq\"][protid][\"external\"].update(external)\n\n ## The sequence name is as well stored to considerer how many times a given\n ## sequence has been used. That is related to copy number variation cases\n data.setdefault(\"leaf_names\", leaves)\n\n ## Recover additional information for all leaves in the tree such as\n ## gene/protein names or copy number\n cmd = 'SELECT CONCAT(\"Phy\", p.protid, \"_\", s.code) AS protid, copy, '\n cmd += 'prot_name, gene_name FROM protein AS p, species AS s, '\n cmd += '%s AS ph, %s AS pc WHERE (p.' % (self._phylomes, self._phy_content)\n cmd += 'protid IN (%s) AND p.taxid = s.taxid AND p.taxid = pc.' % (protids)\n cmd += 'taxid AND pc.phylome_id = %s AND ph.phylome_id = pc.' % (phylome_id)\n cmd += 'phylome_id AND pc.version = p.version)'\n\n ## Get specificy information for each tree's leaf, the information will be\n ## the same that for an unique sequence if the copy_var_support is not\n ## set up in advance. The copy_var_support points when there are more than\n ## one copy for at least one unique sequence in the tree and specific\n ## information for each copy can be put it in the tree\n if self.__execute__(cmd):\n data.setdefault(\"leaves\", {})\n\n for row in self._SQL.fetchall():\n code = row[\"protid\"]\n\n if data[\"seq\"][row[\"protid\"]][\"copy\"] > 1 and copy_var_support:\n #code += (\"_%d\") % (row[\"copy\"])\n pass\n if not copy_var_support and row[\"copy\"] > 1 or not code in leaves:\n continue\n\n data[\"leaves\"].setdefault(code, {})\n data[\"leaves\"][code][\"gene\"] = row[\"gene_name\"]\n data[\"leaves\"][code][\"protein\"] = row[\"prot_name\"]\n data[\"leaves\"][code][\"copy_version\"] = row[\"copy\"]\n\n return data", "title": "" }, { "docid": "0309e321aa67b5c37abcc9c4e696f1e6", "score": "0.52042997", "text": "def model_info(magpie):\n\n LOG.debug(\"Requesting traffic model information\")\n\n try:\n response: requests.Response = requests.get(\n magpie.url + \"/model/traffic/heron/model_info\"\n )\n except requests.exceptions.ConnectionError:\n LOG.error(\n \"Unable to connect to Magpie server at: %s, is the server active?\",\n magpie.url,\n )\n else:\n models: List[Dict[str, str]] = response.json()\n\n data: List[List[str]] = []\n\n headings = [\"Name\", \"Description\"]\n\n for model in models:\n data.append([model[\"name\"], model[\"description\"]])\n\n click.echo(\"\\nAvailable Traffic Models for Apache Heron Topologies:\\n\")\n click.echo(format_smart_table(data, headings))", "title": "" }, { "docid": "2e2c6d3c706195bba229f8cec090b52e", "score": "0.5202573", "text": "def get_available_trees_by_phylome(self, id, collateral = True):\n\n # Check if the input parameters are well-constructed\n if not self.__check_input_parameter__(list_id = id, boolean = collateral):\n raise NameError(\"Check your input data\")\n protids = self.__parser_ids__(id)\n\n ## Retrieve which models were used to reconstructed the trees in each\n ## phylome where the input phylomeDB ID was 
used as a seed\n cmd = 'SELECT DISTINCT phylome_id, method, CONCAT(\"Phy\", p.protid, \"_\", s.'\n cmd += 'code) AS protid FROM protein AS p, species AS s, %s' % (self._trees)\n cmd += ' AS t WHERE (p.protid IN (%s) AND p.protid = t.protid ' % (protids)\n cmd += 'AND p.taxid = s.taxid)'\n\n if not self.__execute__(cmd) and not collateral:\n return {}\n\n t = {}\n ## Create a temporary structure to process the data retrieved\n for r in self._SQL.fetchall():\n t.setdefault(int(r[\"phylome_id\"]), {})\n t[int(r[\"phylome_id\"])].setdefault(r[\"protid\"], set()).add(r[\"method\"])\n\n trees = {}\n ## Create the definitive data structure where the IDs with the methods used\n ## to reconstructed each tree are grouped by phylome\n for phylome in t:\n trees.setdefault(phylome, [])\n for protid in t[phylome]:\n trees[phylome].append([True, protid, [m for m in t[phylome][protid]]])\n\n ## Retrieve the trees where the input phylomeDB ID has been used as part of\n ## the set of homologous sequences to make the tree\n if collateral:\n for seed, phylome in self.get_collateral_seeds(id):\n cmd = 'SELECT method FROM %s WHERE protid = ' % (self._trees)\n cmd += '%s AND phylome_id = %s' % (self.__parser_ids__(seed), phylome)\n if self.__execute__(cmd):\n trees.setdefault(int(phylome), []).append([False, seed, [r[\"method\"] \\\n for r in self._SQL.fetchall()]])\n\n return trees", "title": "" }, { "docid": "b5991c31e2e09be3933fe9d2d899d272", "score": "0.5199677", "text": "def info(self):\n\n def format_path(m_id):\n \"\"\" routing path to micrograph image file \"\"\"\n # basename, ext = os.path.splitext(self.path)\n # name = 'micrograph{}{}'.format(m_id, ext)\n # convert image set to png for web...\n # (Chrome and Firefox don't display TIF by default\n name = 'micrograph{}.png'.format(m_id) \n # return os.path.join(os.sep, current_app.config['DATADIR'], 'data', 'micrographs', name)\n return os.path.join(os.sep, current_app.config['MICROGRAPH_PATH'], name)\n\n if self.sample is not None and self.sample.label is not None:\n annealing_condition = self.sample.label\n else:\n annealing_condition = 'Not available'\n \n micrograph_path = format_path(self.micrograph_id)\n return dict(micrograph_id=self.micrograph_id,\n author_id=self.contributor_key,\n micrograph_path=micrograph_path,\n thumb=micrograph_path.replace('micrographs', 'thumbs'),\n url='',\n annealing_condition=annealing_condition,\n upload_date='today',\n microconstituent=self.primary_microconstituent\n )", "title": "" }, { "docid": "a560156c2d8893c2a0ee375d10bbbb7c", "score": "0.5189979", "text": "def retrieve_gene_info(genes, organism, genomes):\n query_url = 'http://rest.kegg.jp/get/' + '+'.join(genes) \n\n data = urllib2.urlopen(query_url).read().split('\\n')\n\n indices = [i for i, x in enumerate(data) if x == \"///\"]\n\n if len(indices) == 0 or len(indices) == 1:\n extract_gene_info(data, organism, genomes)\n else:\n for i in range(len(indices) - 1):\n if i == 0:\n extract_gene_info(data[:indices[i]], organism, genomes)\n extract_gene_info(\n data[(indices[i] + 1):indices[i + 1]], organism, genomes)", "title": "" }, { "docid": "697839f34baa16b3f26f70efdcf8f3d4", "score": "0.5184284", "text": "def uniprot_acc_to_taxonmy(self, accesion):\n from bioservices import UniProt\n u = UniProt()\n data = u.search(accesion, frmt=\"xml\")\n from bs4 import BeautifulSoup\n soup = BeautifulSoup(data, \"html.parser\")\n return ' (' + ', '.join([t.text for t in soup.find_all('taxon')]) + ')'", "title": "" }, { "docid": "a4ed584058288d6d8d0c11169879c212", "score": 
"0.5183792", "text": "def test_phylogeny_to_phyloxml(self):\n tree = self.phyloxml.phylogenies[0]\n doc = tree.to_phyloxml_container()\n self.assertIsInstance(doc, PX.Phyloxml)", "title": "" }, { "docid": "7bb751eacf68e5fd29c63aee217a23e2", "score": "0.518373", "text": "def get_international_epidemiology():\n\n # This dict contains the requested countries by the community.\n countries = {\n \"Mexico\": \"México\",\n \"United States\": \"EE. UU.\",\n \"Pakistan\": \"Pakistán\",\n \"Italy\": \"Italia\",\n \"Japan\": \"Japón\",\n \"China (mainland)\": \"China\",\n \"China\": \"China\",\n \"Finland\": \"Finlandia\",\n \"Turkey\": \"Turquía\",\n \"Spain\": \"España\",\n \"Russia\": \"Rusia\",\n \"Iran\": \"Irán\",\n \"South Africa\": \"Sudáfrica\",\n \"Peru\": \"Perú\",\n \"South Korea\": \"Corea del Sur\",\n \"Brazil\": \"Brasil\",\n \"Ecuador\": \"Ecuador\",\n \"Argentina\": \"Argentina\",\n \"Chile\": \"Chile\",\n \"Netherlands\": \"Países Bajos\",\n \"Sweden\": \"Suecia\",\n \"Norway\": \"Noruega\",\n \"Philippines\": \"Filipinas\",\n \"France\": \"Francia\",\n \"Germany\": \"Alemania\",\n \"United Kingdom\": \"Reino Unido\",\n \"Switzerland\": \"Suiza\",\n \"India\": \"India\",\n \"Colombia\": \"Colombia\"\n }\n\n url = \"https://en.wikipedia.org/wiki/Template:2019%E2%80%9320_coronavirus_pandemic_data\"\n table_text = \"| País | Casos Confirmados | Defunciones ^\\(%) |\\n| -- | -- | -- |\\n\"\n\n with requests.get(url, headers=HEADERS) as response:\n\n soup = BeautifulSoup(response.text.replace(\"–\", \"0\").replace(\n \"—\", \"0\").replace(\"No data\", \"0\"), \"html.parser\")\n\n [tag.extract() for tag in soup(\"sup\")]\n\n for row in soup.find(\"table\", \"wikitable\").find_all(\"tr\"):\n\n for k, v in countries.items():\n\n if k in row.text.strip():\n tds = row.find_all(\"td\")\n\n cases = int(tds[2].text.replace(\",\", \"\").strip())\n deaths = int(tds[3].text.replace(\",\", \"\").strip())\n\n table_text += \"| {} | {:,} | {:,} ^{}% |\\n\".format(\n v,\n cases,\n deaths,\n round(deaths / cases * 100, 2)\n )\n\n break\n\n # Add the totals row.\n totals_row = soup.find(\"table\", \"wikitable\").find_all(\"tr\")[\n 1].find_all(\"td\")\n\n cases = int(totals_row[2].text.encode(\n \"ascii\", \"ignore\").decode(\"utf-8\").replace(\",\", \"\").strip())\n\n deaths = int(totals_row[3].text.encode(\n \"ascii\", \"ignore\").decode(\"utf-8\").replace(\",\", \"\").strip())\n\n table_text += \"| __{}__ | __{:,}__ | __{:,} ^{}%__ |\\n\".format(\n \"Global\",\n cases,\n deaths,\n round(deaths / cases * 100, 2)\n )\n\n return table_text", "title": "" }, { "docid": "bd44e7fe63a5c69f144a55f731765a99", "score": "0.51708645", "text": "def _get_heating_info(self):\n self.printer.print(self.now() + ' >>> heating circuit 01 info')\n self.driver.get(local_settings.heating_info_url())\n success = self.__wait_for_component('Heating')\n if success:\n pairs = self.__get_value_pairs(self.driver, 'Heating')\n self.pages.append(pairs)\n success = len(pairs) != 0\n return success", "title": "" }, { "docid": "e485394a8d4bc27c394f12ca1e374d12", "score": "0.5155839", "text": "def info(self, category, common_name):\n # Show information of specified object\n info = self.get_attrs(category, common_name)\n print(\"CN: {}\".format(info.pop(\"cn\")))\n for k, v in sorted(info.items()):\n print(\" {:20}{}\".format(k, v))", "title": "" }, { "docid": "bd46cb02f9fafe449744df1270092b3b", "score": "0.5154392", "text": "async def _nodes_info(self, ctx, tier: str, *, nodes):\n season = 2\n tier = tier.lower()\n pages = []\n if tier 
in self.aw_maps.keys():\n # nodeNumbers = nodes.split(' ')\n for node in nodes.split(' '):\n print('aw_nodes req: '+node+' '+tier)\n em = await self.get_awnode_details(ctx = ctx, nodeNumber=node,tier=tier)\n mapurl = '{}warmap_3_{}.png'.format(self.basepath,tier.lower())\n em.set_image(url=mapurl)\n pages.append(em)\n # await self.bot.say(embed=em)\n if len(pages) > 0:\n menu = PagesMenu(self.bot, timeout=120, delete_onX=True, add_pageof=True)\n await menu.menu_start(pages=pages, page_number=0)\n else:\n await self.bot.say('Valid tiers include: {}'.format(', '.join(self.aw_maps.keys())))", "title": "" }, { "docid": "d29ba9e48788f3acb0affd1e8e97e6fb", "score": "0.51496667", "text": "def _get_node_info(id_list: Iterable[str]) -> Iterable[str]:\n pheno_list = []\n node_types = get_id_type_map(id_list)\n\n for node in id_list:\n if 'phenotype' in node_types[node]:\n pheno_list.append(node)\n else:\n phenotypes = get_objects_for_subject(\n subject=node, object_category='phenotype', relation='RO:0002200', subject_direct=True\n )\n pheno_list = pheno_list + phenotypes\n return pheno_list", "title": "" }, { "docid": "ca92fdbb6951bd16c28b8fa1e41ad848", "score": "0.5125362", "text": "def gen_info(code):\n repo = PokemonRepo(PokemonDataAccess())\n response = repo.get_gen_info(code)\n\n if (response == None):\n abort(404)\n return response", "title": "" }, { "docid": "17e0194056d4b6958baafa62c8dbe68c", "score": "0.5124372", "text": "def test_phyml(self):\r\n\r\n cmd = PhymlCommandline(\r\n self.phyml_exe,\r\n input=self.EX_PHYLIP,\r\n datatype='nt')\r\n # Smoke test\r\n try:\r\n out, err = cmd()\r\n self.assertTrue(len(out) > 0)\r\n self.assertEqual(len(err), 0)\r\n # Check the output tree\r\n tree = Phylo.read(self.EX_PHYLIP + '_phyml_tree.txt', 'newick')\r\n self.assertEqual(tree.count_terminals(), 13)\r\n finally:\r\n # Clean up generated files\r\n for suffix in ['_phyml_tree.txt', '_phyml_stats.txt']:\r\n fname = self.EX_PHYLIP + suffix\r\n if os.path.isfile(fname):\r\n os.remove(fname)", "title": "" }, { "docid": "8ce21f0b2946cc75dff30637eecc54b1", "score": "0.5107612", "text": "def get_info(self):\n return None", "title": "" }, { "docid": "e476df6dc153078070643349ee5b8356", "score": "0.5103074", "text": "def get_all(self):\n self.open_profile()\n aux = self.get_name()\n if aux == -1:\n return aux\n aux = self.get_bio()\n if aux == -1:\n return aux\n aux = self.get_num_pics()\n if aux == -1:\n return aux\n return 2", "title": "" }, { "docid": "9d9c43c822b9e2816466aa0e5895caca", "score": "0.5092559", "text": "def GetInfo(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "e4a54a8bb87126fadedcb01a5cd807cc", "score": "0.5086279", "text": "def print_device_info(nodemap):\n print \"*** DEVICE INFORMATION ***\"\n\n try:\n result = True\n node_device_information = PySpin.CCategoryPtr(nodemap.GetNode(\"DeviceInformation\"))\n\n if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information):\n features = node_device_information.GetFeatures()\n for feature in features:\n node_feature = PySpin.CValuePtr(feature)\n print \"%s: %s\" % (node_feature.GetName(),\n node_feature.ToString() if PySpin.IsReadable(node_feature) else \"Node not readable\")\n\n else:\n print \"Device control information not available.\"\n\n except PySpin.SpinnakerException as ex:\n print \"Error: %s\" % ex.message\n result = False\n\n return result", "title": "" }, { "docid": "e0b0e4116225ccb55cf492525b90dbd1", "score": "0.50713146", "text": "def get_account_info(info):\n users = random.randint(20, 
200)\n roles = random.randint(4, 44)\n info.labels(account=\"dev1\").info({'users': str(users), 'roles': str(roles), 'sec': 'weak'})", "title": "" }, { "docid": "b5d163b97144797a86fc783437d261c4", "score": "0.50676423", "text": "def print_device_info(nodemap):\n print \"\\n*** DEVICE INFORMATION ***\\n\"\n\n try:\n result = True\n node_device_information = PySpin.CCategoryPtr(nodemap.GetNode(\"DeviceInformation\"))\n\n if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information):\n features = node_device_information.GetFeatures()\n for feature in features:\n node_feature = PySpin.CValuePtr(feature)\n print \"%s: %s\" % (node_feature.GetName(),\n node_feature.ToString() if PySpin.IsReadable(node_feature) else \"Node not readable\")\n\n else:\n print \"Device control information not available.\"\n\n except PySpin.SpinnakerException as ex:\n print \"Error: %s\" % ex\n return False\n\n return result", "title": "" }, { "docid": "10a38358422172e007bc924cbfc1b97f", "score": "0.50623477", "text": "def get_info_text(topic):\n try:\n from cStringIO import StringIO\n except ImportError:\n from io import StringIO\n import itertools\n buff = StringIO()\n def topic_type(t, topic_types):\n matches = [t_type for t_name, t_type in topic_types if t_name == t]\n if matches:\n return matches[0]\n return 'unknown type'\n\n master = rosgraph.Master('/rostopic')\n try:\n state = master.getSystemState()\n\n pubs, subs, _ = state\n # filter based on topic\n subs = [x for x in subs if x[0] == topic]\n pubs = [x for x in pubs if x[0] == topic]\n\n topic_types = _master_get_topic_types(master)\n \n except socket.error:\n raise ROSTopicIOException(\"Unable to communicate with master!\")\n\n if not pubs and not subs:\n raise ROSTopicException(\"Unknown topic %s\"%topic)\n\n buff.write(\"Type: %s\\n\\n\"%topic_type(topic, topic_types))\n\n if pubs:\n buff.write(\"Publishers: \\n\")\n for p in itertools.chain(*[l for x, l in pubs]):\n buff.write(\" * %s (%s)\\n\"%(p, get_api(master, p)))\n else:\n buff.write(\"Publishers: None\\n\")\n buff.write('\\n')\n\n if subs:\n buff.write(\"Subscribers: \\n\")\n for p in itertools.chain(*[l for x, l in subs]):\n buff.write(\" * %s (%s)\\n\"%(p, get_api(master, p)))\n else:\n buff.write(\"Subscribers: None\\n\")\n buff.write('\\n')\n return buff.getvalue()", "title": "" }, { "docid": "53e44888428d029c33f476d0c702f446", "score": "0.5055905", "text": "def print_device_info(nodemap):\n\n print \"*** DEVICE INFORMATION ***\\n\"\n\n try:\n result = True\n node_device_information = PySpin.CCategoryPtr(nodemap.GetNode(\"DeviceInformation\"))\n\n if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information):\n features = node_device_information.GetFeatures()\n for feature in features:\n node_feature = PySpin.CValuePtr(feature)\n print \"%s: %s\" % (node_feature.GetName(),\n node_feature.ToString() if PySpin.IsReadable(node_feature) else \"Node not readable\")\n\n else:\n print \"Device control information not available.\"\n\n except PySpin.SpinnakerException as ex:\n print \"Error: %s\" % ex\n return False\n\n return result", "title": "" }, { "docid": "2a9edc543e3b6dc53ab16f3f0158d3f6", "score": "0.5053858", "text": "def _get_details(self):\n # formulate the query\n query = '''\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX proms: <http://promsns.org/def/proms#>\n PREFIX prov: <http://www.w3.org/ns/prov#>\n SELECT *\n WHERE {\n GRAPH ?g {\n <%(uri)s>\n rdfs:label ?label .\n OPTIONAL {\n ?a_u prov:used 
<%(uri)s> .\n }\n OPTIONAL {\n <%(uri)s> prov:value ?v .\n }\n OPTIONAL {\n ?a_g prov:generated <%(uri)s> .\n }\n OPTIONAL {\n <%(uri)s> prov:generatedAtTime ?gat .\n }\n OPTIONAL {\n <%(uri)s> prov:wasAttributedTo ?wat .\n }\n OPTIONAL {\n ?wat prov:wasAttributedTo ?wat_label .\n }\n }\n }\n ''' % {'uri': self.uri}\n\n # run the query\n entity_details = database.query(query)\n\n # extract results into instance vars\n if entity_details and 'results' in entity_details:\n if len(entity_details['results']['bindings']) > 0:\n ret = entity_details['results']['bindings'][0]\n self.label = ret['label']['value']\n self.gat = ret['gat']['value'] if 'gat' in ret else None\n self.value = ret['v']['value'] if 'v' in ret else None\n self.wat = ret['wat']['value'] if 'wat' in ret else None\n self.wat_label = ret['wat_label']['value'] if 'wat_label' in ret else None\n self.a_u = ret['a_u']['value'] if 'a_u' in ret else None\n self.a_g = ret['a_g']['value'] if 'a_g' in ret else None", "title": "" }, { "docid": "e147e173a5ad6adb2048e624204292ff", "score": "0.5052069", "text": "def displayInfos(self):\n paramsList \\\n = [(\"Init\",\n \"\", \n [\"document\", \"HTMLTree\"])]\n\n\n infos = [ ]\n infos.extend(self.describeParams(paramsList))\n \n # Remove the last blank record added after each top-level category\n if len(infos) > 0:\n infos.pop()\n \n return \"\\n\".join(infos)", "title": "" }, { "docid": "22cb757769977dd063757ba21ca044f8", "score": "0.5049306", "text": "def infoLTSPg(site):\n\n fileName = getFileName(site, \"G\")\n nc = xr.open_dataset(fileName)\n depthList = list(nc.DEPTH.values)\n\n table = list()\n ## print file info\n table.append(['Site', site])\n table.append(['Sub-Facility:', str.split(fileName, \"/\")[2]])\n table.append(['Filename for download:', nc.source_file_download])\n table.append(['Filename for connection:', nc.source_file_opendap])\n table.append(['Location:', (str(round(np.mean([nc.geospatial_lat_max, nc.geospatial_lat_min]), 5)) + \" - \" +\n str(round(np.mean([nc.geospatial_lon_max, nc.geospatial_lon_min]), 5)))])\n table.append(['Variables: ', list(nc.data_vars)])\n table.append(['Time coverage:', nc.time_coverage_start + ' through ' + nc.time_coverage_end])\n table.append(['Max DEPTH:', str(nc.geospatial_vertical_max)])\n table.append(['Included DEPTHS:', \", \".join(str(x) for x in depthList)])\n for dd in depthList:\n temp = nc.TEMP.sel(DEPTH=float(dd))\n table.append(['Data Availability at '+ str(dd) + \"m:\", blockPlot(temp, lineLength=40)])\n\n print(tabulate(table))\n print('\\u2591 NO DATA, \\u2593 DATA')\n return", "title": "" }, { "docid": "066f54d74e79dd6af241ff6c5c8e58d4", "score": "0.504805", "text": "def info(self):\n num_in_part={'a':2,'b':2,'c':3,'d':6,'e':4,'f':4,'g':4}\n \n text='\\t** Enlarged Contraint Sector global information **\\n\\n'\n text+= 'Class: '+self.chgt_var.upper()+'\\n'\n text+='particle in ECS : '\n for i in range(0,num_in_part[self.chgt_var]):\n part=self.step[-1].order_content[i]\n if i%3==0 and i!=0:\n text+='\\n\\t\\t\\t\\t '\n if part.external:\n if part.neutrino:\n text+=str(part.MG)+'(missing)\\t'\n else:\n text+=str(part.MG)+'(visible)\\t'\n elif type(part.MG)==str:\n text+=str(part.MG)+'(fuse)\\t'\n else:\n text+=str(part.MG)+'(propagator)\\t'\n \n text+='\\nblob linked are generated by :'\n for blob in self.blob_content:\n if blob.main.MG<0:\n text+=str(blob.main.MG)+'\\t'\n text+='\\n'\n return text", "title": "" }, { "docid": "bedffcfc2cb111c63828c96f65467122", "score": "0.5039448", "text": "async def get_node_info(self) -> 
dict:\n return await core.GetNodeInfoCommand(self.adapter)()", "title": "" }, { "docid": "41da34afa5794d3de4667869a99a305e", "score": "0.50372934", "text": "def getInfo(self):\r\n return self.info", "title": "" }, { "docid": "270c3026b19243c96756922bced9c283", "score": "0.50358313", "text": "def get_info(ip_address, map=False):\n url = 'https://ipinfo.io/' + ip_address + '/json'\n response = urlopen(url)\n data = load(response)\n info = \"~~~~~~~~~~~~~~~~~~~~~\\nInformation about {}:\\n\".format(ip_address)\n\n if map:\n for attr in data.keys():\n if attr == \"loc\":\n x, y = data[attr].split(\",\")\n x_location_list.append(x)\n y_location_list.append(y)\n else:\n print(\"~~~~~~~~~~~~~~~~~~~~~\\n\"\n \"Information about {}:\".format(ip_address))\n for attr in data.keys():\n if attr == \"readme\" or attr == \"bogon\":\n continue\n # will print the data line by line\n print(\"{}: {}\".format(attr, data[attr]))\n info += \"{}: \\t {}\\n\".format(attr, data[attr])\n #print(attr, ' ' * 13 + '\\t->\\t', data[attr])\n print(\"~~~~~~~~~~~~~~~~~~~~~\\n\")\n info += \"~~~~~~~~~~~~~~~~~~~~~\\n\"\n\n return info", "title": "" }, { "docid": "6528f318c2d36f9e38317cc035c893e4", "score": "0.50349516", "text": "def get_node_info(*args):\n return _ida_graph.get_node_info(*args)", "title": "" }, { "docid": "371bcb149d61e193701583476620867b", "score": "0.50337595", "text": "def wikiinfo(self, mask, target, args):\n wikikey = 'wiki{}'.format(''.join(args['<terms>'])).lower()\n if wikikey in self.bot.db:\n entry = self.bot.db[wikikey]\n yield ('Key {key} registered by {user} in {channel} '\n 'on {date} (UTC)'.format(key=wikikey[4:],\n user=entry['by'],\n channel=entry['channel'],\n date=entry['date']))\n yield 'Response: {}'.format(entry['response'])\n else:\n yield 'No such alias found, so falls back to search.'", "title": "" }, { "docid": "4db3dc79ef2c3fb4c884f9272f23144d", "score": "0.50261897", "text": "def info_finder(web_text):\n email_finder(web_text)\n url_finder(web_text)\n phone_finder(web_text)", "title": "" }, { "docid": "ecd5dd1b3bc8bc1061d4f4d4a4e88028", "score": "0.50210845", "text": "def __get_infos(self):\r\n return [info_node.text for info_node in self.sense_node.findall('s_inf')]", "title": "" }, { "docid": "f462749c3f53d5f1dcda8aa3f6f9d9b2", "score": "0.5020115", "text": "def get_taxa(self, node_id=...): # -> list[Unknown] | None:\n ...", "title": "" }, { "docid": "9bc5582600315257dd5a470302ab0741", "score": "0.5008404", "text": "def __get_infos(self):\r\n return [info_node.text for info_node in self.kanji_element_node.findall('ke_inf')]", "title": "" }, { "docid": "47574a724950924a4ca3edfb7a37fd60", "score": "0.5004391", "text": "def get_node_info(self):\n return get(self.base_url, '/api/v1/node-info')", "title": "" }, { "docid": "f2cc651d85228a0bfe64ab98b8322846", "score": "0.5000668", "text": "def kingdom_info(*args):\n return '''\n Animals (kingdom Animalia) are eukaryotic and multicellular inhabitants \n of planet Earth. 
{0} is a member of this kingdom.\n '''.format(' '.join(args))", "title": "" }, { "docid": "4a40f52d01c4377a48443cd79723c8e3", "score": "0.49988937", "text": "def main(node, ntype):\n\tif DEBUG: print(\"in 'main.....\")\n\treturn getinfo(node, ntype)", "title": "" }, { "docid": "8ba5d5c791fb4615b6ea8ca974c19cd2", "score": "0.49965668", "text": "def get_info(self):\n return self.send_message('getInfo')", "title": "" }, { "docid": "dd3b0ae582a80e0260dff9c510dccacc", "score": "0.4992901", "text": "def GetInfo(self, term):\n if self.type == 'STR':\n return self.getStrInfo(term)\n elif self.type[0] == 'BLK':\n return self.getBLKInfo(term)\n elif self.type[0] == 'FC':\n return self.getFCInfo(term)\n else:\n return -1", "title": "" }, { "docid": "4058a14b5dcfdeb06595c3c5c62381f2", "score": "0.49900994", "text": "def getInfo(self):\n return self.info", "title": "" }, { "docid": "6994ddc3796cc1fa2160f7bafaee114f", "score": "0.4989857", "text": "def pcieinfo(check, verbose):\n cmd = \"sudo pcieutil show\"\n if check:\n cmd = \"sudo pcieutil check\"\n clicommon.run_command(cmd, display_cmd=verbose)", "title": "" }, { "docid": "529b10c768b59393cfd7798f7286ea98", "score": "0.49789625", "text": "def info(cls, network):\n content = \"\"\n if network.network.num_addresses == 1:\n content = cls._info_address(network)\n else:\n content = cls._info_network(network)\n return content", "title": "" }, { "docid": "e763e2640dc7a1008de12fdf138ed77f", "score": "0.497228", "text": "def information(self, params):\n raise NotImplementedError", "title": "" }, { "docid": "b54299a9ded7b40b70abf08887b65dd3", "score": "0.49722263", "text": "def print_device_info(nodemap):\r\n\r\n print('*** DEVICE INFORMATION ***\\n')\r\n\r\n try:\r\n result = True\r\n node_device_information = PySpin.CCategoryPtr(nodemap.GetNode('DeviceInformation'))\r\n\r\n if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information):\r\n features = node_device_information.GetFeatures()\r\n for feature in features:\r\n node_feature = PySpin.CValuePtr(feature)\r\n print('%s: %s' % (node_feature.GetName(),\r\n node_feature.ToString() if PySpin.IsReadable(node_feature) else 'Node not readable'))\r\n\r\n else:\r\n print('Device control information not available.')\r\n\r\n except PySpin.SpinnakerException as ex:\r\n print('Error: %s' % ex)\r\n return False\r\n\r\n return result", "title": "" }, { "docid": "70296ecba24b64b3ee2d83114c3cd975", "score": "0.49672294", "text": "def getAddressInfo():", "title": "" }, { "docid": "c90be7125fbf02b9cbbf51215418dc1d", "score": "0.49642313", "text": "def get_mall_info(driver_page):\n mall_main_info = driver_page.find_element_by_xpath('//div[@class=\"col-xs-6 col-sm-3 col-md-2\"]')\n mall_name = mall_main_info.find_element_by_css_selector('a').get_attribute('title')\n mall_main_link = mall_main_info.find_element_by_css_selector('a').get_attribute('href')\n mall_image = mall_main_info.find_element_by_css_selector('img').get_attribute('src')\n mall_image = check_mall_image(mall_image, MALL_NAME)\n\n mall_info = {\n 'mall_name': mall_name.lower(),\n 'mall_link': mall_main_link,\n 'mall_image': mall_image\n }\n\n return mall_info", "title": "" }, { "docid": "e202f625ec8c40262e8e3865e4f16adb", "score": "0.4958425", "text": "def discover_available_myos():\n\n bus = dbus.SystemBus()\n raw_bluez = bus.get_object('org.bluez', '/org/bluez')\n bluez_introspection = dbus.Interface(raw_bluez, dbus.INTROSPECTABLE_IFACE)\n introsprect_string = ET.fromstring(bluez_introspection.Introspect())\n\n # Connected 
bluetooth adapters\n nodes = introsprect_string.findall('node')\n adapters = {}\n for node in nodes:\n adapters[node.attrib['name']] = None\n\n # Paired Myos\n myo_base_uuid = 'd506[0-9a-fA-F]{4}-a904-deb9-4748-2c7f4a124842'\n uuid_matcher = re.compile(myo_base_uuid)\n myos = {}\n for adapter in adapters:\n raw_adapter = bus.get_object('org.bluez', '/org/bluez/' + adapter)\n adapter_introspect = dbus.Interface(raw_adapter, dbus.INTROSPECTABLE_IFACE)\n nodes = ET.fromstring(adapter_introspect.Introspect()).findall('node')\n\n for node in nodes:\n node_name = node.attrib['name']\n node_raw = bus.get_object('org.bluez', '/org/bluez/{}/{}'.format(adapter, node_name))\n node_props = dbus.Interface(node_raw, dbus.PROPERTIES_IFACE)\n uuids = node_props.Get('org.bluez.Device1', 'UUIDs')\n for uuid in uuids:\n matches = uuid_matcher.match(uuid)\n if matches is not None:\n address = str(node_props.Get('org.bluez.Device1', \"Address\"))\n if address not in myos:\n path = node_raw.object_path\n given_name = str(node_props.Get('org.bluez.Device1', \"Name\"))\n myos[address] = {'name': given_name, 'adapter': adapter, 'path': path, 'address': address}\n continue\n\n # Check which myos are \"set up\" by checking which have sub-nodes\n for node_name in myos.copy():\n myo_raw = bus.get_object('org.bluez', myos[node_name]['path'])\n myo_introspect = dbus.Interface(myo_raw, dbus.INTROSPECTABLE_IFACE)\n nodes = ET.fromstring(myo_introspect.Introspect()).findall('node')\n if len(nodes) < 1:\n del myos[node_name]\n\n return myos", "title": "" }, { "docid": "79530770235c1ff881e2b1bf8eb1a8a7", "score": "0.4946449", "text": "def get_node_info(self) -> dict:\n\n return self.provider.make_request(\"wallet/getnodeinfo\", {\"visible\": True})", "title": "" }, { "docid": "dda08f8f89cb9614725b51f8c7f47321", "score": "0.49449623", "text": "def get_agency_profile_details(agency_name):\n profile_url = 'https://clutch.co/profile/'\n profile_url = profile_url + agency_name\n print profile_url\n resp = requests.get(profile_url).content\n resp = BeautifulSoup(resp, 'html.parser')\n info_div = resp.find('div', {'class':'contact-dropdown-mpad'})\n #email = info_div.find('div', {'class':'field-item even'}).find('a')\n email = None # TODO\n state = get_decoded_string(info_div.find('div', {'class':'city-name'}).text)\n phone = get_decoded_string(info_div.find('span', {'class':'contact-dropdown-phone-ico'}).text)\n\n return email, state, phone", "title": "" }, { "docid": "627ae039f160fb9a68dfffa3410544e7", "score": "0.49383575", "text": "def information(self):\n\t\tprint(self.name.title() + \" we cook \" + self.cuisine)", "title": "" }, { "docid": "a9ca577cdde8580c9a4b3056ba9cc8bb", "score": "0.49370867", "text": "def viewer_get_node_info(*args):\n return _ida_graph.viewer_get_node_info(*args)", "title": "" }, { "docid": "9087f65f9d26bf1f57aa44ed0b001f49", "score": "0.49367002", "text": "def getDetailedInfo(self):\n info = []\n info.append((\"Name\", self.name))\n info.append((\"Sex\", self.sex))\n info.append((\"Age\", self.age))\n info.append((\"Healthy?\", self.getHealthyEnglish()))\n info.append((\"Species\", self.getSpeciesInfo().getName()))\n info.append((\"Friendly with other animals?\", self.getFriendlyEnoughEnglish()))\n\n return info", "title": "" }, { "docid": "77df1764f4707b9afbd7c61fe4d6210c", "score": "0.4935098", "text": "def get_info(self):\n return cninfo.cninfo().get_info()", "title": "" }, { "docid": "a35c7f8a36b669113952dc38168e6449", "score": "0.49321803", "text": "def get_info(self, jid, node, data):\n if (jid, 
node) not in self.nodes:\n if not node:\n return DiscoInfo()\n else:\n raise XMPPError(condition='item-not-found')\n else:\n return self.nodes[(jid, node)]['info']", "title": "" }, { "docid": "7bb20ccd810a67f860100b81c44286b5", "score": "0.49269873", "text": "def device_info(self): \r\n\r\n if not gdx.devices:\r\n print(\"device_info - no device connected\")\r\n return\r\n\r\n # The elements in the device_info list are: 0 = name, 1 = description, 2 = battery %, 3 = charger state, 4 = rssi\r\n device_info = [] \r\n \r\n # If there is just one device connected, package the info in a 1D list [device info]\r\n if len(gdx.devices) ==1:\r\n device_info.append(gdx.devices[0]._name)\r\n device_info.append(gdx.devices[0]._description)\r\n device_info.append(gdx.devices[0]._battery_level_percent) \r\n charger_state = [\"Idle\", \"Charging\", \"Complete\", \"Error\"] \r\n device_info.append(charger_state[gdx.devices[0]._charger_state])\r\n device_info.append(gdx.devices[0]._rssi)\r\n return device_info \r\n\r\n # If there is more than one device connected, package the info in a 2D list [[device0 info], [device1 info]]\r\n else:\r\n i = 0\r\n while i < len(gdx.devices):\r\n one_device_info = []\r\n one_device_info.append(gdx.devices[i]._name)\r\n one_device_info.append(gdx.devices[i]._description)\r\n one_device_info.append(gdx.devices[i]._battery_level_percent) \r\n charger_state = [\"Idle\", \"Charging\", \"Complete\", \"Error\"] \r\n one_device_info.append(charger_state[gdx.devices[i]._charger_state])\r\n one_device_info.append(gdx.devices[i]._rssi)\r\n i+=1\r\n device_info.append(one_device_info)\r\n return device_info", "title": "" }, { "docid": "74deca1bd26f9581270803cda8e6c6ef", "score": "0.49252895", "text": "def find_all_phy(self):\n return_value = {}\n for exploit in self._all_exploits:\n if \"PHY\" in exploit.ports.keys():\n return_value.setdefault(exploit.app, []).append(exploit)\n return return_value", "title": "" }, { "docid": "85069448ce477e298fdcac52421c0b8b", "score": "0.49157274", "text": "def getInfo(self) -> str:\n return self.info", "title": "" }, { "docid": "562ebd55455ac620d2553de9cd3bdb2f", "score": "0.491485", "text": "def read_metadata(self):\n\t\tprint('{}\\t{}\\t{}\\t{}'.format(self.organism, self.strain_name, self.molecule_name, self.locus), end='\\n')", "title": "" }, { "docid": "1db804e1fe9a0bc2fbdfb3cfce0b4f9a", "score": "0.49142927", "text": "def find_info(self, name):\n if name in self.list_weapons():\n return self.weapon_info(name)\n if name in self.list_armor():\n return self.armor_info(name)\n if name in self.list_accessories():\n return self.accessory_info(name)\n if name in self.list_items():\n return self.item_info(name)\n if name in self.list_spells():\n return self.spell_info(name)\n raise CommandError(f\"Couldn't find anything by the name {name} for sale\")", "title": "" }, { "docid": "485d9f51693fd679b3e7738f50ee8ff5", "score": "0.49142113", "text": "def get_data_info(self):\n\t\traise NotImplementedError", "title": "" } ]
b95fb77af48d39a5a1e3ddde474dcc60
Declare victory to peers
[ { "docid": "26d3ea3b6b7348df1fe87db9bbe9cb85", "score": "0.68546593", "text": "def declare_victory(self, reason):\n if(self.bully == self.pid):\n print(\"The Leader is: {} as {}\".format(self.pr_leader(),reason))\n for member in self.members.items():\n pid , addr = member\n if self.listener_addr != addr:\n peer = self.get_connection(addr)\n self.states[peer] = State.SEND_VICTORY\n self.send_message(peer)", "title": "" } ]
[ { "docid": "37c5d4f5f7cde9c85b7343d837efc0b6", "score": "0.5788176", "text": "def vending_machines(self):", "title": "" }, { "docid": "eaddc986737d9d4ec7daccf067c3d1ea", "score": "0.544734", "text": "def __init__(self, vending_machine):\n self.vending_machine = vending_machine", "title": "" }, { "docid": "d52c72b10e772bd3f0af26574c58185a", "score": "0.5434333", "text": "def allocate(self, guest):\n self.free = False\n self.guest = guest\n self.occupy_time = time.time()", "title": "" }, { "docid": "027be273a74c8ad9ae3a396b1b661b45", "score": "0.54029024", "text": "def victory(self, agent: Adventurer):\n self.won, self.over = True, True", "title": "" }, { "docid": "aa06da04e80305d3f911888d1e613a4d", "score": "0.52702147", "text": "def create_veggies(self):\n raise NotImplementedError", "title": "" }, { "docid": "d002ae4350a148a55139379638b38b62", "score": "0.5249754", "text": "def __init__(self):\n self.game = TicTacToe()\n self.nodes = {}\n self.add_node(self.game.get_state())\n self.cp_uct = 2.0 ** 0.5", "title": "" }, { "docid": "c4ae41f331e301a182a7df3122d05e0d", "score": "0.5197658", "text": "def __init__(self):\n self.victories = [[0,1,2],[3,4,5],[6,7,8],[0,3,6],[1,4,7],[2,5,8],[0,4,8],[2,4,6]]\n self.reset_board()", "title": "" }, { "docid": "148f63591768c801e74283f1a17c54c0", "score": "0.5156699", "text": "def init(self) :\n self.players = []\n self.teams = ()\n self.notify(u\"Partie démarrée, qui veut jouer ?\")\n self.set_state('wait_player')", "title": "" }, { "docid": "fe534f169f9312b58f2d736748d1e04c", "score": "0.5148556", "text": "def __init__(self):\r\n self._inventory = dict()\r\n self._membership = dict()", "title": "" }, { "docid": "440f41db67dae18b2e6c527966ea1bef", "score": "0.51095974", "text": "def __init__(self, player):\n if player == \"upper\":\n self.us = \"UPPER\"\n self.opponent = \"LOWER\"\n else:\n self.us = \"LOWER\"\n self.opponent = \"UPPER\"\n\n empty_start = {'s':[],'p':[], 'r':[]}\n self.own_board = Board(empty_start, empty_start, 9, 9, 0, None)\n self.ai = MCTSNode(self.own_board, self.us, None, None)", "title": "" }, { "docid": "1921bf7103eed74819e298293485a705", "score": "0.5108969", "text": "def vassal(player, other_players, board):\n\tpass", "title": "" }, { "docid": "2a4e4bd839aa0b8438103695d6bd3a85", "score": "0.5095939", "text": "def __init__(self):\n self._actors = {}", "title": "" }, { "docid": "cb075e48899012c9424100a77d8cc81f", "score": "0.50862426", "text": "def newplayer(p):\n makeplayercolors(p)\n statbrd.add_player(p)", "title": "" }, { "docid": "0b1e6e06b7c4aea9d663c6ffd320f3cf", "score": "0.50770074", "text": "def __init__(self, host, password, name, max_players,\n ):\n self.host = host\n\t\tcurrent_age = 0\n\t\tcurrent_turn = 0\n self.deck = BuildDeck(current_age)\n self.password = password\n self.name = name\n self.max_players = max_players\n\n\n\n self.players = {}\n self.order = []\n self.perma_banned = set()\n\n self.InitGame()\n self.Ping()", "title": "" }, { "docid": "6f9db54da66b1a2a79a56193416120f4", "score": "0.5068513", "text": "def disk_graph_captors(self, instance):\n G = nx.Graph()\n points_to_communicate_with = self.list_captors + [(0, 0)]\n G.add_nodes_from([(e[0], e[1]) for e in points_to_communicate_with])\n\n E_com = instance.neighbours_Rcom\n for u in points_to_communicate_with:\n for v in E_com[u]:\n if v in points_to_communicate_with:\n G.add_edge((u[0], u[1]), (v[0], v[1]))\n # G.add_edges_from([((u[0], u[1]), (v[0], v[1])) for v in E_com[u]])\n self.disk_graph_com = G", "title": "" }, { "docid": 
"30ef04684779ee6a01f799d6dd307775", "score": "0.50671625", "text": "def assign_livingspace(self):\n fellows = self.fellows_list\n shuffle(fellows)\n for fellow in fellows:\n if fellow.wants_housing:\n livingspace = self.get_livingspace()\n if livingspace is not None:\n fellow.allocate_livingspace(livingspace)\n livingspace.add_roomie(fellow)\n else:\n self.unallocated.append(fellow)", "title": "" }, { "docid": "caec0e7840290469cb97e6ff24d0e447", "score": "0.5015634", "text": "def __init__(self):\n self.connections = {}\n self.reactions = set([])\n self.species = set([])", "title": "" }, { "docid": "c02393f52554e3f23ceb141367848d94", "score": "0.49830347", "text": "def __init__(self, n, id):\n self.vclock = [0] * n\n self.id = id", "title": "" }, { "docid": "4ec4ba94b0bb04d79e97d84c08cb0877", "score": "0.49657458", "text": "def __init__(self):\n # create a player Sprite using \"tank.png\"\n \n # create empty list called self.coins\n self.coins = []\n self.num_coins = 20\n \n # use a loop to create coins(use self.num_coins)", "title": "" }, { "docid": "edfecd0798a6022abadf4778d97902c6", "score": "0.49385074", "text": "def New_Game(board, players, victory):\n #print('Board:',board)\n #print()\n #print('Players:',players)\n for civ in players:\n for home in civ.start:\n for ter in board:\n if home==ter:\n ter.owner=civ\n #ter.test = 'yay'\n #print (ter.test)\n #print(board)\n turn = 1\n return take_turn(board,players,turn,victory,orders=[])", "title": "" }, { "docid": "c8b04afbce3701943eeeeeacf8c4ed9a", "score": "0.49276808", "text": "def attach(self):\n\n # create connection, reset id\n self.id = self._st.create_connection(\n self.id, \n from_id = self.from_id, \n to_id = self.to_id, \n to_route = self.to_route\n )", "title": "" }, { "docid": "17ae68814527bf32412aad10ddd87688", "score": "0.49262103", "text": "def _initialize_players(self):\n self.user_player_id = random.choice([1,2])\n self.computer_player_id = self.board.get_other_player_id(self.user_player_id)", "title": "" }, { "docid": "6ab3b98a841163eab2b179de339078d4", "score": "0.49251163", "text": "def create_tunnel(cave_from, cave_to):\n caves[cave_from].append(cave_to)\n caves[cave_to].append(cave_from)", "title": "" }, { "docid": "bd1f58990a148b149fb32aade67e3963", "score": "0.49125734", "text": "def __init__(self):\n this = _coin.new_SbDPViewVolume()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "title": "" }, { "docid": "35398544331988d6f5e696fd399eab1f", "score": "0.49042442", "text": "def _create_peering_interface(asys: UserAS, ixp: IXP):\n br = _get_peering_br(asys)\n ip = IXPMember.objects.filter(ixp=ixp, host=br.host).values_list('public_ip', flat=True)[0]\n port = _find_free_port(asys, ipaddress.ip_address(ip), 50000, 51000)\n return br.interfaces.create(public_ip=str(ip), public_port=port)", "title": "" }, { "docid": "3cf7b3ebb269a074a1e1e2259d372020", "score": "0.48977855", "text": "def __init__(self, name):\r\n \r\n RemoteObject.__init__(self, name)\r\n self.clients = []\r\n \r\n # prevents name clashes if multiple PypadServer instances are running\r\n # on same name server\r\n self.clientAccumulator = random.randint(0, 1000);", "title": "" }, { "docid": "8e70a19d0100495dcec79bd3c4cc52a3", "score": "0.48977092", "text": "def create_chain(chain_name, nickname):\n set_chain_name(chain_name)\n set_state('Creating chain ' + get_chain_name())\n kill_old_daemon()\n time.sleep(2)\n # call(\"firewall-cmd\", \"--permanent\", \"--zone=public\", \"--add-port=\" + port + \"/tcp\")\n # 
call(\"systemctl\", \"restart\", \"firewalld.service\")\n call('rm -rf /root/.multichain/' + chain_name, shell=True)\n call(\"multichain-util create \" + chain_name + \" -default-network-port=\" + default_chain_port + \" -default-rpc-port=\" + default_rpc_port + \" -anyone-can-connect=true -anyone-can-create=true -anyone-can-mine=true -anyone-can-receive=true\", shell=True)\n call(\"multichaind \" + chain_name + \" -daemon -autosubscribe=streams\", shell=True)\n\n time.sleep(5)\n\n apirpc = get_api()\n print('api :')\n print(apirpc)\n\n json_rep = apirpc.createkeypairs()\n address = json_rep[0]['address']\n pubkey = json_rep[0]['pubkey']\n privkey = json_rep[0]['privkey']\n\n print(apirpc.create(\"stream\", \"default_account\", False))\n print(apirpc.send(address, 0))\n\n hex_addr = address.encode(\"hex\")\n hex_priv = privkey.encode(\"hex\")\n\n time.sleep(2)\n\n print(apirpc.publish(\"default_account\", \"address\", hex_addr))\n print(apirpc.publish(\"default_account\", \"pubkey\", pubkey))\n print(apirpc.publish(\"default_account\", \"privkey\", hex_priv))\n\n print(apirpc.create(\"stream\", \"nickname_resolve\", True))\n\n time.sleep(2)\n\n hex_nick = nickname.encode(\"hex\")\n generate_key_pair()\n # a modifier avec les groupes :\n print(apirpc.publish(\"nickname_resolve\", pubkey, str(hex_nick)))\n # apirpc.publish(\"nickname_resolve\", \"pubkey\", pubkey)\n print('create chain finished')\n set_state('Connected to ' + chain_name)", "title": "" }, { "docid": "7aad567ec0c83b7e0cccf03a676cb56e", "score": "0.48771852", "text": "def __init__(self):\n self.vios_keyed = {} # vios-to-cluster information\n self.cluster_keyed = {} # cluster-to-vios mapping\n self.sequence_num = 0 # Topo collection number since process start\n self.hmc_list = None\n self.host_vios_sets = {} # host to vios map for SCG membership track", "title": "" }, { "docid": "6a55edcec27f7270da38707f2d9203de", "score": "0.48724848", "text": "def __init__(self):\n this = _coin.new_SoInteractionKit()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "title": "" }, { "docid": "8961ba3656abe11b36fffef529908e1b", "score": "0.48687115", "text": "def seed_universe(self):\r\n self.add_beacon()", "title": "" }, { "docid": "eb9d79c4ccaa858e266ab49cb5fe59b0", "score": "0.48639148", "text": "def __init__(self):\r\n\r\n self.tacks = []\r\n self.activation_flags = 0", "title": "" }, { "docid": "458f1b34994d1837275ec9e900d5cc7f", "score": "0.4854747", "text": "def __init__(self):\n self.passWins,self.passLosses=0,0\n self.dpWins,self.dpLosses,self.dpPushes=0,0,0", "title": "" }, { "docid": "9e7d9f4b2df0a3608f46f9553868f212", "score": "0.48542553", "text": "def set_partymon2():\n memory = get_memory()\n memory[0xdcd7] = 2\n memory[0xdcd9] = 0x7\n\n memory[0xdd0f] = 0x7\n memory[0xdd10] = 0x1\n\n # moves\n memory[0xdd11] = 0x1\n memory[0xdd12] = 0x2\n memory[0xdd13] = 0x3\n memory[0xdd14] = 0x4\n\n # id\n memory[0xdd15] = 0x1\n memory[0xdd16] = 0x2\n\n # experience\n memory[0xdd17] = 0x2\n memory[0xdd18] = 0x3\n memory[0xdd19] = 0x4\n\n # hp\n memory[0xdd1a] = 0x5\n memory[0xdd1b] = 0x6\n\n # current hp\n memory[0xdd31] = 0x10\n memory[0xdd32] = 0x25\n\n # max hp\n memory[0xdd33] = 0x10\n memory[0xdd34] = 0x40\n\n set_memory(memory)", "title": "" }, { "docid": "677336ec78763fd855b957fc7acfc84f", "score": "0.4852194", "text": "def addPeer(self,ip,port):\n self.state.addPeer(ip,port)", "title": "" }, { "docid": "3bc1d7798573ff098d6d6f5882f32dff", "score": "0.4851634", "text": "def __enter__(self):\n 
self.mute()", "title": "" }, { "docid": "4c7c7f05bee932a9ce9928737dbcf67c", "score": "0.48372042", "text": "def create_peers():\n\thost_num = args.k ** 3 /4\n\tHostList = create_hostlist(host_num)\n\ttraffics = ['randoms', 'stag_0.1_0.2', 'stag_0.2_0.3', 'stag_0.3_0.3', 'stag_0.4_0.3', 'stride_1', 'stride_2',\n\t\t\t\t'stride_4', 'stride_8', 'hotspot']\n\n\tfor i in range(len(traffics)):\n\t\tif traffics[i].startswith('stag'):\n\t\t\tnumber = 1\n\t\t\ttraffics_prob = traffics[i].split('_')\n\t\t\tedge_prob = float(traffics_prob[1])\n\t\t\tpod_prob = float(traffics_prob[2])\n\t\t\tfile_name = './peers/stag_0' + str(int(edge_prob * 10)) + '_0' + str(int(pod_prob * 10)) + '.py'\n\t\t\tfile_save = open(file_name, 'w')\n\t\t\t# while number <= 10:\n\t\t\tflows_peers = create_stag_peers(HostList, edge_prob, pod_prob, args.flows_num_per_host)\n\n\t\t\t# Shuffle the sequence of the flows_peers.\n\t\t\trandom.shuffle(flows_peers)\n\n\t\t\t# Write flows_peers into a file for reuse.\n\t\t\tfile_save.write('peers' + str(number) + '=%s\\n' % flows_peers)\n\t\t\t# number = number + 1\n\t\t\tfile_save.close()\n\t\telif traffics[i].startswith('stride'):\n\t\t\tnumber = 1\n\t\t\tnumbers = traffics[i].split('_')[1]\n\t\t\tfile_name = './peers/' + traffics[i] + '.py'\n\t\t\tfile_save = open(file_name, 'w')\n\t\t\tflows_peers = create_stride_peers(HostList, args.flows_num_per_host, host_num, int(numbers))\n\n\t\t\trandom.shuffle(flows_peers)\n\n\t\t\tfile_save.write('peers' + str(number) + '=%s\\n' % flows_peers)\n\t\t\tfile_save.close()\n\t\telif traffics[i].startswith('hotspot'):\n\t\t\tnumber = 1\n\t\t\tnumbers = host_num / 8\n\t\t\tfile_name = './peers/' + traffics[i] + '.py'\n\t\t\tfile_save = open(file_name, 'w')\n\t\t\tflows_peers = create_hotspot_peers(HostList, args.flows_num_per_host, host_num, numbers)\n\n\t\t\trandom.shuffle(flows_peers)\n\n\t\t\tfile_save.write('peers' + str(number) + '=%s\\n' % flows_peers)\n\t\t\tfile_save.close()\n\t\telse:\n\t\t\tnumber = 1\n\t\t\tfile_name = './peers/' + traffics[i] + '.py'\n\t\t\tfile_save = open(file_name, 'w')\n\t\t\t# while number <= 10:\n\t\t\tflows_peers = create_random_peers(HostList, args.flows_num_per_host)\n\n\t\t\t# Shuffle the sequence of the flows_peers.\n\t\t\trandom.shuffle(flows_peers)\n\n\t\t\t# Write flows_peers into a file for reuse.\n\t\t\tfile_save.write('peers' + str(number) + '=%s\\n' % flows_peers)\n\t\t\t# number = number + 1\n\t\t\tfile_save.close()", "title": "" }, { "docid": "bf2c7ee4e078f6d2efc74136c0f679fb", "score": "0.48332813", "text": "def add() -> None:\n size = get_network_size(ROOT_HOST)\n port = 10000 + size\n peer = Peer(f\"{ROOT_HOST}:{port}\", f\"{ROOT_HOST}:10000\")\n peer.main()", "title": "" }, { "docid": "768a3fa4b0b501aa689811de74ef65d1", "score": "0.48268095", "text": "def __init__(self):\n self.state = list(' ')\n self.players = Player()\n self.turn = 0", "title": "" }, { "docid": "0ac4489f263a2b2b31fb09dca1fe1b0a", "score": "0.48214915", "text": "def __init__(self):\n self.adjencies = {}", "title": "" }, { "docid": "0ac4489f263a2b2b31fb09dca1fe1b0a", "score": "0.48214915", "text": "def __init__(self):\n self.adjencies = {}", "title": "" }, { "docid": "03ac0e8b2736d91f681aaa8b35c9df65", "score": "0.48091736", "text": "def __init__(self, budget: int) -> None:\n self.budget = budget\n self.adapter = PlayerAdapter()\n self.positions = {\n 1: [\"GK\"],\n 2: [\"LB\", \"RB\", \"LWB\", \"RWB\"],\n 3: [\"CB\", \"LCB\", \"RCB\", \"CDM\", \"LDM\",\n \"RDM\", \"CM\", \"LCM\", \"RCM\", \"LM\", \"RM\"],\n 5: [\"CAM\", 
\"LAM\", \"RAM\", \"LWF\", \"RWF\", \"CF\", \"LCF\", \"RCF\"]\n }", "title": "" }, { "docid": "a26d9984cdf9a37e187d209e1e78e650", "score": "0.48009232", "text": "def slots(self):\n pass", "title": "" }, { "docid": "391f6eea965767cf32502f002402b829", "score": "0.47942972", "text": "def spawn_vehicle(self) -> None:\n\n random.choice(self.access_points).generate()", "title": "" }, { "docid": "8404f8221b26f86b0d5b76d33c9f7025", "score": "0.47897586", "text": "def __init__(self, id, x, y, capacity):\n self.id = id\n self.x = x\n self.y = y\n self.capacity = capacity\n self.connections = []", "title": "" }, { "docid": "ddd73829e5e73488ec11ad8a40b50a23", "score": "0.4783532", "text": "def __init__(self, owner):\n self.owner = owner\n self.cards = {}", "title": "" }, { "docid": "484bd3778561b414c79fc9537e9deb46", "score": "0.4781063", "text": "def __init__(self,ip,port,nick,ident,realname,channels):\n self.S.connect((ip,port))\n self.S.send(bytes(\"NICK %s\\r\\n\" % nick, \"UTF-8\"))\n self.S.send(bytes(\"USER %s %s meow :%s\\r\\n\" % (ident, ip, realname), \"UTF-8\"))\n for channel in channels:\n self.S.send(bytes(\"JOIN %s\\r\\n\" % channel, \"UTF-8\"))\n self.PrintUsage(channel)\n self.CHANNELS = channels[:]", "title": "" }, { "docid": "b1516ddc57f1be58f39112aff610c8ae", "score": "0.47796428", "text": "def __init__(self):\n self.player = 1\n self.players = 'xo'", "title": "" }, { "docid": "2db546abd22d3dcb7a607e52dccee174", "score": "0.4776862", "text": "def createPlayer():\n for i in range(2):\n playerOne.hand.append(currentDeck.deal())", "title": "" }, { "docid": "0d4e1dd526db426624d013084d3e0d57", "score": "0.4763311", "text": "def __init__(self):\n self.game_handler = UniqueIDGenerator()\n self.player_handler = UniqueIDGenerator()\n self.rooms = {}", "title": "" }, { "docid": "81124e45bc4d9780d9728e764f0148c1", "score": "0.47614095", "text": "def __init__(self, stack_size=100, name='TestPlayer'):\n self.stack = stack_size\n self.seat = None\n self.equity_alive = 0\n self.actions = []\n self.last_action_in_stage = ''\n self.temp_stack = []\n self.name = name\n self.agent_obj = None", "title": "" }, { "docid": "b6b21573c52cf53eb3f07a8df1fe1bbc", "score": "0.47486138", "text": "def __init__(self):\n self.player_pool = []\n self.sessions = {}\n self.gamepool = GamePool()", "title": "" }, { "docid": "ce2510221d1cc2fad7bc536a18781648", "score": "0.47420117", "text": "def __init__(self, V):\n\n # V represent sthe number of vertices\n # Why do we nede V?\n # We will be using a boolean list o represent whether a node has been\n # visited or not\n # So we need to know the total number of nodes in a graph\n self.V = V\n\n # Create an adjacency list in the form of dictionary for\n # this particular node\n\n self.adj_list = {}", "title": "" }, { "docid": "c6c0af2adba4edfccd9e9917a355d996", "score": "0.47391665", "text": "def create_v_network(self):\n\t\traise NotImplementedError", "title": "" }, { "docid": "54ebebbca0256d345c2849bf603c6fec", "score": "0.47372463", "text": "def __init__(self, player1, player2):\r\n\t\t# Initialize the game ID\r\n\t\tself.id = NimGame.next_game\r\n\t\t# Increment the class game ID for the next instance\r\n\t\tNimGame.next_game += 1\r\n\t\t# Choose a random number of sets\r\n\t\tm = random.randint(NIM_MIN_SETS, NIM_MAX_SETS)\r\n\t\t# Choose a random amount for each set\r\n\t\tself.sets = [random.randint(NIM_MIN_OBJECTS, NIM_MAX_OBJECTS)\r\n\t\t\tfor _ in range(m)]\r\n\t\t# Player 1 has the first turn\r\n\t\tself.player1 = self.playing = player1\r\n\t\t# Player 2 waits for 
their turn\r\n\t\tself.player2 = self.waiting = player2\r\n\t\t# Initially no users are observing the game\r\n\t\tself.observers = set()", "title": "" }, { "docid": "f3f63cdb2a701f9a66d57c4505aa24ac", "score": "0.4730368", "text": "def peers_save(self, peer_ip):\n self.peer_dict[peer_ip] = self.config.port", "title": "" }, { "docid": "6e2e002c77a9f3103b6b81c626b6afba", "score": "0.47285655", "text": "def init(self):\r\n self.lives = 3\r\n self.coin_count = 0\r\n self.score = 0\r\n self.sprites = [self]\r\n self.levels[self.level]()\r\n self.state = self.loop", "title": "" }, { "docid": "7f2cb8d1314ec16f3f9a514ab0c512c7", "score": "0.4728307", "text": "def __init__(self):\n # Host - (Interactor,Domain) - Tuple of Constraints in form (positive, negative)\n self.transitions = defaultdict(lambda: defaultdict(lambda: defaultdict(tuple)))", "title": "" }, { "docid": "d78c0e325a4af0b2d6d23fd8b4099bb9", "score": "0.47253484", "text": "def ankleinit(self):\n for i in range(self.jointNum):\n _, returnHandle = vrep.simxGetObjectHandle(self.clientID, self.jointName + str(i+1), vrep.simx_opmode_blocking) \n self.jointHandle[i] = returnHandle\n vrep.simxSynchronousTrigger(self.clientID) # 让仿真走一步 \n for i in range(self.jointNum):\n vrep.simxPauseCommunication(self.clientID, True) \n vrep.simxSetJointTargetPosition(self.clientID, self.jointHandle[i],self.jointangel[i]/self.RAD2DEG, vrep.simx_opmode_oneshot) #设置关节角\n vrep.simxPauseCommunication(self.clientID, False)\n vrep.simxSynchronousTrigger(self.clientID) # 进行下一步 \n vrep.simxGetPingTime(self.clientID) # 使得该仿真步走完\n _, self.position = vrep.simxGetObjectPosition(self.clientID, self.get_handle(), -1, vrep.simx_opmode_oneshot_wait)", "title": "" }, { "docid": "23610768563a9edf13408796f482a54f", "score": "0.47209105", "text": "def reserve():", "title": "" }, { "docid": "3826e824afb72dde9931d742e244ff43", "score": "0.4711834", "text": "def __init__(self):\n self.incidents = []", "title": "" }, { "docid": "e9ff21f5a578b9eb11b10f13391ba3fb", "score": "0.4710447", "text": "def __init__(self):\n self._key = []\n for _ in range(4):\n self._key.append(random.randint(1, 9))\n self._is_guessed = False", "title": "" }, { "docid": "8d30709e8c56070b73f7aede7084a122", "score": "0.4697127", "text": "def __init__(self):\n this = _coin.new_SoVRMLCone()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "title": "" }, { "docid": "8ce4d60afd757ed8ba84ad4df6424818", "score": "0.4691951", "text": "def __init__(self):\n this = _coin.new_SoNodeKit()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "title": "" }, { "docid": "c616c25b7f4b57b17f8d03fab2ec3357", "score": "0.46899736", "text": "def add_force_abstain(var: GameState, votee: User) -> None:\n if votee not in get_players(var):\n return\n _add_count(var, votee, -1)", "title": "" }, { "docid": "1f87275902adba6b95235c8678d5c476", "score": "0.4683481", "text": "def __init__(self):\n this = _coin.new_SoInteraction()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "title": "" }, { "docid": "0012c123ec81b51be0a6b7914f608c11", "score": "0.46829152", "text": "def __init__(self):\n self.patientsWaiting = [] # list of patients in the waiting room", "title": "" }, { "docid": "4d836391c07700cf2bd7489796ef7a27", "score": "0.46822906", "text": "def __init__(self):\n this = _coin.new_SoToVRML2Action()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "title": "" }, { "docid": 
"5609b266c93b458aba722f63d6549744", "score": "0.46821785", "text": "def kod_to_reserves(self, player):\n self.get_knocked_out(player.team).remove(player)\n self.get_reserves(player.team).append(player)\n player.state.knocked_out = False\n player.state.up = True", "title": "" }, { "docid": "65cc8d928383814706210ad7544f6aab", "score": "0.4679968", "text": "def __init__(self, ride):\n Ticket.id += 1\n self.id = Ticket.id\n self.activated = False\n self.worker = None\n self.ride = ride", "title": "" }, { "docid": "0c2342d6b2945f88c89afbc0499a8655", "score": "0.46771556", "text": "def __init__(self):\n this = _coin.new_SoToVRMLAction()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "title": "" }, { "docid": "31cb8bf9476cddab3e7c4361eb277afd", "score": "0.46701923", "text": "def __init__(self, ahkl, ohkl):\n self._ahkl = ahkl\n self._ohkl = ohkl\n self.flipped = False\n self.connections = []", "title": "" }, { "docid": "c6d3beb66caec4c5588ed92f36f0ddd6", "score": "0.46636695", "text": "def at_player_creation(self):\r\n # set an (empty) attribute holding the characters this player has\r\n lockstring = \"attrread:perm(Admins);attredit:perm(Admins);attrcreate:perm(Admins)\"\r\n self.attributes.add(\"_playable_characters\", [], lockstring=lockstring)", "title": "" }, { "docid": "a18ad3f2ffcced3e98763f6e9cc71528", "score": "0.4661388", "text": "def __init__(self, password, hostPass):\n self.password = password\n self.playerPasswords = [hostPass]\n self.infoList = []\n self.currentState = {}", "title": "" }, { "docid": "eb337850b16e008b8fc6bbd699f9e535", "score": "0.46577626", "text": "def __init__(self, name, pts):\n self.name = name\n self.pts = pts\n self.has_confirmed_wifi = False", "title": "" }, { "docid": "67737ae3c253aa8aca3c0533126237ae", "score": "0.46567342", "text": "def __init__(self):\n this = _coin.new_SbViewVolume()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "title": "" }, { "docid": "a5ad9886ccfef94cb125e2b63ee50474", "score": "0.46525747", "text": "def __init__(self: \"Keeper\") -> \"Keeper\":\n self.player = 0\n self.points = 0\n self.score = [0, 0]", "title": "" }, { "docid": "d47a7754e021ea0d76dcb628dfdc5b1d", "score": "0.4647284", "text": "def __init__(self, v_in):\n self.V = v_in\n self.E = 0\n self.adj = []\n for i in range(v_in):\n self.adj.append([])", "title": "" }, { "docid": "8de737c57dc218c45b19da5eb270b29b", "score": "0.46449772", "text": "def reserve(self):", "title": "" }, { "docid": "a02231142bf788b07ff6415058b216a1", "score": "0.46412405", "text": "def icpw_rebirth(self):\n\n item = NodeRebirthQueueItem()\n self.icpw_enqueue_command(item)", "title": "" }, { "docid": "8477a38872e5fa1731414261f3620772", "score": "0.46372256", "text": "def __init__(self, owner, id):\n self._id = id\n self._owner = owner\n self._weight = 0\n owner._elements[id] = self # cache to prevent duplicate instanciation", "title": "" }, { "docid": "0763244d1e85901973c22b78352c7054", "score": "0.46346158", "text": "def __init__(self, players):\n self.players = players\n self.winningCombo = 3\n self.size = 3\n self.init_board()\n self.nplayer = 1", "title": "" }, { "docid": "0b44f25c007c18ada4cdbb1fd0330826", "score": "0.46292433", "text": "def setNewPeerID(self, prefix = clientPrefix, ver = clientVersion):\n peerID = newPeerID(prefix, ver)\n self.setPeerID(peerID)", "title": "" }, { "docid": "31fcff694bc417047cf954286df0df1a", "score": "0.46258318", "text": "def chassis():", "title": "" }, { "docid": 
"e9c80faf2e0704140daf469e0ffc4621", "score": "0.4623515", "text": "def __init__(self, v, edges, directed=False):\n self.vertices = [None]*v \n for index in range(v): # create vertices\n self.vertices[index] = Vertex(index)\n for edge in edges: # create edges\n if directed:\n self.create_edge(edge, True)\n else: # undirected edges\n self.create_edge(edge)", "title": "" }, { "docid": "3c6ba8543905d179aa2f09ec4fae8a07", "score": "0.46224126", "text": "def setup_interaction_memory(self):\n self.interaction_memory = interactionmemory.InteractionMemory()", "title": "" }, { "docid": "a488c21ed2e28c4d96417a30e0473f32", "score": "0.46184534", "text": "def __init__( self, listen, wait_time, public_name, privkey,\n\t logger=logging.getLogger(\"myopic_notary\") ):\n\n\t\tself.wait_time = wait_time\n\t\tself.public_name = public_name\n\n\t\tself._server = socket.socket()\n\t\tself._server.bind(listen)\n\t\tself._server.listen(8)\n\n\t\tself._logger = logger\n\n\t\tself.privkey = Blinder.deserialize(privkey)\n\n\t\tself.clients = set()", "title": "" }, { "docid": "f2a5dbb3baf90b11a6e0da977742e39f", "score": "0.46147683", "text": "def _InitiatePeripherals(self):\n for key in self._KEY_PARAMS.keys():\n self._keys[key] = gpio.Key(\n self._KEY_PARAMS[key][1], key, self._KEY_PARAMS[key][0], self._queue)\n for led in self._LED_PARAMS.keys():\n self._leds[led] = gpio.Led(self._LED_PARAMS[led])", "title": "" }, { "docid": "d49afbbd8dc129cbe2940eecaba87161", "score": "0.4610189", "text": "def init_choice(self) :\n\n self.deck = Deck()\n self.player_idx = 0 # Current player\n self.bidder = None\n self.trump_suit = None\n for player in self.players :\n player.cards = []\n\n self.notify_table()\n\n for j in self.players :\n for _ in range(5) :\n j.cards.append(self.deck.cards.pop())\n\n self.propos = self.deck.cards.pop()\n self.notify(u\"Carte proposée : @\", (self.propos,))\n self.notify_cards()\n self.set_state('wait_choice')", "title": "" }, { "docid": "637693559f44782f709ca9d824e58b40", "score": "0.4609979", "text": "def hci_cm_bv_01_c(transport, upperTester, lowerTester, trace):\n RPAs = [ ResolvableAddresses( transport, upperTester, trace, upperIRK ), ResolvableAddresses( transport, lowerTester, trace, lowerIRK ) ];\n ownAddress = Address( SimpleAddressType.PUBLIC, 0x123456789ABC );\n peerAddress = Address( SimpleAddressType.PUBLIC, 0x456789ABCDEF );\n success = RPAs[upperTester].clear() and RPAs[lowerTester].clear();\n success = success and RPAs[upperTester].add( peerAddress, lowerIRK );\n success = success and RPAs[lowerTester].add( ownAddress, upperIRK );\n\n \"\"\"\n Set resolvable private address timeout in seconds ( sixty seconds )\n \"\"\"\n success = success and RPAs[upperTester].timeout(60) and RPAs[lowerTester].timeout(60);\n success = success and RPAs[upperTester].enable() and RPAs[lowerTester].enable();\n\n for iutRole in [ Role.CENTRAL, Role.PERIPHERAL ]:\n ownAddress = Address( ExtendedAddressType.RESOLVABLE_OR_PUBLIC, 0x456789ABCDEF if iutRole is Role.CENTRAL else 0x123456789ABC);\n peerAddress = Address( SimpleAddressType.PUBLIC, 0x123456789ABC if iutRole is Role.CENTRAL else 0x456789ABCDEF);\n if iutRole == Role.CENTRAL:\n advertiser = Advertiser(transport, lowerTester, trace, AdvertiseChannel.ALL_CHANNELS, Advertising.CONNECTABLE_LDC_DIRECTED, ownAddress, peerAddress);\n else:\n advertiser = Advertiser(transport, upperTester, trace, AdvertiseChannel.ALL_CHANNELS, Advertising.CONNECTABLE_LDC_DIRECTED, ownAddress, peerAddress);\n advertiser.responseData = [ 0x04, 0x09 ] + [ ord(char) for char 
in \"IUT\" ];\n\n initiatorAddress = Address( ExtendedAddressType.RESOLVABLE_OR_PUBLIC );\n if iutRole == Role.CENTRAL:\n initiator = Initiator(transport, upperTester, lowerTester, trace, initiatorAddress, Address( IdentityAddressType.PUBLIC_IDENTITY, toNumber(ownAddress.address) ));\n else:\n initiator = Initiator(transport, lowerTester, upperTester, trace, initiatorAddress, Address( IdentityAddressType.PUBLIC_IDENTITY, toNumber(ownAddress.address) ));\n success = success and advertiser.enable();\n\n connected = initiator.connect();\n success = success and connected;\n\n peerAddress = Address( SimpleAddressType.PUBLIC, 0x456789ABCDEF );\n status, RPA = le_read_peer_resolvable_address(transport, upperTester, peerAddress.type, peerAddress.address, 100);\n trace.trace(6, \"LE Read Peer Resolvable Address Command returns status: 0x%02X RPA: %s\" % (status, formatAddress(RPA)));\n success = success and __check_command_complete_event(transport, upperTester, trace) and (status == 0);\n\n if iutRole == Role.CENTRAL:\n success = success and (initiator.peerRPA() == RPA);\n if initiator.peerRPA() != RPA:\n print((initiator.peerRPA()));\n print(RPA);\n trace.trace(5, \"Expected: %s Received: %s\" % (Address(None, initiator.peerRPA()), Address(None, RPA)));\n else:\n success = success and (initiator.localRPA() == RPA);\n if initiator.localRPA() != RPA:\n trace.trace(5, \"Expected: %s Received: %s\" % (Address(None, initiator.localRPA()), Address(None, RPA)));\n\n transport.wait(200);\n\n if connected:\n connected = not initiator.disconnect(0x13);\n success = success and not connected;\n\n return success;", "title": "" }, { "docid": "97d40005506d8d6959a5b08ac7ee9b85", "score": "0.46038154", "text": "def __init__(self):\n PukeGameBase.__init__(self)\n p1 = Landlord(3, \"landlord\")\n p2 = Peasant(3, \"peasant1\")\n p3 = Peasant(3, \"peasant2\")\n self.players = [p1, p2, p3]\n self.cards = [x for x in range(1, 55)]", "title": "" }, { "docid": "690fe7f92cafa76505f8e1c2fa4a5e0a", "score": "0.4603633", "text": "def __init__(self, name):\n\t\tself.ID=name\n\t\tself.neighbors=[]\n\t\tself.edges=[]\n\t\tself.degree= 0\n\t\tself.chan=0", "title": "" }, { "docid": "f636269600ffa68043d72888d47ca6a4", "score": "0.4602039", "text": "def __init__(self, players): \n self._players = players", "title": "" }, { "docid": "db7a82014eb6dc6b4b2204a543e92784", "score": "0.45901215", "text": "def create_network(voting_history:VotingHistory, selection:list) -> nx.Graph:\n\n\n print(\"create network ...\", flush = True)\n division_nodemap = {}\n mp_nodemap = {}\n\n mp_count = 1e6\n\n G = nx.Graph() \n for i, (divisionnr, division) in tqdm.tqdm(enumerate(sorted(voting_history.divisions.items(), key = lambda x:x[0]))):\n if not int(divisionnr) in selection:\n continue\n G.add_node(int(i), type = 'division', division_nr = divisionnr, date = division['date'], title = division['title'], aye_count = division['aye_count'], noes_count = division['noes_count'])\n division_nodemap[divisionnr] = i\n\n for vote_id, vote in division['votes_raw'].items():\n mp = vote[0].strip('\"')\n # print(mp)\n if not has_node(G, 'name', mp):\n G.add_node(int(mp_count), type = 'mep', name = mp, party = vote[1], constituency = vote[2])\n # parties.add(vote[1])\n\n mp_nodemap[mp] = mp_count\n mp_count += 1\n # G.add_node() \n \n if vote[3].lower() == 'aye':\n G.add_edge(mp_nodemap[mp], division_nodemap[divisionnr])\n\n # print(vote)\n print(mp_count-1e6)\n\n\n return G, division_nodemap, mp_nodemap", "title": "" }, { "docid": 
"af9fc2915ad93b8fae72a43e17659c6b", "score": "0.45880422", "text": "def _init(self):\n self.state = {\n \"turn\":1,\n \"action\":None,\n \"history\":{}, # For slots that have been informed.\n \"request_slots\":{}, # For slots that user requested in this turn.\n \"inform_slots\":{}, # For slots that belong to goal[\"request_slots\"] or other slots not in explicit/implicit_inform_slots.\n \"explicit_inform_slots\":{}, # For slots that belong to goal[\"explicit_inform_slots\"]\n \"implicit_inform_slots\":{}, # For slots that belong to goal[\"implicit_inform_slots\"]\n \"rest_slots\":{} # For slots that have not been informed.\n }\n if self.parameter.get(\"train_mode\") is True:\n self.goal = random.choice(self.goal_set[\"train\"])\n else:\n self.goal = random.choice(self.goal_set[\"test\"])\n self.episode_over = False\n self.dialogue_status = dialogue_configuration.NOT_COME_YET\n self.constraint_check = dialogue_configuration.CONSTRAINT_CHECK_FAILURE", "title": "" }, { "docid": "58a5aa6cddd33c7d52a13adc08900a56", "score": "0.45877108", "text": "def __init__(self):\n self._MEMBERS = []\n self._PARTNERS = []", "title": "" }, { "docid": "b80a12a85724c0927e16a7187c8289e3", "score": "0.4581407", "text": "def __init__(self):\n self.wires = {}", "title": "" }, { "docid": "50fcaf766ae5d16616548e51e055bfd0", "score": "0.45811236", "text": "def create_player(self):\n self.player = Player()", "title": "" }, { "docid": "39969e8933b629d727f971138e73827c", "score": "0.4580574", "text": "def hci_cm_bv_02_c(transport, upperTester, lowerTester, trace):\n RPAs = [ ResolvableAddresses( transport, upperTester, trace, upperIRK ), ResolvableAddresses( transport, lowerTester, trace, lowerIRK ) ];\n ownAddress = Address( SimpleAddressType.PUBLIC, 0x123456789ABC );\n peerAddress = Address( SimpleAddressType.PUBLIC, 0x456789ABCDEF );\n success = RPAs[upperTester].clear() and RPAs[lowerTester].clear();\n success = success and RPAs[upperTester].add( peerAddress, lowerIRK );\n success = success and RPAs[lowerTester].add( ownAddress, upperIRK );\n\n \"\"\"\n Set resolvable private address timeout in seconds ( sixty seconds )\n \"\"\"\n success = success and RPAs[upperTester].timeout(60) and RPAs[lowerTester].timeout(60);\n success = success and RPAs[upperTester].enable() and RPAs[lowerTester].enable();\n\n for iutRole in [ Role.CENTRAL, Role.PERIPHERAL ]:\n ownAddress = Address( ExtendedAddressType.RESOLVABLE_OR_PUBLIC, 0x456789ABCDEF if iutRole is Role.CENTRAL else 0x123456789ABC);\n peerAddress = Address( SimpleAddressType.PUBLIC, 0x123456789ABC if iutRole is Role.CENTRAL else 0x456789ABCDEF);\n if iutRole == Role.CENTRAL:\n advertiser = Advertiser(transport, lowerTester, trace, AdvertiseChannel.ALL_CHANNELS, Advertising.CONNECTABLE_LDC_DIRECTED, ownAddress, peerAddress);\n else:\n advertiser = Advertiser(transport, upperTester, trace, AdvertiseChannel.ALL_CHANNELS, Advertising.CONNECTABLE_LDC_DIRECTED, ownAddress, peerAddress);\n advertiser.responseData = [ 0x04, 0x09 ] + [ ord(char) for char in \"IUT\" ];\n\n initiatorAddress = Address( ExtendedAddressType.RESOLVABLE_OR_PUBLIC );\n if iutRole == Role.CENTRAL:\n initiator = Initiator(transport, upperTester, lowerTester, trace, initiatorAddress, Address( IdentityAddressType.PUBLIC_IDENTITY, toNumber(ownAddress.address) ));\n else:\n initiator = Initiator(transport, lowerTester, upperTester, trace, initiatorAddress, Address( IdentityAddressType.PUBLIC_IDENTITY, toNumber(ownAddress.address) ));\n success = success and advertiser.enable();\n\n connected = 
initiator.connect();\n success = success and connected;\n\n peerAddress = Address( SimpleAddressType.PUBLIC, 0x456789ABCDEF );\n status, RPA = le_read_local_resolvable_address(transport, upperTester, peerAddress.type, peerAddress.address, 100);\n trace.trace(6, \"LE Read Local Resolvable Address Command returns status: 0x%02X RPA: %s\" % (status, formatAddress(RPA)));\n success = success and __check_command_complete_event(transport, upperTester, trace) and (status == 0);\n\n if iutRole == Role.CENTRAL:\n success = success and (initiator.localRPA() == RPA);\n else:\n success = success and (initiator.peerRPA() == RPA);\n\n transport.wait(200);\n\n if connected:\n connected = not initiator.disconnect(0x13);\n success = success and not connected;\n\n return success;", "title": "" }, { "docid": "ccf26e5594e0b80da9e573d05986de9a", "score": "0.45760354", "text": "def nextVictim(self):\n victims = self.opponents.keys()\n Debug(\"available victims: %s\" % victims, 5)\n if len(victims) == 0 or self.victim not in victims:\n self.victim = None\n for a in (1,2): #search list twice to wrap, since current victim might be last opponent in list\n for v in victims:\n if self.victim == None:\n self.victim = v\n return\n if v == self.victim:\n self.victim = None #will set victim and return on next go-round", "title": "" }, { "docid": "3fc5826a82f5dd991bee3c5abeac1d33", "score": "0.45751277", "text": "def setup_local_host(self):\n self.localhost = True\n left_side = DISPLAY_SIZE[\"x\"] * 0\n top_side = DISPLAY_SIZE[\"y\"] * 0\n player1 = self.struct_game_dict['Player'][0]\n player1.rect.x = left_side + 1000\n player1.rect.y = top_side + 200", "title": "" } ]
8c48f6972f194d991d3d4fd7a6dd0b07
generate string containing random combinations of % and ASCII symbols
[ { "docid": "bbc619b59abd48787cbab876d1c7b0ba", "score": "0.6609214", "text": "def get_exotic_str(size=10):\n name = u\"\"\n while len(name) < size:\n num = random.randint(1, 3)\n name = name + u\"%\" * num + get_random_str(2)\n return name", "title": "" } ]
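The positive passage above leans on a `get_random_str` helper that the snippet never defines. A minimal self-contained sketch of the same idea (interleaving short runs of `%` with random ASCII characters) is shown below; the helper body and the choice of `string.ascii_letters` are assumptions, not part of the original passage:

```python
import random
import string

def get_random_str(size=2):
    # Assumed helper (undefined in the passage): `size` random ASCII letters.
    return u"".join(random.choice(string.ascii_letters) for _ in range(size))

def get_exotic_str(size=10):
    # Mirror of the positive passage: grow the name by alternating
    # 1-3 '%' signs with two random letters until it reaches `size`.
    name = u""
    while len(name) < size:
        num = random.randint(1, 3)
        name = name + u"%" * num + get_random_str(2)
    return name

print(get_exotic_str())  # e.g. '%%Qk%%%Tw%Ab'
```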
[ { "docid": "78ebee311b61a78eeb0842c08318db9d", "score": "0.64790833", "text": "def generate_alphanumerics():\n spaces_before = random.randint(0, 10)\n spaces_after = random.randint(0,10)\n core_length = random.randint(2,20)\n core_string = ''.join(random.choices(string.ascii_letters +\n string.digits, k = core_length))\n final_string = ' '*spaces_before + core_string + ' '*spaces_after\n return final_string", "title": "" }, { "docid": "ea28388ee91caf2f203e63caefd38bee", "score": "0.6345081", "text": "def generate_code():\n import random\n alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'#'abcdefghijklmnopqrstuvwxyz'\n min = 5\n max = 15\n string=''\n for x in random.sample(alphabet,random.randint(min,max)):\n string+=x\n return string", "title": "" }, { "docid": "314592eaa9097bc2e3f224b535d571d7", "score": "0.63144404", "text": "def random_string(length=8):\n random_symbols = '1234567890bcdfghjklmnpqrstvwxyzBCDFGHJKLMNPQRSTVWXYZ'\n r = ''\n for i in range(length):\n r += random_symbols[random.randint(0,len(random_symbols)-1)]\n return r", "title": "" }, { "docid": "00c0212865b6fc3189ad594e339e55cc", "score": "0.62951916", "text": "def generate_ascii_name(first_symbol, i):\n strlen = random.randint(0, 20)\n return first_symbol + get_random_str(size=strlen) + str(i)", "title": "" }, { "docid": "e000b979bbd75c38efe42845b34a3e76", "score": "0.6274614", "text": "def random_encode(value):\n return \"\".join(random.choice([f\"&#{ord(c)};\", c]) for c in value)", "title": "" }, { "docid": "c4bba439d0144c7f389e54ee0752f19f", "score": "0.62649775", "text": "def urlgen(a,s,t): #a is link to append to, s is the size of the appended string, t is the type of string \n chars = ''\n for i in t:\n if i in string.ascii_lowercase:\n chars += string.ascii_lowercase\n if i in string.ascii_uppercase:\n chars += string.ascii_uppercase\n if i in string.digits:\n chars += string.digits\n if '{}' not in a:\n print('Missing parameters \"{}\" please try again')\n sys.exit()\n print(a.format(''.join(random.choice(chars) for _ in range(s))))", "title": "" }, { "docid": "5af053dde055ffbe231c4c71fd5e84da", "score": "0.6210412", "text": "def get_random_challenge():\n return ''.join(random.choice(_VALID_CHARS) for i in xrange(128))", "title": "" }, { "docid": "041efb5a4404ef35c0ad0fa2b168dfaf", "score": "0.61561", "text": "def generate_ascii() -> str:\n return string.ascii_letters[:]", "title": "" }, { "docid": "7c1258fbc30beb04bdf306288fda4403", "score": "0.6142848", "text": "def create_str(length, p):\n st = \"\"\n for _ in range(0, length):\n r = random.random()\n if r < p:\n st += \"1\"\n else:\n st += \"0\"\n return st", "title": "" }, { "docid": "9d353376adf5c527b6f87e2a27a8a646", "score": "0.6140158", "text": "def stringGenerator():\n\t\n\tcaptchaString = ''\n\n\tlength = setLength()\n\t\n\tfor i in range(length):\n\t\tchSet = setAlphabet() \t\t#chose a set from which character is to de derived.\n\t\tcaptchaString += setChar(chSet)\n\t\n\treturn captchaString", "title": "" }, { "docid": "cdff539722757610edb99e3f4857b255", "score": "0.60985726", "text": "def generate_share_code(length: int):\n letters_and_digits = string.ascii_letters + string.digits\n result_str = \"\".join(\n (random.choice(letters_and_digits) for _ in range(length))\n )\n return result_str", "title": "" }, { "docid": "b5b9d1ca41041f76535ed627fc1d4e58", "score": "0.6096137", "text": "def rand_string(min, max):\n\n int_gen = random.randint\n string_length = int_gen(min, max)\n return ''.join([chr(int_gen(ord('\\t'), ord('~')))\n for i in 
range(string_length)])", "title": "" }, { "docid": "eb412f4a87698e62f8340565a3742690", "score": "0.60695744", "text": "def generate_secret():\n\n return ''.join(random.choice(string.letters + string.digits)\n for i in xrange(20))", "title": "" }, { "docid": "c190df0ba92af6e0dcf8e4f012748a21", "score": "0.60648835", "text": "def rstr(stringLength=12):\n letters = string.ascii_lowercase\n return '___' + ''.join(random.choice(letters) for i in range(stringLength))", "title": "" }, { "docid": "5cd9258014ebd77a49857b4fa4fce389", "score": "0.6062691", "text": "def random_run_string():\n s = string.ascii_uppercase\n r = ''\n for i in np.random.random_integers(0, 25, 6).tolist():\n r = r + s[i]\n return r", "title": "" }, { "docid": "af30f2d24687d26955e51c5dc8f0314b", "score": "0.6051825", "text": "def generate_reference(length):\n return \"\".join((random.choice(\"ACGT\") for i in range(length)))", "title": "" }, { "docid": "646165dc831bb3c118f0c7a8918ccc5a", "score": "0.6049098", "text": "def get_ascii(average, dictionary):\n return random.choice(dictionary[str(int((average/256) * len(dictionary)))])", "title": "" }, { "docid": "0f361223659ba07a7d649e7d8206993b", "score": "0.6014157", "text": "def random_string(n):\n return \"\".join(chr(random.randrange(128)) for i in range(n))", "title": "" }, { "docid": "017da75b6dee0207e14608f6cbf4f5d8", "score": "0.60126317", "text": "def generate_random_text(length):\r\n text = []\r\n for num in range(length):\r\n text.append(alphabet[random.randint(0, 25)])\r\n return(''.join(text))", "title": "" }, { "docid": "add3cb328914151396aee1a899b59b60", "score": "0.6002253", "text": "def random_string( length = 6, prefix = \"\", legal_chars = legal_chars ):\n \n rnd_str = \"\"\n for i in range( length ): #For every iteration, a caracter is added to \"rnd_str\" until is has a spesific length \n rnd_str += random.choice( legal_chars ) # chooses a random character from \"legal_chars\"\n\n return prefix + rnd_str", "title": "" }, { "docid": "f1844b36a2b4481cc56e69a89d3005f8", "score": "0.5983737", "text": "def passgen(site=None,length:int=32,copy:bool=True) -> str:\r\n import random,string,os\r\n import pyperclip\r\n used = load(gallery)\r\n punctors = ''.join(i for i in string.punctuation if not i in '{}[]\\/|()')\r\n chars = ''.join(i for i in shuffle(string.ascii_uppercase + string.digits + string.ascii_lowercase + punctors))\r\n ranstr = ''.join([random.choice(chars) for i in range(length)])\r\n # [ranstr.insert(i+sum(format[:i+1]),sep) for i,j in enumerate(format[:-1])]\r\n pyperclip.copy(''.join(ranstr)) if copy else None\r\n return ''.join(ranstr)", "title": "" }, { "docid": "2e2d53d705e2652b01d5d7424cecdffa", "score": "0.59822035", "text": "def generate_str(limit):\n\n alpha = 'abcdefghijklmnopqrstuvwxyz '\n astring = ''\n\n for i in range(limit):\n astring = astring + random.choice(alpha)\n\n return astring", "title": "" }, { "docid": "86ba7ced02c3645c9ceb1124ded24aa0", "score": "0.59771174", "text": "def generate_random_string():\r\n\r\n characters = (\r\n [chr(i) for i in range(ord(\"A\"), ord(\"Z\") + 1)] +\r\n [chr(i) for i in range(ord(\"a\"), ord(\"z\") + 1)] +\r\n [chr(i) for i in range(ord(\"0\"), ord(\"9\") + 1)]\r\n )\r\n\r\n # Choose random length\r\n length = randint(10, 100)\r\n return \"\".join([choice(characters) for i in range(length)])", "title": "" }, { "docid": "ff3367361e8ae31898ca5543aefff915", "score": "0.59725565", "text": "def get_random(self,num):\n return ''.join(sample('abcdefghijklmnopqrstuvwxyz1234567890!',8))", "title": "" }, { 
"docid": "9dc773d8eb4bcf67e2139e2bde2942aa", "score": "0.5955868", "text": "def generate_authtoken():\n gen = random.SystemRandom()\n p0 = '%032x' % gen.getrandbits(128)\n return p0", "title": "" }, { "docid": "7ad91e49c93a8473c06df899b4eb30be", "score": "0.5933725", "text": "def generateRandomString(N):\n if N == 1:\n return chr(ord('a') + random.randint(0,26))\n \n if (N-1)%2 == 1:\n return 'a'*(N-1) + 'b'\n \n for i in range(N-1, -1, -1):\n path = []\n if dfs(N-i, i, path,{}):\n print(path)\n if len(path)<= 26:\n ans = ''\n for i,num in enumerate(path):\n ans += chr(ord('a') + i)*num\n return ans\n return ''", "title": "" }, { "docid": "d81f8014bba666bc418f1379f08c3224", "score": "0.5928779", "text": "def name(length=6):\n return \"\".join(np.random.choice([c for c in string.hexdigits], length))", "title": "" }, { "docid": "5407b756bb018dca00a135a58059ad29", "score": "0.5916562", "text": "def _random_hex() -> str:\n\n return \"#%02X%02X%02X\" % tuple(randint(0, 255) for _ in range(3))", "title": "" }, { "docid": "565a2d7348bd90ccd2061de454486394", "score": "0.5891229", "text": "def sample_random_string(n):\n current_char, random_string = '.', ''\n for i in xrange(n):\n probabilities = get_probability(current_char)\n current_char = np.random.choice(\n alphabet, p=probabilities)\n random_string += current_char\n return random_string", "title": "" }, { "docid": "fbe7afb5fb95cedd4d4cb432f94dbe10", "score": "0.5890888", "text": "def randstring(length=16): \n rletters='abcdefghijklmnopqrstuvwxyz'\n return ''.join((random.choice(rletters) for i in range(length)))", "title": "" }, { "docid": "5667318ce8894e5813734f6c125c0c44", "score": "0.58773756", "text": "def strgen(length):\n seen = set()\n while True:\n x = rand.str(length)\n if x in seen:\n continue\n seen.add(x)\n yield x", "title": "" }, { "docid": "af24f72695dbb294a0aa1da29347880a", "score": "0.58663416", "text": "def create_random_code(chars=AVAIABLE_CHARS):\n return \"\".join(\n [choice(chars) for _ in range(SIZE)]\n )", "title": "" }, { "docid": "8868061231e5bfa4a447d52cc36bca72", "score": "0.58572984", "text": "def __make_password():\n return ''.join(random.choice(string.lowercase + string.digits + \\\n string.uppercase) for i in range(20))", "title": "" }, { "docid": "bd61a39d3e5cc2997e656da59379caed", "score": "0.58546305", "text": "def randomAlpha(length):\n key = ''\n for i in range(length): # @UnusedVariable\n key += choice(string.ascii_lowercase)\n return key", "title": "" }, { "docid": "a56bbdc92801ef2fdf9f516a7c4e5dd4", "score": "0.5842826", "text": "def generate_instance_str(clauses_to_variables_ratio, n_clauses):\r\n output = ''\r\n n_var = int (n_clauses/clauses_to_variables_ratio)\r\n output += str(n_var) + ' ' + str(n_clauses) + ' 0 '\r\n for clause_index in range(n_clauses):\r\n for var_index in range(3):\r\n var = random.randint(1, n_var)\r\n if(random.randint(0,1) == 1):\r\n var *= -1\r\n output += str(var) + ' '\r\n output += '0 '\r\n output += '% '\r\n for var_index in range(n_var):\r\n output += str(random.randint(1,100)) + ' '\r\n output += '#'\r\n return output", "title": "" }, { "docid": "45771589e1855e071a33c7a4d2e6ce9f", "score": "0.5825627", "text": "def make_random_seq(bp):\n return ''.join(np.random.choice(list('ACGT'), size=bp))", "title": "" }, { "docid": "dc771d513b49c310ca37fe7cae6a2c38", "score": "0.58202064", "text": "def generate_random_string(\n length=25, allowed_chars=string.ascii_letters + string.digits\n):\n return \"\".join(random.choice(allowed_chars) for i in range(length))", "title": "" }, 
{ "docid": "8c3543bc8fb6d3a5801323e129cd9d75", "score": "0.5812987", "text": "def gen_password(n=0):\n n = 20 if n <= 10 else n\n chars = string.ascii_lowercase + string.ascii_uppercase + string.digits + string.punctuation.replace(';', '')\n res = ''\n for i in range(n):\n res += random.choice(chars)\n else:\n return res", "title": "" }, { "docid": "67f0c3d42e24e872a165773815db14a2", "score": "0.5809549", "text": "def gen_char(x: int) -> str:\r\n pow_lst = pow2(x)\r\n return ''.join([chr(97 + int(log2(i))) for i in pow_lst])", "title": "" }, { "docid": "f9e7654ba9c6c0dd2a5542341671cac3", "score": "0.58020943", "text": "def random_text(length=100, chars=_CHARS):\n return ''.join([choice(chars) for _ in xrange(0, length)])", "title": "" }, { "docid": "66bffbc1f58f044e8c8118ea214415af", "score": "0.57986814", "text": "def random_string(length):\n hash = hashlib.sha256()\n ret = \"a\"\n while len(ret) < length:\n hash.update(str(random.random()))\n ret += hash.hexdigest()\n return ret[0:length]", "title": "" }, { "docid": "21034bf5460edef0643774e3754b92f7", "score": "0.5796463", "text": "def randstr():\n\tret =\"\"\n\tfor _ in range(len(target_string)):\n\t\tret += random.choice(all_possible_chars)\n\treturn ret", "title": "" }, { "docid": "ef9999bf0c46acd959633ac0d325eb60", "score": "0.57895327", "text": "def __genPass(self):\n characters = string.ascii_letters + string.punctuation + string.digits\n password = \"\".join(random.choice(characters) for x in range(random.randint(8, 16)))\n return password", "title": "" }, { "docid": "ad9da2adb8591cfbbc23e19e59921349", "score": "0.5787432", "text": "def generate_string(string_length):\n alphabet = \"abcdefghijklmnopqrstuvwxyz \"\n match_string = \"\"\n for i in range(string_length):\n match_string += alphabet[random.randrange(27)]\n return match_string", "title": "" }, { "docid": "ca3eda6efc8a588a80075dc63eb869aa", "score": "0.5772561", "text": "def genRandStr(count: int=10, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(count))", "title": "" }, { "docid": "d3d20f42b6f95832c7b0400e10f043aa", "score": "0.57665217", "text": "def get_random_string(\n length=32,\n allowed_chars=\"abcdefghijklmnopqrstuvwxyz\" \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\",\n):\n return \"\".join([random.choice(allowed_chars) for i in range(length)])", "title": "" }, { "docid": "72c1255891a331c6e7083ea87d55535a", "score": "0.5759601", "text": "def randstr(length: int) -> str:\r\n \r\n i: int = 0\r\n result_string: str = \"\"\r\n while i < length:\r\n rand = random.randint(0, len(string.ascii_letters) - 1)\r\n result_string += string.ascii_letters[rand]\r\n\r\n i += 1\r\n\r\n return result_string", "title": "" }, { "docid": "9667b5269facbb1e4c1cb0de8a013d2b", "score": "0.5756555", "text": "def space() -> str:\n return random.choice([\" \"] * 90 + [\"\\u2007\"] * 10)", "title": "" }, { "docid": "71ad8bddceca3c8a43041004fb8770dc", "score": "0.57534134", "text": "def randpass(self, size=16):\n chars = string.letters + string.digits\n return ''.join((random.choice(chars)) for x in range(size))", "title": "" }, { "docid": "eab144b1be59cb9ae03651c48c8f90c3", "score": "0.5751749", "text": "def translate(x: str) -> str:\n if x == \".\":\n return colorprint(x, \"\", True if random.random() < 0.2 else False)\n elif x == \"*\":\n return colorprint(x, \"\", True if random.random() < 0.1 else False)\n elif x in {\"@\", \"&\"}:\n return colorprint(\n x,\n random.choice((\"cyan\", \"blue\")),\n True if random.random() < 0.1 else False,\n 
)\n elif x in {\",\", \"`\", \";\", \"'\", \"#\", \"⁂\"}:\n return colorprint(x, \"\", True if random.random() < 0.05 else False)\n elif x in {\"/\", \"\\\\\", \"^\", \"|\", \"_\"}:\n return colorprint(x, \"green\", False)\n elif x == \"★\":\n return colorprint(x, \"yellow\", False)\n return x", "title": "" }, { "docid": "4c21cbcb4b42b8f280a31dfe13cfd11a", "score": "0.5746448", "text": "def string_pass(mix, length):\n import random\n try:\n # Using random.choices instead of random.sample allows for repeats of elements\n temp_password = random.choices(mix, k=length)\n password = \"\".join(temp_password)\n print(f\"\\n{password}\")\n except TypeError:\n print(\"Uh oh... something went wrong\")", "title": "" }, { "docid": "334a3ba9554a7171d8427dbe10b5da2a", "score": "0.57310045", "text": "def random_name():\n\n chars_min = [chr(i) for i in range(97, 123)]\n chars_may = list(map(lambda s: s.upper(), chars_min))\n chars_num = list(map(lambda n: str(n), range(30)))\n\n chars = chars_min + chars_may + chars_num\n ran_name = sample(chars, 20)\n return \"\".join(ran_name)", "title": "" }, { "docid": "0026df35767dc0d331e3749266adaf36", "score": "0.5729573", "text": "def random_an():\r\n alpha = \"abcdefgh\"\r\n number = \"12345678\"\r\n return random.choice(alpha) + random.choice(number)", "title": "" }, { "docid": "3a61c750095cfc7365227082310bec92", "score": "0.57284325", "text": "def make_subject_code():\n number = \"\".join(\n [ str(random.randint(0,9))\n for i in range(self.subject_number_length) ]\n )\n return \"%s%s\" % (random.choice( self.prefixes ), number)", "title": "" }, { "docid": "ffeee599a0330e6e436622f67d2bbd59", "score": "0.57254195", "text": "def randstr(length):\n return \"\".join([random.choice(string.ascii_letters)\n for i in range(length)])", "title": "" }, { "docid": "cd464868b8e3cd54d1916a7ee6a7537a", "score": "0.5716817", "text": "def randchar(chars=string.digits + string.letters):\n try:\n pos = int(float(ord(os.urandom(1))) * 256. 
/ 255.)\n return chars[pos % len(chars)]\n except NotImplementedError:\n return random.choice(chars)", "title": "" }, { "docid": "f53e9e66cee684c9723ef68fb7a03cf9", "score": "0.5709221", "text": "def _create_random_string(length):\n return \"\".join(choice(string.ascii_letters + string.digits) for _ in range(length))", "title": "" }, { "docid": "ee26f1c27c317b0472159cbaee3f9f51", "score": "0.5708425", "text": "def rand_str(count=10):\r\n choice = SystemRandom().choice\r\n ls=string.ascii_letters\r\n return ''.join(choice(ls) for x in range(count))", "title": "" }, { "docid": "14f379ea56825b1c4b82cc9444e1c710", "score": "0.5704695", "text": "def generate_char():\n return random.choice(string.ascii_letters + string.digits)", "title": "" }, { "docid": "aabaa2e599aa90c5719f149b96dbcded", "score": "0.5702171", "text": "def random_string(length):\n import random\n import string\n randstring = ''\n for char in range(length):\n char = random.choice(string.ascii_letters + string.digits)\n randstring += char\n\n return randstring", "title": "" }, { "docid": "fe7f6ab742a114688940aa3307676a0a", "score": "0.56991094", "text": "def get_random_href():\r\n import random\r\n tmp_list = list()\r\n for _ in xrange(3):\r\n rand_number = random.randint(0, 0x100000000)\r\n tmp_list.append(\"{0:x}\".format(rand_number))\r\n return \"-\".join(tmp_list).upper()", "title": "" }, { "docid": "7b5613c85b49cb06b1a264d6526d607e", "score": "0.5692625", "text": "def strgen():\n seen = set()\n while True:\n x = rand.str(1, 32)\n if x in seen:\n continue\n seen.add(x)\n yield x", "title": "" }, { "docid": "add433a33ab024268a6807a93ae34345", "score": "0.56857103", "text": "def oversized_junk():\n return \"\".join(random.choice(string.ascii_lowercase) for _ in range(4097))", "title": "" }, { "docid": "fe37fa189908ebaf1106545b680ad20b", "score": "0.5685526", "text": "def generate_string(length=20):\n return binascii.hexlify(os.urandom(length)).decode()", "title": "" }, { "docid": "927e6eb799f3e6dab715970fec79c54f", "score": "0.5681773", "text": "def generate_code(length: List[int]) -> Text:\n\n code = []\n for part in length:\n part = list(range(part))\n random.shuffle(part)\n c = ''.join(map(str, [random.choice(part) for i in range(3)]))\n code.append(c)\n return '-'.join(code)", "title": "" }, { "docid": "e9af583ad6b46789e9c0f00862c9955e", "score": "0.56790066", "text": "def random_string(chars, length, rand = random):\n return ''.join([rand.choice(chars) for ignore in xrange(length)])", "title": "" }, { "docid": "f0e95fbfe364ab1decbdb313f4c1296c", "score": "0.5668656", "text": "def generateKey(length, chars):\n return ''.join([choice(chars) for i in range(length)])", "title": "" }, { "docid": "6b849e298d50c1ec23faec2bcc394b32", "score": "0.56641626", "text": "def random(length):\n\n print \"Making a random data string....\"\n print \"If strings don't appear right away, please continually move your mouse cursor.
These movements generate entropy which is used to create random data.\\n\"\n\n seed = subprocess.check_output(\"xxd -l {} -p /dev/random\".format(length), shell=True)\n seed = seed.replace('\\n', '')\n\n computer_entropy = \" \".join(chunk_string(seed, 4))\n print(\"Generated Computer entropy: {0}\\n\\n\".format(computer_entropy))\n return unchunk(computer_entropy)", "title": "" }, { "docid": "be4801c56bdeca972b89d0867d80bbb3", "score": "0.5663758", "text": "def SampleCode(nlen, alphabet=['A', 'T', 'C', 'G']):\n return \"\".join([alphabet[i] for i in np.random.random_integers(0, len(alphabet) - 1, nlen)])", "title": "" }, { "docid": "53f3a068732c7ade0faa57185865ba7c", "score": "0.5658539", "text": "def string_generator(length):\n return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length))", "title": "" }, { "docid": "d74342820df6814ba9a28708462b7259", "score": "0.56555253", "text": "def RandomString(length):\n dictionary = ascii_letters + punctuation + digits + whitespace\n randstr = \"\".join(choice(dictionary) for x in range(length))\n\n return randstr", "title": "" }, { "docid": "fed91bbdeed85c541f8df530801e4a3a", "score": "0.564191", "text": "def baseString(base):\n (dig0, dig1) = divmod(base, 10)\n if dig0 == 0:\n printable = chr(0x2080 + base)\n else:\n printable = chr(0x2080 + dig0) + chr(0x2080 + dig1)\n return printable", "title": "" }, { "docid": "79fe20a14b14b09a9ebe1398e32f1205", "score": "0.5633256", "text": "def generate_random_string(length, number):\n\n dat = []\n\n alphabet = \"AGTC\"\n\n for i in range(number):\n dat.append(\"\".join([random.choice(alphabet) for j in range(length)]))\n\n return dat", "title": "" }, { "docid": "bf13c9c815dfa48a31705aaa486e1b48", "score": "0.56308156", "text": "def my_getRandomMessage():\r\n\tcomplete_letters = upper_case_letters + lower_case_letters\r\n\tcomplete_letters = list(complete_letters)\r\n\trandom.shuffle(complete_letters)\r\n\tcomplete_letters = complete_letters * random.randint(4,40)\r\n\trandom.shuffle(complete_letters)\r\n\tcomplete_letters = ''.join(complete_letters)\r\n\treturn complete_letters[:random.randint(10,len(complete_letters))]", "title": "" }, { "docid": "6ed49d91abd8ef9601aca3e68c98310c", "score": "0.56287336", "text": "def random_string(length, chars_used=\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"):\n chars_used = list(set(chars_used.upper()))\n s = [None] * length\n for i in range(length):\n s[i] = random.choice(chars_used)\n return ''.join(s)", "title": "" }, { "docid": "e2dbed334b3ae835872e05a375dec1c5", "score": "0.5624791", "text": "def generatestring(self, n):\n st = \"\"\n # For each sign requested\n for _ in range(n):\n # random value, which gives location of sign we're looking for\n r = random.random()\n # Percentage chance that this is the sign we're looking for\n p = 0\n for key in self.k:\n # this sign's probability added to the previous probability\n p += self.dict[key]\n # if the randomly generated value is lower than the sum, we have our sign\n if r <= p:\n # add the sign to the string, and break this inner loop\n st += key\n break\n # Return a now hopefully complete sequence of length n\n return st", "title": "" }, { "docid": "eaf924bda6df639c9c2e095024b16da3", "score": "0.56215525", "text": "def generate_pw(length, pw_type, specials):\n asciitable = {}\n password = \"\"\n asciivalue = 48\n for i in range(10):\n asciitable[i] = asciivalue\n asciivalue += 1\n asciivalue = 65\n for i in range(10, 36):\n asciitable[i] = asciivalue\n asciivalue += 1\n asciivalue = 97\n for i in 
range(36, 62):\n asciitable[i] = asciivalue\n asciivalue += 1\n asciivalue = 33\n for i in range(62, 77):\n asciitable[i] = asciivalue\n asciivalue += 1\n asciivalue = 58\n for i in range(77, 84):\n asciitable[i] = asciivalue\n asciivalue += 1\n asciivalue = 91\n for i in range(84, 90):\n asciitable[i] = asciivalue\n asciivalue += 1\n asciivalue = 123\n for i in range(90, 94):\n asciitable[i] = asciivalue\n asciivalue += 1\n #print asciitable\n # for i in range(len(asciitable)):\n # print i, str(unichr(asciitable.get(i)))\n\n if pw_type == \"alphabet\":\n if not specials:\n for i in range(length):\n rand = unpack(\"I\", urandom(4))[0]\n rand %= 62\n if rand < 10:\n rand += 10\n password += str(unichr(asciitable.get(rand)))\n else:\n for i in range(length):\n rand = unpack(\"I\", urandom(4))[0]\n rand %= 93\n if rand < 10:\n rand += 10\n password += str(unichr(asciitable.get(rand)))\n if pw_type == \"numeric\":\n for i in range(length):\n rand = unpack(\"I\", urandom(4))[0]\n rand %= 10\n password += str(unichr(asciitable.get(rand)))\n if pw_type == \"alphanumeric\":\n if not specials:\n for i in range(length):\n rand = unpack(\"I\", urandom(4))[0]\n rand %= 62\n password += str(unichr(asciitable.get(rand)))\n else:\n for i in range(length):\n rand = unpack(\"I\", urandom(4))[0]\n rand %= 93\n if rand < 10:\n rand += 10\n password += str(unichr(asciitable.get(rand)))\n return password", "title": "" }, { "docid": "d0a316f9b6c26f08e5de0191f5369986", "score": "0.56209856", "text": "def _random_alpha_string(self, length):\n return ''.join([random.choice(string.ascii_letters) for n in range(length)])", "title": "" }, { "docid": "54b7e84be506cb8778563634fc600442", "score": "0.5617034", "text": "def question_10():\n x = ''\n return f'{x:*^50}'", "title": "" }, { "docid": "a50e2c9cd2934fcbce52d8ec5ce96903", "score": "0.5616593", "text": "def _generate_random_string(self, length=10):\n return \"\".join(random.choices(string.ascii_lowercase, k=length))", "title": "" }, { "docid": "d621f1424a1f40f0dbf8a78f9a215a1d", "score": "0.5615456", "text": "def newpin(digits=4):\n randnum = randint(0, 10 ** digits)\n while len(str(randnum)) > digits:\n randnum = randint(0, 10 ** digits)\n return (u'%%0%dd' % digits) % randnum", "title": "" }, { "docid": "573240a88faebcb12a042f1121cf1b5f", "score": "0.56135905", "text": "def rand_token(length: int = 25,\n chars: str = string.ascii_uppercase + string.digits) -> str:\n return \"\".join(secrets.choice(chars) for i in range(length))", "title": "" }, { "docid": "9662eb5d184865b5b7f07b3e6e1a77e8", "score": "0.5609256", "text": "def _generate_random_regex(self) -> str:\n\n regex_length = random.randint(1, 20)\n\n non_union_operators = list(set(RegexChar.operators()) - set(RegexChar.UNION.value))\n # Start with an alphanumeric character or opening group.\n regex: str = random.choice(RegexChar.ALPHANUMERIC.value + [RegexChar.GROUP.value[0]])\n num_open_groups = 1 if regex == RegexChar.GROUP.value[0] else 0\n for i in range(regex_length - 1):\n # 10% chance of adding an opening group.\n if random.random() < 0.10:\n regex += RegexChar.GROUP.value[0]\n num_open_groups += 1\n continue\n\n # 20% chance of closing an open group.\n if num_open_groups > 0 and random.random() <= 0.20:\n regex += RegexChar.GROUP.value[-1]\n num_open_groups -= 1\n continue\n\n if regex[-1] in non_union_operators:\n # If the operator isn't a union, a union can follow it.\n if random.random() < 0.25:\n regex += RegexChar.UNION.value\n continue\n\n if regex[-1] in RegexChar.ALPHANUMERIC.value:\n # Have 
an 70% probability of choosing another alphanumeric character.\n if random.random() < 0.70:\n regex += random.choice(RegexChar.ALPHANUMERIC.value)\n else:\n regex += random.choice(RegexChar.operators())\n else:\n regex += random.choice(RegexChar.ALPHANUMERIC.value)\n\n for _ in range(num_open_groups):\n regex += RegexChar.GROUP.value[-1]\n\n # Last character of regex cannot be a union.\n if regex.endswith(RegexChar.UNION.value):\n regex = regex[:-1] + random.choice(RegexChar.ALPHANUMERIC.value)\n\n return regex", "title": "" }, { "docid": "0366b09e196fdb82c6f1dba458a26b02", "score": "0.5591496", "text": "def key_for_display(key):\n if key == 'aa':\n return 'AA%'\n elif key == 'ppg':\n return 'PPG'\n elif key == 'color':\n return '&deg;L'\n elif key == 'oz':\n return 'weight'\n else:\n return key.replace('_', ' ')", "title": "" }, { "docid": "85e203e69454c6b2cb8d5f39a1aae1ea", "score": "0.55895144", "text": "def get_random_string(length=32):\n return ''.join(random.choice(string.ascii_letters + string.digits) for i in range(length))", "title": "" }, { "docid": "ca677c58f8457f3443fc0adcf348f032", "score": "0.5586715", "text": "def make_name(mdl, prompt: str = '') -> str:\n generated_text = []\n x = np.zeros((1, max_char, char_dim))\n end = False\n i = 0\n\n for char in prompt.lower():\n x[0, i + 1, char_to_index[char]] = 1\n generated_text.append(char)\n i += 1\n\n while not end:\n probs = list(mdl.predict(x)[0, i])\n probs = probs / np.sum(probs)\n index = np.random.choice(range(char_dim), p=probs)\n if i == max_char - 2:\n character = '.'\n end = True\n else:\n character = index_to_char[index]\n generated_text.append(character)\n x[0, i + 1, index] = 1\n i += 1\n if character == '.':\n end = True\n\n return ''.join(generated_text)", "title": "" }, { "docid": "7317236e6d3f1f7ecdd050bd1d54f268", "score": "0.558668", "text": "def question_11():\n phrase = 'Hello World'\n return f'{phrase:*^79}'", "title": "" }, { "docid": "42d7e3e095fb3a17ac2e4b0fa56ad6d2", "score": "0.5582282", "text": "def generate_secret(length=30):\n rand = random.SystemRandom()\n ascii_characters = string.ascii_letters + string.digits\n\n return ''.join(rand.choice(ascii_characters) for _ in range(length))", "title": "" }, { "docid": "dd6bf0d847f0bf383be1314fa16ab35f", "score": "0.5581089", "text": "def random_str_generator(size=40, chars=string.ascii_uppercase + string.digits):\n return \"\".join(random.choice(chars) for _ in range(size))", "title": "" }, { "docid": "c0886647aa6cb25819a57802c5e90bd9", "score": "0.5580016", "text": "def random_string() -> str:\n\n k = random.randint(5, 10)\n return ''.join(random.choices(string.ascii_letters + string.digits, k=k))", "title": "" }, { "docid": "a9da0e8e68470b7028847c0e8d871bde", "score": "0.5572451", "text": "def random_string():\n\n k = random.randint(5, 10)\n return ''.join(random.choices(string.ascii_letters + string.digits, k=k))", "title": "" }, { "docid": "bc25bc64a60814b2015ffa9252c5a808", "score": "0.55688536", "text": "def _genus_symbol(self, p):\n l = [q for q in self.__jd.keys() if 0 == q%p]\n if [] == l:\n return ''\n l.sort(reverse = True)\n s = ''\n while l != []:\n q = l.pop()\n s += str(q)\n gs = self.__jd[q][1]\n e = gs[2]*gs[3]\n if len(gs) > 4:\n s += '_' + str(gs[4])\n if 1 != e:\n s += '^' + str(e)\n if l != []:\n s += '.'\n return
s", "title": "" }, { "docid": "a259b753f0a4c153947afe4ff430caca", "score": "0.5560397", "text": "def encode(plain_text):\n prefix_length = next_positive_random(2) + 4\n buf = ''\n for i in range(0, prefix_length):\n buf += charset[next_positive_random(len(charset))]\n return buf + ' ' + plain_text", "title": "" }, { "docid": "9401cb89f7bafa3ddccb85774260219e", "score": "0.55603224", "text": "def random_string(length: int = 5) -> str:\n return ''.join(choice(ascii_letters) for i in range(0, length))", "title": "" }, { "docid": "4aba64155ba283cee049465211e5778d", "score": "0.5559557", "text": "def get_random_string(length=12,\n allowed_chars='abcdefghijklmnopqrstuvwxyz'\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):\n\n return ''.join(random.choice(allowed_chars) for i in range(length))", "title": "" }, { "docid": "5e59b8eb090b3b5af5c13d1f8a665fce", "score": "0.55556047", "text": "def random_string(self, length):\n return ''.join( random.choice( string.ascii_lowercase + string.ascii_uppercase + string.digits ) for i in range(length) )", "title": "" }, { "docid": "35ca8a097e98ce749c4bd0b6d9ebf787", "score": "0.5539147", "text": "def random_string(self, size=3):\n ts = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n chars = string.ascii_lowercase + string.digits\n rc = \"\".join(random.choice(chars) for _ in range(size))\n return \"{}-{}-scan\".format(ts, rc)", "title": "" }, { "docid": "46d963ae51f94a6400dfa813901173c8", "score": "0.5536528", "text": "def get_random_string(length=12,\n allowed_chars='abcdefghijklmnopqrstuvwxyz'\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):\n return ''.join(random.choice(allowed_chars) for i in range(length))", "title": "" } ]
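A recurring pattern in the negative passages above is drawing password or token characters from the `random` module, whose generator is not cryptographically secure. For contrast, a minimal sketch using the standard-library `secrets` module; the function name is illustrative and not taken from any passage:

```python
import secrets
import string

def secure_token(length=32):
    # secrets.choice draws from the OS CSPRNG, unlike random.choice,
    # which makes it suitable for passwords and API tokens.
    alphabet = string.ascii_letters + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(length))
```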
08e2b273bd45cef527c56322381ab929
x.__eq__(y) <==> x==y
[ { "docid": "f6cf9ec283538288c58388f140ed6876", "score": "0.0", "text": "def __eq__(self, *args): #cannot find CLR method\r\n pass", "title": "" } ]
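The positive passage above stubs `__eq__` out with `pass`, so every comparison yields `None`. A minimal correct pattern returns `NotImplemented` for foreign types and keeps `__hash__` consistent with `__eq__`; the `Point` class here is illustrative, not drawn from any passage:

```python
class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __eq__(self, other):
        # x.__eq__(y) <==> x == y; defer to the other operand's
        # reflected method when the types are unrelated.
        if not isinstance(other, Point):
            return NotImplemented
        return (self.x, self.y) == (other.x, other.y)

    def __hash__(self):
        # Objects that compare equal must hash equal, so hash the same tuple.
        return hash((self.x, self.y))

assert Point(1, 2) == Point(1, 2)
assert Point(1, 2) != Point(3, 4)
```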
[ { "docid": "28f9b1bc378d0090c502f0b953f818c0", "score": "0.8320325", "text": "def eq(x, y):\n return x == y", "title": "" }, { "docid": "8560fc02cea59b845bfbedfe81c8eb4e", "score": "0.7849895", "text": "def __eq__(self, other):\n\t\treturn self.x == other.x and self.y == other.y", "title": "" }, { "docid": "e37ca814070b8feeccecf7c2121bf3e6", "score": "0.78284943", "text": "def __eq__(self, other):\r\n return (self.x, self.y) == (other.x, other.y)", "title": "" }, { "docid": "42fa0b279e4dab0caecfbd89c139b731", "score": "0.77969134", "text": "def __eq__():", "title": "" }, { "docid": "a2e4ef027d801ddd67f18780135a167b", "score": "0.7746899", "text": "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "title": "" }, { "docid": "a2e4ef027d801ddd67f18780135a167b", "score": "0.7746899", "text": "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "title": "" }, { "docid": "9850b7f98e1b396ba898fce799eb3531", "score": "0.7614158", "text": "def __eq__(self,y):\n test1 = self.left == y.left and self.right == y.right\n test2 = self.left == y.right and self.right == y.left\n test3 = numpy.allclose(self.distance,y.distance,rtol,atol)\n test = (test1 or test2) and test3\n return test", "title": "" }, { "docid": "373da316a73dc5d0063945e445dca9c2", "score": "0.7492266", "text": "def __eq__(self, other): # for `Point(1, 2) == Point(1, 2)`\n return (self.x == other.x) and (self.y == other.y)", "title": "" }, { "docid": "d24ba3b097744baef6a44af0bc07189b", "score": "0.7475724", "text": "def __eq__(other):", "title": "" }, { "docid": "6a07eb0542e01959ab1eb80017a164dc", "score": "0.73861605", "text": "def __eq__(self, rhs):", "title": "" }, { "docid": "6a674182116b3327b49ca22fb039c290", "score": "0.7368673", "text": "def equals(self, x, y):\n raise AssertionError(\"Not implemented\")", "title": "" }, { "docid": "2158783ab0f656aa5790775988535fa8", "score": "0.7321998", "text": "def __eq__(self, rhs):\r\n\r\n x, y, z = self._v\r\n xx, yy, zz = rhs\r\n return x == xx and y == yy and z == zz", "title": "" }, { "docid": "8b333a91c8a64fd186d5d7bd0446fa38", "score": "0.72930807", "text": "def __eq__(self, other):\n return self.x == other.x and self.y == other.y and self.z == other.z", "title": "" }, { "docid": "8b333a91c8a64fd186d5d7bd0446fa38", "score": "0.72930807", "text": "def __eq__(self, other):\n return self.x == other.x and self.y == other.y and self.z == other.z", "title": "" }, { "docid": "460048dce382d3df25588035da8cd1df", "score": "0.7277985", "text": "def __eq__(self,*args):\r\n pass", "title": "" }, { "docid": "460048dce382d3df25588035da8cd1df", "score": "0.7277985", "text": "def __eq__(self,*args):\r\n pass", "title": "" }, { "docid": "460048dce382d3df25588035da8cd1df", "score": "0.7277985", "text": "def __eq__(self,*args):\r\n pass", "title": "" }, { "docid": "460048dce382d3df25588035da8cd1df", "score": "0.7277985", "text": "def __eq__(self,*args):\r\n pass", "title": "" }, { "docid": "460048dce382d3df25588035da8cd1df", "score": "0.7277985", "text": "def __eq__(self,*args):\r\n pass", "title": "" }, { "docid": "460048dce382d3df25588035da8cd1df", "score": "0.7277985", "text": "def __eq__(self,*args):\r\n pass", "title": "" }, { "docid": "460048dce382d3df25588035da8cd1df", "score": "0.7277985", "text": "def __eq__(self,*args):\r\n pass", "title": "" }, { "docid": "460048dce382d3df25588035da8cd1df", "score": "0.7277985", "text": "def __eq__(self,*args):\r\n pass", "title": "" }, { "docid": "460048dce382d3df25588035da8cd1df", "score": "0.7277985", "text": 
"def __eq__(self,*args):\r\n pass", "title": "" }, { "docid": "460048dce382d3df25588035da8cd1df", "score": "0.7277985", "text": "def __eq__(self,*args):\r\n pass", "title": "" }, { "docid": "460048dce382d3df25588035da8cd1df", "score": "0.7277985", "text": "def __eq__(self,*args):\r\n pass", "title": "" }, { "docid": "460048dce382d3df25588035da8cd1df", "score": "0.7277985", "text": "def __eq__(self,*args):\r\n pass", "title": "" }, { "docid": "460048dce382d3df25588035da8cd1df", "score": "0.7277985", "text": "def __eq__(self,*args):\r\n pass", "title": "" }, { "docid": "460048dce382d3df25588035da8cd1df", "score": "0.7277985", "text": "def __eq__(self,*args):\r\n pass", "title": "" }, { "docid": "720b928f46da2eb182e9f278b7d19223", "score": "0.72477317", "text": "def __eq__(self, other):\n\t\treturn array_equal(self.x, other.x) and self.f == other.f", "title": "" }, { "docid": "3248bab35c9ec387ecae9c7c68c817fc", "score": "0.72363514", "text": "def equal(x: T.Tensor, y: T.Tensor) -> T.Boolean:\n return numpy.equal(x, y)", "title": "" }, { "docid": "671e467aa8f3e7a835a2a2b6879731e2", "score": "0.7231318", "text": "def __eq__(self, other) -> bool:\n is_same = False\n if self._x == other._x and self._y == other._y and self._z == other._z:\n is_same = True\n # else:\n # print(self._x ,other._x , self._y , other._y , self._z , other._z )\n return is_same", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": 
"c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "c385a8bb410e4aa991aab51a73748358", "score": "0.722546", "text": "def __eq__(self,*args):\n pass", "title": "" }, { "docid": "340f910bddb5f3ee81d3ad234fb79015", "score": "0.7222796", "text": "def __eq__(self,y):\n if len(self) != len(y):\n test = False\n else:\n test = True\n for i in 
range(len(self.nodes)):\n test = test and self.nodes[i] == y.nodes[i]\n if not test:\n break\n return test", "title": "" }, { "docid": "3c7d76211f1db9877fe85208a36cce0c", "score": "0.72185075", "text": "def __eq__(self, other):", "title": "" }, { "docid": "fb0f56f969a0f6294b72d3de8bc8c2ad", "score": "0.71878946", "text": "def assert_eq(x, y):\n assert x == y, \"%s != %s\" % (x, y)", "title": "" }, { "docid": "32993b7c9da71333520f58c5bfcd67f2", "score": "0.7179169", "text": "def __eq__(self,other):\n return self._coords == other._coords", "title": "" }, { "docid": "bfcd75b88e8121a602c9ad0de45ada5d", "score": "0.7156072", "text": "def is_equal(self, other):", "title": "" }, { "docid": "f13c3886fd7c38d28db7a1768a55a83a", "score": "0.712368", "text": "def __eq__(self, rhs):\n return (rhs.x == self.x) and (rhs.y == self.y) and \\\n (rhs.width == self.width) and (rhs.height == self.height)", "title": "" }, { "docid": "7965945a98ef40665d6cea65e3459349", "score": "0.7077729", "text": "def forum_equal_to(obj1, obj2):\n\n return obj1 == obj2", "title": "" }, { "docid": "be326151787b726c754d62a17b02c0eb", "score": "0.70696735", "text": "def __eq__(self, other):\n return True", "title": "" }, { "docid": "bcd41dbd3e91ec57a061d652b6002da4", "score": "0.70228934", "text": "def test___eq__(self):\n\t\tpass", "title": "" }, { "docid": "00192fa4705f07cfb2b1b3ba2cfa738f", "score": "0.7019051", "text": "def isEqual(self, other):\n \n pass", "title": "" }, { "docid": "01fc92ed73120a5eeae2a47f9100663f", "score": "0.70056057", "text": "def __eq__(self, other):\n if self.x == other.x and self.y == other.y and self.z == other.z:\n return True\n else:\n return False", "title": "" }, { "docid": "99ed2f535a7fdb9306151fc7be1af1fc", "score": "0.6989933", "text": "def is_equal(arg1, arg2):\n return arg1 == arg2", "title": "" }, { "docid": "5d89f02aa214e811abd490d780ab30ca", "score": "0.6968457", "text": "def __eq__(self, other):\n return self.coords == other.coords", "title": "" }, { "docid": "1f839a67a507253bcff618c3cfe110e3", "score": "0.6945054", "text": "def _compare(self, x, y):\n x_index = self._position_to_index(x, to_tuple=False).T\n y_index = self._position_to_index(y, to_tuple=False).T\n \n return np.all(x_index == y_index, axis=1)", "title": "" }, { "docid": "379201467061151f73c91b881b249aa2", "score": "0.693288", "text": "def _py_equal(a, b):\n return a == b", "title": "" }, { "docid": "3e0a645ea87cc6cccd38a41e152ef8b7", "score": "0.69301987", "text": "def is_eq(mdp, v1, v2):\r\n return v1[0] == v2[0]", "title": "" }, { "docid": "adefcb32aa87d189a2d47e3c2a223640", "score": "0.6919645", "text": "def __eq__(self, other):\n return self._coords == other._coords", "title": "" }, { "docid": "03f664d6eaa2c1c34ab7346f0fad6d4a", "score": "0.69170886", "text": "def __eq__(self, other):\n # for row in range( 4 ):\n # if self.cells[row] != other.cells[row]:\n # return False\n # return True\n return hash(self) == hash(other)", "title": "" }, { "docid": "0a418ec44537a7d0896378d41d0761d3", "score": "0.68965256", "text": "def __eq__(self, *args):\r\n pass", "title": "" } ]
89af3611257577a703bd2e6424bdc71d
Encode bounding boxes (that are in center-size form) w.r.t. the corresponding prior boxes (that are in center-size form). For the center coordinates, find the offset with respect to the prior box, and scale by the size of the prior box. For the size coordinates, scale by the size of the prior box, and convert to log-space. In the model, we are predicting bounding box coordinates in this encoded form.
[ { "docid": "72eb7bc0a8d80969db4fb1de69dca5e6", "score": "0.0", "text": "def cxcy_to_gcxgcy(cxcy, priors_cxcy):\n\n # The 10 and 5 below are referred to as 'variances' in the original Caffe repo, completely empirical\n # They are for some sort of numerical conditioning, for 'scaling the localization gradient'\n # See https://github.com/weiliu89/caffe/issues/155\n return torch.cat([(cxcy[:, :2] - priors_cxcy[:, :2]) / (priors_cxcy[:, 2:] / 10), # g_c_x, g_c_y\n torch.log(cxcy[:, 2:] / priors_cxcy[:, 2:]) * 5], 1) # g_w, g_h", "title": "" } ]
[ { "docid": "1c8b3617401588754bfc0bc6ba4f101c", "score": "0.6071095", "text": "def encode(self, boxes, labels, img):\r\n boxes = boxes.box\r\n #print(img)\r\n _, h, w = img\r\n boxes /= torch.Tensor([[w, h, w, h]]).expand_as(boxes) # normalize (x1, y1, x2, y2) w.r.t. image width/height.\r\n #print(boxes)\r\n S, B, C = self.S, self.B, self.C\r\n N = 5 * B + C\r\n\r\n target = torch.zeros(S, S, N)\r\n cell_size = 1.0 / float(S)\r\n boxes_wh = boxes[:, 2:] - boxes[:, :2] # width and height for each box, [n, 2]\r\n boxes_xy = (boxes[:, 2:] + boxes[:, :2]) / 2.0 # center x & y for each box, [n, 2]\r\n #print(boxes_wh, boxes_xy)\r\n for b in range(boxes.size(0)):\r\n xy, wh, label = boxes_xy[b], boxes_wh[b], int(labels[b])\r\n \r\n ij = (xy / cell_size).ceil() - 1.0\r\n i, j = int(ij[0]), int(ij[1]) # y & x index which represents its location on the grid.\r\n x0y0 = ij * cell_size # x & y of the cell left-top corner.\r\n xy_normalized = (xy - x0y0) / cell_size # x & y of the box on the cell, normalized from 0.0 to 1.0\r\n #print(xy, ij, i, j, boxes.size(0))\r\n # TBM, remove redundant dimensions from target tensor.\r\n # To remove these, loss implementation also has to be modified.\r\n for k in range(B):\r\n #print(label)\r\n s = 5 * k\r\n target[j, i, s :s+2] = xy_normalized\r\n target[j, i, s+2:s+4] = wh\r\n target[j, i, s+4 ] = 1.0\r\n #print(target.shape)\r\n target[j, i, 5*B + label - 1] = 1.0\r\n\r\n return target", "title": "" }, { "docid": "d8df36e03d155ac4561f7e61e1b1cfe3", "score": "0.5973301", "text": "def bbox_transform(boxes, gt_boxes):\n widths = boxes[:, 2] - boxes[:, 0] + 1.0\n heights = boxes[:, 3] - boxes[:, 1] + 1.0\n ctr_x = boxes[:, 0] + 0.5 * widths\n ctr_y = boxes[:, 1] + 0.5 * heights\n gt_widths = gt_boxes[:, 2] - gt_boxes[:, 0] + 1.0\n gt_heights = gt_boxes[:, 3] - gt_boxes[:, 1] + 1.0\n gt_ctr_x = gt_boxes[:, 0] + 0.5 * gt_widths\n gt_ctr_y = gt_boxes[:, 1] + 0.5 * gt_heights\n dx = (gt_ctr_x - ctr_x) / widths\n dy = (gt_ctr_y - ctr_y) / heights\n dw = torch.log(gt_widths / widths)\n dh = torch.log(gt_heights / heights)\n deltas = torch.stack((dx, dy, dw, dh), dim=1)\n return deltas", "title": "" }, { "docid": "f636093228e0b256ca31b3774a0203d5", "score": "0.5849039", "text": "def tf_ssd_encode_boxes(labels, # (N,)\r\n\t\t\t\t\t\tboxes, # (N, 4) <- (ymin, xmin, ymax, xmax)\r\n\t\t\t\t\t\tanchor_boxes, # (num_aboxes, 4) <- (cy, cx, h, w)\r\n\t\t\t\t\t\tprior_scaling,\r\n\t\t\t\t\t\tmatching_threshold = 0.5):\r\n\tnum_boxes = tf.cast(tf.shape(boxes)[0], tf.int64) # ()\r\n\tnum_aboxes = anchor_boxes.shape[0] # ()\r\n\r\n\t# compute coordinates of anchor boxes\r\n\taboxes_ymin = tf.cast(anchor_boxes[:, 0] - anchor_boxes[:, 2] / 2.0, tf.float32) # (num_aboxes,)\r\n\taboxes_xmin = tf.cast(anchor_boxes[:, 1] - anchor_boxes[:, 3] / 2.0, tf.float32) # (num_aboxes,)\r\n\taboxes_ymax = tf.cast(anchor_boxes[:, 0] + anchor_boxes[:, 2] / 2.0, tf.float32) # (num_aboxes,)\r\n\taboxes_xmax = tf.cast(anchor_boxes[:, 1] + anchor_boxes[:, 3] / 2.0, tf.float32) # (num_aboxes,)\r\n\r\n\t# compute Jaccard score of boxes to anchor boxes\r\n\tjaccard = tf_jaccard_score(\r\n\t\tboxes,\r\n\t\ttf.stack([aboxes_ymin, aboxes_xmin, aboxes_ymax, aboxes_xmax], axis=-1)\r\n\t) # (N, num_aboxes)\r\n\r\n\t# best and good abox for each box\r\n\tbest_abox_per_box = tf.argmax(jaccard, axis=1) # (N,)\r\n\tbest_abox_per_box = tf.expand_dims(best_abox_per_box, axis=-1) # (N, 1)\r\n\tbest_abox_per_box_mask = tf.equal(tf.expand_dims(tf.range(num_aboxes, dtype=tf.int64), axis=0), best_abox_per_box) # (N, 
num_aboxes)\r\n\tgood_abox_per_box_mask = tf.greater(jaccard, matching_threshold) # (N, num_aboxes)\r\n\r\n\tmask = tf.logical_or(best_abox_per_box_mask, good_abox_per_box_mask) # (N, num_aboxes)\r\n\r\n\t# best box for each abox\r\n\tjaccard_masked = tf.where(mask, jaccard, tf.zeros_like(mask, dtype=tf.float32)) # (N, num_aboxes)\r\n\tbest_jaccard_per_abox = tf.reduce_max(jaccard_masked, axis=0) # (num_aboxes,)\r\n\tbest_box_per_abox = tf.argmax(jaccard_masked, axis=0) # (num_aboxes,)\r\n\tbest_box_per_abox = tf.expand_dims(best_box_per_abox, axis=0) # (1, num_aboxes)\r\n\tbest_box_per_abox_mask = tf.equal(tf.expand_dims(tf.range(num_boxes, dtype=tf.int64), axis=-1), best_box_per_abox) # (N, num_aboxes)\r\n\r\n\tmask = tf.logical_and(mask, best_box_per_abox_mask) # (N, num_aboxes)\r\n\timask = tf.cast(mask, tf.int64) # (N, num_aboxes)\r\n\tfmask = tf.cast(mask, tf.float32) # (N, num_aboxes)\r\n\r\n\t# update labels and boxes using the mask\r\n\tupdate_mask = tf.logical_not(tf.equal(tf.reduce_sum(imask, axis=0), 0)) # (num_aboxes,)\r\n\r\n\tlabels = tf.expand_dims(labels, axis=-1) # (N, 1)\r\n\tlabels = tf.multiply(labels, imask) # (N, num_aboxes)\r\n\tlabels = tf.reduce_max(labels, axis=0) # (num_aboxes,)\r\n\r\n\tboxes = tf.expand_dims(boxes, axis=1) # (N, 1, 4)\r\n\tboxes = tf.multiply(boxes, tf.expand_dims(fmask, axis=-1)) # (N, num_aboxes, 4)\r\n\tboxes = tf.reduce_max(boxes, axis=0) # (num_aboxes, 4)\r\n\tboxes = tf.transpose(boxes, [1, 0]) # (4, num_aboxes)\r\n\r\n\t# build final labels, scores and box coordinates\r\n\tizeros = tf.zeros((num_aboxes,), dtype=tf.int64) # (num_aboxes,)\r\n\tfzeros = tf.zeros((num_aboxes,), dtype=tf.float32) # (num_aboxes,)\r\n\tfones = tf.ones((num_aboxes,), dtype=tf.float32) # (num_aboxes,)\r\n\r\n\tfeature_labels = tf.where(update_mask, labels, izeros) # (num_aboxes,)\r\n\tfeature_scores = tf.where(update_mask, best_jaccard_per_abox, fzeros) # (num_aboxes,)\r\n\tfeature_y_min = tf.where(update_mask, boxes[0], fzeros) # (num_aboxes,)\r\n\tfeature_x_min = tf.where(update_mask, boxes[1], fzeros) # (num_aboxes,)\r\n\tfeature_y_max = tf.where(update_mask, boxes[2], fones) # (num_aboxes,)\r\n\tfeature_x_max = tf.where(update_mask, boxes[3], fones) # (num_aboxes,)\r\n\r\n\t# transform to center / size\r\n\tfeature_cy = (feature_y_max + feature_y_min) / 2.0 # (num_aboxes,)\r\n\tfeature_cx = (feature_x_max + feature_x_min) / 2.0 # (num_aboxes,)\r\n\tfeature_h = feature_y_max - feature_y_min # (num_aboxes,)\r\n\tfeature_w = feature_x_max - feature_x_min # (num_aboxes,)\r\n\r\n\t# encode features\r\n\tfeature_cy = (feature_cy - anchor_boxes[:, 0]) / anchor_boxes[:, 2] / prior_scaling[0] # (num_aboxes,)\r\n\tfeature_cx = (feature_cx - anchor_boxes[:, 1]) / anchor_boxes[:, 3] / prior_scaling[1] # (num_aboxes,)\r\n\tfeature_h = tf.log(feature_h / anchor_boxes[:, 2]) / prior_scaling[2] # (num_aboxes,)\r\n\tfeature_w = tf.log(feature_w / anchor_boxes[:, 3]) / prior_scaling[3] # (num_aboxes,)\r\n\r\n\t# reorder for ssd\r\n\tfeature_boxes = tf.stack(\r\n\t\t[feature_cx, feature_cy, feature_w, feature_h],\r\n\t\taxis = -1\r\n\t) # (num_aboxes, 4)\r\n\r\n\treturn (\r\n\t\tfeature_labels, # (num_aboxes,)\r\n\t\tfeature_scores, # (num_aboxes,)\r\n\t\tfeature_boxes # (num_aboxes, 4)\r\n\t)", "title": "" }, { "docid": "98c1a803ed2511ebf9ad5bd2f2031099", "score": "0.5837568", "text": "def encode_bbox_deprecated(prior_box, bbox_target):\n # 5. 
encode the bbox_target\n\n variance = [0.1, 0.2]\n\n bbox_target /= 300\n\n # get the gt center x and y\n cxcy = bbox_target[:, 0:2] + bbox_target[:, 2:4] / 2\n cxcy = (cxcy - reversed(prior_box[:, 0:2])) / (prior_box[:, 2:4] * variance[0])\n\n # get the gt weight and height\n wh = torch.log(bbox_target[:, 2:4] / prior_box[:, 2:4])\n wh /= variance[1]\n\n bbox_target = torch.cat((cxcy, wh), dim=1)\n\n return bbox_target", "title": "" }, { "docid": "ac030d7bb705b022822fad2770ef1a1c", "score": "0.58288985", "text": "def bev_box_encode(boxes,\r\n anchors,\r\n encode_angle_to_vector=False,\r\n smooth_dim=False):\r\n # need to convert boxes to z-center format\r\n xa, ya, wa, la, ra = np.split(anchors, 5, axis=-1)\r\n xg, yg, wg, lg, rg = np.split(boxes, 5, axis=-1)\r\n diagonal = np.sqrt(la**2 + wa**2) # 4.3\r\n xt = (xg - xa) / diagonal\r\n yt = (yg - ya) / diagonal\r\n if smooth_dim:\r\n lt = lg / la - 1\r\n wt = wg / wa - 1\r\n else:\r\n lt = np.log(lg / la)\r\n wt = np.log(wg / wa)\r\n if encode_angle_to_vector:\r\n rgx = np.cos(rg)\r\n rgy = np.sin(rg)\r\n rax = np.cos(ra)\r\n ray = np.sin(ra)\r\n rtx = rgx - rax\r\n rty = rgy - ray\r\n return np.concatenate([xt, yt, wt, lt, rtx, rty], axis=-1)\r\n else:\r\n rt = rg - ra\r\n return np.concatenate([xt, yt, wt, lt, rt], axis=-1)", "title": "" }, { "docid": "38d2cabbc27207f9ae2d8016dd863213", "score": "0.5821383", "text": "def bbox2loc(bbox, priors, center_var=0.1, size_var=0.2):\n # assert priors.shape[0] == 1\n # assert priors.dim() == 3\n\n # prior bounding boxes\n p_center = priors[..., :2]\n p_size = priors[..., 2:]\n\n # locations\n b_center = bbox[..., :2]\n b_size = bbox[..., 2:]\n\n temp = torch.cat([\n 1 / center_var * ((b_center - p_center) / p_size),\n torch.log(b_size / p_size) / size_var\n ], dim=-1)\n return temp", "title": "" }, { "docid": "f4b9cebff122bd1f452f770d020ac1c9", "score": "0.5812542", "text": "def encodeBBox(bbox, anchors):\n anchor_num = anchors.get_shape()[0]\n shape = tf.stack([anchor_num, tf.constant(1, dtype=tf.int32)], axis=0)\n bbox = tf.expand_dims(bbox, axis=0)\n bbox = tf.tile(bbox, shape)\n\n bbox_center = tf.slice(bbox, [0, 0], [anchor_num, 2])\n bbox_size = tf.slice(bbox, [0, 2], [anchor_num, 2])\n anchor_center = tf.slice(anchors, [0,0], [anchor_num, 2])\n anchor_size = tf.slice(anchors, [0, 2], [anchor_num, 2])\n\n g_center = (bbox_center - anchor_center)/anchor_size\n g_size = tf.log(bbox_size/anchor_size)\n\n return tf.concat([g_center, g_size], axis=1)", "title": "" }, { "docid": "81342da89b0ee0f78c928a6155a7253d", "score": "0.58032376", "text": "def _encode(self, boxes, anchors):\n box_corners = boxes.get()\n if anchors.has_field('stddev'):\n raise ValueError(\"'stddev' is a parameter of MeanStddevBoxCoder and \"\n \"should not be specified in the box list.\")\n means = anchors.get()\n return (box_corners - means) / self._stddev", "title": "" }, { "docid": "a87092a630156c02b3ff9f01c4a84a37", "score": "0.5773656", "text": "def bbox2loc(bbox, priors, center_var=0.1, size_var=0.2):\n assert priors.shape[0] == 1\n assert priors.dim() == 3\n\n # prior bounding boxes\n p_center = priors[..., :2]\n p_size = priors[..., 2:]\n\n # locations\n b_center = bbox[..., :2]\n b_size = bbox[..., 2:]\n\n return torch.cat([\n 1 / center_var * ((b_center - p_center) / p_size),\n torch.log(b_size / p_size) / size_var\n ], dim=-1)", "title": "" }, { "docid": "5af0b1007e24f18b06fb9caf7ead100a", "score": "0.56385434", "text": "def tf_ssd_decode_boxes(localizations, # (N, num_aboxes, 4) <- (cx, cy, w, 
h)\r\n\t\t\t\t\t\tanchor_boxes, # (num_aboxes, 4) <- (cy, cx, h, w)\r\n\t\t\t\t\t\tprior_scaling):\r\n\t# compute center, height and width\r\n\tcy = localizations[:, :, 1] * anchor_boxes[:, 2] * prior_scaling[0] + anchor_boxes[:, 0] # (N, num_aboxes)\r\n\tcx = localizations[:, :, 0] * anchor_boxes[:, 3] * prior_scaling[1] + anchor_boxes[:, 1] # (N, num_aboxes)\r\n\theight = anchor_boxes[:, 2] * tf.exp(localizations[:, :, 3] * prior_scaling[2]) # (N, num_aboxes)\r\n\twidth = anchor_boxes[:, 3] * tf.exp(localizations[:, :, 2] * prior_scaling[3]) # (N, num_aboxes)\r\n\r\n\t# boxes coordinates\r\n\tymin = cy - height / 2.0 # (N, num_aboxes)\r\n\txmin = cx - width / 2.0 # (N, num_aboxes)\r\n\tymax = cy + height / 2.0 # (N, num_aboxes)\r\n\txmax = cx + width / 2.0 # (N, num_aboxes)\r\n\r\n\treturn tf.stack([ymin, xmin, ymax, xmax], axis=-1) # (N, num_aboxes, 4) <- (ymin, xmin, ymax, xmax)\r", "title": "" }, { "docid": "f43504a5ed8e6af0bb7bdca89ce97108", "score": "0.5601941", "text": "def rescale_boxes(boxes, current_dim, original_shape):\n orig_h, orig_w = original_shape\n # The amount of padding that was added\n pad_x = max(orig_h - orig_w, 0) * (current_dim / max(original_shape))\n pad_y = max(orig_w - orig_h, 0) * (current_dim / max(original_shape))\n # Image height and width after padding is removed\n unpad_h = current_dim - pad_y\n unpad_w = current_dim - pad_x\n # Rescale bounding boxes to dimension of original image\n boxes[:, 0] = ((boxes[:, 0] - pad_x // 2) / unpad_w) * orig_w\n boxes[:, 1] = ((boxes[:, 1] - pad_y // 2) / unpad_h) * orig_h\n boxes[:, 2] = ((boxes[:, 2] - pad_x // 2) / unpad_w) * orig_w\n boxes[:, 3] = ((boxes[:, 3] - pad_y // 2) / unpad_h) * orig_h\n return boxes", "title": "" }, { "docid": "c057655c0e94317e6e78ab03a47dd921", "score": "0.55937517", "text": "def apply_boxes(self, boxes: np.ndarray, original_size: Union[List[Any], Tensor]) -> np.ndarray:\n boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size)\n return boxes.reshape(-1, 4)", "title": "" }, { "docid": "256d8a138675116491bc817a37b9c423", "score": "0.55462974", "text": "def decode_boxes(self, mbox_loc, mbox_priorbox, variances):\n prior_width = mbox_priorbox[:, 2] - mbox_priorbox[:, 0]\n prior_height = mbox_priorbox[:, 3] - mbox_priorbox[:, 1]\n prior_center_x = 0.5 * (mbox_priorbox[:, 2] + mbox_priorbox[:, 0])\n prior_center_y = 0.5 * (mbox_priorbox[:, 3] + mbox_priorbox[:, 1])\n decode_bbox_center_x = mbox_loc[:, 0] * prior_width * variances[:, 0]\n decode_bbox_center_x += prior_center_x\n decode_bbox_center_y = mbox_loc[:, 1] * prior_height * variances[:, 1]\n decode_bbox_center_y += prior_center_y\n decode_bbox_width = np.exp(mbox_loc[:, 2] * variances[:, 2])\n decode_bbox_width *= prior_width\n
decode_bbox_height = np.exp(mbox_loc[:, 3] * variances[:, 3])\n decode_bbox_height *= prior_height\n decode_bbox_xmin = decode_bbox_center_x - 0.5 * decode_bbox_width\n decode_bbox_ymin = decode_bbox_center_y - 0.5 * decode_bbox_height\n decode_bbox_xmax = decode_bbox_center_x + 0.5 * decode_bbox_width\n decode_bbox_ymax = decode_bbox_center_y + 0.5 * decode_bbox_height\n decode_bbox = np.concatenate((decode_bbox_xmin[:, None],\n decode_bbox_ymin[:, None],\n decode_bbox_xmax[:, None],\n decode_bbox_ymax[:, None]), axis=-1)\n decode_bbox = np.minimum(np.maximum(decode_bbox, 0.0), 1.0)\n return decode_bbox", "title": "" }, { "docid": "f97d39017b41a4c2f83fb0f5aeda14fb", "score": "0.5544374", "text": "def decode_bbox(anchors, raw_outputs, variances=[0.1, 0.1, 0.2, 0.2]):\n anchor_centers_x = (anchors[:, :, 0:1] + anchors[:, :, 2:3]) / 2\n anchor_centers_y = (anchors[:, :, 1:2] + anchors[:, :, 3:]) / 2\n anchors_w = anchors[:, :, 2:3] - anchors[:, :, 0:1]\n anchors_h = anchors[:, :, 3:] - anchors[:, :, 1:2]\n\n raw_outputs_rescale = raw_outputs * np.array(variances)\n\n predict_center_x = raw_outputs_rescale[:, :, 0:1] * anchors_w + anchor_centers_x\n predict_center_y = raw_outputs_rescale[:, :, 1:2] * anchors_h + anchor_centers_y\n predict_w = np.exp(raw_outputs_rescale[:, :, 2:3]) * anchors_w\n predict_h = np.exp(raw_outputs_rescale[:, :, 3:]) * anchors_h\n predict_xmin = predict_center_x - predict_w / 2\n predict_ymin = predict_center_y - predict_h / 2\n predict_xmax = predict_center_x + predict_w / 2\n predict_ymax = predict_center_y + predict_h / 2\n predict_bbox = np.concatenate(\n [predict_xmin, predict_ymin, predict_xmax, predict_ymax], axis=-1\n )\n\n return predict_bbox", "title": "" }, { "docid": "de65f04e9e642401d2aff178a6fe4e35", "score": "0.55064666", "text": "def decode_bbox(anchors, raw_outputs, variances=[0.1, 0.1, 0.2, 0.2]):\n\n anchor_centers_x = (anchors[:, :, 0:1] + anchors[:, :, 2:3]) / 2\n anchor_centers_y = (anchors[:, :, 1:2] + anchors[:, :, 3:]) / 2\n anchors_w = anchors[:, :, 2:3] - anchors[:, :, 0:1]\n anchors_h = anchors[:, :, 3:] - anchors[:, :, 1:2]\n\n raw_outputs_rescale = raw_outputs * np.array(variances)\n\n predict_center_x = raw_outputs_rescale[:, :, 0:1]*anchors_w + anchor_centers_x\n predict_center_y = raw_outputs_rescale[:, :, 1:2]*anchors_h + anchor_centers_y\n predict_w = np.exp(raw_outputs_rescale[:, :, 2:3])*anchors_w\n predict_h = np.exp(raw_outputs_rescale[:, :, 3:])*anchors_h\n\n predict_xmin = predict_center_x - predict_w/2\n predict_ymin = predict_center_y - predict_h/2\n predict_xmax = predict_center_x + predict_w/2\n predict_ymax = predict_center_y + predict_h/2\n\n predict_bbox = np.concatenate([predict_xmin, predict_ymin, predict_xmax, predict_ymax], axis=-1)\n\n return predict_bbox", "title": "" }, { "docid": "fa2c10d45875ddbeb03404de94aca276", "score": "0.549052", "text": "def _resize_cbboxes(self, results):\n img_shape = results['img_shape']\n for key in results.get('cbbox_fields', []):\n if isinstance(results[key], np.ndarray):\n # For 2-point rect bounding boxes, np.ndarray(K, L, 4)\n cbboxes = results[key] * results['scale_factor']\n if self.bbox_clip_border:\n cbboxes[:, :, 0::2] = np.clip(cbboxes[:, :, 0::2], 0, img_shape[1])\n cbboxes[:, :, 1::2] = np.clip(cbboxes[:, :, 1::2], 0, img_shape[0])\n results[key] = cbboxes\n else:\n # For n-point poly bounding boxes, LIST[ [ [[2*N]], [2*N]], [...] 
..., [2*KN]]\n cbboxes = []\n for cbox in results[key]:\n tmp_cbox = np.array(cbox, dtype=np.float32)\n if tmp_cbox.shape[1] == 4:\n tmp_cbox = tmp_cbox * results['scale_factor']\n else:\n new_tmp_cbox = []\n for ccbox in tmp_cbox:\n ccbox = np.array(ccbox, dtype=np.float32)\n ccbox[0::2] *= results['scale_factor'][0]\n ccbox[1::2] *= results['scale_factor'][1]\n new_tmp_cbox.append(ccbox)\n tmp_cbox = np.array(new_tmp_cbox, dtype=np.float32)\n if self.bbox_clip_border:\n tmp_cbox[:, 0::2] = np.clip(tmp_cbox[:, 0::2], 0, img_shape[1])\n tmp_cbox[:, 1::2] = np.clip(tmp_cbox[:, 1::2], 0, img_shape[0])\n cbboxes.append(tmp_cbox)\n results[key] = cbboxes", "title": "" }, { "docid": "0a30e7fe1f0cdcd8402f7d05d7a37dfa", "score": "0.54670906", "text": "def make_gt(bboxes, max_box_per_image, height_scale, width_scale):\n\n # delete bboxes containing [-1,-1,-1,-1, -1] added in **[1]\n bboxes = bboxes[~np.all(bboxes==-1, axis=1)]\n\n bboxes[:,0] *= width_scale\n bboxes[:,1] *= width_scale\n bboxes[:,2] *= height_scale\n bboxes[:,3] *= height_scale\n\n num_boxes, boxes_per_row = bboxes.shape\n\n assert boxes_per_row == 5\n\n arr = np.zeros((max_box_per_image,5), keras.backend.floatx())\n arr[:,-1] -= 1\n\n max_index = min(num_boxes, max_box_per_image)\n\n arr[:max_index, :5] = bboxes[:max_index, :5]\n\n\n return arr.astype('float32') #.astype(keras.backend.floatx())", "title": "" }, { "docid": "ca091e6e85f7f55e152832a1c72f6574", "score": "0.54613835", "text": "def affine_warp_boxes(affine, boxes, output_size, box_history):\n\n def _get_corners(box):\n \"\"\"Get the corner of each box as a tuple of (x, y) coordinates.\"\"\"\n ymi, xmi, yma, xma = tf.split(box, 4, axis=-1)\n tl = tf.concat([xmi, ymi], axis=-1)\n bl = tf.concat([xmi, yma], axis=-1)\n tr = tf.concat([xma, ymi], axis=-1)\n br = tf.concat([xma, yma], axis=-1)\n return tf.concat([tl, bl, tr, br], axis=-1)\n\n def _corners_to_boxes(corner):\n \"\"\"Convert (x, y) corners back into boxes [ymin, xmin, ymax, xmax].\"\"\"\n corner = tf.reshape(corner, [-1, 4, 2])\n y = corner[..., 1]\n x = corner[..., 0]\n y_min = tf.reduce_min(y, axis=-1)\n x_min = tf.reduce_min(x, axis=-1)\n y_max = tf.reduce_max(y, axis=-1)\n x_max = tf.reduce_max(x, axis=-1)\n return tf.stack([y_min, x_min, y_max, x_max], axis=-1)\n\n def _aug_boxes(affine_matrix, box):\n \"\"\"Apply an affine transformation matrix M to the boxes augment boxes.\"\"\"\n corners = _get_corners(box)\n corners = tf.reshape(corners, [-1, 4, 2])\n z = tf.expand_dims(tf.ones_like(corners[..., 1]), axis=-1)\n corners = tf.concat([corners, z], axis=-1)\n\n corners = tf.transpose(\n tf.matmul(affine_matrix, corners, transpose_b=True), perm=(0, 2, 1))\n\n corners, p = tf.split(corners, [2, 1], axis=-1)\n corners /= p\n corners = tf.reshape(corners, [-1, 8])\n box = _corners_to_boxes(corners)\n return box\n\n boxes = _aug_boxes(affine, boxes)\n box_history = _aug_boxes(affine, box_history)\n\n clipped_boxes = bbox_ops.clip_boxes(boxes, output_size)\n return clipped_boxes, box_history", "title": "" }, { "docid": "303886dc86fc1c691020c3fee3da172a", "score": "0.54574317", "text": "def get_bounding_boxes(kps, img_size, box_size):\n kps_resized = kps * np.array(img_size).reshape((-1, 2))\n half_width = box_size // 2\n offset = np.array([-half_width, -half_width, half_width, half_width])\n box_coordinates = np.concatenate([kps_resized, kps_resized], -1) + offset.reshape(\n (-1, 4)\n )\n box_list = np.split(box_coordinates, box_coordinates.shape[0], axis=0)\n box_list = [np.squeeze(b) for b in box_list]\n 
return box_list\n\n # def normalize(imgs, coords, stickmen, jo, box_factor):\n # out_imgs = list()\n # out_stickmen = list()\n\n # bs = len(imgs)\n # for i in range(bs):\n # img = imgs[i]\n # joints = coords[i]\n # stickman = stickmen[i]\n\n # h, w = img.shape[:2]\n # o_h = h\n # o_w = w\n # h = h // 2 ** box_factor\n # w = w // 2 ** box_factor\n # wh = np.array([w, h])\n # wh = np.expand_dims(wh, 0)\n\n # bparts = [\n # [\"lshoulder\", \"lhip\", \"rhip\", \"rshoulder\"],\n # [\"lshoulder\", \"rshoulder\", \"cnose\"],\n # [\"lshoulder\", \"lelbow\"],\n # [\"lelbow\", \"lwrist\"],\n # [\"rshoulder\", \"relbow\"],\n # [\"relbow\", \"rwrist\"],\n # [\"lhip\", \"lknee\"],\n # [\"rhip\", \"rknee\"],\n # ]\n # ar = 0.5\n\n # part_imgs = list()\n # part_stickmen = list()\n # for bpart in bparts:\n # part_img = np.zeros((h, w, 3))\n # part_stickman = np.zeros((h, w, 3))\n # M = get_crop(bpart, joints, jo, wh, o_w, o_h, ar)\n\n # if M is not None:\n # part_img = cv2.warpPerspective(\n # img, M, (h, w), borderMode=cv2.BORDER_REPLICATE\n # )\n # part_stickman = cv2.warpPerspective(\n # stickman, M, (h, w), borderMode=cv2.BORDER_REPLICATE\n # )\n\n # part_imgs.append(part_img)\n # part_stickmen.append(part_stickman)\n # img = np.concatenate(part_imgs, axis=2)\n # stickman = np.concatenate(part_stickmen, axis=2)\n\n # out_imgs.append(img)\n # out_stickmen.append(stickman)\n # out_imgs = np.stack(out_imgs)\n # out_stickmen = np.stack(out_stickmen)\n # return out_imgs, out_stickmen\n\n # def get_crop(bpart, joints, jo, wh, o_w, o_h, ar = 1.0):\n bpart_indices = [jo.index(b) for b in bpart]\n part_src = np.float32(joints[bpart_indices])\n\n # fall backs\n if not valid_joints(part_src):\n if bpart[0] == \"lhip\" and bpart[1] == \"lknee\":\n bpart = [\"lhip\"]\n bpart_indices = [jo.index(b) for b in bpart]\n part_src = np.float32(joints[bpart_indices])\n elif bpart[0] == \"rhip\" and bpart[1] == \"rknee\":\n bpart = [\"rhip\"]\n bpart_indices = [jo.index(b) for b in bpart]\n part_src = np.float32(joints[bpart_indices])\n elif (\n bpart[0] == \"lshoulder\" and bpart[1] == \"rshoulder\" and bpart[2] == \"cnose\"\n ):\n bpart = [\"lshoulder\", \"rshoulder\", \"rshoulder\"]\n bpart_indices = [jo.index(b) for b in bpart]\n part_src = np.float32(joints[bpart_indices])\n\n if not valid_joints(part_src):\n return None\n\n if part_src.shape[0] == 1:\n # leg fallback\n a = part_src[0]\n b = np.float32([a[0], o_h - 1])\n part_src = np.float32([a, b])\n\n if part_src.shape[0] == 4:\n pass\n elif part_src.shape[0] == 3:\n # lshoulder, rshoulder, cnose\n if bpart == [\"lshoulder\", \"rshoulder\", \"rshoulder\"]:\n segment = part_src[1] - part_src[0]\n normal = np.array([-segment[1], segment[0]])\n if normal[1] > 0.0:\n normal = -normal\n\n a = part_src[0] + normal\n b = part_src[0]\n c = part_src[1]\n d = part_src[1] + normal\n part_src = np.float32([a, b, c, d])\n else:\n assert bpart == [\"lshoulder\", \"rshoulder\", \"cnose\"]\n neck = 0.5 * (part_src[0] + part_src[1])\n neck_to_nose = part_src[2] - neck\n part_src = np.float32([neck + 2 * neck_to_nose, neck])\n\n # segment box\n segment = part_src[1] - part_src[0]\n normal = np.array([-segment[1], segment[0]])\n alpha = 1.0 / 2.0\n a = part_src[0] + alpha * normal\n b = part_src[0] - alpha * normal\n c = part_src[1] - alpha * normal\n d = part_src[1] + alpha * normal\n # part_src = np.float32([a,b,c,d])\n part_src = np.float32([b, c, d, a])\n else:\n assert part_src.shape[0] == 2\n\n segment = part_src[1] - part_src[0]\n normal = np.array([-segment[1], segment[0]])\n 
alpha = ar / 2.0\n a = part_src[0] + alpha * normal\n b = part_src[0] - alpha * normal\n c = part_src[1] - alpha * normal\n d = part_src[1] + alpha * normal\n part_src = np.float32([a, b, c, d])\n\n dst = np.float32([[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]])\n part_dst = np.float32(wh * dst)\n\n M = cv2.getPerspectiveTransform(part_src, part_dst)\n return M", "title": "" }, { "docid": "dd47bcecb7e19972d92ee6761166475e", "score": "0.54563814", "text": "def generate_prior_bboxes(prior_layer_cfg):\n\n sk_list = [0.2, 0.35, 0.5, 0.65, 0.8, 0.95, 1.1]\n\n priors_bboxes = []\n for feat_level_idx in range(0, len(prior_layer_cfg)): # iterate each layers\n layer_cfg = prior_layer_cfg[feat_level_idx]\n layer_feature_dim = layer_cfg['feature_dim_hw']\n layer_aspect_ratio = layer_cfg['aspect_ratio']\n\n # Todo: compute S_{k} (reference: SSD Paper equation 4.)\n sk = sk_list[feat_level_idx]\n fk = layer_cfg['feature_dim_hw'][0]\n\n for y in range(0, layer_feature_dim[0]):\n for x in range(0,layer_feature_dim[0]):\n\n # Todo: compute bounding box center\n cx = (x+0.5)/fk\n cy = (y+0.5)/fk\n\n # Todo: generate prior bounding box with respect to the aspect ratio\n for aspect_ratio in layer_aspect_ratio:\n if aspect_ratio == '1t':\n sk_ = np.sqrt(sk_list[feat_level_idx] * sk_list[feat_level_idx+1])\n aspect_ratio=1.0\n h = sk_ / np.sqrt(aspect_ratio)\n w = sk_ * np.sqrt(aspect_ratio)\n priors_bboxes.append([cx, cy, w, h])\n else:\n h = sk/np.sqrt(aspect_ratio)\n w = sk* np.sqrt(aspect_ratio)\n priors_bboxes.append([cx, cy, w, h])\n # np.set_printoptions(threshold=np.inf)\n # print(np.asarray(priors_bboxes))\n # Convert to Tensor\n priors_bboxes = torch.tensor(priors_bboxes)\n priors_bboxes = torch.clamp(priors_bboxes, 0.0, 1.0)\n num_priors = priors_bboxes.shape[0]\n\n # [DEBUG] check the output shape\n assert priors_bboxes.dim() == 2\n assert priors_bboxes.shape[1] == 4\n return priors_bboxes", "title": "" }, { "docid": "94f372c0c3aaebc4590c2e0d338bedcd", "score": "0.54435927", "text": "def _resize_bboxes(self, results):\n img_shape = results['img_shape']\n for key in results.get('bbox_fields', []):\n if isinstance(results[key], np.ndarray) and (len(results[key] > 0) and len(results[key][0]) == 4):\n # For 2-point rect bounding boxes, np.ndarray(K, 4)\n bboxes = results[key] * results['scale_factor']\n if self.bbox_clip_border:\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes\n else:\n # For any n-point poly bounding boxes.\n bboxes = []\n for box in results[key]:\n tmp_box = np.array(box, dtype=np.float32)\n tmp_box[0::2] *= results['scale_factor'][0]\n tmp_box[1::2] *= results['scale_factor'][1]\n if self.bbox_clip_border:\n tmp_box[0::2] = np.clip(tmp_box[0::2], 0, img_shape[1])\n tmp_box[1::2] = np.clip(tmp_box[1::2], 0, img_shape[0])\n bboxes.append(tmp_box)\n results[key] = bboxes", "title": "" }, { "docid": "3bce74203f9024346ba0ee4d50e2f90a", "score": "0.5440936", "text": "def resize_and_crop_boxes(boxes, image_scale, output_size, offset, box_history):\n\n # Shift and scale the input boxes.\n boxes *= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])\n boxes -= tf.tile(tf.expand_dims(offset, axis=0), [1, 2])\n\n # Check the hitory of the boxes.\n box_history *= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])\n box_history -= tf.tile(tf.expand_dims(offset, axis=0), [1, 2])\n\n # Clip the shifted and scaled boxes.\n clipped_boxes = bbox_ops.clip_boxes(boxes, output_size)\n return 
clipped_boxes, box_history", "title": "" }, { "docid": "f2a48292a04173eabf7f85407529e596", "score": "0.54277235", "text": "def produceCenterLabelMap(boxsize, sigma):\n test_X = np.linspace(1, boxsize, boxsize)\n test_Y = np.linspace(1, boxsize, boxsize)\n [X, Y] = np.meshgrid(test_X, test_Y)\n X = X - boxsize/2\n Y = Y - boxsize/2\n D2 = np.square(X) + np.square(Y)\n Exponent = D2 / (2.0 * sigma * sigma)\n label = np.exp(-Exponent)\n return label", "title": "" }, { "docid": "0b798937796bb07599bd14a0d7cb3a62", "score": "0.5411189", "text": "def tf_ssd_encode_boxes_loop(labels, # (N,)\r\n\t\t\t\t\t\t\t boxes, # (N, 4) <- (ymin, xmin, ymax, xmax)\r\n\t\t\t\t\t\t\t anchor_boxes, # (num_aboxes, 4) <- (cy, cx, h, w)\r\n\t\t\t\t\t\t\t prior_scaling,\r\n\t\t\t\t\t\t\t matching_threshold = 0.5):\r\n\tnum_aboxes = anchor_boxes.shape[0] # ()\r\n\r\n\t# compute coordinates of anchor boxes\r\n\taboxes_ymin = tf.cast(anchor_boxes[:, 0] - anchor_boxes[:, 2] / 2.0, tf.float32) # (num_aboxes,)\r\n\taboxes_xmin = tf.cast(anchor_boxes[:, 1] - anchor_boxes[:, 3] / 2.0, tf.float32) # (num_aboxes,)\r\n\taboxes_ymax = tf.cast(anchor_boxes[:, 0] + anchor_boxes[:, 2] / 2.0, tf.float32) # (num_aboxes,)\r\n\taboxes_xmax = tf.cast(anchor_boxes[:, 1] + anchor_boxes[:, 3] / 2.0, tf.float32) # (num_aboxes,)\r\n\taboxes = tf.stack([aboxes_ymin, aboxes_xmin, aboxes_ymax, aboxes_xmax], axis=-1)\r\n\r\n\t# initialize output tensors\r\n\tfeature_labels = tf.zeros([num_aboxes], dtype=tf.int64) # (num_aboxes,)\r\n\tfeature_scores = tf.zeros([num_aboxes], dtype=tf.float32) # (num_aboxes,)\r\n\tfeature_y_min = tf.zeros([num_aboxes], dtype=tf.float32) # (num_aboxes,)\r\n\tfeature_x_min = tf.zeros([num_aboxes], dtype=tf.float32) # (num_aboxes,)\r\n\tfeature_y_max = tf.ones([num_aboxes], dtype=tf.float32) # (num_aboxes,)\r\n\tfeature_x_max = tf.ones([num_aboxes], dtype=tf.float32) # (num_aboxes,)\r\n\r\n\t# define condition of the TF while loop\r\n\tdef condition(ii,\r\n\t\t\t\t feature_labels,\r\n\t\t\t\t feature_scores,\r\n\t\t\t\t feature_y_min,\r\n\t\t\t\t feature_x_min,\r\n\t\t\t\t feature_y_max,\r\n\t\t\t\t feature_x_max):\r\n\t\tcomparison = tf.less(ii, tf.shape(labels))\r\n\t\treturn tf.gather(comparison, 0)\r\n\r\n\t# define body of the TF while loop\r\n\tdef body(ii,\r\n\t\t\t feature_labels,\r\n\t\t\t feature_scores,\r\n\t\t\t feature_y_min,\r\n\t\t\t feature_x_min,\r\n\t\t\t feature_y_max,\r\n\t\t\t feature_x_max):\r\n\t\tlabel = tf.cast(tf.gather(labels, ii), tf.int64) # ()\r\n\t\tbox = tf.gather(boxes, ii) # (4,)\r\n\r\n\t\t# compute Jaccard score of box to anchor boxes\r\n\t\tjaccard = tf_jaccard_score_single(box, aboxes) # (num_aboxes,)\r\n\r\n\t\t# best and good abox for each box\r\n\t\tbest_abox_per_box = tf.argmax(jaccard, axis=0) # ()\r\n\t\tbest_abox_per_box_mask = tf.equal(tf.range(num_aboxes, dtype=tf.int64), best_abox_per_box) # (num_aboxes,)\r\n\t\tgood_abox_per_box_mask = tf.greater(jaccard, matching_threshold) # (num_aboxes,)\r\n\r\n\t\tmask = tf.logical_or(best_abox_per_box_mask, good_abox_per_box_mask) # (num_aboxes,)\r\n\t\tmask = tf.logical_and(mask, tf.greater(jaccard, feature_scores)) # (num_aboxes,)\r\n\t\timask = tf.cast(mask, tf.int64) # (num_aboxes,)\r\n\t\tfmask = tf.cast(mask, tf.float32) # (num_aboxes,)\r\n\r\n\t\t# update labels and boxes using the mask\r\n\t\tfeature_labels = imask * label + (1 - imask) * feature_labels # (num_aboxes,)\r\n\t\tfeature_scores = tf.where(mask, jaccard, feature_scores) # (num_aboxes,)\r\n\r\n\t\tfeature_y_min = fmask * tf.gather(box, 0) + (1.0 - fmask) * 
feature_y_min # (num_aboxes,)\r\n\t\tfeature_x_min = fmask * tf.gather(box, 1) + (1.0 - fmask) * feature_x_min # (num_aboxes,)\r\n\t\tfeature_y_max = fmask * tf.gather(box, 2) + (1.0 - fmask) * feature_y_max # (num_aboxes,)\r\n\t\tfeature_x_max = fmask * tf.gather(box, 3) + (1.0 - fmask) * feature_x_max # (num_aboxes,)\r\n\r\n\t\treturn [\r\n\t\t\ttf.add(ii, 1),\r\n\t\t\tfeature_labels,\r\n\t\t\tfeature_scores,\r\n\t\t\tfeature_y_min,\r\n\t\t\tfeature_x_min,\r\n\t\t\tfeature_y_max,\r\n\t\t\tfeature_x_max\r\n\t\t]\r\n\r\n\tii = tf.constant(0, dtype=tf.int32)\r\n\tresult = tf.while_loop(\r\n\t\tcondition,\r\n\t\tbody,\r\n\t\t[\r\n\t\t\tii,\r\n\t\t\tfeature_labels,\r\n\t\t\tfeature_scores,\r\n\t\t\tfeature_y_min,\r\n\t\t\tfeature_x_min,\r\n\t\t\tfeature_y_max,\r\n\t\t\tfeature_x_max\r\n\t\t]\r\n\t)\r\n\r\n\tfeature_labels = result[1]\r\n\tfeature_scores = result[2]\r\n\tfeature_y_min = result[3]\r\n\tfeature_x_min = result[4]\r\n\tfeature_y_max = result[5]\r\n\tfeature_x_max = result[6]\r\n\r\n\t# transform to center / size\r\n\tfeature_cy = (feature_y_max + feature_y_min) / 2.0 # (num_aboxes,)\r\n\tfeature_cx = (feature_x_max + feature_x_min) / 2.0 # (num_aboxes,)\r\n\tfeature_h = feature_y_max - feature_y_min # (num_aboxes,)\r\n\tfeature_w = feature_x_max - feature_x_min # (num_aboxes,)\r\n\r\n\t# encode features\r\n\tfeature_cy = (feature_cy - anchor_boxes[:, 0]) / anchor_boxes[:, 2] / prior_scaling[0] # (num_aboxes,)\r\n\tfeature_cx = (feature_cx - anchor_boxes[:, 1]) / anchor_boxes[:, 3] / prior_scaling[1] # (num_aboxes,)\r\n\tfeature_h = tf.log(feature_h / anchor_boxes[:, 2]) / prior_scaling[2] # (num_aboxes,)\r\n\tfeature_w = tf.log(feature_w / anchor_boxes[:, 3]) / prior_scaling[3] # (num_aboxes,)\r\n\r\n\t# reorder for ssd\r\n\tfeature_boxes = tf.stack(\r\n\t\t[feature_cx, feature_cy, feature_w, feature_h],\r\n\t\taxis = -1\r\n\t) # (num_aboxes, 4)\r\n\r\n\treturn (\r\n\t\tfeature_labels, # (num_aboxes,)\r\n\t\tfeature_scores, # (num_aboxes,)\r\n\t\tfeature_boxes # (num_aboxes, 4)\r\n\t)", "title": "" }, { "docid": "e0679e8605030bd437f9706b8e4d91b9", "score": "0.5354375", "text": "def norm_boxes(self, boxes, shape):\n h, w = shape\n scale = np.array([h - 1, w - 1, h - 1, w - 1])\n shift = np.array([0, 0, 1, 1])\n return np.divide((boxes - shift), scale).astype(np.float32)\n pass", "title": "" }, { "docid": "2ec123d8823476b82675fd55d264ab34", "score": "0.533954", "text": "def compute_decode_bbox_center_size(self, batch, data_offset, prior_data, is_tail):\n prior_width = prior_data[0]\n prior_height = prior_data[1]\n prior_center_x = prior_data[2]\n prior_center_y = prior_data[3]\n prior_var_dest_ub = prior_data[4]\n\n loc_dst_ub = self.instance.Tensor(self.dtype,\n (4, self.handle_each_dst),\n name=\"loc_dst_ub\",\n scope=tik.scope_ubuf)\n\n self.get_loc_data(batch, data_offset, loc_dst_ub, is_tail)\n\n #2.2\n decode_bbox_center_x = self.instance.Tensor(\n self.dtype, (self.handle_each_dst, ), name=\"decode_bbox_center_x\",\n scope=tik.scope_ubuf)\n decode_bbox_center_y = self.instance.Tensor(\n self.dtype, (self.handle_each_dst, ), name=\"decode_bbox_center_y\",\n scope=tik.scope_ubuf)\n decode_bbox_width = self.instance.Tensor(\n self.dtype, (self.handle_each_dst, ), name=\"decode_bbox_width\",\n scope=tik.scope_ubuf)\n decode_bbox_height = self.instance.Tensor(\n self.dtype, (self.handle_each_dst, ), name=\"decode_bbox_height\",\n scope=tik.scope_ubuf)\n decode_bbox_vexp = self.instance.Tensor(\n self.dtype, (self.handle_each_dst, ), name=\"decode_bbox_vexp\",\n 
scope=tik.scope_ubuf)\n\n\n handle_each_dst_loops = self.handle_each_dst // self.mask\n\n with self.instance.if_scope(self.variance_encoded_in_target):\n self.instance.vmul(self.mask,\n decode_bbox_center_x,\n loc_dst_ub[0, 0],\n prior_width,\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n self.instance.vadd(self.mask,\n decode_bbox_center_x,\n prior_center_x,\n decode_bbox_center_x,\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n\n self.instance.vmul(self.mask,\n decode_bbox_center_y,\n loc_dst_ub[1, 0],\n prior_height,\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n self.instance.vadd(self.mask,\n decode_bbox_center_y,\n prior_center_y,\n decode_bbox_center_y,\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n\n self.instance.vexp(self.mask,\n decode_bbox_vexp,\n loc_dst_ub[2, 0],\n handle_each_dst_loops, 1, 1, 8, 8)\n self.instance.vmul(self.mask,\n decode_bbox_width,\n decode_bbox_vexp,\n prior_width,\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n\n self.instance.vexp(self.mask,\n decode_bbox_vexp,\n loc_dst_ub[3, 0],\n handle_each_dst_loops, 1, 1, 8, 8)\n self.instance.vmul(self.mask,\n decode_bbox_height,\n decode_bbox_vexp,\n prior_height,\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n\n with self.instance.else_scope():\n self.instance.vmul(self.mask,\n decode_bbox_center_x,\n prior_var_dest_ub[0, 0],\n loc_dst_ub[0, 0],\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n self.instance.vmul(self.mask,\n decode_bbox_center_x,\n prior_width,\n decode_bbox_center_x,\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n self.instance.vadd(self.mask,\n decode_bbox_center_x,\n prior_center_x,\n decode_bbox_center_x,\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n\n self.instance.vmul(self.mask,\n decode_bbox_center_y,\n prior_var_dest_ub[1, 0],\n loc_dst_ub[1, 0],\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n self.instance.vmul(self.mask,\n decode_bbox_center_y,\n prior_height,\n decode_bbox_center_y,\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n self.instance.vadd(self.mask,\n decode_bbox_center_y,\n prior_center_y,\n decode_bbox_center_y,\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n\n self.instance.vmul(self.mask,\n decode_bbox_width,\n prior_var_dest_ub[2, 0],\n loc_dst_ub[2, 0],\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n self.instance.vexp(self.mask,\n decode_bbox_vexp,\n decode_bbox_width,\n handle_each_dst_loops, 1, 1, 8, 8)\n self.instance.vmul(self.mask,\n decode_bbox_width,\n decode_bbox_vexp,\n prior_width,\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n\n self.instance.vmul(self.mask,\n decode_bbox_height,\n prior_var_dest_ub[3, 0],\n loc_dst_ub[3, 0],\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n self.instance.vexp(self.mask,\n decode_bbox_vexp,\n decode_bbox_height,\n handle_each_dst_loops, 1, 1, 8, 8)\n self.instance.vmul(self.mask,\n decode_bbox_height,\n decode_bbox_vexp,\n prior_height,\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n\n return decode_bbox_center_x, decode_bbox_center_y, decode_bbox_width, \\\n decode_bbox_height", "title": "" }, { "docid": "e729e2ed6d2e61b848956c0ad1bd6a41", "score": "0.5333394", "text": "def bboxes_encode(self, labels, bboxes, anchors,\n scope=None):\n return ssd_common.tf_ssd_bboxes_encode(\n labels, bboxes, anchors,\n config.num_classes,\n config.no_annotation_label,\n ignore_threshold=0.5,\n prior_scaling=config.prior_scaling,\n scope=scope)", "title": "" }, { "docid": "35f59b2557f0f0a9d927df08891d1099", "score": "0.5320964", "text": "def _bbox_pred_to_bbox(points, bbox_pred):\n if bbox_pred.shape[0] == 0:\n return bbox_pred\n\n x_center = points[:, 0] + (bbox_pred[:, 1] - 
bbox_pred[:, 0]) / 2\n y_center = points[:, 1] + (bbox_pred[:, 3] - bbox_pred[:, 2]) / 2\n z_center = points[:, 2] + (bbox_pred[:, 5] - bbox_pred[:, 4]) / 2\n\n # dx_min, dx_max, dy_min, dy_max, dz_min, dz_max -> x, y, z, w, l, h\n base_bbox = torch.stack([\n x_center,\n y_center,\n z_center,\n bbox_pred[:, 0] + bbox_pred[:, 1],\n bbox_pred[:, 2] + bbox_pred[:, 3],\n bbox_pred[:, 4] + bbox_pred[:, 5],\n ], -1)\n\n # axis-aligned case\n if bbox_pred.shape[1] == 6:\n return base_bbox\n\n # rotated case: ..., sin(2a)ln(q), cos(2a)ln(q)\n scale = bbox_pred[:, 0] + bbox_pred[:, 1] + \\\n bbox_pred[:, 2] + bbox_pred[:, 3]\n q = torch.exp(\n torch.sqrt(\n torch.pow(bbox_pred[:, 6], 2) + torch.pow(bbox_pred[:, 7], 2)))\n alpha = 0.5 * torch.atan2(bbox_pred[:, 6], bbox_pred[:, 7])\n return torch.stack(\n (x_center, y_center, z_center, scale / (1 + q), scale /\n (1 + q) * q, bbox_pred[:, 5] + bbox_pred[:, 4], alpha),\n dim=-1)", "title": "" }, { "docid": "8620c6421040b455e7082ed35cd7e5e0", "score": "0.5320171", "text": "def bboxes_encode(self, bboxes, anchors, num,\n\t\t\t\t\t scope='text_bboxes_encode'):\n\t\treturn textbox_common.tf_text_bboxes_encode(\n\t\t\t\t\t\tbboxes, anchors, num,\n\t\t\t\t\t\tmatching_threshold=0.5,\n\t\t\t\t\t\tprior_scaling=self.params.prior_scaling,\n\t\t\t\t\t\tscope=scope)", "title": "" }, { "docid": "0389e594b14f6bb778095411fcbb4fb0", "score": "0.52936625", "text": "def calibrate_box(bboxes, offsets):\r\n x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]\r\n w = x2 - x1 + 1.0\r\n h = y2 - y1 + 1.0\r\n w = np.expand_dims(w, 1)\r\n h = np.expand_dims(h, 1)\r\n\r\n # this is what happening here:\r\n # tx1, ty1, tx2, ty2 = [offsets[:, i] for i in range(4)]\r\n # x1_true = x1 + tx1*w\r\n # y1_true = y1 + ty1*h\r\n # x2_true = x2 + tx2*w\r\n # y2_true = y2 + ty2*h\r\n # below is just more compact form of this\r\n\r\n # are offsets always such that\r\n # x1 < x2 and y1 < y2 ?\r\n\r\n translation = np.hstack([w, h, w, h]) * offsets\r\n bboxes[:, 0:4] = bboxes[:, 0:4] + translation\r\n return bboxes", "title": "" }, { "docid": "36d69b5ebc29edec2cda22cb5f653cc5", "score": "0.5286167", "text": "def bboxes_encode(self, labels, bboxes, anchors,\n ignore_threshold=0.5, scope=None):\n return tinyface_common.tinyface_bboxes_encode(\n labels, bboxes, anchors,\n self.params.num_classes,\n scope=scope\n )", "title": "" }, { "docid": "54049a24cd2a5b3878e4aba9bdd87e3e", "score": "0.5281373", "text": "def encode(self, reference_box, proposal):\n \n width = proposal[:, 2] - proposal[:, 0]\n height = proposal[:, 3] - proposal[:, 1]\n ctr_x = proposal[:, 0] + 0.5 * width\n ctr_y = proposal[:, 1] + 0.5 * height\n\n gt_width = reference_box[:, 2] - reference_box[:, 0]\n gt_height = reference_box[:, 3] - reference_box[:, 1]\n gt_ctr_x = reference_box[:, 0] + 0.5 * gt_width\n gt_ctr_y = reference_box[:, 1] + 0.5 * gt_height\n\n dx = self.weights[0] * (gt_ctr_x - ctr_x) / width\n dy = self.weights[1] * (gt_ctr_y - ctr_y) / height\n dw = self.weights[2] * torch.log(gt_width / width)\n dh = self.weights[3] * torch.log(gt_height / height)\n\n delta = torch.stack((dx, dy, dw, dh), dim=1)\n return delta", "title": "" }, { "docid": "a30e7d032975b71e5dd83eb4909fde5b", "score": "0.5250163", "text": "def create_Labels_For_Loss(gt_boxes, feat_stride=16, feature_shape=(64, 19), \\\n scales=np.array([8, 16, 32]), ratios=[0.5, 0.8, 1], \\\n image_size=(300, 1000)):\n width = feature_shape[0]\n height = feature_shape[1]\n batch_size = gt_boxes.shape[0]\n # shifts is the all candicate anchors(prediction of 
bounding boxes)\n center_x = np.arange(0, height) * feat_stride\n center_y = np.arange(0, width) * feat_stride\n center_x, center_y = np.meshgrid(center_x, center_y)\n # Shape is [Batch, Width*Height, 4]\n centers = np.zeros((batch_size, width*height, 4))\n centers[:] = np.vstack((center_x.ravel(), center_y.ravel(),\n center_x.ravel(), center_y.ravel())).transpose()\n A = scales.shape[0] * len(ratios)\n K = width * height # width * height\n anchors = np.zeros((batch_size, A, 4))\n anchors = generate_anchors(scales=scales, ratios=ratios) # Shape is [A, 4]\n\n candicate_anchors = centers.reshape(batch_size, K, 1, 4) + anchors # [Batch, K, A, 4]\n\n # shape is [B, K, A]\n is_inside = batch_inside_image(candicate_anchors, image_size[1], image_size[0])\n\n \"\"\"\n Everything up to this point can be saved to an npy file -> this shortens processing time.\n The only information needed is the feature map size and the image size.\n As the return values, obtain anchors and is_inside (0 or 1).\n \"\"\"\n # candicate_anchors: Shape is [Batch, K, A, 4]\n # gt_boxes: Shape is [Batch, G, 4]\n # true_index: Shape is [Batch, K, A]\n # false_index: Shape is [Batch, K, A]\n candicate_anchors, true_index, false_index = bbox_overlaps(\n np.ascontiguousarray(candicate_anchors, dtype=np.float),\n is_inside,\n gt_boxes)\n\n for i in range(batch_size):\n true_where = np.where(true_index[i] == 1)\n num_true = len(true_where[0])\n\n if num_true > 64:\n select = np.random.choice(num_true, num_true - 64, replace=False)\n num_true = 64\n batch = np.ones((select.shape[0]), dtype=np.int) * i\n true_where = remove_extraboxes(true_where[0], true_where[1], select, batch)\n true_index[true_where] = 0\n\n false_where = np.where(false_index[i] == 1)\n num_false = len(false_where[0])\n select = np.random.choice(num_false, num_false - (128-num_true), replace=False)\n batch = np.ones((select.shape[0]), dtype=np.int) * i\n false_where = remove_extraboxes(false_where[0], false_where[1], select, batch)\n false_index[false_where] = 0\n\n return candicate_anchors, true_index, false_index", "title": "" }, { "docid": "47ab19e0c2292a129081df6ab1276af7", "score": "0.52442086", "text": "def fit(self, boxes):\n assert self.k < len(boxes), \"K must be less than the number of data.\"\n\n # If the current number of iterations is greater than 0, then reset\n if self.n_iter > 0:\n self.n_iter = 0\n\n np.random.seed(self.random_seed)\n n = boxes.shape[0]\n\n # Initialize K cluster centers 
(i.e., K anchors)\n self.anchors_ = boxes[np.random.choice(n, self.k, replace=True)]\n\n self.labels_ = np.zeros((n,))\n\n while True:\n self.n_iter += 1\n\n # If the current number of iterations is greater than max number of iterations , then break\n if self.n_iter > self.max_iter:\n break\n\n self.ious_ = self.iou(boxes, self.anchors_)\n distances = 1 - self.ious_\n cur_labels = np.argmin(distances, axis=1)\n\n # If anchors not change any more, then break\n if (cur_labels == self.labels_).all():\n break\n\n # Update K anchors\n for i in range(self.k):\n self.anchors_[i] = np.mean(boxes[cur_labels == i], axis=0)\n\n self.labels_ = cur_labels", "title": "" }, { "docid": "e6cf7bad4459582b7ddef537961c729d", "score": "0.52101123", "text": "def preprocess_true_boxes(self, true_boxes):\n\n num_anchors = len(self.anchors)\n conv_height = int(self.out_H)\n conv_width = int(self.out_W)\n\n detectors_mask = np.zeros(\n (conv_height, conv_width, num_anchors, 1), dtype=np.float32)\n matching_true_boxes = np.zeros(\n (conv_height, conv_width, num_anchors, 4),\n dtype=np.float32)\n labels_true_boxes = np.zeros((conv_height, conv_width, num_anchors, self.n_classes), dtype=np.float32)\n\n for box in true_boxes:\n # scale box to convolutional feature spatial dimensions\n class_ind = int(box[4])\n box = box[0:4] * np.array(\n [conv_width, conv_height, conv_width, conv_height])\n i = np.floor(box[1]).astype('int')\n j = np.floor(box[0]).astype('int')\n best_iou = 0\n best_anchor = 0\n for k, anchor in enumerate(np.array(self.anchors)):\n # Find IOU between box shifted to origin and anchor box.\n box_maxes = box[2:4] / 2.\n box_mins = -box_maxes\n anchor_maxes = (anchor / 2.)\n anchor_mins = -anchor_maxes\n\n intersect_mins = np.maximum(box_mins, anchor_mins)\n intersect_maxes = np.minimum(box_maxes, anchor_maxes)\n intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[0] * intersect_wh[1]\n box_area = box[2] * box[3]\n anchor_area = anchor[0] * anchor[1]\n iou = intersect_area / (box_area + anchor_area - intersect_area)\n if iou > best_iou:\n best_iou = iou\n best_anchor = k\n\n if best_iou > 0:\n detectors_mask[i, j, best_anchor] = 1\n adjusted_box = np.array(\n [\n box[0] - j, box[1] - i,\n np.log(box[2] / self.anchors[best_anchor][0]),\n np.log(box[3] / self.anchors[best_anchor][1])\n ],\n dtype=np.float32)\n matching_true_boxes[i, j, best_anchor] = adjusted_box\n labels_true_boxes[i, j, best_anchor, class_ind] = 1\n return matching_true_boxes, detectors_mask, labels_true_boxes", "title": "" }, { "docid": "ce94b44b2360c479a34592ce7ba06d69", "score": "0.51913303", "text": "def scale(uv_coord, K, bbox, new_size):\r\n xmin, xmax, ymin, ymax = bbox\r\n\r\n uv_coord[:, 0] = (uv_coord[:, 0] - xmin) / (xmax - xmin + 1.) * new_size[1]\r\n uv_coord[:, 1] = (uv_coord[:, 1] - ymin) / (ymax - ymin + 1.) 
* new_size[0]\r\n\r\n xscale = new_size[1] / (xmax - xmin + 1.)\r\n yscale = new_size[0] / (ymax - ymin + 1.)\r\n\r\n shift = [[1, 0, -xmin],\r\n [0, 1, -ymin],\r\n [0, 0, 1]]\r\n\r\n scale = [[xscale, 0, 0],\r\n [0, yscale, 0],\r\n [0, 0, 1]]\r\n\r\n shift = np.array(shift)\r\n scale = np.array(scale)\r\n\r\n K = np.matmul(scale, np.matmul(shift, K))\r\n\r\n return uv_coord, K", "title": "" }, { "docid": "0fbf2a9a8d23e1d0b180396cc89030d9", "score": "0.5190764", "text": "def get_bboxes_adapted_to_input_size(instances, input_size):\n # scale factors used to resize the images to the size expected by the model\n scale_factors = {\n ann[\"id\"]: input_size / max(ann[\"width\"], ann[\"height\"])\n for ann in instances[\"images\"]\n }\n return np.array(\n [\n np.array(ann[\"bbox\"][-2:]) * scale_factors[ann[\"image_id\"]]\n for ann in instances[\"annotations\"]\n ]\n )", "title": "" }, { "docid": "4b75f0ba89266df11ce1f7036b08492d", "score": "0.5176437", "text": "def bbox_transform_inv(self, boxes, deltas, name=\"bbox_transform_inverse\"):\n with tf.variable_scope(name):\n cx, cy, w, h = tf.split(boxes, 4, axis=-1)\n dx, dy, dw, dh = tf.split(deltas, 4, axis=-1)\n pred_ctr_x = tf.add(tf.multiply(dx, dw), cx)\n pred_ctr_y = tf.add(tf.multiply(dy, dh), cy)\n pred_w = tf.multiply(tf.exp(dw), w)\n pred_h = tf.multiply(tf.exp(dh), h)\n return tf.stack([pred_ctr_x, pred_ctr_y, pred_w, pred_h], axis=-1)", "title": "" }, { "docid": "79443ccae6b57d66eef9df0f79bb8c69", "score": "0.51741594", "text": "def loc2bbox(loc, priors, center_var=0.1, size_var=0.2):\n # assert priors.shape[0] == 1\n # assert priors.dim() == 3\n\n # prior bounding boxes\n p_center = priors[..., :2]\n p_size = priors[..., 2:]\n\n # locations\n l_center = loc[..., :2]\n l_size = loc[..., 2:]\n\n # real bounding box\n return torch.cat([\n center_var * l_center * p_size + p_center, # b_{center}\n p_size * torch. 
exp(size_var * l_size) # b_{size}\n ], dim=-1)", "title": "" }, { "docid": "b23405ee4297ba709c65e71f3e2efbd2", "score": "0.5164358", "text": "def generate_prior_bboxes(prior_layer_cfg):\n example_prior_layer_cfg = [\n # Example:\n {'layer_name': 'Conv4', 'feature_dim_hw': (64, 64), 'bbox_size': (60, 60), 'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)},\n {'layer_name': 'Conv4', 'feature_dim_hw': (64, 64), 'bbox_size': (60, 60), 'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)}\n # ...\n # TODO: define your feature map settings\n ]\n\n priors_bboxes = []\n for feat_level_idx in range(0, len(prior_layer_cfg)): # iterate each layers\n layer_cfg = prior_layer_cfg[feat_level_idx]\n layer_feature_dim = layer_cfg['feature_dim_hw']\n layer_aspect_ratio = layer_cfg['aspect_ratio']\n\n # Todo: compute S_{k} (reference: SSD Paper equation 4.)\n s_min = 0.1\n s_max = 0.9\n k = feat_level_idx + 1\n m = len(prior_layer_cfg)\n\n sk = s_min + ((s_max - s_min) / (m - 1)) * (k - 1)\n fk = layer_feature_dim[0]\n\n for y in range(0, layer_feature_dim[0]):\n for x in range(0,layer_feature_dim[0]):\n # Todo: compute bounding box center\n cx = (x + 0.5) / fk\n cy = (y + 0.5) / fk\n\n # Todo: generate prior bounding box with respect to the aspect ratio\n for aspect_ratio in layer_aspect_ratio:\n h = sk / np.sqrt(aspect_ratio)\n w = sk * np.sqrt(aspect_ratio)\n priors_bboxes.append([cx, cy, w, h])\n\n # Convert to Tensor\n priors_bboxes = torch.tensor(priors_bboxes)\n priors_bboxes = torch.clamp(priors_bboxes, 0.0, 1.0)\n num_priors = priors_bboxes.shape[0]\n\n if torch.cuda.is_available():\n priors_bboxes = priors_bboxes.cuda()\n\n # [DEBUG] check the output shape\n assert priors_bboxes.dim() == 2\n assert priors_bboxes.shape[1] == 4\n return priors_bboxes", "title": "" }, { "docid": "0aeb235195a1df8521f426bf5f3574d6", "score": "0.5161039", "text": "def _convert_to_batch_coords(bboxes, height, width, max_height, max_width):\n height, width, max_height, max_width = (tf.cast(height, tf.float32),\n tf.cast(width, tf.float32),\n tf.cast(max_height, tf.float32),\n tf.cast(max_width, tf.float32))\n height, width = tf.expand_dims(height, axis=1), tf.expand_dims(width, axis=1)\n\n ymin, xmin, ymax, xmax = tf.unstack(bboxes, axis=-1)\n return tf.stack([\n ymin * height / max_height, xmin * width / max_width,\n ymax * height / max_height, xmax * width / max_width\n ], -1)", "title": "" }, { "docid": "7a43a74cc98defb5c49b056a10029bee", "score": "0.51384264", "text": "def encode_box(self, box, return_iou=True):\n iou = self.iou(box)\n encoded_box = np.zeros((self.num_priors, 4 + return_iou))\n assign_mask = iou > self.overlap_threshold\n if not assign_mask.any():\n assign_mask[iou.argmax()] = True\n if return_iou:\n encoded_box[:, -1][assign_mask] = iou[assign_mask]\n assigned_priors = self.priors[assign_mask]\n box_center = 0.5 * (box[:2] + box[2:])\n box_wh = box[2:] - box[:2]\n assigned_priors_center = 0.5 * (assigned_priors[:, :2] +\n assigned_priors[:, 2:4])\n assigned_priors_wh = (assigned_priors[:, 2:4] -\n assigned_priors[:, :2])\n # we encode variance\n encoded_box[:, :2][assign_mask] = box_center - assigned_priors_center\n encoded_box[:, :2][assign_mask] /= assigned_priors_wh\n encoded_box[:, :2][assign_mask] /= assigned_priors[:, -4:-2]\n encoded_box[:, 2:4][assign_mask] = np.log(box_wh /\n assigned_priors_wh)\n encoded_box[:, 2:4][assign_mask] /= assigned_priors[:, -2:]\n return encoded_box.ravel()", "title": "" }, { "docid": "ad8ec8992b4eb0153750ebd2b5216074", "score": "0.51354754", "text": "def 
decode_boxes(self,\n pred_dict):\n fmap = pred_dict['center_pred']\n dim_pred = pred_dict['dim_pred']\n xy_pred = pred_dict['xy_pred']\n z_pred = pred_dict['z_pred']\n dir_pred = pred_dict['dir_pred']\n batch, channel, height, width = fmap.shape\n\n xs=torch.arange(0,width).type_as(fmap)\n ys=torch.arange(0,height).type_as(fmap)\n ys,xs=torch.meshgrid([ys,xs])\n xs=xs.unsqueeze(0).unsqueeze(1).expand(batch, -1, height, width)\n ys= ys.unsqueeze(0).unsqueeze(1).expand(batch, -1, height, width)\n xs=xy_pred[:,0:1,:,:]+xs\n ys=xy_pred[:,1:,:,:]+ys\n # centers=torch.cat([xs,ys],dim=1).permute(0,2,3,1).contiguous()\n dim_pred=dim_pred/0.05/self.downsample_ratio\n zs = z_pred\n\n if self.num_dir_bins<=0:\n dir_pred = torch.atan2(dir_pred[:,0:1,:,:], dir_pred[:,1:,:,:])\n else:\n dir_bin = torch.argmax(dir_pred[:,:self.num_dir_bins,:,:], dim=1,keepdim=True)\n dir_res = torch.gather(dir_pred[:,self.num_dir_bins:,:,:], dim=1,\n index=dir_bin)\n dir_pred=self.class2angle(dir_bin,dir_res)\n\n bboxes_batch = torch.cat([xs, ys, zs, dim_pred, dir_pred], dim=1).detach()\n bboxes_batch=bboxes_batch.permute(0,2,3,1).contiguous().reshape(-1,7)\n boxes_pred_instances = LiDARInstance3DBoxes(bboxes_batch, origin=(0.5, 0.5, 0))\n corners_pred = boxes_pred_instances.corners.reshape(batch,height,width,8,3)\n corners_pred=corners_pred[:,:,:,::2,:2].reshape(batch,height,-1,2).int().float()\n centers_pred=boxes_pred_instances.gravity_center.reshape(batch,height,width,3)\n centers_pred=centers_pred[:,:,:,:2].int().float()\n centers_pred[:,:,:,0]=torch.clamp(centers_pred[:,:,:,0],0,width-1)\n centers_pred[:,:,:,1]=torch.clamp(centers_pred[:,:,:,1],0,height-1)\n corners_pred[:,:,:,0]=torch.clamp(corners_pred[:,:,:,0],0,width-1)\n corners_pred[:,:,:,1]=torch.clamp(corners_pred[:,:,:,1],0,height-1)\n\n return centers_pred,corners_pred", "title": "" }, { "docid": "aedf8e0c16e5f7ba8d931318e8e35d50", "score": "0.5131051", "text": "def to_imgaug_box(self) -> imgaug.BoundingBox:\n to_label = self.metadata\n to_label['class'] = self.class_name\n return imgaug.BoundingBox(x1=self.x1, y1=self.y1, x2=self.x2, y2=self.y2, label=to_label)", "title": "" }, { "docid": "7a929a3aaa340b6583dd992ab5b41bc9", "score": "0.51278394", "text": "def bounding_box(self):\n nw = self.args['segment_queries']['bounding_box']['input_nw_corner'].replace(\" N\", \"\").replace(\" W\", \"\")\n nw = nw.split(', ')\n se = self.args['segment_queries']['bounding_box']['input_se_corner'].replace(\" N\", \"\").replace(\" W\", \"\")\n se = se.split(', ')\n return \"geom @ ST_MakeEnvelope (-{}, {}, -{}, {}) and ST_Length(geom) > 0\".format(nw[1], nw[0], se[1], se[0])", "title": "" }, { "docid": "2c5a1577895403652471bcb6739090cb", "score": "0.51270914", "text": "def process_outputs(self, outputs, image_size):\n boxes = []\n box_confidences = []\n box_class_probs = []\n img_h, img_w = image_size\n\n for output in outputs:\n boxes.append(output[..., 0:4])\n box_confidences.append(self.sigmoid(output[..., 4, np.newaxis]))\n box_class_probs.append(self.sigmoid(output[..., 5:]))\n for i, box in enumerate(boxes):\n gr_h, gr_w, anchors_boxes, _ = box.shape\n cx = np.indices((gr_h, gr_w, anchors_boxes))[1]\n cy = np.indices((gr_h, gr_w, anchors_boxes))[0]\n t_x = box[..., 0]\n t_y = box[..., 1]\n t_w = box[..., 2]\n t_h = box[..., 3]\n p_w = self.anchors[i, :, 0]\n p_h = self.anchors[i, :, 1]\n bx = (self.sigmoid(t_x) + cx) / gr_w\n by = (self.sigmoid(t_y) + cy) / gr_h\n bw = (np.exp(t_w) * p_w) / self.model.input.shape[1].value\n bh = (np.exp(t_h) * p_h) / 
self.model.input.shape[2].value\n top_left_x = bx - bw / 2\n top_left_y = by - bh / 2\n bottom_right_x = bx + bw / 2\n bottom_right_y = by + bh / 2\n box[..., 0] = top_left_x * img_w\n box[..., 1] = top_left_y * img_h\n box[..., 2] = bottom_right_x * img_w\n box[..., 3] = bottom_right_y * img_h\n return boxes, box_confidences, box_class_probs", "title": "" }, { "docid": "7d244f0d105084b2fef71a8038578065", "score": "0.51244086", "text": "def norm_boxes(boxes, shape):\n h, w = shape\n scale = np.array([h - 1, w - 1, h - 1, w - 1])\n shift = np.array([0, 0, 1, 1])\n return np.divide((boxes - shift), scale).astype(np.float32)", "title": "" }, { "docid": "7d244f0d105084b2fef71a8038578065", "score": "0.51244086", "text": "def norm_boxes(boxes, shape):\n h, w = shape\n scale = np.array([h - 1, w - 1, h - 1, w - 1])\n shift = np.array([0, 0, 1, 1])\n return np.divide((boxes - shift), scale).astype(np.float32)", "title": "" }, { "docid": "1876745f56d79bcb519e824c7b7363d5", "score": "0.5118919", "text": "def normalize_size(s, y_enc, min_size=5, max_size=30):\n N, K = y_enc.shape\n for k in range(K):\n is_k = y_enc[:, k] > 0.\n s_k = s[is_k]\n s_k = (s_k - np.min(s_k)) / (np.max(s_k) - np.min(s_k))\n s[is_k] = s_k * (max_size - min_size) + min_size\n return s", "title": "" }, { "docid": "2e2c2c33d3237b2e8dc9da6092caffdc", "score": "0.51141363", "text": "def loc2bbox(loc, priors, center_var=0.1, size_var=0.2):\n assert priors.shape[0] == 1\n assert priors.dim() == 3\n\n # prior bounding boxes\n p_center = priors[..., :2]\n p_size = priors[..., 2:]\n\n # locations\n l_center = loc[..., :2]\n l_size = loc[..., 2:]\n\n # real bounding box\n return torch.cat([\n center_var * l_center * p_size + p_center, # b_{center}\n p_size * torch.exp(size_var * l_size) # b_{size}\n ], dim=-1)", "title": "" }, { "docid": "e6ec1d9f6c8b4137e39f8cccfca192d3", "score": "0.5103207", "text": "def convert_box_coordinates(detections):\n\n split = np.array_split(detections, [1, 2, 3, 4, 85], axis=2)\n center_x = split[0]\n center_y = split[1]\n width = split[2]\n height = split[3]\n attrs = split[4]\n \n w2 = width / 2\n h2 = height / 2\n x0 = center_x - w2\n y0 = center_y - h2\n x1 = center_x + w2\n y1 = center_y + h2\n\n boxes = np.concatenate([x0, y0, x1, y1], axis=-1)\n detections = np.concatenate([boxes, attrs], axis=-1)\n \n return detections", "title": "" }, { "docid": "28efc955f118acd7ccbf3a16bbbe474f", "score": "0.5099018", "text": "def centers_to_bboxes(self, point_list):\n bbox_list = []\n for i_img, point in enumerate(point_list):\n bbox = []\n for i_lvl in range(len(self.point_strides)):\n scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5\n bbox_shift = torch.Tensor([-scale, -scale, scale,\n scale]).view(1, 4).type_as(point[0])\n bbox_center = torch.cat(\n [point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1)\n bbox.append(bbox_center + bbox_shift)\n bbox_list.append(bbox)\n return bbox_list", "title": "" }, { "docid": "6133deb1f86944e146cfee70860aee03", "score": "0.5071915", "text": "def draw_boxes(output_filename, classes_filename, inputs, original_image, resized_image):\n \n names = {}\n with open(classes_filename) as f:\n class_names = f.readlines()\n for id, name in enumerate(class_names):\n names[id] = name\n\n height_ratio = original_image.shape[0] / resized_image.shape[0]\n width_ratio = original_image.shape[1] / resized_image.shape[1]\n ratio = (width_ratio, height_ratio)\n\n for object_class, box_coords_and_prob in inputs.items():\n for box_coord, object_prob in box_coords_and_prob:\n\n 
box_coord = box_coord.reshape(2,2) * ratio\n box_coord = box_coord.reshape(-1)\n\n x0y0 = (int(box_coord[0]),int(box_coord[1]))\n x1y1 = (int(box_coord[2]), int(box_coord[3]))\n\n textx0y0 = (x0y0[0],x0y0[1]-4)\n\n cv2.rectangle(original_image, x0y0, x1y1, (255,255,255), 3)\n text_label = str(names[object_class])[:-1] + \", \" + str(round(object_prob*100,2)) + \"%\"\n cv2.putText(original_image, text_label, textx0y0, cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 3)\n\n cv2.imwrite(output_filename, cv2.cvtColor(original_image, cv2.COLOR_RGB2BGR))", "title": "" }, { "docid": "5d034de3d426f8f6c4f533844097ca55", "score": "0.5036438", "text": "def compute_prior_bbox_center_size(self, batch, data_offset, is_tail):\n # 1. get bbox\n prior_bbox_dest_ub = self.instance.Tensor(\n self.dtype, (4, self.handle_each_dst), name=\"prior_bbox_dest_ub\",\n scope=tik.scope_ubuf)\n prior_var_dest_ub = self.instance.Tensor(\n self.dtype, (4, self.handle_each_dst), name=\"prior_var_dest_ub\",\n scope=tik.scope_ubuf)\n\n self.get_priorbox_data((batch, data_offset, prior_bbox_dest_ub,\n prior_var_dest_ub), is_tail)\n\n prior_width = self.instance.Tensor(self.dtype, (self.handle_each_dst, ),\n name=\"prior_width\", scope=tik.scope_ubuf)\n prior_height = self.instance.Tensor(self.dtype, (self.handle_each_dst, ),\n name=\"prior_height\", scope=tik.scope_ubuf)\n prior_center_x = self.instance.Tensor(self.dtype, (self.handle_each_dst, ),\n name=\"prior_center_x\", scope=tik.scope_ubuf)\n prior_center_y = self.instance.Tensor(self.dtype, (self.handle_each_dst, ),\n name=\"prior_center_y\", scope=tik.scope_ubuf)\n\n handle_each_dst_loops = self.handle_each_dst // self.mask\n\n self.instance.vsub(self.mask, prior_width,\n prior_bbox_dest_ub[2, 0],\n prior_bbox_dest_ub[0, 0],\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n self.instance.vsub(self.mask, prior_height,\n prior_bbox_dest_ub[3, 0],\n prior_bbox_dest_ub[1, 0],\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n\n self.instance.vadd(self.mask, prior_center_x,\n prior_bbox_dest_ub[2, 0],\n prior_bbox_dest_ub[0, 0],\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n self.instance.vmuls(self.mask, prior_center_x,\n prior_center_x,\n 0.5, handle_each_dst_loops, 1, 1, 8, 8)\n\n self.instance.vadd(self.mask, prior_center_y,\n prior_bbox_dest_ub[3, 0],\n prior_bbox_dest_ub[1, 0],\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n self.instance.vmuls(self.mask, prior_center_y,\n prior_center_y,\n 0.5, handle_each_dst_loops, 1, 1, 8, 8)\n\n return prior_width, prior_height, prior_center_x, prior_center_y, prior_var_dest_ub", "title": "" }, { "docid": "bd7eeddbd651a5d0fd536c5c3d9d3aab", "score": "0.50273323", "text": "def detection_layer(input, num_classes, img_size, anchors):\n img_size = (416,416)\n num_anchors = len(anchors)\n predict = layers.conv2d(input, num_anchors * (5 + num_classes), 1, strides=1)\n shape = predict.get_shape().as_list()\n grid_size = shape[1:3]\n grids_num = grid_size[0] * grid_size[1]\n bboxes = 5 + num_classes\n predict = tf.reshape(predict, [-1, grids_num, num_anchors ,bboxes])\n \n box_centers, box_sizes, confidence, classes = tf.split(\n predict, [2, 2, 1, num_classes], axis=-1)\n\n box_centers = tf.nn.sigmoid(box_centers)\n confidence = tf.nn.sigmoid(confidence)\n\n batch_size = shape[0]\n a = tf.range(grid_size[0], dtype=tf.float32)\n b = tf.range(grid_size[1], dtype=tf.float32)\n x_offset = tf.reshape(a, (-1, 1))\n x_offset = tf.tile(x_offset,[grid_size[1],1])\n y_offset = tf.reshape(b, (1, -1))\n y_offset = 
tf.reshape(tf.transpose(tf.tile(y_offset,[grid_size[0],1]),[1,0]),[grids_num,1])\n x_y_offset = tf.concat([x_offset,y_offset],axis=-1)\n x_y_offset = tf.tile(tf.reshape(x_y_offset,[1,-1,1,2]),[1,1,num_anchors,1])\n \n box_centers = (box_centers + x_y_offset)*(img_size)/grid_size\n\n anchors = tf.tile(tf.reshape(anchors,[1,-1,2]),[grids_num,1,1])\n anchors = tf.cast(anchors,dtype=tf.float32)\n\n box_sizes = tf.exp(box_sizes) * anchors\n\n classes = tf.nn.sigmoid(classes)\n \n result_detect_result = tf.concat([box_centers,box_sizes, confidence, classes],axis=-1)\n result_detect_result = tf.reshape(result_detect_result,[-1,grids_num*num_anchors,result_detect_result.get_shape().as_list()[-1]])\n\n return result_detect_result", "title": "" }, { "docid": "47b4e699f6ea1d205ba641661e84d687", "score": "0.50200754", "text": "def expand_boxes(boxes, scale):\n w_half = (boxes[:, 2] - boxes[:, 0]) * .5\n h_half = (boxes[:, 3] - boxes[:, 1]) * .5\n x_c = (boxes[:, 2] + boxes[:, 0]) * .5\n y_c = (boxes[:, 3] + boxes[:, 1]) * .5\n\n w_half *= scale\n h_half *= scale\n\n boxes_exp = np.zeros(boxes.shape)\n boxes_exp[:, 0] = x_c - w_half\n boxes_exp[:, 2] = x_c + w_half\n boxes_exp[:, 1] = y_c - h_half\n boxes_exp[:, 3] = y_c + h_half\n\n return boxes_exp", "title": "" }, { "docid": "2b45c962ab0ff5c8b7c1c544c0f05973", "score": "0.50162095", "text": "def predict_bbs(self, expand_bb_scale_x=0.18, expand_bb_scale_y=0.23):\n resized_image = paragraph_segmentation_transform(self.image, self.form_size)\n # print(resized_image.shape)\n bb_predicted = self.paragraph_segmentation_net(resized_image.as_in_context(self.device))\n bb_predicted = bb_predicted[0].asnumpy()\n # all train set was in the middle\n self.predicted_text_area = expand_bounding_box(bb_predicted,\n expand_bb_scale_x=expand_bb_scale_x,\n expand_bb_scale_y=expand_bb_scale_y)\n if self.show:\n # s_y, s_x = int(i/2), int(i%2)\n _, ax = plt.subplots(1, figsize=(15, 18))\n ax.imshow(self.image, cmap='Greys_r')\n (x, y, w, h) = bb_predicted\n image_h, image_w = self.image.shape[-2:]\n (x, y, w, h) = (x * image_w, y * image_h, w * image_w, h * image_h)\n rect = patches.Rectangle((x, y), w, h, fill=False, color=\"r\", ls=\"--\")\n ax.add_patch(rect)\n ax.axis('off')\n return self.predicted_text_area", "title": "" }, { "docid": "20f91ff356fb1d09d86c2d5c4b483c7b", "score": "0.50078773", "text": "def scale_back_batch(self, bboxes_in, scores_in,device):\n \n bboxes_in = bboxes_in.transpose([0,2,1])\n scores_in = scores_in.transpose([0,2,1])\n\n bboxes_in[:, :, :2] = self.scale_xy*bboxes_in[:, :, :2]\n bboxes_in[:, :, 2:] = self.scale_wh*bboxes_in[:, :, 2:]\n\n bboxes_in[:, :, :2] = bboxes_in[:, :, :2]*self.dboxes_xywh[:, :, 2:] + self.dboxes_xywh[:, :, :2]\n bboxes_in[:, :, 2:] = np.exp(bboxes_in[:, :, 2:])*self.dboxes_xywh[:, :, 2:]\n\n # Transform format to ltrb \n l, t, r, b = bboxes_in[:, :, 0] - 0.5*bboxes_in[:, :, 2],\\\n bboxes_in[:, :, 1] - 0.5*bboxes_in[:, :, 3],\\\n bboxes_in[:, :, 0] + 0.5*bboxes_in[:, :, 2],\\\n bboxes_in[:, :, 1] + 0.5*bboxes_in[:, :, 3]\n\n bboxes_in[:, :, 0] = l\n bboxes_in[:, :, 1] = t\n bboxes_in[:, :, 2] = r\n bboxes_in[:, :, 3] = b\n\n return bboxes_in, softmax_cpu(scores_in, dim=-1)", "title": "" }, { "docid": "f133377840e9334f15c784fa2ce38146", "score": "0.50007075", "text": "def forward(self, anchor_points_list, gt_bboxes, labels, inside_gt_bbox_mask):\n inside_gt_bbox_mask = inside_gt_bbox_mask.clone()\n num_gts = len(labels)\n num_points = sum([len(item) for item in anchor_points_list])\n if num_gts == 0:\n return 
gt_bboxes.new_zeros(num_points, num_gts), inside_gt_bbox_mask\n center_prior_list = []\n for slvl_points, stride in zip(anchor_points_list, self.strides):\n single_level_points = slvl_points[:, None, :].expand((slvl_points.size(0), len(gt_bboxes), 2))\n gt_center_x = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2\n gt_center_y = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2\n gt_center = torch.stack((gt_center_x, gt_center_y), dim=1)\n gt_center = gt_center[None]\n instance_center = self.mean[labels][None]\n instance_sigma = self.sigma[labels][None]\n distance = ((single_level_points - gt_center) / float(stride) - instance_center) ** 2\n center_prior = torch.exp(-distance / (2 * instance_sigma ** 2)).prod(dim=-1)\n center_prior_list.append(center_prior)\n center_prior_weights = torch.cat(center_prior_list, dim=0)\n if self.force_topk:\n gt_inds_no_points_inside = torch.nonzero(inside_gt_bbox_mask.sum(0) == 0).reshape(-1)\n if gt_inds_no_points_inside.numel():\n topk_center_index = center_prior_weights[:, gt_inds_no_points_inside].topk(self.topk, dim=0)[1]\n temp_mask = inside_gt_bbox_mask[:, gt_inds_no_points_inside]\n inside_gt_bbox_mask[:, gt_inds_no_points_inside] = torch.scatter(temp_mask, dim=0, index=topk_center_index, src=torch.ones_like(topk_center_index, dtype=torch.bool))\n center_prior_weights[~inside_gt_bbox_mask] = 0\n return center_prior_weights, inside_gt_bbox_mask", "title": "" }, { "docid": "d9bea47622ff2031a9b5ab08d5f92ed3", "score": "0.50005513", "text": "def decode_regression_to_boxes(preds):\n preds_bbox = keras.layers.Reshape((-1, 4, BOX_REGRESSION_CHANNELS // 4))(\n preds\n )\n preds_bbox = ops.nn.softmax(preds_bbox, axis=-1) * ops.arange(\n BOX_REGRESSION_CHANNELS // 4, dtype=\"float32\"\n )\n return ops.sum(preds_bbox, axis=-1)", "title": "" }, { "docid": "da28d933911edd0374a016a8c865dcb5", "score": "0.49960855", "text": "def coord_corner2center(bbox):\n\n cat_dim = bbox.dim() - 1\n x1, y1, x2, y2 = bbox.chunk(4, dim=cat_dim)\n\n x = torch.floor((x2 - x1 + 1) / 2) + x1\n y = torch.floor((y2 - y1 + 1) / 2) + y1\n\n w = x2 - x1 + 1\n h = y2 - y1 + 1\n\n bbox_trans = torch.cat([x, y, w, h], dim=cat_dim)\n\n return bbox_trans", "title": "" }, { "docid": "1e96772f00381a3dd41355a589510430", "score": "0.49949917", "text": "def second_box_encode(boxes,\r\n anchors,\r\n encode_angle_to_vector=False,\r\n smooth_dim=False,\r\n cylindrical=False):\r\n # need to convert boxes to z-center format\r\n box_ndim = anchors.shape[-1]\r\n cas, cgs = [], []\r\n if box_ndim > 7:\r\n xa, ya, za, wa, la, ha, ra, *cas = np.split(anchors, box_ndim, axis=1)\r\n xg, yg, zg, wg, lg, hg, rg, *cgs = np.split(boxes, box_ndim, axis=1)\r\n else:\r\n xa, ya, za, wa, la, ha, ra = np.split(anchors, box_ndim, axis=1)\r\n xg, yg, zg, wg, lg, hg, rg = np.split(boxes, box_ndim, axis=1)\r\n\r\n diagonal = np.sqrt(la**2 + wa**2) # 4.3\r\n xt = (xg - xa) / diagonal\r\n yt = (yg - ya) / diagonal\r\n zt = (zg - za) / ha # 1.6\r\n lt = np.log(lg / la)\r\n wt = np.log(wg / wa)\r\n ht = np.log(hg / ha)\r\n rt = rg - ra\r\n cts = [g - a for g, a in zip(cgs, cas)]\r\n if smooth_dim:\r\n lt = lg / la - 1\r\n wt = wg / wa - 1\r\n ht = hg / ha - 1\r\n else:\r\n lt = np.log(lg / la)\r\n wt = np.log(wg / wa)\r\n ht = np.log(hg / ha)\r\n if encode_angle_to_vector:\r\n rgx = np.cos(rg)\r\n rgy = np.sin(rg)\r\n rax = np.cos(ra)\r\n ray = np.sin(ra)\r\n rtx = rgx - rax\r\n rty = rgy - ray\r\n return np.concatenate([xt, yt, zt, wt, lt, ht, rtx, rty, *cts], axis=1)\r\n else:\r\n rt = rg - ra\r\n return np.concatenate([xt, yt, zt, wt, lt, 
ht, rt, *cts], axis=1)", "title": "" }, { "docid": "94313bdd01385ae108c5b7c671b77cf2", "score": "0.49895704", "text": "def box(output, target, size=448, B=2):\n \n #Reshape the output tensor into (S*S)x(B*5+C) to make it easier to work with\n sz = output.size()\n output = output.view(sz[0] * sz[1], -1) #e.g 49x30\n pred_bboxes = output[:,:B*5] #slice out only the bounding boxes e.g 49x10\n pred_classes = output[:,B*5:] #slice out the pred classes e.g 49x20\n target = target.view(sz[0] * sz[1], -1) #e.g 49x5\n\n # The `*_global` variables are needed for IoU calculations \n pred_bboxes_global = cell_to_global(pred_bboxes.clone().detach(), B=B) #e.g 49x10\n target_global = normalised_to_global(target.clone().detach()) #e.g 49*5\n\n num_classes = output.size(1) - (B*5)\n \n R = torch.zeros(output.size(0),5+num_classes) #result to return. e.g it is of size 49x25 \n for i in range(output.size(0)): #loop over each cell coordinate\n # `bboxes` will be a tuple of size B (e.g 2), where each elem is 1*5\n bboxes = torch.split(pred_bboxes[i,:], pred_bboxes.size(1)//B) \n bboxes = torch.stack(bboxes)\n bboxes_global = torch.split(pred_bboxes_global[i,:], pred_bboxes.size(1)//B)\n bboxes_global = torch.stack(bboxes_global)\n\n \"\"\"\n In the case where there is a ground truth tensor at the current grid cell,\n the predicted bounding box with the highest intersection over union to the\n ground truth is chosen.\n If there is no ground truth prediction at the current cell, just pick the\n bounding box with the highest confidence\n \"\"\"\n\n #case 1: There is a ground truth prediction at this cell i\n if target[i].sum() > 0:#select the box with the highest intersection over union\n repeated_target = target_global[i].clone().detach().repeat(bboxes.size(0),1)\n jac_idx = _iou(bboxes_global, repeated_target)\n max_iou_idx = torch.argmax(jac_idx)\n R[i,:5] = bboxes[max_iou_idx,:]\n else: #select the box with the highest confidence\n highest_conf_idx = torch.argmax(bboxes[:,4])\n R[i,:5] = bboxes[highest_conf_idx,:]\n\n #Add the predicted class confidence to the results\n R[i,5:] = pred_classes[i]\n \n return R.view(sz[0], sz[1], -1)", "title": "" }, { "docid": "8376f862ea3d9419140abdb53ca25156", "score": "0.4987937", "text": "def minmax2centroid(boxes):\n centroid = np.copy(boxes).astype(np.float)\n centroid[..., 0] = 0.5 * (boxes[..., 1] - boxes[..., 0])\n centroid[..., 0] += boxes[..., 0] \n centroid[..., 1] = 0.5 * (boxes[..., 3] - boxes[..., 2])\n centroid[..., 1] += boxes[..., 2] \n centroid[..., 2] = boxes[..., 1] - boxes[..., 0]\n centroid[..., 3] = boxes[..., 3] - boxes[..., 2]\n return centroid", "title": "" }, { "docid": "ebe6bb72c2fb4c024c36c526377a133c", "score": "0.49857697", "text": "def augment_bounding_boxes(self, bounding_boxes, transformation, **kwargs):\n raise NotImplementedError()", "title": "" }, { "docid": "c2dff480284a106dfd904e4950d7dab6", "score": "0.49845663", "text": "def pre_process(self, bbox, input_size, output_size):\r\n # Check if ball is present or not from the bounding box coordinates\r\n # Center of bounding box must be greater than 0 for a ball to be present\r\n if not torch.equal(bbox[2], torch.DoubleTensor([0.0, 0.0]).to(self.device)):\r\n img_heatmap = torch.zeros(\r\n (output_size[0], output_size[1], output_size[2])).to(self.device)\r\n # Check if bounding box needs to be scaled or not\r\n if input_size != output_size:\r\n scale = torch.DoubleTensor([output_size[1]/input_size[1],\r\n output_size[2]/input_size[2]]).to(self.device)\r\n bbox[0] = torch.round(bbox[0] * 
scale)\r\n bbox[1] = torch.round(bbox[1] * scale)\r\n bbox[3][0] = torch.abs(bbox[0, 0]-bbox[1, 0])\r\n bbox[3][1] = torch.abs(bbox[0, 1]-bbox[1, 1])\r\n bbox[2][0], bbox[2][1] = bbox[0, 0]+bbox[3, 0] / \\\r\n 2, bbox[0, 1]+bbox[3, 1]/2\r\n\r\n pt1, pt2 = bbox[0], bbox[1]\r\n dist = torch.abs(pt1-pt2)\r\n width, length = dist[0].item(), dist[1].item()\r\n\r\n # Choose kernel size for gaussian\r\n if length > width:\r\n ksize = int(max(length, 15))\r\n else:\r\n ksize = int(max(width, 15))\r\n\r\n kernel = cv2.getGaussianKernel(ksize, 4)\r\n kernel = np.dot(kernel, kernel.T)\r\n kernel *= 100\r\n\r\n if pt1[1].item()+ksize > img_heatmap.shape[1]-1:\r\n kY_start = img_heatmap.shape[1]-1-ksize\r\n else:\r\n kY_start = int(pt1[1].item())\r\n\r\n if pt1[0].item()+ksize > img_heatmap.shape[2]-1:\r\n kX_start = img_heatmap.shape[2]-1-ksize\r\n else:\r\n kX_start = int(pt1[0].item())\r\n\r\n # Fit gaussian on the heatmap at bounding box location\r\n img_heatmap[0, kY_start:kY_start+ksize, kX_start:kX_start +\r\n ksize] = torch.from_numpy(kernel).to(self.device)\r\n\r\n else:\r\n # When no ball is present\r\n img_heatmap = torch.zeros(\r\n (output_size[0], output_size[1], output_size[2])).to(self.device)\r\n\r\n return img_heatmap, bbox", "title": "" }, { "docid": "f3035c12e4173349f897640ac5d8626b", "score": "0.4980275", "text": "def create_prior_boxes(self):\n\n input_size = 640\n feature_maps = [160, 80, 40, 20, 10, 5]\n anchor_sizes = [16, 32, 64, 128, 256, 512]\n steps = [4, 8, 16, 32, 64, 128]\n imh = input_size\n imw = input_size\n\n prior_boxes = []\n\n for k in range(len(feature_maps)):\n feath = feature_maps[k]\n featw = feature_maps[k]\n for i, j in product(range(feath), range(featw)):\n f_kw = imw / steps[k]\n f_kh = imh / steps[k]\n\n cx = (j + 0.5) / f_kw\n cy = (i + 0.5) / f_kh\n\n s_kw = anchor_sizes[k] / imw\n s_kh = anchor_sizes[k] / imh\n\n prior_boxes.append([cx, cy, s_kw, s_kh])\n\n prior_boxes = torch.FloatTensor(prior_boxes).to(device)\n prior_boxes.clamp_(min=0, max=1) # (34125, 4)\n\n return prior_boxes", "title": "" }, { "docid": "89f31956e6e6f0e120ab31fd37d31fb1", "score": "0.49713737", "text": "def get_bounding_boxes(self, frame):\n # Creates 4-dimensional blob from image.\n # (416,416) is the image size we feed into yolo\n blob = cv2.dnn.blobFromImage(frame, scalefactor=1/255, size=(416,416))\n # Sets the new input value for the network.\n self.net.setInput(blob)\n # Runs forward pass to compute output of layer with name outputName.\n outs = self.net.forward(self.outNames)\n\n layerNames = self.net.getLayerNames()\n lastLayerId = self.net.getLayerId(layerNames[-1])\n lastLayer = self.net.getLayer(lastLayerId)\n\n classIds, confidences, boxes, bboxes = self.output_process(outs, frame)\n \n # Draw the bounding boxes on the frame\n indices = np.arange(0, len(classIds))\n for i in indices:\n box = boxes[i]\n left = box[0]\n top = box[1]\n width = box[2]\n height = box[3]\n self.drawPred(frame, classIds[i], confidences[i], left, top, left+width, top+height)\n\n return frame, bboxes", "title": "" }, { "docid": "9f77f9ef47533e23da16cb337e0d3c84", "score": "0.49710748", "text": "def bbox_transform(bbox):\n with tf.variable_scope('bbox_transform') as scope:\n cx, cy, w, h = bbox\n out_box = [[]] * 4\n out_box[0] = cx - w / 2\n out_box[1] = cy - h / 2\n out_box[2] = cx + w / 2\n out_box[3] = cy + h / 2\n\n return out_box", "title": "" }, { "docid": "bd60e792d45e145fe2ebd92df35569ea", "score": "0.4970432", "text": "def normalize_image_bboxes(image, boxes, input_shape, resize_img,\n 
allow_rnd_shift=True, bbox_overlap=0.9, interp=Image.BICUBIC):\n if resize_img:\n img_data, scale, (dx, dy) = _scale_image_to_cnn(\n image, input_shape, allow_rnd_shift=allow_rnd_shift, interp=interp)\n else:\n img_data, scale, (dx, dy) = _crop_image_to_cnn(image, input_shape, allow_rnd_shift)\n\n if len(boxes) == 0:\n return img_data, np.zeros((1, 5))\n\n boxes = boxes.copy()\n np.random.shuffle(boxes)\n bb_sizes = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) * scale * scale\n boxes[:, [0, 2]] = boxes[:, [0, 2]] * scale + dx\n boxes[:, [1, 3]] = boxes[:, [1, 3]] * scale + dy\n\n boxes = _filter_empty_bboxes(boxes, *input_shape, bb_sizes=bb_sizes, bb_overlap=bbox_overlap)\n\n return img_data, boxes", "title": "" }, { "docid": "730dec55cc1472d07092dbdf475c7d2b", "score": "0.49674663", "text": "def denorm_boxes(self, boxes, shape):\n h, w = shape\n scale = np.array([h - 1, w - 1, h - 1, w - 1])\n shift = np.array([0, 0, 1, 1])\n return np.around(np.multiply(boxes, scale) + shift).astype(np.int32)\n pass", "title": "" }, { "docid": "b1beb66dd9924ed00901b850eb48cb69", "score": "0.49645928", "text": "def compute_decode_bbox_coord_center_size(self, decode_bbox_data,\n decode_bbox_ori):\n\n decode_bbox_center_x = decode_bbox_data[0]\n decode_bbox_center_y = decode_bbox_data[1]\n decode_bbox_width = decode_bbox_data[2]\n decode_bbox_height = decode_bbox_data[3]\n\n handle_each_dst_loops = self.handle_each_dst // self.mask\n\n self.instance.vmuls(self.mask, decode_bbox_width,\n decode_bbox_width,\n 0.5, handle_each_dst_loops, 1, 1, 8, 8)\n self.instance.vsub(self.mask, decode_bbox_ori[0, 0],\n decode_bbox_center_x,\n decode_bbox_width,\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n\n self.instance.vmuls(self.mask, decode_bbox_height,\n decode_bbox_height,\n 0.5, handle_each_dst_loops, 1, 1, 8, 8)\n self.instance.vsub(self.mask, decode_bbox_ori[1, 0],\n decode_bbox_center_y,\n decode_bbox_height,\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n\n\n self.instance.vadd(self.mask, decode_bbox_ori[2, 0],\n decode_bbox_center_x,\n decode_bbox_width,\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)\n\n self.instance.vadd(self.mask, decode_bbox_ori[3, 0],\n decode_bbox_center_y,\n decode_bbox_height,\n handle_each_dst_loops, 1, 1, 1, 8, 8, 8)", "title": "" }, { "docid": "5842cf679805ced3e4953c17c50e97ba", "score": "0.49634072", "text": "def box_refinement(box, gt_box):\n box = box.astype(np.float32)\n gt_box = gt_box.astype(np.float32)\n\n height = box[:, 2] - box[:, 0]\n width = box[:, 3] - box[:, 1]\n center_y = box[:, 0] + 0.5 * height\n center_x = box[:, 1] + 0.5 * width\n\n gt_height = gt_box[:, 2] - gt_box[:, 0]\n gt_width = gt_box[:, 3] - gt_box[:, 1]\n gt_center_y = gt_box[:, 0] + 0.5 * gt_height\n gt_center_x = gt_box[:, 1] + 0.5 * gt_width\n\n dy = (gt_center_y - center_y) / height\n dx = (gt_center_x - center_x) / width\n dh = np.log(gt_height / height)\n dw = np.log(gt_width / width)\n\n return np.stack([dy, dx, dh, dw], axis=1)", "title": "" }, { "docid": "5842cf679805ced3e4953c17c50e97ba", "score": "0.49634072", "text": "def box_refinement(box, gt_box):\n box = box.astype(np.float32)\n gt_box = gt_box.astype(np.float32)\n\n height = box[:, 2] - box[:, 0]\n width = box[:, 3] - box[:, 1]\n center_y = box[:, 0] + 0.5 * height\n center_x = box[:, 1] + 0.5 * width\n\n gt_height = gt_box[:, 2] - gt_box[:, 0]\n gt_width = gt_box[:, 3] - gt_box[:, 1]\n gt_center_y = gt_box[:, 0] + 0.5 * gt_height\n gt_center_x = gt_box[:, 1] + 0.5 * gt_width\n\n dy = (gt_center_y - center_y) / height\n dx = 
(gt_center_x - center_x) / width\n dh = np.log(gt_height / height)\n dw = np.log(gt_width / width)\n\n return np.stack([dy, dx, dh, dw], axis=1)", "title": "" }, { "docid": "2a6fef67a8273aef8c3e91d1ee0d1466", "score": "0.49615827", "text": "def addBoundingBoxAndLabelBox(self, _bounding_box, _label_width, _label_height):\n\n # Add the bounding box, for fun ;-)\n self.bounding_boxes.append(_bounding_box)\n\n left = _bounding_box[0]\n right = _bounding_box[2]\n top = _bounding_box[1]\n bottom = _bounding_box[3]\n\n # Keep the initial propose\n first_proposal = [ left , top - _label_height, left + _label_width , top]\n\n # The offset will slide the box propose to the right or to the left\n x_offset = 0\n while x_offset < (right-left)-_label_width:\n\n #\n # Proposal 1\n #\n # First label box proposal, top/left\n\n proposal_label_box = [ left + x_offset, top - _label_height, left + _label_width + x_offset, top]\n\n\n # Surface if over an existing label box 30% of area\n if self.isOverALabel(proposal_label_box)< MAX_COVERING:\n # We are done\n self.label_bounding_boxes.append(proposal_label_box)\n return proposal_label_box\n\n #\n # Proposal 2\n #\n # Second proposal, bottom/left\n proposal_label_box = [left + x_offset, bottom , left + _label_width + x_offset, bottom + _label_height]\n if self.isOverALabel(proposal_label_box) < MAX_COVERING:\n # We are done\n self.label_bounding_boxes.append(proposal_label_box)\n return proposal_label_box\n\n #\n # Proposal 3\n #\n # Third proposal, top/right\n proposal_label_box = [right-_label_width - x_offset, top - _label_height, right - x_offset, top]\n if self.isOverALabel(proposal_label_box) < MAX_COVERING:\n # We are done\n self.label_bounding_boxes.append(proposal_label_box)\n return proposal_label_box\n\n #\n # Proposal 4\n #\n # Third proposal, bottom/right\n proposal_label_box = [right-_label_width - x_offset, bottom , right - x_offset, bottom + _label_height]\n if self.isOverALabel(proposal_label_box) < MAX_COVERING:\n # We are done\n self.label_bounding_boxes.append(proposal_label_box)\n return proposal_label_box\n\n x_offset = x_offset + 20\n\n\n # Last chance\n # Rectangle is as height as the image (95%)\n if (bottom - top) > (0.95 * self.image_height):\n # Inside the rectangle\n first_proposal = [left, top, left + _label_width, top + _label_height]\n\n return first_proposal", "title": "" }, { "docid": "44db79f2e79cef369693b47f2eae201b", "score": "0.49585396", "text": "def center_size(boxes):\n return torch.cat(\n (boxes[:, 2:] + boxes[:, :2]) / 2, boxes[:, 2:] - boxes[:, :2], 1 # cx, cy\n ) # w, h", "title": "" }, { "docid": "ed248d0fa6ce1416646dee6e39d7762e", "score": "0.4958458", "text": "def reorg_layer(self, feature_map, anchors):\n num_anchors = len(anchors) # num_anchors=3\n grid_size = feature_map.shape.as_list()[1:3]\n # the downscale image in height and weight\n stride = tf.cast(self.img_size // grid_size, tf.float32) # [h,w] -> [y,x]\n feature_map = tf.reshape(feature_map, [-1, grid_size[0], grid_size[1], num_anchors,\n 5 + self.NUM_CLASSES]) # shape:[N,grid_H,W,num_anchors,5+classes]\n\n box_centers, box_sizes, conf_logits, prob_logits = tf.split(feature_map, [2, 2, 1, self.NUM_CLASSES],\n axis=-1) # e.m.box_centers [N,H,W,num_anchors,2]\n box_centers = tf.nn.sigmoid(box_centers) # (1+e^(-a))^-1\n\n # design a offset matrix\n grid_x = tf.range(grid_size[1], dtype=tf.int32) # x shape(w,)\n grid_y = tf.range(grid_size[0], dtype=tf.int32) # y shape(h,)\n\n a, b = tf.meshgrid(grid_x, grid_y) # shape(h,w)\n x_offset = tf.reshape(a, (-1, 
1)) # shape(h*w,)\n y_offset = tf.reshape(b, (-1, 1)) # shape(h*w,)\n x_y_offset = tf.concat([x_offset, y_offset], axis=-1) # shape(h*w,2)\n x_y_offset = tf.reshape(x_y_offset, [grid_size[0], grid_size[1], 1, 2]) # (h,w,1,2)\n x_y_offset = tf.cast(x_y_offset,tf.float32)\n\n box_centers = box_centers + x_y_offset # predicted centers + the grid offset\n box_centers = box_centers * stride[::-1] # rescale to original scale\n\n box_sizes = tf.exp(box_sizes) * anchors # anchors ->[w,h] exp(box sizes) * anchors\n boxes = tf.concat([box_centers, box_sizes], axis=-1)\n return x_y_offset, boxes, conf_logits, prob_logits", "title": "" }, { "docid": "32b58e223e1969a4448b1e5063f58aa2", "score": "0.4944291", "text": "def point_form(boxes):\n return torch.cat(\n (\n boxes[:, :2] - boxes[:, 2:] / 2, # xmin, ymin\n boxes[:, :2] + boxes[:, 2:] / 2,\n ),\n 1,\n ) # xmax, ymax", "title": "" }, { "docid": "18237e1dddae068313dc51fa3543720a", "score": "0.49437317", "text": "def encode_batch(bboxes, keypoints_pairs):\n # import ipdb\n # ipdb.set_trace()\n N, M = keypoints_pairs.shape[:2]\n keypoints_pairs = keypoints_pairs.view(N, M, -1, 3)\n # num_joints = keypoints_pairs.shape[-2]\n # import ipdb\n # ipdb.set_trace()\n keypoints = keypoints_pairs[..., :2]\n visibility = keypoints_pairs[..., 2]\n # spatial_dims = heatmap_size[0] * heatmap_size[1]\n # heatmaps_shape = (N, M, num_joints, spatial_dims)\n # heatmaps = torch.zeros(heatmaps_shape).type_as(bboxes)\n\n peak_pos, peak_offsets = KeyPointCoder._calculate_peak_pos(\n bboxes, keypoints)\n # filter inside of the window\n # inside_filter = (peak_pos[:, :, 0] < heatmap_size[1]) & (\n # peak_pos[:, :, 1] < heatmap_size[0]) & (peak_pos[:, :, 0] >= 0) & (\n # peak_pos[:, :, 1] >= 0)\n\n # assign peak to heatmaps\n # heatmaps = heatmaps.view(-1, spatial_dims)\n # peak_pos = peak_pos.view(-1, 2)\n # inside_filter = inside_filter.view(-1)\n\n # # import ipdb\n # # ipdb.set_trace()\n resolution = KeyPointCoder.resolution\n peak_pos = peak_pos[..., 1] * resolution + peak_pos[..., 0]\n\n # peak_weights = torch.zeros_like(peak_offsets).type_as(peak_offsets)\n # peak_weights[inside_filter] = 1\n # peak = torch.stack([peak_offsets, peak_weights], dim=-1)\n # peak = peak.view(-1, 4 * 2).float()\n # import ipdb\n # ipdb.set_trace()\n encoded_keypoints = torch.cat(\n [peak_pos.unsqueeze(-1), peak_offsets,\n visibility.unsqueeze(-1)],\n dim=-1)\n return encoded_keypoints.view(N, M, -1)", "title": "" }, { "docid": "30649a58f3776b20ade0f88ee2e318e6", "score": "0.49437228", "text": "def preprocess_voc(inputs, format, image_size):\n inputs[\"image\"] = tf.image.resize(inputs[\"image\"], image_size)\n inputs[\"objects\"][\"bbox\"] = bounding_box.convert_format(\n inputs[\"objects\"][\"bbox\"],\n images=inputs[\"image\"],\n source=\"rel_yxyx\",\n target=format,\n )\n return {\n \"images\": inputs[\"image\"],\n \"bounding_boxes\": inputs[\"objects\"][\"bbox\"],\n }", "title": "" }, { "docid": "65bf2a01d352e6ad4d208d7d43ae004f", "score": "0.49385947", "text": "def normalize_encodings(dummy_encodings, names, center_encoding=True):\n\n max_dim = 0\n squared_matrices = []\n output_dict = {}\n\n for dummy in dummy_encodings:\n dummies_as_list = dummy.transpose().values.tolist()\n dummies_flat = [item for sublist in dummies_as_list\n for item in sublist]\n filler_list = [0] * (\n next_perfect_square(len(dummies_flat)) - len(dummies_flat))\n encoding_squared = dummies_flat + filler_list\n dimension = int(math.sqrt(len(encoding_squared)))\n max_dim = max(max_dim, dimension)\n 
squared_matrices.append(\n np.array(encoding_squared).reshape(dimension, dimension))\n\n print('Maximum dimension is', max_dim, 'x', max_dim)\n if center_encoding:\n print('Centering smaller matrices in', max_dim, 'x', max_dim, '\\n')\n else:\n print('Shifting smaller encoding to match maximum dimension\\n')\n\n for i in range(len(dummy_encodings)):\n if center_encoding:\n centered = center_matrix(squared_matrices[i], max_dim)\n output_dict[names[i]] = list(np.ravel(centered.astype(int)))\n\n else:\n shifted = shift_matrix(squared_matrices[i], max_dim)\n output_dict[names[i]] = list(np.ravel(shifted.astype(int)))\n\n return output_dict", "title": "" }, { "docid": "7880f3bcc6551a93c5054f350a8103a0", "score": "0.49359158", "text": "def _convert_to_extended_corners_format(boxes):\n x1, y1, x2, y2 = tf.split(boxes, [1, 1, 1, 1], axis=-1)\n new_boxes = tf.concat(\n [x1, y1, x2, y2, x2, y1, x1, y2],\n axis=-1,\n )\n return new_boxes", "title": "" }, { "docid": "44ef2bac8435643fad407ac7f5251487", "score": "0.4934016", "text": "def decodeBBox(bboxes, anchors):\n anchor_num = anchors.get_shape()[0]\n\n bbox_center = tf.slice(bboxes, [0, 0], [anchor_num, 2])\n bbox_size = tf.slice(bboxes, [0, 2], [anchor_num, 2])\n anchor_center = tf.slice(anchors, [0,0], [anchor_num, 2])\n anchor_size = tf.slice(anchors, [0, 2], [anchor_num, 2])\n\n pbbox_center = bbox_center * anchor_size + anchor_center\n pbbox_size = tf.exp(bbox_size) * anchor_size\n\n return tf.concat([pbbox_center, pbbox_size], axis=1)", "title": "" }, { "docid": "b6b247065237aa13411faaebaecf7444", "score": "0.49327257", "text": "def create_prior_boxes(self):\n fmap_dims = {'conv4_3': 38,\n 'conv7': 19,\n 'conv8_2': 10,\n 'conv9_2': 5,\n 'conv10_2': 3,\n 'conv11_2': 1}\n\n obj_scales = {'conv4_3': 0.1,\n 'conv7': 0.2,\n 'conv8_2': 0.375,\n 'conv9_2': 0.55,\n 'conv10_2': 0.725,\n 'conv11_2': 0.9}\n\n aspect_ratios = {'conv4_3': [1., 2., 0.5],\n 'conv7': [1., 2., 3., 0.5, .333],\n 'conv8_2': [1., 2., 3., 0.5, .333],\n 'conv9_2': [1., 2., 3., 0.5, .333],\n 'conv10_2': [1., 2., 0.5],\n 'conv11_2': [1., 2., 0.5]}\n\n fmaps = list(fmap_dims.keys())\n\n prior_boxes = []\n\n for k, fmap in enumerate(fmaps):\n for i in range(fmap_dims[fmap]):\n for j in range(fmap_dims[fmap]):\n cx = (j + 0.5) / fmap_dims[fmap]\n cy = (i + 0.5) / fmap_dims[fmap]\n\n for ratio in aspect_ratios[fmap]:\n prior_boxes.append([cx, cy, obj_scales[fmap] * sqrt(ratio), obj_scales[fmap] / sqrt(ratio)])\n\n # For an aspect ratio of 1, use an additional prior whose scale is the geometric mean of the\n # scale of the current feature map and the scale of the next feature map\n if ratio == 1.:\n try:\n additional_scale = sqrt(obj_scales[fmap] * obj_scales[fmaps[k + 1]])\n # For the last feature map, there is no \"next\" feature map\n except IndexError:\n additional_scale = 1.\n prior_boxes.append([cx, cy, additional_scale, additional_scale])\n\n prior_boxes = torch.FloatTensor(prior_boxes).to(device) # (8732, 4)\n prior_boxes.clamp_(0, 1) # (8732, 4)\n\n return prior_boxes", "title": "" }, { "docid": "299095b8e29984474b53f4a71c9c78f3", "score": "0.49284807", "text": "def norm_boxes_graph(boxes, shape):\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.divide(boxes - shift, scale)", "title": "" }, { "docid": "b10c382b4fabd4f99378374443e4d7ae", "score": "0.4922583", "text": "def bbox_transform_inv(boxes, deltas, means=None, stds=None):\n\n if boxes.shape[0] == 0:\n return 
np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)\n\n # boxes = boxes.astype(deltas.dtype, copy=False)\n\n widths = boxes[:, 2] - boxes[:, 0] + 1.0\n heights = boxes[:, 3] - boxes[:, 1] + 1.0\n ctr_x = boxes[:, 0] + 0.5 * widths\n ctr_y = boxes[:, 1] + 0.5 * heights\n\n dx = deltas[:, 0]\n dy = deltas[:, 1]\n dw = deltas[:, 2]\n dh = deltas[:, 3]\n\n if stds is not None:\n dx *= stds[0]\n dy *= stds[1]\n dw *= stds[2]\n dh *= stds[3]\n\n if means is not None:\n dx += means[0]\n dy += means[1]\n dw += means[2]\n dh += means[3]\n\n pred_ctr_x = dx * widths + ctr_x\n pred_ctr_y = dy * heights + ctr_y\n pred_w = torch.exp(dw) * widths\n pred_h = torch.exp(dh) * heights\n\n pred_boxes = torch.zeros(deltas.shape)\n\n # x1, y1, x2, y2\n pred_boxes[:, 0] = pred_ctr_x - 0.5 * pred_w\n pred_boxes[:, 1] = pred_ctr_y - 0.5 * pred_h\n pred_boxes[:, 2] = pred_ctr_x + 0.5 * pred_w\n pred_boxes[:, 3] = pred_ctr_y + 0.5 * pred_h\n\n return pred_boxes", "title": "" }, { "docid": "e1dd7f4ccab54db0ec8e67ee207a4f51", "score": "0.49128607", "text": "def convert_to_batch_coordinates(detection_boxes, height, width, batch_height,\n batch_width):\n height = tf.expand_dims(tf.cast(height, tf.float32), -1)\n width = tf.expand_dims(tf.cast(width, tf.float32), -1)\n batch_height = tf.cast(batch_height, tf.float32)\n batch_width = tf.cast(batch_width, tf.float32)\n\n ymin, xmin, ymax, xmax = tf.unstack(detection_boxes, axis=-1)\n detection_boxes_converted = tf.stack([\n ymin * height / batch_height, xmin * width / batch_width,\n ymax * height / batch_height, xmax * width / batch_width\n ], -1)\n return detection_boxes_converted", "title": "" }, { "docid": "130db8e4a65d6ca877c31371d483200b", "score": "0.49118328", "text": "def retinanet_bbox(\n model,\n sizes,\n strides,\n ratios,\n scales,\n nms = True,\n class_specific_filter = True,\n name = 'retinanet-bbox',\n nms_threshold = 0.5,\n score_threshold = 0.05,\n max_detections = 300,\n parallel_iterations = 32,\n **kwargs\n):\n\n # if no anchor parameters are passed, use default values\n\n # compute the anchors\n features = [model.get_layer(p_name).output for p_name in ['P3', 'P4', 'P5', 'P6', 'P7']]\n\n anchors = __build_anchors(\n features, \n sizes=sizes,\n strides=strides,\n ratios=ratios,\n scales=scales)\n\n # we expect the anchors, regression and classification values as first output\n regression = model.outputs[0]\n classification = model.outputs[1]\n\n # \"other\" can be any additional output from custom submodels, by default this will be []\n # other = model.outputs[2:]\n\n # apply predicted regression to anchors\n boxes = RegressBoxes(name='boxes')([anchors, regression])\n boxes = ClipBoxes(name='clipped_boxes')([model.inputs[0], boxes])\n\n # filter detections (apply NMS / score threshold / select top-k)\n detections = FilterDetections(\n nms = nms,\n class_specific_filter = class_specific_filter,\n name = 'filtered_detections',\n nms_threshold = nms_threshold,\n score_threshold = score_threshold,\n max_detections = max_detections,\n parallel_iterations = parallel_iterations\n )([boxes, classification])# + other)\n\n # construct the model\n return keras.models.Model(inputs=model.inputs, outputs=detections, name=name)", "title": "" }, { "docid": "babf09002926f64a229763ba10a8e644", "score": "0.4901964", "text": "def apply_box_deltas(boxes, deltas):\n boxes = boxes.astype(np.float32)\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n 
# Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= np.exp(deltas[:, 2])\n width *= np.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n return np.stack([y1, x1, y2, x2], axis=1)", "title": "" }, { "docid": "babf09002926f64a229763ba10a8e644", "score": "0.4901964", "text": "def apply_box_deltas(boxes, deltas):\n boxes = boxes.astype(np.float32)\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= np.exp(deltas[:, 2])\n width *= np.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n return np.stack([y1, x1, y2, x2], axis=1)", "title": "" }, { "docid": "661131f3c0daf778d46e519e3aba925c", "score": "0.48980507", "text": "def _get_jittered_box(self, box, mode, rng):\r\n\r\n jittered_size = box[2:4] * np.exp(\r\n rng.randn(2) * self.scale_jitter_factor[mode])\r\n max_offset = (np.sqrt(jittered_size.prod()) *\r\n self.center_jitter_factor[mode])\r\n jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (rng.rand(2)\r\n - 0.5)\r\n\r\n return np.concatenate(\r\n (jittered_center - 0.5 * jittered_size, jittered_size), axis=0)", "title": "" }, { "docid": "05fde9c7755bfd24f76eb56433f17c86", "score": "0.48967716", "text": "def calc_bounding_box(self):\n _w, _h = self.boxWidth, self.boxHeight\n _x, _y = self.shiftFun(self.position[0], self.position[1], _w, _h, world=1)\n self.boundingBox = BBox.as_bbox(((_x, _y - _h), (_x + _w, _y)))", "title": "" }, { "docid": "41987e5b684853e4313b1755273d00fc", "score": "0.48906314", "text": "def center_size(boxes):\n return torch.cat(( (boxes[:, 2:] + boxes[:, :2])/2, # cx, cy\n boxes[:, 2:] - boxes[:, :2] ), 1) # w, h", "title": "" }, { "docid": "c17426dbc7bd2f944316ec29c938bfad", "score": "0.48879912", "text": "def _rescale_boxes(boxes, inds, scales):\n for i in xrange(boxes.shape[0]):\n boxes[i,:] = boxes[i,:] / scales[int(inds[i])]\n\n return boxes", "title": "" }, { "docid": "2e3c9b7bfb8d3acd04258a70cb7c8ced", "score": "0.48860818", "text": "def bbox_decode(self, bbox_preds, anchors, stds, means, wh_ratio_clip=1e-6):\n\n num_imgs, H, W, _ = bbox_preds.shape\n bbox_delta = paddle.reshape(bbox_preds, [-1, 5])\n bboxes = self.delta2rbox(anchors, bbox_delta, means, stds,\n wh_ratio_clip)\n return bboxes", "title": "" }, { "docid": "9565741b6b1684bc58a1a71f368c6d80", "score": "0.48854178", "text": "def xyxy_to_normalized_xyxy(bboxes: Tensor, image_shape: Tuple[int, int]) -> Tensor:\n rows, cols = image_shape\n if torch.jit.is_scripting():\n scale = torch.tensor([cols, rows, cols, rows], dtype=bboxes.dtype, device=bboxes.device)\n scale = scale.reshape([1] * (len(bboxes.size()) - 1) + [4])\n else:\n if torch.is_tensor(bboxes):\n scale = torch.tensor([cols, rows, cols, rows], dtype=bboxes.dtype, device=bboxes.device)\n scale = scale.reshape([1] * (len(bboxes.size()) - 1) + [4])\n elif isinstance(bboxes, np.ndarray):\n scale = np.array([cols, rows, cols, rows], dtype=bboxes.dtype)\n else:\n raise RuntimeError(f\"Only Torch tensor or Numpy array is supported. 
Received bboxes of type {str(type(bboxes))}\")\n return bboxes / scale", "title": "" }, { "docid": "43de817eb409c21092a137c535f2beb0", "score": "0.4881725", "text": "def center_size(boxes):\n return np.hstack([(boxes[:, 3:] + boxes[:, :3])/2,\n (boxes[:, 3:] - boxes[:, :3])])", "title": "" }, { "docid": "ff55d2257a827e02e425cb3ccca8bdf2", "score": "0.48815787", "text": "def scale_bounding_area_to(X, bbox, low=0, high=256):\n\n assert len(X.shape) == 3\n assert X.shape[-1] == 2\n assert bbox.shape[-1] == 4\n\n half_max = (high - low)/2\n\n half_width = (bbox[:, 2] - bbox[:, 0])/2\n half_height = (bbox[:, 3] - bbox[:, 1])/2\n\n X_new = X - bbox[:, :2].reshape((-1, 1, 2))\n\n for i in range(X.shape[0]):\n \n scale_x = 1.0\n scale_y = 1.0\n\n offset_x = 0\n offset_y = 0\n\n if half_width[i] > half_height[i]:\n scale_x = half_max / half_width[i]\n scale_y = (half_height[i] / half_width[i]) * scale_x\n offset_y = half_max - half_height[i] * scale_y\n else:\n scale_y = half_max / half_height[i]\n scale_x = (half_width[i] / half_height[i]) * scale_y\n offset_x = half_max - half_width[i] * scale_x\n\n X_new[i, :, 0] = X_new[i, :, 0] * scale_x + offset_x\n X_new[i, :, 1] = X_new[i, :, 1] * scale_y + offset_y\n\n return X_new", "title": "" } ]
f5f1a12e423f64898c87722da6b0c697
Don't have extraction on event class.
[ { "docid": "79d08c89e43444b6e17aa3003457ef12", "score": "0.5585676", "text": "def applyExtraction(self, evt):\n return evt", "title": "" } ]
[ { "docid": "7d776ad9ddd3ecf0dfcfdd983b9d9ea6", "score": "0.63723147", "text": "def processEvent(self, event):\n pass", "title": "" }, { "docid": "fad067ebf8bcbda50b08c793865316b0", "score": "0.6328626", "text": "def __init__(self):\n _LOGGER.error(\"Event class is not meant to be instantiated\")", "title": "" }, { "docid": "d4c876aa3ba9369bfa7cb50d42719cfa", "score": "0.63256216", "text": "def unmatched_events(self):\n pass", "title": "" }, { "docid": "f2088e0344b73b6de9da9e80162d4653", "score": "0.62354386", "text": "def __init__(self,event: Event = None): \n self.event=event", "title": "" }, { "docid": "36fdd1f578f846ae5e0a17eaa8fdfc80", "score": "0.61416686", "text": "def identify_events(self):", "title": "" }, { "docid": "fcae66858c7386312461b968f2c5065f", "score": "0.6120544", "text": "def _register_events(self):", "title": "" }, { "docid": "189762de5127190a748321c4d89ff48e", "score": "0.6089802", "text": "def process_event(self, event):\n raise NotImplementedError()", "title": "" }, { "docid": "189762de5127190a748321c4d89ff48e", "score": "0.6089802", "text": "def process_event(self, event):\n raise NotImplementedError()", "title": "" }, { "docid": "189762de5127190a748321c4d89ff48e", "score": "0.6089802", "text": "def process_event(self, event):\n raise NotImplementedError()", "title": "" }, { "docid": "b787937fec42720f84c4a641909043aa", "score": "0.60722786", "text": "def _unifyEvent(self, event):\n\t\treturn Event(self._unifyCycle(event.cycle))", "title": "" }, { "docid": "58c5f76e055be7e7c9c92cf5d14a99af", "score": "0.6060854", "text": "def on_event(self, event):\n pass", "title": "" }, { "docid": "13d18d3ae81e2cd3e2274cc109050bee", "score": "0.6056755", "text": "def Parse(cls, event):\n pass", "title": "" }, { "docid": "2369c3916bf2e52db39ecb608cc1da40", "score": "0.6054911", "text": "def unpackevent(ae,formodulename):\n\tpass", "title": "" }, { "docid": "a74bc0320192f2b7da047da31caf99e4", "score": "0.6026685", "text": "def event(self, event_name):\n pass", "title": "" }, { "docid": "af6a4e5d2b20fe80ff7988372b29465e", "score": "0.5967716", "text": "def _(event):", "title": "" }, { "docid": "79264564935ee082d2793a1bb9eb1d63", "score": "0.59592617", "text": "def ignored(self):\n ...", "title": "" }, { "docid": "dad9851e99d01d8dd3cb78f9ea2bb55f", "score": "0.5912813", "text": "def _(event):\n pass", "title": "" }, { "docid": "d4112c219cc28812b615f766f81a3dc4", "score": "0.5876806", "text": "def on_event(self, event):", "title": "" }, { "docid": "d4112c219cc28812b615f766f81a3dc4", "score": "0.5876806", "text": "def on_event(self, event):", "title": "" }, { "docid": "7cb723f2288a6fdf170e930d9a2d05d5", "score": "0.58757424", "text": "def __init__(event, **params):", "title": "" }, { "docid": "f3277a62f42097ee30600529d7bcbca4", "score": "0.58144474", "text": "def get_non_events(stmts):\n return [st for st in stmts if not isinstance(st, Event)]", "title": "" }, { "docid": "9eee7057593496835fa23e8e4d441c07", "score": "0.5796359", "text": "def on_event(self, kind, event):", "title": "" }, { "docid": "9eee7057593496835fa23e8e4d441c07", "score": "0.5796359", "text": "def on_event(self, kind, event):", "title": "" }, { "docid": "bee541a45947ac0f344a66e188d854ea", "score": "0.57728004", "text": "def visitNot(self, not_):\n # Do nothing; override in a subclass to do something.\r", "title": "" }, { "docid": "b745c393084c2961125c79a4660905a1", "score": "0.57558644", "text": "def _(event):\n # TODO:\n pass", "title": "" }, { "docid": "36a5266209bae0d01e764266789e2678", "score": "0.57221144", 
"text": "def handle_event(self, event):", "title": "" }, { "docid": "2de883dc347be9c62da3db2743aebd98", "score": "0.57168984", "text": "def _ignore(self, event):\n\n msg = '\\nINFO: %s for %s in %s' % (event.type, event.source, event.host_application)\n debug.println(debug.LEVEL_INFO, msg)\n\n if not self._active:\n msg = 'INFO: Ignoring because event manager is not active'\n debug.println(debug.LEVEL_INFO, msg)\n return True\n\n if list(filter(event.type.startswith, self._ignoredEvents)):\n msg = 'INFO: Ignoring because event type is ignored'\n debug.println(debug.LEVEL_INFO, msg)\n return True\n\n if event.type.startswith('window'):\n msg = 'INFO: Not ignoring because event type is never ignored'\n debug.println(debug.LEVEL_INFO, msg)\n return False\n\n # This should ultimately be changed as there are valid reasons\n # to handle these events at the application level.\n if event.type.startswith('object:children-changed:remove') \\\n and event.source != self.registry.getDesktop(0):\n msg = 'INFO: Ignoring because event type is ignored'\n debug.println(debug.LEVEL_INFO, msg)\n return True\n\n if event.type.startswith('object:text-changed') and event.type.endswith('system'):\n # We should also get children-changed events telling us the same thing.\n # Getting a bunch of both can result in a flood that grinds us to a halt.\n if event.any_data == self.EMBEDDED_OBJECT_CHARACTER:\n msg = 'INFO: Text changed event for embedded object. Who cares?'\n debug.println(debug.LEVEL_INFO, msg)\n return True\n\n try:\n # TODO - JD: For now we won't ask for the name. Simply asking for the name should\n # not break anything, and should be a reliable way to quickly identify defunct\n # objects. But apparently the mere act of asking for the name causes Orca to stop\n # presenting Eclipse (and possibly other) applications. This might be an AT-SPI2\n # issue, but until we know for certain....\n #name = event.source.name\n state = event.source.getState()\n except:\n msg = 'ERROR: %s from potentially-defunct source %s in app %s (%s, %s, %s)' % \\\n (event.type, event.source, event.host_application, event.detail1,\n event.detail2, event.any_data)\n debug.println(debug.LEVEL_INFO, msg)\n return True\n if state.contains(pyatspi.STATE_DEFUNCT):\n msg = 'ERROR: %s from defunct source %s in app %s (%s, %s, %s)' % \\\n (event.type, event.source, event.host_application, event.detail1,\n event.detail2, event.any_data)\n debug.println(debug.LEVEL_INFO, msg)\n return True\n\n if event.type.startswith('object:state-changed:showing'):\n try:\n role = event.source.getRole()\n except:\n role = None\n if role in [pyatspi.ROLE_IMAGE, pyatspi.ROLE_MENU_ITEM, pyatspi.ROLE_PARAGRAPH]:\n msg = 'INFO: %s for %s in app %s. Who cares?' 
% \\\n (event.type, event.source, event.host_application)\n debug.println(debug.LEVEL_INFO, msg)\n return True\n\n if event.type.startswith('object:children-changed:add'):\n if not event.any_data:\n msg = 'ERROR: %s without child from source %s in app %s' % \\\n (event.type, event.source, event.host_application)\n debug.println(debug.LEVEL_INFO, msg)\n return True\n try:\n state = event.any_data.getState()\n role = event.any_data.getRole()\n except:\n msg = 'ERROR: %s with potentially-defunct child %s from source %s in app %s' % \\\n (event.type, event.any_data, event.source, event.host_application)\n debug.println(debug.LEVEL_INFO, msg)\n return True\n if state.contains(pyatspi.STATE_DEFUNCT):\n msg = 'ERROR: %s with defunct child %s from source %s in app %s' % \\\n (event.type, event.any_data, event.source, event.host_application)\n debug.println(debug.LEVEL_INFO, msg)\n return True\n\n # This should be safe. We do not have a reason to present a newly-added,\n # but not focused image. We do not need to update live regions for images.\n # This is very likely a completely and utterly useless event for us. The\n # reason for ignoring it here rather than quickly processing it is the\n # potential for event floods like we're seeing from matrix.org.\n if role == pyatspi.ROLE_IMAGE:\n msg = 'INFO: %s for child image %s from source %s in app %s. Who cares?' % \\\n (event.type, event.any_data, event.source, event.host_application)\n debug.println(debug.LEVEL_INFO, msg)\n return True\n\n msg = 'INFO: Not ignoring due to lack of cause'\n debug.println(debug.LEVEL_INFO, msg)\n return False", "title": "" }, { "docid": "3d3d0ea79d73513ef8d5dcde38f5ea6f", "score": "0.57148415", "text": "def _(event):\n # TODO", "title": "" }, { "docid": "3d3d0ea79d73513ef8d5dcde38f5ea6f", "score": "0.57148415", "text": "def _(event):\n # TODO", "title": "" }, { "docid": "a3c47ccf11147590954cd57a81a790fd", "score": "0.5713714", "text": "def on_event(self, event):\n raise NotImplementedError('Listener base class is doing nothing')", "title": "" }, { "docid": "900fc50fd746c3faefa18e5f8249a44f", "score": "0.57088625", "text": "def process_default(self, event):\n warn(InotifyUnexpectedEvent(event))", "title": "" }, { "docid": "1fbb4f2a4fd898ace8a56f836772ef64", "score": "0.5708082", "text": "async def process_event(self, event: BaseEvent) -> Any:", "title": "" }, { "docid": "896d93952d55596bb6cf32eaf433e811", "score": "0.5700405", "text": "def getSubEventClassesWithDisabledTransform(self):\n evts = [ec for ec in self.getSubEventClasses() if not ec.transformEnabled]\n return evts", "title": "" }, { "docid": "b470c8b112af2eb3646c646d7e15693c", "score": "0.5691102", "text": "def __init__(self):\n #self.events = []\n self._handlers = set()", "title": "" }, { "docid": "3787cb2aa103571a4fb082d92d86c7f9", "score": "0.567084", "text": "def get_events(self): # real signature unknown; restored from __doc__\n return 0", "title": "" }, { "docid": "97faca580943c136170962ec882dbe51", "score": "0.566579", "text": "def new_event(self):\n pass", "title": "" }, { "docid": "d46046f4ab276bdf37caa883769b0d20", "score": "0.56635", "text": "def received_events(self):\n pass", "title": "" }, { "docid": "6940b8300dbc84377932e738fa4c0c79", "score": "0.56593484", "text": "def on(cls):", "title": "" }, { "docid": "5a7e8e3ceac38a5a73ef409dcc57137c", "score": "0.56190354", "text": "def clean_events(self, events):\n if 'exp_version' in events.dtype.names:\n events.exp_version = re.sub(r'[^\\d.]', '', events[10].exp_version)\n for event in events:\n 
event.eegfile = os.path.basename(event.eegfile)\n return events", "title": "" }, { "docid": "c383daf21affd89fdc7dce054daa5884", "score": "0.5611063", "text": "def __init__(self, events):\n self.events = events", "title": "" }, { "docid": "0fa478713d691ac08928b81e958d173c", "score": "0.55814135", "text": "def compose_event(self, ti):\n pass", "title": "" }, { "docid": "e0404403e5530020058b3dbf71ef9cf4", "score": "0.55799395", "text": "def matched_events(self):\n pass", "title": "" }, { "docid": "e7ec20a12366d8c1c408a28c5949be2b", "score": "0.55784416", "text": "def __nonzero__(self):\n return bool(self.events)", "title": "" }, { "docid": "a6b58af1a00519b83225a24439f37e41", "score": "0.55675656", "text": "def name_inner_event(cls):\n if hasattr(cls, 'Event'):\n cls.Event._event_name = '{}.Event'.format(cls.__name__)\n else:\n warnings.warn('Class {} does not have a inner Event'.format(cls))\n return cls", "title": "" }, { "docid": "e1d7334f1a1792925b31d1079eb64409", "score": "0.5546858", "text": "def __init__(self):\r\n self.event_list = []", "title": "" }, { "docid": "83f74b386df41e9a2144e62dbfbdef5d", "score": "0.5511837", "text": "def packevent(ae,parameters,attributes):\n\tpass", "title": "" }, { "docid": "6e4da83ae86f1ac99ac79a15c86a50ff", "score": "0.55069005", "text": "def handled(self):\r\n raise NotImplementedError()", "title": "" }, { "docid": "83e4f3c0b2d774eec29e3006892e74c2", "score": "0.55065656", "text": "def __init__(self, events=None):\n self.events = events", "title": "" }, { "docid": "31ee7124de568c237d67ef11beea63c4", "score": "0.5496558", "text": "def recv_event(self, e):", "title": "" }, { "docid": "cc83973aeb5c73ce2b75603742a07e53", "score": "0.5458924", "text": "def test_event_is_event_instance(self):\n self.assertIsInstance(self.event, Event)", "title": "" }, { "docid": "1e2a0bf00c82cb15d487a15aa4a6ef1a", "score": "0.5453811", "text": "def run(self, event: BaseEvent):\n pass", "title": "" }, { "docid": "4763c93505430b6226582a9db3347259", "score": "0.54306674", "text": "def eventstreamer(self) -> EventStreamer:", "title": "" }, { "docid": "03b2010eaeef6a9f6288471e49116cda", "score": "0.53993297", "text": "def _visit_ignore(self, elem):\n pass", "title": "" }, { "docid": "fa148287ca9418dc558b58b5210f5845", "score": "0.53947854", "text": "def get_events(self):\n pass", "title": "" }, { "docid": "aea969f22b418ec6c6953f03b23262a9", "score": "0.53784794", "text": "def handle_events(self, events):\n pass", "title": "" }, { "docid": "996a910a68056cf0e0a45ed64ee939c6", "score": "0.53605455", "text": "def test_dotted_unknown_event():\n listener = Listener()\n listener.handle_stream_([\n 'event: do.something',\n 'data: {}',\n '',\n ])\n assert listener.do_something_called is True\n assert listener.updates == []\n assert listener.notifications == []\n assert listener.deletes == []\n assert listener.heartbeats == 0", "title": "" }, { "docid": "cca0cefc58fe77a5713ca9d333b618a5", "score": "0.53585243", "text": "def test_deferred_instance_event_subclass_no_propagate(self):\n users, User = (self.tables.users, self.classes.User)\n\n class SubUser(User):\n pass\n\n canary = []\n\n def evt(x):\n canary.append(x)\n\n event.listen(User, \"load\", evt, propagate=False)\n\n m = self.mapper_registry.map_imperatively(SubUser, users)\n m.class_manager.dispatch.load(5)\n eq_(canary, [])", "title": "" }, { "docid": "5756f443c878951f64d63a5ce4ef4980", "score": "0.53464544", "text": "def on(self) -> None:", "title": "" }, { "docid": "e19b2898287bee2a356a9de1d3945f77", "score": "0.5344293", "text": 
"def on(self, *_, **__):\n raise NotImplementedError()", "title": "" }, { "docid": "fd4889604fc62fb7b0ec1b0163b2c4e8", "score": "0.5334859", "text": "def off(cls):", "title": "" }, { "docid": "3aa13599accb1197788c5dac385a7bc6", "score": "0.5331115", "text": "def from_sdl_event(cls, sdl_event: Any) -> Any:\n raise NotImplementedError()", "title": "" }, { "docid": "5f0cfece3bd4438c4cc6e97281904764", "score": "0.53281355", "text": "def __init__(self, events: List[Any] = None) -> None:\n self.events: List[Any] = events or []", "title": "" }, { "docid": "9b263d8953f383e1ffd3f8975c07d34f", "score": "0.532682", "text": "def dropEvent(self, event):", "title": "" }, { "docid": "3c3a2efb163fc8356285b014462b05de", "score": "0.5325779", "text": "def test_event_without_flattening(self):\n with open(\"tests/resources/simple_event.json\") as f:\n orig = json.load(f)\n with open(\"tests/resources/simple_event-transformed.json\") as f:\n expected = json.load(f)\n self.assertEqual(expected, processor.transform_events([orig])[0], \"simple event, no flattening\")", "title": "" }, { "docid": "87a06c4ac6c3f25c949c17a489338e7a", "score": "0.5324368", "text": "def handle(self, event):\n\t\treturn None", "title": "" }, { "docid": "37b06280841a47c7da1eb6fc4ed3ec6c", "score": "0.53021866", "text": "def bind_events(self):", "title": "" }, { "docid": "e35b3dbe9a4b60f155b9d7c498e6b677", "score": "0.529557", "text": "def get_event(self):\n ret = self.event\n self.event = \"\"\n return ret", "title": "" }, { "docid": "b7ad79fa6df717cdf889b50d036f3642", "score": "0.5289799", "text": "def test_on_subset(events):\n for e in [\"0\", \"1\", \"2\"]:\n events.on(e)(lambda: None)", "title": "" }, { "docid": "76fb023a07d28a8805abc809371e90ff", "score": "0.5282825", "text": "def isHandled(self):\r\n raise NotImplementedError()", "title": "" }, { "docid": "b84a08f0c2ecb1ffcbc20d698af712d6", "score": "0.52803123", "text": "def no_prefix(name):\n return name.startswith('E') and not name.startswith('EVENT')", "title": "" }, { "docid": "c58804d6970886227b6607d15546587f", "score": "0.52720904", "text": "def class_empty(self):\n pass", "title": "" }, { "docid": "7c9b525cd715103811d09e736a8d0707", "score": "0.527109", "text": "def handle_ignore(self, e):\n pass", "title": "" }, { "docid": "95fca459ba6c3c16bb6b699b29626c40", "score": "0.52688295", "text": "def test_converter_convert_dict_event_with_empty_conversion_set(event):\n\n class DummyBaseConversionSet(BaseConversionSet):\n \"\"\"Dummy implementation of abstract BaseConversionSet.\"\"\"\n\n __dest__ = BaseModel\n\n def _get_conversion_items(self): # pylint: disable=no-self-use\n \"\"\"Returns a set of ConversionItems used for conversion.\"\"\"\n return set()\n\n assert not convert_dict_event(event, \"\", DummyBaseConversionSet()).dict()", "title": "" }, { "docid": "a33348839674bbc22028444940f44703", "score": "0.52648634", "text": "def _extract(self) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "25f53e3159b44089be8a049f3e4d5b80", "score": "0.5264371", "text": "def A(self, event, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "cb472718d1214b4d55841898ed494fd0", "score": "0.52625215", "text": "def process_post_events(self):\n raise NotImplementedError()", "title": "" }, { "docid": "8c307081ed2e7f3533f324b439c73c3f", "score": "0.5253854", "text": "def visitFunction(self, function_):\n # Do nothing; override in a subclass to do something.\r", "title": "" }, { "docid": "359fd40b4a8c57ac5abc47eef841d4ab", "score": "0.5247906", "text": "def 
off_event(self, event):\n self._events = list(filter(lambda e: not (e[1] == event), self._events))\n\n self.update_events()", "title": "" }, { "docid": "6a3a6102aed11fdc61caae9b1dc6eedc", "score": "0.5243521", "text": "def ignore_events(self):\n self._ignoring_events = True\n try:\n yield\n finally:\n self._ignoring_events = False", "title": "" }, { "docid": "3fd663d65866ebace468c327f17f6127", "score": "0.5206915", "text": "def test_deferred_map_event_subclass_no_propagate(self):\n users, User = (self.tables.users, self.classes.User)\n\n class SubUser(User):\n pass\n\n canary = []\n\n def evt(x, y, z):\n canary.append(x)\n\n event.listen(User, \"before_insert\", evt, propagate=False)\n\n m = self.mapper_registry.map_imperatively(SubUser, users)\n m.dispatch.before_insert(5, 6, 7)\n eq_(canary, [])", "title": "" }, { "docid": "e2ce70a779482441c4a10c3bc02871c0", "score": "0.52038854", "text": "def __init__(self, event_type: EventType):\n self.event_type = event_type", "title": "" }, { "docid": "65a7147470ad18279e8fc9c191532a79", "score": "0.51967084", "text": "def apply_noop(aggregate, event):\n return aggregate", "title": "" }, { "docid": "59c563000322384e7097676819516764", "score": "0.51913404", "text": "def __init__(self, event: dict):\n self.event_id = event.get(\"event_id\")\n self.name = event.get(\"name\")\n self.group_name = event.get(\"group_name\")\n self.start_date = event.get(\"start_date\")\n self.end_date = event.get(\"end_date\")\n self.start_time = event.get(\"start_time\")\n self.end_time = event.get(\"end_time\")\n self.description = event.get(\"description\")\n self.deleted = event.get(\"deleted\")\n self.created_date = event.get(\"created_date\")\n self.created_user = event.get(\"created_user\")\n self.created_app = event.get(\"created_app\")\n self.modified_date = event.get(\"modified_date\")\n self.modified_user = event.get(\"modified_user\")\n self.modified_app = event.get(\"modified_app\")\n self.deleted_date = event.get(\"deleted_date\")\n self.deleted_user = event.get(\"deleted_user\")\n self.deleted_app = event.get(\"deleted_app\")", "title": "" }, { "docid": "33d53bee2da1fff4e4a891dbf6561881", "score": "0.51899445", "text": "def test_is_event_dispatcher_valid__returns_false(self):\n\n class CustomEventDispatcher(object):\n def some_other_method(self):\n pass\n\n self.assertFalse(validator.is_event_dispatcher_valid(CustomEventDispatcher))", "title": "" }, { "docid": "722995f8a7bebbae7cf6465c6cd215e9", "score": "0.51878583", "text": "def on_event(self, event, param = None):\n pass", "title": "" }, { "docid": "3ed5b482503144e375e17fa471e4511c", "score": "0.51815265", "text": "def as_leaf_class(self):\n logger = logging.getLogger(__name__)\n if hasattr(self, 'activity'):\n return self.activity\n elif hasattr(self, 'educationevent'):\n return self.educationevent\n elif hasattr(self, 'companyevent'):\n return self.companyevent\n else:\n # Unknown event type\n logger.warning(\"Unknown event type for Event {}, {}. 
Did you add a new subclass to Event \"\n \"without fixing the get_subclass_instance method?!\".format(self.pk, self.summary_nl))\n return self", "title": "" }, { "docid": "9d3446449711d357aad078371673eb30", "score": "0.5179412", "text": "def ignore(self):\n pass", "title": "" }, { "docid": "6740c9b311dc882cb6154cea1a274cbf", "score": "0.5178366", "text": "def handle(self, event):\n raise NotImplementedError()", "title": "" }, { "docid": "0ceef41b32fe973583952a172e3b39f7", "score": "0.5173064", "text": "def test_deferred_instance_event_plain(self):\n users, User = (self.tables.users, self.classes.User)\n\n canary = []\n\n def evt(x):\n canary.append(x)\n\n event.listen(User, \"load\", evt, raw=True)\n\n m = self.mapper_registry.map_imperatively(User, users)\n m.class_manager.dispatch.load(5)\n eq_(canary, [5])", "title": "" }, { "docid": "9c1aab96dd2cbb2ac984c55e175ec587", "score": "0.51730424", "text": "def normalclass(self):\n pass", "title": "" }, { "docid": "7c54423879dfb038399d8a3f58ef754a", "score": "0.5172813", "text": "def eventCategory(event):\n raise NotImplementedError\n return []", "title": "" }, { "docid": "82e3fde1b753f9fbd59b0b0e871af93e", "score": "0.5172468", "text": "def _mouse_event(self, event):\n pass", "title": "" }, { "docid": "bbb39da5cb10d59fa054dc8f2bb1de6f", "score": "0.51631236", "text": "def class_attributes_only(self):\n pass", "title": "" }, { "docid": "b4f25235f094392e534b401a807474e5", "score": "0.515996", "text": "def test_unknown_event():\n listener = Listener()\n listener.handle_stream_([\n 'event: blahblah',\n 'data: {}',\n '',\n ])\n assert listener.bla_called is True\n assert listener.updates == []\n assert listener.notifications == []\n assert listener.deletes == []\n assert listener.heartbeats == 0", "title": "" }, { "docid": "bd9049dfb774753508f000df303e59dd", "score": "0.51564765", "text": "def clean_events(self, events):\n super(FRMatConverter, self).clean_events(events)\n\n serialpos = 0\n current_list = -999\n current_word = 'X'\n current_serialpos = -999\n last_stim_time = 0\n last_stim_duration = 0\n last_event = None\n for event in events:\n if 'PRACTICE' in event.type:\n event.list = -1\n if event.type == 'PRACTICE_WORD':\n event.serialpos = serialpos\n serialpos += 1\n event.item_num = -1\n if event.type == 'REC_WORD_VV':\n event.intrusion = -1\n event.list = current_list\n event.msoffset = 20\n event.item_num = -1\n if event.type == 'REC_WORD':\n event.msoffset = 20\n if event.type == 'REC_START':\n event.list = current_list\n event.msoffset = 1\n if event.type == 'WORD_OFF':\n event.item_name = current_word\n if event.type == 'REC_WORD' and event.intrusion == 0:\n event.recalled = True\n\n if (event.type in ('REC_WORD', 'WORD_OFF') and (event.item_num in (-1, -999))):\n this_word_events = events[events.item_name == event.item_name]\n wordnos = np.unique(this_word_events.item_num)\n wordnos = wordnos[np.logical_and(wordnos != -1, wordnos != -999)]\n if len(wordnos) > 0:\n event.item_num = wordnos[0]\n\n if event.type == 'PRACTICE_WORD_OFF':\n event.item_name = current_word\n\n if event.type == 'WORD_OFF':\n event.serialpos = current_serialpos\n\n\n if event.list != -999:\n current_list = event.list\n\n if event.item_name != 'X':\n current_word = event.item_name\n\n if event.serialpos > 0:\n current_serialpos = event.serialpos\n\n if event.type == 'STIM_ON':\n event.stim_params['stim_on'] = True\n last_stim_time = event.mstime\n last_stim_duration = self.FR2_STIM_DURATION\n events.stim_list[events.list == event.list] = True\n # if 
last_event:\n # last_event.stim_params = event.stim_params\n # last_event.stim_params['stim_on'] = 0\n\n if event.mstime <= last_stim_time + last_stim_duration:\n event.is_stim = 1\n event.stim_params['stim_on'] = True\n else:\n event.stim_params = BaseSessionLogParser.empty_stim_params()\n\n last_event = event\n return events", "title": "" }, { "docid": "e474a277e8c62f256a2771c8d1f91e6a", "score": "0.5151315", "text": "def get_all_event_names_no_begin_end_events(cls):\n return [\n event_name.value\n for event_name in cls\n if not (event_name.name.endswith(\"BEGIN\"))\n | (event_name.name.endswith(\"END\"))\n ]", "title": "" }, { "docid": "5c91e108167ef8dc0b1c6fec6f13c920", "score": "0.5140681", "text": "def event11200690():\n header(11200690)\n\n skip_if_this_event_on(6)\n obj.disable_treasure(1201600)\n obj.disable(1201600)\n if_event_flag_on(-1, 1127) # Dusk kidnapped.\n if_event_flag_on(-1, 1130) # Dusk rescued.\n if_condition_true(0, -1)\n end()\n\n obj.enable(1201600)\n obj.enable_treasure(1201600)", "title": "" }, { "docid": "b2f17297bb2ebe311d5247b21003d6d8", "score": "0.5139578", "text": "def _ensure_known_event(self, event_class: type[Event]):\n # If known, nothing to do\n if event_class in self._known_events:\n return\n\n # If base class isn't an event (ie we're dealing with Event), nothing to do\n base_cls = event_class.__bases__[0]\n if not issubclass(base_cls, Event):\n return\n\n # We'll know this going forwards, don't want to re-register\n self._known_events[event_class] = None\n\n # Ensure base class is known - if it isn't, we'll keep working up until we find\n # something we do know\n self._ensure_known_event(base_cls)\n\n # We've found a known event, propagate down any handlers\n\n # Propagating at registration means that we don't need to walk the MRO for\n # every event raised\n self.events[event_class] = self.events[base_cls][:]", "title": "" }, { "docid": "13c4a41e530adb4705b2faac4e4f1981", "score": "0.5138456", "text": "def _extract_events(events: Sequence[Event]) -> ProtoSpan.TimeEvents:\n if not events:\n return None\n logs = []\n dropped_annontations = 0\n if len(events) > MAX_NUM_EVENTS:\n logger.warning(\n \"Exporting more then %s annotations, some will be truncated\",\n MAX_NUM_EVENTS,\n )\n dropped_annontations = len(events) - MAX_NUM_EVENTS\n events = events[:MAX_NUM_EVENTS]\n for event in events:\n if len(event.attributes) > MAX_EVENT_ATTRS:\n logger.warning(\n \"Event %s has more then %s attributes, some will be truncated\",\n event.name,\n MAX_EVENT_ATTRS,\n )\n logs.append(\n {\n \"time\": _get_time_from_ns(event.timestamp),\n \"annotation\": {\n \"description\": _get_truncatable_str_object(\n event.name, 256\n ),\n \"attributes\": _extract_attributes(\n event.attributes, MAX_EVENT_ATTRS\n ),\n },\n }\n )\n return ProtoSpan.TimeEvents(\n time_event=logs,\n dropped_annotations_count=dropped_annontations,\n dropped_message_events_count=0,\n )", "title": "" }, { "docid": "a158366e1ce038ab170d7166ff9824a4", "score": "0.5135987", "text": "def test_events_from_annotation_orig_time_none():\n # Create fake data\n sfreq, duration_s = 100, 10\n data = np.random.RandomState(42).randn(1, sfreq * duration_s)\n info = mne.create_info(ch_names=[\"EEG1\"], ch_types=[\"eeg\"], sfreq=sfreq)\n raw = mne.io.RawArray(data, info)\n\n # Add annotation toward the end\n onset = [8]\n duration = [1]\n description = [\"0\"]\n annots = mne.Annotations(onset, duration, description)\n raw = raw.set_annotations(annots)\n\n # Crop start of raw\n raw.crop(tmin=7)\n\n # Extract epochs\n 
events, event_ids = mne.events_from_annotations(raw)\n epochs = mne.Epochs(\n raw, events, tmin=0, tmax=1, baseline=None, on_missing=\"warning\"\n )\n\n # epochs is empty\n assert_array_equal(epochs.get_data()[0], data[:, 800:901])", "title": "" } ]
8a1fab3c54bbd5998de5c3cb6c71b86b
Fetch account transactions pending
[ { "docid": "49c3248688c051b4d4a533f3c9b576f7", "score": "0.72934383", "text": "def get_account_transactions_pending(self, account_id=None, query_params=None):\n\n return self.fetch('get',\n '%s/%s/transactions/pending' % (ACCOUNTS, account_id),\n query_params=query_params)", "title": "" } ]
[ { "docid": "7cb379d40366857996bc7be31b7c555a", "score": "0.6982897", "text": "def get_latest_transactions(self):", "title": "" }, { "docid": "732597fde4bf630d817f5643921c959f", "score": "0.67092973", "text": "def transaction_request(self, acct, start, end, status):\n if status == 'pending':\n st = 0\n elif status == 'approved':\n st = 1\n elif status == 'declined':\n st = 2\n else:\n st = 0\n conn = http.client.HTTPSConnection(\"api.admitad.com\")\n payload = ''\n\n \n conn.request(\"GET\", f\"/advertiser/{acct}/statistics/actions/?start_date={start}&end_date={end}&status={st}&limit=5000\", payload, self.headers)\n res = conn.getresponse()\n data = res.read()\n return json.loads(data.decode(\"utf-8\"))['results']", "title": "" }, { "docid": "1738d10a02d9d3e8f6adadca8dfa7c95", "score": "0.66989326", "text": "def _get_transactions(account, params):\n query = _build_success_query()\n query = query & _build_type_query(account, params)\n\n if \"conversation\" in params:\n query = query & _build_conversation_query(params['conversation'])\n\n if \"date\" in params:\n query = query & _build_date_query(params['date'], params['tz_offset'])\n\n results = Transaction.objects.filter(query).order_by(\"-timestamp\")\n\n first = params['page'] * params['tpp']\n last = (params['page']+1) * params['tpp']\n\n transactions = []\n for transaction in results[first:last]:\n trans = {}\n trans['transaction_id'] = transaction.id\n trans['timestamp'] = utils.datetime_to_unix_timestamp(\n transaction.timestamp)\n trans['amount'] = transaction.amount_in_drops\n\n if transaction.type == Transaction.TYPE_DEPOSIT:\n trans['type'] = \"DEPOSIT\"\n elif transaction.type == Transaction.TYPE_WITHDRAWAL:\n trans['type'] = \"WITHDRAWAL\"\n elif transaction.type == Transaction.TYPE_CHARGE:\n if transaction.debit_account == account:\n trans['type'] = \"CHARGE_PAID\"\n else:\n trans['type'] = \"CHARGE_RECEIVED\"\n elif transaction.type == Transaction.TYPE_ADJUSTMENT:\n if account == transaction.debit_account:\n trans['type'] = \"ADJUSTMENT_PAID\"\n else:\n trans['type'] = \"ADJUSTMENT_RECEIVED\"\n\n if transaction.debit_account == account:\n other_account = transaction.credit_account\n elif transaction.credit_account == account:\n other_account = transaction.debit_account\n else:\n raise RuntimeError(\"Should never happen\")\n\n trans['other_account_type'] = Account.TYPE_MAP[other_account.type]\n if other_account.type == Account.TYPE_USER:\n trans['other_account_global_id'] = other_account.global_id\n\n if transaction.message != None:\n trans['message_hash'] = transaction.message.hash\n\n transactions.append(trans)\n\n return {'transactions' : transactions}", "title": "" }, { "docid": "53ef2c385c18e12cda87a044e8d71a7a", "score": "0.66439724", "text": "def get_transactions():\n email = os.environ['EMAIL']\n password = os.environ['PASSWORD']\n mint = mintapi.Mint(email, password)\n mint.initiate_account_refresh()\n transactions = mint.get_transactions()\n transactions.drop_duplicates(['date', 'original_description', 'amount'], inplace=True)\n return transactions", "title": "" }, { "docid": "6b4e9230290163c980e64600d3c3f2c7", "score": "0.6631142", "text": "async def get_transactions(self):\n return self._make_get_request('gettransactions')", "title": "" }, { "docid": "f6e826fb762c1876bc0f128731d5183f", "score": "0.65901923", "text": "def get_pending_tx():\n return json.dumps(blockchain.unconfirmed_transactions)", "title": "" }, { "docid": "bbbae914a76bd86cfe8bfa648799cf3d", "score": "0.6575564", "text": "def 
get_pending_transactions(\n endpoint = DEFAULT_ENDPOINT,\n timeout = DEFAULT_TIMEOUT\n) -> list:\n method = \"hmyv2_pendingTransactions\"\n try:\n return rpc_request( method,\n endpoint = endpoint,\n timeout = timeout )[ \"result\" ]\n except KeyError as exception:\n raise InvalidRPCReplyError( method, endpoint ) from exception", "title": "" }, { "docid": "d43f83a2061fb95388cbe0d29042f983", "score": "0.6448456", "text": "def fetch_transactions(access_token):\n Transaction = apps.get_model('finance', 'Transaction')\n Transaction.objects.create_or_update_transactions(access_token)", "title": "" }, { "docid": "c5fe3d67cdb81a11bc3aad05eff4658e", "score": "0.6433668", "text": "async def accounts_pending(self, accounts: List[str]) -> dict:\n pending_action = {\n 'action': 'accounts_pending',\n 'accounts': accounts,\n 'include_only_confirmed': True\n }\n return await self.make_request(pending_action)", "title": "" }, { "docid": "723198e6db1b49d6820ee47025d331b3", "score": "0.6413563", "text": "def getTransactions(self):\n return []", "title": "" }, { "docid": "a8a62e4cc0448d18e52b3721f196a200", "score": "0.6408732", "text": "def test_get_transactions(self):\n with self.database.get_session() as session:\n self.database.add_acc(session, self.acc_name_1, 100, \"EUR\")\n self.database.commit(session)\n acc = self.database.get_acc(session, self.acc_name_1)\n acc.add_income(100, \"income1\")\n time.sleep(0.001)\n acc.add_income(100, \"income2\")\n time.sleep(0.001)\n acc.add_income(100, \"income3\")\n\n transactions = self.database.get_transactions(\n session, self.acc_name_1, False, 2\n )\n self.assertEqual(transactions[0].new_balance, 200)\n self.assertEqual(len(transactions), 2)\n\n transactions = self.database.get_transactions(\n session, self.acc_name_1, True, 50\n )\n self.assertEqual(transactions[0].new_balance, 400)\n self.assertEqual(len(transactions), 3)\n\n transactions = self.database.get_transactions(\n session, self.acc_name_1, False, 2, 1\n )\n self.assertEqual(transactions[0].new_balance, 300)", "title": "" }, { "docid": "6d65a58ae9c7ee4086abe0b3983be7a0", "score": "0.6391569", "text": "def get_all_transactions(self, acct, start, end, status): \n transactions = self.transaction_request(acct,start,end,status)\n transaction_list = []\n for t in transactions:\n try:\n if t['product_id'] == 1636:\n nvr = 'New'\n else:\n nvr = 'Returning'\n except KeyError as e:\n nvr = 'Returning'\n transaction = {\n 'transactionDate': t['action_time'],\n 'id': t['id'],\n 'saleAmount': {'amount':t['order_sum'], 'currency':t['currency']},\n 'commissionAmount': {'amount':t['payment_webmaster'], 'currency':t['currency']},\n 'publisherId': t['website_id'],\n 'status': nvr,\n 'voucherCode': t['promocode'],\n 'customerCountry': t['action_country'],\n 'advertiserCountry': t['action_country']\n }\n try:\n if 'мобильный' in t['product_name']:\n transaction['device'] = 'Mobile'\n else:\n transaction['device'] = 'Desktop'\n transaction_list.append(transaction)\n except TypeError as e:\n transaction['device'] = 'Desktop'\n return transaction_list", "title": "" }, { "docid": "02a4b9b60c65672b20a33a4712e53201", "score": "0.63852715", "text": "def get_all_pending(cls):\n\n return cls.query.filter(\n cls.status.in_(['new', 'translating'])\n ).all()", "title": "" }, { "docid": "a2074cd54c6d0286b8c8ddf91f97fdc2", "score": "0.6363952", "text": "def get_transactions(self, account_name, skip_seq, num_seq) -> dict:\n\n body = dict(\n account_name=account_name,\n skip_seq=skip_seq,\n num_seq=num_seq,\n )\n\n return self.exec(\n 
api='account_history',\n endpoint='get_transactions',\n body=body\n )", "title": "" }, { "docid": "7899b94807d9972392c8062b56e72f08", "score": "0.63368607", "text": "def get_pending_orders(self):\n\n params = ('pending',)\n\n sql = \"\"\"\n select *\n from OrderQueue\n where status = ?\n \"\"\"\n\n return self._exec_query(sql, params)", "title": "" }, { "docid": "4e5f3d84a5e9dcfd15346afb6ee34278", "score": "0.6331846", "text": "def transactions(self, *, account_id: str) -> List[Transaction]:\n self.log.debug(\"Requesting transaction list for account: %s\", account_id)\n response = self.get(f'transactions?account_id={account_id}')\n return Transaction.from_json(response.json())", "title": "" }, { "docid": "17dd017f083dd86d30c8df8f2310b1d5", "score": "0.6321221", "text": "def fetch_history_transactions():\n Transaction = apps.get_model('finance', 'Transaction')\n FetchTransactions = apps.get_model('finance', 'FetchTransactions')\n\n ft = FetchTransactions.objects.filter(is_processed=False).first()\n if ft is not None:\n Transaction.objects.create_or_update_transactions(\n ft.item.access_token,\n start_date=ft.start_date, end_date=ft.end_date)\n ft.is_processed = True\n ft.save()\n fetch_history_transactions.delay()", "title": "" }, { "docid": "c1b7d23c617aeeeaaa0adb92dd482b77", "score": "0.62593585", "text": "def get_account_transactions(self, from_date=None, to_date=None):\n raw_transactions = []\n params = {}\n if to_date:\n params['to'] = int(to_date.timestamp()) * 1000\n if from_date:\n params['from'] = int(from_date.timestamp()) * 1000\n\n while True:\n ret = self.client._get(_URL_GET_TRANSACTIONS_LAST, params=params)\n ret_transactions = ret.json()\n if not ret_transactions:\n break\n params['to'] = ret_transactions[-1]['startedDate']\n raw_transactions.extend(ret_transactions)\n \n return AccountTransactions(raw_transactions)", "title": "" }, { "docid": "c5d132c3b23f87ce284d84ed232bc358", "score": "0.62511325", "text": "def get_transactions(account_id=None):\n url = f\"{BASE_URL}{constants['paths']['TRANSACTIONS_PATH']}\"\n if account_id:\n return Base()._request(url.format(account_id), auth=True)\n return Base()._request(url.replace('{0:d}', ''), auth=True)", "title": "" }, { "docid": "bc01ce57a5c0cd9ab404be21832262e8", "score": "0.6189526", "text": "def get_transactions_to_process():\n return WebhookTransaction.objects.filter(\n status=WebhookTransaction.UNPROCESSED\n )", "title": "" }, { "docid": "6f1dafe73acf867d69f22f5770676153", "score": "0.61435425", "text": "def get_bank_transactions(self, parameter=None):\n resp = zoho_http_client.get(base_url, self.details, self.headers, parameter)\n return parser.get_list(resp)", "title": "" }, { "docid": "1a01a86ba40ee7acb6e9b8c625509b36", "score": "0.61351246", "text": "def test_find_all_transactions(self):\n response = self.client.open(\n '/matthickey709/uomi_api/1.0.0/transactions/{account_id}'.format(account_id=56),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "5695290a2f472e824e6ede794ab3550b", "score": "0.61204535", "text": "def get_credits_of_account(self, account):\n with TransactionMapper() as mapper:\n result = []\n\n if not (account is None):\n transactions = mapper.find_by_target_account_id(account.get_id())\n if not (transactions is None):\n result.extend(transactions)\n\n return result", "title": "" }, { "docid": "d24a2a8b9c8102bc11fed30f54e73300", "score": "0.6100377", "text": "def fetch_fn(args):\n\n logging.info(\n \"Fetching the transaction 
history from the account '%s'...\", args.account\n )\n\n credentials = get_credentials()\n accounts = get_accounts()\n client = init_client(credentials)\n\n # Convert date to strings in ISO format\n start = args.start.isoformat()\n end = args.end.isoformat()\n\n token = credentials[\"banks\"][args.bank]\n account_list = None\n\n if not args.all:\n account_list = []\n if args.account:\n account_list.append(accounts[args.bank][args.account][\"account_id\"])\n else:\n for k, v in accounts[args.bank].items():\n account_list.append(v[\"account_id\"])\n\n if len(account_list) > 0:\n response = client.Transactions.get(\n token, start_date=start, end_date=end, account_ids=account_list\n )\n else:\n response = client.Transactions.get(token, start_date=start, end_date=end)\n\n transactions = response[\"transactions\"]\n\n logging.info(\"Fetched %d transactions\", response[\"total_transactions\"])\n\n while len(transactions) < response[\"total_transactions\"]:\n response = client.Transactions.get(\n token,\n start_date=start,\n end_date=end,\n offset=len(transactions),\n account_ids=account_list,\n )\n transactions.extend(response[\"transactions\"])\n\n logging.info(\"Writing %d transactions to %s\", len(transactions), args.output)\n json.dump(transactions, args.output, sort_keys=True, indent=4)", "title": "" }, { "docid": "ed3ee718e4b164391b03c4e4d42854e9", "score": "0.60913116", "text": "def test_list_transactions(self):\n pass", "title": "" }, { "docid": "d2b215c24e9308ac739e09e9bad254fb", "score": "0.60887164", "text": "def test_get_account_status_all_using_get(self):\n pass", "title": "" }, { "docid": "feb9dfbd49351ac809d3778d02d5060a", "score": "0.60714614", "text": "def get_transactions():\n offset = request.args.get(\"offset\", None)\n limit = request.args.get(\"limit\", None)\n if offset is None or limit is None:\n transactions = Transaction.objects(user=current_user._get_current_object()).order_by(\"-timestamp\")\n return jsonify(transactions), 200\n transactions = Transaction.objects(user=current_user._get_current_object()).skip(int(offset)).limit(int(limit)).order_by(\"-timestamp\")\n return jsonify(transactions), 200", "title": "" }, { "docid": "f1e0933840ade805254683ccd702f411", "score": "0.6050491", "text": "def read_balances(until=datetime.datetime.now()):\n account = 'Balance'\n cursor = connection.cursor()\n cursor.execute(\n\"\"\"select t1.dest_organization_id, sum(t1.amount - coalesce(t2.amount, 0))\nfrom saas_transaction t1 left outer join saas_transaction t2\non t1.dest_organization_id = t2.orig_organization_id\n and t1.dest_account = t2.orig_account\nwhere t1.dest_account = '%s'\ngroup by t1.dest_organization_id\n\"\"\" % account)\n return cursor.fetchall()", "title": "" }, { "docid": "c716c2d342628047a8e8434f4ba04b9b", "score": "0.6048915", "text": "def poll_pending_deposits(cls, pending_deposits: QuerySet) -> List[Transaction]:\n # interface with mock banking rails\n ready_deposits = []\n mock_bank_account_id = \"XXXXXXXXXXXXX\"\n client = rails.BankAPIClient(mock_bank_account_id)\n for deposit in pending_deposits:\n bank_deposit = client.get_deposit(memo=deposit.external_extra)\n if bank_deposit and bank_deposit.status == \"complete\":\n ready_deposits.append(deposit)\n\n return ready_deposits", "title": "" }, { "docid": "2df7b19a7d58e33819a299b5f0004280", "score": "0.5988206", "text": "def query_account(self) -> None:\n self.reqid += 1\n self.queryCash(self.reqid)", "title": "" }, { "docid": "dfec1a1470edcf8dbbf01f3cff0b42e8", "score": "0.5981264", "text": "def 
ManagePendingTransactionStatus(self):\n [params[k] for k in []]\n params['method'] = 'ManagePendingTransactionStatus' \n return self.__request(params)", "title": "" }, { "docid": "74a8201769509ef1b0bf5e0575f025b6", "score": "0.5970593", "text": "def transactions(self):\n\n return [Transaction.from_dict(self.session, transaction_dict) for transaction_dict in self.session._query_api_with_exception(\"/rest/accounts/%s/transactions\" % (str(self.account_id), ))['transactions']]", "title": "" }, { "docid": "66dc8da360b25d18dc9fb6032739e81c", "score": "0.59678245", "text": "def test_recharge_transactions_get(self):\n pass", "title": "" }, { "docid": "794ae66a065fb770a080a1521dc7a45d", "score": "0.5943531", "text": "def getpending(self):\n pass", "title": "" }, { "docid": "c77d69441f9268db2e56aa640d8118a7", "score": "0.59115523", "text": "def getAccountTransactions(burstId, numberOfTransactions = 1):\n r = requests.get(getAccountTransactions + '%s&firstIndex=0&lastIndex=%i' % (burstId, numberOfTransactions))\n if r.status_code != requests.codes.ok:\n return\n msg = r.json()\n try:\n transactionsList = []\n transactions = msg['transactions']\n for transaction in transactions:\n transactionsList.append({'sender': transaction['sender'],\n 'amount': float(transaction['amountNQT'])/100000000,\n 'acc_id': transaction['recipient'],\n 'timestamp': transaction['timestamp']})\n # if no transactions are there\n except KeyError:\n transactionsList = None\n return transactionsList", "title": "" }, { "docid": "495af4dc7d8ed6feea9b534ab8d4bcb8", "score": "0.5889325", "text": "def futures_account_balance(self, **params):\n return self._request_futures_api('get', 'balance', True, 2, data=params)", "title": "" }, { "docid": "0d146d3bb1d245876932833d780eece6", "score": "0.5887558", "text": "def retrieve_accounts(self):\n if not self.settings['addresses']:\n return\n addresses = '|'.join(self.settings['addresses'])\n url = f\"{self.settings['url']}/balance\"\n request_data = {\n 'active': addresses,\n }\n\n try:\n r = requests.get(url, params=request_data, timeout=self.settings['timeout']).json()\n except (\n requests.exceptions.ConnectionError,\n requests.exceptions.ReadTimeout\n ) as e:\n log.warning(f\"Can't connect to {self.settings['url']}. Exception caught: {utils.short_msg(e)}\")\n\n for address in self.settings['addresses']:\n if r.get(address):\n balance = float(int(r.get(address).get('final_balance')) / 100000000)\n if not self._accounts.get('BTC'):\n self._accounts.update({'BTC': {}})\n self._accounts['BTC'].update({\n address: balance,\n })\n else:\n log.warning('Could not retrieve balance. 
The result follows.')\n log.warning(f\"{r.get('result')}: {r.get('message')}\")", "title": "" }, { "docid": "aba8d8a7a744122743b1fa05cab31eee", "score": "0.5884691", "text": "def test_get_account_scheduled_payments_using_get(self):\n pass", "title": "" }, { "docid": "a58870f9f4fa8d5d3502ac32f7e3d318", "score": "0.58809656", "text": "def get_accounts():", "title": "" }, { "docid": "f4285d64f9dc20c7614a489cc0964dc2", "score": "0.58776236", "text": "def get_pending_staking_transactions(\n endpoint = DEFAULT_ENDPOINT,\n timeout = DEFAULT_TIMEOUT\n) -> list:\n method = \"hmyv2_pendingStakingTransactions\"\n try:\n return rpc_request( method,\n endpoint = endpoint,\n timeout = timeout )[ \"result\" ]\n except KeyError as exception:\n raise InvalidRPCReplyError( method, endpoint ) from exception", "title": "" }, { "docid": "71e0caf8ed059e744c1c86cf356d672e", "score": "0.5866871", "text": "def transactions(self):\n return self._transactions", "title": "" }, { "docid": "590d697c4b2a159d0e9a77bf3549e04e", "score": "0.5852646", "text": "def credits(self):\n return self.sheet.transactions.filter(credit=self.account)", "title": "" }, { "docid": "4713b9fe75b592eda2efaaf77dc7f2cf", "score": "0.58464825", "text": "def futures_account(self, **params):\n return self._request_futures_api('get', 'account', True, 2, data=params)", "title": "" }, { "docid": "5e8caef6f3ae00db81e6e948c31af08c", "score": "0.58388466", "text": "def get_transactions(\n self, session, acc_name, reverse=False, max_length=-1, offset=0\n ):\n pass", "title": "" }, { "docid": "2328137a4e22c493697d2d65468f67a7", "score": "0.5836183", "text": "def heroku_cron_transactions() -> tuple:\n return asyncio.run(send_cron_request(_endpoint='_cron/v1/transactions')), 200", "title": "" }, { "docid": "636346b5906a72831d6c693777371788", "score": "0.5827048", "text": "def user_transactions(self, offset=0, limit=100, descending=True):\n data = {\n 'offset': offset,\n 'limit': limit,\n 'sort': 'desc' if descending else 'asc',\n }\n return self._post(\"user_transactions/\", data=data, return_json=True)", "title": "" }, { "docid": "41044beb9fdd0da44cd0bb82be82948b", "score": "0.5816571", "text": "def load_transactions(self, address, start_block, end_block=None):\n payload = {\n 'module': 'account',\n 'action': 'txlist',\n 'address': address,\n 'startblock': start_block,\n }\n if end_block:\n payload['endblock'] = end_block\n\n try:\n return self.request_maker.get(payload)\n except Exception as ex:\n print(\"ex: {}\".format(type(ex)))\n return []", "title": "" }, { "docid": "791eeca825044bc2af1f26c27ed545f4", "score": "0.5794029", "text": "def query_btc_balances(self) -> None:\n if len(self.accounts.btc) == 0:\n return\n\n self.balances[A_BTC] = {}\n btc_usd_price = Inquirer().find_usd_price(A_BTC)\n total = FVal(0)\n for account in self.accounts.btc:\n try:\n balance = self.query_btc_account_balance(account)\n except InputError:\n # This should really never happen.\n self.msg_aggregator.add_error(\n f'While querying BTC balances found invalid BTC account {account} in the DB',\n )\n continue\n total += balance\n self.balances[A_BTC][account] = {\n 'amount': balance,\n 'usd_value': balance * btc_usd_price,\n }\n\n self.totals[A_BTC] = {'amount': total, 'usd_value': total * btc_usd_price}", "title": "" }, { "docid": "9ebdab3cb34210395fa1aac31b19fd9e", "score": "0.5786778", "text": "def process_get_account_unconfirmed_transactions(\n status:
int,\n json: list,\n network_type: models.NetworkType,\n) -> typing.Sequence[models.Transaction]:\n\n assert status == 200\n return [models.Transaction.create_from_dto(i, network_type) for i in json]", "title": "" }, { "docid": "ffb71393b802d653888bb28d9c06db8f", "score": "0.57834435", "text": "def futures_account(self, **params):\n return self._request_futures_api('get', 'account', True, data=params)", "title": "" }, { "docid": "3abc59dbba615212e8c1a35082ed712d", "score": "0.57832193", "text": "def process_get_account_transactions(\n status: int,\n json: list,\n network_type: models.NetworkType,\n) -> typing.Sequence[models.Transaction]:\n\n assert status == 200\n return [models.Transaction.create_from_dto(i, network_type) for i in json]", "title": "" }, { "docid": "1d83120b26a8f25dc141c8a267e49fce", "score": "0.5778644", "text": "def get_transactions(self, **kwargs):\n return self._prepare_payload({\n 'method': 'getTransactions',\n 'params': {},\n })", "title": "" }, { "docid": "c556df09753b57af0e5350333c05fe28", "score": "0.57721496", "text": "def get_transactions(self, account_or_account_id, options ):\n allowed_keys = [\"accounts\", \"filter\", \"sync_id\", \"count\", \"offset\", \"sort\", \"since\", \"until\", \"since_type\", \"types\", \"cents\", \"include_pending\", \"include_statistics\"]\n options = filterNone(filterKeys(options, allowed_keys))\n\n account_id = getAccountId(account_or_account_id)\n if account_id is not None:\n path = \"/rest/accounts/{0}/transactions?{1}\".format(account_id, urllib.urlencode(options))\n else:\n path = \"/rest/transactions?{0}\".format( urllib.urlencode(options))\n\n return self._query_api_object(Transaction, path, collection_name=\"transactions\")", "title": "" }, { "docid": "e712ca7a515568cc07b5060b511d07fc", "score": "0.5745724", "text": "def process_get_account_partial_transactions(\n status: int,\n json: list,\n network_type: models.NetworkType,\n) -> typing.Sequence[models.Transaction]:\n\n assert status == 200\n return [models.Transaction.create_from_dto(i, network_type) for i in json]", "title": "" }, { "docid": "60fc7781ae16af76d1ef74bd1693b616", "score": "0.5741674", "text": "def transactions(account_id):\n API_URL = 'https://api.nordeaopenbanking.com/v2' # this is freaking ugly\n url = \"{}/accounts/{}/transactions\".format(API_URL, account_id)\n title = 'Transactions endpoint for transaction listing.'\n HEADERS['Authorization'] = \"Bearer {}\".format(str(access_token))\n r = requests.get(url, headers=HEADERS)\n error = check_status(r)\n return render_template('transactions_template.html', response=r.json(), title=title, error=error, account_id=account_id)", "title": "" }, { "docid": "a88f1a18efef7be62a88d5fec34510f3", "score": "0.5737978", "text": "def get_transactions_api():\n from tippicserver.models.user import get_address_by_userid\n from tippicserver.models.transaction import get_transactions_json\n from tippicserver.utils import get_discovery_apps\n try:\n user_id, auth_token = extract_headers(request)\n public_address = get_address_by_userid(user_id)\n platform = get_user_os_type(user_id)\n link = DISCOVERY_APPS_ANDROID_URL if platform == OS_ANDROID else DISCOVERY_APPS_OSX_URL\n \n discovery_apps = get_discovery_apps(link)\n detailed_txs = get_transactions_json(user_id, public_address, discovery_apps)\n\n # sort by date\n detailed_txs = sorted(detailed_txs, key=lambda k: k['date'], reverse=True)\n if len(detailed_txs) > MAX_TXS_PER_USER:\n detailed_txs = detailed_txs[:MAX_TXS_PER_USER]\n\n except Exception as e:\n import 
traceback\n log.error('cant get txs for user')\n traceback.print_exc()\n return jsonify(status='error', txs=[])\n\n return jsonify(status='ok', txs=detailed_txs)", "title": "" }, { "docid": "7e8a184a201ea6553bd786fcdb8f2507", "score": "0.5735739", "text": "def futures_account_transfer(self, **params):\n return self._request_margin_api('post', 'futures/transfer', True, data=params)", "title": "" }, { "docid": "b80e2d046e8667905583456f57cf80d0", "score": "0.5717284", "text": "def test_get_account_periodic_payments_using_get(self):\n pass", "title": "" }, { "docid": "2887700031af941953f30d2142ddcccf", "score": "0.5717258", "text": "def transactions(self, interval, verbose=True):\n\t\tinterval = interval.lower()\n\t\tassert interval in ['minute', 'hour', 'day'], 'Invalid interval specified.'\n\t\treqst = requests.get('https://www.bitstamp.net/api/v2/transactions/btcusd/?time={}'.format(interval))\n\t\ttime_stamp = datetime.strftime(datetime.now(), \"%Y-%m-%d %H:%M:%S\")\n\t\tif verbose: print(\"({}): Collected transaction data!\".format(time_stamp))\n\t\ttrxns = pd.DataFrame(json.loads(reqst.content))\n\t\tif 'date' in trxns.columns:\n\t\t\ttrxns['date'] = pd.to_datetime(trxns['date'].astype(int).apply(datetime.fromtimestamp))\n\t\treturn trxns", "title": "" }, { "docid": "4b9528edd63a746dce62bb9f43f2088e", "score": "0.5712164", "text": "def balance(self):\n from funding.bin.utils import Summary, coin_to_usd\n from funding.factory import db\n rtn = {'sum': 0.0, 'txs': [], 'pct': 0.0, 'available': 0}\n\n if self.archived:\n return rtn\n\n try:\n r = requests.get(f'http://{settings.RPC_HOST}:{settings.RPC_PORT}/json_rpc', json={\n \"jsonrpc\": \"2.0\",\n \"id\": \"0\",\n \"method\": \"get_payments\",\n \"params\": {\n \"payment_id\": self.payment_id\n }\n })\n r.raise_for_status()\n blob = r.json()\n\n assert 'result' in blob\n assert 'payments' in blob['result']\n assert isinstance(blob['result']['payments'], list)\n except Exception as ex:\n return rtn\n\n txs = blob['result']['payments']\n for tx in txs:\n tx['amount_human'] = float(tx['amount'])/1e11\n tx['txid'] = tx['tx_hash']\n tx['type'] = 'in'\n\n data = {\n 'sum': sum([float(z['amount']) / 1e11 for z in txs]),\n 'txs': txs\n }\n\n if not isinstance(data, dict):\n print('error; get_transfers_in; %d' % self.id)\n return rtn\n\n prices = Summary.fetch_prices()\n for tx in data['txs']:\n if prices:\n tx['amount_usd'] = coin_to_usd(amt=tx['amount_human'], btc_per_coin=prices['coin-btc'], usd_per_btc=prices['btc-usd'])\n\n if data.get('sum', 0.0):\n data['pct'] = 100 / float(self.funds_target / data.get('sum', 0.0))\n data['available'] = data['sum']\n else:\n data['pct'] = 0.0\n data['available'] = 0.0\n\n if data['pct'] != self.funds_progress:\n self.funds_progress = data['pct']\n db.session.commit()\n db.session.flush()\n\n if data['available']:\n data['remaining_pct'] = 100 / float(data['sum'] / data['available'])\n else:\n data['remaining_pct'] = 0.0\n\n return data", "title": "" }, { "docid": "7acf70600ef4cce8bfaf2a9d21db0e0f", "score": "0.57018524", "text": "def get_balance(self):\n\n # Prepare variables\n ms_time = round(time.time() * 1000)\n request = \"/api/v3/account\"\n\n # Create hashed signature\n query_string = f\"timestamp={ms_time}\"\n signature = hmac.new(self.secret.encode('utf-8'),
query_string.encode('utf-8'), hashlib.sha256).hexdigest()\n\n params = {\n \"timestamp\": ms_time,\n \"signature\": signature,\n }\n\n return requests.get(self.endpoint + request, params=params, headers=self.header)", "title": "" }, { "docid": "4cdcd50466ed1a00a1e54bcf54126d5f", "score": "0.56921864", "text": "def test_get_all_pending(self, db):\n\n translation_ids = [t.uid for t in Translation.get_all_pending()]\n\n expected_ids = ['uid0000005', ]\n\n assert translation_ids == expected_ids", "title": "" }, { "docid": "7b6e90293e21100f60d22188c0cc8019", "score": "0.5678511", "text": "def get_pending_critical_transactions(actor_id):\n\n actor = user_by_id_and_type(actor_id, T2Employee)\n return Transaction.query.filter_by(amount > critical_transaction_threshold, approved = False).all()", "title": "" }, { "docid": "8e5e88a34118cf111f148efc9cb05b0f", "score": "0.56599593", "text": "def retrieve_all_bank_accounts(db: Session= Depends(get_db)):\n return account.retrieve_all_bank_accounts(db)", "title": "" }, { "docid": "2892b649f7c19a13ad0223a73484dbc1", "score": "0.5655631", "text": "def transactions(self):\n return self._query_api_object(Transaction, \"/rest/transactions\",\n collection_name=\"transactions\")", "title": "" }, { "docid": "998f293665479daba28df2994bb1c4fe", "score": "0.56494564", "text": "def pending_requests(self):\n return self._connection.pending_requests()", "title": "" }, { "docid": "45be55b818deb10049d51989ecaa1ff1", "score": "0.5645533", "text": "def pending_entities(self):\n from aleph.model.entity import Entity, collection_entity_table\n from aleph.model.document import collection_document_table\n from aleph.model.reference import Reference\n cet = aliased(collection_entity_table)\n cdt = aliased(collection_document_table)\n q = db.session.query(Entity)\n q = q.filter(Entity.state == Entity.STATE_PENDING)\n q = q.join(Reference, Reference.entity_id == Entity.id)\n q = q.join(cet, cet.c.entity_id == Entity.id)\n q = q.join(cdt, cdt.c.document_id == Reference.document_id)\n q = q.filter(cet.c.collection_id == self.id)\n q = q.filter(cdt.c.collection_id == self.id)\n q = q.group_by(Entity)\n return q.order_by(func.count(Reference.id).desc())", "title": "" }, { "docid": "921eba480e541c10dd4217e5bcf74ffb", "score": "0.56446993", "text": "def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):\n self.load_markets()\n currency = None\n request = {}\n if code is not None:\n currency = self.currency(code)\n request['currency'] = currency['id']\n if since is not None:\n # 90 days date range\n request['start_ts'] = since\n if limit is not None:\n request['page_size'] = limit\n response = self.spotPrivatePostPrivateGetWithdrawalHistory(self.extend(request, params))\n #\n # {\n # id: 1640704829096,\n # method: 'private/get-withdrawal-history',\n # code: 0,\n # result: {\n # withdrawal_list: [\n # {\n # currency: 'DOGE',\n # client_wid: '',\n # fee: 50,\n # create_time: 1640425168000,\n # id: '3180557',\n # update_time: 1640425168000,\n # amount: 1102.64092,\n # address: 'DDrGGqmp5Ddo1QH9tUvDfoL4u4rqys5975',\n # status: '5',\n # txid: 'ce23e9e21b6c38eef953070a05110e6dca2fd2bcc76d3381000547b9ff5290b2/0'\n # }\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'result', {})\n withdrawalList = self.safe_value(data, 'withdrawal_list', [])\n return self.parse_transactions(withdrawalList, currency, since, limit)", "title": "" }, { "docid": "c0341ba72f2ed626ee3d4a78e57e42b4", "score": "0.5638033", "text": "def get(self):\n return Account.get_all()", "title": "" }, 
{ "docid": "1de5fd8cfd7721c5d37e52294d1a6438", "score": "0.5635234", "text": "def __get_transaction_list(self, address):\n try:\n request = requests.get(CONSTANTS['ADDR_DETAIL_API'] + address)\n json_text = request.json()\n return RESPONSE['success'], json_text[\"txs\"]\n except BaseException as error:\n return RESPONSE['invalid'], None", "title": "" }, { "docid": "d31c30a4a24c9450caf01c4ecb147056", "score": "0.5613252", "text": "def _download_query(self, as_of, until):\n c = self.institution.client()\n q = c.bank_account_query(\n number=self.number,\n date_start=as_of,\n date_end=until,\n account_type=self.account_type,\n bank_id=self.routing_number)\n return q", "title": "" }, { "docid": "f12c1e1623ab2bf3a0452662e680f090", "score": "0.5589328", "text": "def process_get_account_outgoing_transactions(\n status: int,\n json: list,\n network_type: models.NetworkType,\n) -> typing.Sequence[models.Transaction]:\n\n assert status == 200\n return [models.Transaction.create_from_dto(i, network_type) for i in json]", "title": "" }, { "docid": "73dbfc99443c77507613a9e72fea0918", "score": "0.5574656", "text": "def pending(self):\n from .models import EngagementRequest\n return self.filter(status=EngagementRequest.PENDING_STATUS)", "title": "" }, { "docid": "54d22ced5f926731e8babfa082d50def", "score": "0.5566669", "text": "def balance(self, account):\n res = self.call('eth_getBalance', address_encoder(account), 'pending')\n return quantity_decoder(res)", "title": "" }, { "docid": "867b3f5cabc2fe8a2710d30e6f3cfc23", "score": "0.55655605", "text": "def get_transactions(self, account=None, created_at=None, from_trans=None,\n order=None, order_direction=None, separate_splitsid=None):\n\n limit = 250\n page = 1\n accounts = {account.id: account for account in self.get_accounts()}\n categories = {category.id: category for category in self.get_categories()}\n while True:\n params = {'account_id': account.id if account else None,\n 'created_at': created_at.to_date_string() if created_at else None,\n 'from_id': from_trans.id if from_trans else None,\n 'created_at_time': created_at.to_time_string()\n if created_at else None,\n 'created_at_timezone': created_at.timezone if created_at else None,\n 'order': order,\n 'order_direction': order_direction,\n 'separate_splitsid': separate_splitsid,\n 'limit': limit,\n 'page': page}\n response = self._session.get('transactions', params=params)\n response.raise_for_status()\n if not response.json()['status']:\n raise RuntimeError(response.json()['error_msg'])\n if not response.json()['data']:\n break\n for tr_data in response.json()['data']:\n yield Transaction.from_api(tr_data,\n accounts.get(tr_data['account_id'],\n NO_ACCOUNT),\n categories.get(tr_data['category_id'],\n NO_CATEGORY),\n self.get_transaction(tr_data['parent']) if\n tr_data['parent'] else None)\n page += 1", "title": "" }, { "docid": "407089302e5013e50ec5cb77915907a1", "score": "0.5563465", "text": "def get_debits_of_account(self, account):\n with TransactionMapper() as mapper:\n result = []\n\n if not (account is None):\n transactions = mapper.find_by_source_account_id(account.get_id())\n if not (transactions is None):\n result.extend(transactions)\n\n return result", "title": "" }, { "docid": "9b32161d5b0ce10f760c70672bd9e3b1", "score": "0.5560302", "text": "async def test_get_tx(self):\n await test_service.get_tx(self)", "title": "" }, { "docid": "19fc129af859d9d5d8eca930e97f1953", "score": "0.55421585", "text": "def withdrawal_requests(self):\n return self._post(\"withdrawal_requests/\", 
return_json=True)", "title": "" }, { "docid": "7b5aed6b1280dd767a9623a3fa331e89", "score": "0.55324954", "text": "def address_unconfirmed_transactions_response(self, address, page=1, limit=20, sort_dir='asc'):\n response = self.client.get(\"/address/%s/unconfirmed-transactions\" % (address, ), params={'page': page, 'limit': limit, 'sort_dir': sort_dir})\n\n return response", "title": "" }, { "docid": "5c1239232859de83397ea5e12e461555", "score": "0.5531672", "text": "def get_data(self):\n transactions = (\n Transaction.objects.filter(src__id=self.kwargs.get(\"pk\"))\n .values(\"date\")\n .annotate(Sum(\"amount\"))\n .order_by(\"date\")\n )\n init_balance = (\n Account.objects.filter(pk=self.kwargs.get(\"pk\")).first().init_balance\n )\n # do something and convert to ...\n data = []\n last_amount = init_balance\n for transaction in transactions:\n last_amount += transaction[\"amount__sum\"]\n data.append(last_amount)\n return [data]", "title": "" }, { "docid": "837e81971369c71c5beb6d283c727ec6", "score": "0.5515066", "text": "def test_get_account_status_using_get(self):\n pass", "title": "" }, { "docid": "69afb53c4afd140a6f1d36395fa16bef", "score": "0.5505919", "text": "def transaction():\n return _transactions.get(_thread_id())", "title": "" }, { "docid": "4bc4a64ed2518df8b6ee7eeedb284f71", "score": "0.54943275", "text": "def get(self):\r\n\t\tself.cursor.execute(\"\"\"\r\n\t\t\t\t\t\t\tSELECT\r\n\t\t\t\t\t\t\t\tContractedTransactions.*,\r\n\t\t\t\t\t\t\t\tPurchaseOrderList.ContractorId,\r\n\t\t\t\t\t\t\t\tContractedItems.ContractedQty\r\n\t\t\t\t\t\t\tFROM ContractedTransactions\r\n\t\t\t\t\t\t\tLEFT JOIN Certificates ON\r\n\t\t\t\t\t\t\tContractedTransactions.ContractedTransactionId = Certificates.ContractedTransactionId\r\n\t\t\t\t\t\t\tLEFT JOIN ContractedItems ON\r\n\t\t\t\t\t\t\tContractedTransactions.ContractedItemId = ContractedItems.ContractedItemId\r\n\t\t\t\t\t\t\tLEFT JOIN PurchaseOrderList\r\n\t\t\t\t\t\t\tON ContractedItems.PurchaseOrder = PurchaseOrderList.PurchaseOrder\r\n\t\t\t\t\t\t\tWHERE\r\n\t\t\t\t\t\t\tCertificates.ContractedTransactionId IS NULL AND\r\n\t\t\t\t\t\t\tContractedItems.PurchaseOrder = %s\"\"\",self.purchaseorder)\r\n\t\tuncerttrans=self.cursor.fetchall()\r\n\t\tfor i in uncerttrans:\r\n\t\t\tself.cursor.execute(\r\n\t\t\t\t\"\"\"\t\r\n\t\t\t\tSELECT\r\n\t\t\t\t\tSUM(ContractedTransactions.QtyCompleted) AS AccUncertQty,\r\n\t\t\t\t\tSUM(ContractedTransactions.QtyCompleted * ContractedTransactions.UnitPrice) AS TotalPrice,\r\n\t\t\t\t\tSUM(ContractedTransactions.QtyCompleted * ContractedTransactions.UnitPrice) * \"\"\"\r\n\t\t\t\t+ str(TAX_MULTIPLIER) + \"\"\" AS PriceIncTax\r\n\t\t\t\tFROM ContractedTransactions\r\n\t\t\t\tLEFT JOIN Certificates ON\r\n\t\t\t\tContractedTransactions.ContractedTransactionId = Certificates.ContractedTransactionId\r\n\t\t\t\tWHERE\r\n\t\t\t\tCertificates.ContractedTransactionId IS NULL AND\r\n\t\t\t\tContractedTransactions.ContractedItemId=%(ContractedItemId)s AND\r\n\t\t\t\tContractedTransactions.ContractedTransactionId <= %(ContractedTransactionId)s\r\n\t\t\t\tGROUP BY ContractedTransactions.ContractedItemId\r\n\t\t\t\t\"\"\", i)\r\n\t\t\tcalc=self.cursor.fetchone()\r\n\t\t\tif calc:\r\n\t\t\t\ti.update(calc)\n\t\t\tself.cursor.execute(\r\n\t\t\t\t\"\"\"\t\r\n\t\t\t\tSELECT ContractedItems.ContractedQty - SUM(QtyCompleted) AS RemainingQty\r\n\t\t\t\tFROM ContractedTransactions\r\n\t\t\t\tLEFT JOIN ContractedItems ON\r\n\t\t\t\tContractedTransactions.ContractedItemId = 
ContractedItems.ContractedItemId\r\n\t\t\t\tWHERE\r\n\t\t\t\tContractedTransactions.ContractedItemId=%(ContractedItemId)s AND\r\n\t\t\t\tContractedTransactions.ContractedTransactionId <= %(ContractedTransactionId)s\r\n\t\t\t\tGROUP BY ContractedTransactions.ContractedItemId\r\n\t\t\t\t\"\"\", i)\r\n\t\t\tcalc=self.cursor.fetchone()\r\n\t\t\tif calc:\r\n\t\t\t\ti.update(calc)\r\n\t\t\tif i['ContractedTransactionId'] in self.selection:\r\n\t\t\t\ti['Selected']=True\n\t\t\telse:\r\n\t\t\t\ti['Selected']=False\r\n\t\treturn uncerttrans", "title": "" }, { "docid": "13d1ae8b66f3f97e9e2e2c9c09cd4143", "score": "0.5488626", "text": "def GetTransactionList(self):\n pklog.info(\"GetTransactionList() was called\")\n return self._get_transaction_list()", "title": "" }, { "docid": "edf8873e4bd9aa8a9b6ed84f58d95eb4", "score": "0.54830265", "text": "def transactions(self, start=0):\n\n url = self.api_base_url + \"/transactions/new_transactions?timestamp={}\"\n url = url.format(start)\n\n # Get the transactions and wrap each one as a Transaction so that\n # we have a human-friendly str() available\n data = self._get(url)\n data['transactions'] = [Transaction(t) for t in data['transactions']]\n\n return data", "title": "" }, { "docid": "0a235f2fa053a33efd0b9dbb8307196d", "score": "0.5472348", "text": "def collectFoundationTransactions(addr):\n\n startblock = 11000000 #Block before start of Foundation\n endblock = 12200000 #Block after now\n blockstep = 30000\n df = pd.DataFrame(columns=columnNames)\n while (startblock < endblock):\n result = query(\"account\",\"txlist\",address=addr,startblock=str(startblock),\n endblock=str(startblock+blockstep))\n startblock = startblock + blockstep\n print(str(startblock) + \": \" + str(len(result[\"result\"])))\n if (len(result[\"result\"])==0):\n continue\n elif (len(result[\"result\"])>9999):\n print(\"Too large block step, missed some transactions\")\n else:\n df = df.append(pd.DataFrame(result[\"result\"]),sort=False)\n return df", "title": "" }, { "docid": "8d87da37c4216187f76d074c8768f7ff", "score": "0.5466623", "text": "def _build_received_transactions(self):\n transactions = (\n Transaction.select(\n Transaction.recipient_id, fn.SUM(Transaction.amount).alias(\"amount\")\n )\n .where(Transaction.type == TRANSACTION_TYPE_TRANSFER)\n .group_by(Transaction.recipient_id)\n )\n for transaction in transactions:\n # TODO: make this nicer. 
It feels like a hack to do it this way\n wallet = self.find_by_address(transaction.recipient_id)\n wallet.balance = int(transaction.amount)\n self.save_wallet(wallet)", "title": "" }, { "docid": "d4c90054191e33dab65e86bcfd2f61ab", "score": "0.54596263", "text": "async def get_latest_txn_author_acceptance(self):", "title": "" }, { "docid": "f60751b6c07750b97b17e915addce017", "score": "0.5459581", "text": "def optimize_bank_transaction_list(bank_transactions):\n return bank_transactions.select_related('block')", "title": "" }, { "docid": "bb9194d9d213c7e66288d2f497e3e4dc", "score": "0.54595613", "text": "def futures_coin_account(self, **params):\n return self._request_futures_coin_api(\n \"get\", \"account\", signed=True, data=params\n )", "title": "" }, { "docid": "408c8141e58a31c50a7eb565165f9181", "score": "0.5450974", "text": "def futures_coin_account_balance(self, **params):\n return self._request_futures_coin_api(\n \"get\", \"balance\", signed=True, data=params\n )", "title": "" }, { "docid": "d107b877df0e1219ee6d0cc568d6a5ad", "score": "0.5450132", "text": "def debits(self):\n return self.sheet.transactions.filter(debit=self.account)", "title": "" }, { "docid": "359bcba50f99b6222923f1457e6327dd", "score": "0.5441917", "text": "def address_transactions_response(self, address, page=1, limit=20, sort_dir='asc'):\n\n response = self.client.get(\"/address/%s/transactions\" % (address, ), params={'page': page, 'limit': limit, 'sort_dir': sort_dir})\n\n return response", "title": "" }, { "docid": "eb680214b6c2b37a8cbd76cb9ac5ad26", "score": "0.54282993", "text": "def accounts(params):\n try:\n unconf = conexiones_btc[params_btc[\"current_wallet\"]].listaccounts(minconf=0, as_dict=True)\n conf = conexiones_btc[params_btc[\"current_wallet\"]].listaccounts(minconf=6, as_dict=True)\n \n for x in conf:\n print(\" - %s: %f (%f unconfirmed)\" % (x, float(conf[x]), float(unconf[x]) - float(conf[x])))\n except KeyError:\n error(\"Invalid Wallet: %s\" % params_btc[\"current_wallet\"])", "title": "" }, { "docid": "7f3ec57ddecc85419735a0c2dc90f62f", "score": "0.54196495", "text": "async def get_payments_paginated(\n *,\n wallet_id: Optional[str] = None,\n complete: bool = False,\n pending: bool = False,\n outgoing: bool = False,\n incoming: bool = False,\n since: Optional[int] = None,\n exclude_uncheckable: bool = False,\n filters: Optional[Filters[PaymentFilters]] = None,\n conn: Optional[Connection] = None,\n) -> Page[Payment]:\n\n values: List[Any] = []\n clause: List[str] = []\n\n if since is not None:\n clause.append(f\"time > {db.timestamp_placeholder}\")\n values.append(since)\n\n if wallet_id:\n clause.append(\"wallet = ?\")\n values.append(wallet_id)\n\n if complete and pending:\n pass\n elif complete:\n clause.append(\"((amount > 0 AND pending = false) OR amount < 0)\")\n elif pending:\n clause.append(\"pending = true\")\n else:\n pass\n\n if outgoing and incoming:\n pass\n elif outgoing:\n clause.append(\"amount < 0\")\n elif incoming:\n clause.append(\"amount > 0\")\n else:\n pass\n\n if exclude_uncheckable: # checkable means it
has a checking_id that isn't internal\n clause.append(\"checking_id NOT LIKE 'temp_%'\")\n clause.append(\"checking_id NOT LIKE 'internal_%'\")\n\n return await (conn or db).fetch_page(\n \"SELECT * FROM apipayments\",\n clause,\n values,\n filters=filters,\n model=Payment,\n )", "title": "" }, { "docid": "bd79bdf1235f38c6027a94c6abc4ec95", "score": "0.54193", "text": "def retrieve_all_transaction():\n all_jsons = [each for each in os.listdir(\"./bsc-txns/\") if each.endswith(\".json\")][-30:]\n all_json_df = [pd.read_json(f\"./bsc-txns/{each}\") for each in all_jsons]\n if len(all_jsons) >= 1:\n df_transactions = pd.concat(all_json_df)\n df_transactions = df_transactions.drop_duplicates(subset=[\"tx_hash\"])\n df_transactions.age = df_transactions.age.apply(lambda x: datetime.fromtimestamp(x/1000))\n return df_transactions\n else:\n return {\"message\": \"no file found\"}", "title": "" } ]
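Several of the transaction passages above — most clearly get_payments_paginated — share one pattern: translate boolean flags into SQL WHERE fragments, collect the bind values in parallel, and join the fragments with AND so the query stays parameterized. The sketch below is a minimal, self-contained illustration of that pattern; the table name, column names, and the build_payment_filter helper are assumptions for illustration, not the schema or API of any project quoted above.

```python
from typing import Any, List, Optional, Tuple

def build_payment_filter(
    wallet_id: Optional[str] = None,
    complete: bool = False,
    pending: bool = False,
    outgoing: bool = False,
    incoming: bool = False,
    since: Optional[int] = None,
) -> Tuple[str, List[Any]]:
    # Each enabled flag contributes one WHERE fragment; bind values are
    # collected alongside so the query stays parameterized.
    clause: List[str] = []
    values: List[Any] = []
    if since is not None:
        clause.append("time > ?")
        values.append(since)
    if wallet_id:
        clause.append("wallet = ?")
        values.append(wallet_id)
    # Status flags: both set (or neither set) means no status filter at all.
    if complete and not pending:
        clause.append("((amount > 0 AND pending = false) OR amount < 0)")
    elif pending and not complete:
        clause.append("pending = true")
    # Direction flags follow the same rule, keyed on the sign of the amount.
    if outgoing and not incoming:
        clause.append("amount < 0")
    elif incoming and not outgoing:
        clause.append("amount > 0")
    where = (" WHERE " + " AND ".join(clause)) if clause else ""
    return "SELECT * FROM apipayments" + where, values

sql, params = build_payment_filter(wallet_id="w1", incoming=True, since=1700000000)
# sql    -> "SELECT * FROM apipayments WHERE time > ? AND wallet = ? AND amount > 0"
# params -> [1700000000, "w1"]
```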
f968f34a257cf2d6f3d05f77ac35c02c
Registers for notification of updated assessments. ``AssessmentReceiver.changedAssessments()`` is invoked when an assessment in this assessment bank is changed.
[ { "docid": "bd42644844bcf3be048f5518d5509323", "score": "0.7559024", "text": "def register_for_changed_assessments(self):\n pass", "title": "" } ]
[ { "docid": "03c592e690d6c4a948d9b29d61ce96c3", "score": "0.7275361", "text": "def register_for_changed_assessments_offered(self):\n pass", "title": "" }, { "docid": "d8638ba531e80f7d89b21a423be92304", "score": "0.70348734", "text": "def register_for_changed_assessments_taken(self):\n pass", "title": "" }, { "docid": "44ae085c1c34a2e345cf95d282f5eb77", "score": "0.6946726", "text": "def register_for_changed_assessment(self, assessment_id):\n pass", "title": "" }, { "docid": "75462b7aa5c13b3d0c7bcbfb12fa608d", "score": "0.6850582", "text": "def register_for_changed_assessments_offered_for_assessment(self, assessment_id):\n pass", "title": "" }, { "docid": "fa9addb30351c255f90183389fb10bb9", "score": "0.65303075", "text": "def register_for_changed_assessments_taken_for_assessment(self, assessment_id):\n pass", "title": "" }, { "docid": "2dec9fcf0c967c02300c1ff0fe06e48f", "score": "0.5913823", "text": "def register_for_new_assessments(self):\n pass", "title": "" }, { "docid": "e61f1b5cecffcd5a879a1bf56243d8ed", "score": "0.5835164", "text": "def register_for_changed_assessment_taken(self, assessment_taken_id):\n pass", "title": "" }, { "docid": "e6af8ab15b6b4923808b240e212328cb", "score": "0.58261096", "text": "def register_for_new_assessments_taken(self):\n pass", "title": "" }, { "docid": "6e8a7d4d5547c736f746e01c1544e679", "score": "0.57779294", "text": "def register_for_new_assessments_offered(self):\n pass", "title": "" }, { "docid": "b096abf9a317b8f888e7e22883d83862", "score": "0.5593039", "text": "def register_for_new_assessments_offered_for_assessment(self, assessment_id):\n pass", "title": "" }, { "docid": "2124e49faff267be5a4f30e5c629cc40", "score": "0.55755454", "text": "def register_for_changed_assessments_taken_for_assessment_offered(self, assessment_offered_id):\n pass", "title": "" }, { "docid": "6469bb3c6c9451e02639b4fa3d6f4b03", "score": "0.54900104", "text": "def register_for_changed_assessment_offered(self, assessment_offered_id):\n pass", "title": "" }, { "docid": "d9a34e3d36951c51078c62ff4bd90c17", "score": "0.5396243", "text": "def register_for_new_assessments_taken_for_assessment(self, assessment_id):\n pass", "title": "" }, { "docid": "148ae70084c67adc726866606679e814", "score": "0.53933567", "text": "def changed_assets(self, notification_id, asset_ids):\n pass", "title": "" }, { "docid": "4639c64caced225fbe6182da80587d9d", "score": "0.52860326", "text": "def update_assessment(self, assessment_form):\n pass", "title": "" }, { "docid": "139750c74a2b34bff6c9686209af53b1", "score": "0.51074183", "text": "def register_updates(self, callback):\n _LOGGER.debug(\"Registered callback for state: %s\", self._stateName)\n self._observer_callbacks.append(callback)", "title": "" }, { "docid": "3984ca312c3723f3bac1b7ecc0dcd264", "score": "0.5049907", "text": "def can_update_assessments(self):\n return # boolean", "title": "" }, { "docid": "07d0e245e265546403c47b81b8a85083", "score": "0.49825096", "text": "def reliable_assessment_notifications(self):\n pass", "title": "" }, { "docid": "8de1883713e12b9e51d2cbe24ade60b9", "score": "0.4923074", "text": "def can_update_assessments_offered(self):\n return # boolean", "title": "" }, { "docid": "c5f66888ed62c3fb9ddeb1112d5f2b0f", "score": "0.48997366", "text": "def update_observers(self, *args, **kwargs):\n for observer in self.observers:\n observer.update(*args, **kwargs)", "title": "" }, { "docid": "171b28cb93673bd5539e7e2816d55d9f", "score": "0.48959136", "text": "def register_for_deleted_assessments_offered_for_assessment(self, 
assessment_id):\n pass", "title": "" }, { "docid": "6ad0b6d4ee6f2e59e4a8b69fb2aeff09", "score": "0.4880357", "text": "def _notify_observers(self):\n for o in self._observers:\n o.model_has_changed()", "title": "" }, { "docid": "b8eed9a360201c2e10d30a21d6c3f9e2", "score": "0.48793328", "text": "def reliable_assessment_offered_notifications(self):\n pass", "title": "" }, { "docid": "d7068bcfb67808a66dda9732542eb61b", "score": "0.48649687", "text": "def notify(self) -> None:\n for observer in self._observers:\n observer.update(self)", "title": "" }, { "docid": "0f2e2aafd16092264f926b50795b9a3c", "score": "0.48551822", "text": "def register_for_changed_assessments_taken_for_taker(self, resource_id):\n pass", "title": "" }, { "docid": "4c8d1cf795391b85952bd566b0454d5e", "score": "0.48190582", "text": "def register_for_deleted_assessments_taken(self):\n pass", "title": "" }, { "docid": "41f4db4660ec6a4ad0bdb407d31dabb5", "score": "0.48052454", "text": "def register_for_deleted_assessments_taken_for_assessment(self, assessment_id):\n pass", "title": "" }, { "docid": "4344cbf85153a00ea5d9a17c52ce3530", "score": "0.48047715", "text": "def register_for_changed_items(self):\n pass", "title": "" }, { "docid": "d101f149ca835220fc7e97506a4a1365", "score": "0.47901553", "text": "def update_assessment_taken(self, assessment_taken_form):\n pass", "title": "" }, { "docid": "ec01ea2facf46ef6dc76298239fecee5", "score": "0.47708204", "text": "def reliable_assessment_taken_notifications(self):\n pass", "title": "" }, { "docid": "7b6add561eec6d215daaf1d887cd0a8d", "score": "0.47601968", "text": "def get_assessments(self):\n return # osid.assessment.AssessmentList", "title": "" }, { "docid": "4f4994867e2d44b97e856b3f4397c175", "score": "0.4728418", "text": "def statements_update_reciever(self, sender, **kwargs):\n self._set_satetements()", "title": "" }, { "docid": "9d7ea8800dfb7b460a158541519d718a", "score": "0.47218573", "text": "def register_for_deleted_assessments(self):\n pass", "title": "" }, { "docid": "a57e70435d8466622401cd5400e2eb01", "score": "0.4718263", "text": "def post_exam_changes(exam_changes):\n event_name = SETTINGS['ifttt_event']\n secret_key = SETTINGS['ifttt_secret_key']\n\n post_event(event_name, secret_key, '\\n'.join(exam_changes))", "title": "" }, { "docid": "6dacb777a72b22c4056397d1b2a47ea8", "score": "0.46894833", "text": "def register_for_deleted_assessments_offered(self):\n pass", "title": "" }, { "docid": "73f660d790fea34fda76dcf992738264", "score": "0.46781105", "text": "def _notify_metadata_modified(self):\n notify_callback(self._metadata_modified_callbacks)", "title": "" }, { "docid": "5c08255b786c0639ca61926f025d8b65", "score": "0.46475825", "text": "def log_exams_updated_added_event(sender, **kwargs):\n logger = logging.getLogger(__name__)\n\n exam = kwargs['instance']\n if kwargs['created']:\n logger.info(\"Exam added: ID%d, %s %s on %s \", exam.id, exam.exam_title, exam.exam_group, exam.date_time)\n else:\n logger.info(\"Exam updated: ID%d, %s %s on %s \", exam.id, exam.exam_title, exam.exam_group, exam.date_time)", "title": "" }, { "docid": "dc46da9c44f10e68a38a6fe09d1a66ec", "score": "0.4647394", "text": "def sequences_changed(self):\n \n # update shots\n self.update_shots_comboBox()", "title": "" }, { "docid": "20532933644995d7597c3a0fcebb5bb8", "score": "0.46311328", "text": "def get_assessments_by_ids(self, assessment_ids):\n return # osid.assessment.AssessmentList", "title": "" }, { "docid": "cd0b3301e60da7cefce519fc599dbbd8", "score": "0.46274456", "text": "def 
register_for_new_assessments_taken_for_assessment_offered(self, assessment_offered_id):\n        pass", "title": "" },
{ "docid": "7d1fcc48aa40878197d819b2fc72a5f5", "score": "0.46159166", "text": "def notifications(self, notifications):\n\n        self._notifications = notifications", "title": "" },
{ "docid": "5b5ac80659d3b390e6663d0478452679", "score": "0.4610387", "text": "def sentiment_assessments(self):\n        raise NotImplementedError", "title": "" },
{ "docid": "6d5b52a7a9edafec75fde8b23b6ae2a9", "score": "0.4584794", "text": "def notify(self):\n        key = self.key()\n        LOG.debug(\"Sending id(self): %s\", key)\n        self.SIGNAL_RESULTS_UPDATED.emit(key)", "title": "" },
{ "docid": "07e59f08116a9ba4c37c9caabd521b76", "score": "0.456163", "text": "def publish(self, event: GameEvent):\n        for o in self.observers.values():\n            o.update(event)", "title": "" },
{ "docid": "d560121de9f4b8cd28bf000903121fe5", "score": "0.45543337", "text": "def can_update_assessments_taken(self):\n        return # boolean", "title": "" },
{ "docid": "8f2dca7567bb1321439b69f391d99ba2", "score": "0.45387676", "text": "def changed_compositions(self, notification_id, composition_ids):\n        pass", "title": "" },
{ "docid": "1ef1e75c2d40d9da3c75d8e606c25480", "score": "0.45380172", "text": "def register_for_changed_resources(self):\n        pass", "title": "" },
{ "docid": "fa075462c836660583c6ffd115c11dcb", "score": "0.44991177", "text": "def update_assessment_offered(self, assessment_offered_form):\n        pass", "title": "" },
{ "docid": "64b805681e6a2c553f62daeb66abdc42", "score": "0.44979", "text": "def notify_observers(self) -> None:\n        pass", "title": "" },
{ "docid": "8673960fac6cd8781feddc8d133e5817", "score": "0.44832423", "text": "def account_updates(self, account_updates):\n\n        self._account_updates = account_updates", "title": "" },
{ "docid": "fb59c12332ebc8ef4695c01b4ac2c6de", "score": "0.44778106", "text": "def attend_game(self, game):\n        # Update attributes\n        self.games_attended.append(game)", "title": "" },
{ "docid": "ed58fc47a8963efa3f9d5c3cd3213bb8", "score": "0.44415137", "text": "def unreliable_assessment_notifications(self):\n        pass", "title": "" },
{ "docid": "f6fc1b1a6e659355095c0d9b013bb8d5", "score": "0.443887", "text": "def notify(self, event):\n        self.notifications.add(event)", "title": "" },
{ "docid": "2a151faf62446f7e2d2fe45bccd73bfd", "score": "0.44273558", "text": "def updateWidgets(self):\n        super(ConfirmAttendanceForm, self).updateWidgets()\n        context = aq_inner(self.context)\n        storage = IAttendeesStorage(context)\n        self.widgets['attending'].value = utils.get_confirmation(context)", "title": "" },
{ "docid": "94ee02e4df24567478624880226897b0", "score": "0.44234544", "text": "def app_registrations(self, app_registrations):\n\n        self._app_registrations = app_registrations", "title": "" },
{ "docid": "049914d8369e41251763ace19f6e9eb1", "score": "0.44222975", "text": "def on_rules_update(self, profile_id, rules):\n        _log.info(\"Profile update: %s\", profile_id)\n        for rules_mgr in self.rules_mgrs:\n            rules_mgr.on_rules_update(profile_id, rules, async=True)", "title": "" },
{ "docid": "d3c4f26bb2f874fbb02d7e2e378e6a4d", "score": "0.44175395", "text": "def can_register_for_assessment_notifications(self):\n        return # boolean", "title": "" },
{ "docid": "1bdc6d08dbadc6c1a5de8b1702dda308", "score": "0.44039527", "text": "def UpdateEnablement(self, *a):\r\n\r\n        #FIXME: Need a way to un-observe 
after an object is destroyed\r\n        if not wx.IsDestroyed(self):\r\n            spenabled = self.spenabled = pref('messaging.spellcheck.enabled')\r\n\r\n            if spenabled:\r\n                self.SpellCheckAll()\r\n            else:\r\n                self.spellerrors.clear()\r\n                self.Refresh()\r\n\r\n        else:\r\n            log.error('UpdateEnablement observer called on a SpellCheckTextCtrlMixin that has been destroyed')", "title": "" },
{ "docid": "2673edbe30ae95fdec46f210a7b883f1", "score": "0.44026542", "text": "def get_assessments_in_progress(self):\n        return # osid.assessment.AssessmentTakenList", "title": "" },
{ "docid": "65b073cddc1da31870a2e12388abcaa7", "score": "0.4391315", "text": "def unreliable_assessment_taken_notifications(self):\n        pass", "title": "" },
{ "docid": "cbd1460683858293a6a2e4ffd10fcb70", "score": "0.43648157", "text": "def update_events(self):\n        self.__events__ = self.fetch_json()", "title": "" },
{ "docid": "e2cb3bc95e304c2ea4780b050cc96dfb", "score": "0.43591812", "text": "def add_indication_observer(self, observer):\n        self.indications.append(observer)", "title": "" },
{ "docid": "bb68340e1622fd634f3fba9cc862143a", "score": "0.4353568", "text": "def unreliable_assessment_offered_notifications(self):\n        pass", "title": "" },
{ "docid": "5b1fc278815305c364bd76c979ca0d80", "score": "0.43474033", "text": "async def notify(self, event_args):\n        for subscriber in self.__subscribers:\n            await subscriber.update(event_args)", "title": "" },
{ "docid": "7c8f90db39ad17a94a28cc52bf4f9cc2", "score": "0.4343352", "text": "def notify_change(self):\n        pass", "title": "" },
{ "docid": "7cca9238dbc73559ba11365e4eceb117", "score": "0.4316438", "text": "def get_assessments_by_query(self, assessment_query):\n        return # osid.assessment.AssessmentList", "title": "" },
{ "docid": "17f40182e04c3ebe894b41fd8d19e86a", "score": "0.43082184", "text": "def update(self):\n        if self._subject is not None: # If subject has not been specified, do nothing\n            children = self.get_children('')\n            for item in children:\n                values = self.item(item, \"values\")\n                patient_name = values[0]\n                current_patient = self._subject.select_patient(patient_name)\n                new_data = format_historical_systolic_data(current_patient)\n                if new_data != values: # Check if the new values are different\n                    self.item(item, values=new_data) # update the display with new data", "title": "" },
{ "docid": "4a878506781443ad35cb7d73300a35c9", "score": "0.43047616", "text": "def get_assessments_started(self):\n        return # osid.assessment.AssessmentTakenList", "title": "" },
{ "docid": "6a9d039d5c5eacceabe95c15427a40cd", "score": "0.43012047", "text": "def _update_events(self, events):\n\n        print(events)\n        # Distribute the events to the Canvas and Tabs\n        if isinstance(self._tabs.value, TabWidget):\n            self._tabs.value.update_events(events)\n\n        if isinstance(self._canvas.value, Canvas):\n            self._canvas.value.update_events(events)\n\n        if 'scan' in events:\n            self._progress.max = events['scan'][2]\n            self._progress.value = events['scan'][1]", "title": "" },
{ "docid": "bb2976416586da7390b8680c2a4ca897", "score": "0.42987517", "text": "def acknowledge_assessment_taken_notification(self, notification_id):\n        pass", "title": "" },
{ "docid": "29ff7bcb5b4d73166b5fbcabeebf0d83", "score": "0.42938593", "text": "def notifyAll(self, data):\n        for l in self._listeners:\n            try:\n                l.notifyChange(data)\n            except:\n                print(traceback.format_exc())", "title": "" },
{ "docid": "44157264d26a9f9dead7bff4083971b1", "score": "0.42736867", "text": "def register_for_deleted_assessment(self, assessment_id):\n        pass", "title": "" },
{ "docid": 
"bed004d2336c81fd1d1c48f845e957a7", "score": "0.4272151", "text": "def _update_subscribers(self, val):\n self._value = val\n for callback in self._observer_callbacks:\n callback(self._address, self._group, val)", "title": "" }, { "docid": "6aa153d49dddb510e044d295d0fa9375", "score": "0.42699605", "text": "def SLOT_emitChanged(self):\n\t\tself.emit(QtCore.SIGNAL(\"changed ()\"))", "title": "" }, { "docid": "d9629f94fe0fd8022bd4774150058e76", "score": "0.4253809", "text": "def new_notifications(self, new_notifications):\n\n self._new_notifications = new_notifications", "title": "" }, { "docid": "bc9563340988333b3546e329db0af886", "score": "0.4253733", "text": "def on_interface_update(self, name, iface_up):\n _log.info(\"Interface %s state changed\", name)\n for endpoint_mgr in self.endpoint_mgrs:\n endpoint_mgr.on_interface_update(name, iface_up, async=True)", "title": "" }, { "docid": "06351e0ea9deccf1336fb1de7b256258", "score": "0.42485672", "text": "def add_assumptions(self, new_assumptions):\n ...", "title": "" }, { "docid": "06351e0ea9deccf1336fb1de7b256258", "score": "0.42485672", "text": "def add_assumptions(self, new_assumptions):\n ...", "title": "" }, { "docid": "7073aacd970e96bdbb65ba7f1d7494ba", "score": "0.42464194", "text": "def fit_has_changed_slot(self,val):\n self.update()", "title": "" }, { "docid": "08e4b81d659476505b43d9d43001c63b", "score": "0.4244117", "text": "def _add_expected_incident_signals(self):\n ax = self._ax_incident\n view_y_bottom = ax.yaxis.get_view_interval()[0]\n\n ref = self._signal\n\n total = None\n for component in self.expected_incident:\n try:\n total += component\n except TypeError:\n total = component\n\n if self._plot_components:\n self._add_signal(ax,\n self._energies,\n component,\n **self._comp_expectation_line_kwargs)\n\n self._add_signal(ax,\n self._energies,\n total,\n **self._expectation_line_kwargs)\n\n if self._show_attenuated:\n total = None\n for component in self.expected_attenuated_incident:\n try:\n total += component\n except TypeError:\n total = component\n\n if self._plot_components:\n self._add_signal(ax,\n self._energies,\n component,\n **self._comp_expectation_line_kwargs)\n\n self._add_signal(ax,\n self._energies,\n total,\n **self._expectation_line_kwargs)\n\n ax.set_ylim(bottom = view_y_bottom)\n\n ax.set_xlim([self._signal.energy_edges[0],\n self._signal.energy_edges[-1]])\n locmaj = LogLocator(base=10.0, numticks=100)\n ax.yaxis.set_major_locator(locmaj)\n\n locmin = LogLocator(base=10.0, subs=_np.arange(2,10)*0.1, numticks=100)\n ax.yaxis.set_minor_locator(locmin)\n ax.yaxis.set_minor_formatter(NullFormatter())", "title": "" }, { "docid": "4b895e0cc114dc4a14ae282b47424950", "score": "0.4238123", "text": "def notify(self, *args):\n self.emit(SIGNAL('plotSettingsChanged(PlotSettings)'), self)", "title": "" }, { "docid": "4fc720e4f60265f6e180c2730aa53674", "score": "0.42357495", "text": "def save(self, *args, **kwargs):\n if self._state.adding:\n self.notify_subscribers()\n super().save(*args, **kwargs)", "title": "" }, { "docid": "e6b967bf73a830d0c8a2d491f168f23e", "score": "0.42338708", "text": "def update(self):\n update = self.update_configuration()\n if update:\n self.module.exit_json(msg=\"Audit-log update complete\", changed=update)\n else:\n self.module.exit_json(msg=\"No audit-log changes required\", changed=update)", "title": "" }, { "docid": "ee7ec8af674f7aa940a706485781517b", "score": "0.4233396", "text": "def add_notification_observer(self, observer):\n self.notifications.append(observer)", "title": "" }, { 
"docid": "263928e65f35311d3521295d148b09aa", "score": "0.42326635", "text": "def update_report_acknowledgements(self, request, response, **kw):\n return self._post_request(request, kw, response)", "title": "" }, { "docid": "a07dccdb633440b8c8f62e45bc5640a0", "score": "0.4227111", "text": "def acknowledge_assessment_notification(self, notification_id):\n pass", "title": "" }, { "docid": "a8baa7bd8796a38ea30f825ed63d4ff4", "score": "0.42266148", "text": "def get_assessments_offered_for_assessment(self, assessment_id):\n return # osid.assessment.AssessmentOfferedList", "title": "" }, { "docid": "c9dc8cdd94671cee9d3928a93b176546", "score": "0.4221096", "text": "def on_update_end(self, **kwargs) -> None:\n for callback in self.callbacks:\n callback.on_update_end(**kwargs)", "title": "" }, { "docid": "9cdc373e2a64070e02f93037f938881f", "score": "0.4212424", "text": "def register_for_changed_item(self, item_id):\n pass", "title": "" }, { "docid": "8669fdd0b2bfe5dc2a21652f994e221e", "score": "0.42045063", "text": "def assessments(self) -> Sequence[str]:\n return pulumi.get(self, \"assessments\")", "title": "" }, { "docid": "6beccd2e383c99a19200eb7dd1c5e866", "score": "0.41999388", "text": "def integrity_monitoring(self, integrity_monitoring):\n\n self._integrity_monitoring = integrity_monitoring", "title": "" }, { "docid": "83decd865067cffd51fd66d507ce3039", "score": "0.4198663", "text": "def update(self, events: list[event.Event]) -> None:\n pass", "title": "" }, { "docid": "111525791953dbf8e8a3f66927da5ff4", "score": "0.41969967", "text": "def save(self):\n c.database.redisSessionServer.hset(self.sessionID, \"alerts\",\n json.dumps(self._alerts))", "title": "" }, { "docid": "6b5cedd7de800d7f8524e66666c64fe6", "score": "0.4186347", "text": "def update(self, subject):\n pass", "title": "" }, { "docid": "178653758673c876ba217182a7220910", "score": "0.4185534", "text": "def notify_change(self):\n\t\tif not self.__changed:\n\t\t\tself.__changed=True", "title": "" }, { "docid": "e8a7728dcf8837991ec2b0d7f43b0796", "score": "0.41845322", "text": "def on_update(self):\r\n\t\tself.set_total_invoiced_amount()", "title": "" }, { "docid": "f0b26a873acdd0451b7d71820dcd9742", "score": "0.41814777", "text": "def get_assessments_by_genus_type(self, assessment_genus_type):\n return # osid.assessment.AssessmentList", "title": "" }, { "docid": "277af2781dc8516f8dce7bd687cc4d79", "score": "0.41730988", "text": "async def async_update(self):\n for appliance in self.appliances:\n await appliance.async_update()", "title": "" }, { "docid": "d625f823a13161ad11271713fd912e2e", "score": "0.4172706", "text": "def __notifyPropertyChanged(self, info):\n for it in self.__listeners:\n it.onPropertyChanged(self, PropertyChangedEventArgs(info))", "title": "" } ]
de6db4ef752c53d8676de255dba988df
Take a description as formatted by _transform_block, search for and return the first preceding header (string defined by cd)
[ { "docid": "361975eaf1cdd14e939f8b5f5ecae174", "score": "0.0", "text": "def _get_keybind_category(self, key_desc):\n query = \"(?<=\\\\n)({}.*?)\\\\n(?=.*{})\".format(re.escape(self.category_descr), re.escape(key_desc))\n category_search = re.findall(query, self._get_raw_config(), flags=re.S)\n if category_search:\n # strip away any special chars in case the category is formatted weirdly for some reason\n # we allow & and | since they aren't generally great characters for padding something in ascii art/column-filling...\n result = re.sub('[^A-Za-z0-9\\s&|]+', '', category_search[-1]).strip()\n return result\n else:\n return 'misc'", "title": "" } ]
[ { "docid": "e13ea169042ee5445f2f49734d97a207", "score": "0.65945625", "text": "def get_header(block):\n # Standard header\n if block.type is BlockType.HEADER:\n return block.header\n # Text followed by :\n elif is_valid_header(block.content.get_partial_clean()):\n return 7\n # Bolded text\n elif re.fullmatch(\n r'\\s*\\*\\*\\s*[\\w\\'\\- ]+\\s*\\*\\*\\s*', block.content.get_raw()) \\\n and len(block.content.get_raw()) <= 50:\n return 4\n # Any other paragraph or element\n else:\n return 100", "title": "" }, { "docid": "33895e33ae1b7d396a52ac3c9bc30911", "score": "0.62361276", "text": "def __get_head_from_temp(self, num):\n look_for = 'mi<mk<header-ope<' + num + '\\n'\n found_head = 0\n string_to_return = ''\n line = 1\n while line:\n line = self.__read_from_head_obj.readline()\n if found_head:\n if line == 'mi<mk<header-clo\\n':\n return string_to_return\n string_to_return = string_to_return + line\n \n else:\n if line == look_for:\n found_head = 1", "title": "" }, { "docid": "3989e301542537284d235aff06a4c175", "score": "0.6132666", "text": "def first(self):\n\t\treturn self._make_position(self._header._next)", "title": "" }, { "docid": "34092994576ef2a2967177b9e72f39aa", "score": "0.61222285", "text": "def getrawheader(self, name):\r\n\r\n lst = self.getfirstmatchingheader(name)\r\n if not lst:\r\n return None\r\n lst[0] = lst[0][len(name) + 1:]\r\n return ''.join(lst)", "title": "" }, { "docid": "a340cbaf0870856e36882a907f6f931c", "score": "0.5942645", "text": "def find(self, e):\n p = self._header._next\n if p is None:\n return None\n elif p._element == e:\n return self._make_position(p)\n else:\n return self.find(e)", "title": "" }, { "docid": "82e252a345a41d2355ee73f02b22d14a", "score": "0.58843064", "text": "def first(self):\n return self._make_position(self._header._next)", "title": "" }, { "docid": "4fdf9913ed892ac7efed887645f5a2d6", "score": "0.5847293", "text": "def _get_hdr_start(self):\n return self.__hdr_start", "title": "" }, { "docid": "c602671774eb75ba45ef9cdafde8db23", "score": "0.5821573", "text": "def __find_block_start(self):\n try:\n return self.__find_token(self.__block_head)\n except RouteParserError:\n raise StartTokenNotFoundError(_('No match for entry block start'))", "title": "" }, { "docid": "8eb64baa60ed5fcba7db6b9f4cf99d73", "score": "0.5674031", "text": "def getfirstmatchingheader(self, name):\r\n name = name.lower() + ':'\r\n n = len(name)\r\n lst = []\r\n hit = 0\r\n for line in self.headers:\r\n if hit:\r\n if not line[:1].isspace():\r\n break\r\n elif line[:n].lower() == name:\r\n hit = 1\r\n if hit:\r\n lst.append(line)\r\n return lst", "title": "" }, { "docid": "4a14892263139c9bd0e9821b21215c6e", "score": "0.5639054", "text": "def header_pass(block):\n # TODO: Add logic to handle doctests raising exceptions.\n\n if len(block.lines) == 2 and is_at_header(block.lines):\n assert not block.ind\n return [Header(block.lines + block.wh)]\n elif len(block.lines) >= 2 and is_at_header(block.lines):\n h = Header(block.lines[:2])\n block.lines = block.lines[2:]\n return (h, block)\n return (block,)", "title": "" }, { "docid": "c713f09d5f08a61f9bac27de054006c1", "score": "0.5638157", "text": "def extract_header(self):\n\n if self.header.find(\"<ti\") != -1:\n header_open_tag_index = self.header.find(\"<ti\")\n header_close_tag_index = self.header.find(\"</ti\")\n self.parser.parse_text(self.header[header_open_tag_index + len(\"<ti>\"): header_close_tag_index])\n\n elif self.header.find(\"document\") != -1:\n header_open_tag_index = self.header.find(\"document 
type:\")\n header_close_tag_index = self.header.find(\"<\", header_open_tag_index)\n self.parser.parse_text(\n self.header[header_open_tag_index + len(\"document type:\"): header_close_tag_index])\n else:\n self.parser.parse_text(self.header)\n\n self.header = self.parser.parsed_text", "title": "" }, { "docid": "e710d293724e7fc5fb4fd47dadc76356", "score": "0.56323886", "text": "def get_parent_header(block_header, db):\n return db.get_block_header_by_hash(block_header.parent_hash)", "title": "" }, { "docid": "a21bfd862b6cbfaf132b4be44e5ee298", "score": "0.5609556", "text": "def get_headerline_by_name(self, buffer, name, pos = 0):\n if len(name) == 0:\n return 0\n lines = buffer.split('\\r\\n')\n lineindex = 1\n tmp_pos_value = 0\n \n while True:\n if len(lines[lineindex]) > 3:\n result = lines[lineindex].find(':')\n \n if result != -1:\n \n result = int(lines[lineindex].find(name, 0, 20))\n if result != -1:\n temp_buffer1 = lines[lineindex].split(':')\n temp_buffer2 = temp_buffer1[0].strip()\n if len(name) == len(temp_buffer2):\n \n if tmp_pos_value == pos:\n return lineindex\n tmp_pos_value += 1\n \n else:\n temp_buffer1_a = lines[lineindex].split(':')\n if len(temp_buffer1_a[0]) == 1:\n temp_buffer2_a = self.get_compact_format_by_name(name)\n if temp_buffer2_a[0] != '0':\n if temp_buffer1_a[0][0] == temp_buffer2_a[0]:\n if tmp_pos_value == pos:\n return lineindex\n tmp_pos_value += 1\n lineindex += 1\n else:\n break \n \n return 0", "title": "" }, { "docid": "3a9c910e42edfaf49e94a459ed2b5cc1", "score": "0.55626476", "text": "def header_read(buf, begin=0):\n buf.seek(begin) # starting at the given offset\n stringvar = str(buf.read(56)) # reading header\n listvar = stringvar.split() # spliting header\n listvar.pop(0) # first element of header is \"FCS\" and it's useless\n while len(listvar) > 4: # listvar needs only 4 elements, and elements are removed from\n listvar.pop() # the tail until list is 4 elements long\n # offsets are converted into string\n listvar = [int(x) for x in listvar]\n next_offset = listvar[-1]+1 # next offset is calculated\n text_begin = listvar[0]\n # the difference of BEGIN and END gives size-1\n text_size = listvar[1]-listvar[0]\n data_begin = listvar[2]\n # the difference of BEGIN and END gives size-1\n data_size = listvar[3]-listvar[2]\n listvar = [text_begin, text_size, data_begin, data_size]\n return(next_offset, listvar)", "title": "" }, { "docid": "2968830bc4fc31d1c4c481ae436c6e56", "score": "0.5532336", "text": "def __parse_interchange_header(self):\n header = self.ediDocument.interchange.header\n #The edi separator is always at position 4\n self.ediDocument.document_configuration.element_separator = self.documentText[3:4]\n headerFieldList = self.documentText.split(self.ediDocument.document_configuration.element_separator)\n for index, isa in enumerate(headerFieldList):\n if index == 12:\n self.ediDocument.version = isa\n if index <= 15:\n header.fields[index].content = isa\n if index == 16:\n lastHeaderField = headerFieldList[16]\n #the sub-element separator is always the first character in this element.\n header.isa16.content = lastHeaderField[0:1]\n if lastHeaderField[1:2]:\n self.ediDocument.document_configuration.segment_terminator = lastHeaderField[1:2]\n else:\n raise SegmentTerminatorNotFoundError(\n msg=\"The segment terminator is not present in the Interchange Header, can't parse file.\")", "title": "" }, { "docid": "ea4986002c35110a88b46002cae79d60", "score": "0.5528164", "text": "def get_description(self):\n def split_header(s, 
get_header=True):\n s = s.lstrip().rstrip()\n parts = s.splitlines()\n if parts[0].startswith('#'):\n if get_header:\n header = re.sub('#+\\s*', '', parts.pop(0))\n if not parts:\n return header, ''\n else:\n header = ''\n rest = '\\n'.join(parts).lstrip().split('\\n\\n')\n desc = rest[0].replace('\\n', ' ')\n return header, desc\n else:\n if get_header:\n if parts[0].startswith(('=', '-')):\n parts = parts[1:]\n header = parts.pop(0)\n if parts and parts[0].startswith(('=', '-')):\n parts.pop(0)\n if not parts:\n return header, ''\n else:\n header = ''\n rest = '\\n'.join(parts).lstrip().split('\\n\\n')\n desc = rest[0].replace('\\n', ' ')\n return header, desc\n\n first_cell = self.nb['cells'][0]\n\n if not first_cell['cell_type'] == 'markdown':\n return '', ''\n header, desc = split_header(first_cell['source'])\n if not desc and len(self.nb['cells']) > 1:\n second_cell = self.nb['cells'][1]\n if second_cell['cell_type'] == 'markdown':\n _, desc = split_header(second_cell['source'], False)\n return header, desc", "title": "" }, { "docid": "d70569550495a42309bac826e8b1ca39", "score": "0.55266535", "text": "def get_block_header_by_hash(block_hash, db):\n return db.get_block_header_by_hash(block_hash)", "title": "" }, { "docid": "e53baf64caece9bac9801c9785c651a9", "score": "0.5519661", "text": "def header_name(some_str):\r\n header_final = ''\r\n header = re.findall(r'.*:header_text \\(\"?(.*[^\"])\"*?\\)', some_str)\r\n if header:\r\n # decode bytes to unicode\r\n header_final = header\r\n \r\n return header_final", "title": "" }, { "docid": "0deae747b6fe54fe52beb79d933c065d", "score": "0.5494471", "text": "def extract_first_subsection(message, boundary):\n try:\n full_boundary = \"--\" + boundary\n boundary_length = len(full_boundary) + 1\n first_instance = message.index(full_boundary)\n next_instance = message.index(full_boundary, first_instance + boundary_length)\n message_section = message[first_instance + boundary_length:next_instance + 1].strip()\n header_matches = SECTION_HEADERS_ENDING.search(message_section)\n if not header_matches:\n return message\n else:\n return message_section[header_matches.start() + len(header_matches.group(0)):].strip()\n except ValueError:\n return message", "title": "" }, { "docid": "2afb638c198df9302cf7ad10fe721ff4", "score": "0.54666436", "text": "def _collectHeader(self,header,otherHeader):\n returnHeader=\"\"\n if not len(otherHeader)>0:\n returnHeader=header[0]\n else:\n returnHeader=otherHeader\n return self._headerFilter(returnHeader)", "title": "" }, { "docid": "4327dbe564ffe480661f8857647ca2a9", "score": "0.5462364", "text": "def test_header(self) -> None:\n header_text = \"\".join(iter_at_depth(OUTPUT.header, 4))\n assert re.match(r\"Header text----media/image\\d+\\.\\w+----$\", header_text)", "title": "" }, { "docid": "3d47aa6ac936f0e4fd3c341708d4f847", "score": "0.54392266", "text": "def find_header(self, header: str):\n try:\n return pyclibrary.utils.find_header(header, dirs=self.include_dirs)\n except OSError:\n return None", "title": "" }, { "docid": "ad77a8e454d978a42a181e2e390a00e9", "score": "0.5436333", "text": "def start_line(self, lines):\n for i, line in enumerate(lines):\n if not isinstance(line, SoupString):\n raise TypeError(\"HTML lines should be of type SoupString\")\n soup = line.soup\n if soup.th is not None:\n return i\n\n return None", "title": "" }, { "docid": "e285ab54e460545a62776a7026d2833b", "score": "0.5392865", "text": "def _parse_mxp_header(it, current, recipe):\n current = _skip_empty(it, current)\n if 
_is_mxp_header(current):\n current = next(it)\n return current", "title": "" }, { "docid": "0b4da4038bba293bcc1500ae8bc4c94c", "score": "0.5363171", "text": "def section_header(line: str) -> Optional[str]:\n if line.startswith(\"[\") and line.endswith(\"]\"):\n return line[1:-1]\n return None", "title": "" }, { "docid": "aa8912ebec7d12c8cc06f2b5d0dd2260", "score": "0.5360923", "text": "def process_header(header):\n res = ''\n for idx in range(len(header)):\n if idx < len(header) - 1 \\\n and header[idx] == '#' and re.match(r'\\d+', header[idx + 1]):\n continue\n else:\n res += header[idx]\n return res", "title": "" }, { "docid": "6a380d8c5f47ab33ab6c4438c7af933e", "score": "0.53461415", "text": "def __find_file_header(self):\n try:\n return self.__find_token(self.__file_header)\n except RouteParserError:\n raise StartTokenNotFoundError(_('No match for file header'))", "title": "" }, { "docid": "8d83b173e2f8d41195e9599c729f1998", "score": "0.53428763", "text": "def get_blockheader(f):\n # block header is packed big-endian as 4 shorts, 1 long, 4 floats\n return struct.unpack('>4hl4f', f.read(28))", "title": "" }, { "docid": "8ca2e84655141371e9d8b81850b28014", "score": "0.53371924", "text": "def header(self):\n # vsm segemnts start with 'segment'\n new_line_idx = min(i for i, v in enumerate(self.raw_out) if v == '\\n') # all indices with new line\n\n # find first data index\n data_start_idx = min(i for i,v in enumerate(self.raw_out) if v.startswith('+') or v.startswith('-')) # first index of data\n\n data_header = self.raw_out[new_line_idx + 1: data_start_idx-1]\n data_header = [i for i in data_header]\n sifw = [len(i) + 1 for i in self.raw_out[data_start_idx].split(',')]\n sifw += [len(self.raw_out[data_start_idx])]\n sifw = [sum(sifw[:i]) for i in range(len(sifw))]\n data_header = np.array(\n [[v[sifw[i]: sifw[i + 1]] for i in range(len(sifw) - 1)] for j, v in enumerate(data_header)]).T\n\n data_header = [\" \".join(i) for i in data_header]\n data_header = [' '.join(j.split()) for j in data_header]\n data_header = [j.split(' (')[0].lower() for j in data_header]\n return data_header", "title": "" }, { "docid": "8dea92af786a3e1bdcfbc57654d6f74e", "score": "0.5334005", "text": "def get_header(text):\n header = ''\n for l in text.splitlines():\n if header:\n if ')' in l:\n i = l.find(')')\n header += l[:i+1]\n break\n else:\n header += l\n if 'SUBROUTINE' in l:\n header = l.strip()\n data = [d.strip() for d in header.split('$')]\n header = ' '.join(data)\n paren1 = header.find('(')\n paren2 = header.find(')')\n name = header[11: paren1]\n params = header[paren1+1:paren2].strip().split(', ')\n params = [p.strip() for p in params if p]\n return name, params", "title": "" }, { "docid": "149f9b5f6e5fa57378a6c8b57441e5a9", "score": "0.5326433", "text": "def read_header(h):\n return ROOT.gROOT.ProcessLine('#include \"%s\"' % h)", "title": "" }, { "docid": "609445c8b9c3aadf8e6111440ccb4595", "score": "0.5318148", "text": "def get_head(string):\r\n if string == \"\":\r\n return None\r\n else:\r\n return string[0]", "title": "" }, { "docid": "d6113e33819953acc31e0a86a6d949a9", "score": "0.5300776", "text": "def decodeHeader(self):\n\t\t\n\t\tret = \"\"\n\t\t\n\t\t# th_decode_headerin(self.theoraInfo, self.mComment, self.setupInfo_addr, self.packet)\n\t\t# self.setupInfo_addr:-\n\t\t# Returns a pointer to additional, private setup information needed by the decoder.\n\t\t# The contents of this pointer must be initialized to NULL on the first call,\n\t\t# and the returned value must continue to be passed 
in on all subsequent calls.\n\t\t#\n\t\t# setupInfo_addr can be free'd after this using th_decode_free()\n\t\t\n\t\twhile self.isHeader():\n\t\t\tret, self.setupInfo_addr = th_decode_headerin(self.theoraInfo, \n\t\t\t\t\tself.mComment, self.setupInfo_addr, self.packet)\n\t\t\t# val need not be returned, as readPacket() sets the self.packet\n\t\t\tval = self.readPacket()\n\t\t\tif not val: return None\n\n\t\t# iterate till the first VIDEO is encountered\n\t\twhile not ret == 'VIDEO':\n\t\t\tret, self.setupInfo_addr = th_decode_headerin(self.theoraInfo, \n\t\t\t\t\tself.mComment, self.setupInfo_addr, self.packet)\n\t\t\t\n\t\t\tif not ret == 'VIDEO':\n\t\t\t\tval = self.readPacket()\n\t\treturn", "title": "" }, { "docid": "89b768eba3fda39a8abe55befb156db7", "score": "0.52923936", "text": "def _find_diff_start(lines: InputSource) -> None:\n # Ignore everything before last '---'.\n for i in reversed(range(len(lines))):\n if lines[i] == '---':\n lines.consume(i)\n break\n if lines[0] == '---':\n lines.consume()\n else:\n raise ValueError('failed to find ---, instead found: ' + lines[0])\n\n # Drop high level summary before first file diff.\n while re.match(r'^\\S+\\s+\\|\\s+\\d+ \\+*-*$', lines[0]):\n lines.consume()\n if re.match(r'^\\d+ file(s?) changed(, \\d+ insertion(s?)\\(\\+\\))?(, \\d+ deletion(s?)\\(\\-\\))?$', lines[0]):\n lines.consume()\n else:\n raise ValueError('failed to find top level summary, instead found: ' + lines[0])\n while re.match(r'^create mode \\d+ \\S+$', lines[0]):\n lines.consume()\n\n if not lines[0]:\n lines.consume()\n else:\n logging.info('expected blank line after summary, instead got: %s', lines[0])\n\n # Make sure the next line is the start of a file diff.\n if not DIFF_LINE_MATCHER.match(lines[0]):\n raise ValueError('failed to find file diff, instead found: ' + lines[0])", "title": "" }, { "docid": "e1e3058928b7208542be61c6070fb697", "score": "0.52921695", "text": "def _get_start(self, index):\n start = index * self._csize\n if start >= self.fsize: return None # This is off the end of the file.\n self._f.seek(start)\n s = self._f.read(self._searchlen)\n match = re.search(self._search, s, re.MULTILINE)\n if match:\n i = match.span()[0]\n else:\n #i = s.find(self._search)\n #if i == -1: \n if start+len(s) == self.fsize: # No start, our search has gotten us to the end of the file.\n return None\n else:\n raise Exception(\"Search string not found. Is your search length too short?\")\n return start + i", "title": "" }, { "docid": "093a5193c3e9aa90a03ed1c668874bf2", "score": "0.5276154", "text": "def get_header(self):\n return self.obj_dict['Hd'][0]", "title": "" }, { "docid": "497c84264defd18e4b0a097d43a00287", "score": "0.5267536", "text": "def grab_header(filename):\n f = open(filename, 'rb')\n eoh_found = False\n\n header_str = ''\n header_sub_count = 0\n while not eoh_found:\n header_sub = f.read(512)\n header_sub_count += 1\n if b'HEADER_START' in header_sub:\n idx_start = header_sub.index(b'HEADER_START') + len(b'HEADER_START')\n header_sub = header_sub[idx_start:]\n\n if b'HEADER_END' in header_sub:\n eoh_found = True\n idx_end = header_sub.index(b'HEADER_END')\n header_sub = header_sub[:idx_end]\n\n if header_sub_count >= MAX_HEADER_BLOCKS:\n raise RuntimeError(\"MAX HEADER LENGTH REACHED. 
THIS FILE IS FUBARRED.\")\n header_str += header_sub\n\n f.close()\n return header_str", "title": "" }, { "docid": "3bd8794d7fe6eb2f7335def4776027de", "score": "0.5267093", "text": "def fst(tpl):\n return tpl[0]", "title": "" }, { "docid": "a6d95deb6e04662ba62322b02be41e6d", "score": "0.5266099", "text": "def get_header(self):\n result = []\n buf = _kstring_t(0, 0, None)\n while _hts_getline(self.file.struct_ptr, _KS_SEP_LINE,\n ctypes.byref(buf)) >= 0:\n if not buf.l or buf.s[0] != self.struct.conf.meta_char:\n break\n else:\n result.append(buf.s.decode('utf-8'))\n result.append('\\n')\n buf.free_p()\n return ''.join(result)", "title": "" }, { "docid": "5fd9636d0be0fd863d2793438c60a99e", "score": "0.5263724", "text": "def get_header(val, headers):\n for h in headers:\n k = h.key.get_generator({})\n if len(k) == len(val) and k[:].lower() == val.lower():\n return h\n return None", "title": "" }, { "docid": "efe8647bd906adeed0af6ccbdf69075d", "score": "0.5249384", "text": "def get_header(filename: str) -> str:\n for line in files.get_lines(filename):\n if re.findall(\"(^# )\", line):\n return line[2:].replace(\"/\", \"\")\n return \"\"", "title": "" }, { "docid": "6f6c8d02c63685fa3026a3068ea70cbd", "score": "0.5240597", "text": "def LocateHeader(self):\n try:\n nth = self.getparent(Header)\n except ValueError as msg:\n nth = list(self.backtrace(fn=lambda x:x))[-1]\n return nth", "title": "" }, { "docid": "5617cffca4537509c9c280ec254455b6", "score": "0.52401423", "text": "def convert_header(header):\n return pf.Header().fromstring(header, sep='\\n') if header else None", "title": "" }, { "docid": "4a3b2fe136955967a0fea1d5059c8ca1", "score": "0.5226011", "text": "def isheader(self, line):\r\n i = line.find(':')\r\n if i > 0:\r\n return line[:i].lower()\r\n return None", "title": "" }, { "docid": "356e08e76759297d4a97ae1b19650d36", "score": "0.52151585", "text": "def find_title(self, markdown):\n title = None\n with open(markdown, \"r\") as f:\n lines = f.readlines()\n previous=lines[0] or \"\"\n found = None\n\n for line in lines:\n if \"# \" in line:\n found = line.split(\"# \", 1)[1]\n\n elif line[0] in [\"=-~^\"]:\n found = previous\n if found:\n if \"[\" in found:\n title = found.split(\"[\")[0].strip()\n return title\n previous = line\n return title", "title": "" }, { "docid": "589d8bc17e45d9952b7b8fd28d901677", "score": "0.5213177", "text": "def _find_header_value(header, header_section, ignore_semicolon=False):\n begin = header_section.find(header)\n\n assert begin != -1, 'Header must exist.'\n\n end_of_line = header_section.find(CRLF, begin)\n\n if ignore_semicolon:\n end = end_of_line\n else:\n semi_colon = header_section.find(';', begin, end_of_line)\n end = end_of_line if semi_colon == -1 else semi_colon\n\n return header_section[begin + len(header):end]", "title": "" }, { "docid": "5b3b3edbb3b16fbfba1227927f63a9c2", "score": "0.51988643", "text": "def test_clang_format_parser_line_start():\n cfp = ClangFormatXMLParser()\n data = \"\"\n offset = 0\n assert cfp.find_index_of_line_start(data, offset) == 0", "title": "" }, { "docid": "8acdd283b623fdeb3004b234edaf8e94", "score": "0.5197263", "text": "def pullHeader(fh):\n while True:\n line = fh.readline()\n if line.startswith(\"##INFO=<ID=IMPRECISE\"):\n continue\n if line.startswith(\"##INFO=<ID=PRECISE\"):\n continue\n if line.startswith(\"##INFO=\"): \n #sys.stdout.write(line)\n fout.write(line)\n if line.startswith(\"##FOR\"):\n #sys.stdout.write(line.replace(\"FORMAT\",\"INFO\"))\n fout.write(line.replace(\"FORMAT\",\"INFO\")) \n 
if line.startswith(\"#CH\"):\n return\n\n if line is None:\n sys.stderr.write(\"ERROR! No read good.\\n\")\n exit(10)", "title": "" }, { "docid": "f8e1202590fbb065e5666e77d7328b02", "score": "0.5196293", "text": "def find_first_text(blocks, default=\"\"):\n for block in blocks:\n if block.type == \"text\":\n return block.value\n return default", "title": "" }, { "docid": "ec780d7cbc6c7cccbdc5ea5af397ab9b", "score": "0.5191112", "text": "def next_header(response):\n line = response.fp.readline()\n r = None\n while r is None:\n m = HTTPMultipartThread._content_range.match(line)\n if m is not None:\n r = m.group(1)\n line = response.fp.readline()\n if len(line.strip()) == 0:\n break\n return r", "title": "" }, { "docid": "1ee0d88a019d0c62c944134067dd3893", "score": "0.5188252", "text": "def _extract_block_from_next_pos(self, marker):\n block = ''\n if not self.oom.find_text(marker):\n return block\n\n line = self.oom.current()\n block += \"{}\\n\".format(line)\n for line in self.oom:\n if not line.startswith(' '):\n self.oom.back()\n break\n block += \"{}\\n\".format(line)\n return block", "title": "" }, { "docid": "013ad91f9979ad0c08ee9ce96eec9822", "score": "0.51847833", "text": "def _read_spl_component_header(self):\n\n data_start = self.f.tell()\n # manually do headers\n _1,_2,self.nprocs, nbodies, nint_attr, nfloat_attr, infostringlen = np.fromfile(self.f, dtype=np.uint32, count=7)\n\n # need to figure out what to do with nprocs...it has to always be the same, right?\n head = np.fromfile(self.f, dtype=np.dtype((np.bytes_, infostringlen)),count=1)\n head_normal = head[0].decode()\n head_dict = yaml.safe_load(head_normal)\n\n\n head_dict['nint_attr'] = nint_attr\n head_dict['nfloat_attr'] = nfloat_attr\n # data starts at ...\n next_comp = 4*7 + infostringlen + data_start\n\n self.component_map[head_dict['name']] = next_comp\n self.header[head_dict['name']] = head_dict\n\n next_comp += self.nprocs*1024\n\n # specifically look for indexing\n try:\n self.indexing = head_dict['parameters']['indexing']\n except:\n self.indexing = head_dict['indexing']=='true'\n\n return next_comp", "title": "" }, { "docid": "9dfa8bc4bb65a13debf1032e44b94dd7", "score": "0.51791817", "text": "def get_defect_body(line):\n return regex.DEFECT_DESCRIPTION_RE.search(line).group(1)", "title": "" }, { "docid": "7afbbffb76b515f3b950260ca0f213bf", "score": "0.51729995", "text": "def get_angle_bracketed_header(message, header):\n value = message[header]\n if value is None:\n return None\n match = ANGLE_BRACKETED_RE.match(value)\n if match is None:\n return None\n content = match.group(1)\n content = content.lower()\n return content", "title": "" }, { "docid": "81530af3f7f35cd59d292fb0af7237a9", "score": "0.5161858", "text": "def __check_for_header(self, line: str) -> Union[str, None]:\n for section in _SECTION_TYPES:\n if section in line:\n return section\n return None", "title": "" }, { "docid": "249aee1af7cff265cdd12cc6e2874a62", "score": "0.5143063", "text": "def find_results_header(self, line):\n if line.strip() == '': # empty line\n pass\n elif line.startswith('WARNING: The file'):\n pass\n elif line.startswith('Using Time Stamp'):\n pass\n elif line.strip().startswith('access'):\n self._result_columns = [x.lower() for x in line.strip().split()]\n self._parser = self.parse_result\n next(self._content) # separator line", "title": "" }, { "docid": "9f2018b71a90d59cea8b9dea8a22f93b", "score": "0.5142884", "text": "def get_heading(line):\n head = re.split('[\\d\\W]', line.lower())\n # print head, \"head\"\n # head = 
split(line.lower())\n # parse [verse] number, if existing:\n number = \"\"\n numbers = re.split('\\D', line)\n for n in numbers:\n if n != \"\":\n number = n\n if 'order' in head:\n return 'order'\n if 'intro' in head:\n return 'intro'\n if 'verse' in head:\n return 'verse' # + str(number)\n if 'pre chorus' in line.lower():\n return 'pre chorus' # + str(number)\n if 'chorus' in head:\n return 'chorus'\n if 'bridge' in head:\n return 'bridge'\n if 'tag' in head:\n return 'tag'\n if 'refrain' in head:\n return 'refrain'\n if 'ending' in head:\n return 'ending'\n if 'outro' in head:\n return 'outro'\n if 'end' in head:\n return 'end'\n\n # if none of these, try single letters\n if 'v' in head:\n return 'verse' # + str(number)\n # if 'c' in head:\n # return 'chorus'\n # if 'b' in head:\n # return 'bridge'\n # if 'r' in head:\n # return 'refrain'\n return None", "title": "" }, { "docid": "b9a2857db9776b402ca562edbe9e151f", "score": "0.51349", "text": "def _check_header_item (self, it , kind ='erp'):\r\n \r\n dict_ = self.idictcpr if kind =='ves' else self.idicttags\r\n \r\n for k, val in dict_.items(): \r\n for s in val : \r\n if str(it).lower().find(s)>=0: \r\n return k \r\n return", "title": "" }, { "docid": "b2ad2d8a85370adbac0859f5974ceb6d", "score": "0.5127361", "text": "def get_first_group (match):\n return match.group(1)", "title": "" }, { "docid": "e29bfe9858ee973b9156954813e90509", "score": "0.5124687", "text": "def first_block_for(self, anchor):\n for block in self.blocks:\n if anchor in block.anchors:\n return block", "title": "" }, { "docid": "f4dc88c17b35b8910692672015a2167c", "score": "0.51237017", "text": "def header(self):\n self.ready() # check if we're ready\n for line in self._file:\n if not line.strip(): continue\n if line.endswith(\"|\"):\n yield line\n else:\n break", "title": "" }, { "docid": "2494ae86b23a31c19ec1b8309440adab", "score": "0.51193756", "text": "def headers_para(doc, size_tag, pic_location):\n header_para = [] # list with headers and paragraphs\n first = True # boolean operator for first header\n previous_s = {} # previous span\n block_string = \"\"\n for pgno,page in enumerate(doc):\n blocks = page.getText(\"dict\")[\"blocks\"]\n #print(\"Blocks --------- \",blocks)\n for b in blocks: # iterate through the text blocks\n if b['type'] == 0: # this block contains text\n\n # REMEMBER: multiple fonts and sizes are possible IN one block\n\n #block_string = \"\" # text found in block\n for l in b[\"lines\"]: # iterate through the text lines\n for s in l[\"spans\"]: # iterate through the text spans\n #print(s)\n s['size']=round(s['size'])\n if len(s['text'])>1 and s['text'].strip(): # removing whitespaces:\n s['text']=s['text'].encode('ascii','ignore').decode('utf-8')\n if first:\n previous_s = s\n first = False\n block_string = size_tag[s['size']] + s['text']\n else:\n if s['size'] == previous_s['size']:\n #if block_string: #and all((c == \"|\") for c in block_string):\n # block_string only contains pipes\n # block_string = size_tag[s['size']] + s['text']\n #if block_string == \"\":\n # # new block has started, so append size tag\n # block_string = size_tag[s['size']] + s['text']\n #else: # in the same block, so concatenate strings\n block_string += \" \" + s['text']\n\n else:\n header_para.append(block_string)\n block_string = size_tag[s['size']] + s['text']\n\n previous_s = s\n #print(\"-------------\")\n\n # new block started, indicating with a pipe\n #block_string += \"|\"\n\n #header_para.append(block_string)\n #print(\"**************\")\n for imgno,img in 
enumerate(doc.getPageImageList(pgno)):\n print(\"img found in \",pgno)\n xref = img[0]\n pix = fitz.Pixmap(doc, xref)\n if pix.n < 5: # this is GRAY or RGB\n pix.writePNG(pic_location+\"p%s-%s.png\" % (pgno, imgno))\n else: # CMYK: convert to RGB first\n pix1 = fitz.Pixmap(fitz.csRGB, pix)\n pix1.writePNG(pic_location+\"p%s-%s.png\" % (pgno, imgno))\n pix1 = None\n pix = None \n header_para.append(\"<img> p%s-%s.png\" % (pgno, imgno))\n header_para.append(block_string)\n return header_para", "title": "" }, { "docid": "b9c587b32834186418f3076f28fc49e1", "score": "0.5117599", "text": "def header(self, header_name):\n start = self._str.find(header_name + \":\")\n if start == -1:\n return \"\"\n end = self._str.find(\"\\n\", start)\n if end == -1:\n end = self.size\n return self._str[start+len(header_name)+1:end].strip()", "title": "" }, { "docid": "b295afaed4a4d098b29fb52a81921de8", "score": "0.51126826", "text": "def _getCachedHeader(self, uri, header):\n (scheme, authority, request_uri, cachekey) = urlnorm(uri)\n cached_value = self.get(cachekey)\n header_start = header + ':'\n if not isinstance(header_start, bytes):\n header_start = header_start.encode('utf-8')\n if cached_value is not None:\n for line in BytesIO(cached_value):\n if line.startswith(header_start):\n return line[len(header_start):].strip()\n return None", "title": "" }, { "docid": "74f0c4a6a55a9b322dd12f0cb294ae06", "score": "0.5093462", "text": "def findPosition(line):\n result = re.search(r\"\\|[^\\|]*\\|\\n\", line).start()\n return result", "title": "" }, { "docid": "bca7712c9cab0d9de178e9413fa001d0", "score": "0.5091141", "text": "def parse_hp_block_header(\n block: Union[bytes, bytearray],\n is_big_endian: bool,\n length_before_block: Optional[int] = None,\n raise_on_late_block: bool = False,\n) -> Tuple[int, int]:\n begin = block.find(b\"#A\")\n if begin < 0:\n raise ValueError(\n \"Could not find the standard block header (#A) indicating the start \"\n \"of the block. The block begin by %r\" % block[:25]\n )\n\n length_before_block = length_before_block or DEFAULT_LENGTH_BEFORE_BLOCK\n if begin > length_before_block:\n msg = (\n \"The beginning of the block has been found at %d which \"\n \"is an unexpectedly large value. 
The actual block may \"\n \"have been missing a beginning marker but the block \"\n \"contained one:\\n%s\"\n ) % (begin, repr(block))\n if raise_on_late_block:\n raise RuntimeError(msg)\n else:\n warnings.warn(msg, UserWarning)\n\n offset = begin + 4\n\n data_length = int.from_bytes(\n block[begin + 2 : offset], byteorder=\"big\" if is_big_endian else \"little\"\n )\n\n return offset, data_length", "title": "" }, { "docid": "8b9b75913c75e9bd3e1ed17b839a3c3a", "score": "0.50895673", "text": "def _process_header(config, infile):\n header, line = [], infile.readline()\n while line.startswith(\"@\"):\n header.append(line)\n line = infile.readline()\n\n if config.flag_as_sorted:\n _set_sort_order(header)\n _set_pg_tags(header, config.update_pg_tag)\n\n return header, line", "title": "" }, { "docid": "34b5ffd6fc0ce209998c7c4528dc6518", "score": "0.5081281", "text": "def extract_headers(report):\r\n if END_HEADERS in report:\r\n return report[:report.find(END_HEADERS)]\r\n return report", "title": "" }, { "docid": "c17f8363584d07e86c89433127d6df02", "score": "0.5079453", "text": "def getHeader(self):\n begin = self.where + \" \" + self.direction() if (self.direction != '') else self.where\n end = \": \" + self.get_what_display() if self.what != 'OTHER' else \"\"\n \n return begin + end", "title": "" }, { "docid": "73e82bbad9aef1bf0a192db92cfa4718", "score": "0.505962", "text": "def head(self) -> SnakeBlock:\n\n return self.body[0]", "title": "" }, { "docid": "352da6548eb20ea6e7cf1ce6a083d315", "score": "0.5035433", "text": "def first_line(self) -> str | None:\n if self.output:\n return self.output.split(\"\\n\", 1)[0]\n return None", "title": "" }, { "docid": "7dc8377c2b3742bb5128cf2d21e65019", "score": "0.5032423", "text": "def get_matching_cf(s,data):\n return next((x for x in data if x.title == f'{s.title[0]}cf'),\n next((y for y in data if y.title == \"cantus firmus\")))", "title": "" }, { "docid": "b447a4c0fbb4e1bc1633c4bcd83cad5f", "score": "0.5031311", "text": "def header_from_buffer(buffer, offset=0) -> (dict, list, int):\n header = defaultdict(list)\n while buffer[offset] == b'@':\n end = buffer.find(b'\\n', offset)\n line = buffer[offset:end]\n tag = line[1:2]\n if tag == b'CO':\n header[tag].append(line[4:])\n else:\n header[tag].append({m[0]: m[1] for m in header_re.findall(line)})\n offset = end + 1\n\n return header, [bam.Reference(ref[b'SN'], int(ref[b'LN'])) for ref in header.pop(b'SQ')] if b'SQ' in header else [], offset", "title": "" }, { "docid": "2bb2b6ec7a2891d85be4515fa3bba39c", "score": "0.5031126", "text": "def get_section_from_chunk(chunk, sectionname, item):\n section = []\n in_section = False\n is_done = False\n i = -1\n\n# print(\"Looking for: \"+sectionname)\n for line in chunk:\n if is_done:\n # Something in the previous iteration decided we should stop processing this chunk\n break\n i += 1\n if line == sectionname:\n # We found the section we're looking for\n in_section = True\n continue\n if in_section:\n for check_section_name in SECTION_NAMES:\n # Check to see if we've hit another section\n if line == check_section_name+\":\":\n # We've hit another section, signal the outer loop to stop\n is_done = True\n break\n if not is_done:\n # We're still in our section, so store the line\n section.append(line)\n if section[-1] == \"\":\n # Sections usually end with a blank line, but we don't want it, so remove it\n section.pop()\n if \"\" in section and sectionname not in [\"Notes:\", \"Examples:\"]:\n # Having removed any final blank lines, there should be no further 
blank lines, but we found one\n message = \"%s has a blank line in %s\" % (item[\"signature\"], sectionname)\n warn(message)\n LINTS.append({\n \"file\": item[\"file\"],\n \"line\": int(item[\"lineno\"]) + 3,\n \"title\": \"Blank lines should not occur within sections\",\n \"message\": message,\n \"annotation_level\": \"failure\"\n\n })\n\n return section", "title": "" }, { "docid": "13a62d5900d30dab2febd450738dec8e", "score": "0.50277334", "text": "def get_part1(self):\n return self.zmsg[self.rpos + 2]", "title": "" }, { "docid": "e346fb3cb40c112e1e3af8c4e91f4a42", "score": "0.5025892", "text": "def decode_ccsds_header(self,binary):\n\n\t # pull out CCSDS Version (Bits 0-2)\n version = int(binary[0:3],2)\n\n # pull out CCSDS Type (Bit 3) \n # [0 if TLM, 1 if CMD]\n CCSDStype = int(binary[3],2)\n\n # pull out CCSDS Secondary Packet Header Flag (SHF) (Bit 4) \n # [0 if FALSE, 1 if TRUE]\n sphf = int(binary[4],2)\n\n # pull out CCSDS APID (Bits 5-15) \n apid = int(binary[5:16],2)\n\n # pull out CCSDS Sequence/Grouping Flags (Bits 16-17) \n # [01 1st pkt; 00 cont pkt; 10 last pkt; 11 no group]\n seqflag = int(binary[16:18],2)\n\n # pull out CCSDS Sequence Count (Bits 18-31) \n seqcount = int(binary[18:32],2)\n\n # pull out CCSDS Packet Data Length (Bits 32-47)\n # number ot octets of packet data field minus 1\n datalen = int(binary[32:48],2)\n\n # pull out CCSDS Packet Data Field Data (Bits 48 - end)\n data = binary[49:]\n\n # if DEBUG print out CCSDS data to screen\n if self.debug:\n print(\"CCSDS Version: \",binary[0:3],\"(\",str(version),\")\")\n print(\"CCSDS Type: \",binary[3],\"(\",str(CCSDStype),\")\")\n print(\"CCSDS SPHF: \",binary[4],\"(\",str(sphf),\")\")\n print(\"CCSDS APID: \",binary[5:16],\"(\",str(apid),\")\")\n print(\"CCSDS Sequence Flag: \",binary[16:18],\"(\",str(seqflag),\")\")\n print(\"CCSDS Sequence Count: \",binary[18:32],\"(\",str(seqcount),\")\")\n print(\"CCSDS Data Length: \",binary[32:48],\"(\",str(datalen),\")\")\n print(\"\")\n return data", "title": "" }, { "docid": "91041461c6865f6ff694a7a68eafc6e5", "score": "0.5023813", "text": "def pull(self, pattern):\n # The matching block of text.\n block = None\n # Match the given pattern against the buffer head.\n regexp = re.compile(pattern, re.X|re.U)\n match = regexp.match(self.text, self.index)\n if match is not None:\n # Extract the block that matched the pattern.\n block = match.group()\n # Move the buffer head.\n self.index = match.end()\n # Advance over whitespace characters and comments.\n self.skip()\n return block", "title": "" }, { "docid": "fadfa7b83851f8c0f4cdf8a4c57ace5b", "score": "0.5023421", "text": "def openheader(filename, card=0):\n import pyfits\n return pyfits.getheader(filename, card)", "title": "" }, { "docid": "fb10dad5c8057920ebbd60105927d0e8", "score": "0.502278", "text": "def describe_header(fcs,of):\n of.write(\"*** FCS Header Information ***\\n\")\n of.write(\"Version: \"+fcs.version+\"\\n\")\n text_string = str(len(fcs.text.bytes))+' bytes'\n of.write(\"TEXT: \"+text_string+\"\\n\")\n data_string = str(fcs.standard.ENDDATA-fcs.standard.BEGINDATA+1)+' bytes'\n of.write(\"DATA: \"+data_string+\"\\n\")\n analysis_string = 'False'\n if fcs.standard.BEGINANALYSIS != 0:\n analysis_string = 'True, '\n analysis_string +=str(fcs.standard.ENDANLYSIS-fcs.standard.BEGINANALYSIS+1)+' bytes'\n of.write(\"ANALYSIS: \"+analysis_string+\"\\n\")\n other_string = 'False'\n if len(fcs.other) > 0:\n other_string = 'True, '\n word = 'segment'\n if len(fcs.other) > 1: word = 'segments'\n other_string += 
str(len(fcs.other))+' '+word+', '\n other_string += str(sum([len(x) for x in fcs.other]))+' bytes'\n of.write('OTHER: '+other_string+\"\\n\")", "title": "" }, { "docid": "5ead4c6d060272efbf51b0d3c400bc93", "score": "0.5014535", "text": "def parse_header(self, header):\n # Should be 8 words long\n head_int = np.fromstring(header, dtype=np.uint32) \n\n hdict = self.header_dict\n\n t_ind = hdict['time']\n frame_ind = hdict['frame']\n stat_ind = hdict['station']\n link_ind = hdict['link']\n slot_ind = hdict['slot']\n eud2_ind = hdict['eud2']\n\n station = self.bit_manip(head_int[stat_ind[0]], stat_ind[1], stat_ind[2])\n link = self.bit_manip(head_int[link_ind[0]], link_ind[1], link_ind[2])\n slot = self.bit_manip(head_int[slot_ind[0]], slot_ind[1], slot_ind[2])\n frame = self.bit_manip(head_int[frame_ind[0]], frame_ind[1], frame_ind[2])\n time = self.bit_manip(head_int[t_ind[0]], t_ind[1], t_ind[2])\n count = self.bit_manip(head_int[eud2_ind[0]], eud2_ind[1], eud2_ind[2])\n\n return station, link, slot, frame, time, count", "title": "" }, { "docid": "ebb82adc353f2af8fba1ba1c0a06be0e", "score": "0.5010057", "text": "def _read_block_header(self) -> typing.Tuple[bytes, typing.Optional[int]]:\n return self._handle.read(1), self._read_int()", "title": "" }, { "docid": "c72875ed9e6caed90f1c1b0cfdcf5774", "score": "0.50067693", "text": "def _sync_decode_head(self,\n head_block: SyncGSFBlock) -> GSFFileHeaderDict:\n head: GSFFileHeaderDict = {}\n head['id'] = head_block.read_uuid()\n head['created'] = head_block.read_datetime()\n\n head['segments'] = []\n head['tags'] = []\n\n # Read head block children\n for head_child in head_block.child_blocks():\n # Parse a segment block\n if head_child.tag == \"segm\":\n segm = {}\n segm['local_id'] = head_child.read_uint(2)\n segm['id'] = head_child.read_uuid()\n segm['count'] = head_child.read_sint(8)\n segm['tags'] = []\n\n # Segment blocks can have child tags as well\n while head_child.has_child_block():\n with SyncGSFBlock(head_block) as segm_child:\n if segm_child.tag == \"flow\":\n src_id = segm_child.read_uuid()\n flow_id = segm_child.read_uuid()\n format = segm_child.read_string(64)\n data = segm_child.read_varbytes()\n segm['flow'] = {\n 'source_id': src_id,\n 'flow_id': flow_id,\n 'format': format,\n 'data': data.decode('utf-8')\n }\n elif segm_child.tag == \"tag \":\n key = segm_child.read_varstring()\n value = segm_child.read_varstring()\n segm['tags'].append((key, value))\n\n head['segments'].append(segm)\n\n # Parse a tag block\n elif head_child.tag == \"tag \":\n key = head_child.read_varstring()\n value = head_child.read_varstring()\n head['tags'].append((key, value))\n\n return cast(GSFFileHeaderDict, head)", "title": "" }, { "docid": "c57c6ff678a588a1eef21d538be5d622", "score": "0.49932387", "text": "def csg_extract_metadata(file_name: Path) -> Tuple[str, List[ET.Element]]:\n children = get_root_children(file_name)\n header = children[0].text\n return header, children[1:]", "title": "" }, { "docid": "a9457fbe17194ed364b542db0ab22585", "score": "0.49857605", "text": "def tagStartFromEndViaMatchingName(scimoz, endTagPos, firstVisiblePos):\n # On code-completion an element name can be inserted, but we\n # haven't colourised it yet, so do that now.\n lineNo = scimoz.lineFromPosition(endTagPos)\n lineEndPos = scimoz.getLineEndPosition(lineNo)\n scimoz.colourise(endTagPos, lineEndPos)\n \n endTagPos = _tagStartPosFromPos(scimoz, endTagPos, scimoz.SCE_UDL_M_ETAGO)\n if endTagPos == -1:\n return None\n if scimoz.getCharAt(endTagPos) == ord(\"/\"):\n 
endTagPos -= 1\n tagName = _getCurrTagName(scimoz, endTagPos + 2)\n\n origPos = scimoz.currentPos\n origAnchor = scimoz.anchor\n tagCount = 1\n startTagSearch = \"<\" + tagName\n endTagSearch = \"</\" + tagName\n searchLen = len(tagName) + 1\n scimoz.currentPos = scimoz.anchor = endTagPos - 1\n scimoz.searchAnchor()\n try:\n nextStartTagPos = scimoz.searchPrev(0, startTagSearch)\n if firstVisiblePos is not None and nextStartTagPos < firstVisiblePos:\n return None\n nextEndTagPos = scimoz.searchPrev(0, endTagSearch)\n # Figure out what to do based on what we've seen\n while True:\n if nextStartTagPos == -1:\n return None\n elif nextStartTagPos > nextEndTagPos:\n # Includes no more end-tags\n style = scimoz.getStyleAt(nextStartTagPos)\n if style == scimoz.SCE_UDL_M_STAGO:\n closeTagPos = _getCloseTagPos(scimoz, nextStartTagPos)\n if (closeTagPos > 0\n and (scimoz.getStyleAt(closeTagPos)\n == scimoz.SCE_UDL_M_STAGC)\n and (scimoz.getStyleAt(nextStartTagPos + searchLen)\n != scimoz.SCE_UDL_M_TAGNAME)):\n if tagCount == 1:\n tagEndPos = _tagEndPosFromPos(scimoz, nextStartTagPos,\n scimoz.SCE_UDL_M_STAGC)\n return (nextStartTagPos, tagEndPos,\n endTagPos,\n _tagEndPosFromPos(scimoz, endTagPos,\n scimoz.SCE_UDL_M_ETAGC))\n tagCount -= 1\n scimoz.currentPos = scimoz.anchor = nextStartTagPos - 1\n scimoz.searchAnchor()\n nextStartTagPos = scimoz.searchPrev(0, startTagSearch)\n if firstVisiblePos is not None and nextStartTagPos < firstVisiblePos:\n nextStartTagPos = -1\n else:\n # Found an end-tag\n style = scimoz.getStyleAt(nextEndTagPos)\n if (style == scimoz.SCE_UDL_M_ETAGO\n and (scimoz.getStyleAt(nextEndTagPos + searchLen + 1)\n != scimoz.SCE_UDL_M_TAGNAME)):\n tagCount += 1\n scimoz.currentPos = scimoz.anchor = nextEndTagPos - 1\n scimoz.searchAnchor()\n nextEndTagPos = scimoz.searchPrev(0, endTagSearch)\n if firstVisiblePos is not None and nextEndTagPos < firstVisiblePos:\n nextEndTagPos = -1\n finally:\n scimoz.currentPos = origPos\n scimoz.anchor = origAnchor", "title": "" }, { "docid": "8281dcb3114c3e677e7631ceb868841b", "score": "0.49849796", "text": "def parse_rich_header(self):\n\n # Rich Header constants\n #\n DANS = 0x536E6144 # 'DanS' as dword\n RICH = 0x68636952 # 'Rich' as dword\n\n rich_index = self.__data__.find(\n b'Rich', 0x80, self.OPTIONAL_HEADER.get_file_offset())\n if rich_index == -1:\n return None\n\n # Read a block of data\n try:\n # The end of the structure is 8 bytes after the start of the Rich\n # string.\n rich_data = self.__data__[0x80:rich_index + 8]\n # Make the data have length a multiple of 4, otherwise the\n # subsequent parsing will fail. 
It's not impossible that we retrieve\n # truncated data that it's not a multiple.\n rich_data = rich_data[:4*int(len(rich_data)/4)]\n data = list(struct.unpack(\n '<{0}I'.format(int(len(rich_data)/4)), rich_data))\n if RICH not in data:\n return None\n except PEFormatError:\n return None\n\n # get key, raw_data and clear_data\n key = struct.pack('<L', data[data.index(RICH)+1])\n result = {\"key\": key}\n\n raw_data = rich_data[:rich_data.find(b'Rich')]\n result[\"raw_data\"] = raw_data\n\n ord_ = lambda c : ord(c) if not isinstance(c, int) else c\n\n clear_data = bytearray()\n for i in range(len(raw_data)):\n clear_data.append((ord_(raw_data[i]) ^ ord_(key[i % len(key)])))\n result[\"clear_data\"] = bytes(clear_data)\n\n # the checksum should be present 3 times after the DanS signature\n #\n checksum = data[1]\n if (data[0] ^ checksum != DANS\n or data[2] != checksum\n or data[3] != checksum):\n return None\n\n result[\"checksum\"] = checksum\n headervalues = []\n result[\"values\"] = headervalues\n\n data = data[4:]\n for i in range(int(len(data) / 2)):\n\n # Stop until the Rich footer signature is found\n #\n if data[2 * i] == RICH:\n\n # it should be followed by the checksum\n #\n if data[2 * i + 1] != checksum:\n self.__warnings.append('Rich Header is malformed')\n break\n\n # header values come by pairs\n #\n headervalues += [data[2 * i] ^ checksum, data[2 * i + 1] ^ checksum]\n return result", "title": "" }, { "docid": "a6a4d743ab34be71d32b811e3f8cbdab", "score": "0.49846095", "text": "def get_string(self, key):\r\n start, stop = self._lookup(key)\r\n self._file.seek(start)\r\n self._file.readline() # Skip '1,' line specifying labels.\r\n original_headers = StringIO.StringIO()\r\n while True:\r\n line = self._file.readline()\r\n if line == '*** EOOH ***' + os.linesep or line == '':\r\n break\r\n original_headers.write(line.replace(os.linesep, '\\n'))\r\n while True:\r\n line = self._file.readline()\r\n if line == os.linesep or line == '':\r\n break\r\n return original_headers.getvalue() + \\\r\n self._file.read(stop - self._file.tell()).replace(os.linesep,\r\n '\\n')", "title": "" }, { "docid": "b809f466aa8fba82276df96ca7943bcb", "score": "0.49823892", "text": "def first_trimmed_raw(seg: BaseSegment) -> str:\n s = seg.raw_upper.split(maxsplit=1)\n return s[0] if s else \"\"", "title": "" }, { "docid": "d133aa53eb9c20d30f7bce1a0c7273c6", "score": "0.49757403", "text": "def getNextPattern(filePointer, lineNumber, pattern):\n filePointer.seek(lineNumber)\n current_tell = filePointer.tell()\n for line_number, line in enumerate(iter(filePointer.readline, '')):\n line = line.rstrip()\n # find first header\n # print(line)\n if re.search(pattern, line):\n break\n current_tell = filePointer.tell()\n return current_tell", "title": "" }, { "docid": "b6f72074f29cc632c035e2baa9a0e066", "score": "0.49725312", "text": "def getHeader():\n menuStart = 0\n for i,line in enumerate(self.menulist):\n if line == self.title:\n # print(i)\n menuStart = i\n break\n\n self.header = \"\\n\".join(self.menulist[:menuStart])\n self.title = self.menulist[menuStart]\n self.menulist = self.menulist[menuStart+1:]", "title": "" }, { "docid": "32f06a4d273d5d59fe3f7ce376d9b760", "score": "0.49716538", "text": "def rest_lines_text(self, line):\n # note that python lacks an atomic grouping operator or a possessive\n # quantifier, so a regex like:\n # ^\\s{%s,}([^-].+) is insufficient in and of itself. 
It will backtrack\n # itself into accepting.\n if self.regex_next_lines_whitespace.match(line):\n match = self.REGEX_NEXT_LINES_CONTENT.match(line.lstrip())\n if match:\n return match.groups()[0]\n return None", "title": "" }, { "docid": "512f29e999e38f1b3b3767dd0acc39da", "score": "0.49712932", "text": "def handle_toc_header(val: str) -> nodes.Element:\n para = addnodes.compact_paragraph(\"\", \"\", nodes.Text(val))\n item = nodes.list_item(\"\", para)\n item[\"classes\"].append(\"fs-1-2\")\n return item", "title": "" }, { "docid": "f547da8bf1e07d66de3a98eff20334c0", "score": "0.49655148", "text": "def parse_filename_header(self, diff_line):\n parts = None\n\n if b'\\t' in diff_line:\n # There's a \\t separating the filename and info. This is the\n # best case scenario, since it allows for filenames with spaces\n # without much work. The info can also contain tabs after the\n # initial one; ignore those when splitting the string.\n parts = diff_line.split(b'\\t', 1)\n\n if b' ' in diff_line:\n # There are spaces being used to separate the filename and info.\n # This is technically wrong, so all we can do is assume that\n # 1) the filename won't have multiple consecutive spaces, and\n # 2) there are at least 2 spaces separating the filename and info.\n parts = re.split(b' +', diff_line, 1)\n\n if parts:\n return (parts[0].decode(_fs_encoding),\n b'\\t' + parts[1])\n\n # Strip off ending newline, and return it as the second component.\n return (diff_line.split(b'\\n')[0].decode(_fs_encoding),\n b'\\n')", "title": "" }, { "docid": "f13d3626468a2db7478bad3b3b4d42b6", "score": "0.49617922", "text": "def head_position(self):\n return self.parts[0]", "title": "" }, { "docid": "96ac64641c8ff7641fbaec507049e7e1", "score": "0.49603304", "text": "def find_algorithm_real_name(cpp_path):\n header_path = find_corresponding_headerpath(cpp_path)\n if not os.path.exists(header_path):\n return None\n source = open(header_path).read()\n regex = r'std::string\\ name\\(\\)\\ const\\ \\{\\ return\\ \\\"([\\w]+)\\\"'\n m = re.search(regex, source)\n if not m is None:\n return m.group(1) \n else:\n return None", "title": "" }, { "docid": "5e0288bcf666caafd3e23e9e2ae69e27", "score": "0.4954437", "text": "def get_header(self):\n return self.volume.get_header()", "title": "" }, { "docid": "b771ad3a26795caa452b36368b0bcce3", "score": "0.49530166", "text": "def download_header():\n fieldlist = download_field_list()\n i = 0\n for f in fieldlist:\n if f in TRANSLATE_HEADER:\n fieldlist[i] = TRANSLATE_HEADER[f]\n i = i+1\n return '\\t'.join(fieldlist)", "title": "" }, { "docid": "54f99dbd68adefb48f41e1b21f032135", "score": "0.4949591", "text": "def _asd_head(header):\n # replace data type to reflection\n h1 = list(struct.unpack('484b', header))\n h1[0] = ord('A')\n h1[1] = ord('S')\n h1[2] = ord('D')\n h1[179] = 16\n h1[199] = 0\n header = struct.pack('484b', *h1)\n return header", "title": "" }, { "docid": "ed53cbc09c1c938842388144116a5644", "score": "0.4947893", "text": "def getblock(lines):\r\n blockfinder = BlockFinder()\r\n try:\r\n tokenize.tokenize(iter(lines).next, blockfinder.tokeneater)\r\n except (EndOfBlock, IndentationError):\r\n pass\r\n return lines[:blockfinder.last]", "title": "" }, { "docid": "57d4bdb2973b778944bcfe23589c6ed8", "score": "0.49419406", "text": "def clean_header(fitsPath):\n header = pf.open(fitsPath, ignore_missing_end=True)[0].header\n new_header = pf.Header()\n for x,y,z in header.cards: new_header[x.replace('.','_')] = (y,z)\n return pf.PrimaryHDU(header=new_header).header", "title": 
"" }, { "docid": "14ae5e0a2453bfecd3b52d5a4db6a8be", "score": "0.4939524", "text": "def _extract_tag(self, haml_line):\n try:\n if haml_line.startswith(\".\") or haml_line.startswith(\"#\"):\n return 'div'\n\n return re.match(\"\\w+\", haml_line).group(0)\n except AttributeError:\n raise RuntimeError(\"No tag was specified in %s\" % haml_line)", "title": "" }, { "docid": "f43f7bb24d2150fff2185fddccbfaa01", "score": "0.49357373", "text": "def _find_the_right_header_file(self):\n # Get up to 3 days worth of header files\n hdr_files = []\n base_date = self['_desired_start'].date()\n for offset_days in range(-1,2):\n day = base_date + timedelta(days=offset_days)\n try:\n hdr_files += self._get_header_files_for_date(day)\n except:\n pass\n\n # Iterate pairwise to find the right one to use\n hdr_file = None\n for hdr1, hdr2 in pairwise(hdr_files):\n match1, match2 = [ match_header_filename(f) for f in [hdr1, hdr2] ]\n # Parse each of first/last header to get lower and upper bound of day interval\n stop1_str = match1.group('stop_str')\n stop2_str = match2.group('stop_str')\n # Convert time strings to datetime objects\n t1 = timestr_to_datetime(stop1_str)\n t2 = timestr_to_datetime(stop2_str)\n if self['_desired_start'] > t1 and self['_desired_start'] <= t2:\n #print \"*\", t1, \"to\", t2, \"<<<\", self.desired_start\n hdr_file = hdr2\n break\n return hdr_file", "title": "" } ]
45a270b227dd168f8c816fa65256ce17
A dict of this user's manga stats, with keys as strings, and values as numerics.
[ { "docid": "e41855e15012b2c4c9f88eb45ae6c253", "score": "0.69878834", "text": "def manga_stats(self):\n return self._manga_stats", "title": "" } ]
[ { "docid": "6e5d1b8f1ebbc44afbe20dd37487c1fa", "score": "0.71393543", "text": "def _make_stats(self):\n return {\n \"load_avg\": self.load_avg.to_dict(),\n \"service_time\": self.service_time.to_dict(),\n }", "title": "" }, { "docid": "13dcc88a79d8dadee07400dfb6386aad", "score": "0.70405793", "text": "def get_stats(self):\r\n return {\r\n 'species': self._species,\r\n 'alive': self._alive,\r\n 'typing': self._typing,\r\n 'status': self._status,\r\n 'item': self._item,\r\n 'hp': self._hp,\r\n 'atk': self._atk,\r\n 'def': self._def,\r\n 'spatk': self._spatk,\r\n 'spdef': self._spdef,\r\n 'spd': self._spd,\r\n 'acc': self._acc,\r\n 'eva': self._eva,\r\n 'sub': self._sub\r\n }", "title": "" }, { "docid": "eb4b6a71bdb7ae11deb56f9a6733247b", "score": "0.7038779", "text": "def stats_dict(self):\n return self._stats_dict", "title": "" }, { "docid": "6c6a6d1a35965ef139942b618aca9aea", "score": "0.7022744", "text": "def get_stats(self):\n return {\"hp\": self._hp, \"def\": self._defph, \"defsp\": self._defsp, \"attk\": self._attkph,\n \"attksp\": self._attksp, \"speed\": self._speed, \"attacks\": self._attacks}", "title": "" }, { "docid": "beb55879707cc9b01b3959007bbd819a", "score": "0.66725016", "text": "def statistics(self):\n reply = dict()\n sm = StatsModel.newest_for_user(self.id())\n reply[\"result\"] = Error.LEGAL\n reply[\"nickname\"] = self.nickname()\n reply[\"fullname\"] = self.full_name()\n sm.populate_dict(reply)\n # Add statistics from the user entity\n reply[\"highest_score\"] = self._highest_score\n reply[\"highest_score_game\"] = self._highest_score_game\n reply[\"best_word\"] = self._best_word\n reply[\"best_word_score\"] = self._best_word_score\n reply[\"best_word_game\"] = self._best_word_game\n return reply", "title": "" }, { "docid": "6498faaa38e4f76f14e97fde0db7ee8c", "score": "0.6643124", "text": "def get_stats(self):\n stats_dict = {}\n stats_dict['bias'] = self.bias\n stats_dict['bins(lenghts)'] = [len(x) for x in self.bins]\n stats_dict['total_bins'] = self.bins_total\n stats_dict['features'] = self.features\n stats_dict['delta_mult'] = self.delta_mult\n stats_dict['len(feature_ids)'] = len(self.feature_ids)\n stats_dict['nf_counts'] = self.nf_counts\n stats_dict['len(tree_table)'] = len(self.tree_table)\n\n return stats_dict", "title": "" }, { "docid": "836aa2661b5754eb5c084b9ee70a1b2d", "score": "0.65922165", "text": "def statistics(self) -> Mapping[str, int]:\n return pulumi.get(self, \"statistics\")", "title": "" }, { "docid": "99a584b99b36315f0824221f8233dfdd", "score": "0.6568075", "text": "def statistics(self):\n stats = {}\n for name, resource, fa, member in self._find_members('__statistics__'):\n stats[name] = member()\n return stats", "title": "" }, { "docid": "fc8a0476bd7305c89ce61d69807c43df", "score": "0.65520513", "text": "def statistics(self) -> Mapping[str, str]:\n return pulumi.get(self, \"statistics\")", "title": "" }, { "docid": "a5ee5790ec398ec96355794736c63779", "score": "0.648622", "text": "def _iostats_dict(header, stats):\n stats = [\n float((sum(stat) / len(stat)).quantize(decimal.Decimal(\".01\")))\n for stat in zip(*stats)\n ]\n stats = dict(zip(header, stats))\n return stats", "title": "" }, { "docid": "7729882880da727139e633778263dd9b", "score": "0.6469473", "text": "def getstats():\n stats = {\n \"amenities\": storage.count(\"Amenity\"),\n \"cities\": storage.count(\"City\"),\n \"places\": storage.count(\"Place\"),\n \"reviews\": storage.count(\"Review\"),\n \"states\": storage.count(\"State\"),\n \"users\": storage.count(\"User\")\n }\n 
return jsonify(stats)", "title": "" }, { "docid": "d4662eaf028b8a3f9c029fad42ad5b87", "score": "0.6385749", "text": "def stats(self):\n return self.data[\"attributes\"][\"stats\"]", "title": "" }, { "docid": "6e4e464eb7edf1a138f75dda1b95c1b5", "score": "0.6352421", "text": "def get_stats(self):\n\n stats = {}\n stats.update(self.switch_cost())\n stats.update(self.congruency_effect())\n stats['u_accuracy'] = self.df['ucorrect'].mean()\n stats['m_accuracy'] = self.df['mcorrect'].mean()\n urts = self.df['urt_ms'][self.select(**{'ucorrect': 1})]\n mrts = self.df['mrt_ms'][self.select(**{'mcorrect': 1})]\n stats['u_mean_rt'] = urts.mean()\n stats['m_mean_rt'] = mrts.mean()\n stats['u_rt_sd'] = urts.std()\n stats['m_rt_sd'] = mrts.std()\n self.summary_stats = stats\n return stats", "title": "" }, { "docid": "1bdfd9842d829baa3d4bc5c77d2bf556", "score": "0.63473237", "text": "def package_statistics(self):\n\n stats = {}\n \"\"\"Achievement Statistics\"\"\"\n if self.achievements is not None:\n stats['achievements'] = self.achievement_statistics()\n\n \"\"\"Volume Statistics\"\"\"\n # TODO The maths for this is insane....\n\n \"\"\"Motion Statistics\"\"\"\n # Standard Variation of the Internode Distance Average would represent\n # the variability of the fleet\n mot_stat = {\"std_of_INDA\": np.std([self.inter_distance_average(t) for t in xrange(self.tmax)]),\n \"std_of_INDD\": np.std(\n [self.position_matrix(t) / self.inter_distance_average(t) for t in xrange(self.tmax)])}\n # Fleet speed (i.e. total distance covered) would represent comparative\n # efficiency\n mot_f_distance = np.sum(map(mag, self.v))\n mot_stat[\"fleet_distance\"] = mot_f_distance\n mot_stat[\"fleet_efficiency\"] = (mot_f_distance / self.tmax) / self.n\n\n stats['motion'] = mot_stat\n\n return stats", "title": "" }, { "docid": "f1d166daafcb8b40190bf3bf9a4b22ee", "score": "0.63071144", "text": "def game_stats(self, num1, num2, game_status):\n return {\n 'game_num1': num1,\n 'game_num2': num2,\n 'game_status': game_status\n }", "title": "" }, { "docid": "ad1686e4eac567d5aff44970a4d64374", "score": "0.62988394", "text": "def get_current_user_type_counts(self):\n logging.info(\"Gathering current Zoom user metrics...\")\n\n # create various counts which will help provide metrics\n pro_account_count = 0\n basic_account_count = 0\n corp_account_count = 0\n account_count = 0\n\n account_count = len(self.zoom.model[\"users\"])\n\n for user_data in self.zoom.model[\"users\"]:\n\n # change type from integer to human-readable value\n # also make counts of the number of accounts per type\n if user_data[\"type\"] == 1:\n basic_account_count += 1\n user_data[\"type\"] = \"Basic\"\n elif user_data[\"type\"] == 2:\n pro_account_count += 1\n user_data[\"type\"] = \"Pro\"\n elif user_data[\"type\"] == 3:\n corp_account_count += 1\n user_data[\"type\"] = \"Corp\"\n\n # Share various metrics with the user on\n # total, basic, pro and deprovisioning information to better\n # inform them before proceeding.\n logging.info(\"Total accounts: %s\", account_count)\n logging.info(\"Basic accounts: %s\", basic_account_count)\n logging.info(\"Pro accounts: %s\", pro_account_count)\n logging.info(\"Corp accounts: %s\", corp_account_count)\n\n return {\n \"Basic Accounts\": basic_account_count,\n \"Pro Accounts\": pro_account_count,\n \"Corp Accounts\": corp_account_count,\n \"Total Accounts\": account_count,\n }", "title": "" }, { "docid": "45ed867f1515730e575f01af870a22c0", "score": "0.627091", "text": "def enemy_stats(self):\n stats = {\n \"style\": 
self.enemy_style(),\n \"devotion\": self.enemy_devotion(),\n \"beauty\": self.enemy_beauty(),\n \"generosity\": self.enemy_generosity(),\n \"loyalty\": self.enemy_loyalty(),\n \"creativity\": self.enemy_creativity()\n }\n return stats", "title": "" }, { "docid": "d0a286f71914c33148ca58e91f0e936a", "score": "0.6263168", "text": "def __initial_stats() -> Dict[str, int]:\n stats = {\n 'read': 0,\n 'ACKs': 0,\n 'ARPs': 0,\n 'sent': 0,\n 'pps': 0\n }\n return stats", "title": "" }, { "docid": "82492154f4e012f872b12f8e0b5bd8b1", "score": "0.62594354", "text": "def stats_by_document(self) -> Dict[str, GenderDistanceStats]:\n return _get_stats_from_distances_by_metadata_value(self.by_document())", "title": "" }, { "docid": "8f8c4953ccf503d259c68e3e5f1deb18", "score": "0.6212154", "text": "def stats():\n return jsonify({\"amenities\": models.storage.count('Amenity'),\n \"cities\": models.storage.count('City'),\n \"places\": models.storage.count('Place'),\n \"reviews\": models.storage.count('Review'),\n \"states\": models.storage.count('State'),\n \"users\": models.storage.count('User')})", "title": "" }, { "docid": "cd1ea6a5ff280a69050d25fb963c01e2", "score": "0.619965", "text": "def _get_stats(self):\n with open('rating.txt') as file:\n for line in file:\n (key, val) = line.split()\n self.stats[key] = int(val)\n file.close()", "title": "" }, { "docid": "30d98efc851cc71e099140476c08e495", "score": "0.61783916", "text": "def anime_stats(self):\n return self._anime_stats", "title": "" }, { "docid": "62aad8afe1f03fc2e2c31e603c79d918", "score": "0.6165689", "text": "def get_metrics(self) -> dict:\n return {}", "title": "" }, { "docid": "544909ae04c99e2f6ec2c7d5c3eef67d", "score": "0.61506534", "text": "def stats():\n dict_types = {\"amenities\": Amenity, \"cities\": City, \"places\": Place,\n \"reviews\": Review, \"states\": State, \"users\": User}\n for key, values in dict_types.items():\n dict_types[key] = storage.count(values)\n return jsonify(dict_types)", "title": "" }, { "docid": "b51f63b4cb2ce113616ed891e3cbb1e6", "score": "0.6114079", "text": "def stats(self):\r\n return self.__stats", "title": "" }, { "docid": "51bcff0685f7f184a8892240f9246313", "score": "0.6111447", "text": "def get_spam_data(self, user):\n data = {'total': self.get_user_message_count(user), '%': 0, 'spam': self.get_user_message_count(user, spam__gt=Message.SPAM_THRESHOLD)}\n data['%'] = (100.0 * data['spam'] / data['total']) if data['total'] > 0 else 0\n return data", "title": "" }, { "docid": "bbb7de310e4eaea2c66d6be4e7b12bf2", "score": "0.60971975", "text": "def stats():\n ob_d = {\"amenities\": \"Amenity\", \"cities\": \"City\", \"places\": \"Place\",\n \"reviews\": \"Review\", \"states\": \"State\", \"users\": \"User\"}\n count_d = {}\n for k, v in ob_d.items():\n count_d[k] = storage.count(v)\n return(jsonify(count_d))", "title": "" }, { "docid": "4e29615a009eb912940cdb43f75196bb", "score": "0.60917795", "text": "def stats(self):\n return self._stats", "title": "" }, { "docid": "4e29615a009eb912940cdb43f75196bb", "score": "0.60917795", "text": "def stats(self):\n return self._stats", "title": "" }, { "docid": "4e29615a009eb912940cdb43f75196bb", "score": "0.60917795", "text": "def stats(self):\n return self._stats", "title": "" }, { "docid": "4e29615a009eb912940cdb43f75196bb", "score": "0.60917795", "text": "def stats(self):\n return self._stats", "title": "" }, { "docid": "4e29615a009eb912940cdb43f75196bb", "score": "0.60917795", "text": "def stats(self):\n return self._stats", "title": "" }, { "docid": 
"33da6c739a302c030a68e350e9e44ba2", "score": "0.6091499", "text": "def stats():", "title": "" }, { "docid": "fdb8c7dc745071f13c62e2b0feff707d", "score": "0.60865366", "text": "def bin_stats(self) -> dict:\n\n stats = {\n 'width': self.x,\n 'height': self.y,\n 'area': self.area,\n 'efficiency': (self.area-self.free_area)/self.area,\n 'items': self.items,\n }\n\n return stats", "title": "" }, { "docid": "98699f6e1b306b722b3c32adc3701cd2", "score": "0.60841745", "text": "def stats(self):\n stats = {\"epoch\": self.epoch}\n for k, t in self._timers.items():\n stats[k + \"_time_mean\"] = t.mean\n stats[k + \"_time_total\"] = t.sum\n t.reset()\n return stats", "title": "" }, { "docid": "c1169affda43c58db5d489dacb90c288", "score": "0.6016104", "text": "def get_user_info(self):\n return dict((key, self.get('user-info', key)) for key in self.user_info_keys)", "title": "" }, { "docid": "5fba6bdfe5fc689c8e8da228f8fa3d3b", "score": "0.5986881", "text": "def _register_statistics(self):\n x = { \"u0\": self.popul_parameter, \"u\": self.sample_statistic,\n \"hypotest\": self.test_statistic_name+\"-Test for HT about means\",\n \"n\": self.sample_size,\n self.test_statistic_name: self.test_statistic,\n \"p\": self.pvalue, \"side\": self.side }\n if self.test_statistic_name==\"t\":\n x.update( {\"se\": self.standard_error,\n \"df\": self.deg_of_freedom} )\n elif self.test_statistic_name==\"z\":\n x.update( {\"sd\": self.standard_deviation} )\n return x", "title": "" }, { "docid": "efe31dca2092c62cfb9b75a55be00e68", "score": "0.5986477", "text": "def get_statistics(self):\n length = len(self.Azimuth)\n self.rain_stats = {}\n self.drizzle_stats = {}\n for key in STATS_KEYS:\n self.rain_stats[key] = np.zeros(length)\n self.drizzle_stats[key] = np.zeros(length)\n for j in np.arange(length):\n self.rain_stats['N'][j], self.rain_stats['low'][j], \\\n self.rain_stats['median'][j], self.rain_stats['high'][j] = \\\n calc_median_ci(self.rain_refl[np.str(j)])\n self.drizzle_stats['N'][j], self.drizzle_stats['low'][j], \\\n self.drizzle_stats['median'][j],\\\n self.drizzle_stats['high'][j] = \\\n calc_median_ci(self.drizz_zdr[np.str(j)])", "title": "" }, { "docid": "6d704c497e6e2941462976f54150cb8c", "score": "0.59766155", "text": "def basic_stats(self):\n return self._statistics()", "title": "" }, { "docid": "6d704c497e6e2941462976f54150cb8c", "score": "0.59766155", "text": "def basic_stats(self):\n return self._statistics()", "title": "" }, { "docid": "6d704c497e6e2941462976f54150cb8c", "score": "0.59766155", "text": "def basic_stats(self):\n return self._statistics()", "title": "" }, { "docid": "3889bc47d511851483067ad05535d8b3", "score": "0.59637886", "text": "def __get_stats(self):\n stats = {}\n # Get fresh NavData\n NDC = self.__drone.NavDataCount\n while self.__drone.NavDataCount == NDC or not all(\n package in self.__drone.NavData for package in self.__REQ_PACKS):\n time.sleep(0.01)\n\n # Straightforward data\n stats[\"acc\"] = self.__drone.NavData[\"raw_measures\"][0]\n stats[\"gyr\"] = self.__drone.NavData[\"raw_measures\"][1]\n stats[\"gps\"] = self.__drone.NavData[\"gps\"][:-1] # not using altitude value\n stats[\"pry\"] = self.__drone.NavData[\"demo\"][2] # pitch roll yaw\n stats[\"mfu\"] = self.__drone.NavData[\"magneto\"][6]\n stats[\"vel\"] = self.__drone.NavData[\"demo\"][4] # xyz velocity mm/s\n\n # Convert altitude to meters\n stats[\"alt\"] = self.__drone.NavData[\"altitude\"][0] / 1000.0\n\n # Turn magnetometer data into heading (degrees)\n stats[\"mag\"] = 
self.__drone.NavData[\"magneto\"][0][:-1] # not using z value\n for i in range(len(stats[\"mag\"])): stats[\"mag\"][i] -= self.__mag_avg[i]\n stats[\"deg\"] = -1 * math.atan2(stats[\"mag\"][1], stats[\"mag\"][0])\n\n # Set new stats\n return stats", "title": "" }, { "docid": "c2b0f84e39dca47252e6835c4af60f53", "score": "0.5943142", "text": "def user_statistics():\r\n #Creating csv format dict\r\n head = [\"User name\", \"INFO\", \"ERROR\"]\r\n row = []\r\n\r\n \"\"\"---------------------------------------sorted in nest dict-------------------------------------------\"\"\"\r\n # sort by Username\r\n for name in sorted(alert_count_per_user.keys()):\r\n # add value to row format\r\n temp_dict = {}\r\n temp_dict[\"User name\"] = name\r\n temp_dict[\"INFO\"] = alert_count_per_user[name][\"INFO\"]\r\n temp_dict[\"ERROR\"] = alert_count_per_user[name][\"ERROR\"]\r\n row.append(temp_dict)\r\n \"\"\"-----------------------------------------------------------------------------------------------------\"\"\"\r\n\r\n #creating csv file\r\n with open(\"User_statistics.csv\",\"w\") as writer:\r\n writer = csv.DictWriter(writer, fieldnames = head)\r\n writer.writeheader()\r\n writer.writerows(row)", "title": "" }, { "docid": "505e9378f75bb5df197619c29b9eb662", "score": "0.59430456", "text": "def stats(self):\n sum_counts = self.counts.sum()\n imbalance_factor = self.counts.shape[0] * np.power(self.counts, 2).sum() / sum_counts**2\n return {\n \"vectors_per_image\": sum_counts / self.n_images,\n \"mean_entries_per_vw\": self.counts.mean(),\n \"empty_vw\": sum(1 for x in self.counts if x == 0),\n \"min_entries_per_vw\": self.counts.min(),\n \"max_entries_per_vw\": self.counts.max(),\n \"std_of_entries_per_vw\": self.counts.std(),\n \"imbalance_factor_of_vw\": imbalance_factor,\n }", "title": "" }, { "docid": "1a5675eebdb47c39d9484289146bde81", "score": "0.59230113", "text": "def stats(self, debug: bool = False) -> dict:\n data = {\n \"added_count\": self._num_timesteps_added,\n \"added_count_wrapped\": self._num_timesteps_added_wrap,\n \"eviction_started\": self._eviction_started,\n \"sampled_count\": self._num_timesteps_sampled,\n \"est_size_bytes\": self._est_size_bytes,\n \"num_entries\": len(self._storage),\n }\n if debug:\n data.update(self._evicted_hit_stats.stats())\n return data", "title": "" }, { "docid": "c6cffd6f9eb1c134090cbe71e89d5e5f", "score": "0.59224224", "text": "def get_stats(self):\n self.std()\n self.means()\n return (self.x_mean,\n self.y_mean,\n self.z_mean,\n self.gyrox_mean,\n self.gyroy_mean,\n self.gyroz_mean,\n self.x_std,\n self.y_std,\n self.z_std,\n self.gyrox_std,\n self.gyroy_std,\n self.gyroz_std)", "title": "" }, { "docid": "d03f7f106c6d0567c2603dd51d18c58f", "score": "0.5918336", "text": "def stats(self, pid=None):\n if sys.platform != 'linux':\n return {}\n if pid is None:\n pid = os.getpid()\n\n try:\n with open('/proc/{pid}/stat'.format(pid=pid), 'r', encoding='ascii') as fp:\n raw = fp.readline().strip().split(' ')\n cooked = dict(zip(self.cols,raw))\n retval = {k+'_min':float(cooked[k])/self.CLOCKTICK/60.0 for k in 'utime stime cutime cstime'.split(' ')}\n retval['vsize_MB'] = float(cooked['vsize'])/1024.0/1024.0\n retval['rss_MB'] = float(cooked['rss'])*self.PAGESIZE/1024.0/1024.0\n\n return retval\n except Exception as e:\n logging.error('Exception trying to get stats from /proc/{pid}/stat: {e}'.format(pid=pid, e=repr(e)))\n return {}", "title": "" }, { "docid": "8f93b0c03aea8a435b7af8404a7e1b20", "score": "0.5900035", "text": "def stats(self):\n raise 
NotImplementedError()", "title": "" }, { "docid": "007c4733eb976f555a8aa25802766443", "score": "0.58954996", "text": "def _parse_stats(stats):\n stat = {}\n for attribute in ElementTree.fromstring(stats):\n if attribute.tag == 'BytesScanned':\n stat['BytesScanned'] = attribute.text\n elif attribute.tag == 'BytesProcessed':\n stat['BytesProcessed'] = attribute.text\n elif attribute.tag == 'BytesReturned':\n stat['BytesReturned'] = attribute.text\n\n return stat", "title": "" }, { "docid": "bc918958dbddb04eec6c35a370673a9d", "score": "0.5892207", "text": "def get_global_stats():\n raw_response = request_helper(STAT_URL)\n stat_response = raw_response.decode('utf-8')\n stat_json_response = json.loads(stat_response)\n stat_dict = stat_json_response['result']\n stats = Stats.construct_from_dict(stat_dict)\n return stats", "title": "" }, { "docid": "add8894c791f8deb2222dd1300b924a9", "score": "0.58847165", "text": "def calculate_metrics(self) -> Dict[str, Any]:\n\n return self.__dict__", "title": "" }, { "docid": "1e35ba2b1fb98b0e320de63040ad99b3", "score": "0.5851763", "text": "def job_stats(self):\n stats = {}\n # vCPU-hr\n stats['total_vcpu_time'] = self.GetMetricValue('TotalVcpuTime') / 3600\n # GB-hr\n stats['total_mem_usage'] = self.GetMetricValue(\n 'TotalMemoryUsage') / 1024 / 3600\n # GB-hr\n stats['total_pd_usage'] = self.GetMetricValue('TotalPdUsage') / 3600\n # TODO(user): retrieve BillableShuffleDataProcessed\n # and/or BillableStreamingDataProcessed when applicable\n return stats", "title": "" }, { "docid": "251b331a59103f3919347a446af43ce5", "score": "0.58503014", "text": "def fancyStats():\n\torigin = []\n\tusage = []\n\tpoints = []\n\tdiff = []\n\tallWords = words.posts.find({}, {'origin':1, 'use':1, 'points':1, 'diff':1})\n\tfor word in allWords:\n\t\torigin.append(word['origin'])\n\t\tusage.append(word['use'])\n\t\tpoints.append(word['points'])\n\t\tdiff.append(word['diff'])\n\td = {'origin' : origin, 'use' : usage, 'points' : points, 'diff' : diff}\n\treturn d", "title": "" }, { "docid": "7a994797879cf3b6bbac02dbd9409030", "score": "0.5847094", "text": "def add_stats(self, **kwargs):\n for key, value in kwargs.items():\n if key[-1] == \"_\":\n key = key[:-1]\n assert key not in self.stats\n self.stats[key] = value\n return self.stats", "title": "" }, { "docid": "8328570c71cd3c481373404dd63c415d", "score": "0.5832454", "text": "def getStats(self):\n if self.cspace is None: return {}\n return self.cspace.getStats()", "title": "" }, { "docid": "1761ab8054f5aace4cf2149a9c38cfdc", "score": "0.58256066", "text": "def statistics(self):\n return dict(\n n_accepted=self.n_accepted,\n n_proposed=self.n_proposed,\n initial_energy=self.initial_energy,\n initial_positions=self.initial_positions,\n final_energy=self.final_energy,\n proposed_positions=self.proposed_positions,\n final_positions=self.final_positions,\n logp_accept=self.logp_accept)", "title": "" }, { "docid": "c2d830f8d21865980150226c316959dd", "score": "0.5819166", "text": "def __init__(self):\n self.stats = {\n \"pool\": 0,\n \"container\": 0,\n \"object\": 0,\n \"dkey\": 0,\n \"akey\": 0,\n \"array\": 0,\n \"single_value\": 0,\n \"user_value\": 0,\n \"user_meta\": 0,\n \"total_meta\": 0,\n \"nvme_total\": 0,\n \"total\": 0\n }", "title": "" }, { "docid": "face2835aa058528800bab789a64cc9d", "score": "0.58154", "text": "def get_gender_stats(self):\n\t\treturn self.gender_stats", "title": "" }, { "docid": "298e4318fe13d5bb0752c0cbddc8cf34", "score": "0.58151317", "text": "def stats(self):\n return self.data.state.stats", 
"title": "" }, { "docid": "61637e36a79c1bc421eaca42c1063bd1", "score": "0.580789", "text": "def statistics(self):\r\n\t\t# one takes the first examiner as representative\r\n\t\tfor Slot in self.storage[0].storages:\r\n\t\t\tif len(list(set([Exam.storages[Slot].itemLength for Exam in self.storage]))) > 1:\r\n\t\t\t\terror('Observer: ',self.Name + ': Inconsistent item length accross examiners')\r\n\t\t\t# computing the best value of each coordinate\r\n\t\t\tbest = map(lambda x: max(x), transpose([Exam.storages[Slot].best \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tfor Exam in self.storage]))\r\n\t\t\t# computing the total number of individual data\r\n\t\t\tcumulative_number = sum([Exam.storages[Slot].length for Exam in self.storage])\r\n\t\t\t# computing global statistics by summing averages weighted by corresponding numbers\r\n\t\t\ttotals = transpose([map(lambda x: x*Exam.storages[Slot].length,\r\n\t\t\t\t\t\t Exam.storages[Slot].average) for Exam in self.storage])\r\n\t\t\tif cumulative_number:\r\n\t\t\t\taverage = map(lambda x: sum(x)/cumulative_number, totals)\r\n\t\t\telse:\r\n\t\t\t\taverage = map(lambda x: sum(x), totals)\r\n\t\t\tself.Statistics[Slot] = dict([('length',cumulative_number), \r\n\t\t\t\t\t\t\t\t\t\t ('best',best),\r\n\t\t\t\t\t\t\t\t\t\t ('average', average),\r\n\t\t\t\t\t\t\t\t\t\t ('data',reduce(lambda x,y: x+y, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t tuple(tuple(Exam.storages[Slot].storage \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t for Exam in self.storage))))])\r\n\t\treturn self.Statistics", "title": "" }, { "docid": "8739e0e338a9cbb22df52589409498ce", "score": "0.5801872", "text": "def getScoreInfo(cls):\n return {}", "title": "" }, { "docid": "1616553fa70732bfd0257ad5303cdf27", "score": "0.57926905", "text": "def summarize_info(self):\n for key in self.scores.keys():\n scored = self.scores[key][1] # goals score\n against = self.scores[key][2] # goals against\n wins = self.scores[key][0].count(3) # number of wins\n ties = self.scores[key][0].count(1) # number of ties\n losses = self.scores[key][0].count(0) # number of losses\n points = 3 * wins + 1 * ties # total points\n\n self.info[key] = [points, wins + ties + losses,\n wins, ties, losses,\n scored - against, scored, against]\n return self.info", "title": "" }, { "docid": "2154618674750ed2710e17ca9d881937", "score": "0.5786472", "text": "def _InitializePerPlayerStatsMap(self):\n return dict((ele, 0) for ele in common.PLAYER_STATS_MAPPING)", "title": "" }, { "docid": "824642af7785a7a1d69cc92be10469fd", "score": "0.5779562", "text": "def stats_by_author_gender(self) -> Dict[str, GenderDistanceStats]:\n return self.stats_by_metadata(metadata_key='author_gender')", "title": "" }, { "docid": "5e4d68a514d6986d8c8ddb1c18d683e1", "score": "0.5775745", "text": "def corpus_stats(self) -> GenderDistanceStats:\n corpus_distances_by_gender = self.corpus_results()\n\n corpus_stats = {}\n for gender, results_by_gender in corpus_distances_by_gender.items():\n corpus_stats[gender] = _get_stats_from_distances(results_by_gender)\n\n return corpus_stats", "title": "" }, { "docid": "07a331e4b2ae4f662cf65da653ac14ba", "score": "0.57734203", "text": "def make_metrics(self):\n return {}", "title": "" }, { "docid": "3030bfbc0e6ab50b369b638f4b6c8d59", "score": "0.5764561", "text": "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n timer = MethodTimer()\n\n # Display counts of user types\n print(\"The user type counts ::\")\n print(df['User Type'].value_counts())\n\n try:\n # Display counts of gender\n print(\"\\nThe gender counts ::\")\n 
print(df['Gender'].value_counts())\n\n # Display earliest, most recent, and most common year of birth\n earliest_year_of_birth = df['Birth Year'].min()\n most_recent_year_of_birth = df['Birth Year'].max()\n most_common_year_of_birth = df['Birth Year'].mode()\n print(\"\\nThe earliest year of birth :: %s \" % int(earliest_year_of_birth))\n print(\"The most recent year of birth :: %s \" % int(most_recent_year_of_birth))\n print(\"The most common year of birth :: %s \" % int(most_common_year_of_birth[0]))\n except KeyError:\n print(\"Some statistics are not available ... \")\n\n timer.print_duration()", "title": "" }, { "docid": "a93ea05084915f720bc3891845d36833", "score": "0.5759543", "text": "def get_stats(self):\n langspec=sum(1 for deprel in self.deprel_counter.iterkeys() if u\":\" in deprel)\n ud_rels=len(set(deprel.split(u\":\")[0] for deprel in self.deprel_counter.iterkeys()))\n d={\"tree_count\":self.tree_count,\"word_count\":self.word_count,\"token_count\":self.token_count,\"deprels\":len(self.deprel_counter),\"langspec_deprels\":langspec, \"universal_deprels\":ud_rels, \"postags\":sum(1 for cat_is_val in self.f_val_counter if cat_is_val.startswith(u\"CPOSTAG=\")),\"catvals\":sum(1 for cat_is_val in self.f_val_counter if not cat_is_val.startswith(u\"CPOSTAG=\")),\"words_with_lemma_count\":self.words_with_lemma_count,\"words_with_deps_count\":self.words_with_deps_count}\n return d", "title": "" }, { "docid": "08a6dd2d9827e61f19df0805ec53c7c9", "score": "0.5753854", "text": "def get_statistics(system_corpus: tp.SystemCorpus) -> dict:\n averages = ['micro', 'macro', 'weighted']\n f1_scores = tuple(map(lambda average: str(get_f1_intent(system_corpus, average)), averages))\n return {\n 'system name': system_corpus.system.name,\n 'corpus': str(system_corpus.corpus),\n 'f1 intent scores': dict(zip(averages, f1_scores))\n }", "title": "" }, { "docid": "cc3952ee4aca76cb9f9bd8dadabd6b8c", "score": "0.5749783", "text": "def event_stats(self):\n stats = []\n if self.is_goaltend_violation:\n stats.append(\n {\n \"player_id\": self.player1_id,\n \"team_id\": self.team_id,\n \"stat_key\": DEFENSIVE_GOALTENDING_STRING,\n \"stat_value\": 1,\n }\n )\n team_ids = list(self.current_players.keys())\n lineups_ids = self.lineup_ids\n for stat in stats:\n opponent_team_id = (\n team_ids[0] if stat[\"team_id\"] == team_ids[1] else team_ids[1]\n )\n stat[\"lineup_id\"] = lineups_ids[stat[\"team_id\"]]\n stat[\"opponent_team_id\"] = opponent_team_id\n stat[\"opponent_lineup_id\"] = lineups_ids[opponent_team_id]\n return self.base_stats + stats", "title": "" }, { "docid": "1c9c73cde3dee95f103a38876143bde6", "score": "0.57406217", "text": "def make_stats_dict(in_list):\n my_dict = {}\n my_dict[\"Sum\"] = np.sum(in_list)\n my_dict[\"Min\"] = np.min(in_list)\n my_dict[\"Mean\"] = np.mean(in_list)\n\n # Do some stats calculations, store in dictionary\n return my_dict", "title": "" }, { "docid": "6b6ab75511d61fd4f350b03ff3be67a2", "score": "0.5726788", "text": "def getStatistics(self,):\n out = {}\n\n # Get all wanted metric\n for metric in self._case:\n out[metric] = self._methodDict[metric]()\n\n return out", "title": "" }, { "docid": "45d67d7429bee9e2d48aa11a459e9ab6", "score": "0.57227737", "text": "def _initialise_stats(self):\r\n calculated_stats = {a:0 for a in ATTRIBUTES}\r\n for _ in range(20): #Randomly allocate 20 stat points\r\n calculated_stats[random.choice(ATTRIBUTES)] += 1\r\n bonuses = self._calculate_response_bonuses()\r\n for b in bonuses: #Apply the remaining 8-10 response points\r\n 
calculated_stats[b] += bonuses[b]\r\n return calculated_stats", "title": "" }, { "docid": "ca8e24470aab2d1b7041f51fbdc75350", "score": "0.57224977", "text": "def metrics(self) -> Dict[str, Any]:\n return self.__metrics", "title": "" }, { "docid": "c8ead7ab3089e35b5f857c6ddef6d622", "score": "0.5713563", "text": "def stat(self):\n\t\t\n\t\tout = {}\n\t\tout.update(self.draw_s())\n\t\tout.update(self.draw_constants())\n\t\treturn out", "title": "" }, { "docid": "3a4f1a2149a7a6457f38eb5ae869f7eb", "score": "0.5702765", "text": "def gm_stats(self) -> ModeData:\n return self.stats[self.status.mode]", "title": "" }, { "docid": "853f8e7efc95312f80aa8cc7ebd4c3d0", "score": "0.57004595", "text": "def tweak_stats(self,redditor_id):\n\n huachis = HuachiNet(redditor_id)\n\n ultimate_stats = {\"Genkidama\" : (10,[0,0,(self.contar_miembros(huachis.guild)* 1),0]),\n \"LlamadoTuculo\" : (10,[50,20,(self.contar_miembros(huachis.guild)* 1),0]),\n \"BendicionRaquel\" : (10,[0,0,0,(self.contar_miembros(huachis.guild)* 1)]),\n \"ExorcismoFantasmas\" : (10,[30,20,50 + (self.contar_miembros(huachis.guild)* 1),0])\n }\n\n perks_stats = {\"ImpactTrueno\" : (5,[0,0,30,0]),\n \"PolvoDiamante\" : (5,[0,0,30,0]),\n \"Rasegan\" : (5,[0,0,50,0]),\n \"Omaewamushinderu\" : (5,[0,0,40,0]),\n \"VisionNalgotica\" : (5,[0,0,30,0]),\n \"Conxuro\" : (5,[0,0,50,20]),\n \"LecturaTarot\" : (5,[0,0,30,0]),\n \"AguaCalzon\" : (5,[0,0,40,0]),\n \"Normal\" : (0,[0,0,0,0]),\n \"CeroMiedo\" : (5,[0,-30,20,0]),\n \"CampeonAjedrez\" : (5,[0,0,30,0]),\n \"MonaInflable\" : (5,[0,30,0,0]),\n \"MALVERDE\" : (5,[0,0,30,0]),\n \"ParvadaCuervos\" : (5,[0,0,30,0]),\n \"SaposAlucinogenos\" : (5,[0,0,40,0]),\n \"Trepanacion\" : (5,[0,0,30,0]),\n \"Sangria\" : (5,[0,0,30,0]),\n \"Gabardina\" : (0,[0,0,0,0])\n }\n\n base_stats = huachis.stats\n \n #Conocer perk del usuario\n if huachis.perk in ultimate_stats:\n\n mod_stats = ultimate_stats[huachis.perk]\n\n else:\n\n mod_stats = perks_stats[huachis.perk]\n\n #Si no tiene energia regresamos basestats\n if huachis.power < mod_stats[0]:\n\n return base_stats\n \n else:\n\n huachis.Consumir_Energia(mod_stats[0])\n\n new_stats = [ base_stats[i] + mod_stats[1][i] for i in range(4) ]\n\n return new_stats", "title": "" }, { "docid": "88ecf0fea5c938ebbe58f98fbddc2cba", "score": "0.56925815", "text": "def get_metrics(self):\n return {self.metric_names[i] : self._metrics[i] for i in range(len(self._metrics))}", "title": "" }, { "docid": "326e01133b9ad5ede8fc92d6faf51204", "score": "0.5689769", "text": "def statistics(self):\n\n return {\"loops\": self.loops,\n \"bitops\": self.bitops,\n \"syscalls\": self.syscalls,\n \"indirect_jumps\": self.indirect_jumps,\n \"strings\": self.string_table,\n \"filename\": self._project.filename,\n \"main_object\": {\"execstack\": self._main_object.execstack,\n \"pic\": self._main_object.pic,\n \"relocations\": {hex(addr): data for addr, data\n in self._relocs.items()},\n },\n \"binary_info\": {\"arch\": self._project.arch.name,\n \"entry\": hex(self._project.entry)\n },\n \"functions\": self.functions,\n # Convert callgraph to something that can be printed.\n \"callgraph\": {hex(addr): {hex(call): {str(k): v for k, v\n in info.items()}\n for call, info in data.items()}\n for addr, data in\n self._project.kb.callgraph.adj.items()},\n }", "title": "" }, { "docid": "be5f9c136ba37f10f8f92fac0b5457f3", "score": "0.56813127", "text": "def statistics_tables(self):\n return {\n \"overall\": self.overall_statistics_table(),\n \"resolution_binned\": self.merging_statistics_table(),\n 
\"cc_half_significance_level\": 0.01, # Note - in dials.scale and dials.merge, this is a hardcoded value\n \"overall_summary_data\": self.overall_statistics_summary_data(),\n }", "title": "" }, { "docid": "f8694ad5d0b8c77c7618e2c81b5ee948", "score": "0.5678929", "text": "def get_metadata(self):\n\n return {\n 'num_cg': self.num_cg,\n }", "title": "" }, { "docid": "b4ac0ed99ebb5f30d71e50ad3c48e598", "score": "0.5676591", "text": "def get_info(self):\n\n info = {\n \"green_genes\": len(self.symbols[\"3\"]),\n \"entity_types\": self.data[\"stats\"]\n }\n\n return info", "title": "" }, { "docid": "cb336df405d000ad9b00262ef5bdc1b4", "score": "0.5671076", "text": "def dictionary_of_metrics(items):\n \n n = len(items)\n average = round(np.mean(items), 2)\n median = round(np.median(items), 2)\n variance = round((sum((items-np.mean(items))**2))/(n-1), 2)\n standard_dev = round(((sum((items-np.mean(items))**2))/(n-1))**(1/2), 2)\n minimum = round(min(items), 2)\n maximum = round(max(items), 2)\n \n return {'mean':average,'median':median,'var':variance,'std':standard_dev,'min':minimum,'max':maximum}", "title": "" }, { "docid": "dbf09483ff9da8521c1fed1cb5e3db38", "score": "0.5665446", "text": "def cmd_get_global_stats(args, opts):\n\tstats = jsonrpc_call(opts, 'stats', 'get_stats')\n\tfor name, value in stats.items():\n\t\tprint(\"%-40s %s\" % (name, value))", "title": "" }, { "docid": "24ae760f4686036b719c2a42b6250bb0", "score": "0.56594294", "text": "def __StatisticRating__(self):\n\t\tuserRate = {}\n\t\tbusinessRate = {}\n\n\t\tfor rid,r in self.getReviews().iteritems():\n\t\t\tuid = r.getUID()\n\t\t\tbid = r.getBID()\n\t\t\t\n\t\t\tif userRate.has_key(uid):\n\t\t\t\tuserRate[uid] += 1\n\t\t\telse:\n\t\t\t\tuserRate[uid] = 1\n\n\t\t\tif businessRate.has_key(bid):\n\t\t\t\tbusinessRate[bid] += 1\n\t\t\telse:\n\t\t\t\tbusinessRate[bid] = 1\n\n\t\treturn userRate,businessRate", "title": "" }, { "docid": "76f179d9147602b4d68d5583703156d9", "score": "0.56466806", "text": "def user_stats(df):\r\n\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n\r\n # To display counts of user types\r\n user_types = df['User Type'].value_counts()\r\n print(f'Counts of user types are: {user_types}')\r\n # To display counts of gender\r\n if 'Gender' in df:\r\n gender_counts = df['Gender'].value_counts()\r\n print(f'The gender counts are: {gender_counts}')\r\n else:\r\n print('information weren\\'t collected') \r\n # To display earliest, most recent, and most common year of birth\r\n if 'Birth Year' in df:\r\n birth_earliest = df['Birth Year'].min()\r\n birth_recent = df['Birth Year'].max()\r\n birth_common = df['Birth Year'].mode()[0]\r\n print(f'The youngest rider was born on: {birth_recent}\\n The oldest rider was born on: {birth_earliest}\\n Riders with the most common age were born on: {birth_common}')\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "title": "" }, { "docid": "a4d6c9ff1ae1859399061b06482c6a2f", "score": "0.5645205", "text": "def to_dict(self) -> ContributorStatsSummaryDict:\n return {\n 'contributor_user_id': self.contributor_user_id,\n 'translation_contribution_stats': [\n stats.to_dict() for stats in (\n self.translation_contribution_stats)],\n 'question_contribution_stats': [\n stats.to_dict() for stats in self.question_contribution_stats],\n 'translation_review_stats': [\n stats.to_dict() for stats in self.translation_review_stats],\n 'question_review_stats': [\n stats.to_dict() for stats in self.question_review_stats]\n }", 
"title": "" }, { "docid": "b030bfc834ce799e8debb7d2c6df5ced", "score": "0.5640184", "text": "def info(self):\n if not self.exists: return {}\n return self.stats[0]", "title": "" }, { "docid": "a2beff8b8a09009f5b59a5ec715345a9", "score": "0.56338084", "text": "def stats(bureau):\n p = [agent.score for agent in bureau]\n return Stat(p)", "title": "" }, { "docid": "fe6c40be8dbc9a2b59ed77595f4e5076", "score": "0.5629163", "text": "def stats(scores):\n if not scores:\n return {}\n\n users = defaultdict(int)\n killers = defaultdict(int)\n maxlvl = 0\n maxlvl_user = None\n\n for s in scores:\n users[s.user] += 1\n\n if s.status == 'killed' and s.cause:\n killers[s['cause']] += 1\n if s.level > maxlvl:\n maxlvl = s.level\n maxlvl_user = '%d (%s)' % (maxlvl, s.user)\n\n s = {\n 'max_level': maxlvl_user,\n 'most_active':\n '%s (%d games)' % tuple(max(users.items(), key=lambda p: p[1])),\n }\n\n if killers:\n s['best_killer'] = \\\n '%s (%d kills)' % tuple(max(killers.items(), key=lambda p: p[1]))\n else:\n s['best_killer'] = '?'\n\n return s", "title": "" }, { "docid": "7ed4c12a9986f99f4f1235f396196fed", "score": "0.56263405", "text": "def get_stats():\n if current_user.battletag == os.getenv('ADMIN_TAG'):\n return jsonify({'count': User.query.count(),\n 'tags': [u.battletag for u in User.query]})\n return jsonify({}), 403", "title": "" }, { "docid": "e9f87efc57b23384b1074c56e6804023", "score": "0.56239533", "text": "def GetStats(self):\r\n self.WaitTillCompleted()\r\n extractor = sirca_get_stats.StatExtractor(stats_dict=self.stats)\r\n return extractor.GetStats()", "title": "" }, { "docid": "3bd9d6125892b75a8bf38bbd854e1ec7", "score": "0.5622947", "text": "def print_stats():\r\n\r\n print(\"______\\n\")\r\n for stat in sorted(stats.keys()):\r\n print(\" \", stat, \": \\t\", stats[stat])\r\n print(\"\\n\")", "title": "" }, { "docid": "6000959c5067bb1ac1240aafdb704bf3", "score": "0.56194496", "text": "def statistics(self):\n return 0.0, 0.0, 0", "title": "" }, { "docid": "c8a6f45f9747280c7ac377e2a1e989bf", "score": "0.56150794", "text": "def stats():\n class_dict = {\"Amenity\": \"amenities\", \"City\": \"cities\", \"Place\": \"places\",\n \"Review\": \"reviews\", \"State\": \"states\", \"User\": \"users\"}\n objs = {class_dict[cls]: storage.count(cls) for cls in class_dict}\n return jsonify(objs)", "title": "" }, { "docid": "18a708a30a77f9729a389edd71c6272d", "score": "0.561504", "text": "def get_users_info(self):\n info = {}\n for user_quest in self.user_quests.all():\n user = user_quest.user\n info[user.username] = user_quest.get_info()\n return info", "title": "" }, { "docid": "5c88c52c06ae71779819528aa4e173fd", "score": "0.55990094", "text": "def stats():\n dic = {}\n for key, val in dbClass.items():\n dic[key] = storage.count(val)\n return jsonify(dic)", "title": "" }, { "docid": "027a973517c0f0b4570347047789e6e2", "score": "0.55832464", "text": "def results2stats_dict(results):\n res = {}\n for x in Stats.statistics:\n try:\n res[x] = Stats.statistics[x].function(results)\n except:\n logger.info('stats error')\n res[x] = -1\n return res", "title": "" }, { "docid": "239862f251da37ac7b79f9f0d6a22f33", "score": "0.55831075", "text": "def user_stats(df):\n print(\"\\033[1mUSER STATICS\\033[0m\")\n print('\\n\\033[92mCalculating User Stats...\\n\\033[0m')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df[\"User Type\"].value_counts().to_frame(\"\")\n print(\"\\033[1mCounts of user types \\033[0m\", user_types)\n print()\n\n # Display counts of gender\n if \"Gender\" in 
df.columns:\n gender = df[\"Gender\"].value_counts().to_frame(\"\")\n print(\"\\033[1mCounts of gender \\033[0m\", gender)\n print()\n\n # Display earliest, most recent, and most common year of birth\n if \"Birth Year\" in df.columns:\n earliest_birth = int(df[\"Birth Year\"].min())\n recent_birth = int(df[\"Birth Year\"].max())\n common_birth = int(df[\"Birth Year\"].mode()[0])\n print(\"\\033[1mThe earliest birth year: \\033[0m\", earliest_birth)\n print(\"\\033[1mThe most recent birth year: \\033[0m\", recent_birth)\n print(\"\\033[1mThe most common birth year: \\033[0m\", common_birth)\n \n print(\"\\n\\033[92mThis took %s seconds.\\033[0m\" % (time.time() - start_time))\n print('-'*40)", "title": "" }, { "docid": "0b11758fd1b5c4d025c3786229702da1", "score": "0.5578972", "text": "def make_stat(items):\r\n stats = {}\r\n for item in items:\r\n goal = get_goal(item)\r\n if goal is None: \r\n continue\r\n time = get_time(item)\r\n stats.setdefault(goal, {\"count\" : 0, \"time\" : 0})\r\n stats[goal][\"count\"] += 1\r\n stats[goal][\"time\"] += time\r\n return stats", "title": "" } ]
928fc070f71adccefb437f5252756b01
This decorator checks for a token, verifies if it is valid and redirects to the login page if needed
[ { "docid": "074034c9511ffb2c577fbf453134a2c0", "score": "0.74882853", "text": "def token_required(func):\n @wraps(func)\n def decorated(*args, **kwargs):\n global login_token\n if not app.config[\"DEBUG\"]:\n url_token = request.args.get('token')\n if url_token is not None:\n app.logger.info(\"Using token from the URL argument\")\n if url_token == login_token:\n session[\"token\"] = url_token\n else:\n return render_error_msg(\"Invalid token\")\n else:\n try:\n session[\"token\"]\n except KeyError:\n try:\n login_token\n except NameError:\n return render_error_msg(\"No login token and session token found.\")\n else:\n return render_error_msg(\"Log in using the URL printed to the terminal when it was started.\")\n else:\n app.logger.info(\"Using the token from the session\")\n if session[\"token\"] != login_token:\n return render_error_msg(\"Invalid token\")\n\n return func(*args, **kwargs)\n return decorated", "title": "" } ]
[ { "docid": "6254f1b48b0bbeec6eb32b333e524b1a", "score": "0.83707714", "text": "def login_required(f):\n @wraps(f)\n def decorator(*args, **kwargs):\n if not valid_token():\n return redirect(url_for('views.login', **{\"continue\":request.url}))\n return f(*args, **kwargs)\n return decorator", "title": "" }, { "docid": "c43d78b6c05f79b65abd5aad3f604a7b", "score": "0.74665046", "text": "def require_auth(function):\n\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n token = session.get(\"token\")\n if token:\n if not auth.authorize_with_token(token):\n del session[\"token\"]\n abort(401, \"Invalid token.\")\n else:\n return redirect(url_for(\"login_view\", next=request.url))\n\n return function(*args, **kwargs)\n\n return wrapper", "title": "" }, { "docid": "7f06c9415336cb63d832ea29709dfe38", "score": "0.74072844", "text": "def requires_auth(f):\r\n @wraps(f)\r\n def decorated(*args, **kwargs):\r\n if 'access_token' not in flask.session:\r\n # Redirect to Login page\r\n return flask.redirect('/login')\r\n if flask.session[\"token_expires_in\"] < datetime.datetime.now():\r\n #If the access token is expired, require the user to login again\r\n return flask.redirect('/login')\r\n return f(*args, **kwargs)\r\n return decorated", "title": "" }, { "docid": "7b05c63f3289df30e5d5ff1c6e29074e", "score": "0.7200913", "text": "def login_required(f):\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n auth = request.headers.get('Authorization')\n if auth:\n try:\n auth_token = auth.split(\" \")[1]\n except IndexError as e:\n current_app.logger.debug(e)\n auth_token = ''\n else:\n auth_token = ''\n\n # Ensure token exists and is not blacklisted\n if auth_token and not BlacklistToken.query.filter_by(token=auth_token).first():\n response = User.decode_auth_token(auth_token)\n if isinstance(response, int):\n return f(*args, **kwargs)\n\n return unauthorized(\"Not logged in\")\n\n return decorated_function", "title": "" }, { "docid": "67f6b951b1ff9cb2e7471ae4baff0c46", "score": "0.70733696", "text": "def token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n if not token:\n return make_response(jsonify({'message': 'Token is missing!'}), 401)\n\n try:\n data = jwt.decode(token, 'topsecret')\n is_blacklisted_token = DisableTokens.check_blacklist(token)\n if is_blacklisted_token:\n return make_response(jsonify({'message': 'Logged out. 
log in again'}), 401)\n else:\n current_user = User.query.filter_by(username=data['username']).first()\n except:\n return make_response(jsonify({'message': 'Token is Invalid'}), 401)\n\n return f(current_user, *args, **kwargs)\n return decorated", "title": "" }, { "docid": "78b9d8fe898e7e10f321b72ff45bc97e", "score": "0.7021096", "text": "def validate_sep10_token():\n\n def decorator(view):\n def wrapper(request, *args, **kwargs):\n return check_auth(request, view)\n\n return wrapper\n\n return decorator", "title": "" }, { "docid": "5cd8b5e827936d0fea0fe2656ca0ed98", "score": "0.69750696", "text": "def login_required(func):\n @wraps(func)\n def decorator():\n if not 'user' in session:\n return redirect(url_for('login'))\n return func()\n return decorator", "title": "" }, { "docid": "bfb2a4d12bbc3d58810fd379523e43b6", "score": "0.69321424", "text": "def auth_required(func):\n @wraps(func)\n def decorator_func(*args, **kwargs):\n auth_token = None\n auth_header = request.headers.get('Authorization')\n if auth_header:\n auth_token = auth_header.split(\"Bearer \")[1]\n \n if not auth_token:\n return make_response(jsonify({\n \"status\": 401,\n \"data\": \"Unauthorized! Token required\"\n })), 401\n try:\n response = users.verify_auth_token(auth_token)\n if isinstance(response, str):\n user = users.login(username=response)\n if not user:\n return make_response(jsonify({\n \"status\": 400,\n \"message\": \"Authentication failed: Wrong username\"\n })), 400\n except:\n return make_response(jsonify({\n \"status\": 400,\n \"message\": \"Authentication failed: Invalid token\"\n })), 400\n return func(user, *args, *kwargs)\n return decorator_func", "title": "" }, { "docid": "518cd5b3d9949aca356b6f150f2d8de4", "score": "0.6922712", "text": "def check_login(f):\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n if g.user is None:\r\n return redirect(url_for('index'))\r\n elif g.user == kwargs['user']:\r\n return f(*args, **kwargs)\r\n else:\r\n return redirect(url_for('info', user=g.user))\r\n\r\n return decorated_function", "title": "" }, { "docid": "884eccf00facf508e05c886f3478ad8a", "score": "0.68849695", "text": "def token_required(f):\r\n @wraps(f)\r\n def decorated(*args, **kwargs):\r\n token = request.headers.get(AUTH_TOKEN_HEADER_NAME)\r\n\r\n if not token_exists(token):\r\n return create_response(\r\n data={'token': token},\r\n status=401,\r\n message='invalid authorization token'\r\n )\r\n if not is_valid_token(token):\r\n delete_token(token)\r\n return create_response(\r\n data={'token': token},\r\n status=401,\r\n message='expired authorization token'\r\n )\r\n\r\n update_token_expiration(token)\r\n\r\n return f(*args, **kwargs)\r\n\r\n return decorated", "title": "" }, { "docid": "d93ad657d25cb6226964823c639c6f62", "score": "0.68786496", "text": "def requires_token(token):\n def _decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if not token:\n raise RuntimeError(\"Must provide auth token\")\n else:\n return func(*args, **kwargs)\n\n return wrapper\n\n return _decorator", "title": "" }, { "docid": "e59d4a5241406bd145ca5b5a427a6fbc", "score": "0.6844828", "text": "def token_required(func):\n @wraps(func) # Presrve doc string and other goodies.\n def decorator(*args, **kwargs):\n token = request.headers.get('X-Auth-Token', None)\n if token is None:\n abort(401, message=\"Please provide X-Auth-Token header.\")\n try:\n g.user = User.verify_auth_token(token)\n return func(*args, **kwargs) # Call wraped function\n except SignatureExpired:\n abort(401, message=\"Token 
has expired.\")\n except BadSignature:\n abort(401, message=\"Invalid token provided.\")\n return decorator", "title": "" }, { "docid": "db41ad069f69ce6ac135f484a0bff80d", "score": "0.6803071", "text": "def login_required(f):\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n data = {\"username\": session.get('user', ''), \"session\": session.get('session', '')}\n status = seated.send_post(config, \"/api/auth\", data)\n\n if status['status'] == \"AUTH_OK\":\n return f(*args, **kwargs)\n\telif status['status'] == 'CONNECTION_FAILED':\n\t flash(\"The service is currently unavailable, please try again later.\", 'warning')\n return redirect(url_for('login'))\n\n return decorated_function", "title": "" }, { "docid": "94adfa9f51107df73290bf8348435b20", "score": "0.6797543", "text": "def login_required(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n role = get_role(get_path(request.url))\n current_app.role = role\n check_and_set_session(role)\n print current_app.login_manager.error\n if request.method in EXEMPT_METHODS:\n return func(*args, **kwargs)\n elif current_app.login_manager._login_disabled:\n return func(*args, **kwargs)\n elif not session.get(\"loggedin\", False) or current_app.login_manager.error:\n return redirect(current_app.login_manager.login_view)\n return func(*args, **kwargs)\n return decorated_view", "title": "" }, { "docid": "7f2ba66a565bb1c65524b9e3c7ab5997", "score": "0.676443", "text": "def login_required(func):\n @wraps(func) # this requires an import\n def wrapper():\n if 'username' not in login_session:\n return redirect('login')\n else:\n func()\n return wrapper", "title": "" }, { "docid": "a4fad9f3ccca239e0e3da0b70245fe35", "score": "0.67584544", "text": "def login_required(view):\n\n @wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for('auth.login', _external=True))\n return view(**kwargs)\n return wrapped_view", "title": "" }, { "docid": "3e104d060ef7eb160c72b1f897a7c24e", "score": "0.6753711", "text": "def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"auth.login\"))\n\n return view(**kwargs)\n\n return wrapped_view", "title": "" }, { "docid": "3e104d060ef7eb160c72b1f897a7c24e", "score": "0.6753711", "text": "def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"auth.login\"))\n\n return view(**kwargs)\n\n return wrapped_view", "title": "" }, { "docid": "f152943e9ca0ed9fbc9a32a273ccca07", "score": "0.6743491", "text": "def token_required(real_token):\n def decorator(f):\n f.gw_method = f.__name__\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n def _get_token(request):\n \"\"\"Gets token from request\"\"\"\n token = request.headers.get(\"Authorization\")\n if not token and request.method == \"GET\":\n token = request.args.get(\"token\")\n elif request.method in [\"POST\", \"PUT\"]:\n token = request.headers.get(\"Authorization\")\n\n return token\n\n def _check_token(token, real_token):\n \"\"\"Checks token\"\"\"\n if not token:\n return False, \"No token provided\"\n\n if token != real_token and token != f'Bearer {real_token}':\n return False, \"Invalid token\"\n\n return True, 'Token is valid'\n\n token = _get_token(request)\n is_valid, message = _check_token(token, real_token)\n if not is_valid:\n logger.warning('{} Invalid token: {}: {}'.format(request.url_rule, message, token))\n return {'errors': {'auth': message}}, 401\n\n return f(*args, 
**kwargs)\n return wrapper\n return decorator", "title": "" }, { "docid": "3fcd3fc8c19c7324f4090b1257a94a97", "score": "0.6728086", "text": "def check_token():\r\n\tcookie = request.cookies.get(\"token\", None);\r\n\ttoken = Utils.get_token(cookie);\r\n\tif not token:\r\n\t\treturn Utils.make_response({\r\n\t\t\t'status': 'failure',\r\n\t\t\t'reason': 'unauthorized'\r\n\t\t\t}, 403);\r\n\telse:\r\n\t\treturn Utils.make_response({\r\n\t\t\t'status': 'success'\r\n\t\t\t}, 200);", "title": "" }, { "docid": "888e31d2966f88f74e8535e870835475", "score": "0.6725575", "text": "def login_required(function=None, redirect_field_name=None, login_url=None):\n actual_decorator = user_passes_test(\n lambda u: os.path.exists(TOKEN),\n login_url=login_url,\n redirect_field_name=redirect_field_name\n )\n if function:\n return actual_decorator(function)\n return actual_decorator", "title": "" }, { "docid": "998dc28eda3fb0c133a6865660a743b9", "score": "0.6703151", "text": "def authenticated(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = session.get(\"token\")\n if token:\n user = auth.user_for_token(token)\n else:\n user = None\n if user:\n return f(user, *args, **kwargs)\n else:\n state = auth.generate_state(next_url=request.host_url)\n response = make_response(redirect(auth.login_url + '&state=%s' % state))\n response.set_cookie(auth.state_cookie_name, state)\n return response\n return decorated", "title": "" }, { "docid": "5ccaa3f34847be1d5bc8d8f967346d37", "score": "0.66932726", "text": "def login_required(view):\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for('auth.login'))\n\n return view(**kwargs)\n\n return wrapped_view", "title": "" }, { "docid": "a12beee483f6a505cfc2a74a3b2cbf36", "score": "0.6679619", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(url_for(\"login\", next=request.url))\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "a12beee483f6a505cfc2a74a3b2cbf36", "score": "0.6679619", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(url_for(\"login\", next=request.url))\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "a12beee483f6a505cfc2a74a3b2cbf36", "score": "0.6679619", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(url_for(\"login\", next=request.url))\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "2d3d3ac5759de996059871a8f0bfd33f", "score": "0.66477674", "text": "def login_required(func):\n async def wrapper(request):\n # if request.user:\n # return await func(request)\n # get session:\n session = await get_session(request)\n # print(session)\n if 'jwt_token' not in session:\n # return web.json_response({'message': 'Auth required'}, status=401)\n # redirect to login page\n location = request.app.router['login'].url_for()\n # location = '/login'\n raise web.HTTPFound(location=location)\n else:\n jwt_token = session['jwt_token']\n if not await token_ok(request, jwt_token):\n # return web.json_response({'message': 'Auth required'}, status=401)\n # redirect to login page\n location = request.app.router['login'].url_for()\n # location = '/login'\n raise web.HTTPFound(location=location)\n return await func(request)\n return wrapper", 
"title": "" }, { "docid": "cad5aa22aa43b371cb3a23564958eb57", "score": "0.6629219", "text": "def login_required(f): # f is the fcn that's immediately below the @login_required in application.py\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "8795deb98acf90e72f840824d163a7f4", "score": "0.6622876", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n g.user.id\n return f(*args, **kwargs)\n except AttributeError:\n return redirect(url_for(\"auth.login\"))\n return decorated_function", "title": "" }, { "docid": "0bccfcbd8ff83dfe0aa436618c8dc984", "score": "0.66092294", "text": "def login_required(view):\r\n @functools.wraps(view)\r\n def wrapped_view(**kwargs):\r\n \"\"\"view wrapper\"\"\"\r\n if g.user is None:\r\n return redirect(url_for('auth.login'))\r\n\r\n return view(**kwargs)\r\n\r\n return wrapped_view", "title": "" }, { "docid": "cca1675d008b074ba44d35d62da654fe", "score": "0.6606401", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "ae5e26dd92cb0377ac11a9be1c1d2c71", "score": "0.66045517", "text": "def user_token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n auth_header = None\n\n if 'Authorization' in request.headers:\n auth_header = request.headers.get('Authorization')\n\n if auth_header:\n auth_token = list(filter(None, auth_header.split(\" \")))[1]\n else:\n auth_token = ''\n\n if not auth_token:\n response = jsonify({'error': True, 'message': 'token is missing!'})\n response.status_code = 401\n return response\n\n is_token_blacklisted = BlacklistToken.blacklisted(auth_token)\n\n if is_token_blacklisted:\n response = jsonify({'error': True, 'message': 'Token is blacklisted. 
Please login again'})\n response.status_code = 401\n return response\n\n try:\n data = jwt.decode(auth_token, current_app.config['SECRET'], algorithms=['HS256'])\n current_user = User.get_by_public_id(data['public_id'])\n\n if not current_user:\n response = jsonify({'error': True, 'message': 'token is invalid'})\n response.status_code = 402\n return response\n\n if current_user.status == STATUS_USER_DEACTIVATED:\n response = jsonify({'error': True, 'message': 'You have been deactivated'})\n response.status_code = 401\n return response\n\n if current_user.status == STATUS_USER_PENDING:\n response = jsonify({'error': True, 'message': 'Your account is pending'})\n response.status_code = 401\n return response\n\n except jwt.ExpiredSignatureError:\n response = jsonify({'error': True, 'message': 'token has expired'})\n response.status_code = 401\n return response\n\n except jwt.InvalidTokenError:\n response = jsonify({'error': True, 'message': 'token is invalid'})\n response.status_code = 401\n return response\n\n return f(current_user, *args, **kwargs)\n\n return decorated", "title": "" }, { "docid": "acd5812f255bb8490ddda12ca94d0654", "score": "0.6604304", "text": "def login_required(f):\n\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n if session.get(\"Uid\"):\n return f(*args, **kwargs)\n else:\n return redirect(\"/login\")\n\n return wrapper", "title": "" }, { "docid": "87fe3af012f6992ba15155690c9887f5", "score": "0.6600698", "text": "def is_login(f):\r\n @wraps(f)\r\n def wrapper(*args, **kwargs):\r\n if g.user:\r\n return redirect(url_for('info', user=g.user))\r\n\r\n return f(*args, **kwargs)\r\n\r\n return wrapper", "title": "" }, { "docid": "29cfeb34b947bcb3c32046ec5624dd4a", "score": "0.6598583", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'username' not in login_session:\n return redirect('/login')\n return f(*args,**kwargs)\n return decorated_function", "title": "" }, { "docid": "c99ceefe0378994ffd43faa703ec78ad", "score": "0.6592376", "text": "def check_auth(request, func):\n jwt_error_str = validate_jwt_request(request)\n if jwt_error_str:\n return render_error_response(jwt_error_str)\n return func(request)", "title": "" }, { "docid": "1d7994da3c447faa7deb43433f44acaf", "score": "0.65900767", "text": "def login_required(f):\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'user' not in flask.session or not flask.session['logged_in']:\n flash('login required @ {}'.format(request.path), 'warning')\n return redirect(url_for('auth.login', next=request.url))\n return f(*args, **kwargs)\n\n return decorated_function", "title": "" }, { "docid": "21f0d59a88c4abe19a6f70ccfac87e26", "score": "0.6579311", "text": "def login_required(handler_method):\n\n def check_login(self, *args, **kwargs):\n user = get_current_user()\n if user:\n # XSRFTool(self).verify()\n return handler_method(self, *args, **kwargs)\n else:\n raise HTTPForbidden('not_logged_in')\n\n return check_login", "title": "" }, { "docid": "d9dfa53ec7dece4c72f654bfa5970341", "score": "0.6566333", "text": "def check_valid_login():\n login_valid = session.get(\"nethz_cookie\") # or whatever you use to check valid login\n\n if (request.endpoint and getattr(FLASK_SERVER.view_functions[request.endpoint],\n 'needs_login', False)\n and not login_valid):\n return redirect(\"/static/userLogin.html\", code=303)", "title": "" }, { "docid": "0eba2d9a3c584dbc2d8f18869ea6ebb3", "score": "0.6540759", "text": "def _authorise_request_token_with_login(self):\n self._request_obj(\n 
self._urls[\"validate_with_login\"],\n method=\"POST\",\n json={\n \"username\": self.username,\n \"password\": self.password,\n \"request_token\": self.request_token,\n }\n )", "title": "" }, { "docid": "d78315fbb08537493962aaacbd10870f", "score": "0.6526728", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'gplus_id' not in login_session:\n flash('You are not allowed to access there')\n return redirect('/login')\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "e33683e00a5dd0d4bfb0b182d01511ac", "score": "0.65200335", "text": "def verify_token(token):\n\n if not token:\n current_app.logger.debug(f'Token not supplied {request.base_url}')\n return False\n\n try:\n decoded_token = base64.b64decode(token).decode('utf-8')\n except UnicodeDecodeError:\n current_app.logger.debug(f'Unable to decode token {request.base_url}')\n return False # Can't decode token, so fail login\n\n valid_token, user_id = AuthenticationService.is_valid_token(decoded_token, 604800)\n if not valid_token:\n current_app.logger.debug(f'Token not valid {request.base_url}')\n return False\n\n if tm.is_pm_only_resource:\n if not UserService.is_user_a_project_manager(user_id):\n current_app.logger.debug(f'User {user_id} is not a PM {request.base_url}')\n return False\n\n tm.authenticated_user_id = user_id # Set the user ID on the decorator as a convenience\n return True # All tests passed token is good for the requested resource", "title": "" }, { "docid": "298770f376df3674f2c12fdde78c6639", "score": "0.6516456", "text": "def login_required(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n if 'username' not in login_session:\n return redirect('/login')\n else:\n return function(*args, **kwargs)\n return wrapper", "title": "" }, { "docid": "09648c78f0a17cfd9ccb91302a5eec4d", "score": "0.65043527", "text": "def login_required(f):\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n\n # login detection\n if 'uid' in login_session:\n return f(*args, **kwargs)\n else:\n message = 'You are not allowed to access there'\n return jsonify({'error', message}), 200\n\n return decorated_function", "title": "" }, { "docid": "c9e725fff6e7308cbe00a83f0705ba9c", "score": "0.650254", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "c12d290d334e7bdc2cc5f98298c33c07", "score": "0.64991933", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "c12d290d334e7bdc2cc5f98298c33c07", "score": "0.64991933", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "c12d290d334e7bdc2cc5f98298c33c07", "score": "0.64991933", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "c12d290d334e7bdc2cc5f98298c33c07", "score": "0.64991933", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 
session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "c12d290d334e7bdc2cc5f98298c33c07", "score": "0.64991933", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "c12d290d334e7bdc2cc5f98298c33c07", "score": "0.64991933", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "c12d290d334e7bdc2cc5f98298c33c07", "score": "0.64991933", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "c12d290d334e7bdc2cc5f98298c33c07", "score": "0.64991933", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "c12d290d334e7bdc2cc5f98298c33c07", "score": "0.64991933", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "c12d290d334e7bdc2cc5f98298c33c07", "score": "0.64991933", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "c12d290d334e7bdc2cc5f98298c33c07", "score": "0.64991933", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "c12d290d334e7bdc2cc5f98298c33c07", "score": "0.64991933", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "c12d290d334e7bdc2cc5f98298c33c07", "score": "0.64991933", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "c12d290d334e7bdc2cc5f98298c33c07", "score": "0.64991933", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "c12d290d334e7bdc2cc5f98298c33c07", "score": "0.64991933", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "46d2eb469ced0b58369fa91dd6a77fa4", "score": "0.6495926", "text": "def checks_login_and_csrf_status(func):\n 
@wraps(func)\n def wrapper(*args, **kwargs):\n # Check that user id exists in session\n if 'user_id' not in login_session:\n return abort(401)\n # Verify posted csrf token matches session token\n if request.cookies['_csrf'] != login_session['_csrf']:\n return abort(401)\n return func(*args, **kwargs)\n return wrapper", "title": "" }, { "docid": "23a42c687bbe724a3eb1d8c3f1161da2", "score": "0.64877015", "text": "def login_authorized(fn):\n @wraps(fn)\n def decorated_function(*args, **kwargs):\n if not 'Authorization' in request.headers:\n response = jsonify(message='Missing authorization header')\n response.status_code = 401\n return response\n\n # try parser json web token\n try:\n if 'jwt_disabled' in request.headers:\n access_token = get_token(request)\n else:\n json_web_token = parse_token(request)\n access_token = get_oauth_token(json_web_token)\n\n logger.debug('access_token: %s' % access_token)\n\n user = validate_token(access_token)\n if user is None:\n response = jsonify(message='Check user token failed')\n response.status_code = 403\n return response\n return fn(user=user, *args, **kwargs)\n\n except DecodeError:\n response = jsonify(message='Token is invalid')\n response.status_code = 401\n return response\n except ExpiredSignature:\n response = jsonify(message='Token has expired')\n response.status_code = 403\n return response\n\n return decorated_function", "title": "" }, { "docid": "c68bcf1e0423445e4f63259ad30aa811", "score": "0.6487067", "text": "def _auth_required():\n\n login_mechanisms = (\n ('token', _check_token),\n ('session', lambda: current_user.is_authenticated),\n )\n\n def wrapper(fn):\n @wraps(fn)\n def decorated_view(*args, **kwargs):\n for _, mechanism in login_mechanisms:\n if mechanism and mechanism():\n return fn(*args, **kwargs)\n return security._unauthorized_callback()\n return decorated_view\n return wrapper", "title": "" }, { "docid": "3b35693f1f38cb9874a391500e371519", "score": "0.64869636", "text": "def token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = None\n\n if \"SESSION\" in request.headers:\n token = request.headers[\"SESSION\"]\n \n if not token:\n name_space.abort(405, status = \"Token is missing\", statusCode = \"405\")\n \n if token not in tokens:\n name_space.abort(406, status = \"Invalid token\", statusCode = \"406\")\n \n return f(*args, **kwargs)\n\n return decorated", "title": "" }, { "docid": "88c0c89e5a2ac3ad0dea55212425965e", "score": "0.6485786", "text": "def token_login(request, api_token):\n try:\n user = User.objects.get(auth_token__key=api_token)\n except exceptions.ObjectDoesNotExist:\n return redirect(reverse('index'))\n\n login(request, user)\n\n return redirect(reverse('subscription'))", "title": "" }, { "docid": "b143f6618aef77c01e94b7febfb75572", "score": "0.6482826", "text": "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"student_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "3a3a916852bddc8bf5df602fce3276a0", "score": "0.64786184", "text": "def check_authorization(self):\n self.token", "title": "" }, { "docid": "d45499bf2490239bf2e96759d326ce46", "score": "0.6471098", "text": "def require_auth(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n if not self.access_token:\n raise AuthenticationRequired\n return func(self, *args, **kwargs)\n\n return wrapper", "title": "" }, { "docid": "8ad4296234cb3e4bc71b9a388a83a56c", "score": "0.64556164", "text": 
"def login_required(function):\n\n def decorator(function):\n def onCall(request, *args, **kwargs):\n try:\n token = str(request.path)\n token = token.split(\"/\")[-2]\n params = signing.loads(token, max_age=3600)\n\n if (not 'is_authenticated' in request.session) or (not request.session['is_authenticated']):\n return redirect(settings.LOGIN_URL)\n\n if (not 'username' in params) or (not params['username']):\n return HttpResponseRedirect(reverse('agency:logout_agency'))\n if (not 'username' in request.session) or (not request.session['username']):\n return HttpResponseRedirect(reverse('agency:logout_agency'))\n\n if not params['username'] == request.session['username']:\n return HttpResponseRedirect(reverse('agency:logout_agency'))\n return function(request, *args, **kwargs)\n except Exception as e:\n LOG.error(\"Errore in decorator login_required: {}\".format(str(e)), extra=set_client_ip(request))\n return HttpResponseRedirect(reverse('agency:logout_agency'))\n\n return onCall\n\n return decorator(function)", "title": "" }, { "docid": "05fdd954413b2f53587b9c37c2dc199b", "score": "0.6453231", "text": "def is_logged_in(f):\n @wraps(f)\n def wrapper(*args, **kwds):\n if 'email' not in login_session:\n return redirect(url_for('login_page'))\n return f(*args, **kwds)\n return wrapper", "title": "" }, { "docid": "58c71b14b77e1b46c40748343930c687", "score": "0.6442942", "text": "def require_login(func):\n @wraps(func)\n def wrapped(*args, **kwargs):\n if not _is_logged_in():\n return request.redirect(\"/web/login?redirect=/account/\")\n else:\n return func(*args, **kwargs)\n return wrapped", "title": "" }, { "docid": "29e4a7d8ef540f3f3a7a808b8a649b5d", "score": "0.6442689", "text": "def requires_login(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if g.user is None:\n flash(u'You need to be signed in for this page.')\n return redirect(url_for('users.login'))\n return f(*args, **kwargs)\n\n return decorated_function", "title": "" }, { "docid": "175271866f25ad5168613d971d103b52", "score": "0.64338654", "text": "def login_required(func):\n @functools.wraps(func)\n def checker(**kwargs):\n if 'logged_in' in session.keys() and session['logged_in']:\n if kwargs == {}:\n return func()\n else:\n return func(*kwargs.values())\n else:\n session['notification_active'] = True\n session['notification_title'] = \"Login required!\"\n session['notification_description'] = \"Please log in to continue.\"\n session['notification_color'] = \"warning\"\n return redirect(url_for('admin.show_admin_menu_with_login'))\n return checker", "title": "" }, { "docid": "fdc3e10614a56eb83b89e9b292bf237c", "score": "0.64173144", "text": "def dispatch(self, request, *args, **kwargs):\n self.token = None\n token_string = request.GET.get(\"login_token\", None)\n if token_string:\n # Logout to avoid potential confusion if token is attempt to switch user.\n logout(request)\n try:\n self.token = ReportToken.objects.get(token=token_string)\n if self.token.is_valid():\n login(request, self.token.member.user)\n except ReportToken.DoesNotExist:\n pass\n\n # Either token login failed or user wasn't logged in.\n if request.user.is_anonymous:\n messages.add_message(\n request,\n messages.WARNING,\n \"Login or token required to submit reports. 
(Token may be expired, invalid, or missing.)\",\n )\n return redirect(\"/\")\n\n return super().dispatch(request, *args, **kwargs)", "title": "" }, { "docid": "9d0b934dce167caa889a05982381271c", "score": "0.6401813", "text": "def requires_login(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not session.get('logged_in', None):\n return redirect('/login')\n else:\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "6d463f57fc622ffed3c1b732a97487e6", "score": "0.64011794", "text": "def login_required(view):\n \n @wraps(view)\n def inner_decorator(request,*args, **kwargs):\n \n out = createBaseResponseObject()\n \n try:\n if request.user.is_authenticated():\n return view(request, *args, **kwargs)\n \n except Exception, e:\n out['status'] = 0\n out['errors'] = [str(e)]\n return HttpResponse(json.dumps(out))\n \n out['status'] = 0\n out['errors'] = ['You must be logged in to use this feature']\n return HttpResponse(json.dumps(out))\n\n return inner_decorator", "title": "" }, { "docid": "7270db44cbd7a3b31543fa99f9262040", "score": "0.639754", "text": "def login_required(self, f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n user = self.get_identity()\n if not user:\n return self.auth_failure()\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "503e740473caa50388ed64b8f66158d0", "score": "0.6376235", "text": "def user_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = request.header.get('Authorization')\n if token:\n token = token.replace('Basic ', '', 1)\n try:\n token = base64.b64decode(token)\n g.current_user = User.verify_auth_token(token)\n except TypeError:\n abort(403)\n else:\n abort(403)", "title": "" }, { "docid": "3d2b4ff2cd72ad71a381f423df2fa207", "score": "0.6370762", "text": "def _validate_token():\n try:\n from mesclan import oauth\n except ImportError:\n _debug_flow()\n\n if app.debug or app.testing:\n _debug_flow()\n else:\n _facebook_flow(oauth)", "title": "" }, { "docid": "aba25e4f1ba2e8f998c3ebd32db789ad", "score": "0.63700515", "text": "def validate_login(self):\n if HTTP_HEADER.TOKEN not in request.headers:\n return False\n\n user = self.__validate_token(request.headers[HTTP_HEADER.TOKEN])\n if user is None:\n return False\n\n g.user = user\n return True", "title": "" }, { "docid": "de9575088d30c43b2283bf93b437d7eb", "score": "0.6366841", "text": "def login_required(f):\n @wraps(f)\n def https_redirect(*args, **kwargs):\n if not current_user.is_authenticated:\n if not current_app.debug:\n return redirect(\n url_for(\n 'auth.login',\n next=request.url,\n _scheme='https',\n _external='true'))\n return redirect(url_for('auth.login', next=request.url))\n return f(*args, **kwargs)\n return https_redirect", "title": "" }, { "docid": "ec57fc9b674332f6b5bbcae6a704fb03", "score": "0.6364505", "text": "def authorize(funct):\n def wrapper(*args):\n \"\"\"\n Wrapper function that validated token before implementation validataion\n :param args: arguments of the function that is being decorated\n :return:\n \"\"\"\n token = request.headers[\"Token\"]\n validate_token(token)\n return funct(*args)\n return wrapper", "title": "" }, { "docid": "c17db077607ea64d22e19f8f79420c95", "score": "0.636058", "text": "def login_required(secure_page):\n @wraps(secure_page)\n def wrapper(*args, **kwargs):\n userid = login_session.get('userid')\n if userid:\n return secure_page(*args, **kwargs)\n else:\n flash(\"Please login to view this page.\")\n source = request.path\n 
app.logger.debug(source)\n return redirect(url_for('Login', source_url=source))\n return wrapper", "title": "" }, { "docid": "4dd9914c2f49394725f2457a1726fee0", "score": "0.6357159", "text": "def test_valid_token_flow(monkeypatch, runner):\n monkeypatch.setattr(auth, \"validate_token\", lambda token: None)\n\n TOKEN = \"test-token\"\n\n with runner.isolated_filesystem():\n # Login\n auth.validate_and_cache_token(TOKEN)\n\n # Use the token\n assert auth.get_id_token() == TOKEN", "title": "" }, { "docid": "463d72841b2bc6cde40bd1c9bb7c97ac", "score": "0.635571", "text": "def login_required(view):\n @wraps(view)\n def inner(request, *args, **kwargs):\n if not request.user.is_authenticated() or not request.user.is_active:\n if request.is_ajax():\n # if is ajax return 403\n return JsonResponse({'login_url': settings.LOGIN_URL}, status=403)\n else:\n # if not ajax redirect login page\n return redirect(settings.LOGIN_URL)\n return view(request, *args, **kwargs)\n return inner", "title": "" }, { "docid": "5b38d00a484d3d03ab6b17e1c7be1eca", "score": "0.6354874", "text": "def auth_required(f):\n\n @wraps(f)\n def decorated(*args, **kwargs):\n auth = request.authorization\n if auth and auth.username == 'alvaro' and auth.password == '1234':\n return f(*args, **kwargs)\n\n return make_response('Could not verify your login!', 401, {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})\n\n return decorated", "title": "" }, { "docid": "20aba590643ee5f34379a3beffe84039", "score": "0.633112", "text": "def loginRequired(f):\n ########################################################################\n @wraps(f)\n def decoratedFunction(*args, **kwargs):\n try:\n tokenDir = '/tmp/tokens'\n tokenDict = dict()\n if g.user is None:\n raise exception.AuthError('')\n\n authenticated = False\n\n # load pickle file which will be used to validate token\n token = request.cookies.get(\"token\")\n if not token:\n raise exception.AuthError('Token not provided')\n\n\n filePath = \"%s/%s\" % (tokenDir, token.split('.')[-1])\n\n if not os.path.isfile(filePath):\n tempDir = '/tmp/tokens'\n tempPath = \"%s/%s\" % ('/tmp/tokens', token.split('.')[-1])\n if os.path.isfile(tempPath) and os.path.exists('/tmp/tokens/'):\n for file in os.listdir(tempDir):\n path = os.path.join(tempDir, file)\n shutil.move(path, tokenDir)\n else:\n raise exception.AuthError('Invalid token')\n\n with FileLock(filePath + \".lock\"):\n with open(filePath, \"r\") as fd:\n tokenDict = cPickle.load(fd)\n\n #print int(time.time()), '---> current time'\n\t print time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(time.time())))\n print time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(tokenDict['expireTime'])) #tokenDict['expireTime'], '--> expireTime'\n print time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(tokenDict['time'])) #tokenDict['expireTime'], '--> expireTime'\n print tokenDict\n print \"-------------------------------------\"\n if int(time.time()) > tokenDict['expireTime']:\n print tokenDict['expireTime'],\"=========\"\n #logger.error('Session has been timed out')\n os.remove(filePath)\n raise exception.AuthError('Session has been timed out')\n\n for token in tokenDict.iterkeys():\n if token == request.cookies.get(\"token\"):\n authenticated = True\n\n if not authenticated:\n #logger.error(\"Not authenticated %d\" % authenticated)\n raise exception.AuthError('')\n\n #with FileLock(filePath + \".lock\"):\n #with open(filePath, \"w\") as fd:\n #tokenDict['expireTime'] += int(\n #time.time()) - tokenDict['time']\n #cPickle.dump(tokenDict, fd)\n\n 
return f(*args, **kwargs)\n\n except Exception, e:\n stat = 'false'\n #code = 'AUTH-3001'\n mesg = getattr(e, 'value', 'Authentication error')\n flask_abort(401)\n\n return decoratedFunction", "title": "" }, { "docid": "f8eb7e0ab4e3d4ade7deab6baf4d286b", "score": "0.6317352", "text": "def api_login():\n username = request.form['login']\n password = request.form['password']\n\n \"\"\"\n Handle auth:\n \"\"\"\n\n auth = UserModel\n _hash = auth.hash_password(password)\n _verify = auth.verify_password(password, _hash) # Will return true/false is the password doesnt already exist. \n loggedIn = str(_verify)\n\n _username = UserModel.User(username)\n session[\"user\"] = username\n session[\"loggedIn\"] = True\n\n role = _username.role(username)\n session[\"role\"] = role\n\n if role == 'admin':\n session[\"admin\"] = True\n else:\n session[\"admin\"] = False\n\n auth = _username.authenticate(username, password)\n\n \"\"\"\n Handle conditional redirect:\n \"\"\"\n\n if auth == True:\n if session[\"user\"] == username:\n if session[\"loggedIn\"] == True:\n admin = session[\"admin\"]\n token = jwt.encode({\"user\": username, \"password\": password, \"exp\": datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY']) # Expiration is a reserved part of the payload in JWT\n return redirect(url_for('home.home', auth=auth, admin=admin, token=token.decode('UTF-8')))\n if auth != True:\n return redirect('/login_error')", "title": "" }, { "docid": "1af403701c924d42b72e8aa668fb87a7", "score": "0.63144726", "text": "def token_required(view_func, admin=False):\n\n @csrf_exempt\n @wraps(view_func)\n def _wrapped_view(request, *args, **kwargs):\n user = None\n token = None\n\n if ('user_id' in request.json_data) and ('token' in request.json_data):\n user = request.json_data['user_id']\n token = request.json_data['token']\n del request.json_data['token'], request.json_data['user_id']\n\n # Now that I think about it, it's a bad idea to get data on JSON reqs.\n if ('user_id' in request.GET) or ('token' in request.GET):\n user = request.GET.get('user_id')\n token = request.GET.get('token')\n\n if not (user and token):\n return HttpResponseForbidden(\"Must include 'user_id' and 'token' parameters with request.\")\n\n user = authenticate(pk=user, token=token)\n if user:\n if admin:\n if not user.is_staff:\n return HttpResponseForbidden(\"Not an admin.\")\n request.user = user\n return view_func(request, *args, **kwargs)\n\n return HttpResponseForbidden()\n\n return _wrapped_view", "title": "" }, { "docid": "6d46f66323cbaf1937669923c5af16db", "score": "0.6309126", "text": "def requires_login():\n\n def wrapper(f):\n @wraps(f)\n def wrapped(*args, **kwargs):\n if not get_user(args[0].request):\n raise HTTP_401(\"I can't let you do that\")\n return f(*args, **kwargs)\n return wrapped\n return wrapper", "title": "" }, { "docid": "dc9313f997c41af469dd26d23f5988ec", "score": "0.62944084", "text": "def requires_api_token(func):\n\n @wraps(func)\n def decorated(*args, **kwargs):\n req_token = request.headers.get('Token', None)\n our_token = current_app.config['ZPARK_API_TOKEN']\n\n if our_token is None:\n current_app.logger.error(\"Request rejected: ZPARK_API_TOKEN\"\n \" must be set in app.cfg\")\n abort(500)\n\n if req_token is None:\n current_app.logger.warning(\"Request rejected: client\"\n \" did not send a Token header\")\n abort(401)\n\n if req_token == our_token:\n return func(*args, **kwargs)\n else:\n current_app.logger.warning(\"Request rejected: Invalid\"\n \" Token header received 
from\"\n \" client\")\n abort(401)\n\n return decorated", "title": "" }, { "docid": "c8ba979623d154c1371c41d41a421d7a", "score": "0.62940973", "text": "def login_required(handler_method):\n\n def check_login(self, *args, **kwargs):\n if self.request.method != 'GET':\n self.abort(400, detail='The login_required decorator '\n 'can only be used for GET requests.')\n\n self._user = users.get_current_user()\n if not self._user:\n return self.redirect(users.create_login_url(self.request.url))\n else:\n handler_method(self, *args, **kwargs)\n\n return check_login", "title": "" }, { "docid": "1bb78f23b81292fcda083f3d196c96f2", "score": "0.6289462", "text": "def authenticated(method):\r\n @functools.wraps(method)\r\n def wrapper(self, *args, **kwargs):\r\n if not self.current_user:\r\n if self.request.method in (\"GET\", \"HEAD\"):\r\n url = self.get_login_url()\r\n if \"?\" not in url:\r\n if urlparse.urlsplit(url).scheme:\r\n # if login url is absolute, make next absolute too\r\n next_url = self.request.full_url()\r\n else:\r\n next_url = self.request.uri\r\n url += \"?\" + urlencode(dict(next=next_url))\r\n self.redirect(url)\r\n return\r\n raise HTTPError(403)\r\n return method(self, *args, **kwargs)\r\n return wrapper", "title": "" }, { "docid": "cddc0100fa7b61208f03f71c670277d2", "score": "0.6271176", "text": "def verify_token(token):\n if not token:\n try:\n token = session['token']\n except:\n pass\n user = models.User.verify_auth_token(token)\n if user is not None:\n g.user = user\n return True\n return False", "title": "" }, { "docid": "f1727f8a4d0c590c2868d7b8c88831d7", "score": "0.625879", "text": "def wrapper(*args):\n token = request.headers[\"Token\"]\n validate_token(token)\n return funct(*args)", "title": "" }, { "docid": "06e41f7506fe7a4b7b85b4028c42e825", "score": "0.62577754", "text": "def ltd_login():\n def decorator(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n get_ltd_token()\n return f(*args, **kwargs)\n\n except Exception as e:\n raise BackendError(\n 'Unexpected error while authentication with LSST the '\n 'Docs ({0})'.format(current_app.config['KEEPER_URL']),\n status_code=500,\n content=str(e))\n return decorated_function\n return decorator", "title": "" }, { "docid": "a72923fbe001a4cfe0f68228396b01c4", "score": "0.6251344", "text": "def login_required(self, f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'user_id' not in g:\n raise AuthorizationError()\n return f(*args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "0f3a3dcb38f1f52fd0b34e0bbe050a84", "score": "0.6250484", "text": "def view_login_required(func=None, **kwargs):\n if 'url' in kwargs:\n url = kwargs['url']() if callable(kwargs['url']) else kwargs['url']\n else:\n url = None\n\n url = url or app_settings.LOGIN_URL\n\n def _wrapper(func):\n def _inner(*args, **kwargs):\n request = get_request(*args, **kwargs)\n\n if not request.user.is_authenticated():\n return redirect_to(request, url, no_history=True)\n \n return func(*args, **kwargs)\n return _inner\n\n if func:\n return _wrapper(func)\n else:\n return _wrapper", "title": "" }, { "docid": "b10feff39901b87522f10d56861e6931", "score": "0.62495756", "text": "def require_logged_in_user(view_func):\n def decorated(*args, **kwargs):\n auth_header = request.headers.get('Authorization')\n if auth_header:\n try:\n auth_token = auth_header.split(\" \")[1]\n except IndexError:\n responseObject = {\n 'status': 'fail',\n 'message': 'Bearer token malformed.'\n }\n return make_response(jsonify(responseObject)), 
401\n else:\n auth_token = ''\n if auth_token:\n decoded_data = User.decode_auth_token(auth_token)\n if not isinstance(decoded_data, str):\n token_response, user = decoded_data[0], decoded_data[1]\n return view_func(*args, **kwargs, user=user, token_response=token_response)\n responseObject = {\n 'status': 'fail',\n 'message': decoded_data\n }\n return make_response(jsonify(responseObject)), 401\n else:\n responseObject = {\n 'status': 'fail',\n 'message': 'Provide a valid auth token.'\n }\n return make_response(jsonify(responseObject)), 401\n return decorated", "title": "" }, { "docid": "37601c6d7b339fd48a749b2177cde301", "score": "0.6242621", "text": "def auth_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if valid_jwt(get_jwt()):\n return func(*args, **kwargs)\n else:\n return jsonify({\"message\": \"UNAUTHORIZED USER\"}), 401\n return wrapper", "title": "" } ]
b854cc0f94f5ca5dbf66e9174e0af376
Write a JSON object to cloud storage.
[ { "docid": "afce2bb582e97c7fadb81c2706acd731", "score": "0.7463363", "text": "def json_write(bucket, path, data):\n blob = _make_blob(bucket, path)\n return blob.upload_from_string(json.dumps(data))", "title": "" } ]
[ { "docid": "87c2e2abe4ae47b3be26825827779192", "score": "0.7004237", "text": "def store_json(bucket, dataset, key, to_json):\n json_file = NamedTemporaryFile(mode=\"w+b\")\n json_file.write(json.dumps(to_json).encode())\n # Reset to the beginning:\n json_file.seek(0)\n store_file(bucket, dataset, key, json_file)\n return (bucket, key)", "title": "" }, { "docid": "f3bbd38ff916a4b55e41814497e8f904", "score": "0.6782208", "text": "def upload_json(data, gcs_upload_path):\n try:\n with tempfile.NamedTemporaryFile() as tmp_data:\n tmp_data.write(parser.json_stringify(data).encode())\n tmp_data.flush()\n storage_client = StorageClient()\n storage_client.put_text_file(tmp_data.name, gcs_upload_path)\n except Exception: # pylint: disable=broad-except\n LOGGER.exception('Unable to upload json document to bucket %s:\\n%s',\n gcs_upload_path, data)", "title": "" }, { "docid": "837770fd92a40769d752374d9876d387", "score": "0.6763322", "text": "def write_json_to_s3(data, s3_path, *args, **kwargs):\n bucket, key = s3_path_to_bucket_key(s3_path)\n s3_resource = boto3.resource(\"s3\")\n log_file = StringIO()\n json.dump(data, log_file, *args, **kwargs)\n log_obj = s3_resource.Object(bucket, key)\n log_upload_resp = log_obj.put(Body=log_file.getvalue())\n return log_upload_resp", "title": "" }, { "docid": "c4d0fed0c618dbe9a4fcb9d2ca09a7ad", "score": "0.64873815", "text": "def jsonwrite(object, target: str):\n with open(target, \"w\") as goal:\n json.dump(object, goal)", "title": "" }, { "docid": "df16a6e96d40f7eed3f77e9cf31736af", "score": "0.6430821", "text": "def write_json_string(json_string, output_gcsfile):\n schema = json.loads(json_string)\n logging.info(schema)\n fd, fname = tempfile.mkstemp()\n with open(fname, 'w') as ofp:\n json.dump(schema, ofp, sort_keys=False, indent=2)\n os.close(fd)\n exec_shell_command(['gsutil', 'cp', fname, output_gcsfile])", "title": "" }, { "docid": "759d0b7e0984be7be558035b2f655b41", "score": "0.6395738", "text": "def save(self, file_name: str, path: str = \"\") -> None:\n data = {\n \"size\" : self.__size,\n \"data\" : self.__data.tolist(),\n \"trgt\" : self.__trgt.tolist()\n }\n \n json_obj = json.dumps(data, indent=4)\n with open(path + '/' + file_name, 'w') as file:\n file.write(json_obj)", "title": "" }, { "docid": "40feeb7e36e94224941201d754a2ab72", "score": "0.6389266", "text": "def save_json_to_file(cls, file_path: str, json_object: Dict[str, Any]):\n\n #\n json_contents = json.dumps(json_object,\n sort_keys=True,\n indent=4, ensure_ascii=True)\n cls.save_file(file_path, json_contents)", "title": "" }, { "docid": "6b4692ac72b9d0ab70768b323f8dbcd8", "score": "0.6288493", "text": "def _savejson(self, fname, obj):\r\n fname = \"%s/%s.json\" % (self._directory, fname)\r\n try:\r\n s = json.dumps(obj)\r\n with open(\"%s.tmp\" % fname, 'wb') as f:\r\n f.write(s)\r\n f.flush()\r\n shutil.move(\"%s.tmp\" % fname, fname)\r\n except Exception as e:\r\n raise BackendIOException(\"Unable to save JSON file %s: %s\" \\\r\n % (fname, e))", "title": "" }, { "docid": "7cb2298e73d0af55aaa75c462189f839", "score": "0.6285415", "text": "def json_save(obj, name):\n with open(name, 'w') as fout:\n json.dump(obj, fout)", "title": "" }, { "docid": "e10794d7d5917e236edccfde3bded48a", "score": "0.62671137", "text": "def save(self):\n json_string = json.dumps(self.fields)\n with open(self.filepath, 'w', encoding='utf-8') as fh:\n fh.write(json_string)", "title": "" }, { "docid": "65b3beb0aaf2570a0f20af436f12d7ba", "score": "0.62601984", "text": "def dump_json_to_s3(name, json_obj, 
public=False, get_url=False):\n s3 = get_s3_client(unsigned=False)\n key = 'indra_network_search/' + name\n options = {'Bucket': DUMPS_BUCKET,\n 'Key': key}\n if public:\n options['ACL'] = 'public-read'\n s3.put_object(Body=json.dumps(json_obj), **options)\n if get_url:\n return s3.generate_presigned_url(\n 'get_object', Params={'Key': key, 'Bucket': DUMPS_BUCKET})", "title": "" }, { "docid": "db31300652a08a402e56ff0491efa215", "score": "0.6256401", "text": "def upload_json_file(bucket, key, body, tagging_string):\r\n\r\n # get s3 client\r\n s3 = get_s3_resource()\r\n \r\n try: \r\n reponse = s3.meta.client.put_object(Bucket=bucket, Key=key, Body=body, Tagging=tagging_string, ContentType='application/json', ContentEncoding='utf-8', ACL='public-read')\r\n print(reponse)\r\n except botocore.exceptions.ClientError as e:\r\n if e.response['Error']['Code'] == \"404\":\r\n print(\"The object does not exist.\")\r\n else:\r\n print(e)\r\n raise\r\n except Exception as e:\r\n print(e)\r\n raise", "title": "" }, { "docid": "108545eacf4c24a735ef4caf8e028d06", "score": "0.62398815", "text": "def save_json(obj: dict, file_path: str):\n with open(file_path, \"w\") as f:\n json.dump(obj, f)", "title": "" }, { "docid": "c0f88b855ed5bdf1c8808b6c4e109442", "score": "0.6182317", "text": "def savejson(obj, fp, **kw):\n\tskipkeys = kw.get('skipkeys', False)\n\tensure_ascii = kw.get('ensure_ascii', True)\n\tcheck_circular = kw.get('check_circular', True)\n\tallow_nan = kw.get('allow_nan', True)\n\tcls = kw.get('cls', None)\n\tindent = kw.get('indent', '\\t')\n\tseparators = kw.get('separators', (',', ':'))\n\tdefault = kw.get('default', None)\n\tsort_keys = kw.get('sort_keys', True)\n\twith open(fp, 'w') as fout:\n\t\tjson.dump(obj, fout, skipkeys = skipkeys, ensure_ascii = ensure_ascii, check_circular = check_circular,\n\t\t\tallow_nan = allow_nan, cls = cls, indent = indent, separators = separators, sort_keys = sort_keys, **kw)", "title": "" }, { "docid": "24f8171858166135bd0b30365f6453b8", "score": "0.61802053", "text": "def put_object(credentials, bucket, name, data, session):\n out = session.post('https://www.googleapis.com/upload/storage/'\n 'v1/b/%s/o?uploadType=media&name=%s' % (\n quote_plus(bucket), quote_plus(name)),\n headers={'Authorization': 'Bearer ' +\n credentials.access_token,\n 'Content-Type': 'application/octet-stream',\n 'Content-Length': len(data)}, data=data)\n assert out.status_code == 200", "title": "" }, { "docid": "aa3e384a0910f4faa52f149a0628da00", "score": "0.6125473", "text": "def save_to_json_file(my_obj, filename):\n with open(filename, 'w', encoding='utf-8') as fl:\n fl.write(json.dumps(my_obj))", "title": "" }, { "docid": "01a6686b419f78d88428e771c358c057", "score": "0.61084265", "text": "def save_to_json_file(my_obj, filename):\n with open(file=filename, mode=\"w\") as f:\n f.write(json.dumps(my_obj))", "title": "" }, { "docid": "2d8558e6b07e7cd98ba8b155c6fba1af", "score": "0.6055257", "text": "def write_json(self, data):\n self.set_header('Content-Type', 'application/json')\n if isinstance(data, str):\n self.write(data)\n else:\n self.write(json.dumps(data))", "title": "" }, { "docid": "28525b0ab77219a434719e004ba52e98", "score": "0.60478824", "text": "def save_to_json_file(my_obj, filename):\n with open(filename, mode='w', encoding='UTF-8') as input_file:\n input_file.write(json.dumps(my_obj))", "title": "" }, { "docid": "3467b695b607ad61ff4c0b8714b34802", "score": "0.60434943", "text": "def save(filename, obj):\n with open(filename, 'w') as outfile:\n json.dump(obj, 
outfile, indent = 1)", "title": "" }, { "docid": "5bf8028c052d8c26322cc97f0d988687", "score": "0.60337377", "text": "def save_to_json_file(my_obj, filename):\n with open(filename, \"a\", encoding=\"utf_8\") as f:\n f.write(json.dumps(my_obj))", "title": "" }, { "docid": "f81b21b8f7bd186bc35ae7fed156d790", "score": "0.6029824", "text": "def write_json_object(jsonobject, filepath, verbose = True):\n\tif verbose == True : print('Writing on ',filepath,'.......')\n\t\n\twith jsonlines.open(filepath, mode = 'w') as writer:\n\t\twriter.write(jsonobject)\n\n\tif verbose == True : print('Done writing')", "title": "" }, { "docid": "da15f5f6b2f0735752a25b857d939b7d", "score": "0.60106164", "text": "def save_to_json_file(my_obj, filename):\n import json\n with open(filename, 'w', encoding=\"utf-8\") as f:\n f.write(json.dumps(my_obj))", "title": "" }, { "docid": "63253226210258a544de631bcb944ebd", "score": "0.6010043", "text": "def write_data(formatted_list):\n df = pd.DataFrame(formatted_list, columns=[\"name\", \"group\", \"mediaId\", \"title\", \"rating\"]) \n jsonFile=convert_df_to_json(df)\n \n client = boto3.client('s3')\n client.put_object(Body=bytes(jsonFile.encode('utf-8')), Bucket=vars.DATA_BUCKET, Key=vars.JSON_DATA)", "title": "" }, { "docid": "371da518a61d5ed51f14a40fa116a62f", "score": "0.6008184", "text": "def store(self, storage_id, obj):", "title": "" }, { "docid": "6d80a32ed12049e63aded44bdde63fde", "score": "0.6006945", "text": "def save_json(data, path_json):\n with open(path_json, 'w') as file:\n json.dump(data, file, ensure_ascii=False)", "title": "" }, { "docid": "bf88b17f34585a3722cdfae418a15d16", "score": "0.5997141", "text": "async def put_object(self, bucket: str, name: str, data: bytes) -> None:\n raise NotImplementedError # pragma: no cover", "title": "" }, { "docid": "f44bb14142a24c3357f7d60c1e84200a", "score": "0.59632844", "text": "def persist(self, path: str, data: bytes):\n self.client.put_object(Bucket=self.bucket, Key=path, Body=data)", "title": "" }, { "docid": "0cbf88a1e44b04c7aaea8d7353e6f43e", "score": "0.59629077", "text": "def save_json(json_path, data):\n with open(json_path, \"w+\", encoding=\"utf-8\") as open_file:\n json.dump(data, open_file, indent=4)", "title": "" }, { "docid": "70cb42cd96e4470768f7f5917ccf943a", "score": "0.59615016", "text": "def save_to_json_file(my_obj, filename):\n import json\n with open(filename, mode='w', encoding='UTF8') as f:\n my_str = json.dumps(my_obj)\n f.write(my_str)", "title": "" }, { "docid": "683f832f6286e81dba3eb82a39847540", "score": "0.596085", "text": "def save_data():\n with open(path, 'w') as myfile:\n myfile.write(json.dumps(data))", "title": "" }, { "docid": "3683e3256cd98f83caf037e1237c879c", "score": "0.59568447", "text": "def write_json(obj, fpath):\n mkdir_if_missing(osp.dirname(fpath))\n with open(fpath, 'w') as f:\n json.dump(obj, f, indent=4, separators=(',', ': '))", "title": "" }, { "docid": "1ec478b289d20f23df1e56ac71100997", "score": "0.59540164", "text": "def save_json(\n object: Any,\n path: str,\n encoding: str = \"utf-8\",\n pretty: bool = False,\n sortkeys: bool = False,\n) -> None:\n with open(path, \"w\", encoding=encoding) as f:\n if pretty:\n indent = 2\n separators = (\",\", \": \")\n else:\n indent = None\n separators = (\", \", \": \")\n json.dump(\n object,\n f,\n indent=indent,\n sort_keys=sortkeys,\n separators=separators,\n )", "title": "" }, { "docid": "2aee1102bfdc2b4b352f7e9f7a438fe3", "score": "0.59516925", "text": "def save_to_json_file(my_obj, filename):\n\n with open(filename, 
\"w\") as file:\n json.dump(my_obj, file)", "title": "" }, { "docid": "c429ea5944d94eb35a388db3b8fd7bc8", "score": "0.59371525", "text": "def _save(data: dict, file_path):\n jdata = json.dumps(data, ensure_ascii=False)\n file_path.write_text(jdata, encoding=\"UTF-8\")", "title": "" }, { "docid": "0ea4dec9846fc9d7e23ef409cd78cd49", "score": "0.59301424", "text": "def create_storage(fs_path):\n logging.info(f\"Creating storage at: {fs_path}\")\n with open(fs_path, \"w\") as file:\n json.dump({}, file)", "title": "" }, { "docid": "3a1bb2e655d5658a5b33b99b59e06fa3", "score": "0.59301376", "text": "def save_to_json_file(my_obj, filename):\n with open(filename, mode=\"w\") as f:\n json.dump(my_obj, f)", "title": "" }, { "docid": "678b3c2b81637964f830d7df60fba049", "score": "0.59288424", "text": "def save_json(jsondata, name):\n filepath = os.path.dirname(os.path.abspath(__file__))+\"/\"+str(name)\n with open(filepath, 'w') as f:\n json.dump(jsondata, f)", "title": "" }, { "docid": "07ca1313dcb3fb8d7ce2211ee5907e38", "score": "0.5928091", "text": "def save_to_json_file(my_obj, filename):\n\n with open(filename, 'w') as f:\n json.dump(my_obj, f)", "title": "" }, { "docid": "90a514d70a421c7495f3a899443a7120", "score": "0.5900504", "text": "def save_to_json_file(my_obj, filename):\n with open(filename, mode='w', encoding=\"utf-8\") as file:\n json.dump(my_obj, file)", "title": "" }, { "docid": "6a0c57438c357e20088d079fb4508e01", "score": "0.58975345", "text": "def save_blob_as_json(filename, blob, force_write):\n try:\n if prompt_overwrite(filename, force_write):\n with open(filename, 'wt') as f:\n print_info('%s' % json.dumps(blob, indent=4, separators=(',', ': '), sort_keys=True,\n cls=CustomJSONEncoder))\n except Exception as e:\n print_exception(e)", "title": "" }, { "docid": "8af15023e59890b33618c8573677abf6", "score": "0.5897036", "text": "def save_to_json_file(my_obj, filename):\n import json\n with open(filename, \"w\") as f:\n json.dump(my_obj, f)", "title": "" }, { "docid": "4019cda0a595e4d9d26f48d37804b642", "score": "0.5894932", "text": "def save_to_json_file(my_obj, filename):\n with open(filename, 'w', encoding='utf-8') as f:\n json.dump(my_obj, f)", "title": "" }, { "docid": "4cc768e9610d259f2da794dfcb3db1be", "score": "0.5890055", "text": "def save_json(dict_obj, path, name):\n if '.json' not in name:\n name += '.json'\n with open(os.path.join(path, name), 'w') as json_file:\n json.dump(dict_obj, json_file)", "title": "" }, { "docid": "fa2fb583811fb9022fd726b95e76ef21", "score": "0.58865124", "text": "def save_to_json_file(my_obj, filename):\n with open(filename, 'w', encoding=\"utf-8\") as f:\n json.dump(my_obj, f)", "title": "" }, { "docid": "b8f8e6d1cdb2c41103e3270df9fbc03c", "score": "0.5878521", "text": "def upload_recipe(object, bucket, key):\n json_object = object\n client.put_object(\n Body=json.dumps(json_object),\n Bucket=bucket,\n Key=key\n )", "title": "" }, { "docid": "d49ec7998d76f766dbed9bd855013e1a", "score": "0.58768713", "text": "def write_to_json(name, data):\n with open(name, 'w') as outfile:\n json.dump(data, outfile)", "title": "" }, { "docid": "2e43c3049140b8cb84e1b7d2a982da16", "score": "0.5873263", "text": "def save_to_json_file(py_obj, filename):\n with open(filename, 'w') as json_file:\n json.dump(py_obj, json_file)", "title": "" }, { "docid": "8358f7cc65d329a2f0eeaf5b853cb472", "score": "0.58705544", "text": "def save_json(obj, filename: str, path: str = \"\"):\n\n def _default(obj):\n if isinstance(obj, np.generic):\n return obj.item()\n elif 
isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n raise TypeError(f\"invalid json serialization type: {type(obj)}\")\n\n with open(os.path.join(path, filename), \"w\") as fn:\n json.dump(obj, fn, indent=2, sort_keys=True, default=_default)", "title": "" }, { "docid": "44ed4cac7ad08092e41669ac7c55e1df", "score": "0.5866514", "text": "def write_json_to_file(json, filename) -> None:\n with open(filename, 'w') as outfile:\n json.dump(json, outfile)", "title": "" }, { "docid": "e6e48cae2cb3088dd217c1b25e77600f", "score": "0.5856324", "text": "def _write_json_to_tracker_file(tracker_file_path, data):\n json_string = json.dumps(data)\n _write_tracker_file(tracker_file_path, json_string)", "title": "" }, { "docid": "da658f52f6212c05268060d0621056c5", "score": "0.5843329", "text": "def json_file_write(file_name, data):\n with open(file_name, 'w') as file_data:\n json.dump(data, file_data)", "title": "" }, { "docid": "204d8336da7979b6f925752aad31a87e", "score": "0.58259076", "text": "def write_data(filepath, data):\n\n with open(filepath, \"w+\") as f:\n f.write(json.dumps(data))", "title": "" }, { "docid": "7be86e56fc527d42ea803f94471fa978", "score": "0.58257574", "text": "def save(self):\n d = dict()\n for k in FileStorage.__objects.keys():\n d[k] = FileStorage.__objects[k].to_dict()\n with open(FileStorage.__file_path, 'w') as fp:\n json.dump(d, fp)", "title": "" }, { "docid": "29be050074a79b6a80eb6afc9e982f87", "score": "0.5825563", "text": "def _write_string_to_file(bucket, name, string):\n f = StringIO.StringIO()\n f.write(string)\n f.seek(0)\n result = gcs_utils.upload_object(bucket, name, f)\n f.close()\n return result", "title": "" }, { "docid": "32337480d98e3c4649af111a511334c9", "score": "0.5816353", "text": "def write_json(jsondata, BASE_PATH, path):\r\n full_path=\"{}{}\".format(BASE_PATH, path)\r\n with open(full_path, \"w\") as db: # dump json to file.\r\n json.dump(jsondata, db, indent=4, separators=(',', ': '))", "title": "" }, { "docid": "6bdca8faf76ea8fff425e4e9ecbbdbb5", "score": "0.57876647", "text": "def json_write(data, path, **kwargs):\n with open(_fsdecode(path), 'wt') as file:\n _json_dump(data, file, **kwargs)", "title": "" }, { "docid": "88000ae56373d8a16b51674f2801c03a", "score": "0.5781652", "text": "def _store(self, path):\n logging.debug(\"Storing data\")\n\n with open(path, \"w\", encoding=\"utf-8\") as f:\n f.write(self._to_json())", "title": "" }, { "docid": "839bfc03ea888a17832ea93662c70237", "score": "0.57743996", "text": "def save_json(data, database_json_file=\"super_mart.json\"):\n with open(database_json_file, \"w\") as p: \n json.dump(data, p)", "title": "" }, { "docid": "4c16f120c604c132c8136656bb04ed40", "score": "0.57740265", "text": "def write_json(data, fh):\n json.dump(data, fh, sort_keys=True, indent=2, cls=JSONEncoder)", "title": "" }, { "docid": "b2da2618ff16f4a55caa62ff09ca279e", "score": "0.5773689", "text": "def save(self):\n with open(FileStorage.__file_path, 'w') as f:\n tmp = {}\n for key in FileStorage.__objects.keys():\n tmp[key] = FileStorage.__objects[key].to_dict()\n json.dump(tmp, f)", "title": "" }, { "docid": "7f7531a563164dc0b53e84a2546f3ccb", "score": "0.5769724", "text": "def save_to_json_file(my_obj, filename):\n if type(filename) is not str:\n raise TypeError(\"filename must be a string\")\n with open(filename, \"w\", encoding=\"utf-8\") as text_file:\n json.dump(my_obj, text_file)", "title": "" }, { "docid": "43c27c05cac286f361cab6a55e7dd171", "score": "0.5765733", "text": "def save(self):\n with 
open(FileStorage.__file_path, \"w\", encoding=\"utf-8\") as f:\n d = {k: v.to_dict() for k, v in FileStorage.__objects.items()}\n json.dump(d, f)", "title": "" }, { "docid": "a711de79e8e3beb89bf7f7ca1d970915", "score": "0.5764811", "text": "def writeJSON(fname, obj, comm=None):\n\n class MyEncoder(json.JSONEncoder):\n \"\"\"\n Custom encoder class for Numpy arrays and CaseInsensitiveDict\n \"\"\"\n\n def default(self, o):\n \"\"\"\n If input object is an ndarray it will be converted into a dict\n holding dtype, shape and the data, base64 encoded.\n \"\"\"\n if isinstance(o, np.ndarray):\n if o.flags[\"C_CONTIGUOUS\"]:\n pass\n else:\n o = np.ascontiguousarray(o)\n assert o.flags[\"C_CONTIGUOUS\"]\n if o.size == 1:\n return o.item()\n else:\n return dict(__ndarray__=o.tolist(), dtype=str(o.dtype), shape=o.shape)\n elif isinstance(o, np.integer):\n return dict(__ndarray__=int(o), dtype=str(o.dtype), shape=o.shape)\n elif isinstance(o, np.floating):\n return dict(__ndarray__=float(o), dtype=str(o.dtype), shape=o.shape)\n elif isinstance(o, CaseInsensitiveDict):\n return dict(o)\n elif isinstance(o, CaseInsensitiveSet):\n return set(o)\n else:\n # Let the base class default method raise the TypeError\n super().default(o)\n\n if (comm is None) or (comm is not None and comm.rank == 0):\n with open(fname, \"w\") as json_file:\n json.dump(obj, json_file, sort_keys=True, indent=4, separators=(\",\", \": \"), cls=MyEncoder)\n if comm is not None:\n comm.barrier()", "title": "" }, { "docid": "1c05883f86c225b026355b9436cf8a92", "score": "0.57616365", "text": "def put_json(self, url, data, **kwargs):\n\n headers = {'Content-type': 'application/json'}\n if 'headers' in kwargs:\n headers.update(kwargs['headers'])\n\n return requests.put(url,\n data=json.dumps(data),\n headers=headers,\n auth=(self.session.user_id, self.session.session_id))", "title": "" }, { "docid": "bcbc277a7ea1f2b29f9a3be2bdfc0fd3", "score": "0.57414913", "text": "def json_to_file(filename, content):\n with open(filename, 'w') as fname:\n json.dump(content, fname, indent=4,\n separators=(',', ': '),\n sort_keys=True)\n fname.write('\\n')\n print(\"Saved {} objects to {}\".format(len(content), filename))", "title": "" }, { "docid": "23a92c9cc269504ed7d140ee0d348b01", "score": "0.57370496", "text": "def write_json(self, meta, output=\"metadata.json\"):\n f = open(output, 'w')\n printable_json = json.dumps(meta, encoding=\"utf-8\", indent=3)\n f.write(printable_json)\n f.close()", "title": "" }, { "docid": "2001020eb0af2ceb90c616ee1818a608", "score": "0.5731704", "text": "def write(filename, data):\n\n json_data = json.dumps(data, sort_keys=True, indent=2)\n cache = open(os.path.expanduser(filename), 'w')\n cache.write(json_data)\n cache.close()\n\n return data", "title": "" }, { "docid": "1f28d6fdee66fb2b1d706c3464275912", "score": "0.57291526", "text": "def save(self, json_path):\n with open(json_path, 'w') as f:\n json.dump(self.__dict__, f, indent=4)", "title": "" }, { "docid": "a038895df3ad7550f14fb0161e5314ac", "score": "0.5727255", "text": "def write_as_json(self, data: JSONDict, path: str):\n logger.info(\"Write json to file %s\", path)\n json_data = json.dumps(data.to_dict(), indent=4)\n write_file(path, json_data)", "title": "" }, { "docid": 
"6b7f645e9301ebe071a0c7b2a5af4cc2", "score": "0.5722797", "text": "def save(self):\n my_dict = {k: v.to_dict() for k, v in FileStorage.__objects.items()}\n with open(FileStorage.__file_path, \"w\") as f:\n json.dump(my_dict, f)", "title": "" }, { "docid": "b147e3748d4be01e3eb8701692216b35", "score": "0.57129365", "text": "def JSONExport(Object, filename):\n\n data = JSONEncoder(Object)\n with open(filename, \"w+\") as outfile:\n json.dump(data, outfile)\n outfile.close()\n print(\"Export Of Object Data Successfull!\")", "title": "" }, { "docid": "22952925ae2c18410e9f4417f151faf6", "score": "0.5711215", "text": "def save_json(self, path):\n f = None\n try:\n f = open(path, 'r')\n json.dump(self.json, f)\n f.close()\n except IOError as e:\n if f is not None:\n f.close()", "title": "" }, { "docid": "b4d706b853e084859284c3da1c5203be", "score": "0.5704248", "text": "def write_index(self, s3_key: str, index: Dict):\n if not index:\n return\n\n try:\n object = self.s3_res.Object(self.bucket, s3_key)\n object.put(Body=json.dumps(index))\n logger.info(f\"Wrote dictionary to key: {s3_key}\")\n except BotoCoreError:\n logger.error(f\"Failed to upload dictionary to key: {s3_key}\", exc_info=True)\n raise", "title": "" }, { "docid": "7c6a0cca28d8d5f66f6c64fffcf3caf3", "score": "0.5704058", "text": "def write(self, file_path, data, num_retries=10, content_type=None, bucket=None):\n\n bucket = self.client.get_bucket(self.bucket or bucket)\n\n try:\n blob = Blob(file_path, bucket)\n except:\n blob = bucket.get_blob(file_path)\n\n try:\n data = json.loads(data)\n except:\n pass\n\n if isinstance(data, (dict, list)):\n data = json.dumps(data)\n else:\n data = data\n\n data = _to_bytes(data, encoding=\"utf-8\")\n string_buffer = BytesIO(data)\n\n blob.upload_from_file(\n file_obj=string_buffer,\n size=len(data),\n client=self.client,\n num_retries=num_retries or self.num_retries,\n content_type=_MEME_TYPES[self.content_type or content_type],\n )\n return", "title": "" }, { "docid": "9106e9e45969b5e80f9a6cce82b76c67", "score": "0.56966764", "text": "def __save_to_file__(JSON, file_name, newFile=True):\n if(newFile):\n mode = 'w'\n else:\n mode = 'a+'\n with open(str(file_name), mode) as f:\n f.writelines(JSON)", "title": "" }, { "docid": "3b71decb8a4f9a8693610440179e22a9", "score": "0.5696309", "text": "def json_write(json_data, filename, indent=4):\n with open(filename, 'w') as datafile:\n json_data = json.dump(json_data, datafile, indent=indent)\n return True", "title": "" }, { "docid": "b79dcc62e50f1c184f812f98459441d4", "score": "0.5693868", "text": "def store_database(obj):\n\n with open('message_database.json', 'w') as json_file:\n json_file.write(json.dumps(obj))\n\n return True", "title": "" }, { "docid": "3b7de658b0798338580e7919bf34d2c6", "score": "0.5692252", "text": "def writeJson(self, file_path, tensor):\n with open(file_path, 'w') as f:\n json.dump(tensor.tolist(), f)", "title": "" }, { "docid": "8b021a3db91c157f6f191693c4680ff9", "score": "0.56816584", "text": "def store_json_to_file(contents, filename):\n _file = os.path.join(os.getcwd(), \"app/raw\", filename)\n with open(_file, \"w\") as fp: \n json.dump(contents, fp, indent=4)", "title": "" }, { "docid": "a6049e19f49c2b1e8d7630084c41fe5d", "score": "0.5679866", "text": "def save_json(data: Union[list, dict], filepath: str) -> None:\n with open(filepath, 'w', encoding = 'utf-8') as f:\n json.dump(data, f)", "title": "" }, { "docid": "797fa2fa25a26de4c925ea9e3ef9c17a", "score": "0.5678551", "text": "def write_json(path_json, dictionary):\n 
if os.path.exists(path_json):\n raise FileExistsError(f\"{path_json}\")\n with open(path_json, \"w\") as outfile:\n json.dump(dictionary, outfile)", "title": "" }, { "docid": "f4aeb17be6568e466641835f390f253d", "score": "0.56750554", "text": "def store(self):\n if self.time == 0:\n self.storage.write(self.json_str())\n else: # it is array of objects so if it is not first make ,\n self.storage.write(\",\" + self.json_str())", "title": "" }, { "docid": "e3351c9f440d5f901aa5c86c83bfda11", "score": "0.5674374", "text": "def save_json(data, uri):\n with open(uri, 'w') as file:\n json.dump(data, file, indent=2)", "title": "" }, { "docid": "5d84c026cca118af5f66d227ece05a32", "score": "0.5674062", "text": "def save_json(cls, filepath):\n with open(filepath, 'w') as jfile:\n json.dump(cls.save_dict(), jfile, indent=4)", "title": "" }, { "docid": "9b5541774a10dfe18d99e7ad251de2d1", "score": "0.56673235", "text": "def save(self):\n if self.is_null():\n return\n\n from Acquire.ObjectStore import ObjectStore as _ObjectStore\n from Acquire.Service import get_service_account_bucket \\\n as _get_service_account_bucket\n\n bucket = _get_service_account_bucket()\n key = self._get_key()\n\n _ObjectStore.set_object_from_json(bucket=bucket, key=key,\n data=self.to_data())", "title": "" }, { "docid": "87cd01eeafd764709903ac755d332977", "score": "0.56658787", "text": "def _putObject(self, containerName, key, value):\n\n try:\n self.swiftConnection.put_object(containerName, key , contents=value, content_type='text/plain')\n print('Object added with key %s' %key)\n\n except Exception as exp:\n print('Exception = %s' %exp)", "title": "" }, { "docid": "0959533c88f2398886f3bf49bf3769ec", "score": "0.5661241", "text": "def commit(self):\r\n with open(self.file_name, 'w') as f:\r\n simplejson.dump(json_objects, f)", "title": "" }, { "docid": "cc4081d50552f888a1a50f681edd5060", "score": "0.565381", "text": "def store_obj(obj_json, new_dir):\n #new_dir = os.path.join(src_dir, obj_json['title'])\n with open(new_dir, \"w\") as open_file:\n str_ = json.dumps(obj_json, indent=1)\n str_ = the_replacer(str_)\n open_file.write(str_)\n open_file.close()\n return", "title": "" }, { "docid": "1319a13fdeff45c75cb5f851ec931f9b", "score": "0.5644295", "text": "def save(self):\n\n diccionario = {}\n for obj_id in FileStorage.__objects.keys():\n obj = FileStorage.__objects[obj_id]\n diccionario[obj_id] = obj.to_dict()\n\n with open(FileStorage.__file_path, mode=\"w\",\n encoding=\"UTF8\") as textfile:\n dict1 = json.dumps(diccionario)\n textfile.write(dict1)", "title": "" }, { "docid": "8ef21fa80cb8ce969b22f778fb37828c", "score": "0.5641739", "text": "def saveJson(fileName, o):\n with io.open(fileName, 'w', encoding='utf-8', newline='') as f:\n f.write(_unicode(json.dumps(o, ensure_ascii=False)))", "title": "" }, { "docid": "cdd04be656182765836310692aa81238", "score": "0.56344336", "text": "def _writeJSON(self, jsonDoc : dict):\n fname = self._getAbsoluteFileLocation(jsonDoc)\n self.log.debug(\"Writing JSON at %s\", fname)\n with open(fname, \"wb\") as f:\n f.write(self._compressJSON(jsonDoc))", "title": "" }, { "docid": "2d9df5c5a7083279e71b6fc3420c042c", "score": "0.562474", "text": "def storeData(jsonDoc):\n dbSession = DynamoDbSession()\n session = dbSession.getSession()\n\n dynamodb=session.resource('dynamodb')\n\n table = dynamodb.Table('pastebin')\n\n result = table.put_item(\n Item=jsonDoc\n )\n print(result)", "title": "" }, { "docid": "87c5c8cc86f1b53cd6b9f60e94e5eee6", "score": "0.5623791", "text": "def 
write_storage(fs_path: str, alerts_log: Dict[str, Alert]):\n try:\n # TODO: investigate why we need to call __dict__\n serializable_alerts_log = {}\n\n for key in alerts_log.keys():\n serializable_alerts_log[key] = alerts_log[key].__dict__\n\n with open(fs_path, \"w\") as outfile:\n json.dump(serializable_alerts_log, outfile)\n except:\n logging.error(\n f\"An error occurred writing to storage with path: {fs_path} and log: {alerts_log}\",\n exc_info=True,\n )", "title": "" }, { "docid": "38afd7ce0a9a7639b7549ac327f8af1f", "score": "0.56135875", "text": "def write_json(data, filename):\n with open(filename, 'w') as f:\n json.dump(data, f)", "title": "" }, { "docid": "74f7e599d5ec1b2fefa749c167c3a469", "score": "0.5608112", "text": "def write_json(filename, json_data):\n with open(filename, \"w\") as file:\n json.dump(json_data, file)", "title": "" }, { "docid": "fd15f5fe4154cea600591a32f2a53f92", "score": "0.55992335", "text": "def write(file_path, file_data):\n outfile = open(file_path, 'w')\n json.dump(file_data, outfile, indent=4, sort_keys=True)\n outfile.close()", "title": "" }, { "docid": "39a596903dbba931dd33c922ae58ab7a", "score": "0.55968416", "text": "def write_object_from_dict(bucket: str, key: str, data: Dict, aws_auth: Dict[str, str] = {}) -> str:\n if not isinstance(data, dict):\n raise TypeError(\"Object data must be dictionary type\")\n\n return write_object_from_bytes(bucket, key, json.dumps(data).encode(), aws_auth)", "title": "" }, { "docid": "4646b54fb445c112c8e8e80a9c16a24a", "score": "0.5591987", "text": "def putJSONDisk(File=None, Update=None):\n try:\n FLAG = False # return variable\n FILE = File\n DATA = Update\n SIZE = len(DATA)\n JSON = dict()\n # check if DATA is valid dictionary\n if isinstance(DATA, dict) and SIZE:\n # file created if not present\n with open(FILE, \"a+\") as INFILE:\n try:\n INFILE.seek(0) # reset cursor\n JSON = json.load(INFILE)\n JSON.update(DATA)\n open(FILE, \"w\").close() # flush the file\n json.dump(JSON, INFILE, indent=4) # write updated data\n # error as failed to load JSON\n except ValueError as e:\n INFILE.truncate() # flush data in memory\n open(FILE, \"w\").close() # flush the file\n json.dump(DATA, INFILE, indent=4) # write data\n # return true if succeed\n FLAG = True\n else:\n raise TypeError(\"`Dictionary` TypeError or Empty #{}.\".format(SIZE))\n # in case of exception\n except Exception as e:\n APP.logger.error(\"`putJSONDisk`: Failed to save file `{}`.\\n{}\" \\\n .format(FILE, e))\n # return FLAG\n return(FLAG)", "title": "" } ]
5a5ae203ea2c110f171c5dc86de46448
concatenate files and print on the standard output
[ { "docid": "ca75e9e0396c765c4871f1ef36b0373c", "score": "0.6244764", "text": "def cat(args):\r\n # for every file, attempt to open, read every line, and print\r\n for argument in args[2:]:\r\n file = open(argument)\r\n lines = file.readlines()\r\n for line in lines:\r\n print(line, end=\"\")\r\n file.close()", "title": "" } ]
[ { "docid": "23e5d8240e71cd89332d10b1504d6198", "score": "0.7242691", "text": "def concat_files(self, in_files, out_file, concat_opts=None):\n concat_file = f'{out_file}.concat'\n self.write_debug(f'Writing concat spec to {concat_file}')\n with open(concat_file, 'w', encoding='utf-8') as f:\n f.writelines(self._concat_spec(in_files, concat_opts))\n\n out_flags = list(self.stream_copy_opts(ext=determine_ext(out_file)))\n\n self.real_run_ffmpeg(\n [(concat_file, ['-hide_banner', '-nostdin', '-f', 'concat', '-safe', '0'])],\n [(out_file, out_flags)])\n self._delete_downloaded_files(concat_file)", "title": "" }, { "docid": "eee23c5a558a7fc807d77e86fc50991f", "score": "0.6905139", "text": "def concat(filenames, outpath):\n (cat.__getitem__(filenames) > outpath)()", "title": "" }, { "docid": "7eaef831c60efb8b4dced5f28ac8e0ac", "score": "0.68908", "text": "def concatenate_results():\n\tk = 0\n\tlist_of_txt = sorted(glob.glob('./Test_Results/*.txt'))\n\tfile1 = open(\"./Test_Results/Results.txt\",\"w\")\n\tfile1.close()\n\tfor file_name in list_of_txt:\n\t\twith file(list_of_txt[k], 'r') as original: data = original.read()\n\t\twith file(\"./Test_Results/Results.txt\", 'a') as modified: modified.write(data + \"\\n\")\n\t\tk +=1", "title": "" }, { "docid": "bdafd7ba360657c819586c8860f4ec4c", "score": "0.6728014", "text": "def combine_files():\n\n n_files = len(cat)\n print_source_names()\n\n # if more than 1 file combine them with DBCON\n if n_files > 1:\n\n run_dbcon(0, 1)\n\n # and keep adding in one if there are more\n for ii in range(n_files-1, 1, -1):\n run_dbcon(-1, ii)\n cat.zap_entry(-2) # zap previous dbcon to save space \n\n # remove uv files\n for ii in range(n_files):\n cat.zap_entry(0)", "title": "" }, { "docid": "f60f9f8ace869295c084eeb72dbdb1d7", "score": "0.6687743", "text": "def concat_files(sample_id, sample_list, out_dir):\n\tif os.path.isdir(out_dir):\n\t\tpass\n\telse:\n\t\tmake_dir(out_dir)\n\t\tprint(\"Made new directory for results\")\n\t\t\n\tfor file in sample_list:\n\t\tif \"L001_R1\" in file:\n\t\t\tL001_R1 = file\n\t\telif \"L002_R1\" in file:\n\t\t\tL002_R1 = file\n\t\telif \"L001_R2\" in file:\n\t\t\tL001_R2 = file\n\t\telif \"L002_R2\" in file:\n\t\t\tL002_R2 = file\n\n\tcmd1 = \"cat {0} {1} > {2}/R1.fastq.gz\".format(L001_R1,L002_R1,out_dir)\n\tcmd2 = \"cat {0} {1} > {2}/R2.fastq.gz\".format(L001_R2,L002_R2,out_dir)\n\t\n\trun_cmd(cmd1)\n\trun_cmd(cmd2)\n\treturn", "title": "" }, { "docid": "54c0a073f96283389e43a2e883b07275", "score": "0.66774434", "text": "def cat_files(files,output_file):\n \n # check that the files exist\n file_list=list(filter(os.path.isfile,files))\n \n try:\n stdout=open(output_file,\"w\")\n except EnvironmentError:\n sys.exit(\"ERROR: Unable to open file: \" + output_file)\n \n try:\n subprocess.check_call([\"cat\"]+file_list,stdout=stdout)\n except (subprocess.CalledProcessError,EnvironmentError):\n sys.exit(\"ERROR: Unable to cat files.\")", "title": "" }, { "docid": "87e4942a34edf28712a1c946d3144134", "score": "0.66299933", "text": "def cat_files(files, out_path):\n assert type(files) is list, \"Files have to be in a list\"\n cat_command = [\"cat\"]\n cat_command.extend(files)\n subprocess.Popen(cat_command, stdout=open(out_path, \"w+\"), stderr=subprocess.PIPE)\n assert os.path.isfile(out_path) is True, \"Subprocess call didn't work. 
Make sure regex gives full path\"\n return out_path", "title": "" }, { "docid": "3604a4f03348e75825a02e3d6b070b1b", "score": "0.6553597", "text": "def concatenate_files(file_paths, output_file_path):\n with open(output_file_path, 'wb') as out_file:\n for file_path in file_paths:\n with open(file_path, 'rb') as in_file:\n # 100MB per writing chunk to avoid reading big file into memory.\n shutil.copyfileobj(in_file, out_file, 1024*1024*100)", "title": "" }, { "docid": "6782072bd95192e90933e8fa5c359408", "score": "0.65450996", "text": "def run(self):\n\t\tif not self.quiet:\n\t\t\tprint \"Combining files\"\n\n\t\ti = 0\n\t\tinFiles = list(self.inFiles) # deep copies the list\n\t\twhile True:\n\t\t\tcurrentWriteDir = mkdtemp(prefix=str(i) + \"_\", dir=self.tmpdir) + '/'\n\t\t\t# Tuplize Files into our special tuple\n\t\t\tcurrentTuples = self.__tuplizeFiles(currentWriteDir, inFiles)\n\t\t\tprint \"currentTuples:\", currentTuples\n\t\t\tprint \"currentWriteDir:\", currentWriteDir\n\t\t\t# Run Hadding\n\t\t\tself.__haddMultiple(currentWriteDir, currentTuples)\n\t\t\t# Check output files\n\t\t\tcurrentReadDir = currentWriteDir\n\t\t\tinFiles = listdirwithpath(currentReadDir)\n\t\t\t# If we have one file left, we're done\n\t\t\tif len(inFiles) == 1:\n\t\t\t\tif not self.quiet:\n\t\t\t\t\tprint \"Copying final file:\", inFiles[0], \"-->\", self.outfile\n\t\t\t\tcopy2(inFiles[0], self.outfile)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ti += 1\n\t\t# Cleanup output files in the tmp directory\n\t\tself.__cleanup()", "title": "" }, { "docid": "65693c7b7e8a96319472fedce748c3c5", "score": "0.65250725", "text": "def run(self):\n if not self.quiet:\n print \"Combining files\"\n\n i = 0\n inFiles = list(self.inFiles) # deep copies the list\n while True:\n currentWriteDir = mkdtemp(prefix=str(i) + \"_\", dir=self.tmpdir) + '/'\n # Tuplize Files into our special tuple\n currentTuples = self.__tuplizeFiles(currentWriteDir, inFiles)\n # Run Hadding\n self.__haddMultiple(currentWriteDir, currentTuples)\n # Check output files\n currentReadDir = currentWriteDir\n inFiles = listdirwithpath(currentReadDir)\n # If we have one file left, we're done\n if len(inFiles) == 1:\n if not self.quiet:\n print \"Copying final file:\", inFiles[0], \"-->\", self.outfile\n copy2(inFiles[0], self.outfile)\n break\n else:\n i += 1\n # Cleanup output files in the tmp directory\n self.__cleanup()", "title": "" }, { "docid": "6776da602c41c358942f22134fbacefa", "score": "0.65077746", "text": "def concatenate_faa(*args: [Path], outname: Path):\n str_arg_list = [str(f) for f in args]\n outname.touch(exist_ok=True)\n for str_arg in str_arg_list:\n cmd = f\"cat {str_arg} >> {outname}\"\n run_subprocess(cmd)\n return outname", "title": "" }, { "docid": "785573071099929bea9db18858597f31", "score": "0.6451455", "text": "def concatenate_files(\n filenames: list,\n output_file: str,\n concat_dimension: str = \"time\",\n variables: Optional[list] = None,\n new_attributes: Optional[dict] = None,\n) -> None:\n with Concat(filenames, output_file, concat_dimension) as concat:\n concat.get_constants()\n concat.create_global_attributes(new_attributes)\n concat.concat_data(variables)", "title": "" }, { "docid": "96f0ec2db9ddbd1eafced58482c030b1", "score": "0.6422016", "text": "def concat_file(file_list, output_file):\n combined_csv = pd.concat([pd.read_csv(f) for f in file_list ]) #combine all files in the list\n combined_csv.to_csv( output_file, index=False, encoding='utf-8-sig') #export to csv with uft-8 encoding", "title": "" }, { "docid": 
"c831d07ae440256f07c743bb8893b026", "score": "0.6419786", "text": "def _combine_files(input_files, output_file):\n with open(output_file, mode='wb') as out:\n for input_file in input_files:\n out.write(open(input_file, mode='rb').read())", "title": "" }, { "docid": "6d9f1548155a59797860dd5f3cdfebc6", "score": "0.6413411", "text": "def concatenate(dest: Path, *sources: Path, basedir: Path) -> None:\n\n comment = \"\"\"\n<!--#slipbox-metadata\nfilename: {}\n-->\n\"\"\"\n with dest.open(\"w\") as file:\n for src in sources:\n filename = str(src.relative_to(basedir))\n print(comment.format(filename), file=file)\n print(src.read_text(), file=file)", "title": "" }, { "docid": "172d36958096ec86d2d44259e09222b9", "score": "0.6409352", "text": "def combine(args):\n file_names = []\n file_names_reading = False\n output_name = \"\"\n output_name_reading = False\n\n for arg in args:\n if arg == \"-combine\" or arg == \"-comb\":\n file_names_reading = True\n output_name_reading = False\n elif arg == \"-out\":\n output_name_reading = True\n file_names_reading = False\n elif file_names_reading:\n file_names.append(arg)\n elif output_name_reading:\n output_name = arg\n \n if not file_names: return\n if output_name == \"\":\n output_name = raw_res_fold + \"combine_tmp.txt\"\n combine_files(file_names, output_name)", "title": "" }, { "docid": "7b032ccdac27318ccb038c4fe0777250", "score": "0.64072627", "text": "def cat(files, path):\n dst = open(path, 'w')\n for f in files:\n with open(f, 'r') as src:\n dst.write(src.read())\n dst.close()", "title": "" }, { "docid": "e495397f8f578bb01234fb416e557727", "score": "0.63825285", "text": "def cat_files(inputf, ext='.clean'):\n files = get_file_list(inputf, ext)\n for file in files:\n with open(inputf + '/' + file, 'r') as f:\n for line in f:\n print(line.strip())", "title": "" }, { "docid": "eca20cc8a2935a788673c2866a24ebac", "score": "0.63213754", "text": "def merger(file_a, file_b, fill):\n out = \"All_\" + fill + \"_consensi.fa\"\n command = \"cat \" + file_a + \" \" + file_b + \" > \" + out\n os.system(command)\n return out", "title": "" }, { "docid": "c2b082339cc54603a93f54cbb88aa3fb", "score": "0.6290609", "text": "def combine_concatenate(self, output):\n\n # create root node for the full compendium\n outputRoot = ElementTree.Element('compendium', version='5');\n \n # insert the other files into the full compendium's tag\n for filename in self.filenames:\n outputRoot.extend(ElementTree.parse(filename).getroot().getchildren())\n\n # write the compendium to the file by creating a new element tree from the root and writing it\n return ElementTree.ElementTree(outputRoot).write(output, encoding='UTF-8')", "title": "" }, { "docid": "efae6a86f757fc7a112254a1d2ab6787", "score": "0.62700826", "text": "def cat_files(files, outfilename, gzip=False):\n\n import shutil\n\n if gzip:\n import gzip as gz\n\n outhandle = gz.open\n else:\n outhandle = open\n\n with outhandle(outfilename, \"wb\") as f_out:\n for f in files:\n with open(f, \"rb\") as f_in:\n shutil.copyfileobj(f_in, f_out)", "title": "" }, { "docid": "a1800032de87f7d8902bc3ee606a49cf", "score": "0.62589127", "text": "def cat_files(files,outfilename,gzip=False):\n\n import shutil\n\n if gzip:\n import gzip as gz\n outhandle= gz.open\n else:\n outhandle = open\n\n with outhandle(outfilename, 'wb') as f_out:\n for f in files:\n with open(f, 'rb') as f_in:\n shutil.copyfileobj(f_in, f_out)", "title": "" }, { "docid": "d0578c42768075d4810a78e036fcbf11", "score": "0.62480927", "text": "def concat(self, name):\n merge 
= PdfFileMerger()\n n = len(self.paths)\n c = 1\n for f in self.paths:\n progress_bar(c / n * 100)\n with open(f, 'rb') as f:\n merge.append(PdfFileReader(f))\n c += 1\n merge.write(os.path.join(self.build_dir, \"%s.pdf\" % name))", "title": "" }, { "docid": "2d9f5e396f1e6b81d773179ffb019763", "score": "0.62456346", "text": "def multi_cat(filename):\n\n return cat(filename)", "title": "" }, { "docid": "770bff49939bc404070f25efeb8edfdc", "score": "0.62027395", "text": "def combine_files(file_names, output_name):\n input = []\n max_lines = -1\n for name in file_names:\n f = open(name, mode='r')\n lines = f.readlines()\n max_lines = max(max_lines, len(lines))\n input.append(lines)\n\n res = \"\"\n for i in range(max_lines):\n for lines in input:\n try:\n l = lines[i].strip()\n res += l\n except IndexError:\n timestamp = (i-1) * measuring_interval\n res += empty_tcp_line(timestamp, \",\")\n res += f\"\\n\"\n \n f = open(output_name, mode='w')\n f.write(res)\n f.close()", "title": "" }, { "docid": "4b6080c37ab98117100640b31e417bb9", "score": "0.61796194", "text": "def merge():\n files = []\n for x in os.listdir(\".\"):\n if x.endswith(\"ts\"):\n files.append(x)\n if not files:\n return\n cmd = f\"\"\"ffmpeg -i \"concat:{'|'.join(files)}\" -acodec copy -vcodec copy -absf aac_adtstoasc output.mp4\"\"\"\n os.system(cmd)", "title": "" }, { "docid": "40215de881d3eedc84135723c46a69ba", "score": "0.6171235", "text": "def main():\n import pathlib\n\n input_dir = pathlib.Path('test_data/things_kinds')\n\n file_path_names = i_walk_dir_for_filepaths_names(str(input_dir))\n\n csv_file_path_names = ifilter(karld.io.is_file_csv, file_path_names)\n\n out_prefix = \"\"\n out_dir = pathlib.Path('out_data/things_kinds')\n out_filename = \"combined_things.csv\"\n\n csv_files_to_file(\n chain.from_iterable,\n out_prefix,\n str(out_dir),\n out_filename,\n csv_file_path_names)", "title": "" }, { "docid": "5af38c24efa431292b2f35bbc5fa639e", "score": "0.61482507", "text": "def run_concatenation(directory):\n\n concat_file = directory + os.sep + \"compressed_PDBS.txt\"\n print(\n \"Start Concatenation: To separate files use the \\\n file_concatenation_and_compression.py in the Utility script folder.\"\n )\n file_list = glob.glob(directory + os.sep + \"*\")\n file_list = [os.path.abspath(x) for x in file_list]\n\n with open(concat_file, \"a+\") as f:\n for file_name in file_list:\n f.write(get_file_info(file_name))\n\n job_list = tuple([(file_path,) for file_path in file_list])\n print(\"\\tFinish Concatenation\")\n print(\"\\tRemoving files that were concatenated\")\n mp.multi_threading(job_list, -1, del_files)\n print(\"\\tCompressing file\")\n compress_file(concat_file)\n if os.path.exists(concat_file + \".gz\"):\n del_files(concat_file)\n print(\"Finished Compression\")", "title": "" }, { "docid": "cf2300655b6038214bea36e4a9b2a03d", "score": "0.60945624", "text": "def concat_asset_files(filenames, joinWithSemicolon=False):\n contents = [\"/* %s */\\n\\n %s\" % (filename, file_get_contents(filename)) for filename in filenames]\n if joinWithSemicolon:\n return \"\\n\\n;\\n\\n\".join(contents) \n else:\n return \"\\n\\n\\n\\n\".join(contents)", "title": "" }, { "docid": "a7c504a37a99d02683fbf959a002af47", "score": "0.6086993", "text": "def concatcsv(in_files):\n import os.path as op\n from nipype.utils.filemanip import split_filename\n\n if not isinstance(in_files, list):\n return in_files\n if isinstance(in_files[0], list):\n in_files = in_files[0]\n first = open(in_files[0], 'r')\n path, name, ext = 
split_filename(in_files[0])\n out_name = op.abspath('concat.csv')\n out_file = open(out_name, 'w')\n out_file.write(first.readline())\n first.close()\n for in_file in in_files:\n file_to_read = open(in_file, 'r')\n file_to_read.readline() # scrap first line\n for line in file_to_read:\n out_file.write(line)\n return out_name", "title": "" }, { "docid": "10a63212c5aed204266c5ef8fb382286", "score": "0.60648596", "text": "def concatcsv(in_files):\r\n import os.path as op\r\n from nipype.utils.filemanip import split_filename\r\n\r\n if not isinstance(in_files, list):\r\n return in_files\r\n if isinstance(in_files[0], list):\r\n in_files = in_files[0]\r\n first = open(in_files[0], 'r')\r\n path, name, ext = split_filename(in_files[0])\r\n out_name = op.abspath('concat.csv')\r\n out_file = open(out_name, 'w')\r\n out_file.write(first.readline())\r\n first.close()\r\n for in_file in in_files:\r\n file_to_read = open(in_file, 'r')\r\n scrap_first_line = file_to_read.readline()\r\n for line in file_to_read:\r\n out_file.write(line)\r\n return out_name", "title": "" }, { "docid": "a5b08b0a0f82fdf4c123b377eecc5ff9", "score": "0.60395235", "text": "def concatenate():\n alldata = []\n files = []\n for item in os.listdir(settings.DATA_DIR):\n if item.endswith(\".csv\"):\n files.append(item)\n \n # keep the header of the first file\n first = pd.read_csv(os.path.join(settings.DATA_DIR, files[0]), header = 1, \n usecols = SELECT, dtype = dt, index_col=False, na_values = [''])\n first.drop(first.tail(3).index, inplace = True) #delete comments at the end of the data file\n\n alldata.append(first)\n \n # drop the header in the rest of the files\n files = files[1:]\n for f in files:\n data = pd.read_csv(os.path.join(settings.DATA_DIR, f), header = 1, \n usecols = SELECT, dtype = dt, index_col=False, na_values = [''])\n data.drop(0, inplace = True) # drops header\n\n data.drop(data.tail(3).index, inplace = True) #trim comments out of df\n \n alldata.append(data)\n \n # concatenate all data into one file and resets index\n alldata = pd.concat(alldata, axis=0).reset_index(drop=True)\n \n # Saving the pandas dataframe \n alldata.to_pickle('processed/assembled.pkl')", "title": "" }, { "docid": "0e3676a642a33398d3281c922d6eb124", "score": "0.60288894", "text": "def merge(input_files, output_file):\n with open(output_file, 'w') as wfd:\n for f in input_files:\n with open(f, 'r') as fd:\n shutil.copyfileobj(fd, wfd)", "title": "" }, { "docid": "4ce7bc9a11be98f4ada90ba44a1f95ef", "score": "0.60211384", "text": "def cat(filename):\n filename = filename[0]\n with open(filename) as f:\n for line in f:\n print(line, end=\"\")", "title": "" }, { "docid": "419468c4126dcb5c83d4528fbb60a1b6", "score": "0.6004886", "text": "def mergeFiles(inFiles=[]):\n files = [open(fn) for fn in inFiles]\n with contextlib.nested(*files):\n with open('output', 'w') as f:\n f.writelines(heapq.merge(*files))", "title": "" }, { "docid": "c7a413fb4e140cc0f29ddcc8c175eaa3", "score": "0.600071", "text": "def output_merge (output_name):\n\tsubprocess.Popen([\"cat blast_out_%s_* >> %s\" % (output_name,output_name)],shell=True).wait()\n\tsubprocess.Popen([\"rm blast_out_%s_*\" % (output_name)],shell=True).wait()", "title": "" }, { "docid": "5054ecf7d8c6c71b35cf6613856d0450", "score": "0.60000414", "text": "def concat_wav_files(outfname, args):\r\n fnames = []\r\n for x in args:\r\n if len(x) > 0:\r\n if len(x) == 1 and isinstance(x, list) \\\r\n and not isinstance(x, str):\r\n fnames.extend(x)\r\n else:\r\n fnames.append(x)\r\n else:\r\n pass\r\n\r\n 
with wave.open(outfname,\r\n 'wb') as wav_out:\r\n for wav_path in fnames:\r\n with wave.open(wav_path,\r\n 'rb') as wav_in:\r\n if not wav_out.getnframes():\r\n wav_out.setparams(wav_in.getparams())\r\n wav_out.writeframes(wav_in.readframes(wav_in.getnframes()))\r\n return(outfname)", "title": "" }, { "docid": "3816e07fcb30974f4e52a574844c3b77", "score": "0.5998666", "text": "def combine_files(*inputs, output):\n fig = combine_figs(*[(load_single_fig(fn), label) for label, fn in inputs],\n unit=\"px\")\n fig.save(output)", "title": "" }, { "docid": "3a05a0b1286725bc9aa1889241a9b469", "score": "0.5994436", "text": "def combine_clips(clips):\n logger.info('Starting the concatenation process.')\n success = False\n clip_description = \"Included Twitch Clips:\\n\"\n combined_clip_name = \"combined.mp4\"\n combined_clip_path = os.path.dirname(clips[0][\"file_name\"]) + \"\\\\\" + combined_clip_name\n os.chdir(os.path.dirname(clips[0][\"file_name\"]))\n\n # re-encode each clip since the bitrate and fps may change with each clip\n with open(\"clips_to_combine.txt\", 'w', encoding='utf-8') as outfile:\n outfile.write(\"file \" + intro_clip_path + \"\\n\")\n for index, clip in enumerate(clips):\n clip_to_find = clip[\"file_name\"]\n slug_from_clip = os.path.basename(clip_to_find).split('_')[1].split('.')[0]\n clip_path = glob.glob(os.path.dirname(clip_to_find) + '\\\\*_' + slug_from_clip + '.mp4', recursive=False)[0]\n success, encoded_clip_path = encode_clip(clip, clip_path)\n if success:\n clip_description = clip_description + \"Clip #\" + str(index + 1) + \" \" + clip[\"broadcaster\"] + \": \" + clip[\"broadcaster_url\"] + \"\\n\"\n outfile.write(\"file \" + os.path.normpath(encoded_clip_path).replace('\\\\', '/').replace('\\'', '\\\\\\'') + \"\\n\")\n outfile.write(\"file \" + outro_clip_path + \"\\n\")\n outfile.close()\n\n args = [\"-y\", \"-f\", \"concat\", \"-safe\", \"0\", \"-i\", \"clips_to_combine.txt\", \"-c:a\", \"aac\",\n \"-c:v\", \"libx264\", \"-preset\", \"slow\", \"-crf\", \"18\", \"-pix_fmt\", \"yuv420p\",\n \"-s\", \"1920x1080\", \"-r\", \"60\", \"-vbr\", \"5\", \"-ac\", \"2\", \"-ar\", \"44100\", \"-vsync\", \"0\",\n combined_clip_path]\n run_ffmpeg(args)\n success = os.path.isfile(combined_clip_path)\n\n # save the description of the combined clips\n with open(os.path.dirname(combined_clip_path) + \"\\combined_description.txt\", 'w', encoding='utf-8') as outfile:\n outfile.write(clip_description)\n outfile.close()\n\n return success, combined_clip_path, clip_description", "title": "" }, { "docid": "8245b5fd698f0fe0ffb5734dca96e500", "score": "0.5989189", "text": "def __call__(self, files, outfilename):\n self.get_all(files)\n self.write(outfilename)", "title": "" }, { "docid": "b42401817fa6028615f7a35296d8d2a2", "score": "0.59797335", "text": "def merge_fasta_files (fasta_file_1, fasta_file_2, fasta_file_out):\n\tf_out = open(fasta_file_out, 'w')\n\tf1_in = open(fasta_file_1, 'rU')\n\tfor line in f1_in:\n\t\tf_out.write(line)\n\tf1_in.close()\n\tf_out.write('\\n')\n\tf2_in = open(fasta_file_2, 'rU')\n\tfor line in f2_in:\n\t\tf_out.write(line)\n\tf2_in.close()\n\tf_out.close()", "title": "" }, { "docid": "0fcb3a52b857197f2891003c364d1487", "score": "0.59650844", "text": "def join_files(first_file, second_file, destination_file):\n \n exit_log_dir()\n\n if debug:\n print 'in tunnel.py join_files'\n \n\n \n if os.path.exists(first_file):\n if debug:\n print 'located file:', first_file\n if os.path.exists(second_file):\n if debug:\n print 'located second file:', second_file\n 
print 'cating the first file into the destination file'\n os.popen(\"cat %s > %s \" % (first_file, destination_file))\n if debug:\n print 'cating the second file into the destination file'\n os.popen(\"cat %s >> %s \" % (second_file, destination_file))\n if debug:\n print 'your final file is named:', destination_file\n else:\n print 'unable to locate file:', second_file\n sys.exit(1)\n return 0", "title": "" }, { "docid": "84dd1bf82a17dc971e5a369b13426c9f", "score": "0.5960711", "text": "def combine_fasta( sequence_files, output_file ):\n with FastaWriter( output_file ) as handle:\n for filename in sequence_files:\n try:\n for record in FastaReader( filename ):\n handle.writeRecord( record )\n except:\n log.warn('Could not open \"%s\" as Fasta' % fasta)\n check_output_file( output_file )\n return output_file", "title": "" }, { "docid": "cc0b4cdf31f0e63d79598584b981755a", "score": "0.59575313", "text": "def combine_audios(audio_files, combined_audio_file):\n combined_segment = None\n for audio_file in audio_files:\n segment = AudioSegment.from_file(\n audio_file,\n 'aiff',\n )\n if combined_segment is None:\n combined_segment = segment\n else:\n combined_segment += segment\n os.system('rm %s' % audio_file)\n\n combined_segment.export(\n combined_audio_file,\n format='aiff',\n )\n log.info('Saved combined audios to %s', combined_audio_file)", "title": "" }, { "docid": "9df213bdf8a785837762875562f91a34", "score": "0.59531814", "text": "def catenate(self):\n\n jobs = []\n input_files = []\n sample_name = []\n catenate_fasta = os.path.join(\"catenate\", \"seqs.fna\")\n\n for readset in self.readsets:\n merge_directory = os.path.join(\"merge\", readset.sample.name)\n merge_file_prefix = os.path.join(merge_directory, readset.name + \".extendedFrags.fastq\")\n\n # Find input readset FASTQs first from previous FLASh job,\n input_files.append(merge_file_prefix)\n sample_name.append(str(readset.sample.name).replace(\"_\", \".\"))\n\n if config.param('qiime_catenate', 'map_file'):\n job = qiime.catenate(\n input_files,\n sample_name,\n catenate_fasta\n )\n job.name = \"catenate\"\n job.samples = self.samples\n jobs.append(job)\n else:\n mapbuild_job = tools.py_ampliconSeq(\n [],\n [],\n 'map_build',\n \"\"\"-s {samples}\"\"\".format(\n samples=','.join(sample_name)\n )\n )\n catenate_job = qiime.catenate(\n input_files,\n sample_name,\n catenate_fasta\n )\n catenate_job.samples = self.samples\n jobs.append(concat_jobs([\n mapbuild_job,\n catenate_job\n ], name=\"catenate\"))\n\n return jobs", "title": "" }, { "docid": "6e2a33ef990c6183baf3a268e07a9d5a", "score": "0.5949362", "text": "def join_files(self,args, files):\n if files == \"compras\":\n cbte = \"REGINFO_CV_COMPRAS_CBTE.txt\"\n exp_cbte = \"compras_cbte.txt\"\n ali = \"REGINFO_CV_COMPRAS_ALICUOTAS.txt\"\n exp_ali = \"compras_alicuotas.txt\"\n if files == \"ventas\":\n cbte = \"REGINFO_CV_VENTAS_CBTE.txt\"\n exp_cbte = \"ventas_cbte.txt\"\n ali = \"REGINFO_CV_VENTAS_ALICUOTAS.txt\"\n exp_ali = \"ventas_alicuotas.txt\"\n for z in args:\n try:\n with ZipFile(z) as myzipfile:\n with myzipfile.open(cbte) as txt:\n with open(exp_cbte, mode=\"a\", newline=\"\\r\\n\") as exp:\n text = str(txt.readlines()).replace(\"'\",\"\").replace(\"[\",\"\").replace(\"]\",\"\").replace(\"b\",\"\").replace(\",\",\"\").split('\\\\r\\\\n')[1:]\n for l in text:\n if l != \"\":\n l = l[1:] + \"\\n\"\n exp.write(l)\n with myzipfile.open(ali) as txt:\n with open(exp_ali, mode=\"a\", newline=\"\\r\\n\") as exp:\n text = 
str(txt.readlines()).replace(\"'\",\"\").replace(\"[\",\"\").replace(\"]\",\"\").replace(\"b\",\"\").replace(\",\",\"\").split('\\\\r\\\\n')[1:]\n for l in text:\n if l != \"\":\n l = l[1:] + \"\\n\"\n exp.write(l)\n except Exception as e:\n raise e", "title": "" }, { "docid": "e11a27b17c17326c65ca1e5fb4db44fd", "score": "0.59445035", "text": "def concat_scripts(args):\n \n file = None\n full_script = []\n for script in args:\n try:\n file = open(script, \"rU\")\n contents = file.read()\n full_script.append(contents)\n if not contents.endswith(\"\\n\"):\n full_script.append(\"\\n\")\n finally:\n if not file is None:\n file.close()\n \n return \"\".join(full_script)", "title": "" }, { "docid": "a54a8b382f5dd6a187764f0fe1075970", "score": "0.5941044", "text": "def main() -> None:\n\n args = get_args()\n\n if not os.path.isdir(args.out_dir):\n os.makedirs(args.out_dir)\n\n num_files, num_seqs = 0, 0\n for fh in args.files:\n # open an output file in the output directory\n out_file = os.path.join(args.out_dir, os.path.basename(fh.name))\n print(fh.name, '->', out_file)\n out_fh = open(out_file, 'wt')\n\n # for each line/sequence from the input file:\n # write the transcribed sequence to the output file\n # update the number of files processed\n for dna in fh:\n out_fh.write(dna.rstrip().replace('T','U'))\n\n\n \n print('Done! Wrote')", "title": "" }, { "docid": "3c06b3c10ff724adaf129ac137def2e5", "score": "0.59407884", "text": "def combine(filenames, fout, site=[None], overwrite=False, verbose=False):\n if os.path.exists(fout) and not overwrite:\n return fout\n cmd = [\n 'gdalbuildvrt',\n ]\n if not verbose:\n cmd.append('-q')\n if site[0] is not None:\n bounds = get_vector_bounds(site)\n cmd.append('-te %s' % (' '.join(map(str, bounds))))\n cmd.append(fout) \n cmd = cmd + filenames\n if verbose:\n print 'Combining %s files into %s' % (len(filenames), fout)\n #print ' '.join(cmd)\n #subprocess.check_output(cmd)\n os.system(' '.join(cmd))\n return fout", "title": "" }, { "docid": "1a21c39613d41f490a5077dc57597de6", "score": "0.5936047", "text": "def concat(args):\n\n print('CONCATENATE START')\n\n # define commands\n # file of all blast hits\n cmd = 'cat {args.outputdir}/*.result > {args.outputdir}/concat.txt'.format(args=args)\n hp.run_cmd(cmd, args.verbose, 0)\n # all fasta entries\n cmd = 'cat {args.outputdir}/*.fasta > {args.outputdir}/above_threshold.fa'.format(args=args)\n hp.run_cmd(cmd, args.verbose, 0)\n\n # set of all input IDs from the concatenated fasta file\n with open(args.outputdir + '/above_threshold.fa', 'r') as g:\n allids = {i[1:] for i in g.read().split('\\n') if i and i[0] == '>'}\n\n # set of IDs seen so far\n seenids = set()\n\n # file of top blast hits\n tophitsfile = open(args.outputdir + '/top.concat.txt', 'w')\n # file filtered with Ioan's prescription\n ifilterfile = open(args.outputdir + '/ifilter.concat.txt', 'w')\n\n # Ioan: Top number of BLAST hits to parse through in order to determine whether top hit can be trusted as truly non-human\n topchunk = 10\n # a counter\n minicounter = 0\n # a line representing a top hit\n topline = None\n # a filtering boolean (if true, filter out line)\n filterbool = False\n\n # glob blast files\n myfiles = glob.glob(args.outputdir + '/*.result')\n f = fileinput.input(files=myfiles)\n for line in f:\n linelist = line.strip().split()\n myid = linelist[0]\n # ID not yet seen (i.e., is top hit)\n if not myid in seenids:\n tophitsfile.write(line)\n seenids.add(myid)\n\n # this will skip on the loop's first iteration\n if topline:\n # 
print previous top line\n if not filterbool:\n ifilterfile.write(topline + '\\n')\n\n # here we're assuming fmt is:\n # qseqid, sseqid, saccver, staxids, pident, nident, length, mismatch, gapopen, gaps, qstart, qend, qlen, qframe, qcovs, sstart, send, slen, sframe, sstrand, evalue, bitscore, stitle\n topbitscore = float(linelist[21])\n topstaxids = linelist[3]\n topline = line.strip()\n\n # reset counter\n minicounter = 0\n # reset boolean (if tophit human, preemptively filter)\n if topstaxids == humantaxid:\n filterbool = True\n else:\n filterbool = False\n #print(myid + ' ' + str(minicounter) + ' ' + str(filterbool))\n # keep on checking results if filter flag hasn't gone high and #lines < topchunk\n elif (not filterbool) and minicounter < topchunk:\n # here we're assuming fmt is:\n # qseqid, sseqid, saccver, staxids, pident, nident, length, mismatch, gapopen, gaps, qstart, qend, qlen, qframe, qcovs, sstart, send, slen, sframe, sstrand, evalue, bitscore, stitle\n staxids = linelist[3]\n evalue = float(linelist[20])\n bitscore = float(linelist[21])\n filterbool = ioanfilter(staxids, evalue, bitscore, topbitscore)\n minicounter += 1\n #print(myid + ' ' + str(minicounter) + ' ' + str(filterbool))\n\n # do last entry\n if not filterbool:\n ifilterfile.write(topline)\n\n f.close()\n tophitsfile.close()\n ifilterfile.close()\n\n # set of IDs that didn't blast\n # print(allids)\n # print(seenids)\n noblastids = allids - seenids\n\n # get fasta file of entries that didn't blast\n filecount = hp.fastaidfilter(args.outputdir + '/above_threshold.fa', args.outputdir + '/no_blastn.fa', noblastids)\n\n if not args.noclean:\n cmd = 'rm {args.outputdir}/*.result {args.outputdir}/*.fasta'.format(args=args)\n hp.run_cmd(cmd, args.verbose, 0)\n\n print('No blast hits for: ' + ', '.join(list(noblastids)))\n\n if args.nosge:\n pass\n else:\n # concat blast logs and remove folder\n print('concatenate blast logs')\n cmd = 'head -100 {args.logsdir}/* > {args.outputdir}/log.blast'.format(args=args)\n hp.run_cmd(cmd, args.verbose, 0)\n\n if not args.noclean:\n shutil.rmtree(args.logsdir)\n\n print('CONCATENATE END')", "title": "" }, { "docid": "552fc894194db9643f334a5be609abb6", "score": "0.59333324", "text": "def combineFiles(inPaths, outPath):\n with open(outPath, 'w') as outfile:\n for fname in inPaths:\n with open(fname) as infile:\n outfile.write(infile.read())", "title": "" }, { "docid": "95d793486205b1a343cfe0ceda35f0b7", "score": "0.5928905", "text": "def concat_images(file_li, outname, mode=\"horiz\", del_input=True):\n\n if mode == \"horiz\":\n append = \"+append\"\n elif mode == \"vert\":\n append = \"-append\"\n\n formated_cmd = [\"convert\", append]\n\n for file_name in file_li:\n formated_cmd.append(file_name)\n\n formated_cmd.append(outname)\n subprocess.call(formated_cmd)\n\n if del_input:\n for file_name in file_li:\n os.remove(file_name)\n\n return", "title": "" }, { "docid": "be2f96f34092ef6074e6d6f6b6fefbc0", "score": "0.5925992", "text": "def concatenate_tmp_files(self):\n csv_files = [\n filename for filename in self.tmp_dir.iterdir() if filename.suffix == \".csv\"\n ]\n frames_list = []\n logger.info(f\"Merging previously downloaded files\")\n try:\n for filename in csv_files:\n tmp_df = pd.read_csv(\n filename,\n index_col=None,\n header=0,\n na_values=\"None\",\n keep_default_na=True,\n dtype=self.columns_output,\n )\n frames_list.append(tmp_df)\n concat_df = pd.concat(frames_list, axis=0, ignore_index=True)\n except Exception:\n logger.exception(f\"An error occured while merging the 
files!\")\n raise\n logger.info(f\"Successfully merged the files!\")\n return concat_df", "title": "" }, { "docid": "89173d321e725f85c7036e63e108cb47", "score": "0.59198457", "text": "def main():\n dir = sys.argv[1]\n (runs, o_s, errs, batch_stderr, shells, versions) = get_file_names(dir)\n\n process_file_list(dir, runs, 'concatenated_run_logs.txt')\n process_file_list(dir, o_s, 'concatenated_stdout.txt')\n process_file_list(dir, errs, 'concatenated_stderr.txt')\n process_file_list(dir, versions, 'concatenated_versions.txt')\n\n handle_batch_errs(dir, batch_stderr)", "title": "" }, { "docid": "0978c24e4da5011104d6a90ff968c93b", "score": "0.5916263", "text": "def combine_(args):\n combine(args.datasets, args.output)", "title": "" }, { "docid": "6a7486b8c36f93a0a08a1acafd1b1404", "score": "0.5912825", "text": "def append_files(reader1, reader2):\n out = []\n\n for row in reader1:\n out.append(row)\n\n for row in reader2:\n out.append(row)\n\n return out", "title": "" }, { "docid": "54cd4c1e8ec4e56c3d76e9525fa62799", "score": "0.59117013", "text": "def cat_fepout(fep_dir, outfile):\n # get list of all *.fepout file in this fep_dir\n fep_file = sorted(glob.glob(fep_dir+'/*.fepout'), key=numericalSort)\n\n # loop through all *.fepout files and write to the summary file\n with open(outfile, 'w') as output:\n print('Concatenating {}'.format(outfile))\n for fname in fep_file:\n with open(fname) as infile:\n output.write(infile.read())\n\n return outfile", "title": "" }, { "docid": "f319f18924b7e7f0a4c44dd3d7a3394f", "score": "0.58967894", "text": "def merge_files(fns_in, fn_out):\n with open(fn_out, 'w') as fout:\n for fn_in in fns_in:\n with open(fn_in) as fin:\n for line in fin:\n fout.write(line)", "title": "" }, { "docid": "d33f1d64ac9e02f98779118012dbf0e0", "score": "0.5894308", "text": "def _combine_files(orig_files, base_out_file, data, fill_paths=True):\n orig_files = [x for x in orig_files if x and utils.file_exists(x)]\n if not orig_files:\n return None\n out_file = \"%s-combine%s\" % (utils.splitext_plus(base_out_file)[0],\n utils.splitext_plus(orig_files[0])[-1])\n with open(out_file, \"w\") as out_handle:\n for orig_file in orig_files:\n with open(orig_file) as in_handle:\n for line in in_handle:\n if fill_paths and line.startswith(\"file\"):\n line = _fill_file_path(line, data)\n out_handle.write(line)\n out_handle.write(\"\\n\\n\")\n return out_file", "title": "" }, { "docid": "b107342e84c35bf2dfe568888fc85cb6", "score": "0.58832914", "text": "def combineFile(file1, file2, output):\r\n data = []\r\n n = 0\r\n # Read all the data from both files\r\n with open(file1, \"r\") as f:\r\n for line in f:\r\n data.append(line)\r\n with open(file2, \"r\") as f:\r\n for line in f:\r\n data.append(line)\r\n print(\"\\nCombining:\", file1, \"and\", file2)\r\n print(len(data), \"data points detected\")\r\n snOut = open(output, \"w\")\r\n for i in range(0, len(data)):\r\n index = random.randint(0, len(data) - 1) # Write the elements to file in a random order\r\n snOut.write(data.pop(index))\r\n n += 1\r\n snOut.close()\r\n print(n, \"data points written randomly to\", output) # Print how many data points were written to file\r\n # End combineFile()\r", "title": "" }, { "docid": "88e25f56e05525a46866c8fa666fcde0", "score": "0.5845041", "text": "def main():\n args = get_args()\n files = args.files\n outdir = args.outdir\n\n if not os.path.isdir(outdir):\n os.mkdir(outdir)\n\n read=0\n for i, file in enumerate(files):\n if not os.path.isfile(file):\n warn('\"{}\" is not a 
file.'.format(os.path.basename(file)))\n continue\n else:\n save_path = outdir\n basename = os.path.basename(file)\n newName = os.path.join(save_path, basename)\n \n out_dir = open(newName, 'w+')\n with open(file, 'r') as fh:\n read+=1\n lines = fh.read().splitlines()\n word_list=[]\n for line in lines:\n for word in line.split():\n clean = re.sub('[^a-zA-Z0-9]', '', word)\n consonants = re.sub('[aeiouAEIOU]', '', string.ascii_letters)\n match = re.match('^([' + consonants + ']+)(.+)', clean)\n if match:\n out_dir.write('-'.join([match.group(2), match.group(1) + 'ay']) + ' ')\n else:\n out_dir.write(word + '-yay' +' ')\n out_dir.write('\\n')\n\n if read == 0:\n print('Done, wrote 0 files to \"{}\".'.format(outdir))\n elif read ==1:\n print('Done, wrote 1 file to \"{}\".'.format(outdir))\n else:\n print('Done, wrote {} files to \"{}\".'.format(read, outdir))", "title": "" }, { "docid": "50e5cfa285d33d2c21c1ae049a237c30", "score": "0.5835188", "text": "def do_combine(self, input_files, output_file):\n reverse = self.input1_reverse.get() != 0\n\n pdfmanipulation.pdf_merge_files(\n input_files,\n output_file,\n reverse=reverse)\n\n base_dir = os.path.split(input_files[0])[0]\n self.parameters[Operation.DIR_COMBINE.name]['input1'] = base_dir\n self.parameters[Operation.DIR_COMBINE.name]['input1_reverse'] = reverse\n self.parameters[Operation.DIR_COMBINE.name]['output_path'] = os.path.split(output_file)[0]", "title": "" }, { "docid": "d82e00b87dbd8e6956985db097600368", "score": "0.58178616", "text": "def combine(datasets, output):\n if which(ncrcat) is None or which(ncpdq) is None:\n raise EnvironmentError(\"you must have {} and {} installed.\".format(ncrcat, ncpdq))\n cat(sorted(datasets), output)", "title": "" }, { "docid": "61e08893013fb3a09335eb79c75260bc", "score": "0.5801555", "text": "def combine_fastq( sequence_files, output_file ):\n with FastqWriter( output_file ) as handle:\n for filename in sequence_files:\n try:\n for record in FastqReader( filename ):\n handle.writeRecord( record )\n except:\n log.warn('Could not open \"%s\" as Fastq' % fasta)\n check_output_file( output_file )\n return output_file", "title": "" }, { "docid": "46437ee74f6a4c1be60f4b3b41c4f0b3", "score": "0.5789782", "text": "def merge_results(listfiles, outfile, pb_pos):\n with open(outfile, 'a') as dst:\n for k, v in tqdm(enumerate(listfiles), desc=\"Merging results\", unit=\"files\", position=pb_pos):\n with open(v, 'r') as src:\n shutil.copyfileobj(src, dst)\n src.close()\n dst.close()", "title": "" }, { "docid": "704ea424f1fb4a89ee147994c1122872", "score": "0.57833934", "text": "def compile_outputs(self, paths=None):\n lines = []\n # TODO: Move to writer module. 
merge is dependent on writer-specific formatting \n output_path = self.params[(\"io\", \"output_path\")]\n\n if (output_path is None) | (output_path == \"\"): \n print(f\"Please set the output_path before saving\")\n return False\n\n os.makedirs(output_path, exist_ok=True)\n output_path = Path(output_path).joinpath(f\"{microtime()}-merged\")\n os.makedirs(output_path, exist_ok=True)\n print(f\"Compiling outputs\")\n\n if self.params[(\"writer\",\"merge\",\"merge_summary_files\")]: \n files = []\n for path in paths: \n try: \n files.append(str(Path(path).joinpath(\"summary_output.csv\")))\n except Exception as e: \n print(f\"Summary file not located in {path}: {e}\")\n \n if len(files) > 1: \n df = pd.concat(pd.read_csv(file) for file in files)\n elif len(files) == 1: \n df = pd.read_csv(files[0])\n elif len(files) == 0: \n return None\n \n filename = f\"{output_path}/summary_output_merged.csv\"\n df.to_csv(filename)\n\n print(f\"summary output: {len(df)} rows from {len(files)} files saved to {Path(filename).name}\")\n\n if self.params[(\"writer\",\"merge\",\"merge_full_output_files\")]: \n files = []\n for path in paths: \n try: \n files.append(list(Path(path).glob(\"*full_output.csv\"))[0])\n except Exception as e: \n print(f\"full output not located in {path}: {e}\")\n \n if len(files) > 1: \n df = pd.concat(pd.read_csv(file) for file in files)\n elif len(files) == 1: \n df = pd.read_csv(files[0])\n elif len(files) == 0: \n return None\n \n filename = f\"{output_path}/full_output_merged.csv\"\n df.to_csv(filename)\n print(f\"full output: {len(df)} lines from {len(files)} files saved to {Path(filename).name}\")\n\n if self.params[(\"writer\",\"merge\",\"merge_obj_images\")]: \n path = Path(output_path).joinpath(\"objs\")\n os.makedirs(path, exist_ok=True)\n files = []\n for path in paths: \n files.extend(list(Path(path).joinpath(\"objs\").glob(\"*.png\")))\n print(f\"object images: found {len(files)} images\")\n [shutil.copy(file, str(Path(output_path).joinpath(\"objs\").joinpath(str(Path(file).name)))) for file in files] \n print(f\"object images: copied {len(files)} images\")\n \n if self.params[(\"writer\",\"merge\",\"merge_frames\")]:\n path = Path(output_path).joinpath(\"frames\")\n os.makedirs(path, exist_ok=True)\n files = dict()\n for path in paths: \n for name in Path(path).joinpath(\"frames\").glob(\"*.png\"): \n if name.name not in files: files[name.name] = str(name)\n \n print(f\"frames: found {len(files)} images\")\n [shutil.copy(files[name], str(Path(output_path).joinpath(\"frames\").joinpath(name))) for name in files]\n print(f\"frames: copied {len(files)} images\")\n\n print(f\"Data merge complete\")", "title": "" }, { "docid": "a47bf028027a904a51e472d5e0997811", "score": "0.5777704", "text": "def combine_beam(bfile_list, outname=None):\n bfile_list.sort()\n dat_list = []\n for bfile in bfile_list:\n dd = np.load(bfile)\n dat_list.append(dd)\n all_dat = np.concatenate(dat_list, axis=0)\n if outname is None:\n return all_dat\n else:\n np.save(outname, all_dat)\n return", "title": "" }, { "docid": "373550c21c7209068728cfe43fb46f60", "score": "0.5774767", "text": "def merge_files(\n self, output_file: str, input_files: Optional[List[str]] = None\n ) -> None:\n raise NotImplementedError()", "title": "" }, { "docid": "53ed3d72f0c3c865486c07a3748f8026", "score": "0.5773205", "text": "def merge(self):\n if self.erroneous():\n return\n\n # Checking against v_name here is redundant (at least for now)\n if not (self.v_name and self.a_name):\n return\n\n m_name = 
f\"{self.name}.{opts.merge_ext}\" # merged name\n t_name = f\"{self.name}.txt\" # txt name\n\n try:\n # Print .txt for FFmpeg's concat\n with open(t_name, \"w\") as f:\n for _ in range(opts.repeat):\n print(f\"file 'file:{self.v_name}'\", file=f)\n\n # Loop footage until shortest stream ends\n # Concatenated video (via list) counts as one long stream\n command = [\n opts.ffmpeg_path, \"-y\", \"-v\", \"error\",\n \"-f\", \"concat\", \"-safe\", \"0\",\n \"-i\", f\"file:{t_name}\", \"-i\", f\"file:{self.a_name}\",\n ]\n if opts.duration:\n command.extend([\"-t\", opts.duration])\n command.extend([\"-c\", \"copy\", \"-shortest\", f\"file:temp_{m_name}\"])\n\n subprocess.run(command, check=False)\n finally:\n if os.path.exists(t_name):\n os.remove(t_name)\n\n # Merging would break when using <...>.mp4 both as input and output\n os.replace(f\"temp_{m_name}\", m_name)\n\n if not opts.keep:\n if self.v_name != m_name:\n os.remove(self.v_name)\n os.remove(self.a_name)", "title": "" }, { "docid": "b56b7cc0fb5455ac9af4a0bf76f36898", "score": "0.5773003", "text": "def _combine_all_output(self):\n output_parser = HeadPoseOutputParser()\n image_counter = 0\n for attention_yes_no_dir in os.listdir(self._REL_PATH_TO_OPENFACE_OUTPUT_MAIN_DIR):\n if attention_yes_no_dir != \".gitkeep\":\n current_dir = self._REL_PATH_TO_OPENFACE_OUTPUT_MAIN_DIR + \"/\" + attention_yes_no_dir\n for image_dir in os.listdir(current_dir):\n image_counter += 1\n outputfile_path = current_dir + \"/\" + image_dir + \"/\" + image_dir + self._OUTPUT_FILE_EXTENSION\n arff_data_line = output_parser.extract_valid_head_poses_from_output_file(outputfile_path, \\\n image_counter)\n self._store_dataline_in_class_list(arff_data_line)\n print (outputfile_path)\n print (arff_data_line + \"!!!\")\n\n self._write_arff_datalines()", "title": "" }, { "docid": "1e0ef9cd17200408bc3fd1b0c42e333f", "score": "0.57694167", "text": "def catFiles(filesToCat, catFile):\n if len(filesToCat) == 0: #We must handle this case or the cat call will hang waiting for input\n open(catFile, 'w').close()\n return\n maxCat = 25\n system(\"cat %s > %s\" % (\" \".join(filesToCat[:maxCat]), catFile))\n filesToCat = filesToCat[maxCat:]\n while len(filesToCat) > 0:\n system(\"cat %s >> %s\" % (\" \".join(filesToCat[:maxCat]), catFile))\n filesToCat = filesToCat[maxCat:]", "title": "" }, { "docid": "9b6bb41c2a10e0e5220d9535d66b3a53", "score": "0.5769162", "text": "def combine_pdf_files():\n merger = PdfFileMerger()\n selected_pdfs_list = []\n selected_pdfs = filedialog.askopenfilenames()\n\n # adds selected PDFs to list for later use\n for pdfs in selected_pdfs:\n selected_pdfs_list.append(pdfs)\n\n # select file name and save location of final PDF output\n final_pdf_file_path = filedialog.asksaveasfilename(defaultextension='.pdf')\n with open(final_pdf_file_path, 'wb') as final_pdf:\n for pdf_files in selected_pdfs_list:\n merger.append(pdf_files)\n merger.write(final_pdf)", "title": "" }, { "docid": "092e3cf1f5102313126b04346944c9c0", "score": "0.5765454", "text": "def combined(filenames):\n for filename in filenames:\n with open(filename) as f:\n for line in f:\n yield line", "title": "" }, { "docid": "8c120c6658a0d631a34055e09086046c", "score": "0.5759953", "text": "def combine_ionex(outpath,filenames,newfilename):\n if os.path.isfile(outpath+newfilename):\n print (\"FILE exists: \",outpath+newfilename)\n return outpath+newfilename\n newf=open(outpath+newfilename,'w')\n filenames=sorted(filenames)\n firstfile=open(outpath+filenames[0])\n 
lastfile=open(outpath+filenames[-1])\n for line in lastfile:\n if \"EPOCH OF LAST MAP\" in line:\n lastepoch=line\n lastfile.close()\n break\n #write header + tec map\n for line in firstfile:\n if \"END OF TEC MAP\" in line:\n break\n if not \"EPOCH OF LAST MAP\" in line:\n if \"OF MAPS IN FILE\" in line:\n newf.write(line.replace('1',str(len(filenames))))\n else:\n newf.write(line)\n else:\n newf.write(lastepoch)\n tecmapnr=2\n for myfname in filenames[1:]:\n myf=open(outpath+myfname)\n end_of_header=False\n for line in myf:\n if not end_of_header and \"END OF HEADER\" in line:\n end_of_header=True\n else:\n if end_of_header:\n if \"END OF TEC MAP\" in line:\n newf.write(line.replace('1',str(tecmapnr)))\n break\n if \"START OF TEC MAP\" in line:\n newf.write(line.replace('1',str(tecmapnr)))\n else:\n newf.write(line)\n tecmapnr+=1\n newf.write(\"END OF FILE\\n\")\n return outpath+newfilename\n #ignore RMS map for now, since it is filled with zeros anyway", "title": "" }, { "docid": "d7838fb46d2720451c50d89f097b9492", "score": "0.574935", "text": "def merge(files, outfile):\n all_ = []\n for fn in files:\n with open('../vrt/' + fn, 'r', encoding='utf-8') as f:\n all_.extend(f.read().splitlines())\n\n with open('../grouped_vrt/%s.vrt' % outfile, 'w', encoding='utf-8') as f:\n for line in all_:\n f.write(line + '\\n')", "title": "" }, { "docid": "e9f9fbeb487c6da36296f7e40e421649", "score": "0.57148176", "text": "def print_files(self):\n Output().print_result(self.filelist)", "title": "" }, { "docid": "020fa321e07038560120481d162bf498", "score": "0.5714341", "text": "def merge_files(file_handler_list, merged_handler, verbose = True):\n total_files = len(file_handler_list)\n current_file = 1\n if verbose:\n print \"\"\n for f in file_handler_list:\n if verbose:\n print \"Processing file\",current_file,\"of\",total_files\n for line in f:\n merged_handler.write(line)\n current_file = current_file +1", "title": "" }, { "docid": "7d4e278a8b75a223d8856af82fdef347", "score": "0.5712947", "text": "def combine_subbasins(self, \r\n output,\r\n verbose = True,\r\n ):\r\n\r\n if verbose: \r\n print('trying to combine subbasin shapefiles into a single file\\n')\r\n\r\n w = Writer(shapeType = 5)\r\n\r\n projection = None\r\n fields = None\r\n\r\n for comid in self.subbasins:\r\n\r\n its = output, comid\r\n filename = '{}/{}/combined'.format(*its)\r\n if os.path.isfile(filename + '.shp'):\r\n\r\n # start by copying the projection files\r\n\r\n if projection is None:\r\n projection = self.subbasincatchments + '.prj'\r\n shutil.copy(filename + '.prj', projection)\r\n\r\n # read the new file\r\n \r\n r = Reader(filename, shapeType=5)\r\n\r\n if fields is None:\r\n fields = r.fields\r\n for field in fields: w.field(*field)\r\n\r\n shape = r.shape(0)\r\n\r\n # write the shape and record to the new file\r\n\r\n w.poly(shapeType = 5, parts = [shape.points])\r\n record = r.record(0)\r\n w.record(*record)\r\n\r\n elif verbose: print('unable to locate {}'.format(filename))\r\n\r\n if fields is not None: \r\n w.save(self.subbasincatchments)\r\n if verbose: print('successfully combined subbasin shapefiles\\n')\r\n elif verbose: print('unable to combine subbasins\\n')", "title": "" }, { "docid": "12fd8a09b006f222dc14b9d3f84ff534", "score": "0.5708302", "text": "def main():\n # generate a list with filenames\n os.system('ls out*.char > tmp.char')\n ifile = open('tmp.char')\n charfiles = []\n for line in ifile:\n line = line.split()\n charfiles.append(line[0])\n # check the number of filenames, leave if nfiles = 1\n 
nfiles = len(charfiles)\n if nfiles == 1:\n print 'NO CONCENTATION NECESSARY'\n exit()\n # compute the number of total steps\n print 'Determining the number of total steps'\n natoms_all = 0\n nsteps_all = 0\n for i in range(nfiles):\n hfile = open('out' + str(i+1) + '.header')\n natoms, nsteps = read_first(hfile)\n hfile.close()\n natoms_all = natoms\n nsteps_all = nsteps_all + nsteps\n nsteps_all = nsteps_all - nfiles + 1\n # removing previous files if there are any\n os.system('rm out.header.all out.char.all')\n # cat header files to a master file\n print 'Combining header files'\n ofile = open('out.header.all', 'w')\n ofile.write('nsnapshots, natoms = ' + str(nsteps_all) + ' ' + str(natoms_all) + '\\n')\n ofile.close()\n for i in range(nfiles):\n if i == 0:\n os.system('tail -n +2 out1.header >> out.header.all')\n else:\n os.system('tail -n +11 out' + str(i+1) + '.header >> out.header.all')\n # cat char files to a master file\n print 'Combining char files'\n ofile = codecs.open('out.char.all', 'w', 'UTF-8')\n for i in range(nfiles):\n hfile = open('out' + str(i+1) + '.header')\n cfile = codecs.open('out' + str(i+1) + '.char', 'r', 'UTF-8')\n natoms, nsteps = read_first(hfile)\n ### skip first snapshot if i > 0\n if i > 0:\n skip_snapshots(hfile, cfile, 1)\n nsteps = nsteps - 1\n ### copy the rest of the snapshot\n for j in range(nsteps):\n print 'i/nfiles; j/nsteps', i,nfiles,j,nsteps\n b = cfile.reader.read(natoms*4,natoms*4)\n ofile.write(b)\n hfile.close()\n cfile.close()\n ofile.close()\n return", "title": "" }, { "docid": "8ff94fbb19be4ebacafb633ec384ab37", "score": "0.56969726", "text": "def main(argv=None):\n\n if argv is None:\n argv = sys.argv[1:]\n\n args = parser.parse_args(argv)\n log.info('start parameters: ' + str(args))\n\n log.info('loading data')\n file2_content = list(file_line_generator(args.file2))\n\n log.info('combining files')\n\n with utf8_file_open(args.out_file, 'w') as outfile:\n\n for c, line1 in enumerate(file_line_generator(args.file1)):\n log_iterations(log, c, 1000)\n\n for line2 in file2_content:\n outfile.write(line1 + args.separator + line2 + u'\\n')\n\n log.info('finished')", "title": "" }, { "docid": "098533fa700809384cc7e7dab7ef785f", "score": "0.5694657", "text": "def concatenate(self, paths_to_datasets: list, parent_working_directory: Path):\n subbrick_labels = subbrick_labels_of(paths_to_datasets[0])\n\n results = {}\n for label in subbrick_labels:\n tcat_args = \"-tr 2\".split()\n for path in paths_to_datasets:\n tcat_args += [f\"{path}[{label}]\"]\n results[label] = AFNI(program=\"3dTcat\", args=tcat_args, working_directory=parent_working_directory/f\"{label}_concatenated\")\n\n return results", "title": "" }, { "docid": "aa6caf8856fd9eb28104863846fa88cf", "score": "0.5694422", "text": "def merge_into_one(self):\n for file, chunks in self.chunks.items():\n for chunk in chunks:\n if len(self.final_file_list)>0:\n for sorted_file in self.final_file_list:\n self.sort_pair_of_files(sorted_file, chunk)\n self.final_file_list.append(chunk)\n\n with open(self.final_file, 'w+') as obj:\n for file in self.final_file_list:\n obj.write(open(file).read())", "title": "" }, { "docid": "9a5625c994045c6b8c0f227992943727", "score": "0.5685118", "text": "def combine():\n combined = \"\"\n for md in exists(markdown_dir).glob(\"*.md\"):\n combined += md.read_text(errors=\"backslashreplace\") + \"\\n\\n\"\n combined_markdown.write_text(combined)\n print(f\"{combined_markdown}\")\n subprocess.Popen([\"code\", str(combined_markdown)])", "title": "" }, { "docid": 
"bc957420a560aa96f20000c4eaa9ec9c", "score": "0.56822973", "text": "def run(self):\n\t\tfor index, pair in enumerate(self.filepairs):\n\t\t\tself.outputInfo.emit(\"Combining %s and %s...\" % (os.path.basename(pair[0]), os.path.basename(pair[1])))\n\n\t\t\tif self.convertGS:\n\t\t\t\tself.outputInfo.emit(\"Converting to greyscale...\")\n\t\t\t\timg1 = Image.open(pair[0]).convert('L')\n\t\t\t\timg2 = Image.open(pair[1]).convert('L')\n\t\t\telse:\n\t\t\t\timg1 = Image.open(pair[0])\n\t\t\t\timg2 = Image.open(pair[1])\n\n\t\t\tif self.resize:\n\t\t\t\t# Resize to 20% of longest dimension of smaller image\n\t\t\t\tself.outputInfo.emit(\"Resizing...\")\n\t\t\t\tnewsize = min(max(img1.size), max(img2.size))\n\t\t\t\tnewsize /= 5\n\t\t\t\timg1.thumbnail((newsize, newsize), Image.ANTIALIAS)\n\t\t\t\timg2.thumbnail((newsize, newsize), Image.ANTIALIAS)\n\n\t\t\tself.outputInfo.emit(\"Adjusting image brightness...\")\n\t\t\tenhancer1 = ImageEnhance.Brightness(img1)\n\t\t\tenhancer2 = ImageEnhance.Brightness(img2)\n\n\t\t\t# Output filename format \"file1_file2.JPG\" where 'file1' and 'file2' are the file name, sans extension\n\t\t\toutfname = os.path.splitext(os.path.basename(pair[0]))[0]\n\t\t\toutfname += \"_\"\n\t\t\toutfname += os.path.basename(pair[1])\n\n\t\t\timg3 = ImageChops.add(enhancer1.enhance(0.5), enhancer2.enhance(0.5))\n\t\t\tself.outputInfo.emit(\"Creating %s...\\n\" % outfname)\n\t\t\timg3.save(os.path.join(self.outdir, outfname))\n\n\t\tself.outputInfo.emit(\"Operation completed!\")", "title": "" }, { "docid": "202b50ecbe3949e37c4012583d16d51e", "score": "0.56803817", "text": "def concat(filepath, delete):\n\tpath = filepath if os.path.isabs(filepath) else os.path.join(os.getcwd(), filepath)\n\twith open(path, 'r+') as f:\n\t\tif '@import' in f.read():\n\t\t\tfor line in fileinput.input([path], inplace=1):\n\t\t\t\tif '@import' in line:\n\t\t\t\t\turl = get_path_from_import(line, 0)\n\t\t\t\t\trelpath = url if os.path.isabs(url) else get_directory_path(filepath) + '/' + url\n\t\t\t\t\tif os.path.exists(relpath):\n\t\t\t\t\t\twith open(relpath, 'r') as resource:\n\t\t\t\t\t\t\tsys.stdout.write(resource.read())\n\t\t\t\t\t\tif delete is True:\n\t\t\t\t\t\t\tos.remove(relpath)\n\t\t\t\t\telse:\n\t\t\t\t\t\tsys.stdout.write(line)\n\t\t\t\telse:\n\t\t\t\t\tsys.stdout.write(line)", "title": "" }, { "docid": "9ea55ad9137e037519b6c5c555de21d2", "score": "0.5679772", "text": "def combine_results(directory, file_name):\n output=[]\n os.makedirs(directory+\"/output/\", exist_ok=True)\n for files in os.listdir(directory):\n if(files.endswith(\".json\") or files.endswith(\".JSON\")):\n with open(directory + \"/\" + files) as file:\n current_file = json.load(file)\n output.append(current_file)\n save_json(directory + \"/output/\", file_name,output)\n print(time.ctime(), \"COMBINED FOLDER :\", directory)", "title": "" }, { "docid": "9c8872a581a0c31a6157c079bbae237c", "score": "0.56761026", "text": "def merge_walkupseq_files(paths_to_batches_info):\n paths_to_batches_info = glob.glob('walkupseq_files/*sample_info*')\n\n dfs = []\n for f in paths_to_batches_info:\n tmp = pd.read_table(f, encoding='latin1')\n dfs.append(tmp)\n\n df = pd.concat(dfs, axis=0)\n df.to_csv('walkupseq_files/walkupseq_all_combined.txt', sep=\"\\t\", index=None)\n return df", "title": "" }, { "docid": "1de6e3d29043813ca8b1883a3b73aec6", "score": "0.56739086", "text": "def run_cat_pairing():\n for file_R1 in glob('*R1*fasta'):\n for file_R2 in glob('*R2*fasta'):\n if file_R1.split('R1') == file_R2.split('R2'):\n outname = 
file_R1.replace('R1_', '')\n outname = outname.replace('.fasta', '.tmp.fasta')\n subprocess.check_call(['touch', outname])\n with open(outname, 'w') as out:\n with open(file_R1) as f1:\n with open(file_R2) as f2:\n out.write(f1.read())\n out.write(f2.read())", "title": "" }, { "docid": "f6e5385ee506c3c9b5b0d8ba0f71374e", "score": "0.5668559", "text": "def merge_parts_of_speech(files, output, sort_command, verbose=False):\n# TODO: Shouldn't we just fail if any of these files is missing?\n parts_files = list_existing(map(compose_parts_of_speech_path_for, files))\n header = [\n 'SSTART',\n 'SEND',\n ]\n merge_numbered_files(\n parts_files, output, header, sort_command, verbose=verbose)", "title": "" }, { "docid": "4d6ba6b02c8d827adb3f0b218dd401da", "score": "0.5664303", "text": "def combinePdfs(self):\n pdfs = ['displacement_comparison.pdf', 'results.pdf', 'stats_table_all.pdf', 'stats_table_acc.pdf']\n pdfPaths = [os.path.join(self.workingDir, f) for f in pdfs]\n merger = PdfFileMerger()\n for pdf in pdfPaths:\n merger.append(pdf)\n\n merger.write(os.path.join(self.workingDir, 'report_{0}.pdf'.format(self.eventTimestamp)))\n merger.close()", "title": "" }, { "docid": "9656f9cd4cf994adffd556c95c85279e", "score": "0.565662", "text": "def integrate_files(file_path):\n file_list = glob.glob(file_path+\"*.txt\")\n with open('result_train.txt', 'w', encoding='utf-8') as result:\n for file_ in file_list:\n for line in open(file_, 'r', encoding='utf-8'):\n result.write(line)", "title": "" }, { "docid": "33dfd116c099e5fa3186f135b825302c", "score": "0.5653899", "text": "def concatenate_audio_moviepy(audio_clip_paths, output_path):\n clips = [AudioFileClip(c) for c in audio_clip_paths]\n final_clip = concatenate_audioclips(clips)\n final_clip.write_audiofile(output_path)", "title": "" }, { "docid": "31294d93fa8db634f06d30e4ea266c31", "score": "0.5649751", "text": "def cat(in_paths, out_path, headers: List[str]):\n data = []\n with open(out_path, \"w\") as out:\n out.write(\",\".join(headers))\n out.write(\"\\n\")\n for path in in_paths:\n with open(path) as f:\n for i, line in enumerate(f):\n if i == 0:\n continue\n out.write(line)", "title": "" }, { "docid": "f261455184aa5f408893a048fd938e04", "score": "0.5647724", "text": "def merge(input_path, output):\n paths = glob.glob('{}/*.pdf'.format(input_path))\n paths.sort()\n\n pdf_merger = PdfFileMerger()\n\n for path in paths:\n print(\"[+] Merging {0} into {1}\".format(path, output.name))\n pdf_merger.append(path)\n\n pdf_merger.write(output)", "title": "" }, { "docid": "9d6b1e5a5578868b0576c30c2293f9b9", "score": "0.56447536", "text": "def _produceOutputFiles(self):\n pass", "title": "" }, { "docid": "388613d8c116d97fb6d30fe44bbda51d", "score": "0.5643399", "text": "def join_images(dest_dir):\n\n cmd = \"\"\"matlab -r \"combine_keggs('%s');quit;\" -nodesktop -logfile %s -nosplash\"\"\" % (dest_dir, os.path.join(dest_dir, 'tmp.out'))\n print cmd\n call(shlex.split(cmd))", "title": "" }, { "docid": "6c0946c0e575e2c1d25e82744801fe1c", "score": "0.56417805", "text": "def merge(infile,output):\n\n outstream = (_distiller_common.open_bgzip(output, mode='w') \n if output else sys.stdout)\n\n paths = sum([glob.glob(mask) for mask in infile], [])\n merged_header = form_merged_header(paths)\n\n merged_header = _distiller_common.append_pg_to_sam_header(\n merged_header,\n {'ID': UTIL_NAME,\n 'PN': UTIL_NAME,\n 'VN': _distiller_common.DISTILLER_VERSION,\n 'CL': ' '.join(sys.argv)\n })\n\n outstream.writelines(merged_header)\n \n if hasattr(outstream, 'close'):\n 
outstream.close()\n\n command = r'''\n /bin/bash -c 'sort -k {0},{0} -k {1},{1} -k {2},{2}n -k {3},{3}n -k {4},{4} \n --merge --field-separator=$'\\''\\v'\\'' \n '''.replace('\\n',' ').format(\n _distiller_common.COL_C1+1, \n _distiller_common.COL_C2+1, \n _distiller_common.COL_P1+1, \n _distiller_common.COL_P2+1,\n _distiller_common.COL_P2+1,\n )\n for path in paths:\n if path.endswith('.gz'):\n command += r''' <(zcat {} | sed -n -e '\\''/^[^#]/,$p'\\'')'''.format(path)\n else:\n command += r''' <(sed -n -e '\\''/^[^#]/,$p'\\'' {})'''.format(path)\n if output.endswith('.gz'):\n command += '| bgzip -c'\n if output:\n command += ' >> ' + output\n command += \"'\"\n subprocess.call(command, shell=True)", "title": "" }, { "docid": "9aa7b88c38d63d0176f7f9edded0b3ad", "score": "0.5635278", "text": "def main(argv=None):\n\tparser = argparse.ArgumentParser(description='Merge CSV Files')\n\tparser.add_argument('-n', '--no-headers', action='store_true',\n\t\thelp='Do not write headers to the output.')\n\n\tparser.add_argument('-d', '--deduplicate', action='store_true',\n\t\thelp='De-duplicate identical rows.')\n\tparser.add_argument('-s', '--sort', type=int,\n\t\thelp='Column to sort on (0-indexed, default no sorting).')\n\n\tparser.add_argument('-N', '--newline-only', action='store_true',\n\t\thelp='Use newlines as line endings, no carriage returns.')\n\tparser.add_argument('-q', '--quoting', type=int, default=csv.QUOTE_MINIMAL,\n\t\thelp='Select quoting - 0=MINIMAL (default), 1=ALL, 2=NONNUMERIC, 3=NONE')\n\n\tparser.add_argument('output', help='Output file')\n\tparser.add_argument('files', nargs='+', help='Input files.')\n\n\tif argv is None:\n\t\targv = sys.argv\n\targuments = parser.parse_args(argv[1:])\n\n\t#Read all of the files\n\theaders = None\n\tdata = []\n\tfor filename in arguments.files:\n\t\twith open (filename) as csvFile:\n\t\t\tcsvReader = csv.reader(csvFile)\n\t\t\tfor n, row in enumerate(csvReader):\n\t\t\t\t#Handle headers\n\t\t\t\tif n == 0:\n\t\t\t\t\tif headers is None:\n\t\t\t\t\t\theaders = row\n\t\t\t\t\telif headers != row:\n\t\t\t\t\t\tprint('Mismatched headers in %s' % filename)\n\t\t\t\t\t\treturn 1\n\t\t\t\t\tcontinue\n\n\t\t\t\tdata.append(row)\n\n\t#Sort all the rows by key\n\tif arguments.sort is not None:\n\t\tdata.sort(key=lambda row: int(row[arguments.sort]))\n\n\t#De-duplicate\n\tif arguments.deduplicate:\n\t\tdata = [row for row, _ in itertools.groupby(data)]\n\n\t#Write to another file\n\twith open(arguments.output, 'w', newline='') as outputFile:\n\t\tcsvWriter = csv.writer(outputFile,\n\t\t\tlineterminator='\\n' if arguments.newline_only else '\\r\\n',\n\t\t\tquoting=arguments.quoting)\n\t\tif headers is not None and not arguments.no_headers:\n\t\t\tcsvWriter.writerow(headers)\n\t\tcsvWriter.writerows(data)\n\n\treturn 0", "title": "" }, { "docid": "dece9a4385d3fdd24b26faf60bdf9c80", "score": "0.56346935", "text": "def cat(*args, **kwargs):\n s = str()\n for f in args:\n if not f.exists():\n warn(f'File {f.name} does not exist.')\n else:\n try:\n s += f.read_text()\n except FileNotFoundError:\n warn(f'File {str(f)} not found!')\n if not 'quiet' in kwargs.keys() or kwargs['quiet'] == False: print(s)\n return s", "title": "" }, { "docid": "3b49c3d4596515dc356c927cc1f195fa", "score": "0.56261915", "text": "def main():\n\n args = get_args()\n outdir = args.outdir\n file = args.file\n\n filecount = 0\n seqcount = 0\n\n\n for fh in args.file:\n outfile = os.path.join(outdir, os.path.basename(fh.name))\n out = open(outfile, 'wt')\n filecount += 1\n\n for line in 
fh:\n rna = line.replace('T', 'U')\n seqcount += 1\n out.write(''.join(rna))\n out.close()\n\n\n if seqcount == 1 and filecount == 1:\n print(f'Done, wrote {seqcount} sequence in {filecount} file to directory \"{outdir}\".')\n elif seqcount > 1 and filecount == 1:\n print(f'Done, wrote {seqcount} sequences in {filecount} file to directory \"{outdir}\".')\n elif seqcount > 1 and filecount > 1:\n print(f'Done, wrote {seqcount} sequences in {filecount} files to directory \"{outdir}\".')", "title": "" } ]
1cff34ef942b9ac38d0ec62678e277de
Return the directory name for the given __file__.
[ { "docid": "111ea80cd841421e7f9fc192363d582d", "score": "0.69222474", "text": "def getDirName(magicFile):\n dirPath = os.path.dirname(os.path.abspath(magicFile))\n parts = dirPath.split(os.path.sep)\n return parts[-1]", "title": "" } ]
[ { "docid": "50bfc1f95a5c3b0fd2195fa403f8eabf", "score": "0.78758234", "text": "def dir_name(self):\n return os.path.split(self.path)[0]", "title": "" }, { "docid": "709bd8acf9d73662e780a61c5798a2cd", "score": "0.7685119", "text": "def directory(self):\n return \"%s.d\" % self.filename", "title": "" }, { "docid": "6eae11f2a0513805cd2afe779fcdbe30", "score": "0.7601026", "text": "def filename(self):\n return \".\".join([self.dirname, self.__class__.__name__.lower()])", "title": "" }, { "docid": "e8d1a182beaed513d33d33f9daaf2ec1", "score": "0.7562282", "text": "def dir_path():\n return os.path.dirname(os.path.abspath(__file__))", "title": "" }, { "docid": "8757a0e49761bf8637ba6f30888e49dd", "score": "0.7521747", "text": "def file_dir(file):\n return file.rstrip(file_base(file)).rstrip(\"/\")", "title": "" }, { "docid": "242633c22f694fad9466ab20bf48e986", "score": "0.73352915", "text": "def directory_name(self) -> str:\n return pulumi.get(self, \"directory_name\")", "title": "" }, { "docid": "bb090fe15c31f26b9656fda80d1a7268", "score": "0.72811145", "text": "def get_dir(module):\n if hasattr(module, '__file__'):\n filename = module.__file__\n else:\n filename = module\n filename = os.path.dirname(os.path.split(filename)[0])\n if filename.endswith('.egg'):\n filename = os.path.split(filename)[0]\n return filename", "title": "" }, { "docid": "873cb29c5da19a7adf3524e4dc3b9fae", "score": "0.72770524", "text": "def path():\n return os.path.dirname(os.path.realpath(__file__))", "title": "" }, { "docid": "1df68c8045871054328bab43f7751d55", "score": "0.72733164", "text": "def dir_name(self):\n\n if not self._filename or not os.path.isfile(self._filename):\n return None\n\n return os.path.dirname(self._filename)", "title": "" }, { "docid": "581cfde4b5a76248625cf951d3fac319", "score": "0.72029316", "text": "def getDirname(self):\n return os.path.join(settings.MEDIA_ROOT, 'rmg','tools','compare/')", "title": "" }, { "docid": "174c987bfe00bf17df262d1033d65253", "score": "0.7117472", "text": "def getDirname(self):\n return os.path.join(settings.MEDIA_ROOT, 'rmg','tools','flux/')", "title": "" }, { "docid": "b21fea6b9536f11d501031893638bfeb", "score": "0.71055526", "text": "def filename(self):\r\n return self.module.__file__", "title": "" }, { "docid": "c462d4e82a8d4dd746782a84c884afd7", "score": "0.70864457", "text": "def filename(self):\n # E.g. 
__main__ does not have __file__\n return getattr(self._pymodule, '__file__', self.name)", "title": "" }, { "docid": "cd2135f081cfacf9a9536aa9e4299833", "score": "0.7036621", "text": "def getDirname(self):\n return os.path.join(settings.MEDIA_ROOT, 'rmg','tools/')", "title": "" }, { "docid": "c79e65edc00a66bf257f9e0eb2647795", "score": "0.70044774", "text": "def static_file_directory():\n current_file = inspect.getfile(inspect.currentframe())\n current_directory = os.path.dirname(os.path.abspath(current_file))\n static_directory = os.path.join(current_directory, \"static\")\n return static_directory", "title": "" }, { "docid": "ab7f0581db5662108e7be974ce44144d", "score": "0.6985338", "text": "def getbasefile() -> str:\n trace(\"enter\")\n return os.path.splitext(os.path.basename(__file__))[0]", "title": "" }, { "docid": "2962c2701e48c201471517d16a62bffe", "score": "0.69498056", "text": "def name(self):\n return os.path.split(self.source)[-1].split('.')[0]", "title": "" }, { "docid": "f9b3d7483a0da8da164e20ceb1d22e59", "score": "0.6938953", "text": "def path(self) -> str:\n if self._filename is not None:\n return self._filename\n return self.__directory", "title": "" }, { "docid": "340519bb4d2d9761e319f1b119585393", "score": "0.6920103", "text": "def get_directory(self):\n directory_name = re.split('_[0-9]+\\-[0-9]+', self.name)[0]\n return os.path.join(self.data_folder, directory_name)", "title": "" }, { "docid": "92595414ac9202be013573fe57c1d85f", "score": "0.69185376", "text": "def get_directory():\n\tpass", "title": "" }, { "docid": "a5c9cc7ec4d54d270eb1fa4bfbb5b13c", "score": "0.687504", "text": "def path(folder):\n\tpath = os.path.realpath(__file__)\n\tdirectory = os.path.dirname(path)\n\treturn u\"{}/{}\".format(directory, folder)", "title": "" }, { "docid": "73fe93c200cce938134e0b8fdd658132", "score": "0.6859817", "text": "def __package_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "title": "" }, { "docid": "7dda0fa3405d215214a5374e1c439982", "score": "0.68527126", "text": "def day_name():\n file_path = os.path.dirname(__file__)\n day_path = os.path.normpath(os.path.join(file_path, '..'))\n return os.path.basename(day_path)", "title": "" }, { "docid": "e6953d3b21ddf93a10c3a35b0f4d23af", "score": "0.6849006", "text": "def get_dir():\n return Logger.CURRENT.get_dir()", "title": "" }, { "docid": "19bde88b5b84bfd818a040b285c1e186", "score": "0.68376595", "text": "def slice_dir(self):\r\n slice_dir = os.path.abspath(os.path.dirname(slices.__file__))\r\n return os.path.join(slice_dir, self.slice_name)", "title": "" }, { "docid": "0cbd23eea50df7601c5a8e4c131b5a04", "score": "0.68312556", "text": "def folder_name(self):\n return os_path.basename(self._file_path).replace(\" \", \"_\")", "title": "" }, { "docid": "84626999c4de04d184bc943b43ed6634", "score": "0.68228805", "text": "def directory(self):\n return self._directory.path", "title": "" }, { "docid": "685f78599fdf0c953316fedd3fbe2b01", "score": "0.68123555", "text": "def get_own_path():\n module = sys.modules[__name__]\n path = os.path.dirname(module.__file__)\n return path", "title": "" }, { "docid": "c7d655d1fbf08156ed77ab692cb946e5", "score": "0.6769696", "text": "def get_fn(filename):\n return join(split(abspath(__file__))[0], 'files', filename)", "title": "" }, { "docid": "b271d1dd97bc0d37cf7f682daedfec6d", "score": "0.6750516", "text": "def name():\n return os.path.basename(os.getcwd()).split(\".\")[0].lower()", "title": "" }, { "docid": "429cc5f2de8b53a55c81bf953ffdc780", "score": "0.67485297", "text": 
"def get_dir(self):\n return self.dir", "title": "" }, { "docid": "af2cbdb84988791557b80a96416de5d8", "score": "0.6747918", "text": "def getPath():\n\n path = __file__\n parent = os.path.join(path, os.pardir)\n parent = os.path.join(parent, os.pardir)\n return parent + \"\\output\\\\\"", "title": "" }, { "docid": "f121a115ba7e9ac42737db25dac7e67e", "score": "0.6736883", "text": "def __file__(self):\n return __file__", "title": "" }, { "docid": "4348143b087228fa5dc66b5ac15cab0d", "score": "0.6716318", "text": "def app_self(self, context):\n\t\treturn \"./\" + context.file.basename", "title": "" }, { "docid": "7b0bea95569902af2090a54ca2ef43fc", "score": "0.6715835", "text": "def get_folder(self):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), FOLDER_NAME).lower()", "title": "" }, { "docid": "6745d7dfcdf38cc6b233cb8c215caac1", "score": "0.6696005", "text": "def full_path(filename):\n return Path(__file__).parent.joinpath(filename)", "title": "" }, { "docid": "d8affb0aed6ced1f98cf498e5ee071ed", "score": "0.6695883", "text": "def displayname(self):\n if self.path.is_dir():\n return self.path.name + '/'\n return self.path.name", "title": "" }, { "docid": "63dbf8d72807f56d79489358f0c8c3c8", "score": "0.6690796", "text": "def sample_dir():\n return pathlib.Path(__file__).parent / \"files\"", "title": "" }, { "docid": "59421c69c4198149e7af11ac59c71afe", "score": "0.6690753", "text": "def get_directory(self):\n return self.directoryname.text()", "title": "" }, { "docid": "86983206d4ba8746ce393f4973ae93af", "score": "0.6674066", "text": "def get_fidasim_dir():\n\n directory = dirname(dirname(dirname(dirname(os.path.abspath(__file__)))))\n\n return directory", "title": "" }, { "docid": "57772095ec85e3d51febe101c6b0c99e", "score": "0.66560954", "text": "def get_file_name(self, file):\n\t\treturn file[file.rfind(\"/\") + 1:]", "title": "" }, { "docid": "96aab47b95aba2978708df029b41f5f0", "score": "0.66516805", "text": "def _indir(self, filename):\n return Path(self.directory) / filename", "title": "" }, { "docid": "3f02bd53d542538e8b7375eddcf5a709", "score": "0.66452646", "text": "def get_file_dir(f):\n if not isinstance(f, File):\n raise Exception(\"Illegal file class: not File\")\n if not f.scope:\n raise Exception(\"Illegal file object: no scope\")\n if not f.guid:\n raise Exception(\"Illegal file object: no guid\")\n return os.path.join('/system', f.scope, f.guid)", "title": "" }, { "docid": "8104c34f35a31496c9e02ad3bef9c2ba", "score": "0.66280156", "text": "def get_parent_directory():\n return Path(__file__).parent.parent", "title": "" }, { "docid": "b1231c96fba712c9d9ba3696b759eba4", "score": "0.6627908", "text": "def get_package_dir():\n return path.dirname(path.realpath(__file__))", "title": "" }, { "docid": "4da6fea75dc180dda562ade9db55d286", "score": "0.66278183", "text": "def _get_current_directory():\n return os.path.abspath(os.path.dirname(os.path.realpath(__file__)))", "title": "" }, { "docid": "6f5bfc5daac82381a0418d738152e331", "score": "0.66055644", "text": "def get_path():\n current_folder = os.path.dirname(os.path.abspath(__file__))\n if DEBUG:\n print('Current folder: ' + current_folder)\n return current_folder", "title": "" }, { "docid": "c65d9efc727e88801a8fd973f5903373", "score": "0.6600078", "text": "def dir(self):\n return self.path", "title": "" }, { "docid": "19cfd39172e6d95bc3248da30db9fb4f", "score": "0.6598482", "text": "def reporoot():\n return os.path.dirname(\n os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))))", "title": "" }, 
{ "docid": "15fdd51b9e64ddc316ed2f09dacf22d3", "score": "0.6588243", "text": "def find_directory(file):\n answer = os.path.dirname(file)\n return answer if answer not in [None, ''] else os.path.realpath(__file__).strip(f\"/{file}\")", "title": "" }, { "docid": "8d712c01d909caa078f14fcf618d5e35", "score": "0.6587881", "text": "def __file__(self):\n\t\treturn __file__", "title": "" }, { "docid": "8d712c01d909caa078f14fcf618d5e35", "score": "0.6587881", "text": "def __file__(self):\n\t\treturn __file__", "title": "" }, { "docid": "8d712c01d909caa078f14fcf618d5e35", "score": "0.6587881", "text": "def __file__(self):\n\t\treturn __file__", "title": "" }, { "docid": "8d712c01d909caa078f14fcf618d5e35", "score": "0.6587881", "text": "def __file__(self):\n\t\treturn __file__", "title": "" }, { "docid": "8d712c01d909caa078f14fcf618d5e35", "score": "0.6587881", "text": "def __file__(self):\n\t\treturn __file__", "title": "" }, { "docid": "8d712c01d909caa078f14fcf618d5e35", "score": "0.6587881", "text": "def __file__(self):\n\t\treturn __file__", "title": "" }, { "docid": "8d712c01d909caa078f14fcf618d5e35", "score": "0.6587881", "text": "def __file__(self):\n\t\treturn __file__", "title": "" }, { "docid": "8d712c01d909caa078f14fcf618d5e35", "score": "0.6587881", "text": "def __file__(self):\n\t\treturn __file__", "title": "" }, { "docid": "8d712c01d909caa078f14fcf618d5e35", "score": "0.6587881", "text": "def __file__(self):\n\t\treturn __file__", "title": "" }, { "docid": "1b95f4e4811b552cc9fa8d67700a1447", "score": "0.6584592", "text": "def get_src_path() -> str:\n return f\"{pathlib.Path(__file__).parent.parent.absolute()}\"", "title": "" }, { "docid": "78d61a099c29a00e82d689bb74d43817", "score": "0.6582228", "text": "def path_for_file(self, filename):\n return str(self.file / filename)", "title": "" }, { "docid": "52518e35b44b46502787841414014828", "score": "0.65777206", "text": "def files_relative_dir(self):\n return os.path.join(self.name)", "title": "" }, { "docid": "8a6e40dfeea1ac472499083744684e39", "score": "0.65518445", "text": "def file_base(file):\n return os.path.basename(file)", "title": "" }, { "docid": "6e60ea334dd0e4dab3f351bbcb767848", "score": "0.65503174", "text": "def operation_dir(self):\n return os.path.dirname(inspect.getfile(self.__class__))", "title": "" }, { "docid": "f6fbd5537cd4d143a357c825a11ee5a2", "score": "0.6519395", "text": "def getDirname(name):\n return name.replace('/', os.sep)", "title": "" }, { "docid": "6eadcab6a22d498a6293b580a8eed5e0", "score": "0.6517145", "text": "def path(self):\n return os.path.join(self._path, self._file_name)", "title": "" }, { "docid": "d9fa37d0a1f6a0cd69d116e574f27018", "score": "0.64990675", "text": "def dir_name(self):\n dir_name = self.show_name.replace(\" \", \"_\")\n dir_name = ''.join(char for char in dir_name if char in self.VALID_FILE_CHARS)\n return dir_name", "title": "" }, { "docid": "36303a8ce7a9ae79e2057c6563ebd914", "score": "0.64938426", "text": "def get_current_directory():\n prog = __file__\n return os.path.dirname(os.path.abspath(prog))", "title": "" }, { "docid": "aa9dd9babbba467cb49ebe5b4d424074", "score": "0.64913607", "text": "def get_path():\n # Theme directory is defined as our parent directory\n return os.path.abspath(os.path.dirname(os.path.dirname(__file__)))", "title": "" }, { "docid": "aa9dd9babbba467cb49ebe5b4d424074", "score": "0.64913607", "text": "def get_path():\n # Theme directory is defined as our parent directory\n return os.path.abspath(os.path.dirname(os.path.dirname(__file__)))", "title": "" }, 
{ "docid": "303a3543b86db4bd5a79132fd0ff7e6d", "score": "0.64864564", "text": "def get_package_dir():\n return os.path.abspath(os.path.dirname(__file__))", "title": "" }, { "docid": "e218ba4f5c995005fd05d44a00320088", "score": "0.64808404", "text": "def _get_plugin_path(self):\n return os.path.dirname(inspect.getfile(self.__class__))", "title": "" }, { "docid": "844d88a2aa5ff25e542a42824fc834cc", "score": "0.647821", "text": "def getCurrentDirectory():\n\treturn os.path.dirname(os.path.abspath(__file__))", "title": "" }, { "docid": "ead4394062a170e0075fa0d4129c8a9f", "score": "0.6477702", "text": "def get_filename(self, name):\n for dirname in self.directories + [os.path.dirname(__file__)]:\n filename = os.path.join(dirname, '%s.%s' % (name, self.extension))\n if os.path.isfile(filename):\n return filename", "title": "" }, { "docid": "fa3cb2db6b772e0eb6d3656a99afe99d", "score": "0.64758104", "text": "def _dir_of(self, namespace: str) -> Path:\n return self.directory / get_middle_path(\n name=self.orchestrator.config.name, namespace=namespace\n )", "title": "" }, { "docid": "88b53fa541bd427a7620d10a3466c045", "score": "0.6474218", "text": "def path(relpath):\n return os.path.join(os.path.dirname(__file__), relpath)", "title": "" }, { "docid": "be01869b7fb4dc6994d4a0b9903f3526", "score": "0.64659965", "text": "def relpath(filename):\n return os.path.join(os.path.dirname(__file__), filename)", "title": "" }, { "docid": "4d3e42e1bb5a5bf9a874f7bb6d990cd2", "score": "0.6455886", "text": "def get_path_for(view):\n if view.file_name():\n return os.path.dirname(view.file_name())\n if view.window().project_file_name():\n return os.path.dirname(view.window().project_file_name())\n return '/'", "title": "" }, { "docid": "a3be0053f32ba8dd1ac673c6a7457ff7", "score": "0.6453853", "text": "def post_directory_path(instance, filename):\n return 'post_{0}/{1}'.format(instance.post_id, filename)", "title": "" }, { "docid": "0acd00114a74f6aa58e4c12dd70a03de", "score": "0.64535654", "text": "def get_script_directory(self):\r\n return os.path.dirname(os.path.realpath(sys.argv[0]))", "title": "" }, { "docid": "ab4751e9e282016b00d3ef459b49faef", "score": "0.64508325", "text": "def get_full_name(self):\n return self.path + path.sep + self.filename", "title": "" }, { "docid": "769478c24161ae7363df945ec2bbd0e5", "score": "0.64469975", "text": "def root_directory():\n return os.path.normpath(os.path.dirname(os.path.abspath(__file__)))", "title": "" }, { "docid": "fef8307e9fc738b9d7643a598ac23f9b", "score": "0.64447474", "text": "def stadium_view_dir_path(instance, filename: str) -> str:\n\n return f'stadium_{instance.stadium_id}/{filename}'", "title": "" }, { "docid": "e95bd880b53c11870c37779673426731", "score": "0.6437661", "text": "def get_app_directory(file_name=None):\n path = appdirs.user_data_dir(AppName)\n if file_name is not None:\n return os.path.join(path, file_name)\n else:\n return path", "title": "" }, { "docid": "5d72521f0bae972ffd1aed9f1ab55f16", "score": "0.6426298", "text": "def script_dir_name(script_path):\n script_path = os.path.normpath(decode_path(script_path))\n if script_path.endswith(EXT):\n path = script_path\n name = os.path.basename(script_path).replace(EXT, \".py\")\n else:\n path = os.path.dirname(script_path) or \".\"\n name = os.path.basename(script_path)\n return path, name", "title": "" }, { "docid": "33e626ef5ccebb071adaf4e8acfac23e", "score": "0.64227194", "text": "def get_file_path(fname):\n return os.path.dirname(os.path.realpath(fname))", "title": "" }, { "docid": 
"ec264aa69d3fccce8e00d5af5539bc7c", "score": "0.6419752", "text": "def get_name(self):\n path = os.path.normcase(self.path)\n return os.path.split(path)[-1]", "title": "" }, { "docid": "144a28e0e96413c04720734c62ebe99f", "score": "0.64175975", "text": "def get_path(file_name):\n path = os.path.dirname(file_name)\n if path == '':\n path = os.getcwd()\n return path", "title": "" }, { "docid": "e38ba51e342465f1c39ae6429af4d401", "score": "0.63975996", "text": "def getfilename(self):\n\t\treturn self.__dict__['path']", "title": "" }, { "docid": "b545f4ca0da8c6cd7d74eda3f2004d2e", "score": "0.6395369", "text": "def base_name(self):\n return os.path.splitext(os.path.basename(self.path))[0]", "title": "" }, { "docid": "16ad07e0023cc09fca9792f452286e2b", "score": "0.63911474", "text": "def test_dir():\n return os.path.abspath(os.path.dirname(__file__))", "title": "" }, { "docid": "556ba2c6b12fbb7686297518ed7e6f06", "score": "0.6386354", "text": "def get_absolute_directory(self):\n return join(self.location, self.directory or self.name or '')", "title": "" }, { "docid": "8721877ab0fcf238b645fca83e0e7c1b", "score": "0.6378875", "text": "def getDirName(self):\n return 'kit-%s-%s' % (self.get('name'), self.getDbVersion())", "title": "" }, { "docid": "6ecfea7eca60f6c2b6bd86bd41ab061f", "score": "0.63655156", "text": "def _file_name(self, key):\n return os.path.join(self.path, key)", "title": "" }, { "docid": "eeb240171364b699f7512d4b6bded616", "score": "0.6364573", "text": "def files_dir(self):\n if self.root_dir is None:\n return self.files_relative_dir\n else:\n return os.path.join(self.root_dir, self.name)", "title": "" }, { "docid": "7e7b33cafbcb0ea4221880252b494c1a", "score": "0.63615847", "text": "def getAbsFileDir(magicFile):\n return os.path.dirname(os.path.abspath(magicFile)) + '/'", "title": "" }, { "docid": "6bd286f633e40142b12720f0d0423765", "score": "0.63564014", "text": "def path(self):\n return (self.filename\n if self.parent is None\n else os.path.join(self.parent.pathPrefix, self.filename))", "title": "" }, { "docid": "754125b9d12f423ed2751b3fc69fb015", "score": "0.6355512", "text": "def _generate_filename(self):\n directory = self._config.base_dir\n if not self._config.base_dir.endswith(self._os.sep):\n directory += self._os.sep\n return directory + str(self._uuid.uuid4()) + \".pickle\"", "title": "" }, { "docid": "bb7187447de9c45306fbd8fffbcf7313", "score": "0.6355195", "text": "def get_path(self):\n return os.path.dirname(self.path)", "title": "" }, { "docid": "a41525053d3a8455bba5ce770d9ab287", "score": "0.63518536", "text": "def file_name(self):\n pass", "title": "" } ]
1c8c674bc4911d3e990ecfa6a4ed6b08
Return a tuple of availability zones that have the instance_type. This function builds on get_supported_az_for_instance_types, but simplifies the input to a single instance type and returns a tuple.
[ { "docid": "61d8f6b23b3a6fc37b9fcaa914e21ad3", "score": "0.7708298", "text": "def get_supported_az_for_instance_type(self, instance_type: str):\n return self.get_supported_az_for_instance_types([instance_type])[instance_type]", "title": "" } ]
[ { "docid": "03a5145e5f4e16bab8475e545fe9c442", "score": "0.7645274", "text": "def get_supported_az_for_instance_types(self, instance_types: List[str]):\n # first looks for info in cache, then using only one API call for all infos that is not inside the cache\n result = {}\n offerings = self.describe_instance_type_offerings(\n filters=[{\"Name\": \"instance-type\", \"Values\": instance_types}], location_type=\"availability-zone\"\n )\n for instance_type in instance_types:\n result[instance_type] = tuple(\n offering[\"Location\"] for offering in offerings if offering[\"InstanceType\"] == instance_type\n )\n return result", "title": "" }, { "docid": "5359de5b18ae09f1fcfd796a75232f47", "score": "0.65041214", "text": "def azones(self):\n az_filter = make_filter('state', 'available')\n azs = self.ec2_client.describe_availability_zones(Filters=az_filter)\n return map(lambda az : az['ZoneName'], azs['AvailabilityZones'])", "title": "" }, { "docid": "2649947e114a12987cc0ab74ced55535", "score": "0.6393532", "text": "def get_availability_zones(self):\n if self._zones is None:\n nova = self._context.clients.client('nova')\n zones = nova.availability_zones.list(detailed=False)\n self._zones = [zone.zoneName for zone in zones]\n return self._zones", "title": "" }, { "docid": "e76452ede51df2521011aec86fbaf651", "score": "0.6257756", "text": "def availability_zones(self) -> typing.List[str]:\n return jsii.get(self, \"availabilityZones\")", "title": "" }, { "docid": "829961d208294effc7adc3f9c6607719", "score": "0.6130877", "text": "def availability_zones(self) -> Sequence[str]:\n return pulumi.get(self, \"availability_zones\")", "title": "" }, { "docid": "829961d208294effc7adc3f9c6607719", "score": "0.6130877", "text": "def availability_zones(self) -> Sequence[str]:\n return pulumi.get(self, \"availability_zones\")", "title": "" }, { "docid": "6437afb4c7a002a7410c3baa13abba6b", "score": "0.6103796", "text": "def availability_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"availability_zones\")", "title": "" }, { "docid": "67cd5fdc4cfa5a917b769339fe9ff42f", "score": "0.60568565", "text": "def availability_zones(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"availability_zones\")", "title": "" }, { "docid": "654132209dcf29e0c40182fd2f75bcec", "score": "0.60247195", "text": "def overlaps(self, zonetypes=[1,2,6]):\n overlaps = []\n for zone in Zone.objects.filter(zonetype__in=zonetypes, geom__overlaps=self.geom).order_by('zonetype'):\n overlap = zone.geom.intersection(self.geom).area\n overlaps.append([zone, overlap])\n return overlaps", "title": "" }, { "docid": "130467cae288e8d0976dd550cdd9a8ee", "score": "0.59240997", "text": "def get_current_instances(self, instance_type='t1.micro'):\n ReturnValue = {}\n instance_filters = [{'Name': 'instance-state-name', 'Values': ['running']}]\n instances = ec2.instances.filter(Filters=instance_filters)\n for instance in instances:\n #Were checking for a type of t1.micro here for testing purposes.\n if instance.instance_type == instance_type:\n ReturnValue[instance.id] = instance.launch_time\n #Returns a list if keys sorted by oldest first.\n return sorted(ReturnValue.keys(), key=lambda p: ReturnValue[p], reverse=False)", "title": "" }, { "docid": "ad1ccf43c6ca56e19f4a300d33e92cb7", "score": "0.5761975", "text": "def availability_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n warnings.warn(\"\"\"This field will soon be handled by Region in Subnets\"\"\", 
DeprecationWarning)\n        pulumi.log.warn(\"\"\"availability_zones is deprecated: This field will soon be handled by Region in Subnets\"\"\")\n\n        return pulumi.get(self, \"availability_zones\")", "title": "" }, { "docid": "3a27255228c47e40eab7c1b13d85d65c", "score": "0.57538533", "text": "def list_instance_types(self) -> List[str]:\n        return [offering.get(\"InstanceType\") for offering in self.describe_instance_type_offerings()] + list(\n            self.additional_instance_types_data.keys()\n        )", "title": "" }, { "docid": "1f4de49b21fb46213655a911c099daa1", "score": "0.57356477", "text": "def availability_zones(self) -> pulumi.Output[Optional[Sequence[str]]]:\n        warnings.warn(\"\"\"This field will soon be handled by Region in Subnets\"\"\", DeprecationWarning)\n        pulumi.log.warn(\"\"\"availability_zones is deprecated: This field will soon be handled by Region in Subnets\"\"\")\n\n        return pulumi.get(self, \"availability_zones\")", "title": "" }, { "docid": "97f7d21be9a0c4957bf02abd5586b91f", "score": "0.570209", "text": "def list_instances(environment=None):\n    ec2 = boto.ec2.connect_to_region(DEFAULT_REGION)\n    reservations = ec2.get_all_instances()\n    table = prettytable.PrettyTable(['ID', 'Host', 'IP', 'Type', 'State', 'Environment', 'Zone'])\n    table.align = 'l'\n    print CLEAR_SCREEN\n    print '\\nREGION: %s' % DEFAULT_REGION\n    print 'EC2 INSTANCES'\n\n    for instance in _sorted_instances(reservations):\n        colorize = INSTANCE_STATE_FNS.get(instance.state, lambda s: s)\n        table.add_row((\n            instance.id,\n            colors.white(instance.public_dns_name),\n            instance.ip_address,\n            instance.instance_type,\n            colorize(instance.state),\n            _get_instance_environment(instance),\n            instance.placement,\n        ))\n    print table\n    rds = boto.rds2.connect_to_region(DEFAULT_REGION)\n    db_instances = rds.describe_db_instances()['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']\n    print '\\nRDS INSTANCES'\n    table = prettytable.PrettyTable(['ID', 'Host', 'Type', 'Status', 'Zones'])\n    table.align = 'l'\n    for instance in db_instances:\n        colorize = colors.green if instance['DBInstanceStatus'] == 'available' else lambda s: s\n        table.add_row((\n            instance['DBInstanceIdentifier'],\n            colors.white('%(Address)s:%(Port)s' % instance['Endpoint']),\n            instance['DBInstanceClass'],\n            colorize(instance['DBInstanceStatus']),\n            '%s/%s' % (colors.white(instance['AvailabilityZone']), instance['SecondaryAvailabilityZone']),\n\n        ))\n    print table", "title": "" }, { "docid": "9e0f965af76efb0dcf515af95cc753f8", "score": "0.5598721", "text": "async def get_available_regions(self, *args, **kwargs):\n        return [\"us-east-1\", \"us-east-2\", \"us-west-1\", \"us-west-2\"]", "title": "" }, { "docid": "8196616c08bbdb1f41fa79b9e323c38a", "score": "0.5534365", "text": "def describe_instance_types(self):\n        instance_types = []\n        resp = self.get({'Action': 'DescribeInstanceTypes'})\n        for instance_type in resp['InstanceTypes']['InstanceType']:\n            instance_types.append(\n                InstanceType(instance_type['InstanceTypeId'],\n                             int(instance_type['CpuCoreCount']),\n                             int(instance_type['MemorySize'])))\n\n        return instance_types", "title": 
"" }, { "docid": "0cac2ebb8c2e91bfcd4be240ec45c013", "score": "0.55284446", "text": "def overlapping(self, zonetypes=None, min_overlap=25000):\n if not self.has_geom():\n return []\n if zonetypes is None:\n zonetypes = [1, 3, 6, 7]\n if self.zonetype_id in zonetypes:\n zonetypes.remove(self.zonetype_id)\n # print 'zonetypes: ', zonetypes\n zones = []\n for zone in Zone.objects.filter(zonetype__id__in=zonetypes, geom__overlaps=self.geom).order_by('-zonetype__id', 'id'):\n # print 'zone: ', zone\n overlap = zone.geom.intersection(self.geom).area\n if overlap > min_overlap:\n zones.append(zone)\n return zones", "title": "" }, { "docid": "3c007bc352ec671fea955fcc0ef25f02", "score": "0.5508196", "text": "def get_ec2_instance_types(self):\n if self.get_format().startswith(\"6dof\"):\n return self.ec2_instance_types_cpu\n else:\n return self.ec2_instance_types_gpu", "title": "" }, { "docid": "d4954613d48ab4c979c497f7deac90cb", "score": "0.5508115", "text": "def get(self, request):\n result = ec2.list_availability_zones(request)\n return {\"items\": [u.to_dict() for u in result]}", "title": "" }, { "docid": "4c369e233fcd9e188bcbd448acaaa397", "score": "0.54932487", "text": "def instances_sorted_by_avail_ip(instances):\n ips = get_private_ips_for_instances(instances)\n ips_to_instances = zip(ips, instances)\n insts = sorted(ips_to_instances, key=lambda x: x[0])\n insts = [x[1] for x in insts]\n return sorted(insts, key=lambda x: x.placement['AvailabilityZone'])", "title": "" }, { "docid": "9832732cd7c58b91822a3cd7650563bf", "score": "0.5417001", "text": "def list_availability_zones(dummy_arg):\n ec2 = get_ec2_client()\n header = ['Name', 'State', 'Message', 'Region']; rows = []\n for az in ec2.describe_availability_zones()['AvailabilityZones']:\n row = [az['ZoneName'], az['State'], az['Messages'], az['RegionName']]\n rows.append(row)\n output_table(params, header, rows)", "title": "" }, { "docid": "4ae5db0493855e67b5aec158ca5ecfad", "score": "0.53724056", "text": "def describe_instance_type_offerings(self, filters=None, location_type=None):\n kwargs = {\"Filters\": filters} if filters else {}\n if location_type:\n kwargs[\"LocationType\"] = location_type\n return list(self._paginate_results(self._client.describe_instance_type_offerings, **kwargs))", "title": "" }, { "docid": "b9fbd7f88a07abb2a0ec35dec87ab1ee", "score": "0.53620625", "text": "def load_all_instances(instance_type: Types) -> ItemsView[Any, Any]:\n return CommonResourceUtils.get_instance_manager(instance_type).types.items()", "title": "" }, { "docid": "1b93bac1d90f8d0d91619ff41473920c", "score": "0.5359447", "text": "def _get_availability_zone(self):\n if \"AvailabilityZone\" in self._heat_resource.properties:\n return self._heat_resource.properties[\"AvailabilityZone\"]", "title": "" }, { "docid": "17f5a27fd1ede64bf83a8e7783e09a92", "score": "0.53401816", "text": "def get_zone_elements(zone_type, coord1, coord2, board):\n elements = []\n if zone_type == \"col\":\n for row in range(0, 9):\n if board[row][coord2] != 0:\n elements.append(board[row][coord2])\n elif zone_type == \"row\":\n for col in range(0, 9):\n if board[coord1][col] != 0:\n elements.append(board[coord1][col])\n else:\n square_coords = generate_square_coords()\n for square in square_coords:\n if (square[\"row_begin\"] <= coord1 <= square[\"row_end\"]) and (\n square[\"col_begin\"] <= coord2 <= square[\"col_end\"]):\n for row in range(square[\"row_begin\"], square[\"row_end\"] + 1):\n for col in range(square[\"col_begin\"],\n square[\"col_end\"] + 1):\n if 
board[row][col] != 0:\n elements.append(board[row][col])\n break\n\n return elements", "title": "" }, { "docid": "80b24e09424ee1ac06be64a708b06571", "score": "0.5336934", "text": "def parkeszones(type, reference, test, units, numeric=False):\n\n # obtain zones from a Clarke reference object\n _zones = _Parkes(\n type,\n reference,\n test,\n units,\n None,\n None,\n None,\n None,\n None,\n True,\n False,\n \"#000000\",\n \"auto\",\n \"auto\",\n None,\n None,\n )._calc_error_zone()\n\n if numeric:\n return _zones\n else:\n labels = [\"A\", \"B\", \"C\", \"D\", \"E\"]\n return [labels[i] for i in _zones]", "title": "" }, { "docid": "9922d154442eda257240d6d6ca8623c6", "score": "0.5320543", "text": "def time_zone_choices(self):\n tz_list = []\n regionless = []\n current_region = ''\n region_list = []\n\n for tz in pytz.common_timezones:\n try:\n region, zone = tz.split('/', 1)\n\n if region in self.excluded_regions:\n continue\n\n if region != current_region:\n if region_list:\n tz_list.append((current_region, region_list))\n region_list = []\n current_region = region\n\n region_list.append((tz, zone.replace('_', ' ')))\n\n except ValueError:\n # This didn't have a region, just collect these for now.\n if region_list:\n tz_list.append((current_region, region_list))\n region_list = []\n current_region = ''\n regionless.append((tz, tz))\n\n if region_list:\n tz_list.append((current_region, region_list))\n\n # Re-combine the lists\n if self.regionless_first:\n tz_list = [(self.regionless_label, regionless)] + tz_list\n else:\n tz_list += [(self.regionless_label, regionless)]\n\n return tz_list", "title": "" }, { "docid": "0620096a666c5739528d13d7b7fb9fd8", "score": "0.53132147", "text": "def get_zones(self) -> Union[List[str], None]:\n # read the original value passed by the command\n zones = self.raw_param.get(\"zones\")\n # try to read the property value corresponding to the parameter from the `agentpool` object\n if self.agentpool and self.agentpool.availability_zones is not None:\n zones = self.agentpool.availability_zones\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return zones", "title": "" }, { "docid": "ce2eea28dbc1cdad338b3c5f0aabd5e7", "score": "0.5295803", "text": "def list_catalog(self) -> Dict[str, Any]:\n response = _try_request_with_backoff('get',\n f'{API_ENDPOINT}/instance-types',\n headers=self.headers)\n return response.json().get('data', [])", "title": "" }, { "docid": "206a3b8fbbf54688cc85deee280eccbf", "score": "0.52600414", "text": "def fetch_active_instance_reservations(cls, client, filters=[]):\n filters.append({\n 'Name': 'state',\n 'Values': ['active']\n })\n resp = client.describe_reserved_instances(Filters=filters)\n\n for ri in resp['ReservedInstances']:\n assert ri['Scope'] == 'Availability Zone', \\\n \"Unsupported reservation scope: {}\".format(ri['Scope'])\n\n return resp['ReservedInstances']", "title": "" }, { "docid": "856063b9ede7e44ce8e4d94a2030af35", "score": "0.5219543", "text": "def neighbouring(self, zonetypes=None, min_overlap=25000):\n if not self.has_geom():\n return []\n if zonetypes is None:\n zonetypes = [self.zonetype.id]\n \"\"\"\n zones = []\n for zone in Zone.objects.filter(zonetype__in=zonetypes, geom__overlaps=self.geom).order_by('zonetype'):\n if self.zonetype_id==1 and zone.zonetype_id!=1: # \n continue\n overlap = zone.geom.intersection(self.geom).area\n if overlap < min_overlap:\n zones.append(zone)\n \"\"\"\n if zonetypes:\n # zones = Zone.objects.filter(zonetype__id__in=zonetypes, 
geom__distance_lte=(self.geom, 100)).order_by('-zonetype__id', 'id')\n zones = Zone.objects.filter(zonetype__id__in=zonetypes, geom__distance_lte=(self.geom, 100)).exclude(id=self.id).order_by('-zonetype__id', 'name')\n else:\n # zones = Zone.objects.filter(geom__distance_lte=(self.geom, 100)).order_by('-zonetype__id', 'id')\n zones = Zone.objects.filter(geom__distance_lte=(self.geom, 100)).exclude(id=self.id).order_by('-zonetype__id', 'name')\n neighbouring_zone_dict = defaultdict(list)\n for zone in zones:\n if zone.zonetype.id == 6: \n neighbouring_zone_dict['0'].append(zone)\n else:\n neighbouring_zone_dict[zone.code[0]].append(zone)\n neighbouring_zone_list = []\n for key in ['M', 'R', 'Q', 'S', 'Z', 'C','0']:\n neighbouringlist = neighbouring_zone_dict.get(key, None)\n if neighbouringlist:\n neighbouring_zone_list.append((\"%s %s\" % (zone_prefix_dict[key][2], _(\"neighbouring\")), zone_prefix_dict[key][1], neighbouringlist))\n return neighbouring_zone_list", "title": "" }, { "docid": "d3814d2e7c4e3a509b25e01c49389b1e", "score": "0.5204109", "text": "def get_ec2_instances():\n instances = [i for instance_list in\n [x['Instances'] for x in ec2.describe_instances()['Reservations']]\n for i in instance_list]\n app.log.debug('Got some instances')\n return [x['InstanceId'] for x in instances\n if sum([(i['Key'] == 'KeepAwake') for i in x['Tags']]) == 0]", "title": "" }, { "docid": "e6836009f33ee3d57d0dfc7698c9c925", "score": "0.51827836", "text": "def instance_data(self):\n r = self.provider.azure_client.list_vm_types()\n return r", "title": "" }, { "docid": "d017ac4e18bc198e1d7721a7158090a1", "score": "0.5163803", "text": "def supported_time_zones(self):\n return_type = ClientResult(self.context, ClientValueCollection(TimeZoneInformation))\n qry = FunctionQuery(self, \"supportedTimeZones\", None, return_type)\n self.context.add_query(qry)\n return return_type", "title": "" }, { "docid": "cd40a8fc3b5ebd530491293faa41ed40", "score": "0.51633745", "text": "def _sorted_instances(reservations):\n instances = itertools.chain.from_iterable(res.instances for res in reservations)\n return sorted(instances, cmp=lambda a, b: cmp((_get_instance_environment(a), a.state), (_get_instance_environment(b), b.state)))", "title": "" }, { "docid": "543aaa5c4cb070aab5eb42526a2f6f4d", "score": "0.51356727", "text": "def _find_unused_or_unreserved(self):\n # Determine what AZs to look through for instances\n reserved_azs = [ri['AvailabilityZone'] for ri in self.reservations]\n log.debug(\"Filtering through AZ: {}\".format(reserved_azs))\n\n # Construct a request to find all EC2 instances that are not stopped or\n # terminated\n all_instances = [i for i in self.ec2.instances.filter(**{\n 'Filters': [\n {\n 'Name': 'instance-state-name',\n 'Values': [\n 'pending',\n 'running',\n 'shutting-down',\n 'stopping',\n ]\n }\n ]\n })]\n\n # Order instances based on their creation time\n all_instances = sorted(all_instances, key=lambda i: i.launch_time)\n\n # Mark all reservations as unused initially\n self._unused = {}\n for res in self.reservations:\n az = res['AvailabilityZone']\n instance_type = res['InstanceType']\n\n if az not in self._unused:\n self._unused[az] = {}\n\n if instance_type not in self._unused[az]:\n self._unused[az][instance_type] = 0\n\n self._unused[az][instance_type] += res['InstanceCount']\n\n # Determine most recent datetime for which instances started on or\n # before should be considered for the unreserved instance report.\n if self.unreserved_days is not None:\n launched_before = 
datetime.now(UTC()) \\\n - timedelta(int(self.unreserved_days))\n\n # Iterate through all instances and tick off the ones that are\n # reserved\n self._unreserved = []\n for instance in all_instances:\n az = instance.placement['AvailabilityZone']\n instance_type = instance.instance_type\n\n try:\n self._unused[az][instance_type] -= 1\n\n if self._unused[az][instance_type] == 0:\n # This reservation is fully in-use\n del self._unused[az][instance_type]\n\n if self._unused[az] == 0:\n # No reservations remaining in AZ\n del self._unused[az]\n\n except KeyError:\n # No matching reservation\n if self.unreserved_days is not None \\\n and instance.launch_time <= launched_before:\n # Old enough to be reported on\n self._unreserved.append(instance)", "title": "" }, { "docid": "9bb0810f9da8fc750cbc9216618d735b", "score": "0.5064745", "text": "def _GetZones(self):\n return utils.AllNames(self._zones_api.list, self._project)", "title": "" }, { "docid": "8d1b5879c280fa1ce926a7d72e0d4467", "score": "0.5040544", "text": "def list_resources(self, gce_type, format='json', extra_args=None):\n needs_zone = gce_type in self.__LIST_NEEDS_ZONE\n args = ['list'] + (extra_args or [])\n cmdline = self.build_gcloud_command_args(\n gce_type, args, format=format, project=self.__project,\n zone=self.__zone if needs_zone else None)\n return self.run(cmdline, trace=self.trace)", "title": "" }, { "docid": "a858c8685549c2714fe5ed07a1054d7e", "score": "0.50018376", "text": "def get_viable_hosts(self, context, instance_uuid=None, instance=None,\n flavor=None, vol_type=None, image=None,\n msg_dict=None):\n # If we don't have the instance yet, look it up\n if instance_uuid and not instance:\n instance = db.instance_get_by_uuid(context, instance_uuid)\n LOG.debug(\"Instance***: %s.\" % instance.__dict__)\n inst_d = instance.__dict__ if instance is not None else \"n/a\"\n # Log the instance, other params handled by method decorator\n LOG.debug(\"Incoming instance=%s\" % inst_d)\n\n db_hosts = su.get_compute_nodes_from_DB(context)\n if not db_hosts and msg_dict:\n msg_dict['messages'].append(_(\"No hosts are managed.\"))\n\n # Check to see if the instance already is hosted (e.g. migrate op)\n if not instance or not instance.get('host'):\n # In 1.2.1, there is no flavor option or volume type that\n # would support a request to only one type of boot storage.\n msg = _(\"Since the request is in regard to a deploy operation \"\n \"or the instance was not provided for filtering, all \"\n \"KVM hosts will be considered viable. To deploy to hosts \"\n \"with shared storage only, a targeted deploy to a \"\n \"specific host must be requested.\")\n LOG.info(msg)\n if msg_dict:\n msg_dict['messages'].append(msg)\n db_hosts = su.get_compute_nodes_from_DB(context)\n return dict([[x, {'host': x}] for x in db_hosts])\n\n # The instance is currently hosted - either onboarded or deployed.\n # We want to return hosts that could host the instance from\n # a storage standpoint. 
First we get the storage CONF information\n # from the source host.\n # Note: We don't have to get the disk information for the instance\n # in 1.2.1 because we can assume that there is only the\n # single boot disk and it is using storage from a single\n # host location - described by the storage config props here.\n # Note: We don't filter out the current host.\n source_props = self._get_storage_conf_props(instance['host'])\n\n if source_props[CONF_STORAGE_TYPE] != 'nfs':\n msg = _(\"The source host, %(host)s, for the instance is not \"\n \"providing supported shared storage, so instance \"\n \"'%(instance)s' cannot be migrated to another host. The \"\n \"host storage type is '%(type)s'.\") %\\\n dict(host=instance['host'], instance=instance['name'],\n type=source_props[CONF_STORAGE_TYPE])\n LOG.info(msg)\n if msg_dict:\n msg_dict['messages'].append(msg)\n return {}\n\n # Loop over hosts, matching those with like storage.\n ret_dict = {}\n log_msgs = []\n conf_files = []\n for db_host in db_hosts:\n prefix = _(\"Host '%s' is filtered out because it does \"\n \"not \") % db_host\n host_props = self._get_storage_conf_props(db_host)\n if host_props[CONF_STORAGE_TYPE] != 'nfs':\n log_msgs.append(prefix + _(\"use NFS for ephemeral storage.\"))\n conf_files.append(host_props['conf_file'])\n elif host_props[CONF_SERVER] != source_props[CONF_SERVER] or\\\n host_props[CONF_SHARE] != source_props[CONF_SHARE]:\n log_msgs.append(prefix + _(\"specify the same network server \"\n \"and share as the source host.\"))\n conf_files.append(host_props['conf_file'])\n elif host_props[CONF_HOST_PATH] != source_props[CONF_HOST_PATH]:\n log_msgs.append(prefix + _(\"specify the same host mount path \"\n \"as the source host.\"))\n conf_files.append(host_props['conf_file'])\n else:\n ret_dict[db_host] = {'host': db_host}\n\n # TODO: It would be nice to check for sufficient space on the\n # storage provider to contain the image or the VM disk\n # like we do for the HMC version of this API. But current\n # NFS space is not a host metric being flowed back to the\n # management server, and in a follow-on release, the NFS\n # provider (or GPFS provider) will be managed and the free\n # space metrics would come from the provider rather than the\n # host, so this is left as a future exercise.\n LOG.debug(\"Returning viable hosts for instance '%s': %s\" %\n (instance['name'], ret_dict))\n if log_msgs:\n LOG.info(_(\"Hosts excluded: %(msg)s. 
Check the following \"\n \"management server configuration files for details: \"\n \"targets=%(target)s, source=%(source)s\") %\n dict(msg=log_msgs, target=conf_files,\n source=source_props['conf_file']))\n return ret_dict", "title": "" }, { "docid": "b88ab0edb3c4eb5c7a62a709d92969ba", "score": "0.5000791", "text": "def getInstanceInfo(self, filterList):\n # select the regions based upon the regions the user passed in\n self.getRegionsToSearch()\n\n # if we get here there are customer regions that match the regions we need to search in\n # so we need to loop through all the regions to search, looking for the instances that match the filters\n for regionInfo in self.customerRegionsToSearch:\n if regionInfo['InstanceType'] == \"AWS\":\n self.getInstancesFromAWS(filterList)\n elif regionInfo['InstanceType'] == \"VM\":\n self.readInstanceConfigFile(regionInfo['configFileName'])\n self.applyVMFilters(filterList)\n else:\n print(\"Error: Unknown InstanceType({}) for region: {}\".format(regionInfo['InstanceType'],\n regionInfo['RegionName']),\n regionInfo['RegionName'])\n\n for theInstanceName in self.lastReturnedListOfInstances:\n anInst = self.lastReturnedListOfInstances[theInstanceName]\n\n # prepend the path if we have it. And if so, then the key doesn't have the extension either so add it\n retKeyList = []\n if anInst[\"KeyName\"]:\n for aKey in anInst[\"KeyName\"]:\n retKeyList.append(str(aKey))\n\n # store these away if needed later\n self.lastReturnedListOfKeyAndPaths.append(str(aKey))\n\n # and see if this instance has a jumpserver associated with it and add that jump servers\n # keyName to the list\n if \"JumpServer\" in anInst:\n jumpServerInfo = self.getJumpServerInfo(theInstanceName)\n\n if jumpServerInfo:\n for tmpKey in jumpServerInfo[\"KeyName\"]:\n # and store this away if needed later\n self.lastReturnedListOfKeyAndPaths.append(str(tmpKey))\n\n retKeyList = self.determineWhereKeyExists(retKeyList, self.keysDirectory)\n\n # create a named tuple to return\n InstanceDetails = namedtuple('InstanceDetails', 'PublicIpAddress, PublicDnsName, PublicPort, '\n 'PrivateIpAddress, PrivateDnsName, PrivatePort, ' \n 'Gateway, '\n 'InstanceName, DestLogin, DestKey, Shard, Tags')\n\n self.lastReturnedListOfIPAddresses.append(InstanceDetails(\n PublicIpAddress=(anInst[\"PublicIpAddress\"] if \"PublicIpAddress\" in anInst else ''),\n PublicDnsName=(anInst[\"PublicDnsName\"] if \"PublicDnsName\" in anInst else ''),\n PublicPort=(anInst[\"PublicPort\"] if \"PublicPort\" in anInst else ''),\n PrivateIpAddress=(anInst[\"PrivateIpAddress\"] if \"PrivateIpAddress\" in anInst else ''),\n PrivateDnsName=(anInst[\"PrivateDnsName\"] if \"PrivateDnsName\" in anInst else ''),\n PrivatePort=(anInst[\"PrivatePort\"] if \"PrivatePort\" in anInst else ''),\n Gateway=(anInst[\"JumpServer\"] if \"JumpServer\" in anInst else ''),\n InstanceName=anInst[\"TagsDict\"][\"Name\"],\n DestLogin=anInst[\"UserLogin\"] if \"UserLogin\" in anInst else '',\n DestKey=retKeyList,\n Shard=anInst[\"TagsDict\"][\"Shard\"] if \"Shard\" in anInst[\"TagsDict\"] else '',\n Tags=anInst[\"TagsDict\"]))\n\n\n return self.lastReturnedListOfIPAddresses", "title": "" }, { "docid": "a3bdf9750d5a958efa04632b384eabdc", "score": "0.499124", "text": "def ListZoneFunc(self):\n if self._IsUsingAtLeastApiVersion('v1beta14'):\n return self._zone_operations_api.list\n return self._global_operations_api.list", "title": "" }, { "docid": "4afe47420fca187a72f89d4ea01120cd", "score": "0.49879673", "text": "def get_gcp_instance_responses(project_id: str, 
zones: Optional[List[Dict]], compute: Resource) -> List[Resource]:\n if not zones:\n # If the Compute Engine API is not enabled for a project, there are no zones and therefore no instances.\n return []\n response_objects: List[Resource] = []\n for zone in zones:\n req = compute.instances().list(project=project_id, zone=zone['name'])\n res = req.execute()\n response_objects.append(res)\n return response_objects", "title": "" }, { "docid": "fae4c22f4d1dbec63c70c55c0b5d4e9f", "score": "0.49509126", "text": "def get_child_instances(source: List[AnyInstanceType], _type: Optional[str] = None) -> List[str]:\n if _type is None:\n return [item.instance for item in source]\n\n return [\n item.instance\n for item in source\n if item.type == _type\n ]", "title": "" }, { "docid": "a54fb43c1f01dea81c5507db8c65e88a", "score": "0.4949646", "text": "def list_instances(self):\n lpar_instances = self._operator.list_lpar_instances()\n # We filter out instances that haven't been created\n # via OpenStack. Notice that this is fragile and it can\n # be improved later.\n instances = [instance for instance in lpar_instances\n if re.search(r'^instance-[0-9]{8}$', instance)]\n return instances", "title": "" }, { "docid": "efc9ecf750ebe613d5563cac56cb4b67", "score": "0.4929883", "text": "def __list_instances( self ):\n name = self.ctx.to_aws_name( self.role( ) )\n reservations = self.ctx.ec2.get_all_instances( filters={ 'tag:Name': name } )\n instances = [ i for r in reservations for i in r.instances if i.state != 'terminated' ]\n instances.sort( key=lambda _: _.launch_time + _.id )\n return name, instances", "title": "" }, { "docid": "4bbea077339f94b2ef86e274bc82529a", "score": "0.49184448", "text": "def _retrieve_instance_region():\n region = None\n valid_regions = ['ap-northeast-1', 'ap-northeast-2', 'ap-southeast-1', 'ap-southeast-2',\n 'ap-south-1', 'ca-central-1', 'eu-central-1', 'eu-north-1',\n 'eu-west-1', 'eu-west-2', 'eu-west-3', 'sa-east-1',\n 'us-east-1', 'us-east-2', 'us-west-1', 'us-west-2']\n\n url = \"http://169.254.169.254/latest/dynamic/instance-identity/document\"\n response = requests_helper(url, timeout=0.1)\n\n if response is not None:\n response_json = json.loads(response.text)\n\n if response_json['region'] in valid_regions:\n region = response_json['region']\n\n return region", "title": "" }, { "docid": "f162b9c7c7d76b656daf306553db7610", "score": "0.49152285", "text": "def get_all_instance_status(self, zone_id=None):\n instance_status = []\n params = {\n 'Action': 'DescribeInstanceStatus'\n }\n\n if zone_id is not None:\n params.update({'ZoneId': zone_id})\n\n for resp in self.get(params, paginated=True):\n for item in resp['InstanceStatuses']['InstanceStatus']:\n instance_status.append(\n InstanceStatus(item['InstanceId'], item['Status']))\n return instance_status", "title": "" }, { "docid": "99e844248e91ad009d1ddc9a28a46f72", "score": "0.4904448", "text": "def get_enterprise_instances(env_type: Optional[str] = None,\n instance_alias_regex: Optional[str] = None,\n instance_source: Optional[str] = None,\n instance_type: Optional[str] = None,\n name_regex: Optional[str] = None,\n net_type: Optional[str] = None,\n output_file: Optional[str] = None,\n search_key: Optional[str] = None,\n status: Optional[str] = None,\n tid: Optional[int] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEnterpriseInstancesResult:\n __args__ = dict()\n __args__['envType'] = env_type\n __args__['instanceAliasRegex'] = instance_alias_regex\n __args__['instanceSource'] = instance_source\n 
__args__['instanceType'] = instance_type\n __args__['nameRegex'] = name_regex\n __args__['netType'] = net_type\n __args__['outputFile'] = output_file\n __args__['searchKey'] = search_key\n __args__['status'] = status\n __args__['tid'] = tid\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('alicloud:dms/getEnterpriseInstances:getEnterpriseInstances', __args__, opts=opts, typ=GetEnterpriseInstancesResult).value\n\n return AwaitableGetEnterpriseInstancesResult(\n env_type=pulumi.get(__ret__, 'env_type'),\n id=pulumi.get(__ret__, 'id'),\n ids=pulumi.get(__ret__, 'ids'),\n instance_alias_regex=pulumi.get(__ret__, 'instance_alias_regex'),\n instance_source=pulumi.get(__ret__, 'instance_source'),\n instance_type=pulumi.get(__ret__, 'instance_type'),\n instances=pulumi.get(__ret__, 'instances'),\n name_regex=pulumi.get(__ret__, 'name_regex'),\n names=pulumi.get(__ret__, 'names'),\n net_type=pulumi.get(__ret__, 'net_type'),\n output_file=pulumi.get(__ret__, 'output_file'),\n search_key=pulumi.get(__ret__, 'search_key'),\n status=pulumi.get(__ret__, 'status'),\n tid=pulumi.get(__ret__, 'tid'))", "title": "" }, { "docid": "b6517f25a6fcb977f0b9f02696e62ea3", "score": "0.49039537", "text": "def get_all_types(inactive=0):\n return db.instance_type_get_all(context.get_admin_context(), inactive)", "title": "" }, { "docid": "208e9a14903de4352bb3b7ee9f58e067", "score": "0.49023286", "text": "def get_access_zones_list(self):\n try:\n access_zones_list = (self.zone_api.list_zones()).to_dict()\n LOG.info(\"Got Access zones from PowerScale cluster %s\",\n self.module.params['onefs_host'])\n return access_zones_list\n except Exception as e:\n error_msg = (\n 'Get Access zone List for PowerScale cluster: {0} failed'\n 'with error: {1}' .format(\n self.module.params['onefs_host'],\n utils.determine_error(e)))\n LOG.error(error_msg)\n self.module.fail_json(msg=error_msg)", "title": "" }, { "docid": "f8271b540b0666e598014524e84cde46", "score": "0.48961556", "text": "def get_instances(self):\n conn = self.get_nova_connection()\n reservations = conn.get_all_instances()\n instances = []\n for reservation in reservations:\n for instance in reservation.instances:\n instances.append(instance)\n return instances", "title": "" }, { "docid": "dfd4e1e3f515e65ebffac384527b9ef1", "score": "0.48695084", "text": "def get_availability_zone(libcloud_image, seed_profile):\n available_zones = {}\n for zone in libcloud_image.driver.ex_list_availability_zones():\n available_zones.update({\n zone.name: zone})\n\n if seed_profile.region not in available_zones.keys():\n raise SeedAvailibiltyZoneDoesNotExist(\"%s [ %s ]\" \n % (seed_profile.region, ','.join(available_zones.keys())))\n logger.debug(\"Using %s region\" % seed_profile.region)\n return available_zones.get(seed_profile.region)", "title": "" }, { "docid": "999c9694894f7d172787a2a76ab0d644", "score": "0.48513117", "text": "def list_subzones(self, zonetype_id=None):\n if zonetype_id:\n subzones = self.zones.filter(zonetype_id=zonetype_id).order_by('name')\n else:\n subzones = self.zones.all().order_by('name')\n subzone_dict = defaultdict(list)\n for subzone in subzones:\n subzone_dict[subzone.code[0]].append(subzone)\n subzone_list = []\n for key in ['R', 'Q', 'S', 'Z']:\n sublist = subzone_dict.get(key, None)\n if sublist:\n subzone_list.append((zone_prefix_dict[key][2], zone_prefix_dict[key][1], sublist))\n return subzone_list", "title": "" }, { "docid": "6448a6d06d388708bf397c44ee3fd065", "score": "0.4851072", 
"text": "def get_vpc_instances(conn, vpc):\n\n instances = []\n reservations = conn.get_all_reservations(filters = {'vpc-id': vpc.id})\n\n for r in reservations:\n instances.append(r.instances[0])\n\n return instances", "title": "" }, { "docid": "9d90415112db37bfe0a94bdabe51f0f5", "score": "0.48473388", "text": "def ValidateInstanceInZone(instances, zone):\n invalid_instances = [inst.SelfLink()\n for inst in instances if inst.zone != zone]\n if any(invalid_instances):\n raise exceptions.InvalidArgumentException(\n 'instances', 'The zone of instance must match the instance group zone. '\n 'Following instances has invalid zone: %s'\n % ', '.join(invalid_instances))", "title": "" }, { "docid": "04d3a50c22026235cc63f2706b90a8f7", "score": "0.4818513", "text": "def _get_list_param_value(self, type_name: str, param_header: str) -> List[str]:\n fzf = Pyfzf()\n\n if type_name == \"List<AWS::EC2::AvailabilityZone::Name>\":\n with Spinner.spin(message=\"Fetching AvailabilityZones ...\"):\n response = self.ec2.client.describe_availability_zones()\n response_list = response[\"AvailabilityZones\"]\n fzf.process_list(response_list, \"ZoneName\", empty_allow=True)\n elif type_name == \"List<AWS::EC2::Instance::Id>\":\n return list(\n self.ec2.get_instance_id(multi_select=True, header=param_header)\n )\n elif type_name == \"List<AWS::EC2::SecurityGroup::GroupName>\":\n return list(\n self.ec2.get_security_groups(\n multi_select=True, return_attr=\"name\", header=param_header\n )\n )\n elif type_name == \"List<AWS::EC2::SecurityGroup::Id>\":\n return list(\n self.ec2.get_security_groups(multi_select=True, header=param_header)\n )\n elif type_name == \"List<AWS::EC2::Subnet::Id>\":\n return list(self.ec2.get_subnet_id(multi_select=True, header=param_header))\n elif type_name == \"List<AWS::EC2::Volume::Id>\":\n return list(self.ec2.get_volume_id(multi_select=True, header=param_header))\n elif type_name == \"List<AWS::EC2::VPC::Id>\":\n return list(self.ec2.get_vpc_id(multi_select=True, header=param_header))\n elif type_name == \"List<AWS::Route53::HostedZone::Id>\":\n self.route53.set_zone_id(multi_select=True)\n return self.route53.zone_ids\n return list(\n fzf.execute_fzf(multi_select=True, empty_allow=True, header=param_header)\n )", "title": "" }, { "docid": "e0d5eb2fa7135627c645deaf7604db5b", "score": "0.4809683", "text": "def zones(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"zones\")", "title": "" }, { "docid": "e0d5eb2fa7135627c645deaf7604db5b", "score": "0.4809683", "text": "def zones(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"zones\")", "title": "" }, { "docid": "eac3f2b090c48b9521ade9f85bf42738", "score": "0.4804236", "text": "def scan_region(region):\n\n client = SESSION.client('ec2', region_name=region)\n\n instances = []\n paginator = client.get_paginator('describe_instances')\n\n for page in paginator.paginate():\n for res in page['Reservations']:\n for inst in res['Instances']:\n instance_map = {}\n instance_map[\"id\"] = inst['InstanceId']\n instance_map[\"type\"] = inst['InstanceType']\n instances.append(instance_map)\n\n print(f'Instances found: {len(instances)}')\n\n return instances", "title": "" }, { "docid": "085dae4521cda82f2b0d6983eea81094", "score": "0.4802557", "text": "def _find_instance_elbs(self, instances):\n all_elbs = yield self.thread(self.elb_conn.get_all_load_balancers)\n elbs_with_members = []\n\n for instance in instances:\n elbs = filter(lambda lb: instance in [i.id for i in lb.instances],\n all_elbs)\n self.log.debug('%s is a member 
of %s' % (instance, elbs))\n elbs_with_members.extend(elbs)\n\n raise gen.Return(elbs_with_members)", "title": "" }, { "docid": "54bf244a4586e5e175ce45b935792dd1", "score": "0.47986755", "text": "def get_instances():\n filters = [\n {\n 'Name': 'instance-state-name',\n 'Values': ['stopped']\n }\n ]\n \n instances = EC2.instances.filter(Filters=filters)\n \n Instances = []\n \n for instance in instances:\n Instances.append(instance.id)\n \n return Instances", "title": "" }, { "docid": "d2b6e574d3ce706924f9f18c62573b31", "score": "0.47970802", "text": "async def _fetch_zones(self) -> set[int]:\n if self._zones is None:\n available_stations = await self._controller.get_available_stations()\n self._zones = {\n zone\n for zone in range(1, available_stations.stations.count + 1)\n if available_stations.stations.active(zone)\n }\n return self._zones", "title": "" }, { "docid": "84372b5b39600e3a75d9f43d700643ca", "score": "0.47910786", "text": "def zones(self) -> Optional[Sequence['outputs.ClusterZoneResponse']]:\n return pulumi.get(self, \"zones\")", "title": "" }, { "docid": "d9e80ced935182bb2e398a3dd0cb86e2", "score": "0.47887948", "text": "def _check_instance_image(self, env):\n # Overload Project._check_instance_image()\n # Instanciate a new CloudCli\n cloud_cli = CloudCli(env.cloud, bin_path=self.cloud_tools_bin_path)\n\n # Node types list available for this Cloud vendor\n node_types = ['postgres_server', 'pem_server', 'hammerdb_server',\n 'barman_server', 'pooler_server']\n\n # Check instance type and image availability\n for instance_type in self._get_instance_types(node_types):\n with AM(\n \"Checking instance type %s availability in %s\"\n % (instance_type, env.azure_region)\n ):\n cloud_cli.check_instance_type_availability(\n instance_type, env.azure_region\n )\n # Check availability of image in target region\n with AM(\n \"Checking image %s:%s:%s availability in %s\"\n % (\n self.terraform_vars['azure_publisher'],\n self.terraform_vars['azure_offer'],\n self.terraform_vars['azure_sku'],\n env.azure_region\n )\n ):\n cloud_cli.cli.check_image_availability(\n self.terraform_vars['azure_publisher'],\n self.terraform_vars['azure_offer'],\n self.terraform_vars['azure_sku'],\n env.azure_region\n )", "title": "" }, { "docid": "557234e5eb8e4f95a967e10bdaed7468", "score": "0.4776339", "text": "def get_instance_region():\n # TODO: XXX This shouldn't get called if we're not on EC2.\n zone = boto.utils.get_instance_metadata().get('placement', {}).get('availability-zone', None)\n if zone is None:\n get_logger('krux_boto').warn('get_instance_region failed to get the local instance region')\n raise Error('get_instance_region failed to get the local instance region')\n return zone.rstrip(string.ascii_lowercase)", "title": "" }, { "docid": "6876360980553a7670385868eb9e56d0", "score": "0.47736615", "text": "def get_zone_tpu_types(tpu_api: discovery.Resource, project_id: str,\n zone: str) -> Optional[List[TPUSpec]]:\n\n location = 'projects/{}/locations/{}'.format(project_id, zone)\n rsp = tpu_api.projects().locations().acceleratorTypes().list(\n parent=location).execute()\n\n tpus = []\n for t in rsp['acceleratorTypes']:\n spec = gke_tpu_to_tpuspec(t['type'])\n if spec is None:\n continue\n tpus.append(spec)\n\n return tpus", "title": "" }, { "docid": "a5d86521c9dfb99f6328d9204c0e255d", "score": "0.47692528", "text": "def get_default_instance_type(self):\n kwargs = {\n \"Filters\": [\n {\"Name\": \"free-tier-eligible\", \"Values\": [\"true\"]},\n {\"Name\": \"current-generation\", \"Values\": 
[\"true\"]},\n ]\n }\n free_tier_instance_type = list(self._paginate_results(self._client.describe_instance_types, **kwargs))\n return free_tier_instance_type[0][\"InstanceType\"] if free_tier_instance_type else \"t3.micro\"", "title": "" }, { "docid": "b0a0bc78e580626faf96921b761cbcb6", "score": "0.47689226", "text": "def _treeizeAvailabilityZone(zone):\n AvailabilityZone = availability_zones.AvailabilityZone\n\n az = AvailabilityZone(zone.manager,\n copy.deepcopy(zone._info), zone._loaded)\n result = []\n\n # Zone tree view item\n az.zoneName = zone.zoneName\n az.zoneState = ('available'\n if zone.zoneState['available'] else 'not available')\n az._info['zoneName'] = az.zoneName\n az._info['zoneState'] = az.zoneState\n result.append(az)\n\n if getattr(zone, \"hosts\", None) and zone.hosts is not None:\n for (host, services) in zone.hosts.items():\n # Host tree view item\n az = AvailabilityZone(zone.manager,\n copy.deepcopy(zone._info), zone._loaded)\n az.zoneName = '|- %s' % host\n az.zoneState = ''\n az._info['zoneName'] = az.zoneName\n az._info['zoneState'] = az.zoneState\n result.append(az)\n\n for (svc, state) in services.items():\n # Service tree view item\n az = AvailabilityZone(zone.manager,\n copy.deepcopy(zone._info), zone._loaded)\n az.zoneName = '| |- %s' % svc\n az.zoneState = '%s %s %s' % (\n 'enabled' if state['active'] else 'disabled',\n ':-)' if state['available'] else 'XXX',\n state['updated_at'])\n az._info['zoneName'] = az.zoneName\n az._info['zoneState'] = az.zoneState\n result.append(az)\n return result", "title": "" }, { "docid": "bd47d5261e4f305fddf9324888d98062", "score": "0.47501805", "text": "def _check_elb_zones(self, elb):\n zones = yield self.thread(self.ec2_conn.get_all_zones)\n zone_names = {z.name for z in zones}\n\n enabled_zones = set(elb.availability_zones)\n\n if not zone_names.issubset(enabled_zones):\n self.log.warning('ELB \"%s\" is missing some AZ.' 
% elb.name)\n self.log.info('Enabling all zones: %s' % zone_names)\n yield self.thread(elb.enable_zones, zone_names)", "title": "" }, { "docid": "8f6bf3126e13d5f3be552e8556803af2", "score": "0.47472307", "text": "def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"zones\")", "title": "" }, { "docid": "8f6bf3126e13d5f3be552e8556803af2", "score": "0.47472307", "text": "def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"zones\")", "title": "" }, { "docid": "8f6bf3126e13d5f3be552e8556803af2", "score": "0.47472307", "text": "def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"zones\")", "title": "" }, { "docid": "8f6bf3126e13d5f3be552e8556803af2", "score": "0.47472307", "text": "def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"zones\")", "title": "" }, { "docid": "7d372f0439185aeda67d65aa9cda3cbc", "score": "0.47441226", "text": "def get_running_instances(compute, project, zone) -> list:\n result = compute.instances().list(project=project, zone=zone).execute()\n return result['items'] if 'items' in result else []", "title": "" }, { "docid": "f17a4fb9250fa3d80b968e54c8bd65a3", "score": "0.47439727", "text": "def _active_timezones(self) -> [str]:\n return [\n timezone for timezone in TIMEZONES if self._is_timezone_active(timezone)\n ]", "title": "" }, { "docid": "79033a6272eabd87f2febb6686b6d3d8", "score": "0.47230634", "text": "def instance_types_ondemand(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"instance_types_ondemand\")", "title": "" }, { "docid": "71fe16b8479ce1695ac87f39cfb0815b", "score": "0.47041547", "text": "def list_zones(self) -> bool:\n ret = self._call(\"/zones/list\", data={}, method=\"GET\")\n if ret.get(\"errcode\", 0) == 200:\n print(\"Cluster zones:\")\n for zone in ret.get(\"data\", []):\n print(\" - \", zone[\"name\"])\n print(\" \", zone[\"description\"])\n return True", "title": "" }, { "docid": "d90ea48374f8892b3ee4a66611815bcf", "score": "0.46911234", "text": "def availability_zone_count(self) -> int:\n return pulumi.get(self, \"availability_zone_count\")", "title": "" }, { "docid": "3edb9534a2def79b13555859abdea3c0", "score": "0.46790677", "text": "def list_topo_zones(self):\n # zones = Zone.objects.filter(zonetype_id=self.id)\n # zones = Zone.objects.filter(zonetype_id=self.id).values('id', 'code', 'name', 'slug')\n zones = Zone.objects.filter(zonetype_id=self.id).exclude(geom__isnull=True).values('id', 'code', 'name', 'slug').order_by('name')\n return split_topo_zones(zones)", "title": "" }, { "docid": "037cf137733d05e7af0e19c32d3235f0", "score": "0.46789682", "text": "def availability_zone(self) -> Optional[str]:\n return pulumi.get(self, \"availability_zone\")", "title": "" }, { "docid": "9ba79f4af2eeead8b4e1a8db8096f45a", "score": "0.46697846", "text": "def get_ec2_instances(ec2_client=get_ec2_client()):\n instances = []\n instance_page = ec2_client.describe_instances()\n instance_from_page = [instance for reservation in instance_page['Reservations']\n for instance in reservation['Instances']]\n instances = instances + instance_from_page\n while 'NextToken' in instance_page and instance_page['NextToken']:\n instance_page = ec2_client.describe_instances(NextToken=instance_page['NextToken'])\n instance_from_page = [instance for reservation in instance_page['Reservations']\n for instance in reservation['Instances']]\n instances = instances + 
instance_from_page\n return [EC2Instance(instance, ec2_client) for instance in instances]", "title": "" }, { "docid": "8b0614fa78f9470523fdfdb9128dfd6f", "score": "0.46621504", "text": "def list_zones(self):\n response = self.connection.request(\"/zones\")\n zones = self._to_zones(response.object)\n return zones", "title": "" }, { "docid": "7440c583edb0145c91c18cf91452bb3f", "score": "0.4661302", "text": "def zones(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zones\")", "title": "" }, { "docid": "7440c583edb0145c91c18cf91452bb3f", "score": "0.4661302", "text": "def zones(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zones\")", "title": "" }, { "docid": "7440c583edb0145c91c18cf91452bb3f", "score": "0.4661302", "text": "def zones(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zones\")", "title": "" }, { "docid": "1c3bfd4b9e7415c5da844d0caa02afa9", "score": "0.46612427", "text": "def get_az_ip(self, az):\n if not self.region_match_ip:\n self.region_match_ip = self.get_region_match_ip()\n ip_list = []\n for region, ip in self.region_match_ip.items():\n if region.startswith(az):\n ip_list.append(ip)\n\n return ip_list", "title": "" }, { "docid": "985379b51b0ba839fa2339653cb672a6", "score": "0.4653639", "text": "def Type(self):\n return _pcbnew.SEGZONE_List_Type(self)", "title": "" }, { "docid": "f8d6bb58c6a0604dbd29cad913ee3f8e", "score": "0.46532315", "text": "def get_zone_gpu_types(compute_api: discovery.Resource, project_id: str,\n zone: str) -> Optional[List[GPUSpec]]:\n\n rsp = compute_api.acceleratorTypes().list(project=project_id,\n zone=zone).execute()\n\n gpus = []\n\n for x in rsp['items']:\n gpu = gke_gpu_to_gpu(x['name'])\n if gpu is None:\n continue\n gpus.append(GPUSpec(gpu, int(x['maximumCardsPerInstance'])))\n\n return gpus", "title": "" }, { "docid": "579ba9530e9a6c0055ec09a929c7aada", "score": "0.46486983", "text": "def infer_ec2_instance_type():\n try:\n ec2_instance_type = requests.get(\n 'http://169.254.169.254/latest/meta-data/instance-type',\n timeout=0.01\n ).text\n except requests.exceptions.RequestException:\n logger.spam(\n \"Unable to retrieve metadata about ec2 instance, will not set ec2 instance type\"\n )\n ec2_instance_type = None\n return ec2_instance_type", "title": "" }, { "docid": "5189e129c6131d29002d771eac908eed", "score": "0.4646456", "text": "def get_availability_zone(region):\n return IntrinsicsSymbolTable.REGIONS.get(region)", "title": "" }, { "docid": "dd70bc3755e0c4bb8b73571d59ef2b14", "score": "0.46427134", "text": "def get_all_instance_ids(self, zone_id=None):\n return [x.instance_id for x in self.get_all_instance_status(zone_id)]", "title": "" }, { "docid": "18e2989c8c51222d4e1938a08895e991", "score": "0.46395287", "text": "def describe_instances(self, parameters, pending=False):\n instance_ids = []\n public_ips = []\n private_ips = []\n\n conn = self.open_connection(parameters)\n reservations = conn.get_all_instances()\n instances = [i for r in reservations for i in r.instances]\n for i in instances:\n if (i.state == 'running' or (pending and i.state == 'pending')) \\\n and i.key_name.startswith(parameters[self.PARAM_KEYNAME]):\n instance_ids.append(i.id)\n public_ips.append(i.public_dns_name)\n private_ips.append(i.private_dns_name)\n return public_ips, private_ips, instance_ids", "title": "" }, { "docid": "9feb238642bffda5a130249ddb80f9ca", "score": "0.46311224", "text": "def get_supported_architectures(self, instance_type):\n instance_info = self.get_instance_type_info(instance_type)\n 
return instance_info.supported_architecture()", "title": "" }, { "docid": "edad6a61503cba8b69d311b982cd29a6", "score": "0.46273875", "text": "def get_instances_for_account(account, region):\n instance_data = []\n session = boto3.session.Session(region_name=region)\n assume = rolesession.assume_crossact_audit_role(\n session, account['accountNum'], region)\n if assume:\n age_lookup = ImageAgeLookup(assume)\n membership_lookup = LookupInstanceMembership(assume)\n ec2 = assume.client('ec2')\n instances = ec2.describe_instances().get('Reservations')\n if not instances:\n instances = []\n for res in instances:\n for instance in res['Instances']:\n tags = {x['Key']: x['Value']\n for x in instance.get('Tags', [])} # Extract tags as a dict\n if not tags:\n tags = dict()\n instance_prof = instance.get('IamInstanceProfile')\n if instance_prof:\n profile_name = instance_prof['Arn'].split('/')[-1]\n else:\n profile_name = None\n\n instance_data.append(\n dict(Name=tags.get('Name'),\n CreateDate=json.dumps(\n instance['LaunchTime'], cls=MyEncoder),\n Region=region,\n InstanceId=instance['InstanceId'],\n State=instance['State']['Name'],\n AccountNum=account['accountNum'],\n IamInstanceProfile=profile_name,\n ImageId=instance.get('ImageId'),\n Membership=membership_lookup.lookup(\n instance['InstanceId'], tags),\n Runtime=runtime(instance),\n ImageAge=age_lookup.lookup(instance.get('ImageId')),\n AccountAlias=account.get('alias')))\n return instance_data", "title": "" }, { "docid": "f93a5ca0b9c74f3854f8aa76fc10b8f8", "score": "0.4625527", "text": "def get_zones(self):\n\n return self.zones", "title": "" }, { "docid": "5e6dd5b0fb97a58dcfd731dab273f1ed", "score": "0.46233281", "text": "def get_subnets_for_vpc(self,vpc_id):\n\n try:\n available_subnets = []\n for az in self._availability_zones:\n query_filter = [\n {\n 'Name': 'vpc-id',\n 'Values': [\n vpc_id,\n ]\n },\n {\n 'Name': 'availability-zone',\n 'Values': [\n az,\n ]\n }\n ]\n\n subnets = self._ec2_Client.describe_subnets(Filters = query_filter)['Subnets']\n\n for subnet in subnets:\n if(subnet['State'] == 'available'):\n available_subnets.append(subnet)\n\n return available_subnets\n\n except Exception as ex:\n print (\" Error retrieving subnets for VPC ID {}, error :{}\".format(vpc_id,ex))\n quit(1)", "title": "" }, { "docid": "5e6dd5b0fb97a58dcfd731dab273f1ed", "score": "0.46233281", "text": "def get_subnets_for_vpc(self,vpc_id):\n\n try:\n available_subnets = []\n for az in self._availability_zones:\n query_filter = [\n {\n 'Name': 'vpc-id',\n 'Values': [\n vpc_id,\n ]\n },\n {\n 'Name': 'availability-zone',\n 'Values': [\n az,\n ]\n }\n ]\n\n subnets = self._ec2_Client.describe_subnets(Filters = query_filter)['Subnets']\n\n for subnet in subnets:\n if(subnet['State'] == 'available'):\n available_subnets.append(subnet)\n\n return available_subnets\n\n except Exception as ex:\n print (\" Error retrieving subnets for VPC ID {}, error :{}\".format(vpc_id,ex))\n quit(1)", "title": "" }, { "docid": "05ed410ba7a888964089934bbed4b8da", "score": "0.46231505", "text": "def instance_types_ondemand(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_types_ondemand\")", "title": "" }, { "docid": "05ed410ba7a888964089934bbed4b8da", "score": "0.46231505", "text": "def instance_types_ondemand(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_types_ondemand\")", "title": "" } ]
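The _find_unused_or_unreserved passage above only describes its matching scheme in comments: mark every reservation as unused, walk the instances oldest-first, and tick reservations off per (availability zone, instance type) pair, reporting whatever is left over on both sides. A minimal self-contained sketch of that tick-off logic follows; the function name, the flat (az, type, count) and (az, type, launch_time) input shapes, and the Counter-based bookkeeping are assumptions made for illustration, not details taken from any passage.

from collections import Counter

def match_reservations(reservations, instances):
    # reservations: iterable of (az, instance_type, count) tuples -- a
    # simplified stand-in for the boto3 ReservedInstances structures above.
    # instances: iterable of (az, instance_type, launch_time) tuples.
    unused = Counter()
    for az, itype, count in reservations:
        unused[(az, itype)] += count
    unreserved = []
    # Oldest instances claim reservation slots first, mirroring the
    # launch_time sort in the passage.
    for az, itype, launch_time in sorted(instances, key=lambda inst: inst[2]):
        if unused[(az, itype)] > 0:
            unused[(az, itype)] -= 1  # this instance consumes one slot
        else:
            unreserved.append((az, itype, launch_time))
    return +unused, unreserved  # unary + drops the fully-used (zero) entries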
4166f43da3751428ef835a8d56b0b615
Create a quadtree of 'n' points.
[ { "docid": "7679c017510138d7ac6a406eafbc687e", "score": "0.0", "text": "def random(cls,n=50,**kwargs):\n motions=[Motion.random(n=2) for i in range(n)]\n return cls(motions,**kwargs)", "title": "" } ]
[ { "docid": "1ad95cfc250155d495868d7d4a817b52", "score": "0.6579787", "text": "def random(cls,n=50):\n return cls(QuadTree.random(n=n))", "title": "" }, { "docid": "0240cf3dd9442d3bd81eef06fb677339", "score": "0.5730425", "text": "def quad_children(x, y):\n for xo in range(2):\n for yo in range(2):\n yield (2 * x + xo, 2 * y + yo)", "title": "" }, { "docid": "3372e184c9c152e5026c819c18fbc66f", "score": "0.5709088", "text": "def draw_random_points(self, n):\n idcs = np.asarray([ self.rng.permutation(n) for i in range(self.gp.d) ])\n idcs = idcs.T\n \n box_size = (self.ranges[:,1] - self.ranges[:,0]) / n\n \n assert len(box_size) == self.gp.d\n \n uu = self.rng.uniform(0, 1, size=(n, self.gp.d))\n \n pts = self.ranges.T[0] + (uu + idcs) * box_size\n return pts", "title": "" }, { "docid": "000580afd7b968ab8ceba83430790b3e", "score": "0.5571477", "text": "def build_tree(array, n):\r\n if n % 2 == 0:\r\n middle = int(n / 2)\r\n else:\r\n middle = int((n - .5) / 2) \r\n \r\n root = Node(array[middle])\r\n array.pop(middle)\r\n\r\n for x in array[0:]:\r\n current_node = root\r\n while True:\r\n if x < current_node.value:\r\n if current_node.left is None:\r\n current_node.left = Node(x)\r\n break\r\n else:\r\n current_node = current_node.left\r\n\r\n # >= here is for good form due to the uniqueness of the integers\r\n if x >= current_node.value:\r\n if current_node.right is None:\r\n current_node.right = Node(x)\r\n break\r\n else:\r\n current_node = current_node.right\r\n\r\n return root", "title": "" }, { "docid": "eb7952105e702377a1644301f8cf6d64", "score": "0.5533014", "text": "def build_kdtree(points, leafsize):\n return KDTree(points,leaf_size =leafsize)", "title": "" }, { "docid": "c7bb0da0c6b59c6036a58e0148e35439", "score": "0.552189", "text": "def num_and_generate_trees(n): # type: (int) -> List[TreeNode[int]]\n\n def _generate_trees(start, end):\n # type: (int, int) -> List[TreeNode[int]]\n res = []\n if start > end:\n res.append(None)\n return res\n for i in range(start, end + 1): # i is root node\n left = _generate_trees(start, i - 1)\n right = _generate_trees(i + 1, end)\n\n for l in left:\n for r in right: # traverse all possible combination\n root = TreeNode(i)\n root.left = l\n root.right = r\n res.append(root)\n return res\n\n if n == 0:\n return []\n else:\n return _generate_trees(1, n)", "title": "" }, { "docid": "d5e0f8a6c4180790813e2d6af7687933", "score": "0.5510055", "text": "def generateTrees(self, n: int) -> List[Optional[TreeNode]]:\n\n def dp(nums: List[int]) -> List[Optional[TreeNode]]:\n if len(nums) == 0:\n return [None]\n if len(nums) == 1:\n return [TreeNode(nums[0])]\n ret = []\n for i in range(len(nums)):\n left_list = dp(nums[:i])\n right_list = dp(nums[i + 1:])\n for left, right in itertools.product(left_list, right_list):\n root = TreeNode(nums[i])\n root.left = left\n root.right = right\n ret.append(root)\n\n return ret\n\n return dp(list(range(1, n + 1)))", "title": "" }, { "docid": "731ec8df68aa6c26ef8f7a3a7bf2039d", "score": "0.5504503", "text": "def rand_quad(N):\n return array([random.choice([-1, 1, 1j, -1j]) for idx in range(N)], dtype=complex64)", "title": "" }, { "docid": "503e0dc92cf38f1d98626336d61ab0e4", "score": "0.54827857", "text": "def formable_spider_cuboids(n):\n ans = []\n # The longest possible perimeter would be for the triangle generated from\n # a = b = n. That is, where a+b is maximal given fixed n and the restriction\n # that a<=b<=n. We get P_max = n(3+sqrt(5)).\n # This perimeter is used in our pythagorean triple search. 
But our search\n    # needs an integral perimeter. So we floor P_max to an integer and add 1\n    # to be safe.\n    P_max = int(n*(3+(5**0.5)))+1\n    for (x,y,z) in utils.numtheory.pythagorean_triples(P_max):\n        # We find the leg whose length is n, and set ab to be the other one\n        if x == n: n, ab = x, y\n        elif y == n: n, ab = y, x\n        else: continue # Or neither leg has length n, so we try the next triple\n        # if ab <= n, we just take (1, ab-1), (2, ab-2), ... until a>b\n        if ab <= n: a, b = 1, ab-1\n        # if ab > n, still have ab-n <= n so we take (ab-n, n), (ab-n+1, n-1),\n        # ... until a>b\n        if ab > n: a, b = ab-n, n\n        while a <= b:\n            yield tuple(sorted([a, b, n]))\n            a, b = a+1, b-1", "title": "" }, { "docid": "7d1a2f6a835eaaa0c37a4b3c31a07a7", "score": "0.5433506", "text": "def build_kdtree(self, points, leafsize):\n        return KDTree(points,leafsize =leafsize)", "title": "" }, { "docid": "245ab8205897a48c168cafae7a4412b4", "score": "0.53729874", "text": "def generate_sphere_points(n):\n   points = []\n   inc = math.pi * (3 - math.sqrt(5))\n   offset = 2 / float(n)\n   for k in range(int(n)):\n      y = k * offset - 1 + (offset / 2)\n      r = math.sqrt(1 - y*y)\n      phi = k * inc\n      points.append([math.cos(phi)*r, y, math.sin(phi)*r])\n   return np.array(points) #MB", "title": "" }, { "docid": "3f8440f453a9c
fb22a68f896e47220", "score": "0.53187925", "text": "def makePoints(n=1000):\n\n    #points = np.random.uniform(-10,10,(n,4))\n    #points = np.random.randn(n,3)\n    points = np.cumsum(np.random.randint(-1,2,(n,3)), axis=0) #classic random walk\n    #colors = np.random.rand(n,4)\n    clr4 = np.random.rand(1,4)\n\n    #points = [(0,0,0)]\n\n    fmt = GeomVertexFormat.getV3c4() #3 component vertex, w/ 4 comp color\n    vertexData = GeomVertexData('points', fmt, Geom.UHStatic)\n\n    verts = GeomVertexWriter(vertexData, 'vertex')\n    color = GeomVertexWriter(vertexData, 'color')\n\n    #for point,clr4 in zip(points,colors):\n    for point in points:\n        verts.addData3f(*point)\n        #color.addData4f(*point)\n        color.addData4f(*clr4[0])\n        #color.addData4f(.1,.1,.1,1)\n\n    #pointCloud = GeomLinestrips(Geom.UHStatic) #this is fucking cool!\n    #pointCloud = GeomTristrips(Geom.UHStatic) #this is fucking cool!\n    pointCloud = GeomPoints(Geom.UHStatic)\n    pointCloud.addConsecutiveVertices(0,n) #warning may error since n-1?\n    pointCloud.closePrimitive()\n\n    cloud = Geom(vertexData)\n    cloud.addPrimitive(pointCloud)\n    return cloud", "title": "" }, { "docid": "c0bc9190d4dfd49de1272dd78dd4de48", "score": "0.53104854", "text": "def full_rary_tree(r, n, create_using=None):\n    G=nx.empty_graph(n,create_using)\n    G.add_edges_from(_tree_edges(n,r))\n    return G", "title": "" }, { "docid": "efe7b5949030b76d2437e0f78c2b06c0", "score": "0.5270759", "text": "def test_pauliz_fourthroot(self, n):\n        assert qml.PauliZ(0).pow(n)[0].__class__ is qml.T\n\n        op = qml.PauliZ(0)\n        quad_mat = qml.matrix(op.pow)(n)\n        quad_mat_pow = qml.math.linalg.matrix_power(quad_mat, 4)\n\n        assert qml.math.allclose(quad_mat_pow, qml.matrix(op))", "title": "" }, { "docid": "dca7cbf465c9e0d7e9f474e22112bcda", "score": "0.52593625", "text": "def sum_of_triangular(n):\n    \n    draw.make_blank_canvas([12,12],facecolor=\"lightgray\")\n    draw.make_blank_plot(1,1,1,[-4,4],[-4,4])\n    draw.title(\"$T_{n-1} + T_n = n^2$\",size=35)\n    \n    xs = np.linspace(-3,3,n)\n    ys = np.linspace(-3,3,n)\n    \n    radius = 3/(n+2)\n    \n    for m,x in enumerate(xs):\n        for k,y in enumerate(ys):\n            if m >= k:\n                draw.circle_xy(x,y,radius,fc=\"black\")\n            else:\n                draw.circle_xy(x,y,radius,fc=\"red\")", "title": "" }, { "docid": "5a0bf32a57c5af77fe1317950851a04d", "score":
"0.52221835", "text": "def subplotgrid(n):\n rows = np.ceil(n / np.ceil(np.sqrt(n)))\n cols = np.ceil(np.sqrt(n))\n return int(rows), int(cols)", "title": "" }, { "docid": "0b28fc2ed7deb10631791378bd58e46f", "score": "0.5221877", "text": "def createForest(n):\n grid = np.zeros((n+2)**2).reshape(n+2,n+2)\n grid[n/2][n/2] = -1\n grid[0] = 2 # Top Row\n grid[-1] = 2 # Bottom Row\n grid[:,0] = 2 # Left Side\n grid[:,-1] = 2 # Right Side\n return grid", "title": "" }, { "docid": "30b25d1aa97c41d08186846972fbcc6e", "score": "0.52140635", "text": "def gen_grid(n=3, nx=None, ny=None, xs=0., xf=1., ys=0., yf=1.):\n if not nx: nx = n\n if not ny: ny = n\n\n grid_pts = [ ((xf-xs)*xi/(nx-1.), (yf-ys)*yi/(ny-1.)) \\\n for xi in xrange(nx) \\\n for yi in xrange(ny) ]\n return grid_pts", "title": "" }, { "docid": "90c1e0c67626f99a5819aa7ff76f6a24", "score": "0.5191532", "text": "def quadrature(expr, n, weight):\n\n # Process Integrals\n if isinstance(expr, Integral):\n\n # Get the integral integrand and vars\n integrand, vars = expr.args\n\n # Get the integration variable\n if isinstance(vars, Tuple):\n x = vars[0]\n else:\n x = vars\n\n # Get the quadrature points\n k = weight.args[1]\n x_k = IndexedBase(str(x))[k]\n\n # Return the quadrature\n return summation(my_subs(integrand, x, x_k) * weight, (k, 0, n-1))\n\n # Process leaves in the expression tree\n elif expr.args == ():\n return expr\n\n # Process all other expressions\n else:\n result = []\n for arg in expr.args:\n result.append(quadrature(arg, n, weight))\n return expr.func(*tuple(result))", "title": "" }, { "docid": "b73f629bd93ac8e21b4a00dfbb4ac3d1", "score": "0.5178049", "text": "def split(self, n):\n # Let e be the exponent of self, write e=e1e2 with gcd(e1,n)=1, e2 | n^infty,\n # choose x, y such that 1 = e1x+e2y, set u=e1x and v=e2y. 
If a in A,\n # then a = u*a + v*a is the decomposition of a with respect to self = U + V.\n # In fact, if q dnotes the order of a, let q=q1q2 with gcd(q1,n)=1, q2 | n^infty.\n # Then the order of u*a is q/gcd(q,u) = q/gcd(q,e1x) = q/q1 = q2, and\n # the order of v*a is q/gcd(q,v) = = q/gcd(q,e2y) = q/q2 = q1.\n e = self.level()\n e2 = gcd(e, n); e1 = Integer(e/e2)\n while not 1 == e1.gcd(e2):\n n = n**2; e2 = gcd(e,n); e1 = Integer(e/e2)\n g, x, y = xgcd(e1, e2)\n u = x*e1; v = y*e2\n ul =[]; vl =[]\n for a in self.gens():\n ul.append(u*a); vl.append(v*a)\n return FiniteQuadraticModule_subgroup(ul), FiniteQuadraticModule_subgroup(vl)", "title": "" }, { "docid": "ca09a2257d03bae9c4f17cf114651816", "score": "0.515954", "text": "def generateTrees(self, n):\n\n if n <= 0:\n return []\n if n == 1:\n return [TreeNode(1)]\n\n results = []\n for r in range(1, n+1, 1):\n left_trees = self.dfs(1, r-1)\n right_trees = self.dfs(r+1, n)\n\n for lt in left_trees:\n for rt in right_trees:\n root = TreeNode(r)\n root.left, root.right = lt, rt\n results.append(root)\n\n return results", "title": "" }, { "docid": "14e70b91cd6ca77ee2fc067ae4b94258", "score": "0.51577204", "text": "def random_sorted_vertices(n=10):\n W = float(n)\n vertices = []\n for _ in range(n):\n x = randint(0, W)\n y = randint(0, W)\n x /= W\n y /= W\n vertices.append((x, y))\n vertices = list(set(vertices))\n vertices.sort()\n return vertices", "title": "" }, { "docid": "09d0d6757ebd2e7f10d431cb2f038c13", "score": "0.51260114", "text": "def computeDictTree(self, points, position, length, n=0, path=[]):\n if len(points) == 0:\n return None\n elif len(points) <= self.neighbors or n > self.depth:\n for key in points:\n self.paths[key] = path\n return list(points.keys())\n else:\n x, y = position\n l = length\n pos1 = (x - l / 4, y + l / 4)\n pos2 = (x + l / 4, y + l / 4)\n pos3 = (x - l / 4, y - l / 4)\n pos4 = (x + l / 4, y - l / 4)\n pts1, pts2, pts3, pts4 = [], [], [], []\n\n for key, pt in points.items():\n if pt.x > x:\n if pt.y > y:\n pts2.append((key, pt))\n else:\n pts4.append((key, pt))\n else:\n if pt.y > y:\n pts1.append((key, pt))\n else:\n pts3.append((key, pt))\n\n pts1 = dict(pts1)\n pts2 = dict(pts2)\n pts3 = dict(pts3)\n pts4 = dict(pts4)\n\n tree={}\n t0=self.computeDictTree(pts1, pos1, l/2, n+1, path+[0])\n t1=self.computeDictTree(pts2, pos2, l/2, n+1, path+[1])\n t2=self.computeDictTree(pts3, pos3, l/2, n+1, path+[2])\n t3=self.computeDictTree(pts4, pos4, l/2, n+1, path+[3])\n if t0: tree[0]=t0\n if t1: tree[1]=t1\n if t2: tree[2]=t2\n if t3: tree[3]=t3\n return tree", "title": "" }, { "docid": "e730667c07a1733e255fbe665a6ee66b", "score": "0.5123517", "text": "def insert(self, point):\n print(self)\n\n\n if(self.children):\n # if we tried to insert into something that isnt a \n # leaf node we check the children witht he valid\n # ranges and insert into that child\n for each in self.children:\n if(point[0]>=each.x_start and point[0]<each.x_end and\n point[1]>=each.y_start and point[1]<each.y_end):\n each.insert(point)\n\n else:\n # if we still have space in the child add to points\n if(len(self.points)<4): self.points.add(point)\n\n else:\n # we have to split this node into 4 new nodes\n\n # line separators for x and y\n diffX = (self.x_end + self.x_start)//2\n diffY = (self.y_end + self.y_start)//2\n\n self.children = []\n\n # the 4 new children with their ranges\n self.children+=[QuadTree(self.x_start,self.y_start,diffX,diffY)]\n self.children+=[QuadTree(diffX,diffY,self.x_end,self.y_end)]\n\n 
self.children+=[QuadTree(self.x_start,diffY,diffX,self.y_end)]\n                self.children+=[QuadTree(diffX,self.y_start,self.x_end,diffY)]\n\n\n                # re-insert each of the current points into this\n                # node so that the correct new child is found\n                # automatically\n                for each in self.points:\n                    self.insert(each)\n\n                # insert the main point\n                self.insert(point)\n\n                # current node isn't a leaf anymore so it shouldn't\n                # have points anymore\n                self.points = None", "title": "" }, { "docid": "32a9d0983315d2a01df0f2b7652ad772", "score": "0.50828815", "text": "def sample_points(self, n: int) -> np.ndarray:\n        shape = self.space.shape\n        radius = (min(shape) / (n ** (1 / self.dim))) / 2\n        while True:\n            points = disc.Bridson_sampling(dims=np.array(shape), radius=radius)\n            radius /= 2\n\n            points = points[self.space[tuple(points.T.astype(int))]]\n            if len(points) >= n:\n                break\n\n        np.random.shuffle(points)\n        return points[:n]", "title": "" }, { "docid": "ef7ad7082ef26f14969640b232efc509", "score": "0.5082758", "text": "def circle_points(n):\n    top = np.linspace(1, -1, n)[:-1]\n    bottom = np.linspace(-1, 1, n)\n    x = np.concatenate((top, bottom))\n    circle_top = np.sqrt(1 - top**2)\n    circle_bot = -np.sqrt(1 - bottom**2)\n    y = np.concatenate((circle_top, circle_bot))\n    return x, y", "title": "" }, { "docid": "ebaf698ba15ec9bda07eb6413ed51d07", "score": "0.50715137", "text": "def random_tree(self, n=12):\n\n        tre = toytree.rtree.coaltree(n)\n        ## assign random edge lengths and supports to each node\n        for node in tre.treenode.traverse():\n            node.dist = np.random.exponential(1)\n            node.support = int(np.random.uniform(50, 100))\n        return tre", "title": "" }, { "docid": "0caabbd82db854a6aaed918de1b7fce0", "score": "0.5067835", "text": "def partial_tree(s, n):\n    if n == 0:\n        return None, s\n    left_size = (n-1)//2\n    right_size = n - left_size - 1\n    # \"*** YOUR CODE HERE ***\"\n    def p_tree(lst):\n        n = len(lst)\n        if n == 0:\n            return None\n        left_size = (n-1)//2\n        t = Tree(lst[left_size])\n        t.left = p_tree(lst[:int(left_size)])\n        t.right = p_tree(lst[int(left_size+1):])\n        return t\n    lst, temp = [], s\n    for i in range(n):\n        lst.append(temp.first)\n        temp = temp.rest\n    return (p_tree(lst), temp)", "title": "" }, { "docid": "4d3974bf2b4a1415ec2426f7b135e11d", "score": "0.5066667", "text": "def test_tree(self, n=None):\n\n        if n==None:\n            n, ok = QInputDialog().getInt(self, \"Test tree\",\n                \"Nodes:\", 10)\n            if not ok:\n                return\n        self.set_tree(self.random_tree(n=n))\n        self.height = 200+self.tree.ntips*10\n        self.update()\n        return", "title": "" }, { "docid": "2a87f4e0402e0887b02f7c9d32684b16", "score": "0.50658274", "text": "def tree(N, seed):\n    return nx.random_powerlaw_tree(N, seed=seed, tries=10000)", "title": "" }, { "docid": "1c224b2ba3aa586b0aaaf667d688ff1a", "score": "0.505926", "text": "def __init__(self, points_list, depth=0):\n\n        # meant to take list of points, not dataframe,\n        # but since given.slow() takes a df this might be helpful.\n        if isinstance(points_list, pd.DataFrame):\n            points_list = make_point_list(points_list)\n\n        n = len(points_list)\n        self.depth = depth\n        self.axis = self.axes[depth % len(self.axes)]\n\n        if n > 0:\n            sorted_points = sorted(points_list,\n                                   key=lambda x: getattr(x, self.axis))\n\n            self.node = sorted_points[n // 2]\n            self.left = KDTree(sorted_points[:n // 2], self.depth + 1)\n            self.right = KDTree(sorted_points[n // 2 + 1:], self.depth + 1)\n        else:\n            self.node = None", "title": "" }, { "docid": "8e5ab5d27935d508c90bebbd4a9654eb", "score": "0.505268", "text": "def build_octrees(pos, 
bucket_size, ngrid, wts, log=null_log, buf_size=None):\n\n lib = _initlib(log)\n\n sizeof_twn = lib.get_tree_iterator_size()\n guess_nodes = buf_size is None\n npos = len(pos)\n pts = require(pos, dtype=float64, requirements=['C'])\n if guess_nodes:\n max_nodes = int(npos*2.1)+1\n buf_size = max_nodes * (sizeof_twn//8)\n print('Guessed number of nodes {:,}'.format(max_nodes),file=log)\n buf = empty(buf_size, dtype=float64)\n sort_idx = empty(npos, dtype=int32)\n\n num_roots = lib.build_octree_iterator(pts, npos, ngrid, bucket_size, \n sort_idx, buf, buf_size)\n if num_roots==-1:\n raise Exception('Out of memory')\n if num_roots==-2:\n raise Exception('>bucket_size points have indistinguishable double representations')\n\n class tree:\n root_counts = buf.view(dtype=int32)[:num_roots]\n num_nodes = root_counts.sum()\n root_indices = empty(num_roots, dtype=int32)\n root_cells = buf.view(dtype=int32)[num_roots:num_roots*2]\n\n it = buf[num_roots:num_roots + (sizeof_twn*num_nodes)//8]\n\n sof32 = sizeof_twn//4 # size of TWN in 32 bit ints\n # use intimate knowledge of structure layout\n n = it.view(dtype=int32)[sof32-2::sof32]\n depth_next = it.view(dtype=int32)[sof32-3::sof32]\n breadth_next = it.view(dtype=int32)[sof32-4::sof32]\n depths = it.view(dtype=int32)[sof32-1::sof32]\n \n\n\n tree.root_indices[1:] = cumsum(tree.root_counts[:-1])\n tree.root_indices[0] = 0\n\n tree.fill = tree.n[tree.root_indices]\n\n print('{:,} filled cells(trees),'.format(num_roots),\n '{:,}-{:,} nodes per tree,'.format(tree.root_counts.min(), tree.root_counts.max()),\n 'av. %.2f'%tree.root_counts.mean(dtype=float64), file=log)\n\n print('{:,}-{:,} points per tree'.format(tree.fill.min(),tree.fill.max()), \n '(av. %.2f),'%tree.fill.mean(dtype=float64), \n 'av. point in a tree of {:,} points'.format(square(tree.fill.astype(int64)).sum()//npos),\n file=log)\n\n \n leaf_counts = tree.n[flatnonzero(tree.n<=bucket_size)]\n\n av_leaf_size_per_pt = square(leaf_counts).sum()/float(npos)\n print('%d-%d points per leaf (leaf size %d), average %.2f, average point is in a leaf of %.2f pts'%(leaf_counts.min(), leaf_counts.max(), bucket_size, leaf_counts.mean(dtype=float64), av_leaf_size_per_pt), file=log)\n\n\n print('Actual number of nodes used {:,}, total memory {:,} bytes'.format(tree.num_nodes, tree.num_nodes*sizeof_twn),file=log)\n \n print('Indexing {:,} points for octree-ordered xyzw'.format(npos),file=log)\n xyzw = empty((npos+tree.num_nodes, 4), dtype=float64)\n xyzw[:npos,:3] = pts[sort_idx]\n \n if sum(array(wts).shape)<=1:\n xyzw[:npos,3] = wts\n else:\n xyzw[:npos,3] = wts[sort_idx]\n\n\n print('Building xyzw for {:,} nodes'.format(tree.num_nodes), file=log)\n\n tree.max_depth = lib.fill_treewalk_xyzw(num_roots, tree.it, tree.root_counts, xyzw)\n tree.xyzw = xyzw\n\n print('Max leaf depth %d'%tree.max_depth, file=log)\n return tree, sort_idx", "title": "" }, { "docid": "e113fb581a950d679aef635e6a11f9c5", "score": "0.5051846", "text": "def apply_quadrature(expr, n, weights):\n\n # Ensure exprs is always a list\n if isinstance(expr, Expr):\n exprs = [expr]\n else:\n exprs = list(expr)\n num_expr = len(exprs)\n\n # Apply quadrature to each expression\n for i in range(num_expr):\n exprs[i] = quadrature(exprs[i], n, weights)\n\n # Return the results\n if num_expr == 1:\n exprs = exprs[0]\n else:\n exprs = tuple(exprs)\n return exprs", "title": "" }, { "docid": "a79a0527061d00887c1f53cdb1974c54", "score": "0.5051083", "text": "def test_NUTS_binary_tree_flatten():\n d = 5\n d_max = 10\n\n # Index table\n save_index_table 
= np.ones(d_max+1, dtype=int) * -1\n\n def print_line(m, save_index_table):\n print_line = \"%2d: \" % m\n for i in range(1, m+1):\n if (i in save_index_table) or (i == 1) or (i==m):\n print_line = print_line + \"x \"\n else:\n print_line = print_line + \"o \"\n print print_line\n return None\n\n for m in range(2, 2**d+1):\n # Decide whether to save the point for future comparison.\n if (m % 2) == 1: # Only odd numbered points are saved.\n save_index = find_next(save_index_table)\n save_index_table[save_index] = m \n print_line(m, save_index_table)\n else:\n print_line(m, save_index_table) \n # Check termination conditions against each point.\n check_pts = check_points(m)\n for l in check_pts:\n # Retrieve a previous point \n save_index = retrieve_save_index(save_index_table, l)\n\n # If the point is no longer needed, then release the space. \n if (l > 1) and release(m, l):\n save_index_table[save_index] = -1", "title": "" }, { "docid": "c521267d242fa0143a1d649f7310c0b6", "score": "0.50500554", "text": "def makeND(Qargs):\n nodeData = np.zeros((len(Qargs),len(Qargs[1,:])+1))\n P = eqnStateQ(Qargs[:,0],Qargs[:,1],Qargs[:,2])\n i = 0\n for x in Qargs:\n nodeData[i,:] = np.array([P[i],x[0],x[1]/x[0],x[2]/x[0]])\n i+=1\n return nodeData", "title": "" }, { "docid": "60e6836c3586e7184aa38e7916fab84f", "score": "0.5038458", "text": "def rand_rk1_proj(n):\n u = rand_unit_vector(n)\n return np.outer(u, u)", "title": "" }, { "docid": "ecac28641332574179072ae0316af9ab", "score": "0.5033763", "text": "def opa(n): #NOT IMPLEMENTED\n #original preferential attachment\n\n # Starting configuration, dumbbell.\n G=nx.Graph()\n G.add_node(0)\n G.add_node(1)\n G.add_edge(0,1)\n\n zeros = 0\n for v in xrange(2,n):\n G.add_node(v)\n\n # Add each to existing node u with probability deg(u)/2m\n num_edges = G.num_edges # value will otherwise change in the loop.\n for u in G:\n if u == v: continue\n if random.random() < (len(G.neighbors(u)) / (2*num_edges)):\n G.add_edge(v,u)\n\n # If no edges were added, just add one random edge (component).\n if len(G.neighbors(v)) == 0:\n zeros += 1\n rand_node = v\n while (rand_node == v): rand_node = G.random_node()\n G.add_edge(v,rand_node)\n\n assert G.num_nodes == n\n return G", "title": "" }, { "docid": "ff876a35996dbca156741d1a32f7a1da", "score": "0.5029823", "text": "def quad(listofpoints, list_t):\n lst = listofpoints\n list_x_1, list_y_1 = linear([lst[0], lst[1]], [lst[2], lst[3]], list_t)\n list_x_2, list_y_2 = linear([lst[2], lst[3]], [lst[4], lst[5]], list_t)\n list_x_3, list_y_3 = linear([lst[4], lst[5]], [lst[6], lst[7]], list_t)\n g0x_list = []\n g0y_list = []\n g1x_list = []\n g1y_list = []\n\n for indexi, indext in enumerate(list_t):\n g0x_list.append((1-indext) * list_x_1[indexi] + indext * list_x_2[indexi])\n g0y_list.append((1-indext) * list_y_1[indexi] + indext * list_y_2[indexi])\n\n for indexi, indext in enumerate(list_t):\n g1x_list.append((1-indext) * list_x_2[indexi] + indext * list_x_3[indexi])\n g1y_list.append((1-indext) * list_y_2[indexi] + indext * list_y_3[indexi])\n\n return g0x_list, g0y_list, g1x_list, g1y_list", "title": "" }, { "docid": "dae4648b2d18a0b9fe6840d6ecfd3ba7", "score": "0.5027809", "text": "def createGrid(bounds=[[-1, -1, -1], [1, 1, 1]], dr=0.1):\r\n # round to integer, type is still float\r\n bounds = bounds/dr\r\n bounds = np.stack((np.floor(bounds[0]), np.ceil(bounds[1])))*dr\r\n# print(\"bounds=\\n\", bounds)\r\n # number of points in x,y,z direction:(nx,ny,nz)\r\n nx, ny, nz = 
np.ceil((bounds[1]-bounds[0])/dr).astype(int)\r\n x = np.linspace(bounds[0, 0], bounds[0, 0]+(nx-1)*dr, num=nx)\r\n y = np.linspace(bounds[0, 1], bounds[0, 1]+(ny-1)*dr, num=ny)\r\n z = np.linspace(bounds[0, 2], bounds[0, 2]+(nz-1)*dr, num=nz)\r\n # a flattened grid of xyzs of the vertices\r\n xyz_grid = np.stack(np.meshgrid(x, y, z), axis=-1).reshape((-1, 3))\r\n return xyz_grid, bounds, (nx, ny, nz)", "title": "" }, { "docid": "f2c62e48670773d5c207b7598f740d3d", "score": "0.5027285", "text": "def make_pts(self, n_pts=1000):\n xs = np.random.random(n_pts)\n ys = np.random.random(n_pts)\n self.pts = gpd.GeoSeries([Point(x, y) for x, y in zip(xs, ys)])", "title": "" }, { "docid": "5e3b904cbedb18257241adaf4345d2e4", "score": "0.5003271", "text": "def generate_square(lx, ly, c, p, n):\n ys = []\n yws = []\n xs = []\n xws = []\n wx = 2*lx/n\n wy = 2*ly/n\n for i in range(n):\n y, w = cheb(p, -ly + i*wy, -ly + (i+1)*wy)\n ys.append(y)\n yws.append(w)\n x, w = cheb(p, -lx + i*wx, -lx + (i+1)*wx)\n xs.append(x)\n xws.append(w)\n x = np.concatenate(xs)\n xw = np.concatenate(xws)\n y = np.concatenate(ys)\n yw = np.concatenate(yws)\n # stitch sides together to get nodes\n lxl = np.repeat(lx, p*n)\n lyl = np.repeat(ly, p*n)\n left = np.row_stack([ -lxl + c[0], y + c[1] ])\n right = np.row_stack([ lxl + c[0], y + c[1] ])\n bottom = np.row_stack([ x + c[0], -lyl + c[1] ])\n top = np.row_stack([ x + c[0], lyl + c[1] ])\n nodes = np.column_stack([ left, right, bottom, top ])\n # stitch together weights\n weights = np.concatenate([ yw, xw, yw, xw ])\n\n return nodes, weights, nodes.shape[1]", "title": "" }, { "docid": "55481fd6e8ba5fa7ca8110d44cdf8741", "score": "0.49930915", "text": "def print_quadrant(root):\n xmin,ymin = root.min_coordinates\n xmax,ymax = root.max_coordinates\n p1 = Point([xmin,ymin])\n p2 = Point([xmin,ymax])\n p3 = Point([xmax,ymax])\n p4 = Point([xmax,ymin])\n s1 = Segment([p1,p2])\n s2 = Segment([p2,p3])\n s3 = Segment([p3,p4])\n s4 = Segment([p4,p1])\n print_segment([s1,s2,s3,s4])\n if root.childs:\n for child in root.childs:\n print_quadrant(child)", "title": "" }, { "docid": "b831babd0a0332e8a64e19ed5fab99b9", "score": "0.49908406", "text": "def heap_graph(n: int) -> DiGraph:\n result = DiGraph()\n result.add_nodes_from(range(0, n))\n result.add_edges_from(\n (parent(i), i) for i in range(0, n) if parent(i) is not None\n )\n assert is_heap_graph(result)\n return result", "title": "" }, { "docid": "ba995c120368dc4caea70d36a45a394e", "score": "0.49710006", "text": "def quadrants(points):\n # group points on 4 quadrants\n # [Top_left,Top_right,Bottom_left,Bottom_right]\n p = ImCoors(points) # points data x,y -> (width,height)\n mean_x, mean_y = p.mean\n Bottom, Top = separe(points, mean_y, axis=1)\n Top_right, Top_left = separe(Top, mean_x, axis=0)\n Bottom_right, Bottom_left = separe(Bottom, mean_x, axis=0)\n return [Top_left, Top_right, Bottom_left, Bottom_right]", "title": "" }, { "docid": "f7d3f2f00c457b3135fd0e9c474463eb", "score": "0.49616572", "text": "def geometric(self, p, size):\n return ndarray()", "title": "" }, { "docid": "0295cabad4b8802ba4305c7206d43d0a", "score": "0.49556673", "text": "def quad_fit(geo):\n minx, maxx, miny, maxy, minz, maxz = np.inf, 0, np.inf, 0, np.inf, 0\n def check_pt(pt, refmin, refmax):\n if pt < refmin:\n refmin = pt\n if pt > refmax:\n refmax = pt\n return refmin, refmax\n for n in geo.nodes:\n minx, maxx = check_pt(n.x, minx, maxx)\n miny, maxy = check_pt(n.y, miny, maxy)\n minz, maxz = check_pt(n.z, minz, maxz)\n mid = [np.mean([n.x for n 
in geo.nodes]),\n np.mean([n.y for n in geo.nodes]),\n np.mean([n.z for n in geo.nodes])]\n # Use mean-centered for quadratic fit (?)\n minx, maxx = minx - mid[0], maxx - mid[0]\n miny, maxy = miny - mid[1], maxy - mid[1]\n minz, maxz = minz - mid[2], maxz - mid[2]\n #\n def get_XYgrid(xmin, xmax, ymin, ymax, zval):\n gridpts = [] # Populate this with the grid \n # Fit XY (minx, 0), (0, maxy), (maxx, 0); & reflect for bottom part\n coefXY = np.polyfit([xmin, 0, xmax], [0, ymax, 0], 2) # Force quad\n numxsteps = int((xmax - xmin)/20.) # max - (-min) = max+abs(min)\n xsteps = linspace(xmin, xmax, numxsteps)\n ylims = [sum([coefXY[0]*x**2, coefXY[1]*x, coefXY[2]]) for x in xsteps]\n for i in range(len(ylims)):\n yvals = np.linspace(0, ylims[i], int(ylims[0]/20.)) # positive y\n for j in yvals:\n gridpts.append([xstep[i], j, zval])\n gridpts.append([xstep[i], -j, zval])\n gridpts.append([-xstep[i], j, zval])\n gridpts.append([-xstep[i], -j, zval])\n return gridpts # All grid points for this level of z\n # Fit ZY and ZX to get the \n return", "title": "" }, { "docid": "77eaf0e9d567af8c491d39a322938385", "score": "0.49499023", "text": "def get_regions(self, n):\n coords = []\n i = j = self.N - n\n for _ in xrange(2 * n - 1):\n j += 1\n coords.append((i, j))\n for _ in xrange(2 * n - 1):\n i += 1\n coords.append((i, j))\n for _ in xrange(2 * n - 1):\n j -= 1\n coords.append((i, j))\n for _ in xrange(2 * n - 1):\n i -= 1\n coords.append((i, j))\n return coords", "title": "" }, { "docid": "592b033f730f7b6c480166e117e876a6", "score": "0.49482837", "text": "def drawnbinom(n,p,size=1):\n t0 = time.time() \n m = np.zeros(size) # array to contain draws\n nlim = 1.0e6 # the limit deciding what approach to take\n\n if n <= nlim: # for very large n the u-vector becomes too large\n for ii in xrange(size):\n u = np.random.uniform(low=0.0, high=1.0, size=n)\n m[ii] = int(np.sum(np.log10(u)/np.log10(1-p))) \n else: # so in that case slit up in sub samples instead\n nusub = np.floor(n/nlim) # sub arrays of size Nlim to draw\n nrest = n - nlim*nusub # size of final subarray\n\n for ii in xrange(size):\n madd = 0.0\n# for jj in xrange(int(nusub)):\n for jj in xrange(1): # only doing it once and then multiplying by number of sub arrays\n usub = np.random.uniform(low=0.0, high=1.0, size=nlim)\n msub = int(np.sum(np.log10(usub)/np.log10(1-p))) \n madd = madd + msub * nusub\n if nrest > 0:\n urest = np.random.uniform(low=0.0, high=1.0, size=nrest)\n mrest = int(np.sum(np.log10(urest)/np.log10(1-p))) \n madd = madd + mrest\n \n m[ii] = int(madd)\n\n# looping is way too slow ...\n# sumval = 0.0\n# for ii in xrange(size):\n# for jj in xrange(int(n)):\n# u = np.random.uniform(low=0.0, high=1.0, size=1)\n# sumval = sumval + np.log10(u)/np.log10(1-p)\n# m[ii] = int(sumval)\n \n N = m + n\n return N", "title": "" }, { "docid": "9c60fd1d952c991625df4de0391574e9", "score": "0.4944114", "text": "def computeListTree(self, points, position, length, n=0, path=[]):\n if len(points) == 0:\n return None\n elif len(points) <= self.neighbors or n > self.depth:\n for key in points:\n self.paths[key] = path\n return list(points.keys())\n else:\n x, y = position\n l = length\n pos1 = (x - l / 4, y + l / 4)\n pos2 = (x + l / 4, y + l / 4)\n pos3 = (x - l / 4, y - l / 4)\n pos4 = (x + l / 4, y - l / 4)\n pts1, pts2, pts3, pts4 = [], [], [], []\n\n for key, pt in points.items():\n if pt.x > x:\n if pt.y > y:\n pts2.append((key, pt))\n else:\n pts4.append((key, pt))\n else:\n if pt.y > y:\n pts1.append((key, pt))\n else:\n pts3.append((key, 
pt))\n\n pts1 = dict(pts1)\n pts2 = dict(pts2)\n pts3 = dict(pts3)\n pts4 = dict(pts4)\n\n return [self.computeListTree(pts1, pos1, l/2, n+1, path+[0]),\n self.computeListTree(pts2, pos2, l/2, n+1, path+[1]),\n self.computeListTree(pts3, pos3, l/2, n+1, path+[2]),\n self.computeListTree(pts4, pos4, l/2, n+1, path+[3])]", "title": "" }, { "docid": "4cf5697109a0db5028cc697ff91ef8eb", "score": "0.49434394", "text": "def square_tree(t):\n sq_branched = [square_tree(branch) for branch in branches(t)]\n return tree(label(t) ** 2, sq_branched)", "title": "" }, { "docid": "aafa0e0f747680731e2069651fec1796", "score": "0.49342802", "text": "def square_tree(t):\n t.entry = t.entry ** 2\n for branch in t.branches:\n square_tree(branch)", "title": "" }, { "docid": "3fa82338342acaf0653d5c07d0e20db5", "score": "0.49261233", "text": "def geometric(self, p, size):\n return ndarray()", "title": "" }, { "docid": "58d1f45a679ea9d0e12b5259b3919caa", "score": "0.49171537", "text": "def generate_separated_square(lx, ly, c, p, n):\n ys = []\n yws = []\n xs = []\n xws = []\n wx = 2*lx/n\n wy = 2*ly/n\n for i in range(n):\n y, w = cheb(p, -ly + i*wy, -ly + (i+1)*wy)\n ys.append(y)\n yws.append(w)\n x, w = cheb(p, -lx + i*wx, -lx + (i+1)*wx)\n xs.append(x)\n xws.append(w)\n x = np.concatenate(xs)\n xw = np.concatenate(xws)\n y = np.concatenate(ys)\n yw = np.concatenate(yws)\n # stitch sides together to get nodes\n lxl = np.repeat(lx, p*n)\n lyl = np.repeat(ly, p*n)\n left = np.row_stack([ -lxl + c[0], y + c[1] ])\n right = np.row_stack([ lxl + c[0], y + c[1] ])\n bottom = np.row_stack([ x + c[0], -lyl + c[1] ])\n top = np.row_stack([ x + c[0], lyl + c[1] ])\n\n return left, right, bottom, top", "title": "" }, { "docid": "81a97016e61de6706d628a1d5689d40e", "score": "0.49136722", "text": "def generate_n_random_directions(self, n):\n return generate_ndim_random_directions(3, k=n)", "title": "" }, { "docid": "f1fed9fa3d76bc79fdb6bae2609d525b", "score": "0.49115485", "text": "def split(n):\n a = n.children\n n.children = a[:2]\n n.size = sum([size(x) for x in n.children])\n return Node(a[2:])", "title": "" }, { "docid": "102a47c560485d829a161c53851d1c69", "score": "0.4911468", "text": "def hypercube_graph(n):\n dim=n*[2]\n G=grid_graph(dim)\n G.name=\"hypercube_graph_(%d)\"%n\n return G", "title": "" }, { "docid": "a1903d613acd750424c44827050cfb7a", "score": "0.4898225", "text": "def bezier_curve_range(self, n, points):\n\t\tfor i in xrange(n):\n\t\t\tt = i / float(n - 1)\n\t\t\tyield self.bezier(t, points)", "title": "" }, { "docid": "315be991316d8f95e63cc0d17ba4c383", "score": "0.48951232", "text": "def get_subplot_grid(n):\r\n assert 0 <= n <= 9\r\n if n <= 3:\r\n return 1, n\r\n elif n <= 6:\r\n return 2, n - 3\r\n else:\r\n return 3, n - 6", "title": "" }, { "docid": "64153ca5632e54068e4e7b4f50aa5aad", "score": "0.4887886", "text": "def get_square_subplot_grid(n):\r\n side = math.ceil(n ** .5)\r\n return side, side", "title": "" }, { "docid": "fe603a374f67f743d51501649b4bebae", "score": "0.4876723", "text": "def build_kdtree(pnts, ntrees, nchecks):\n\n # pnts must be contiguous for the library\n pnts = numpy.ascontiguousarray(pnts)\n N = ctypes.c_uint(pnts.shape[0])\n D = ctypes.c_uint(pnts.shape[1])\n pnts_p = pnts.ctypes.data_as(ctypes.c_void_p)\n\n if pnts.dtype not in ['u1', 'f4', 'f8']:\n raise TypeError, 'datatype %s not currently supported' % pnts.dtype\n\n suffix = get_suffix(pnts.dtype)\n\n ptr = getattr(lib, \"fastann_nn_obj_build_kdtree_\" + suffix)(pnts_p, N, D, ctypes.c_uint(ntrees), 
ctypes.c_uint(nchecks))\n\n return nn_obj(ptr, pnts)", "title": "" }, { "docid": "d8ceac89abd766073a0be35ede085a94", "score": "0.48764104", "text": "def ST_PointN(geometry: ColumnOrName, n: Union[ColumnOrName, int]) -> Column:\n return _call_st_function(\"ST_PointN\", (geometry, n))", "title": "" }, { "docid": "0b57f222df83f1982b67f85433bb75d5", "score": "0.48713973", "text": "def quad2(listofpoints, list_t):\n lst = listofpoints\n list_x_1, list_y_1 = linear([lst[0], lst[1]], [lst[2], lst[3]], list_t)\n list_x_2, list_y_2 = linear([lst[2], lst[3]], [lst[4], lst[5]], list_t)\n g0x_list = []\n g0y_list = []\n for indexi, indext in enumerate(list_t):\n g0x_list.append((1-indext) * list_x_1[indexi] + indext * list_x_2[indexi])\n g0y_list.append((1-indext) * list_y_1[indexi] + indext * list_y_2[indexi])\n return g0x_list, g0y_list", "title": "" }, { "docid": "90c174967d6fca7ff93572e1bc6ef4b5", "score": "0.48705947", "text": "def random_circle_vertices(n=10, cx=0, cy=0):\n# import fractions\n# from gmpy2 import mpfr\n# import gmpy2\n# gmpy2.get_context().precision = 53 * 4\n\n vertices = []\n for _ in range(n):\n r = sqrt(random())\n t = 2 * pi * random()\n x = r * cos(t)\n y = r * sin(t)\n vertices.append((x+cx, y+cy))\n vertices = list(set(vertices))\n vertices.sort()\n return vertices", "title": "" }, { "docid": "603921da5690cb02f4f183ad26c28177", "score": "0.48662412", "text": "def vertices(n:int, size:float, r:float, start_angle:float=0) -> list:\n angle = math.radians(360 / n) # inner angle of polygon\n start_angle = math.radians(start_angle)\n # 'size +' means here that origin is the rectangle's middlepoint\n return [[int(size//2 + r*math.sin(start_angle + i*angle)), int(size//2 + r*math.cos(start_angle + i*angle))] for i in range(0, n)]", "title": "" }, { "docid": "189bfedc28d10a7ea1f750d0f27b9572", "score": "0.48632172", "text": "def initial_nodes(n):\n # Turnover point\n # linear polynomial fit to error of 10, 25, 40, ..., 1000 point rules\n fit = 0.49082003 * n - 4.37859653\n turnover = around(fit).astype(int)\n # Compute all approximations\n ia = arange(1, int(floor(n * 0.5) + 1))\n ib = flipud(arange(1, int(1 + n - ceil(n * 0.5))))\n xasq = initial_nodes_a(n, ia[:turnover + 1])\n xbsq = initial_nodes_b(n, ib[turnover + 1:])\n # Combine\n iv = sqrt(hstack([xasq, xbsq]))\n # Central node is always zero\n if n % 2 == 1:\n iv = hstack([0.0, iv])\n return iv", "title": "" }, { "docid": "770932691a76aa3975f5b185d5286d1f", "score": "0.4858925", "text": "def num_trees(n):\n if n == 1:\n return 1\n return sum(num_trees(k) * num_trees(n-k) for k in range(1, n))", "title": "" }, { "docid": "b4372ea284af0d0f4309ba1cb9e9abbf", "score": "0.4854558", "text": "def __init__(self, n, q, name='a'):\n id = 'PGU(%s,%s)'%(n,q)\n PermutationGroup_generic.__init__(self, gap_group=id)\n self._q = q\n self._base_ring = GF(q, name=name)\n self._field_of_definition = GF(q**2, name)\n self._n = n", "title": "" }, { "docid": "616a0df1e37d582577a377cdafcc8139", "score": "0.48490718", "text": "def tree_subdivide(node,point_limit):\n\n if len(node.points) == 0:\n # print('zero')\n node.is_leaf = True\n return\n\n elif int(len(node.points)) <= point_limit:\n node.is_leaf = True\n # print('below lim')\n return\n\n elif len(node.points) > point_limit:\\\n # scale reduction\n w_reduced = node.width*0.5\n h_reduced = node.height*0.5\n\n # points in each quadrant\n # denoted as sw,nw,se,ne\n points_in_sw = check_points(node.x0,node.y0,w_reduced,h_reduced,\n node.points)\n\n points_in_nw = check_points(node.x0,node.y0 + 
h_reduced,w_reduced,\n h_reduced,node.points)\n\n points_in_se = check_points(node.x0 + w_reduced,node.y0,w_reduced,\n h_reduced, node.points)\n\n points_in_ne = check_points(node.x0 + w_reduced,node.y0 + h_reduced,\n w_reduced,h_reduced,node.points)\n\n sw = Node(node.x0,node.y0,w_reduced,h_reduced,\n points_in_sw,parent=node)\n\n nw = Node(node.x0,node.y0 + h_reduced,w_reduced,h_reduced,\n points_in_nw,parent=node)\n\n se = Node(node.x0 + w_reduced,node.y0,w_reduced,h_reduced,\n points_in_se,parent=node)\n\n ne = Node(node.x0 + w_reduced,node.y0 + h_reduced,w_reduced,h_reduced,\n points_in_ne,parent=node)\n\n node.children = [sw,nw,se,ne]\n\n for child in node.children:\n tree_subdivide(child,point_limit)\n\n return node", "title": "" }, { "docid": "ae890bd01564493c44fda42b0759c5f0", "score": "0.48469892", "text": "def num_to_dottedquad(n):\n return socket.inet_ntoa(struct.pack('!L',n))", "title": "" }, { "docid": "b2a7900d04ba51728ff38262fb8472b9", "score": "0.48462617", "text": "def grid(N):\n m = 1\n for i in range(1, int(math.sqrt(N)) + 1):\n if N % i == 0:\n m = i\n return nx.grid_2d_graph(m, N // m)", "title": "" }, { "docid": "87bcc9bd8d431f844b66e34396c9107a", "score": "0.4844722", "text": "def generateSquare(Nelements, length):\n from dolfin import UnitSquareMesh, SubDomain, MeshFunction, Measure, near\n mesh = UnitSquareMesh(Nelements, Nelements)\n # Rescale for Chapelle-Moireau comparison\n mesh.coordinates()[:] *= length\n\n # Subdomains: Solid\n class Left(SubDomain):\n def inside(self, x, on_boundary):\n return near(x[0], 0.0) and on_boundary\n\n class Right(SubDomain):\n def inside(self, x, on_boundary):\n return near(x[0], length) and on_boundary\n\n class Top(SubDomain):\n def inside(self, x, on_boundary):\n return near(x[1], length) and on_boundary\n\n class Bottom(SubDomain):\n def inside(self, x, on_boundary):\n return near(x[1], 0.0) and on_boundary\n left, right, top, bottom = Left(), Right(), Top(), Bottom()\n LEFT, RIGHT, TOP, BOTTOM = 1, 2, 3, 4 # Set numbering\n NONE = 99 # Marker for empty boundary\n\n markers = MeshFunction(\"size_t\", mesh, 1)\n markers.set_all(0)\n\n boundaries = (left, right, top, bottom)\n def_names = (LEFT, RIGHT, TOP, BOTTOM)\n for side, num in zip(boundaries, def_names):\n side.mark(markers, num)\n\n return mesh, markers, LEFT, RIGHT, TOP, BOTTOM, NONE", "title": "" }, { "docid": "e1fd5d21f25fcd0b900697523f4e4553", "score": "0.48433375", "text": "def quadratic_number_field():\n from sage.all import ZZ, QuadraticField\n while True:\n d = ZZ.random_element(x=-10**5, y=10**5)\n if not d.is_square():\n return QuadraticField(d,'a')", "title": "" }, { "docid": "22987061f250a20aab62e1b08a3cdf9c", "score": "0.48374888", "text": "def _quadrilateral_grid_coords(points):\n assert points.ndim == 3\n assert points.shape[0] >= 2\n assert points.shape[1] >= 2\n assert points.shape[2] == 2\n\n dim0, dim1 = points.shape[:2]\n grid_points = numpy.zeros((dim0 + 1, dim1 + 1, 2), dtype=numpy.float64)\n\n # Compute inner points as mean of 4 neighbours\n neighbour_view = numpy.lib.stride_tricks.as_strided(\n points,\n shape=(dim0 - 1, dim1 - 1, 2, 2, points.shape[2]),\n strides=points.strides[:2] + points.strides[:2] + points.strides[-1:], writeable=False)\n inner_points = numpy.mean(neighbour_view, axis=(2, 3))\n grid_points[1:-1, 1:-1] = inner_points\n\n # Compute 'vertical' sides\n # Alternative: grid_points[1:-1, [0, -1]] = points[:-1, [0, -1]] + points[1:, [0, -1]] - inner_points[:, [0, -1]]\n grid_points[1:-1, [0, -1], 0] = points[:-1, [0, -1], 0] + 
points[1:, [0, -1], 0] - inner_points[:, [0, -1], 0]\n grid_points[1:-1, [0, -1], 1] = inner_points[:, [0, -1], 1]\n\n # Compute 'horizontal' sides\n grid_points[[0, -1], 1:-1, 0] = inner_points[[0, -1], :, 0]\n grid_points[[0, -1], 1:-1, 1] = points[[0, -1], :-1, 1] + points[[0, -1], 1:, 1] - inner_points[[0, -1], :, 1]\n\n # Compute corners\n d0, d1 = [0, 0, -1, -1], [0, -1, -1, 0]\n grid_points[d0, d1] = 2 * points[d0, d1] - inner_points[d0, d1]\n return grid_points", "title": "" }, { "docid": "c857b3d03535ba7ff267851abe445990", "score": "0.4837327", "text": "def make_triangle(n_rows):\n # Function to add binary numbers\n def bin_add(*args): return bin(sum(int(x, 2) for x in args))[2:]\n\n results = []\n for _ in range(n_rows):\n row = [bin(1)] # Append a binary 1 to the start of a row\n if results: # If there are existing results (row > 1)\n last_row = results[-1]\n # The following is just a fancy way to say \"For each result in the last row add it with its neighbor\"\n # Zip functions collects the previous row with itself and a version indexed one element ahead\n # The bin_add(*pair) unpacks the pair and calls the bin_add function with the results\n row.extend([bin_add(*pair) for pair in zip(last_row, last_row[1:])])\n row.append(bin(1)) # Append a binary 1 to the end of a row\n results.append(row)\n return results", "title": "" }, { "docid": "49a6240874430646f7fa0d7ebb98f448", "score": "0.48313656", "text": "def create_geometric_progression(a1, q, n):\n new_list = [a1]\n for i in range(1, n):\n new_list.append(new_list[i-1]*q)\n return new_list", "title": "" }, { "docid": "bdfddfa63f487c7161d35b67a4a3e294", "score": "0.48295766", "text": "def _generate_kpoints_mesh(npoints):\n from aiida.orm import KpointsData\n\n kpoints = KpointsData()\n kpoints.set_kpoints_mesh([npoints] * 3)\n\n return kpoints", "title": "" }, { "docid": "3866a24b1cb372b1f39a2e62e870e9ef", "score": "0.48277992", "text": "def test_create_lagrange1_quad():\n create_lagrange1_quad()", "title": "" }, { "docid": "8bf7b97423655745db5f4657be9564b7", "score": "0.48234716", "text": "def create(point_list, dimensions, axis=0, sel_axis=None, parent=None):\n\n if not point_list and not dimensions:\n raise ValueError('either point_list or dimensions must be provided')\n\n elif point_list:\n dimensions = check_dimensionality(point_list, dimensions)\n\n # by default cycle through the axis\n sel_axis = sel_axis or (lambda prev_axis: (prev_axis + 1) % dimensions)\n\n if not point_list:\n return KDNode(sel_axis=sel_axis, axis=axis, dimensions=dimensions)\n\n # Sort point list and choose median as pivot element\n point_list.sort(key=lambda point: point.get(axis, 0.))\n median = len(point_list) // 2\n\n loc = point_list[median]\n root = KDNode(loc, parent, left=None, right=None,\n axis=axis, sel_axis=sel_axis)\n root.left = create(point_list[:median],\n dimensions, sel_axis(axis), parent=root)\n root.right = create(point_list[median + 1:],\n dimensions, sel_axis(axis), parent=root)\n return root", "title": "" }, { "docid": "1d57b052e3b14e84347e0c0616a83c4d", "score": "0.48221102", "text": "def random(cls,n,**kwargs):\n points = [Point.random() for i in range(n)]\n return cls(points,**kwargs)", "title": "" }, { "docid": "e0fe594c11c3abafc36c6eb34bb814e9", "score": "0.4813354", "text": "def _split(self) -> None:\n\n width_low = self._bounds.bounds[0] // 2\n width_high = self._bounds.bounds[0] - width_low\n\n height_low = self._bounds.bounds[1] // 2\n height_high = self._bounds.bounds[1] - height_low\n\n 
self._children.append(QuadTree(AxisAlignedBoundingBox(\n self._bounds.position, (width_low, height_low)\n ), node_capacity=self._max_items, max_depth=self._max_depth - 1))\n\n self._children.append(QuadTree(AxisAlignedBoundingBox(\n self._bounds.position + (0, height_low), (width_low, height_high)\n ), node_capacity=self._max_items, max_depth=self._max_depth - 1))\n\n self._children.append(QuadTree(AxisAlignedBoundingBox(\n self._bounds.position + (width_low, height_low), (width_high, height_high)\n ), node_capacity=self._max_items, max_depth=self._max_depth - 1))\n\n self._children.append(QuadTree(AxisAlignedBoundingBox(\n self._bounds.position + (width_low, 0), (width_high, height_low)\n ), node_capacity=self._max_items, max_depth=self._max_depth - 1))\n\n nodes = self._nodes\n\n self._nodes = []\n\n for node in nodes:\n self._insert_into_children(node)", "title": "" }, { "docid": "74523fb124399df01396d22c03058cf2", "score": "0.48130873", "text": "def create_grid(X, npts):\n\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n return np.meshgrid(np.linspace(x_min, x_max, num=npts),\n np.linspace(y_min, y_max, num=npts))", "title": "" }, { "docid": "a04b7b08cc6ffeadf9440b08b7366858", "score": "0.48108792", "text": "def try_to_fix_quad(self,node,verbose=False):\n # print \"fix_quad\"\n \n # find the four outside vertices, in order (starting point\n # doesn't matter)\n\n # compute interior quad angle at each\n\n # the pair (0,2) or (1,3) with smaller average angle\n # define the endpoints (along with node) of the\n # edges to be removed.\n\n # get with it...\n cells = list(self.pnt2cells(node))\n\n if 0:\n subplot(211)\n cla()\n self.plot_cells(cells)\n\n cell_points_not_node = setdiff1d(ravel(self.cells[cells,:]),[node])\n\n # print cell_points_not_node\n\n deltas = self.points[cell_points_not_node,:2] - self.points[node,:2]\n angles = arctan2(deltas[:,1],deltas[:,0])\n\n quad_verts = cell_points_not_node[ argsort(angles) ]\n quad_points = self.points[quad_verts,:2]\n\n\n # now we have the outer four vertices in CCW order.\n quad_angles = zeros( (4,), float64)\n # plot( quad_points[ [0,1,2,3,0],0],\n # quad_points[ [0,1,2,3,0],1] )\n\n for i in range(4):\n im1 = (i-1)%4\n ip1 = (i+1)%4\n\n delta_prev = quad_points[i] - quad_points[im1]\n delta_next = quad_points[ip1] - quad_points[i]\n\n angle_prev = arctan2(delta_prev[1],delta_prev[0])\n angle_next = arctan2(delta_next[1],delta_next[0])\n\n quad_angles[i] = (angle_prev+pi - angle_next) % (2*pi)\n # print quad_points[i]\n # annotate( \"%g\"%(quad_angles[i]*180/pi), quad_points[i] )\n\n # now decide which way to merge:\n # switch the order of quad_verts so that 0 and 2 are the points that\n # define the merge axis\n if (quad_angles[1]+quad_angles[3]) < (quad_angles[0] + quad_angles[2]):\n quad_verts = quad_verts[ [1,2,3,0] ]\n\n\n # print \"Edges to be removed: %i-%i and %i-%i\"%(node,quad_verts[0],\n # node,quad_verts[2])\n\n # first figure out the indices for everyone:\n # the edges that get removed entirely:\n # names are oriented with the first node of the pointy end up\n dead_edge_top = self.find_edge( [node,quad_verts[0]] )\n dead_edge_bot = self.find_edge( [node,quad_verts[2]] )\n\n merge_edge_left = self.find_edge( [node,quad_verts[1]] )\n merge_edge_right = self.find_edge( [node,quad_verts[3]] )\n\n # order cells such that cell i is node,quad_verts[i],quad_verts[i+1]\n ordered_cells = -1*ones((4,),int32)\n for i in range(4):\n for j in range(4):\n # is cells[j] the ordered 
cell i?\n if all( sort(self.cells[ cells[j] ]) == sort([node,quad_verts[i],quad_verts[(i+1)%4]])):\n ordered_cells[i] = cells[j]\n break\n if any(ordered_cells) < 0:\n raise \"Failed to reorder the cells CCW\"\n\n # rename for better visuals...\n cell_nw,cell_sw,cell_se,cell_ne = ordered_cells\n\n # record that we're changing stuff:\n self.changed_cells.append(cell_nw)\n self.changed_cells.append(cell_ne)\n self.changed_cells.append(cell_se)\n self.changed_cells.append(cell_sw)\n\n # start combining cells:\n\n ### Combine northwest and northeast:\n north_points = setdiff1d(concatenate( (self.cells[cell_nw],self.cells[cell_ne]) ),[node])\n south_points = setdiff1d(concatenate( (self.cells[cell_sw],self.cells[cell_se]) ),[node])\n cell_n = cell_nw # new cell takes on northwest's index\n self.cells[cell_n,:] = north_points\n cell_s = cell_sw\n self.cells[cell_s,:] = south_points\n\n # mark the other cells as deleted\n self.cells[cell_ne,:] = -1\n self.cells[cell_se,:] = -1\n\n ## Edges\n self.edges[dead_edge_top] = -1\n self.edges[dead_edge_bot] = -1\n\n self.edges[merge_edge_right] = -1\n\n # and the one that gets rewritten to span across the quad\n self.edges[merge_edge_left,0] = quad_verts[1]\n self.edges[merge_edge_left,1] = quad_verts[3]\n # leave the marker as is\n self.edges[merge_edge_left,3] = cell_n\n self.edges[merge_edge_left,4] = cell_s\n\n # and surrounding edges - the diagonals northeast and southeast\n # have to have one of their cells rewritten:\n northeast_edge = self.find_edge( [quad_verts[3],quad_verts[0]] )\n southeast_edge = self.find_edge( [quad_verts[2],quad_verts[3]] )\n if self.edges[northeast_edge,3] == cell_ne:\n self.edges[northeast_edge,3] = cell_n\n if self.edges[northeast_edge,4] == cell_ne:\n self.edges[northeast_edge,4] = cell_n\n if self.edges[southeast_edge,3] == cell_se:\n self.edges[southeast_edge,3] = cell_s\n if self.edges[southeast_edge,4] == cell_se:\n self.edges[southeast_edge,4] = cell_s\n \n\n # and now fixup any stuff that we destroyed along the way:\n\n if self._pnt2cells:\n del self._pnt2cells[node] # this node is totally disowned\n for new_point in quad_verts:\n set_of_cells = self._pnt2cells[new_point]\n if cell_ne in set_of_cells:\n set_of_cells.remove(cell_ne)\n set_of_cells.add(cell_n)\n if cell_se in set_of_cells:\n set_of_cells.remove(cell_se)\n set_of_cells.add(cell_s)\n if self._vcenters:\n self._vcenters = None # lazy\n\n # fix pnts2edge:\n for other_point in quad_verts:\n nodes = (node,other_point)\n if nodes[0] > nodes[1]:\n nodes = (other_point,node)\n del self.pnts2edge[nodes]\n # and insert the new edge:\n nodes = (quad_verts[1],quad_verts[3])\n if nodes[0] > nodes[1]:\n nodes = (quad_verts[3],quad_verts[1])\n self.pnts2edge[nodes] = merge_edge_left\n # end fixing pnts2edge\n\n if verbose:\n #subplot(212)\n #cla()\n print(\"fix_quad: node %d, create cells %d %d\"%(node,cell_n,cell_s))\n # self.plot_cells([cell_n,cell_s])\n\n self.quads_merged += 1\n return True", "title": "" }, { "docid": "8f6a85382d5d9e71252501125d5f1dbf", "score": "0.48085007", "text": "def generate_xmas_tree(rows=10):\n #return_str = ''\n return_str = f\"{' '*(rows-1)}*\"\n for i in range(2, rows+1):\n return_str += '\\n'\n return_str += f\"{' '*(rows-i)}{'*'*i}{'*'*(i-1)}\" \n return return_str", "title": "" }, { "docid": "f670ede5638e5d3577e980e1d9b0cf40", "score": "0.47963655", "text": "def generate_grid(upper_right, lower_left, n):\n\n conn = sqlite3.connect(db)\n cursor = conn.cursor()\n\n stmt_delete = 'DELETE FROM grid_cells;'\n 
cursor.execute(stmt_delete)\n stmt_delete = 'DELETE FROM routes_cells;'\n cursor.execute(stmt_delete)\n stmt_delete = 'DELETE FROM stations_cells_overpass;'\n cursor.execute(stmt_delete)\n stmt_delete = 'DELETE FROM stations_cells_here;'\n cursor.execute(stmt_delete)\n\n stmt_insert = \"\"\"INSERT INTO grid_cells (x_axis, y_axis, upper_left, upper_right, lower_right, lower_left) \n VALUES (?, ?, ?, ?, ?, ?);\"\"\"\n\n lat_steps = numpy.linspace(lower_left[0], upper_right[0], n + 1)\n lon_steps = numpy.linspace(lower_left[1], upper_right[1], n + 1)\n\n lat_stride = lat_steps[1] - lat_steps[0]\n lon_stride = lon_steps[1] - lon_steps[0]\n\n for lat_index, lat in enumerate(lat_steps[:-1]):\n for lon_index, lon in enumerate(lon_steps[:-1]):\n upper_left = ','.join(map(str, [lat + lat_stride, lon]))\n upper_right = ','.join(map(str, [lat + lat_stride, lon + lon_stride]))\n lower_right = ','.join(map(str, [lat, lon + lon_stride]))\n lower_left = ','.join(map(str, [lat, lon]))\n\n data = (lat_index, lon_index, upper_left, upper_right, lower_right, lower_left)\n cursor.execute(stmt_insert, data)\n\n stmt_routes = 'SELECT bus_id, coordinates FROM routes_points ORDER BY bus_id, segment AND seq;'\n routes = cursor.execute(stmt_routes).fetchall()\n\n stmt_cells = 'SELECT id, upper_left, upper_right, lower_right, lower_left FROM grid_cells;'\n cells = cursor.execute(stmt_cells).fetchall()\n\n stmt_check = 'SELECT count(*) FROM routes_cells WHERE bus_id = ? AND cell_id = ?;'\n stmt_insert = 'INSERT INTO routes_cells (bus_id, cell_id, seq) VALUES (?, ?, ?);'\n\n for point in enumerate(tqdm(routes)):\n for cell in cells:\n point_obj = Point(map(float, point[1].split(',')))\n poly_obj = Polygon(map(list, [\n map(float, cell[1].split(',')), \n map(float, cell[2].split(',')), \n map(float, cell[3].split(',')), \n map(float, cell[4].split(','))\n ]))\n \n if point_obj.within(poly_obj):\n data = (point[0], cell[0])\n counter = cursor.execute(stmt_check, data).fetchone()[0]\n if counter == 0:\n # index += 1\n data = (point[0], cell[0], 1) # sets seq = 1 because the order of segments is wrong anyway\n cursor.execute(stmt_insert, data)\n break\n\n stmt_stations_overpass = 'SELECT id, coordinates_overpass FROM stations WHERE no_data = 0 AND duplicate = 0;'\n stations_overpass = cursor.execute(stmt_stations_overpass).fetchall()\n\n stmt_insert_overpass = 'INSERT INTO stations_cells_overpass (station_id, cell_id) VALUES (?, ?);'\n\n for station in tqdm(stations_overpass):\n for cell in cells:\n point_obj = Point(map(float, station[1].split(',')))\n poly_obj = Polygon(map(list, [\n map(float, cell[1].split(',')), \n map(float, cell[2].split(',')), \n map(float, cell[3].split(',')), \n map(float, cell[4].split(','))\n ]))\n\n if point_obj.within(poly_obj):\n data = (station[0], cell[0])\n cursor.execute(stmt_insert_overpass, data)\n break\n\n stmt_stations_here = 'SELECT id, coordinates_here FROM stations WHERE no_data = 0 AND duplicate = 0;'\n stations_here = cursor.execute(stmt_stations_here).fetchall()\n\n stmt_insert_here = 'INSERT INTO stations_cells_here (station_id, cell_id) VALUES (?, ?);'\n\n for station in tqdm(stations_here):\n for cell in cells:\n point_obj = Point(map(float, station[1].split(',')))\n poly_obj = Polygon(map(list, [\n map(float, cell[1].split(',')), \n map(float, cell[2].split(',')), \n map(float, cell[3].split(',')), \n map(float, cell[4].split(','))\n ]))\n\n if point_obj.within(poly_obj):\n data = (station[0], cell[0])\n cursor.execute(stmt_insert_here, data)\n break\n\n conn.commit()\n 
cursor.close()\n conn.close()", "title": "" }, { "docid": "7f4e8d0169017cf36e1902aa8106f7d4", "score": "0.47936025", "text": "def create_nodes(id, x, y, z):", "title": "" }, { "docid": "ced671e4e26eca131b8f8fc8eb8c5e9d", "score": "0.47927603", "text": "def show_primitive_pthyagorean_triples(n):\n \n draw.make_blank_canvas([15,15],facecolor=\"lightgray\")\n draw.make_blank_plot(1,1,1,[-.5,n],[-.5,n])\n \n for T in primitive_pythagorean_triples():\n if T[2] > n:\n break\n \n pt1 = (0,0)\n pt2 = (T[2],0)\n pt3 = (T[2],T[1])\n \n draw.connection_p(pt1,pt2,alpha=.2)\n draw.connection_p(pt2,pt3,alpha=.2)\n draw.connection_p(pt3,pt1,alpha=.2)", "title": "" }, { "docid": "161423f3fd99e06f48ebb4e055205a6b", "score": "0.4789858", "text": "def polygon(n, length):\n for _ in range(n):\n forward(length)\n left(360/n)", "title": "" }, { "docid": "63dafa27e66f39207c677ab2ed7e5fe3", "score": "0.47893977", "text": "def balanced_tree(r, h, create_using=None):\n # number of nodes is n=1+r+..+r^h\n if r==1:\n n=2\n else:\n n = int((1-r**(h+1))/(1-r)) # sum of geometric series r!=1\n G=nx.empty_graph(n,create_using)\n G.add_edges_from(_tree_edges(n,r))\n return G\n\n return nx.full_rary_tree(r,n,create_using)", "title": "" }, { "docid": "d87edb10fddac11a6d69ec223704d812", "score": "0.47886518", "text": "def nodeNumGrid(self, obj, x=None, y=None, t=None):\r\n # Data:\r\n dL = obj.domLims\r\n discNum = obj.discNum\r\n dx = obj.dx\r\n dy = obj.dy\r\n domain = obj.domain\r\n \r\n n = obj.n\r\n dt = obj.dt\r\n T = obj.T\r\n \r\n # Error handling:\r\n if not self.isnone(x) and self.Type(x) is not float: raise TypeError('\\'x\\' must be a float number!')\r\n if not self.isnone(y) and self.Type(y) is not float: raise TypeError('\\'y\\' must be a float number!')\r\n \r\n if not self.isnone([x,y]) and not domain.isInside(np.array([[x,y]]), tol=1e-6):\r\n raise ValueError('specified point is outside specified domain bounds!')\r\n if not self.isnone(x) and (x<dL[0,0] or dL[1,0]<x):\r\n raise ValueError('specified depth \\'x\\' is outside the domain bounds!')\r\n elif not self.isnone(y) and (y<dL[0,1] or dL[1,1]<y):\r\n raise ValueError('specified depth \\'y\\' is outside the domain bounds!')\r\n \r\n if not self.isnone(t) and self.Type(t) is not float: raise TypeError('\\'t\\' must be a float number!')\r\n elif not self.isnone(t) and (t<0.0 or t>T): raise ValueError('\\'t\\' is out of bound!')\r\n \r\n # Find closest discrete nodes to each coordinate:\r\n if not x is None:\r\n nx1 = math.floor((x - dL[0,0])/dx)\r\n nx2 = math.ceil((x - dL[0,0])/dx)\r\n x1 = dL[0,0] + nx1*dx\r\n x2 = dL[0,0] + nx2*dx\r\n if np.abs(x-x1)<=np.abs(x-x2): nx, x = nx1, x1\r\n else: nx, x = nx2, x2\r\n if nx==discNum[0]: nx-=1\r\n else:\r\n nx, x = [None]*2\r\n \r\n if not y is None:\r\n ny1 = math.floor((y - dL[0,1])/dy)\r\n ny2 = math.ceil((y - dL[0,1])/dy)\r\n y1 = dL[0,1] + ny1*dy\r\n y2 = dL[0,1] + ny2*dy\r\n if np.abs(y-y1)<=np.abs(y-y2): ny, y = ny1, y1\r\n else: ny, y = ny2, y2\r\n if ny==discNum[1]: ny-=1\r\n else:\r\n ny, y = [None]*2\r\n \r\n if not t is None:\r\n nt1 = math.floor(t/dt)\r\n t1 = nt1*dt\r\n nt2 = math.ceil(t/dt)\r\n t2 = nt2*dt\r\n if np.abs(t-t1)<=np.abs(t-t2): nt, t = nt1, t1\r\n else: nt, t = nt2, t2\r\n if nt==n: nt-=1\r\n else:\r\n nt, t = [None]*2\r\n \r\n if not self.isnone([x,y]): ns = ny*discNum[0]+nx\r\n else: ns = None\r\n \r\n return nx, ny, nt, x, y, t, ns", "title": "" }, { "docid": "fbb9bd6935a26f5a557a85c5bb2ba3e5", "score": "0.4786501", "text": "def generate_points(n, w):\n\n x1_min, x1_max, x2_min, 
x2_max = get_range(w)\n x1_range = x1_max - x1_min\n x2_range = x2_max - x2_min\n\n points = []\n\n for i in range(n):\n # Choose a random value in the range (x1_min, x1_max)\n x1 = random() * x1_range + x1_min\n # Choose a random value in the range (x2_min, x2_max)\n x2 = random() * x2_range + x2_min\n \n x = (1, x1, x2)\n points.append(x)\n\n return points", "title": "" }, { "docid": "9098b77e2b27eda7c9ed7c476246c616", "score": "0.47850773", "text": "def build_kdtree(kdtree, points, deeplimit, dimorder=None):\n assert len(points.shape) == 2\n assert deeplimit >= 0\n if type(dimorder) != type(None):\n assert len(dimorder) >= deeplimit\n\n if deeplimit != 0: kdtree.divide_bit = 1 << (deeplimit-1)\n # deal with deeplimit == 0, leaf node.\n if points.shape[0] <= 1 or deeplimit == 0:\n kdtree.dim = -1\n kdtree.point = np.uint8(np.mean(points, axis=0))\n return\n\n\n # decide divide dim\n dividedim = -1\n if type(dimorder) == type(None):\n # compute cubebbox for points.\n cubebbox = np.array([[np.min(x), np.max(x)] for x in points.T], dtype=np.uint8)\n\n # find long dim in cube.\n dividedim = np.argmax(cubebbox[:,1]-cubebbox[:,0])\n else:\n # use dim in dimorder\n dividedim = dimorder[len(dimorder)-deeplimit]\n\n assert dividedim != -1\n\n sorted_dim_idx = np.argsort(points[:,dividedim])\n\n # compute median of the dim dvalue.\n median = 0\n if sorted_dim_idx.size % 2 == 0:\n median_idx1 = sorted_dim_idx[sorted_dim_idx.size//2]\n median_idx2 = sorted_dim_idx[sorted_dim_idx.size//2-1]\n median = np.uint8((int(points[median_idx1, dividedim]) + int(points[median_idx2, dividedim]))/2)\n else:\n median_idx = sorted_dim_idx[sorted_dim_idx.size//2]\n median = points[median_idx, dividedim]\n\n # find divide index in the dim of cube.\n divide_idx = sorted_dim_idx.size\n for i in range(sorted_dim_idx.size):\n if points[sorted_dim_idx[i], dividedim] > median:\n divide_idx = i\n break\n\n # compute points set 1 and 2 with point set's cube bbox.\n points1 = points[sorted_dim_idx[:divide_idx], :]\n points2 = points[sorted_dim_idx[divide_idx:], :]\n\n # build tree\n kdtree.dim = dividedim\n kdtree.dvalue = median\n kdtree.point = np.uint8(np.mean(points, axis=0))\n\n if points1.size != 0:\n kdtree.ltree = KDtree()\n build_kdtree(kdtree.ltree, points1, deeplimit-1, dimorder)\n kdtree.lcode = kdtree.ltree.code\n\n if points2.size != 0:\n if deeplimit > 6:\n t = 0\n kdtree.rtree = KDtree()\n kdtree.rtree.code = kdtree.code | kdtree.divide_bit\n build_kdtree(kdtree.rtree, points2, deeplimit-1, dimorder)\n kdtree.rcode = kdtree.rtree.code", "title": "" }, { "docid": "c5979bf18060f802e257824c6255b9ed", "score": "0.47841573", "text": "def insert(self, p: List[Point]):\r\n dim=2\r\n # print(\"self._n:\",self._n)\r\n depth=0\r\n def add(pts,depth):\r\n if len(pts) > 1:\r\n axis = depth% dim#Split the dimensions in turn\r\n pts.sort(key=lambda x: x[axis])\r\n m = int(len(pts) /2)#Split the left and right subtrees\r\n # print(\"m:\",m)\r\n leftc=add(pts[:m],depth+1)\r\n rightc=add(pts[m + 1:],depth+1)\r\n self._n=self._n+1#increase the number of nodes in the tree\r\n \r\n return Node(pts[m], leftc, rightc)\r\n if len(pts) == 1:\r\n return Node(pts[0], None, None)\r\n\r\n self._root=add(p, 0)", "title": "" }, { "docid": "2dc22ea1f6d6725955816aebb16d05a3", "score": "0.47835997", "text": "def make_complete_planar_graph(N, seed: int = None) -> nx.Graph:\n\n np.random.seed(seed)\n\n # Complete graph on points in xy-plane with pairwise distances as edge weights\n G = nx.complete_graph(N)\n\n pos = np.random.rand(N, 2)\n d = 
distance_matrix(pos, pos)\n\n for ei, ej in G.edges:\n G[ei][ej][\"weight\"] = d[ei][ej]\n\n for node in G.nodes:\n G.nodes[node][\"pos\"] = pos[node, :]\n\n return G", "title": "" }, { "docid": "d15aef7872548c913af24022afa80aae", "score": "0.47740647", "text": "def make_edges(n):\n edges = []\n for i in range(n):\n for j in range(i, n):\n edges.append([i, j])\n return edges", "title": "" }, { "docid": "2fda9cf6313aa562b1cf0453cffccfe5", "score": "0.47740105", "text": "def complete(cls, size=3):\n verts = range(size)\n edges = [(i, j) for i in verts for j in range(i+1, size)]\n return cls(verts, edges)", "title": "" }, { "docid": "a3ed69407653133f8773ff3d6d32ed6a", "score": "0.4773751", "text": "def generate_points(state, n):\n item_idx = []\n for i in range(n):\n item_x = random.randint(1, state.shape[0] - 1)\n item_y = random.randint(1, state.shape[1] - 1)\n item_idx.append(np.array([item_x, item_y]))\n item_idx = np.array(item_idx)\n state[item_idx[:, 1], item_idx[:, 0]] = 1", "title": "" }, { "docid": "2d7cf05950a2cf24ea67d7e43beec888", "score": "0.47724634", "text": "def complete_graph(n,create_using=None):\n G=empty_graph(n,create_using)\n G.name=\"complete_graph(%d)\"%(n)\n if n>1:\n if G.is_directed():\n edges=itertools.permutations(range(n),2)\n else:\n edges=itertools.combinations(range(n),2)\n G.add_edges_from(edges)\n return G", "title": "" }, { "docid": "5dd2f221eb90568d53c83085a62b186a", "score": "0.4771989", "text": "def Grundy(n):\r\n\r\n # base case \r\n if n == 0:\r\n return 0\r\n if 1 <= n and n <= 3:\r\n return n\r\n \r\n return MEX({Grundy(n-1), Grundy(n-2), Grundy(n-3)})", "title": "" }, { "docid": "6f8cd9d8a88be478f90600734c498542", "score": "0.47717896", "text": "def returnPointsUpToN(n):\n points = [first_points]\n while n:\n points.append( returnNewPoints(points[-1]) )\n n -= 1\n return points", "title": "" }, { "docid": "4edca9863e3d6927d7982f6759dd19b6", "score": "0.47715172", "text": "def create_ordered_G(n):\n G = np.zeros((n,n))\n for j in range(n):\n for i in range(j):\n G[i][j] = 1\n return G", "title": "" }, { "docid": "fbb022e006568b84eee4ca6bb2bc667e", "score": "0.476778", "text": "def __init__(self, n, q, name='a'):\n id = 'Group([()])' if n == 1 else 'PGL(%s,%s)'%(n,q)\n PermutationGroup_generic.__init__(self, gap_group=id)\n self._q = q\n self._base_ring = GF(q, name=name)\n self._n = n", "title": "" } ]
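Several of the negative passages above (computeListTree, tree_subdivide, QuadTree._split) are variations on one recursive quadtree subdivision pattern: buffer points in a leaf, split into four equal children when a bucket overflows, and redistribute. For reference, a minimal self-contained sketch of that pattern; the names Quad, capacity, and MAX_DEPTH are illustrative and taken from none of the passages, and cells are half-open so a point on a shared edge lands in exactly one child.

class Quad:
    """Point quadtree with a bucket capacity, splitting into four equal children."""
    MAX_DEPTH = 16  # stops splitting when many points coincide

    def __init__(self, x0, y0, size, capacity=4, depth=0):
        self.x0, self.y0, self.size = x0, y0, size   # lower-left corner and side length
        self.capacity, self.depth = capacity, depth
        self.points = []       # points buffered while this node is a leaf
        self.children = None   # [SW, SE, NW, NE] once split

    def contains(self, p):
        # Half-open cell: a point on a shared edge belongs to exactly one child.
        return (self.x0 <= p[0] < self.x0 + self.size and
                self.y0 <= p[1] < self.y0 + self.size)

    def insert(self, p):
        if not self.contains(p):
            return False
        if self.children is None:
            self.points.append(p)
            if len(self.points) > self.capacity and self.depth < self.MAX_DEPTH:
                self._split()
            return True
        return any(child.insert(p) for child in self.children)

    def _split(self):
        h = self.size / 2.0
        d = self.depth + 1
        self.children = [Quad(self.x0,     self.y0,     h, self.capacity, d),   # SW
                         Quad(self.x0 + h, self.y0,     h, self.capacity, d),   # SE
                         Quad(self.x0,     self.y0 + h, h, self.capacity, d),   # NW
                         Quad(self.x0 + h, self.y0 + h, h, self.capacity, d)]   # NE
        buffered, self.points = self.points, []
        for q in buffered:     # redistribute buffered points into the children
            any(child.insert(q) for child in self.children)

Usage is the same shape as in the passages above: root = Quad(0.0, 0.0, 1.0); root.insert((0.25, 0.75)). Buffering in the leaf and redistributing only on overflow is the same design choice tree_subdivide makes via check_points; the depth cap plays the role of the depth limit in computeListTree.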
6693dd0d3545fa42d0a2ba4f46360eaf
Makes featured profiles for IdeaLab galleries.
[ { "docid": "9895d20dfb920c5da005bd25ddbc885b", "score": "0.7489455", "text": "def makeGallery():\n if params['subtype'] in ['intro', 'new_idea', 'ieg_draft', 'participants_wanted']:\n featured_list = getFeaturedProfiles()\n else:\n sys.exit(\"unrecognized featured content type \" + params['subtype']) \n prepOutput(featured_list)", "title": "" } ]
[ { "docid": "d1d605377361822796ab2f870c16fe02", "score": "0.6286335", "text": "def getFeaturedProfiles():\n featured_list = []\n profile_page = profiles.Profiles(params[params['subtype']]['input page path'], params[params['subtype']]['input page id'], params)\n profile_list = profile_page.getPageSectionData(level = params[params['subtype']]['profile toclevel'])\n for profile in profile_list:\n# print profile\n text = profile_page.getPageText(profile['index'])\n profile = profile_page.scrapeInfobox(profile, text)\n if len(profile['summary']) > 1 and len(profile['image']) > 1:\n profile['action'] = params[params['subtype']]['action']\n profile['summary'] = tools.formatSummaries(profile['summary']) \n featured_list.append(profile)\n shuffle(featured_list)\n featured_list = featured_list[:params[params['subtype']]['number featured']]\n return featured_list", "title": "" }, { "docid": "128c49717341c7df9e176ad69095f58c", "score": "0.56162995", "text": "def profiles(self, create, extracted, **kwargs):\n if not create:\n return\n if extracted:\n for profile in extracted:\n self.profiles.add(profile)", "title": "" }, { "docid": "ad09365168992469f2768252acbaf3c3", "score": "0.5578986", "text": "def instance_profiles_group():\n pass", "title": "" }, { "docid": "63ebbc3e6a0ff78ccc7141aa30e7f80f", "score": "0.5551682", "text": "def profiles():", "title": "" }, { "docid": "4fa50165ea610cd44d25af30d03c5a75", "score": "0.5376387", "text": "def create_in_hdx(self):\n # type: () -> None\n self._create_in_hdx('showcase', 'name', 'title')", "title": "" }, { "docid": "8865fd579593a456e02c2a3687866959", "score": "0.5176281", "text": "def test_18_Aussie_Specialist_Photos(self):\n # pre-condition: Logged in as a Qualified User.\n self.dr.open_home_page()\n CP.SignIn(self.dr).sign_in(self.globs['username'], self.globs['password'])\n # Navigate to ASC > AS Photos\n try:\n CP.NavMenu.AussieSpecialistClub(self.dr).open().aussie_specialist_photos().click()\n except Exception:\n self.add_error()\n CP.BackupHrefs(self.dr).photos()\n # Should display Instagram Image Tiles, with links and descriptions\n for pic in CP.AussieSpecialistPhotos(self.dr).random_images(10):\n pic.open()\n pic.get_description()\n if not self.globs['cn_mode']: # China does not have Instagram.\n pic.get_link()\n pic.close()", "title": "" }, { "docid": "ad8d448a63a6a7eee967ba10f53fe650", "score": "0.5131111", "text": "def crawler_create_new_fashion_profiles(\n submission_tracker=None, num_pages_to_load=20\n):\n _create_profiles_from_instagram_hashtags(\n hashtags_keys=('fashion_hashtags', 'fashion_brands', ),\n pipeline_class_name=pipelines.FashionPipeline.__name__,\n submission_tracker=submission_tracker,\n num_pages_to_load=num_pages_to_load\n )", "title": "" }, { "docid": "0e1feffb442910895a69cc80d286de8d", "score": "0.51246053", "text": "def profile(self):\n class ProfileTile(PanelTile):\n title = 'Profile'\n fullwidth = True\n\n class MyProfile(ProfileForm):\n pass\n\n return dict(page='profile', page_title='profile',\n page_tile=ProfileTile())", "title": "" }, { "docid": "5ce9356bfde1107cf28d735a2c59e208", "score": "0.51224387", "text": "def galleries_create(title, description, primary_photo_id=None):\n method = 'flickr.galleries.create'\n if primary_photo_id is None:\n _dopost(method, auth=True, title=title, description=description, \n primary_photo_id=primary_photo_id)\n elif primary_photo_id is not None:\n _dopost(method, auth=True, title=title, description=description)", "title": "" }, { "docid": "6ed010e00ed3ee293ddc34ae0b0a8c1b", 
"score": "0.50293815", "text": "def crawler_create_new_menfashion_profiles(\n submission_tracker=None, num_pages_to_load=20\n):\n _create_profiles_from_instagram_hashtags(\n hashtags_keys=('menfashion_hashtags', ),\n pipeline_class_name=pipelines.MenFashionPipeline.__name__,\n submission_tracker=submission_tracker,\n num_pages_to_load=num_pages_to_load\n )", "title": "" }, { "docid": "cf8fbe6b0a8e1bc144f5a34636a62218", "score": "0.50221646", "text": "def create_image_list_by_fixed_gallery(self):\n self.image = copy.deepcopy( self.dataset['image_g'] )\n self.pid = copy.deepcopy( self.dataset['pid_g'] ) \n self.cam = copy.deepcopy( self.dataset['cam_g'] ) \n self.seq = copy.deepcopy( self.dataset['seq_g'] )\n self.frame = copy.deepcopy( self.dataset['frame_g'] )\n self.record = copy.deepcopy( self.dataset['record_g'] )", "title": "" }, { "docid": "2b607c95eb5ba93290e9cbdb7ec5eed3", "score": "0.4987727", "text": "def viewer_setup(self):", "title": "" }, { "docid": "cbb22af65659d15f72dda2073b2424be", "score": "0.498219", "text": "def udevelop_presets(self,request):\n out_html=\"\"\"\n <div class=\"underdevelop\">\n <h1> Under Development, please refer to the Github repository.</h1>\n </div>\n \"\"\"\n return out_html", "title": "" }, { "docid": "46e00162c423828d509c2a3d85ca3d4e", "score": "0.49809617", "text": "def galleries(request):\n\n context_galleries_selected = 'class=\"disabled\"' # see seeourminds.css\n template = loader.get_template('content/galleries.html')\n context = {\n 'adsense_ads': adsense_ads,\n 'context_galleries_selected': context_galleries_selected,\n }\n return HttpResponse(template.render(context, request))", "title": "" }, { "docid": "c96d099972cac8e4dff5a7d63d66511d", "score": "0.49054644", "text": "def new_classified(request, pPersonSlug):\n try:\n lPerson = Person.objects.filter(slug=pPersonSlug)[0]\n except IndexError:\n raise Http404\n\n if request.user.is_anonymous == True or request.user.profile.pro_member == False:\n raise Http404\n\n lMaxProfiles = request.user.profile.max_profile_count\n lActualPeopleProfilesCount = ClassifiedPerson.objects.filter(owner=request.user).count()\n\n if (lActualPeopleProfilesCount >= lMaxProfiles):\n return HttpResponseRedirect(\"/people/%s/newclassified/too_many/?username=%s\" % (lPerson.slug,request.user.username))\n\n lProfile = None\n try:\n lProfile = ClassifiedPerson.objects.filter(person=lPerson)[0]\n except IndexError:\n lProfile = None\n\n if request.method == 'POST':\n if lProfile:\n if lProfile.owner != request.user:\n raise Http404\n lForm = EditClassifiedProfileForm(request.POST, instance=lProfile)\n else:\n lForm = EditClassifiedProfileForm(request.POST)\n if lForm.is_valid():\n # save new profile\n lNewProfile = lForm.save(commit=False)\n lNewProfile.person = lPerson\n lNewProfile.lastChangedBy = request.user\n lNewProfile.owner = request.user\n lNewProfile.visible = True\n lNewProfile.show_on_homepage = False\n lNewProfile.save()\n\n # email notification\n notification(None, lNewProfile, 'people', 'profile', 'new', request.user, browser_details(request))\n\n # redirect to person page\n return HttpResponseRedirect('/people/%s/' % pPersonSlug)\n else:\n if lProfile:\n lForm = EditClassifiedProfileForm(instance=lProfile)\n else:\n lForm = EditClassifiedProfileForm()\n\n return render_auth(request, 'people/edit_classified.html', {\"Person\" : lPerson,\n \"form\" : lForm,\n })", "title": "" }, { "docid": "e7c023df7e28b457af1d205f4310e2a3", "score": "0.4899078", "text": "async def make(self, ctx):\n\t\tprofile = await 
self.get_profile(ctx)\n\t\tif not profile:\n\t\t\tawait ctx.db.execute(f'insert into profiles values({ctx.author.id})')\n\t\tawait ctx.invoke(self.profile, member=ctx.author)", "title": "" }, { "docid": "4510ce3d9dd38d4fed8cb2ee3cf8c1eb", "score": "0.48953187", "text": "def crawler_create_new_food_profiles(\n submission_tracker=None, num_pages_to_load=20\n):\n _create_profiles_from_instagram_hashtags(\n hashtags_keys=('food_hashtags', ),\n pipeline_class_name=pipelines.FoodPipeline.__name__,\n submission_tracker=submission_tracker,\n num_pages_to_load=num_pages_to_load\n )", "title": "" }, { "docid": "1053c5bf707818829372f0718c44e0c7", "score": "0.48833743", "text": "def handle_manual_profiles(self) -> None:\n for filepath, module, contains in self.settings.MANUAL_PROFILES:\n for contained in contains:\n profile: FHIRStructureDefinition = FHIRStructureDefinition(self, None)\n profile.is_manual = True\n\n prof_dict = {\n \"name\": contained,\n \"differential\": {\"element\": [{\"path\": contained}]},\n }\n if module == \"fhirtypes\":\n profile_name = self.class_name_for_profile(contained)\n assert isinstance(profile_name, str)\n if self.class_name_is_primitive(profile_name):\n prof_dict[\"kind\"] = \"primitive-type\"\n\n profile.structure = FHIRStructureDefinitionStructure(profile, prof_dict)\n\n if self.found_profile(profile):\n profile.process_profile()", "title": "" }, { "docid": "4729e8e85d774f7d87a0f8e0f5ae1716", "score": "0.48826858", "text": "def create_profile(theme_name):\n profile_id = str(uuid.uuid4())\n profile_dir = dconf_path + '/:' + profile_id\n\n # create new profile\n os.system(\"%s %s/default \\\"'%s'\\\"\" % (dconf_write, dconf_path, profile_id))\n\n # update profile list\n existing_profiles = get_profile_list()\n existing_profiles.append(profile_id)\n\n profiles = \"','\".join(existing_profiles)\n os.system(\n \"%s %s/list \\\"['%s']\\\"\" % (dconf_write, dconf_path, profiles)\n )\n\n # set visible name\n os.system(\n \"%s %s/visible-name \\\"'%s'\\\"\" % (dconf_write, profile_dir, theme_name)\n )\n\n return profile_id", "title": "" }, { "docid": "9b482174c76c57355b4fd1e4224ebe1e", "score": "0.48780692", "text": "def view():\n return GalleryTemplate(resources=list(config.applications.values()))", "title": "" }, { "docid": "59dcb2d7d301380785d2df990a7757a1", "score": "0.48745", "text": "def __init__(self, profile_url):\n\n LightProfile.__init__(self, profile_url)", "title": "" }, { "docid": "7f7f406d25b65de2d98a5b401389364d", "score": "0.4864639", "text": "def insert_picture(url):\n html_code = '<img class = \"gallery\" src='+ url_for('static', filename = url) +' alt =' + url + ' >'\n return html_code", "title": "" }, { "docid": "de25cbac235ca8119965054082d5ee54", "score": "0.48623726", "text": "def showcase(request):\n\n artworks = models.Artwork.objects.all()\n\n context = {\n 'artworks': artworks,\n }\n\n return render(request, 'showcase/showcase.html', context)", "title": "" }, { "docid": "e780ea9d78446ae36b7a7ae2ec5ac954", "score": "0.48529172", "text": "def setUpPloneSite(self, portal):\n self.applyProfiles(portal)", "title": "" }, { "docid": "1e53b133ffaf21fc70d4219c2770c82d", "score": "0.4842471", "text": "def after_django_setup():\n from ddf import teach\n from mezzanine_seminars.models import Seminar\n\n # When creating Seminars we don't want to create extra sites\n teach(Seminar, site=None, featured_image=\"\")", "title": "" }, { "docid": "b5c9aac328317a89e2effab21d0f6a36", "score": "0.48282424", "text": "def my_create_features(self, example):", "title": "" }, 
{ "docid": "b268f9f69b91cb4fb346bb33d56fb449", "score": "0.48206577", "text": "def staff_profile(name, img, blurb, type=\"large\"):\n\n if type == \"large\":\n profile_div_classes = \"col-12 col-md-6 col-lg-3\"\n img_div_classes = \"col-8 col-lg-12\"\n blurb_classes = \"\"\n else:\n profile_div_classes = \"col-6 col-md-4 col-lg-2\"\n img_div_classes = \"col-12\"\n blurb_classes = \"text-center\"\n\n html = f\"\"\"<div class=\"{profile_div_classes}\">\n <div class=\"row justify-content-center\">\n <div class=\"{img_div_classes}\">\n <img class=\"img-fluid w-100 mx-auto d-block rounded-circle shadow mb-3\" \n alt=\"{name}\" \n src=\"{img}\"/> \n </div>\n </div>\n <h3 class=\"text-center\">{name}</h3> \n <p class=\"pb-4 {blurb_classes}\">{blurb}</p>\n </div>\"\"\"\n return mark_safe(html)", "title": "" }, { "docid": "d99bdac3f5e79619919b39a6a5d80d96", "score": "0.48089728", "text": "def setUp(self):\n self.user = User.objects.create_user('user', '[email protected]',\n 'password')\n user_profile = UserProfile(user=self.user, restkey='key')\n user_profile.save()\n\n self.superuser = User.objects.create_superuser('superuser',\n '[email protected]',\n 'password')\n\n self.client.login(username='user', password='password')\n\n self.snippet_text = Snippet.objects.create(\n author=self.user,\n title=\"Text file\",\n description=\"Text file for testing\",\n lexer=\"txt\",\n body=\"\"\"This is a text snippet\n this is is the second line\n \"\"\"\n )\n\n self.snippet_python = Snippet.objects.create(\n author=self.user,\n title=\"Python snippet\",\n description=\"Python snippet for testing\",\n lexer=\"python\",\n body=\"\"\"Snippet body\"\"\",\n )\n self.snippet_python.tags.add(\"python\", \"code\")", "title": "" }, { "docid": "ad867d9f97115ecae23f2d72d79ec47f", "score": "0.48057523", "text": "def gallery(self):\n\n\t\tcurrent_image_index = 0\n\t\timg_dir = self.img_dirs[0]\n\n\t\twhile True:\n\t\t\twallpaper = Image.new('RGB', self.target_size)\n\n\t\t\tself.__make_wallpaper(wallpaper, \"gallery\", [current_image_index])\n\t\t\tself.__save_img(wallpaper, current_image_index)\n\n\t\t\tcurrent_image_index = (current_image_index + 1) % len(img_dir)\n\n\t\t\tif current_image_index == 0:\n\t\t\t\tbreak;", "title": "" }, { "docid": "a76318640aa71843c68654ba32d4d8d7", "score": "0.47976053", "text": "def addDefaultObjects(context):\n if context.readDataFile('urban_extra_marker.txt') is None:\n return\n\n profile_name = context._profile_path.split('/')[-1]\n module_name = 'Products.urban.profiles.%s.default_objects' % profile_name\n attribute = 'default_objects'\n module = __import__(module_name, fromlist=[attribute])\n default_objects = getattr(module, attribute)\n\n # add some users, some architects and some foldermanagers...\n # add 3 users, one as manager, one as reader and one as editor...\n site = context.getSite()\n addTestUsers(site)\n # add some architects...\n urbanFolder = getattr(site, \"urban\")\n notFolder = getattr(urbanFolder, \"architects\")\n if not notFolder.objectIds():\n # create some architects using the Extensions.imports script\n from Products.urban.Extensions.imports import import_architects\n import_architects(context.getSite().portal_urban)\n\n # add some notaries...\n urbanFolder = getattr(site, \"urban\")\n notFolder = getattr(urbanFolder, \"notaries\")\n if not notFolder.objectIds():\n objects_list = default_objects['notaries']\n createFolderDefaultValues(notFolder, objects_list)\n logger.info(\"Notaries examples have been added\")\n\n # add some geometricians...\n urbanFolder = 
getattr(site, \"urban\")\n geoFolder = getattr(urbanFolder, \"geometricians\")\n if not geoFolder.objectIds():\n objects_list = default_objects['geometricians']\n createFolderDefaultValues(geoFolder, objects_list)\n logger.info(\"Geometricians examples have been added\")\n\n # add some parcellings...\n urbanFolder = getattr(site, \"urban\")\n parcelFolder = getattr(urbanFolder, \"parcellings\")\n if not parcelFolder.objectIds():\n objects_list = default_objects['parcellings']\n createFolderDefaultValues(parcelFolder, objects_list)\n logger.info(\"Parcelling examples have been added\")\n\n # add some folder managers\n tool = site.portal_urban\n fmFolder = getattr(tool, \"foldermanagers\")\n if not fmFolder.objectIds():\n objects_list = default_objects['foldermanagers']\n for obj in objects_list[1:]:\n obj.update({'manageableLicences': URBAN_TYPES})\n createFolderDefaultValues(fmFolder, objects_list)\n\n # set layout to sorted_title_view with z3ctable\n fmFolder.setLayout('sorted_title_folderview')\n\n # create some streets using the Extensions.imports script\n if not tool.streets.objectIds('City'):\n from Products.urban.Extensions.imports import import_streets_fromfile, import_localities_fromfile\n import_streets_fromfile(tool)\n import_localities_fromfile(tool)", "title": "" }, { "docid": "a5b8d7354af35a7da6fad13bc92ee932", "score": "0.47935447", "text": "def main_page_3_imgs(request):\n imgs = Main_imgs.objects.all()[:3]\n\n return {\n 'main_page_3_imgs': imgs,\n }", "title": "" }, { "docid": "6076c9a09a2e06c194d06893cc205ebd", "score": "0.47876713", "text": "def addToFeaturedList(self, category):\n\n listpage = \"Commons:Featured pictures, list\"\n page = pywikibot.Page(COMMONS, listpage)\n old_text = page.get(get_redirect=True)\n\n # First check if we are already on the page,\n # in that case skip. 
Can happen if the process\n # have been previously interrupted.\n if re.search(wikipattern(self.fileName()), old_text):\n out(\n \"Skipping addToFeaturedList for '%s', page already listed.\"\n % self.cleanTitle(),\n color=\"lightred\",\n )\n return\n\n # This function first needs to find the main category\n # then inside the gallery tags remove the last line and\n # add this candidate to the top\n\n # Thanks KODOS for a nice regexp gui\n # This adds ourself first in the list of length 4 and removes the last\n # all in the chosen category\n out(\"Looking for category: '%s'\" % wikipattern(category))\n ListPageR = re.compile(\n r\"(^==\\s*{{{\\s*\\d+\\s*\\|%s\\s*}}}\\s*==\\s*<gallery.*>\\s*)(.*\\s*)(.*\\s*.*\\s*)(.*\\s*)(</gallery>)\"\n % wikipattern(category),\n re.MULTILINE,\n )\n new_text = re.sub(ListPageR, r\"\\1%s\\n\\2\\3\\5\" % self.fileName(), old_text)\n self.commit(old_text, new_text, page, \"Added [[%s]]\" % self.fileName())", "title": "" }, { "docid": "34f6c463d1c47c54a7e409a5d8f93147", "score": "0.47775766", "text": "def setUp(self):\n\n self.new_profile = Profile(photo='image.jpg', user=2)\n self.new_image.save()", "title": "" }, { "docid": "37a4af50e423fb95522e594d79010046", "score": "0.47676244", "text": "def populate_profile():\n puppy = db.session.query(Puppy).all()\n for pup in puppy:\n new_profile = Profile(breed=breeds(), description=descriptions(), specialNeeds=special_needs(), puppy_id=pup.id)\n db.session.add(new_profile)\n db.session.commit()\n print \"profile update\"", "title": "" }, { "docid": "4c576c4447eaf148f955aebdba13896b", "score": "0.4755977", "text": "def create_a_profile(base, spec=None, name=None, metadata=None):\n\n if spec is None:\n spec = constants.spec_nova_server\n\n if name is None:\n name = data_utils.rand_name(\"tempest-created-profile\")\n\n if metadata:\n spec['properties']['metadata'] = metadata\n\n params = {\n 'profile': {\n 'name': name,\n 'spec': spec,\n }\n }\n res = base.client.create_obj('profiles', params)\n return res['body']['id']", "title": "" }, { "docid": "938ebe988c98ab970643f6f80959a93d", "score": "0.4748125", "text": "def make_gallery01_profile_plot(number_of_profiles, indy, pts_fov, pts_roi,\npts_value, pts_drop, zs_median_filter_size, zs_gaussian_filter_sigma,\nnnan_xs, nnan_zs, fltr_zs, nnan_xs_midp, nnan_dzdxs, fltr_dzdxs, segm_ps):\n just0, just1, just2 = ngn.just0, ngn.just1, ngn.just2\n prt = False if 1 else True\n prt_ = prt\n mult_str = '--- '\n def_str = 'make_gallery01_profile_plot'\n if prt_:\n print \"\\n... 
(beg) def %s ...\\n%s\" % (def_str, mult_str * ngn.mult)\n #--.---.---.---.---.---.---.---.---.---.---.---.---.---.---.---.---.---.---\n\n prt = True if 0 and prt_ else False\n\n fltr_zs_mask_edges_pos = segm_ps.fltr_zs_mask_edges_pos\n fltr_zs_mask_edges_neg = segm_ps.fltr_zs_mask_edges_neg\n fltr_dzdxs_mask_edges_pos = segm_ps.fltr_dzdxs_mask_edges_pos\n fltr_dzdxs_mask_edges_neg = segm_ps.fltr_dzdxs_mask_edges_neg\n\n nnan_xs_max = np.nanmax(nnan_xs)\n nnan_xs_min = np.nanmin(nnan_xs)\n nnan_xs_mid = (nnan_xs_max + nnan_xs_min) / 2.\n\n fltr_zs_max = np.nanmax(fltr_zs)\n fltr_zs_min = np.nanmin(fltr_zs)\n fltr_zs_mid = (fltr_zs_max + fltr_zs_min) / 2.\n\n #== === === === === === === === === === === === === === === === === === ===\n title_fontsize = 12\n text_fontsize = 10\n text_fontsize2 = 8\n ax1_get_ylim_rng_mult1 = 0.95\n ax1_get_ylim_rng_mult2 = 0.87\n ax1_get_ylim_rng_mult3 = 0.08\n axvline_ymin = 0.01\n axvline_ymax = 0.99\n axhline_xmin = 0.01\n axhline_xmax = 0.99\n ms_data = 2.\n lw_data = 0.5\n ax_text_xref1 = 0.02\n ax_text_yref1 = 0.1\n #\n #-- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---\n fig = plt.gcf()\n fig.set_size_inches(16, 12, forward=True) # default is (8, 6)\n fig.suptitle(\"%s:\\n%s, %s\" % (\"Laser Profile (X, Z) %i of %i\" %\n ((indy + 1), number_of_profiles), ngn.job_zs_csv, ngn.job_id))\n gridspec = [2, 1]\n gs = mpl.gridspec.GridSpec(*gridspec)\n #\n #-- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---\n ax1 = plt.subplot(gs[0]) # zs\n ax1.set_title('Given & Flitered \"Not NaN\" Profiles (%s, %s, %s, %s)' % (\n \"Point Counts: FOV: %i\" % pts_fov, \"ROI: %i\" % pts_roi,\n \"Values: %i\" % pts_value, \"Drops: %i\" % pts_drop),\n fontsize=title_fontsize)\n #x1.set_xlabel('X-Coordinate (Profile Centered on 0.0)')\n ax1.set_ylabel('Z-Coordinate')\n\n ax1_ylim_mgn = 1.70\n nanmax = np.nanmax(fltr_zs)\n nanmin = np.nanmin(fltr_zs)\n nanmid = (nanmax + nanmin) / 2.\n nanrng = (nanmax - nanmin) / 2.\n nanmgn = nanrng * ax1_ylim_mgn\n ax1.set_ylim((nanmid - nanmgn, nanmid + nanmgn))\n if 0 and prt:\n print fmt0(just1)[0:] % (\"[nanmin, nanmax]\", [nanmin, nanmax])\n print fmt0(just1)[1:] % (\"ax1.get_ylim()\", ax1.get_ylim())\n\n #:: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: :::\n\n ax1.plot(nnan_xs, nnan_zs,\n 'co-', mec='none', ms=ms_data, lw=lw_data,\n label='Data')\n\n ax1.plot(nnan_xs, fltr_zs,\n 'bo-', mec='none', ms=ms_data, lw=lw_data,\n label='%s & %s' % (\n 'Median Filtered (size: %i pts)' % zs_median_filter_size,\n 'Gaussian Filtered (sigma: %.1f)' % zs_gaussian_filter_sigma))\n\n fltr_zs_edges_pos = fltr_zs.copy()\n fltr_zs_edges_pos[~ fltr_zs_mask_edges_pos] = np.nan\n ax1.plot(nnan_xs, fltr_zs_edges_pos, 'ro-', mec='none',\n lw=1., label=\"positive edge points\")\n\n fltr_zs_edges_neg = fltr_zs.copy()\n fltr_zs_edges_neg[~ fltr_zs_mask_edges_neg] = np.nan\n ax1.plot(nnan_xs, fltr_zs_edges_neg, 'mo-', mec='none',\n lw=1., label=\"negative edge points\")\n\n #:: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: :::\n\n for i, tow_center_xref in enumerate(ngn.tow_center_xrefs):\n ax1_axvline_ls = (\n 'dashed' if i == 0 or (i + 1) == len(ngn.tow_center_xrefs)\n else 'dotted')\n ax1.axvline(x=tow_center_xref, ymin=axvline_ymin,\n ymax=axvline_ymax, c='y', ls=ax1_axvline_ls, lw=2.)\n\n trans1 = mpl.transforms.blended_transform_factory(\n ax1.transData, ax1.transAxes)\n\n zipped = zip(ngn.tow_ids[1:-1], ngn.tow_center_xrefs[1:-1])\n for i11, (tid, tcx) in 
enumerate(zipped):\n ax1.text(tcx, 0.94 if i11 % 2 else 0.99, \"tow\\n%.2i\" % tid,\n color='black', fontsize=text_fontsize2, fontweight='bold',\n ha='center', va='top', transform=trans1)\n\n ax1.axvline(x=nnan_xs_min,\n ymin=axvline_ymin, ymax=axvline_ymax, c='k', ls='dashed')\n ax1.text(nnan_xs_min, ax_text_yref1,\n 'X Min. (mm):\\n%.3f' % nnan_xs_min,\n fontsize=text_fontsize, fontweight='bold', ha='center', va='bottom',\n transform=trans1)\n\n ax1.axvline(x=nnan_xs_mid,\n ymin=axvline_ymin, ymax=axvline_ymax, c='k', ls='dashed')\n ax1.text(nnan_xs_mid, ax_text_yref1,\n 'X Mid. (mm):\\n%.3f' % nnan_xs_mid,\n fontsize=text_fontsize, fontweight='bold', ha='center', va='bottom',\n transform=trans1)\n\n ax1.axvline(x=nnan_xs_max,\n ymin=axvline_ymin, ymax=axvline_ymax, c='k', ls='dashed')\n ax1.text(nnan_xs_max, ax_text_yref1,\n 'X Max. (mm):\\n%.3f' % nnan_xs_max,\n fontsize=text_fontsize, fontweight='bold', ha='center', va='bottom',\n transform=trans1)\n\n trans1 = mpl.transforms.blended_transform_factory(\n ax1.transAxes, ax1.transData)\n\n ax_text_xref1 = nnan_xs_min\n ax1.axhline(y=fltr_zs_min,\n xmin=axhline_xmin, xmax=axhline_xmax, c='k', ls='dashed', lw=2.)\n ax1.text(ax_text_xref1, fltr_zs_min,\n 'Z Min. (mm):\\n%.3f' % fltr_zs_min,\n fontsize=text_fontsize, fontweight='bold', ha='right', va='bottom')\n #\n ax1.axhline(y=fltr_zs_mid,\n xmin=axhline_xmin, xmax=axhline_xmax, c='k', ls='dashed', lw=2.)\n ax1.text(ax_text_xref1, fltr_zs_mid,\n 'Z Mid. (mm):\\n%.3f' % fltr_zs_mid,\n fontsize=text_fontsize, fontweight='bold', ha='right', va='bottom')\n #\n ax1.axhline(y=fltr_zs_max,\n xmin=axhline_xmin, xmax=axhline_xmax, c='k', ls='dashed', lw=2.)\n ax1.text(ax_text_xref1, fltr_zs_max,\n 'Z Max. (mm):\\n%.3f' % fltr_zs_max,\n fontsize=text_fontsize, fontweight='bold', ha='right', va='bottom')\n\n #:: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: :::\n ax1.legend(\n loc=8,\n ncol=4,\n numpoints=1,\n markerscale=1.,\n prop={'size': 9.2, 'weight': 'bold'}\n ) if 1 else None\n #-- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---\n ax2 = plt.subplot(gs[1], sharex=ax1) # dzdxs\n ax2.set_title('First Difference of Given & Flitered \"Not NaN\" Profiles',\n fontsize=title_fontsize)\n ax2.set_xlabel('X-Coordinate (Profile Centered on 0.0)')\n ax2.set_ylabel('Z-Coordinate First Difference')\n\n nnan_xs_and_tow_center_xrefs = (\n np.concatenate((nnan_xs, ngn.tow_center_xrefs)))\n ax2_set_xlim_max = np.nanmax(np.abs(nnan_xs_and_tow_center_xrefs)) * 1.25\n ax2_set_xlim_min = -ax2_set_xlim_max\n ax2.set_xlim((ax2_set_xlim_min, ax2_set_xlim_max))\n\n ax2_ylim_mgn = 3.0\n nanmax = ngn.dzdxs_threshold\n #anmin = np.nanmin(fltr_dzdxs)\n nanmin = -nanmax\n nanmid = (nanmax + nanmin) / 2.\n nanrng = (nanmax - nanmin) / 2.\n nanmgn = nanrng * ax2_ylim_mgn\n ax2.set_ylim((nanmid - nanmgn, nanmid + nanmgn))\n if 0 and prt:\n print fmt0(just1)[0:] % (\"[nanmin, nanmax]\", [nanmin, nanmax])\n print fmt0(just1)[1:] % (\"ax2.get_ylim()\", ax2.get_ylim())\n\n #:: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: :::\n\n ax2.plot(nnan_xs_midp, nnan_dzdxs,\n 'co-', mec='none', ms=ms_data, lw=lw_data,\n label='\"Not NaN\" Data') if 0 else None\n ax2.plot(nnan_xs_midp, fltr_dzdxs,\n 'bo-', mec='none', ms=ms_data, lw=lw_data,\n label='Not NaN\" Data %s & %s' % (\n 'Median Filtered (size: %i pts)' % zs_median_filter_size,\n 'Gaussian Filtered (sigma: %.1f)' % zs_gaussian_filter_sigma))\n\n fltr_dzdxs_edges_pos = fltr_dzdxs.copy()\n fltr_dzdxs_edges_pos[~ 
fltr_dzdxs_mask_edges_pos] = np.nan\n ax2.plot(nnan_xs_midp, fltr_dzdxs_edges_pos, 'ro-', mec='none',\n lw=1., label=\"positive edges\")\n\n fltr_dzdxs_edges_neg = fltr_dzdxs.copy()\n fltr_dzdxs_edges_neg[~ fltr_dzdxs_mask_edges_neg] = np.nan\n ax2.plot(nnan_xs_midp, fltr_dzdxs_edges_neg, 'mo-', mec='none',\n lw=1., label=\"negative edges\")\n\n #:: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: :::\n\n for i, tow_center_xref in enumerate(ngn.tow_center_xrefs):\n ax2_axvline_ls = (\n 'dashed' if i == 0 or (i + 1) == len(ngn.tow_center_xrefs)\n else 'dotted')\n ax2.axvline(x=tow_center_xref, ymin=axvline_ymin,\n ymax=axvline_ymax, c='y', ls=ax2_axvline_ls, lw=2.)\n\n ax2.axvline(x=nnan_xs_min,\n ymin=axvline_ymin, ymax=axvline_ymax, c='k', ls='dashed')\n #\n ax2.axvline(x=nnan_xs_mid,\n ymin=axvline_ymin, ymax=axvline_ymax, c='k', ls='dashed')\n #\n ax2.axvline(x=nnan_xs_max,\n ymin=axvline_ymin, ymax=axvline_ymax, c='k', ls='dashed')\n\n trans2 = mpl.transforms.blended_transform_factory(\n ax2.transData, ax2.transAxes)\n\n zipped = zip(ngn.tow_ids[1:-1], ngn.tow_center_xrefs[1:-1])\n for i22, (tid, tcx) in enumerate(zipped):\n ax2.text(tcx, 0.94 if i22 % 2 else 0.99, \"tow\\n%.2i\" % tid,\n color='black', fontsize=text_fontsize2, fontweight='bold',\n ha='center', va='top', transform=trans2)\n\n ax_text_xref2 = ngn.tow_center_xrefs[-1]\n ax2.axhline(y=-ngn.dzdxs_threshold,\n xmin=axhline_xmin, xmax=axhline_xmax, c='k', ls='dotted', lw=2.)\n ax2.text(ax_text_xref2, -ngn.dzdxs_threshold,\n '-dZdX Threshold (mm): %.3f' % -ngn.dzdxs_threshold,\n fontsize=text_fontsize2, fontweight='bold', ha='right', va='bottom')\n #\n ax2.axhline(y=ngn.dzdxs_threshold,\n xmin=axhline_xmin, xmax=axhline_xmax, c='k', ls='dotted', lw=2.)\n ax2.text(ax_text_xref2, ngn.dzdxs_threshold,\n 'dZdX Threshold (mm): %.3f' % ngn.dzdxs_threshold,\n fontsize=text_fontsize2, fontweight='bold', ha='right', va='top')\n\n #:: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: ::: :::\n ax2.legend(\n loc=8,\n ncol=5,\n numpoints=1,\n markerscale=1.,\n prop={'size': 8.0, 'weight': 'bold'}\n ) if 1 else None\n #-- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---\n png_abspath = os.path.join(ngn.gallery01_absdir,\n ngn.job_zs_csv.replace('z', '').replace('.txt', '')\n .replace('.csv', '_meast_%.5i.png' % (indy + 1)))\n if 0 and prt:\n print fmt1(just1)[0:] % (\"ngn.gallery01_absdir\", ngn.gallery01_absdir)\n print fmt1(just1)[1:] % (\"ngn.job_zs_csv\", ngn.job_zs_csv)\n print fmt1(just1)[0:] % (\"png_abspath\", png_abspath)\n #\n png = png_abspath if 1 else None\n if png is not None:\n None if os.path.isdir(ngn.gallery01_absdir) else (\n os.makedirs(ngn.gallery01_absdir))\n plt.savefig(png)\n else:\n plt.show()\n plt.close()\n\n #== === === === === === === === === === === === === === === === === === ===\n\n #--.---.---.---.---.---.---.---.---.---.---.---.---.---.---.---.---.---.---\n if prt_:\n print \"\\n%s\\n... 
(end) def %s ...\" % (mult_str * ngn.mult, def_str)\n None if 1 else sys.exit()", "title": "" }, { "docid": "215b502a61fdec4920ca61817e2c15ec", "score": "0.47480857", "text": "def main():\n\n options = parse_user_arguments()\n generate_profiles(options)", "title": "" }, { "docid": "5c515740c44bfaf26d8c291d8e02d537", "score": "0.47359642", "text": "def create_profile(sender, **kwargs):\n if kwargs['created']:\n profile = ImagerProfile(user=kwargs['instance'])\n profile.save()", "title": "" }, { "docid": "21ecb8c39bc5bbed2642b56b132fc06d", "score": "0.47356355", "text": "def profiles():\n return render_template('profiles.html', users = userprofile.query.all())", "title": "" }, { "docid": "7869db298df50a2a2622b516bd74a008", "score": "0.47348624", "text": "def seed_flavors():\n return [{\n 'id': 'static',\n 'provider': 'static',\n 'params': {},\n }]", "title": "" }, { "docid": "de757d8349ca2b0131041fa2a09d6cd1", "score": "0.47260582", "text": "def expand_profiles(full_game, profiles):\n red_players = np.minimum(full_game.num_role_players, 2)\n profiles = np.asarray(profiles, int)\n red_game = rsgame.empty(red_players, full_game.num_role_strats)\n utils.check(red_game.is_profile(profiles).all(), \"profiles must be valid\")\n return dpr.expand_profiles(full_game, profiles)", "title": "" }, { "docid": "d228bd33db94d61f551cd1facbe5cfbd", "score": "0.4712177", "text": "def setUp(self):\n super(PreviousNextTest, self).setUp()\n self.test_travelogue = TravelogueFactory()\n self.pl1 = PhotoFactory()\n self.pl2 = PhotoFactory()\n self.pl3 = PhotoFactory()\n self.test_travelogue.photos.add(self.pl1)\n self.test_travelogue.photos.add(self.pl2)\n self.test_travelogue.photos.add(self.pl3)", "title": "" }, { "docid": "30db673597c76f20dc0b60cb5797d20a", "score": "0.470864", "text": "def makescreenshots():\n run_in_demo_projects('makescreenshots', '--traceback')", "title": "" }, { "docid": "a5ac18e48eee345f6f5f2bc3d27ecf8f", "score": "0.47026324", "text": "def setUp(self):\n\n self.new_image = Image(\n image='image.jpg', image_name='sample_name', image_caption='Restricted', profile=2)\n self.new_image.save()", "title": "" }, { "docid": "e85b3e08b86ba4a42f9cbb5a3fb670c3", "score": "0.4700637", "text": "def viewer_setup(self):\n pass", "title": "" }, { "docid": "e85b3e08b86ba4a42f9cbb5a3fb670c3", "score": "0.4700637", "text": "def viewer_setup(self):\n pass", "title": "" }, { "docid": "12fb9c18da416bcc4be09182fd20230c", "score": "0.46918032", "text": "def insert_profiles_to_treeview(self):\n\n # * delete all profiles which have been found in previous searches\n for child in self.tree.get_children():\n self.tree.delete(child)\n\n log_message(\"Browser-Profile aktualisiert\", \"info\")\n profiles = self.parent.controller.load_profiles() # refresh profile overview\n if profiles:\n for browser in profiles:\n parent = self.tree.insert(\"\", \"end\", text=browser)\n for profile in profiles[browser]:\n # ! 
filter out the deprecated empty firefox \"default\" profile for default-release channel firefox builds newer than v67\n # * for firefox version previous to version 67\n # * or other release channels - please deactivate filter)\n\n # self.tree.insert(parent, \"end\", text=profile)\n\n # * if you want to deactivate the filter comment out the if-loop below\n # * and uncomment the function call above\n\n if \"default-release\" in profile:\n self.tree.insert(parent, \"end\", text=profile)\n elif \"Profil\" in profile:\n self.tree.insert(parent, \"end\", text=profile)\n else:\n log_message(\"Keine Profile gefunden\", \"error\")\n pass", "title": "" }, { "docid": "de43c1914d64b81c44066d5671508347", "score": "0.46548405", "text": "def add_scientist_slide(prs,scientist,verbose=False):\n for photos in scientist._scientistdict['Photo']:\n for photo in photos:\n img_path = 'test.jpg'\n if verbose:\n print(f'Grabbing image: {photo}')\n img_data = requests.get(photo).content\n with open(img_path, 'wb') as handler:\n handler.write(img_data)\n blank_slide_layout = prs.slide_layouts[5]\n slide = prs.slides.add_slide(blank_slide_layout)\n\n shapes = slide.shapes\n title_shape = shapes.title\n title_shape.text = scientist.name\n\n top = Inches(1.75)\n left = Inches(0.5)\n height = Inches(5)\n pic = slide.shapes.add_picture(img_path, left, top, height=height)\n\n notes_slide = slide.notes_slide\n text_frame = notes_slide.notes_text_frame\n text_frame.text = open(scientist._fname).read()", "title": "" }, { "docid": "c9977058f80b2b0ecb447c60a7bfd5d1", "score": "0.465383", "text": "def crawler_create_new_german_profiles(\n submission_tracker=None, num_pages_to_load=20\n):\n _create_profiles_from_instagram_hashtags(\n hashtags_keys=('germany', ),\n pipeline_class_name=pipelines.GermanyPipeline.__name__,\n submission_tracker=submission_tracker,\n num_pages_to_load=num_pages_to_load\n )", "title": "" }, { "docid": "da966b37f7b1131856f75372f13ef690", "score": "0.46401772", "text": "def create_profile(underscore_name):\n name = ''.join(underscore_name.split('_')) # bring together\n if name[0] == ' ':\n name = name[1:]\n \n # add content\n body = '---\\n'\\\n 'author: {}\\n'\\\n 'layout: default\\n'\\\n 'permalink: /{}\\n'\\\n '---\\n'.format(underscore_name, name)\n body += add_profile_content(underscore_name)\n\n # create author page\n author_page = open('{}.html'.format(name), 'w')\n author_page.write(body)\n\n # create author in yaml\n myAuthor = {\n underscore_name : {\n 'name': ' '.join(underscore_name.split('_')),\n 'web' : 'http://www.bruinsportsanalytics.com/{}'.format(name),\n 'bio' : ''\n }\n }\n with open('_data/authors.yaml', 'r') as yamlFile:\n cy = yaml.load(yamlFile)\n cy.update(myAuthor)\n \n with open('_data/authors.yaml', 'w') as yamlFile:\n yaml.safe_dump(cy, yamlFile)\n \n author_page.close()", "title": "" }, { "docid": "4fcf230963f28ff54c1c2dbc4156ab65", "score": "0.463844", "text": "def gallery(request, gallery_name='all'):\n\n import json\n import os\n context_gallery_name = gallery_name\n site_content_dir = os.path.abspath(os.path.dirname(__file__))\n data_file_name = gallery_name + '.json'\n data_file_dir = site_content_dir + '/static/content/json/galleries/'\n data_file_path = data_file_dir + data_file_name\n gallery_json_file = open(data_file_path)\n gallery_json_string = gallery_json_file.read()\n gallery_json_file.close()\n gallery_dictionary = json.loads(gallery_json_string)\n name_of_gallery = gallery_dictionary['name_of_gallery']\n description_of_gallery = 
gallery_dictionary['description_of_gallery']\n image_file_dir = 'content/images/galleries/' + gallery_name + '/'\n image_list = gallery_dictionary['image_list']\n image_list_with_path = []\n for img in image_list:\n img_to_add = img\n img_to_add['image_file_path'] = image_file_dir + img['image_file_name']\n image_list_with_path.append(img_to_add)\n row_separator_markup = \"\\n</div><!-- .row -->\\n<div class='row'>\\n\"\n template = loader.get_template('content/gallery.html')\n context = {\n 'adsense_ads': adsense_ads,\n 'name_of_gallery': name_of_gallery,\n 'description_of_gallery': description_of_gallery,\n 'image_file_dir': image_file_dir,\n 'data_file_path': data_file_path,\n 'image_list_with_path': image_list_with_path,\n 'row_separator_markup': row_separator_markup,\n }\n return HttpResponse(template.render(context, request))", "title": "" }, { "docid": "baa608d5ad299f0eacdcb7d226f63fd4", "score": "0.46297327", "text": "def conduct_experiment_4(self):\n # Instantiation of the class related to the forth experiment\n self.experiment_4 = EclipseExp4Launcher( \\\n data_set_file=self.pre_processed_data_set_file_path, \\\n developers_dict_file=self.developers_dict_file_path, \\\n developers_list_file=self.developers_list_file_path)\n super().conduct_experiment_4()", "title": "" }, { "docid": "1e9576a7a7e3c7b5b671ac0134687372", "score": "0.46264476", "text": "def gen_page(app, sdir, pname):\r\n\tlink = '<div class=\"gallery_class\">'\r\n\r\n\tlink_template = \"\"\"\\\r\n\t<table><caption align=\"bottom\"><a href=\"%s\"<b>%s</b></a</caption>\r\n\t<tr>\r\n\t<td><a href=\"%s\"><img src=\"%s\" border=\"5\" alt=\"%s\"/></a>\r\n\t</td>\r\n\t</tr>\r\n\t</table>\r\n\t\"\"\"\r\n\t\r\n\tdata = []\r\n\tthumbnails = {}\r\n\trows = [\"<br/>\", link]\r\n\r\n\t# need to join srcdir\r\n\tfor item in sorted(glob.glob(os.path.join(app.builder.srcdir, sdir + \"\\\\*_thumb.png\"))):\r\n\t\tpath, filename = os.path.split(item)\r\n\t\tbasename, ext = os.path.splitext(item)\r\n\t\t\r\n\t\t# get rid of _thumb and path\r\n\t\twidgetName = basename.replace(\"_thumb\", \"\")\r\n\t\ttoRemove = app.builder.srcdir + \"\\\\\" + sdir + \"\\\\\"\r\n\t\twidgetName = widgetName.replace(toRemove, \"\")\r\n\t\t\r\n\t\t# get rid of srcdir\r\n\t\tthumbFile = item.replace(app.builder.srcdir + \"\\\\\", \"\")\r\n\t\t# get rid of _thumb\r\n\t\tlargeFile = thumbFile.replace(\"_thumb\", \"\")\r\n\t\t# fix up path sep for html\r\n\t\tthumbFile = thumbFile.replace(\"\\\\\", \"/\")\r\n\t\tlargeFile = largeFile.replace(\"\\\\\", \"/\")\r\n\r\n\t\tlinkName = \"\"\r\n\t\tlinkKey = widgetName\r\n\t\tlinkKeyL = len(linkKey)\r\n\t\tif linkKey[linkKeyL-1:linkKeyL].isdigit():\r\n\t\t\tlinkKey = linkKey[:linkKeyL-1]\r\n\t\tif galleryToClassIndex.pictureIndex.has_key(linkKey):\r\n\t\t\t# lets find the documentation link for this image\r\n\t\t\tlinkName = galleryToClassIndex.pictureIndex[linkKey]\r\n\t\telse:\r\n\t\t\tlinkName = pname\r\n##\t\tprint \"base: %s\" % basename\r\n##\t\tprint \"toRemove: %s\" % toRemove\r\n##\t\tprint \"widget: %s\" % widgetName\r\n##\t\tprint \"linkKey: %s\" % linkKey\r\n##\t\tprint \"thumb: %s\" % thumbFile\r\n##\t\tprint \"large: %s\" % largeFile\r\n##\t\tprint \"link: %s\" % linkName\r\n##\t\tprint link_template\r\n\r\n\t\trows.append(link_template % (linkName, widgetName, largeFile, thumbFile, widgetName))\r\n\r\n\trows.append(\"</div>\")\r\n\trows.append('<br clear=\"all\"> ')\r\n\t\r\n\tcontent = template % '\\n'.join(rows)\r\n\tgallery_path = os.path.join(app.builder.srcdir, '_templates', pname)\r\n\r\n\r\n\t# 
check if file has already up to date content\r\n\tfhCheck = file(gallery_path, 'r')\r\n\toldContent = fhCheck.read()\r\n\tif oldContent.strip() == content.strip():\r\n\t\tprint \"gallery file is already up to date\"\r\n\telse:\r\n\t\tfh = file(gallery_path, 'w')\r\n\t\tfh.write(content)\r\n\t\tfh.flush()\r\n\t\tfh.close()\r\n\t\tprint \"gallery file updated\"\r\n\r\n\tfh = file(gallery_path, 'w')\r\n\tfh.write(content)\r\n\tfh.flush()\r\n\tfh.close()", "title": "" }, { "docid": "415b7b689f4eb8c7a47d941ff214505b", "score": "0.4624191", "text": "def expand_gallery(generator, metadata):\r\n if \"gallery\" not in metadata or metadata['gallery'] is None:\r\n return # If no gallery specified, we do nothing\r\n\r\n lines = [ ]\r\n base_path = _image_path(generator)\r\n in_path = path.join(base_path, metadata['gallery'])\r\n template = generator.settings.get('GALLERY_TEMPLATE', DEFAULT_TEMPLATE)\r\n thumbnail_name = generator.settings.get(\"GALLERY_THUMBNAIL\", DEFAULT_GALLERY_THUMB)\r\n thumbnail_prefix = generator.settings.get(\"\")\r\n resizer = _resizer(thumbnail_name, '?x?')\r\n for dirpath, _, filenames in os.walk(in_path):\r\n for filename in filenames:\r\n url = path.join(dirpath, filename).replace(base_path, \"\")[1:]\r\n url = path.join('/static', generator.settings.get('IMAGE_PATH', DEFAULT_IMAGE_DIR), url).replace('\\\\', '/')\r\n logger.debug(\"GALLERY: {0}\".format(url))\r\n thumbnail = resizer.get_thumbnail_name(filename)\r\n thumbnail = path.join('/', generator.settings.get('THUMBNAIL_DIR', DEFAULT_THUMBNAIL_DIR), thumbnail).replace('\\\\', '/')\r\n lines.append(template.format(\r\n filename=filename,\r\n url=url,\r\n thumbnail=thumbnail,\r\n ))\r\n metadata['gallery_content'] = \"\\n\".join(lines)", "title": "" }, { "docid": "e2adc141c98c4da44b5a3278dcff0234", "score": "0.46222746", "text": "def crawler_create_new_sea_profiles(\n submission_tracker=None, num_pages_to_load=20\n):\n # we need only 'singapore' tags now\n _create_profiles_from_instagram_hashtags(\n hashtags_keys=('singapore', ),\n pipeline_class_name=pipelines.SEAPipeline.__name__,\n submission_tracker=submission_tracker,\n num_pages_to_load=num_pages_to_load\n )", "title": "" }, { "docid": "c62b3010854774eb7df833482fc8b924", "score": "0.46192607", "text": "def __initialize(self):\n self.__thumbnailsDirectory = os.path.join(\n Utilities.getConfigDir(), \"web_browser\", \"thumbnails\")\n # Create directory if it does not exist yet\n if not os.path.exists(self.__thumbnailsDirectory):\n os.makedirs(self.__thumbnailsDirectory)\n \n self.__load()", "title": "" }, { "docid": "eb0d5b9d448c31d0bb8c3d1373fad766", "score": "0.4615031", "text": "def gallery():\n files = [f for f in listdir(MOTION_FOLDER_REL) if isfile(join(MOTION_FOLDER_REL, f))]\n files.sort()\n files.reverse()\n files = files[1:]\n pictures = list(map(lambda x: MOTION_FOLDER + x, files))\n return render_template('gallery.html', pictures=pictures)", "title": "" }, { "docid": "2d6a834bdacec2a32d65413400b36fa1", "score": "0.46139553", "text": "def new_experiment(base_folder, gup, saf, name, md={}):\n user_folder, user_md = new_user(base_folder, gup, saf, name, md={})\n beamtime_md = setup_beamtime(user_folder, overwrite=True)\n return user_folder, user_md, beamtime_md", "title": "" }, { "docid": "9d371946e8ee90bbbfa2dcc65216b824", "score": "0.46107787", "text": "def main():\n meta_features_description()", "title": "" }, { "docid": "ce19d5d36db4d35b0822e39a3e4c948f", "score": "0.46050224", "text": "def gen_preview_pages(page_dir, out_dir):\n md = 
markdown.Markdown(extensions=[\"markdown.extensions.meta\"])\n # content_template = jinja_env.get_template(preview_base)\n content_pages = get_page_names(page_dir, ext='.md')\n options = {'title': 'Joseph\\'s Blog',\n 'index': '',\n 'projects': '',\n 'contact': '',\n 'year': datetime.datetime.now().year,\n 'posts': []\n }\n options[out_dir] = 'active'\n post_list = []\n for page in content_pages:\n content = md.convert(get_page(os.path.join(page_dir, page)))\n post_details = {'content_title': md.Meta[\"content_title\"][0],\n 'publication_date': md.Meta[\"publication_date\"][0],\n 'img_link': md.Meta[\"img_link\"][0],\n 'image_subtext': md.Meta[\"image_subtext\"][0],\n 'output_link': '{}/{}'.format(\n page_dir, os.path.splitext(page)[0]),\n 'content_text': content,\n }\n post_list.append(copy.deepcopy(post_details))\n options['posts'] = sorted(post_list,\n key=lambda k: k['publication_date'],\n reverse=True,\n )\n return options", "title": "" }, { "docid": "71365802af9d722f23f58d192a3a1adc", "score": "0.4602787", "text": "def _create_gallery_only(cls, id, attachments, client):\n\n url = reverse('api-galleries-list')\n\n data = {\n 'title': 'Gallery Title %d' % id,\n 'attachment_json': attachments\n }\n\n return client.post(url, data, format='json')", "title": "" }, { "docid": "2302c178defacccab6cb165d98c2faf5", "score": "0.45954788", "text": "def manage_profile(request):\n django_user = request.user\n user_profile = TASUser(username=request.user.username)\n try:\n ds_profile = DesignSafeProfile.objects.get(user__id=django_user.id)\n except DesignSafeProfile.DoesNotExist:\n logout(request)\n return HttpResponseRedirect(reverse('designsafe_auth:login'))\n\n try:\n demographics = django_user.profile\n except ObjectDoesNotExist as e:\n demographics = {}\n logger.info('exception e:{} {}'.format(type(e), e))\n\n context = {\n 'title': 'Account Profile',\n 'profile': user_profile,\n 'ds_profile': ds_profile,\n 'demographics': demographics,\n 'user': django_user,\n }\n return render(request, 'designsafe/apps/accounts/profile.html', context)", "title": "" }, { "docid": "0c83b899596b2e9a2ec5fa0747b2e043", "score": "0.45949697", "text": "def product_page_thumbnail_images(product_additional_images: list):\n thumbnail_block_html = ''\n for thumbnail_number in range(len(product_additional_images)):\n thumbnail_block_html += f'''<div class=\"column\">\n\t\t\t\t\t\t\t\t\t<img class=\"demo cursor\" src=\"{ product_additional_images[thumbnail_number].image.url }\" onclick=\"currentSlide({ thumbnail_number + 2 })\" alt={ product_additional_images[thumbnail_number].product.title }\">\n\t\t\t\t\t\t\t\t</div>'''\n \n return mark_safe(thumbnail_block_html)", "title": "" }, { "docid": "2805d01ee46ce9970456ea07c286d950", "score": "0.45708162", "text": "def test_galleria_picasauserandid(self):\n self.portal.invokeFactory('Link', 'picassa_link')\n link = self.portal['picassa_link']\n link.setRemoteUrl('https://picasaweb.google.com/user_id/galleria_id')\n galleria = Galleria(link, self.request)\n self.assertEquals(galleria.plugins(plname='picasaweb'),\n ('user_id', 'galleria_id'))", "title": "" }, { "docid": "8b4e498ebfc8b17bf08f13a6de06ca00", "score": "0.4568516", "text": "def profiles(request):\n lp = []\n for profile in models.Profile.objects.all():\n lp.append({\"name\" : profile.name, \"vo\" : profile.vo,\n \"version\" : profile.version,\n \"description\" : profile.description,\n \"metric_instances\" : profile.metric_instances.all().\\\n values('metric', 'fqan', 'vo', 'service_flavour')\n })\n\n return 
render_to_response('profiles', {'result' : lp}, mimetype='application/json')", "title": "" }, { "docid": "28a0d1f197cb91784010c729f0de56c8", "score": "0.45666793", "text": "def include_slides(filename):\n p = presenter\n\n slides = yaml.load_all(file(filename)) \n\n background='bg'\n\n for i,slide in enumerate(slides):\n if slide==None:\n continue\n if 'bookmark' in slide:\n p.bookmark(slide['bookmark'])\n elif 'title' in slide:\n p.bookmark(slide['title'])\n elif 'bmark' in slide:\n p.bookmark(slide['bmark'])\n else:\n p.bookmark(filename+' - '+str(i))\n\n if 'background' in slide:\n background=slide['background']\n \n if 'bg' in slide:\n background=slide['bg']\n\n if 'pdf' in slide and 'slides' in slide:\n images = pdf2ppm_cache(slide['pdf'],slide['slides'])\n p.play(load_image_slides(images,library='pdf',background=background, content=slide.get('content',None)))\n p.pause()\n elif 'svg' in slide:\n images = svg2png_cache(slide['svg'])\n p.play(load_image_slides(images,library='svg',background=background, content=slide.get('content',None)))\n p.pause()\n elif 'image_files' in slide:\n images = imagefiles_to_images(slide['image_files'])\n p.play(load_image_slides(images,library='image_files',background=background, content=slide.get('content',None)))\n p.pause()\n\n elif 'slideshow' in slide:\n images = imagefiles_to_images(slide['slideshow'])\n slideshow_anim = images_slideshow(images,\n library='image_files',\n background=background,\n delay=slide.get('delay',2.0),\n repeat=slide.get('repeat',1),\n fade_time=slide.get('fade_time',0.5))\n\n\n p.play(slideshow_anim)\n p.pause()\n\n \n elif 'images' in slide:\n if 'library' in slide:\n lib = slide['library']\n else:\n lib = 'default'\n p.play(load_image_slides(slide['images'],library=lib, background=background,\n content=slide.get('content',None)))\n p.pause()\n\n elif 'rst' in slide:\n images = rst2ppm_cache(i,slide.get('title',''),slide['rst'],slide.get('rst_style'))\n p.play(load_image_slides(images,library='pdf',\n background=background,content=slide.get('content',None)))\n p.pause()\n \n else:\n p.play(load_slide_content(slide['content']))\n p.pause()", "title": "" }, { "docid": "19b194b06a1940e9ca018db6ba4a918e", "score": "0.4564923", "text": "def test_profile(self):\n pass", "title": "" }, { "docid": "9472daa0a9381480f277c9db4406f096", "score": "0.45637825", "text": "def install(request):\n \n if not request.user.is_staff: return HttpResponse(\"Unauthorized\")\n \n # if a user named 'anonymous_user' does not exist, create it\n if not User.objects.filter(username='anonymous_user'):\n anonymous_user = User(username='anonymous_user')\n anonymous_user.save()\n \n # go through every ever and create a profile object if it doesn't already\n # exist. 
Also, give them each admin 10000 initial rep points\n for user in User.objects.all():\n profile = Profile.objects.get_or_create(user=user)[0]\n if user.is_staff:\n if profile.reputation < 1000:\n profile.reputation = 1000\n profile.save()\n \n return HttpResponse(\"done\")", "title": "" }, { "docid": "74c8eacedd1862906a96da132b8ebef8", "score": "0.45587233", "text": "def post(self):\n parent_key = db.Key.from_path('Persons', users.get_current_user().email())\n person = db.get(parent_key)\n if person == None: #store this person in the db if it is not in db yet\n newProfile = Persons(key_name=users.get_current_user().email())\n newProfile.email = users.get_current_user().email()\n newProfile.put()\n \n newProfile = Persons(key_name=users.get_current_user().email())\n if self.request.get('person_name')!='':\n newProfile.name=self.request.get('person_name')\n #check an img\n if self.request.get('face_img')!='':\n try:\n img = images.resize(self.request.get('face_img'), 200, 200)\n picture = Picture(parent=parent_key)\n picture.content = db.Blob(img)\n #delete prev uploads\n photo = db.GqlQuery(\"SELECT * FROM Picture WHERE ANCESTOR IS :1\", parent_key)\n for item in photo:\n item.delete()\n #save new one\n picture.put()\n except TypeError: #does not redirect now\n self.redirect(\"/errormsg?error=Not a supported type&continue_url=profile\")\n except:\n self.redirect(\"/errormsg?error=Unexpected error&continue_url=profile\")\n else: #cannot detect no-file situation\n err_exist = True\n msg = \"?error=No File Chosen&continue_url=profile\"\n #determine gender\n if self.request.get('person_gender') == \"Male\":\n newProfile.gender = True\n else:\n newProfile.gender = False \n\n newProfile.year = int(self.request.get('person_year'))\n newProfile.faculty = self.request.get('person_faculty') \n newProfile.residence = self.request.get('person_residence')\n interesttags = self.request.get('person_interest')\n newProfile.interest = interesttags.split()\n err_exist = False\n if len(newProfile.interest) > 6:\n err_exist = True\n msg = \"?error=Too Many Interests&continue_url=profile\"\n if newProfile.name == \"None\" or newProfile.name == \"default\" or newProfile.name == \"\":\n err_exist = True\n msg = \"?error=Name Is Illegal&continue_url=profile\"\n\n if err_exist:\n self.redirect('/errormsg'+msg)\n else:\n newProfile.put()\n self.redirect('/profile')", "title": "" }, { "docid": "84d7a5919fe711c9ecd1fc2b1331daf8", "score": "0.4549485", "text": "def crawler_create_new_mommy_profiles(\n submission_tracker=None, num_pages_to_load=20\n):\n _create_profiles_from_instagram_hashtags(\n hashtags_keys=('mom_hashtags', ),\n pipeline_class_name=pipelines.MommyPipeline.__name__,\n submission_tracker=submission_tracker,\n num_pages_to_load=num_pages_to_load\n )", "title": "" }, { "docid": "416b61479c2b0cd85debf1bfdfd28e99", "score": "0.4547153", "text": "def test_preview_urls(self):\n pass", "title": "" }, { "docid": "c572a4011ddf4e73e39243bfec54b9dc", "score": "0.45380628", "text": "def fake_profile():\n return FakeProfile()", "title": "" }, { "docid": "4021ca556d928de67c6468740112d780", "score": "0.4534768", "text": "def home():\n skills = MONGO.db.skills.find()\n projects = MONGO.db.portfolio.find().limit(3)\n qualifications = MONGO.db.qualifications.find()\n experience = MONGO.db.work_experience.find()\n return render_template(\n 'pages/index.html', \n skills=skills, \n projects=projects,\n qualifications=qualifications,\n experience=experience,\n image_height='full-screen'\n )", "title": "" }, { "docid": 
"05eac7fd579b7f5405d781eb48f25df8", "score": "0.45264235", "text": "def setUpPloneSite(self, portal):\n # Plone 5 support\n if HAS_PA_CONTENTTYPES:\n self.applyProfile(portal, 'plone.app.contenttypes:default')\n\n self.applyProfile(portal, 'ps.plone.mlstiles:default')\n self.applyProfile(portal, 'ps.plone.mls:testfixture')\n\n if HAS_COVER:\n # setup test content\n self.applyProfile(portal, 'collective.cover:default')\n self.applyProfile(portal, 'collective.cover:testfixture')\n self.applyProfile(portal, 'ps.plone.mlstiles:support_cover')\n create_standard_content_for_tests(portal)\n\n if HAS_MOSAIC:\n self.applyProfile(portal, 'plone.app.mosaic:default')\n self.applyProfile(portal, 'ps.plone.mlstiles:support_mosaic')\n\n portal.portal_workflow.setDefaultChain('simple_publication_workflow')\n\n # Prevent kss validation errors in Plone 4.2\n portal_kss = getattr(portal, 'portal_kss', None)\n if portal_kss:\n kss = portal_kss.getResource('++resource++plone.app.z3cform')\n kss.setEnabled(False)", "title": "" }, { "docid": "b6538541d8cf6718cefd19f1ab8ac996", "score": "0.45254296", "text": "def _create_gallery(self, cust_name, new_gallery):\n if os.path.exists(os.path.join(self.media_root, cust_name, new_gallery)):\n return True\n cust_dir = os.path.join(self.media_root, cust_name)\n if not os.path.exists(cust_dir):\n os.makedirs(cust_dir)\n\n n_gallery_path = os.path.join(cust_dir, new_gallery)\n os.makedirs(n_gallery_path)\n return os.path.exists(n_gallery_path)", "title": "" }, { "docid": "7c32b07f2256061200c9d0ca54dab193", "score": "0.45170277", "text": "def url(self):\n return url_galleries(self.parameters, url_domain=self.url_domain)", "title": "" }, { "docid": "02501120ed858f8ed9d777f3c0b93d2b", "score": "0.45111984", "text": "def project_dua_preview(request, project_slug, **kwargs):\n project = kwargs['project']\n\n return render(request, 'project/project_dua_preview.html', {'project': project, 'dua': project.dua})", "title": "" }, { "docid": "7bedd4eeac83144797078e521e862106", "score": "0.45110306", "text": "def crawler_create_new_lifestyle_profiles(\n submission_tracker=None, num_pages_to_load=20\n):\n _create_profiles_from_instagram_hashtags(\n hashtags_keys=('lifestyle_hashtags', ),\n pipeline_class_name=pipelines.LifestylePipeline.__name__,\n submission_tracker=submission_tracker,\n num_pages_to_load=num_pages_to_load\n )", "title": "" }, { "docid": "b4ae2b229d1c41f9351019f45d827211", "score": "0.45057726", "text": "def gallery(view):\n page = request.args.get(\"page\", 1, type=int)\n if view == 'hot':\n dogs = Dog.objects.order_by('-likes', '-upload_date').paginate(\n page=page, per_page=6)\n elif view == 'new':\n dogs = Dog.objects.order_by('-upload_date').paginate(\n page=page, per_page=6)\n animate = request.args.get('animate')\n return render_template('main/gallery.html', title=\"Gallery\", dogs=dogs,\n view=view, animate=animate)", "title": "" }, { "docid": "993038131c6c6422872b448123bcb51b", "score": "0.45012936", "text": "def set_box_people_list(self, element):\n self.shortcode_name = \"epfl_people\"\n\n BASE_URL = \"https://people.epfl.ch/cgi-bin/getProfiles?\"\n\n # prepare a dictionary with all GET parameters\n parameters = {}\n\n # parse the unit parameter\n parameters['unit'] = Utils.get_tag_attribute(element, \"query\", \"jahia:value\")\n\n # parse the template html\n template_html = Utils.get_tag_attribute(element, \"template\", \"jahia:value\")\n\n # Check if \"function\" exists (it's a filter for information)\n function = Utils.get_tag_attribute(element, 
\"function\", \"jahia:value\")\n\n if function:\n parameters['function'] = function\n\n # check if we have an HTML template\n if not template_html:\n logging.warning(\"epfl_people: no HTML template set\")\n self.content = \"[epfl_people error: no HTML template set]\"\n return\n\n # extract template key\n template_key = Utils.get_tag_attribute(\n minidom.parseString(template_html),\n \"jahia-resource\",\n \"key\"\n )\n\n # these rules are extracted from jsp of jahia\n if template_key == 'epfl_peopleListContainer.template.default_bloc':\n parameters['struct'] = 1\n template = 'default_struct_bloc'\n elif template_key == 'epfl_peopleListContainer.template.default_bloc_simple':\n template = 'default_bloc'\n elif template_key == 'epfl_peopleListContainer.template.default_list':\n template = 'default_list'\n else:\n template = Utils.get_tag_attribute(minidom.parseString(template_html), \"jahia-resource\", \"key\")\n parameters['tmpl'] = \"WP_\" + template\n\n # in the parser we can't know the current language.\n # so we assign a string that we will replace by the current language in the exporter\n parameters['lang'] = self.UPDATE_LANG\n\n url = \"{}{}\".format(BASE_URL, urlencode(parameters))\n self.content = '[{} url=\"{}\" /]'.format(self.shortcode_name, url)", "title": "" }, { "docid": "e5ebb31d870e4be2d500ceaaa1876f8e", "score": "0.44998512", "text": "def _local_init(self):\n self.width_key = 'interface.media-sel-width'\n self.height_key = 'interface.media-sel-height'\n self.preview = Gtk.Image()\n self.preview.set_size_request(int(THUMBSCALE),\n int(THUMBSCALE))\n vbox = self.glade.get_object('select_person_vbox')\n vbox.pack_start(self.preview, False, True, 0)\n vbox.reorder_child(self.preview,1)\n self.preview.show()\n self.selection.connect('changed',self._row_change)", "title": "" }, { "docid": "6773e061f564fca7294eb00095ab3e32", "score": "0.44996986", "text": "def buildProfile(self, config, base, psf, gsparams, logger):\n if (base['obj_num'] % self.ngal != 0):\n return None\n else:\n self.neighbor_gals = []\n for i in range(self.ngal-1):\n gal = galsim.config.BuildGSObject(base, 'gal', gsparams=gsparams, logger=logger)[0]\n self.neighbor_gals.append(gal)\n galsim.config.RemoveCurrent(base['gal'], keep_safe=True)\n\n rng = galsim.config.GetRNG(config, base, logger, 'BlendSet')\n ud = galsim.UniformDeviate(rng)\n self.neighbor_pos = [galsim.PositionI(int(ud()*2*self.sep-self.sep),\n int(ud()*2*self.sep-self.sep))\n for i in range(self.ngal-1)]\n #print('neighbor positions = ',self.neighbor_pos)\n\n self.main_gal = galsim.config.BuildGSObject(base, 'gal', gsparams=gsparams,\n logger=logger)[0]\n\n self.profiles = [ self.main_gal ]\n self.profiles += [ g.shift(p) for g, p in zip(self.neighbor_gals, self.neighbor_pos) ]\n if psf:\n self.profiles = [ galsim.Convolve(gal, psf) for gal in self.profiles ]\n return self.profiles", "title": "" }, { "docid": "038d5b2b8556f25b8b3b745fe243e729", "score": "0.44990706", "text": "def _state_house_slide_preview(slug, page):\n context = make_context()\n\n context['body'] = _state_house_slide(slug, page).data\n\n return render_template('slide_preview.html', **context)", "title": "" }, { "docid": "1c6948f35e5fd01023975a69d4a50a14", "score": "0.4498637", "text": "def test_featured_courses_endpoint(client, featured):\n course = CourseFactory.create(featured=featured)\n # this should be filtered out\n CourseFactory.create(runs=None)\n\n resp = client.get(reverse(\"courses-list\") + \"featured/\")\n\n assert resp.data.get(\"count\") == (1 if featured else 0)\n 
if featured:\n assert resp.data.get(\"results\")[0][\"id\"] == course.id", "title": "" }, { "docid": "2a91335db599eec08d5e8dc8c3055c91", "score": "0.4496663", "text": "def profile(name=None):\n return render_template('profile.html',name=name)", "title": "" }, { "docid": "80152fff9f3846994d3f2ffedaaf81cf", "score": "0.44953153", "text": "def create_instance_profile(InstanceProfileName=None, Path=None):\n pass", "title": "" }, { "docid": "1a3bd73464f076fc277b7f9c15a76a04", "score": "0.44932672", "text": "def download_tinder_profile_pictures():\n recs = get_recommendations()['results']\n\n clear_tinder_unlabeled_static()\n ### TODO this is a terrible hack \n person = recs[0]\n urls = get_all_photo_urls(person, size=172)\n for i in range(len(urls)):\n save_image_from_url(urls[i], \n os.path.join(UNLABELED_STATIC_DIR, \"img%d\"%i))", "title": "" }, { "docid": "51845496fe82e8366efb92b06c964489", "score": "0.4491417", "text": "def Album():\n return render_template(\n 'PictureAlbum.html',\n title='Pictures',\n year=datetime.now().year,\n )", "title": "" }, { "docid": "a58841f9cd62b39549e9b939aa559650", "score": "0.44903168", "text": "def project_preview(request, project_slug, subdir='', **kwargs):\n project, authors = (kwargs[k] for k in ('project', 'authors'))\n authors = project.get_author_info()\n invitations = project.authorinvitations.filter(is_active=True)\n corresponding_author = authors.get(is_corresponding=True)\n corresponding_author.text_affiliations = ', '.join(a.name for a in corresponding_author.affiliations.all())\n\n references = project.references.all().order_by('order')\n publication = project.publications.all().first()\n topics = project.topics.all()\n parent_projects = project.parent_projects.all()\n languages = project.programming_languages.all()\n citations = project.citation_text_all()\n platform_citations = project.get_platform_citation()\n show_platform_wide_citation = any(platform_citations.values())\n main_platform_citation = next((item for item in platform_citations.values() if item is not None), '')\n passes_checks = project.check_integrity()\n\n if passes_checks:\n messages.success(request, 'The project has passed all automatic checks.')\n else:\n for e in project.integrity_errors:\n messages.error(request, e)\n\n (display_files, display_dirs, dir_breadcrumbs, parent_dir,\n file_error) = get_project_file_info(project=project, subdir=subdir)\n files_panel_url = reverse('preview_files_panel', args=(project.slug,))\n file_warning = get_project_file_warning(display_files, display_dirs, subdir)\n\n # Flag for anonymous access\n has_passphrase = kwargs['has_passphrase']\n\n return render(\n request,\n 'project/project_preview.html',\n {\n 'project': project,\n 'display_files': display_files,\n 'display_dirs': display_dirs,\n 'authors': authors,\n 'corresponding_author': corresponding_author,\n 'invitations': invitations,\n 'references': references,\n 'publication': publication,\n 'topics': topics,\n 'languages': languages,\n 'passes_checks': passes_checks,\n 'dir_breadcrumbs': dir_breadcrumbs,\n 'files_panel_url': files_panel_url,\n 'citations': citations,\n 'subdir': subdir,\n 'parent_dir': parent_dir,\n 'file_error': file_error,\n 'file_warning': file_warning,\n 'platform_citations': platform_citations,\n 'parent_projects': parent_projects,\n 'has_passphrase': has_passphrase,\n 'is_lightwave_supported': ProjectFiles().is_lightwave_supported(),\n 'show_platform_wide_citation': show_platform_wide_citation,\n 'main_platform_citation': main_platform_citation,\n },\n )", 
"title": "" }, { "docid": "6e2b1ad029a5ca02b12bbf4d6ce572d8", "score": "0.4489479", "text": "def scandal_photo(db):\n description = 'SCANDAL!!'\n pk = 22\n filename = f'scandal.{pk}.jpg'\n try:\n return ImageFile.objects.get(description=description)\n except ImageFile.DoesNotExist:\n pass\n source = Path(__file__).parent / 'fixtures' / 'dummy.jpg'\n shutil.copy(source, Path(settings.MEDIA_ROOT) / filename)\n yield ImageFile.objects.create(\n id=pk,\n original=filename,\n stem='scandal',\n description=description,\n )", "title": "" }, { "docid": "e360658f0c0e3622023218eedf41cdb8", "score": "0.4486429", "text": "def image_preview(filepath, filename, postid):\n (shortname, extension) = os.path.splitext(filename)\n extension = extension.lower().strip('.')\n preview = False\n if extension in IMAGE_EXTENSIONS:\n preview = True\n \n return {'preview': preview,\n 'STATIC_URL': settings.STATIC_URL,\n 'filepath': filepath,\n 'postid': postid\n }", "title": "" }, { "docid": "e806f8879416144d4635635d9a5cd99c", "score": "0.44857582", "text": "def content_carousel_for_topic(context, topic, type):\n try:\n context['slides'] = BCCFChildPage.objects.by_topic(topic).filter(page_for=type).order_by('-created')[:12]\n context['carousel_color'] = topic.carousel_color\n context['carousel_title'] = type\n context['carousel_name'] = type.replace(' ', '_').lower()\n except ObjectDoesNotExist, e:\n log.info('Object Does Not Exist')\n log.error(e)\n except Exception, e:\n log.info('Unspecified Exception')\n log.error(e)\n return context", "title": "" }, { "docid": "ad4a9c27aa9021411375221e01a9376c", "score": "0.44857132", "text": "def main():\n arcss = os.path.dirname(os.path.abspath(__file__))\n hi = os.path.join(arcss, 'images', 'GalleryPhotos')\n low = os.path.join(hi, 'low')\n\n # Compare the files in the hi and low folders\n hi_list = list_files(arcss, hi)\n low_list = list_files(arcss, low)\n\n # If there are files in the hi folder that are not in the low folder, generate thumbnails and code\n new_images = set(hi_list).difference(low_list)\n for i in new_images:\n print(\"new image: \" + i)\n\n # Start generating thumbnails inside of \"low\"\n with open('captions.json', 'r') as f:\n j = json.load(f)\n print(\"Generating thumbnails...\")\n size = 200, 200\n for infile in hi_list:\n os.chdir(hi)\n # file, ext = os.path.splitext(infile)\n im = Image.open(infile)\n # Check if this image has a caption or not.\n if infile not in j:\n # If there's no caption entry, then show the image and ask user for a caption.\n im.show()\n print(\"Please provide a caption for the image shown:\")\n caption = raw_input()\n j[infile] = caption\n # Create thumbnail image\n im.thumbnail(size)\n os.chdir(low)\n # Save the new thumbnail in the low folder\n im.save(infile)\n im.close()\n\n os.chdir(arcss)\n # Write out the new captions json data\n with open('captions.json', 'w+') as f:\n json.dump(j, f, indent=2)\n\n # Generate html code for new images\n add_new_html(hi_list, j)\n print(\"Complete!\")\n\n return", "title": "" }, { "docid": "6a1bd25eab0e83726509454c744c7344", "score": "0.44776198", "text": "def profiles_add(profile: dict) -> None:\n CRAWLERS_DB_SESSION.add(models.Profiles(\n name=profile[\"name\"],\n phone=profile[\"phone\"],\n website=profile[\"website\"]\n ))\n commit_session(\n session=CRAWLERS_DB_SESSION,\n func_name='profiles_add'\n )", "title": "" }, { "docid": "6a77a87c3721dc02e7df87f1e8d139c5", "score": "0.44755906", "text": "def get_thumbnails(obj, arg):\n arg = arg.lower()\n if not isinstance(obj, Profile):\n 
raise TypeError(\"This is not a valid product model\")\n choices = dict(profile_pic_thumbnails.dr_profile_thumb_choices)\n # print(choices)\n if not choices.get(arg):\n raise TypeError(\"This is not a valid type for this model\")\n return obj.profile_pic_thumbnails_set.filter(type=arg).first().photo.url", "title": "" }, { "docid": "058b08f174d9aaefa3a314a211830f29", "score": "0.44739574", "text": "def _scaffold_dirs(self):\n from subprocess import call\n call([\"mkdir\", \"features\"])\n call([\"mkdir\", \"features/steps\"])", "title": "" }, { "docid": "da31ed405a5ba06bf6dea88e0b972181", "score": "0.44738996", "text": "def main():\n fdir = os.path.join('public', 'image', 'intro_pics')\n pic_lst = []\n\n for pic_name in os.listdir(fdir):\n pic_lst.append(os.path.join(fdir, pic_name))\n\n with open(os.path.join('data', 'intro_pics.json'), 'w+') as fjson:\n json.dump(pic_lst, fjson, indent=4)", "title": "" }, { "docid": "ec88c39dad7c37d6c071329b83c8b725", "score": "0.44697195", "text": "def construct_preview_mc_profile(self) -> ManagedCluster:\n # construct the default ManagedCluster profile\n mc = self.construct_default_mc_profile()\n # set up http proxy config\n mc = self.set_up_http_proxy_config(mc)\n # set up node resource group\n mc = self.set_up_node_resource_group(mc)\n return mc", "title": "" }, { "docid": "995c8c1c5df3286398777c05a758aafe", "score": "0.44696486", "text": "def profiles(userid):\n if not userid:\n profiles = [(profile.id, find_photo(profile.id), profile.firstname, profile.lastname, \n profile.gender, profile.location) for profile in db.session.query(Profile).all()]\n return render_template('profiles.html', profiles=profiles)\n else:\n try:\n user = db.session.query(Profile).filter_by(id=userid).first()\n \n return render_template('details.html', name=user.firstname + \" \" + user.lastname,\n email=user.email, location=user.location, bio=user.biography, \n photo=find_photo(user.id), registered=user.joined_.strftime('%B %d, %Y'))\n \n except:\n return render_template(\"404.html\")", "title": "" }, { "docid": "5d18ebc8f87b6fd0df39f5959f9f3da2", "score": "0.44673792", "text": "def setupVarious(context):\n\n if context.readDataFile('collective_history.txt') is None:\n return\n\n portal = context.getSite()\n if \"portal_history\" not in portal.objectIds():\n createHistory(portal)\n updateHistoryContainer(portal.portal_history)\n updateCatalog(portal.portal_catalog)\n updatePermissions(portal.portal_history)", "title": "" } ]
5f5a8118ee90ba9e1701b257a7d1a143
Base method to send SMS. This method can be extended by child classes to implement SMS sending.
[ { "docid": "1690f7412ec9e9cfd82fcd436276c908", "score": "0.6439799", "text": "def send_sms(self, from_, to, message, callback_url=None):\n\n url = self.base_url + self._PATHS['smsoutbound']\n\n # If 'to' contains only digits, it's an MSISDN, else it's an obfuscated identity\n data = {'to': 'tel:+' + to if to.isdigit() else 'alias:' + to,\n 'message': message}\n # If 'from_' contains only digits, it's an MSISDN, else it's a sender name\n if from_:\n data['from'] = 'tel:+' + from_ if from_.isdigit() else 'alias:' + from_\n if callback_url:\n data['callbackUrl'] = callback_url\n\n resp = self._make_request(url, data)\n\n return resp['id']", "title": "" } ]
[ { "docid": "6509fd35a9af8eb08b911ce9c72047bc", "score": "0.8653039", "text": "def sendSMS(self):\n pass", "title": "" }, { "docid": "43250301a65dfa6a517da438b5c8fba7", "score": "0.7880894", "text": "def send_sms(self, dest, text, sender=''):\n raise NotImplementedError", "title": "" }, { "docid": "934f27af410bbbd4fdd08dc93bd2f14f", "score": "0.7316244", "text": "def test_send_sms(self):\n pass", "title": "" }, { "docid": "7abb8dfbf76b1b78dd99d7146ad6a3bc", "score": "0.7299175", "text": "def send(self, sms):\n logger.log_send(self, sms)\n print sms", "title": "" }, { "docid": "4eba4ed4a60a3d19ae12672cbc862dd5", "score": "0.7221023", "text": "def send(self):\n\n if Validate.message(self.message) and Validate.number(self.to):\n url = '{}&method={}&message={}&to={}&sender={}'.format(BASEURL, SMSMessageRequest.SMS, self.message,\n self.to, SENDERID)\n if self.dlr_url:\n url += '&dlrurl={}'.format(self.dlr_url)\n if self.custom:\n url += '&custom={}'.format(self.custom)\n if self.unicode:\n url += '&unicode={}'.format(self.unicode)\n if self.flash:\n url += '&flash={}'.format(self.flash)\n response = Klient(url).response()\n sms_message_response = SMSMessageResponse(response=response)\n return sms_message_response", "title": "" }, { "docid": "65fc604d5ef2a7c2d58ca83eecec823a", "score": "0.71102226", "text": "def send_sms(self, number, message):\n with self.session() as conn:\n res = conn.send('sms sendtxt %s' % number)\n if 'Start sms input' not in res:\n return False\n conn.write('%s\\n.\\n' % message)\n return True", "title": "" }, { "docid": "cd37854936d37582fce9802924c87bd4", "score": "0.6899766", "text": "def send_sms(self, phone_number: str, message: str) -> 'WebDriver':\n ext_name = 'mobile: sendSms'\n args = {'phoneNumber': phone_number, 'message': message}\n try:\n self.assert_extension_exists(ext_name).execute_script(ext_name, args)\n except UnknownMethodException:\n # TODO: Remove the fallback\n self.mark_extension_absence(ext_name).execute(Command.SEND_SMS, args)\n return cast('WebDriver', self)", "title": "" }, { "docid": "5923c8f12eadf2e70deed7a1550060b8", "score": "0.68723243", "text": "def send_sms():\n text = request.args.get('text')\n client.messages.create(\n to=os.environ.get('MY_PHONE_NUMBER'),\n from_=os.environ.get('TWILIO_PHONE_NUMBER'),\n body=text\n )\n return \"Message sent.\"", "title": "" }, { "docid": "9bbc4bbd283c42c75f7e6fd85f351e63", "score": "0.6856622", "text": "def send_message(self, message):", "title": "" }, { "docid": "fb63d9d1684c0ecc50dc710cdc3442d9", "score": "0.6854443", "text": "def test_sms_send(self):\n self._text_message_content = \"Automated Test %s\" % str(time.time())\n self._last_message = \".message-list li\"\n self._unread = \"#threads-container li\"\n\n # launch the app\n self.launch_by_touch(\"sms\")\n self.apps.switch_to_displayed_app()\n self.messages = Messages(self.marionette)\n self.messages.wait_for_message_list()\n\n # click new message\n new_message = self.messages.tap_create_new_message()\n new_message.type_phone_number(self.testvars['carrier']['phone_number'])\n new_message.type_message(self._text_message_content)\n\n #click send\n self.message_thread = new_message.tap_send()\n self.message_thread.tap_back_button()\n last_thread = self.marionette.find_element(By.CSS_SELECTOR, self._unread)\n self.wait_for_condition(self.wait_for_unread_thread, 23)", "title": "" }, { "docid": "cc7781fb0bd8e43abe66103bfa27cb1c", "score": "0.68374705", "text": "def sendSms(self, message):\n # We need all the destinations comma separated\n 
destinations = ','.join(self.destination)\n\n # Set the default parameters that needs to be sent\n params = {'username': self.username,\n 'password': self.password,\n 'destination': destinations,\n 'responsetype': self.responseType,\n 'sender': self.sender,\n 'body': message\n }\n\n # If there is a reference set, add it to the parameters\n if not self.reference == None:\n params.update({'reference': self.reference})\n\n # If there is a timestamp set, add it to the parameters\n if not self.timestamp == None:\n params.update({'timestamp': self.timestamp})\n\n # If testing, add it to the parameters\n if self.test == True:\n params.update({'test': self.test})\n\n # urlencode all the paramters\n postParams = urllib.urlencode(params)\n\n # Set the HTTP Headers\n headers = {'Content-type': 'application/x-www-form-urlencoded'}\n\n httpConnection = httplib.HTTPConnection('api.messagebird.com')\n httpConnection.request('POST', '/api/sms', postParams, headers)\n httpResponse = httpConnection.getresponse()\n\n # Read the response data/info\n self.httpResponseStatus = httpResponse.status\n self.httpResponseReason = httpResponse.reason\n self.httpResponseData = httpResponse.read()\n\n # Close the HTTP connection\n httpConnection.close()\n\n if self.responseType == 'XML':\n self.xmlResponseData = parseString(self.httpResponseData).documentElement", "title": "" }, { "docid": "acbe78f5e44dad79793a20d30bc24094", "score": "0.6828932", "text": "def submit_sms(self, params):\n return self.post(SMS_URL, params)", "title": "" }, { "docid": "ce564828877ccd26e006df98e8fd5e8c", "score": "0.6818365", "text": "def process_sms(self, P):\n if self.state in [SessionState.BOUND_TX, SessionState.BOUND_TRX]:\n R = SubmitSmResp()\n R.sequence_number = Integer(P.sequence_number.value, 4)\n if(P.sm_length.value > 64000):\n R.command_status = Integer(command_status.ESME_RINVMSGLEN, 4)\n else:\n if(P.short_message.value and P.sm_length.value < 5000):\n message = P.short_message.value.decode(encoding='ascii')\n else:\n message = P.message_payload.value.value\n db_storage = self.server_db_store(P.destination_addr.value, message, self.user_id, P.source_addr.value)\n # in db_storage the message id of sms is returned\n R.message_id = CString(str(db_storage))\n else:\n R = SubmitSmResp()\n R.sequence_number = Integer(P.sequence_number.value, 4)\n R.command_status = Integer(command_status.ESME_RINVBNDSTS, 4)\n data = R.encode()\n self.socket.send(data)", "title": "" }, { "docid": "5ac8117ddc05c8c773abc9648ee95d23", "score": "0.68122613", "text": "def send_sms(self, dest, text, sender=''):\n data = [{'sender': sender, 'dest': dest, 'text': text}]\n return self.make_request(action='send', data=data)", "title": "" }, { "docid": "6285a1d056e215a81f154e389ca84693", "score": "0.67472386", "text": "def sms(self):\n return sms.SMS(self)", "title": "" }, { "docid": "bf0b9bef351d779425f41ce4a3144b4d", "score": "0.668898", "text": "def _send(self, recipient, message, session):\n if not recipient:\n raise self.NoRecipientError(\"YesssSMS: recipient number missing\")\n if not isinstance(recipient, str):\n raise ValueError(\"YesssSMS: str expected as recipient number\")\n if not message:\n raise self.EmptyMessageError(\"YesssSMS: message is empty\")\n\n csrf_token = self._get_csrf_token(session)\n\n sms_data = {\n \"to_nummer\": recipient,\n \"nachricht\": message,\n \"token\": csrf_token,\n }\n req = session.post(self._send_sms_url, data=sms_data)\n\n if req.status_code not in (200, 302):\n raise self.SMSSendingError(\"YesssSMS: error sending SMS 
(1)\")\n\n if _UNSUPPORTED_CHARS_STRING in req.text:\n raise self.UnsupportedCharsError(\n \"YesssSMS: message contains unsupported character(s)\"\n )\n\n if _SMS_SENDING_SUCCESSFUL_STRING not in req.text:\n raise self.SMSSendingError(\"YesssSMS: error sending SMS (2)\")", "title": "" }, { "docid": "10356becc1a02e4d4929f6d7416c88ed", "score": "0.6679513", "text": "def sendSMS(sender,recipients,smsBody):\r\n def printOutput(sender,recipients,smsBody):\r\n \"\"\"dev, debugging utility method\"\"\"\r\n message = ' sender : ' + sender\r\n message += '\\n to : ' + recipients[0]\r\n message += '\\n body : ' + smsBody\r\n print ''\r\n print ''\r\n print '____________________________________________________________________'\r\n print message\r\n print '____________________________________________________________________'\r\n\r\n def parseOutput(output):\r\n \"\"\"Returns parsed values from output with format:\r\n SUCCESS MessageId: 357958; Cost: 0.80; 0: Accepted for delivery;\r\n\r\n Returns:\r\n boolean (success),\r\n int (MessageId),\r\n int (status),\r\n float (cost),\r\n string (status message)\r\n \"\"\"\r\n vls=output.split(';')\r\n if len(vls)>=3:\r\n sm=vls[0].split(' ')\r\n cs=vls[1].split(':')\r\n st=vls[2].split(':')\r\n return str(sm[0]).find('SUCCESS')>=0,int(sm[2]),int(st[0].lstrip()),float(cs[1].lstrip()),st[1].lstrip()\r\n else:\r\n return False,-1,-1,0,output\r\n\r\n url='http://www.amdtelecom.net/api/sendsms.php'\r\n parameters={\r\n 'from' : sender,\r\n 'to' : recipients[0],\r\n 'username' : settings.NOTIFICATION_SMS_PROVIDER_API_USERNAME,\r\n 'password' : settings.NOTIFICATION_SMS_PROVIDER_API_PASSWORD,\r\n 'text' : stringToAscii(smsBody)\r\n }\r\n fetchRes=None\r\n msg='util.sendSMS:logging.info'\r\n try:\r\n logging.info('util.sendSMS.fetchHttpRequestData')\r\n msg='FETCHING SMS SEND FROM API'\r\n fetchRes=fetchHttpRequestData(parameters,\r\n url,\r\n request_output='text',\r\n request_method='GET')\r\n if fetchRes is not None:\r\n msg='PARSING SMS SEND FETCH API OUTPUT: '\r\n bst,msgid,stid,cs,msg=parseOutput(fetchRes)\r\n if not bst:logging.error('ERROR RETURNED FROM SMS SEND API:'+fetchRes+' - PARAMS'+str(parameters))\r\n return fetchRes,bst,msgid,stid,float(cs),msg\r\n else:\r\n logging.error(msg+' - PARAMS'+str(parameters))\r\n return (None,False,-1,-1,float(0),\r\n msg+' - PARAMS'+str(parameters))\r\n except Exception, ex:\r\n if fetchRes is None:fetchRes='None'\r\n logging.error('ERROR '+msg+' - EXCEPTION:'+str(ex)+'- FETCH RES:'+fetchRes)\r\n return (None,False,-1,-1,float(0),\r\n msg+' - PARAMS'+str(parameters)+' - FETCH RES:'+fetchRes)", "title": "" }, { "docid": "9b3fed0dd24acbf7674dfb13530ac41c", "score": "0.6672503", "text": "def send_entity(self):\n self.ensure_one()\n\n my_model = self.env['ir.model'].search([('model', '=', self.model)])\n vals = {\n 'record_id': self.record_id,\n 'model_id': my_model[0].id,\n 'account_id': self.from_mobile_id.account_id.id,\n 'from_mobile': self.from_mobile_id.mobile_number,\n 'to_mobile': self.to_number,\n 'sms_content': self.sms_content,\n 'status_string': '-',\n 'direction': 'O',\n 'message_date': datetime.utcnow(),\n 'status_code': 'queued',\n 'by_partner_id': self.env.user.partner_id.id\n }\n\n if self.delivery_time:\n vals.update({\n 'message_date': self.delivery_time\n })\n if self.media_id:\n vals.update({\n 'media_id': self.media_id\n })\n\n # Create the queued sms\n self.env['sms.message'].create(vals)\n return True", "title": "" }, { "docid": "a5124a247040c8e2451dabaaf39ae233", "score": "0.6660116", "text": "def 
send_text(self, phone_number):\n sms_params = urllib.urlencode({\n '_rnr_se': self.key,\n 'phoneNumber': phone_number,\n 'text': self.text\n })\n # Send the text, display status message \n self.response = \"true\" in self.opener.open(self.sms_url, sms_params).read()", "title": "" }, { "docid": "bc1ecf04da56ec39dcb08d1ad878784f", "score": "0.66388434", "text": "def send_direct_message(self):", "title": "" }, { "docid": "236c7c225064873a25165af56eb68165", "score": "0.6605685", "text": "def sms_send(self, number, contents):\n self.ctrl_lock.acquire()\n try:\n self.ctrl_port.write(('AT+CMGS=\"%s\"\\r\\n' % number).encode())\n # Perform a SIM test first.\n self.ctrl_port.write((contents+chr(26)).encode())\n result = self.ctrl_port.return_data()\n # A text number is an integer number, returned in the\n # last returned entry of the result, just after the \": \" part.\n text_number = int(result[-1].split(': ')[1])\n return text_number\n finally:\n self.ctrl_lock.release()", "title": "" }, { "docid": "9c3bb5c06b5d64646874d714d51ce0a3", "score": "0.6601742", "text": "def send_sms(self, body, to):\n try:\n self.client.messages.create(\n body=body,\n to=to,\n from_=self.from_number,\n )\n except twilio.TwilioRestException as e:\n print(e)", "title": "" }, { "docid": "1b0d2a96c26d99ffbe7eb18c64089d29", "score": "0.6600797", "text": "def _send(self, to, msg):\n raise NotImplementedError", "title": "" }, { "docid": "c394ec8e419783875c0d78ab8a54dfc9", "score": "0.656399", "text": "def send_sms(sMsg):\n try:\n sms = TwilioClient.sms.messages.create(body=\"{0}\".format(sMsg),to=\"{0}\".format(sSMSSender),from_=\"{0}\".format(sTwilioNumber))\n except:\n print(\"Error inside function send_sms\")", "title": "" }, { "docid": "19393da9f21d5b784ecdbe70693dbf8b", "score": "0.654648", "text": "def send(self, message):\n pass", "title": "" }, { "docid": "c43c3c03f67dd73854afa62954c7cc79", "score": "0.6545064", "text": "def send_message(self, msg, **kwargs):\n\n raise NotImplementedError(\"send_message should be implemented in a derived class\")", "title": "" }, { "docid": "a514e9cfba26915a728e8647aaefa923", "score": "0.651786", "text": "def send_message(self, message: str):\n raise NotImplementedError()", "title": "" }, { "docid": "b46e46947e911b4a8ee1156e880e97b9", "score": "0.64994764", "text": "def _send_sms(user_results):\n if len(user_results.movies) > 0 and user_results.phone_number:\n client = Client(Config.ACCOUNT_SID, Config.AUTH_TOKEN)\n print(Config.ACCOUNT_SID, Config.AUTH_TOKEN)\n body = _construct_sms(user_results.movies)\n to = '+1' + str(user_results.phone_number)\n print(body, to)\n client.messages.create(\n from_='+15854929141',\n body=_construct_sms(user_results.movies),\n to='+1' + str(user_results.phone_number)\n )", "title": "" }, { "docid": "31e1bfc5d93455d4cab2a7187b13966c", "score": "0.64614505", "text": "def send(self):\n send_message(self.phone, self.message)\n self.sent = True\n self.save()", "title": "" }, { "docid": "ee0d22af1ccc33317b3d481cc385dc4c", "score": "0.64483774", "text": "def send_bulk_sms(self, dests, text, sender=''):\n raise NotImplementedError", "title": "" }, { "docid": "6c57871010a267c8993795a18d252105", "score": "0.64444846", "text": "def send_sms(self, payment_id, phone, company):\n if company.upper() not in ('TELCEL' 'MOVISTAR' 'IUSACELL' 'NEXTEL'):\n raise WrongPhoneCompanyError(\n 'Las unicas companias soportadas son TELCEL, IUSACELL y MOVISTAR y NEXTEL'\n )\n payload = json.dumps({'customer_phone': phone, 'customer_company_phone': company})\n return 
requests.post(\n '/'.join((self.url_base, 'charges', payment_id, 'sms')),\n data = payload,\n auth = self.auth,\n headers = self.headers\n ).json()", "title": "" }, { "docid": "08c89c3ad52ddfbe1ad6f546fafff345", "score": "0.64197886", "text": "def send_sms(to_tel, msg):\n to_tel = to_tel.strip().replace(\" \", \"\").replace(\"-\", \"\").replace(\"/\", \"\")\n msg = msg.strip()\n log.info(\"Sending SMS to %s: '%s' (%s chars)\" % (to_tel, msg, len(msg)))\n\n if not settings.SEND_SMS:\n log.info(\"- disabled by settings.SEND_SMS\")\n return\n\n url = \"https://www.intellisoftware.co.uk/smsgateway/sendmsg.aspx\"\n values = {'username' : settings.INTELLISENSE_USERNAME,\n 'password' : settings.INTELLISENSE_PASSWORD,\n 'to' : to_tel,\n 'text': msg\n }\n\n data = urllib.urlencode(values)\n req = urllib2.Request(url, data)\n response = urllib2.urlopen(req)\n the_page = response.read()\n log.info(\"- sms response: %s\" % the_page)", "title": "" }, { "docid": "9a39905c71a955e1eea613bca6666235", "score": "0.6418008", "text": "def send_sms(phone, content):\n if settings.SMS_DEBUG:\n print('Sms will be send as: %s, %s', phone, content)\n return True, ''\n apikey = settings.YUNPIAN['apikey']\n url = 'http://yunpian.com/v1/sms/send.json'\n data = {\n 'apikey': apikey,\n 'mobile': phone,\n 'text': content\n }\n\n try:\n response = requests.post(url, data).json()\n if response['code'] == 0:\n return True, ''\n return False, response['msg']\n except Exception as e:\n return False, e.message", "title": "" }, { "docid": "0e9076799044f572c454f8da055220f7", "score": "0.6410605", "text": "def test_send_mms(self):\n pass", "title": "" }, { "docid": "d636a7126234a3bb1c11fa4f05f87d19", "score": "0.6409859", "text": "def action_send_sms(self):\n for o in self:\n # get the phone that ordered from context\n agent_phone = self._context.get(\"agent_phone\", None)\n alt_msg = []\n if agent_phone:\n msg, receipients = None, list(set([agent_phone, o.customer_id.phone]))\n else:\n msg, receipients = None, list(set([o.partner_id.phone, o.customer_id.phone]))\n if o.state == \"draft\":\n if o.partner_id.is_agent and o.partner_id.agent_type_id == self.env.ref(\"copia_partner.conf_t_chama\") \\\n and o.partner_id.partner_data[0].chama_member_ids and o.partner_id == o.customer_id:\n for member in o.partner_id.partner_data[0].chama_member_ids:\n receipients.append(member.phone)\n\n l_msg = \", \".join(\"%s for %s at KES %s\" % (\n l.name, l.product_uom_qty, l.price_unit * l.product_uom_qty\n ) for l in o.order_line if l.sms_include)\n msg = \"Copia Order # %s for delivery on %s confirmed for %s.\" \\\n \" Total: KES %s\" % (o.name, o.date_delivery, l_msg, o.amount_total)\n elif o.state == \"rejected\":\n if o.reject_reason == \"qty_exception\":\n l_msg = \" \".join(\"[%s]\" % l.product_id.default_code for l in o.order_line)\n msg = \"Sorry. The ordered quantity for Product code %s is wrong.\" \\\n \" Kindly enter a valid quantity.\" % l_msg\n elif o.reject_reason == \"stock_exception\":\n l_msg = \", \".join(\n \"[%s] is %s\" % (l.product_id.default_code, l.product_id.max_sale_qty) for l in o.order_line)\n msg = \"Sorry. 
The maximum order quantity for product code %s.\" \\\n \" Kindly order an alternative or contact your Sales Advisor for more information.\" % l_msg\n # Checks which products are out of stock and checks for their alternative products\n elif o.reject_reason == \"out_of_stock\":\n alternatives_available = []\n for l in o.order_line:\n tmpl_id = l.product_id.product_tmpl_id.id\n qty = l.product_uom_qty\n alternatives = self.check_alternative_products(tmpl_id, int(qty))\n if not alternatives:\n a_msg = 'Sorry. %s is currently not available in the market. We are working to get '\\\n 'you an alternative product as soon as possible.' % l.product_id.display_name\n alt_msg.append(a_msg)\n else:\n alternatives_available.append(alternatives)\n alt_string = ['%s %s' % (alt['product_name'], alt['product_code']) for alt in alternatives]\n #_logger.info(alt_string)\n alt_string = ', '.join(alt_string)\n a_msg = 'Sorry. %s is currently not available in the market. Copia recommends %s instead!'\\\n % (l.product_id.display_name, alt_string)\n alt_msg.append(a_msg)\n #_logger.info('alternatives are %s', alternatives_available)\n #_logger.info(alt_msg)\n else:\n l_msg = \" \".join(\"[%s]\" % l.product_id.default_code for l in o.order_line)\n msg = \"Sorry. You have exceeded the daily maximum order quantity allowed for product code %s.\" \\\n \" Kindly place your order again tomorrow.\" % l_msg\n else:\n # Unknown Issue or Bad state\n # raise UserError(_(\n # \"SMS could not be sent. Sale Order # %s has a state of %s, or is duplicated\" % (o.name, o.state)\n # ))\n continue\n\n # Create the SMS and Submit to the outgoing_sms Queue\n # with_context(add_to_queue=True/False) helps in testing\n if msg is not None:\n for r in receipients:\n _logger.info('%s--%s' % (r, msg))\n self.env[\"sms.message\"].with_context(add_to_queue=True).create({\n \"type\": \"outbox\",\n \"from_num\": \"Copia\",\n \"to_num\": r,\n \"date\": datetime.datetime.today().isoformat(),\n \"text\": msg,\n \"note\": \"Order Created in %s\" % o.mode,\n \"order_created\": True,\n \"partner_id\": o.partner_id.id\n })\n \n for msg in alt_msg:\n for r in receipients:\n _logger.info('%s--%s' % (r, msg))\n self.env[\"sms.message\"].with_context(add_to_queue=True).create({\n \"type\": \"outbox\",\n \"from_num\": \"Copia\",\n \"to_num\": r,\n \"date\": datetime.datetime.today().isoformat(),\n \"text\": msg,\n \"note\": \"Order Created in %s\" % o.mode,\n \"order_created\": True,\n \"partner_id\": o.partner_id.id\n })\n\n return True", "title": "" }, { "docid": "ca5e5902f3d1d0071bfd75a40189282e", "score": "0.6404837", "text": "def send(self, mailbox, message, **kwargs):\n pass", "title": "" }, { "docid": "46bba36af8d366f61b9596c2327a3fff", "score": "0.6401457", "text": "def send_sms(username, password, sender, recipients, sms):\n multiple = 0\n msgs = []\n try:\n if len(sms)>160:\n multiple = len(sms)/160 + 1\n last = 0\n for i in range(0, multiple):\n msgs.append(sms[i*160:i*160+160])\n last = i\n else:\n msgs.append(sms)\n except:\n print \"Unexpected error while splitting sms message:\", sys.exc_info()[0]\n\n for sms in msgs:\n url = None\n details = None\n cookie = None\n opener = None\n site = None\n tmp = None\n try:\n url = 'https://oma.saunalahti.fi/settings/smsSend'\n details = r\"username=\"+username+\\\n r\"&login=Sisään&password=\"+password\n cookie = cookielib.CookieJar()\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))\n opener.addheaders = [('Referer', \\\n 'https://oma.saunalahti.fi/settings/'),\n ('Content-Type', \\\n 
'application/x-www-form-urlencoded'),\n ('User-Agent', \\\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; '+ \\\n 'en-US; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14')]\n site = opener.open(url, details)\n tmp = site.read()\n site.close()\n details = \"sender=\" + sender + \\\n \"&recipients=\" + recipients + \\\n \"&text=\" + sms + \\\n \"&size=\" + str(len(sms)) + \\\n \"&send=Lähetä\"\n site = opener.open(url, details)\n tmp = site.read()\n site.close()\n except:\n print \"Unexpected error while sending sms from send_sms function:\",\\\n sys.exc_info()[0]", "title": "" }, { "docid": "a06954ce19075ef3771750de90d65ee8", "score": "0.6378817", "text": "def send(self, msg):\n if type(msg) is str:\n msg = bytes(msg, \"utf8\")\n super().send( msg )", "title": "" }, { "docid": "54bf1e321845e5ac68214431c0cbb5a1", "score": "0.63424456", "text": "def _send_twilio_sms(self, message, recipient, sender):\n\n if not recipient.startswith(\"+\"):\n recipient = \"+{}\".format(recipient)\n if not sender.startswith(\"+\"):\n sender = \"+{}\".format(sender)\n\n self.client.messages.create(to=recipient, from_=sender, body=message)", "title": "" }, { "docid": "990bf9107b33d589e847de6f58a9abce", "score": "0.63329804", "text": "def send_message(self, _, *args, **kwargs):\n return self.core.send_message(*args, **kwargs)", "title": "" }, { "docid": "2d328896d4ee93a3f1446ef59970e200", "score": "0.6329809", "text": "def push_sms(self, content):\r\n\r\n if not self.clients:\r\n return\r\n\r\n for client in self.clients:\r\n response = self.nexmo.send_message({'from': self.sender,\r\n 'to': '{0}'.format(client.phone),\r\n 'text': content})\r\n client.sms = response", "title": "" }, { "docid": "4139d78239ecf891a3961a8e82523f89", "score": "0.6326449", "text": "def sendPostRequest(self, phoneNo: str, textMessage: str):\r\n req_params = {\r\n 'apikey': self.apiKey,\r\n 'secret': self.secretKey,\r\n 'usetype':'stage',\r\n 'phone': phoneNo,\r\n 'message': textMessage,\r\n 'senderid':'SMSIND'\r\n }\r\n return requests.post(self.URL, req_params)", "title": "" }, { "docid": "2d22205cf7fa33d9d83f7e0bf57e2a93", "score": "0.63212866", "text": "def sms():\n known_patients = [item.mobile for item in\n db.session.query(models.Patient.mobile).all()]\n number = request.values.get('From').replace('+', '')\n message = request.values.get('Body')\n\n if number in known_patients:\n print \"Attempting to send SMS response to %s\" % (number)\n return Manager().respond({'message': message, 'number': number})\n else:\n print 'Logging that an unknown patient has sent the service an SMS.' 
+ \\\n 'Their mobile number and message was: %s, %s' % (number, message)\n return ''", "title": "" }, { "docid": "f523405333532ccc53ef823dafdffcf8", "score": "0.63140166", "text": "def send(self, raw_message):\n pass", "title": "" }, { "docid": "56305aa4627c37391733cff78c3fa5e2", "score": "0.62593204", "text": "def send_sms(self, dest, text, sender=''):\n result = self.make_request(\n data=self.prepare_data(dest=dest, text=text, sender=sender))\n if result.find('id') == -1:\n return False\n return {'id': result.strip().split(':')[1]}", "title": "" }, { "docid": "8fcfc1eaa1edd4aa071b274cf5ed1aaf", "score": "0.6240025", "text": "def handleSms(sms):\n handle_message(sms.text, sms.number.replace('+61', '0'), 'sms')\n transaction.commit()", "title": "" }, { "docid": "299ace661f15ed6dc20e1e5f8e5084c4", "score": "0.62384075", "text": "def post(self, request):\n phone_number = request.data[\"phone_number\"]\n message = request.data[\"message\"]\n if send_sms(phone_number, message):\n return Response({\"success\": True}, status=status.HTTP_200_OK)\n return Response({\"success\": False}, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "f179460b278742e37f6d8d6e0ab2961a", "score": "0.62302464", "text": "def send_sms_response(self, P):\n if(P.command_status.value == 0):\n print(\"Message having message id \" + str(P.message_id.value.decode(encoding='ascii')) + \" has been scheduled for sending.\")\n elif(P.command_status.value == command_status.ESME_RINVMSGLEN):\n print(\"Sorry message having message id \" + str(P.message_id.value.decode(encoding='ascii')) + \" cannot be send due to invalid message length.\")\n elif(P.command_status.value == command_status.ESME_RINVBNDSTS):\n print(\"Sorry message cannot be send because Sending Sms is not allowed in this session state.\")", "title": "" }, { "docid": "80cf8900d397c91b3ac6052036c03bf5", "score": "0.6205172", "text": "def _send(self, data):", "title": "" }, { "docid": "59fa90f6e83d808909e5f0519d9ac9ba", "score": "0.61985046", "text": "def SendSMS(self,tag,receivers=[],message='TEST',values=None):\n self.warning = self.info = fandango.printf\n if not receivers and hasattr(tag,'__iter__'):\n tag,receivers = tag[0],tag[1:]\n alarm = (self.Alarms.get(tag) or [None])[0]\n\n self.info( 'In SendSMS(%s,%s,%s,%s)'%(tag,receivers,message,values))\n if not SMS_ALLOWED or not 'smslib' in globals():\n self.warning( 'SMS Messaging is not allowed '\n +'or smslib not available!!!')\n return\n report = ''\n username,password=self.SMSConfig.split(':',1)\n source = self.FromAddress \n self.warning('SendSMS from %s as %s'%(source,username))\n\n now = time.time()\n try:\n self.lock.acquire()\n while self.SMS_Sent and self.SMS_Sent[0]<now-(24*60*60): \n self.SMS_Sent.popleft()\n sends = len(self.SMS_Sent)\n finally:\n self.lock.release()\n if sends>=self.SMSMaxPerDay:\n self.warning( 'Max Daily SMS messages (%d) has been exceeded!' 
\n % self.SMSMaxPerDay)\n return\n elif receivers:\n sms = '((SMS:)?([\\+]?[0-9]{9,13}))'\n smslist = []\n for s in receivers:\n numbers = re.findall(sms,s)\n [smslist.append(t[-1]) for t in numbers if t and len(t)>1]\n smslist = list(set(smslist))\n if smslist:\n try:\n self.lock.acquire()\n self.info( 'SMS Sending: the phone numbers to be reported'\n ' for %s are: %s' % (tag,','.join(smslist)) )\n \n if message in ('ALARM',) and tag in self.Alarms:\n formula,text = (self.Alarms[tag].formula, ';%s'\n %self.Alarms[tag].parse_description())\n else:\n formula,text = '',';%s'%message\n \n report = 'Alarm '+tag+': '+text\n if values: \n svalues = str(values).replace('{','').replace('}','')\n report += ';Values=' + svalues\n \n if len(report) > self.SMSMaxLength:\n report = report[:self.SMSMaxLength-5]+'...'\n \n self.info( 'SMS Sending: message is: %s' % (report))\n except:\n self.warning( 'Exception generating SMS report: %s' \n % traceback.format_exc())\n finally:\n self.lock.release()\n\n if 'sms' not in self.AlertOnRecovery.lower() \\\n and message.strip() not in ('ALARM','TEST'):\n self.warning('SMS sending not allowed for %s message type.'\n %message)\n else:\n #To avoid device hungs all SMS messages are sent \n #in a background thread\n if not hasattr(self,'sms_threads'): self.sms_threads = 0\n self.sms_threads += 1\n self.info( 'Sending SMS in a different Thread ... '\n +'SMS-Send%d'%self.sms_threads)\n source = (re.findall('[a-zA-Z]+',str(source)) or [''])[0]\n thr = smslib.SMSThread(message=report, dest=smslist, \n username=username, password=password, source=source)\n thr.setDaemon(True)\n thr.start()\n self.SMS_Sent.append(now)\n \n self.info( '%d SMS messages sent in the last 24 hours.' \n % len(self.SMS_Sent))\n for s in smslist:\n self.SentSMS[s.lower()]+=1\n return 'DONE'\n return 'FAILED'", "title": "" }, { "docid": "5353c09cac6fb2f4e89f2cf607a1487d", "score": "0.6189894", "text": "def send(self):\n raise NotImplementedError(\"To be implemented in derived class.\")", "title": "" }, { "docid": "9e8c7d4d29637ae1aa2040af8e653be6", "score": "0.6167825", "text": "def send_sms(msg, phone_number):\n account_sid = os.environ.get('TWILIO_SID')\n auth_token = os.environ.get('TWILIO_TOKEN')\n\n client = Client(account_sid, auth_token)\n message = client.messages.create(\n body=msg,\n from_=os.environ.get('TWILIO_NUMBER'),\n to=phone_number\n )\n if message.status == 'sent':\n print('message_sent')\n elif message.status == 'queued':\n print('message queued')", "title": "" }, { "docid": "cb3f3e9381b9c08715032d361af94476", "score": "0.61623955", "text": "def send_text(number, message):\n textbelt_key = passwords.textbelt_key()\n r = requests.post(\"https://textbelt.com/text\", data={\n \"phone\": number,\n \"message\": message,\n \"key\": textbelt_key,\n \"replyWebhook\": \"https://www.ultron.sh/server/handleSmsReply\"\n })\n print(r.json())\n return", "title": "" }, { "docid": "a9e4d2175cc6e2f3339d6a7b7019b3a0", "score": "0.61445624", "text": "def send_message(self, message: str = \"\", **kwargs: Any) -> None:\n\n targets = kwargs.get(ATTR_TARGET, self.default_targets)\n if not targets or not message:\n return\n\n if self.router.suspended:\n _LOGGER.debug(\n \"Integration suspended, not sending notification to %s\", targets\n )\n return\n\n try:\n resp = self.router.client.sms.send_sms(\n phone_numbers=targets, message=message\n )\n _LOGGER.debug(\"Sent to %s: %s\", targets, resp)\n except ResponseErrorException as ex:\n _LOGGER.error(\"Could not send to %s: %s\", targets, ex)\n finally:\n 
self.router.notify_last_attempt = time.monotonic()", "title": "" }, { "docid": "c104da63eb5f877476bbcc2fe23ce9c1", "score": "0.61176217", "text": "def deliver_sms(self):\n\n try:\n if not self._can_do('deliver_sm'):\n raise InvalidSessionState(\"\")\n except InvalidSessionState as e:\n print(e.value)\n sms = self.sever_fetch_sms(self.user_id)\n if(sms is None):\n pass\n else:\n msg_length = int(len(sms.msg))\n P = DeliverSm()\n P.sequence_number = Integer(self._next_seq_num(), 4)\n P.source_addr = CString(str(sms.sms_from))\n P.destination_addr = CString(str(sms.sms_to))\n P.schedule_delivery_time = CString(str(sms.schedule_delivery_time))\n P.validity_period = CString(\"\")\n if(msg_length < 255):\n P.sm_length = Integer(msg_length, 1) # page 134\n P.short_message = CString(str(sms.msg))\n else:\n P.message_payload = TLV(tlv_tag.message_payload, message)\n data = P.encode()\n self.socket.send(data)", "title": "" }, { "docid": "cf52c78a38c3c2da7608ad47c91d05d3", "score": "0.61093503", "text": "def send_sms(code, mobile):\n text = SMS_TEMPLATE % code\n params = urllib.urlencode({'apikey': SMS_KEY, 'text': text, 'mobile': mobile})\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\"}\n conn = httplib.HTTPConnection(host, port=port, timeout=30)\n conn.request(\"POST\", sms_send_uri, params, headers)\n response = conn.getresponse()\n response_str = response.read()\n conn.close()\n status_code = int(re.match(r'\\{\"code\":(-?\\d+),.*', response_str).group(1))\n if status_code == 0:\n return True\n else:\n return False", "title": "" }, { "docid": "a5e53ff61157c73ee86efa9b4b519bda", "score": "0.60978085", "text": "def send(self, msg, time=-1):\n raise NotImplementedError(\"Please Implement this method\")", "title": "" }, { "docid": "55b5f4d5636340f46f525765edf1f20e", "score": "0.6090989", "text": "def do_sending(self, arg):\r\n spell_helper(\"Sending\")", "title": "" }, { "docid": "e8362294bbc1f9bd3c5aa49b35032a83", "score": "0.60834914", "text": "def SendMsg(self, msg, wp=0, lp=0):", "title": "" }, { "docid": "b862ed06d585a8ff7cfe2b525bff030f", "score": "0.6072074", "text": "def send_message(self, msg):\n return \"Not implemented\"", "title": "" }, { "docid": "d4ced733e218cddf3fe4ec3d5d6707fe", "score": "0.6071219", "text": "def send_sms(self, recipient, message, sender):\n try:\n if not self._can_do('submit_sm'):\n raise InvalidSessionState(\"SMPP Session not in a state to allow sending SMSes.\")\n msg_length = int(len(message))\n P = SubmitSm()\n P.sequence_number = Integer(self._next_seq_num(), 4)\n if sender:\n P.source_addr = CString(sender)\n P.destination_addr = CString(recipient)\n P.schedule_delivery_time = CString(\"\")\n P.validity_period = CString(\"\")\n P.sm_default_msg_id = Integer(0, 1)\n if(msg_length < 5000):\n P.sm_length = Integer(msg_length, 1)\n P.short_message = CString(str(message))\n else:\n P.message_payload = TLV(tlv_tag.message_payload, message)\n data = P.encode()\n #storing pdu in dictionary named responses\n self.pdus.update({P.sequence_number.value: {'req': P, 'resp': '', 'read': 'false'}})\n self.socket.send(data)\n except InvalidSessionState as e:\n print(e.value)", "title": "" }, { "docid": "1f98153f2ad7d136685998d7acb3d073", "score": "0.60693514", "text": "def send_sms(user_phone: UserPhone, db: Session = Depends(get_db)) -> Response:\n user = User.query.filter(User.phone_number == user_phone.phone).first()\n if user:\n perform_send_sms_to_user(user, user_phone.phone, db)\n return 
Response(status_code=status.HTTP_200_OK)\n else:\n return Response(\n status_code=status.HTTP_404_NOT_FOUND,\n content=str({\"error\": \"User not found.\"}),\n )", "title": "" }, { "docid": "1e997d26c1ba83730fa9e8d6f38df73a", "score": "0.60467976", "text": "def process_multiple_sms(self, P):\n if self.state in [SessionState.BOUND_TX, SessionState.BOUND_TRX]:\n R = SubmitMultiResp()\n R.sequence_number = Integer(P.sequence_number.value, 4)\n if(P.sm_length.value > 64000):\n R.command_status = Integer(command_status.ESME_RINVMSGLEN, 4)\n else:\n if(P.short_message.value and P.sm_length.value < 5000):\n message = P.short_message.value.decode(encoding='ascii')\n else:\n message = P.message_payload.value.value\n db_storage = self.server_db_store(P.destination_addr.value, message, self.user_id, P.source_addr.value)\n # in db_storage the message id of sms is returned\n R.message_id = CString(str(db_storage))\n else:\n R = SubmitSmResp()\n R.sequence_number = Integer(P.sequence_number.value, 4)\n R.command_status = Integer(command_status.ESME_RINVBNDSTS, 4)\n data = R.encode()\n self.socket.send(data)", "title": "" }, { "docid": "27e1b4991555bf2682cf25c9621c76f3", "score": "0.6030116", "text": "def send(self, message):\n self.sc.sendall(message.encode('ascii'))", "title": "" }, { "docid": "6d03fc0041fe8df6488c1b5bac038fa4", "score": "0.60259485", "text": "def build_simple_sms(self, content):\r\n return '{0}'.format(content)", "title": "" }, { "docid": "009eb7ed0466a43d4f7929d2129d2591", "score": "0.6021699", "text": "def send(self, content):\n raise NotImplementedError()", "title": "" }, { "docid": "9b7b8a12db0bf157b6ebafa1b24ce36a", "score": "0.60202384", "text": "def send_sms(self, message, subscriber_number=None, access_token=None):\n data = {\n 'message': message,\n 'address': subscriber_number or self.subscriber_number,\n 'access_token': access_token or self.access_token,\n }\n url = self.api_base_url + \"/smsmessaging/v1/outbound/%s/requests\" % \\\n self.shortcode\n return requests.post(url, data=data)", "title": "" }, { "docid": "be5563005d14875ebdbb179915d36dd3", "score": "0.60159194", "text": "def _send(self, telegram):\n \n self.device.write(telegram)", "title": "" }, { "docid": "7ff2053ceda9e021dc60a2c5968b3422", "score": "0.60021794", "text": "async def send_message(self, message):\n raise NotImplementedError", "title": "" }, { "docid": "5898f6805c60f8325d5c0787cf622149", "score": "0.5986255", "text": "def send_message(self, msg, values):\n pass", "title": "" }, { "docid": "5ebb4958c579d8a38a90f4c7c5a714a9", "score": "0.59727526", "text": "def _send(self, *args, **kwargs):\n message = px.Message(*args, **kwargs)\n self.stream.send(message)\n marsh = message.serialize().decode(\"utf-8\") # unicode object.\n _logger.debug(u\"Sending '%s'.\", marsh)", "title": "" }, { "docid": "0a92c5f35f81fb3238c7c87f91976508", "score": "0.59721017", "text": "def send_message(self, message, recipient_list, from_phone=None, id=None):\n params = {\n 'method': 'send',\n 'username': self.get_username(),\n 'password': self.get_password(),\n 'message': message,\n 'dest': ','.join(map(self.phone_num_cleanse, recipient_list)),\n }\n if id:\n params.update({'id': int(id)})\n\n params = urlencode(params)\n response = requests.get(self.get_api_url(), params=params, verify=False)\n\n if not response.ok:\n if not self.fail_silently:\n raise Exception(\"Digicel Send sms response header returned status code of %d\" % response.status_code)\n else:\n return False\n\n response_dom = 
minidom.parseString(response.content)\n response_status_code = int(response_dom.getElementsByTagName('status')[0].firstChild.data)\n if response_status_code != 0:\n if not self.fail_silently:\n raise Exception(\"Digicel Send response status code error %d: %s\" % (response_status_code,\n DIGICEL_RESPONSE_STATUS_CODES[response_status_code]))\n else:\n return False\n else:\n return True", "title": "" }, { "docid": "bcad146eeaa2108f806c6661d32d72bb", "score": "0.5951917", "text": "def send(self, fail_silently: bool = False) -> int:\n if not self.recipients:\n # Don't brother creating the network connection if there's nobody\n # to send the text message to\n return 0\n connection: Type['BaseSmsBackend'] = self.get_connection(fail_silently)\n count = connection.send_messages([self]) # type: ignore\n\n post_send.send(sender=self.__class__, instance=self)\n\n return count", "title": "" }, { "docid": "8c9eb383d1e7b5f4b29a9557b6d81410", "score": "0.5950099", "text": "def _send(self, what, address):\n\n print '_send: please override me.'", "title": "" }, { "docid": "fef33be57c3b905f41fe78b2d9afca8a", "score": "0.59374917", "text": "def message_me(text, **kwargs):\n output = None\n try:\n voice = Voice()\n voice.login(email, password)\n phone = \"phone-number-here\"\n\n if kwargs is not None:\n for key, value in kwargs.items():\n text += \" | {}: {}\".format(key, value)\n\n voice.send_sms(phone, text)\n output = \"Sucessfully sent message\"\n\n except Exception as e:\n output = \"Error sending message: {}\".format(e)\n\n finally:\n print(output)", "title": "" }, { "docid": "febcc5e6e04a416785f4d061685ee697", "score": "0.5926374", "text": "def send(message):\n\n _MESSAGING.send(message)", "title": "" }, { "docid": "424b142e4073653788a482ca476f54b1", "score": "0.59210837", "text": "def send_alert(self, name: str, monitor: Monitor) -> None:\n\n alert_type = self.should_alert(monitor)\n if alert_type not in [AlertType.FAILURE, AlertType.SUCCESS]:\n return\n\n message = self.build_message(AlertLength.SMS, alert_type, monitor)\n\n url = f\"https://{self.api_host}/eapi/submission/send_sms/2/2.0\"\n params = {\n \"username\": self.username,\n \"password\": self.password,\n \"message\": message,\n \"msisdn\": self.target,\n \"sender\": self.sender,\n \"repliable\": \"0\",\n }\n\n if not self._dry_run:\n try:\n response = requests.get(url, params=params, timeout=self.timeout)\n status = response.text\n if not status.startswith(\"0\"):\n self.alerter_logger.error(\n \"Unable to send SMS: %s (%s)\",\n status.split(\"|\")[0],\n status.split(\"|\")[1],\n )\n except requests.exceptions.RequestException:\n self.alerter_logger.exception(\"SMS sending failed\")\n else:\n self.alerter_logger.info(\n \"dry_run: would send SMS: %s with message %s\", url, message\n )", "title": "" }, { "docid": "8c39d4a75b20ee3ec0fe2534c5b08bec", "score": "0.59135807", "text": "def send(self, message):\n\t\tself.conn.send(\"MESSA;{};{};{}\".format(self.room, self.name, message).encode())", "title": "" }, { "docid": "8c8314a5abd5df87b8df3f9524fc8dd1", "score": "0.59033483", "text": "def send_message (request):\r\n\r\n if not \"FLAG\" in request.POST or not \"PARENTS\" in request.POST or not \"STUDENTS\" in request.POST or not \"MESSAGE\" in request.POST: # making sure the user is not accessing the url directly\r\n return HttpResponseForbidden (\"<title>Code እምቢየው</title><h1 style='font-weight:normal;'>Error: Cannot access this page directly</h1>\")\r\n\r\n if request.POST [\"FLAG\"] == \"SMS\":\r\n PARENTS = Parent.objects.filter(id__in = 
request.POST [\"PARENTS\"].split(\"_\"))\r\n STUDENTS = Student.objects.filter(id__in = request.POST [\"STUDENTS\"].split(\"_\"))\r\n\r\n ERROR_FLAG = False\r\n for parent in PARENTS:\r\n MESSAGE = \"\"\r\n PARENT_CHILDREN = parent.student_set.all()\r\n\r\n for CHILD in PARENT_CHILDREN:\r\n if CHILD in STUDENTS:\r\n MESSAGE += CHILD.first_name +\" \"+ CHILD.father_name +\", \"\r\n\r\n MESSAGE = MESSAGE[:-2] +\": \"+ request.POST [\"MESSAGE\"]\r\n\r\n try:\r\n api.send_sms(body=MESSAGE, from_phone=getattr(settings, 'TWILIO_NUMBER', ''), to=[str(parent.phone_number)])\r\n except:\r\n messages.add_message (request, messages.ERROR, \"SMS message has not been sent, Please contact your SMS gateway provider\")\r\n ERROR_FLAG = True\r\n break\r\n\r\n if not ERROR_FLAG:\r\n messages.add_message (request, messages.INFO, \"SMS message has been sent successfully\")\r\n\r\n #TODO: Log\r\n return HttpResponseRedirect (\"/TheCondor/condor/student/\")\r\n\r\n elif request.POST [\"FLAG\"] == \"EMAIL\":\r\n PARENTS = Parent.objects.filter(id__in = request.POST [\"PARENTS\"].split(\"_\"))\r\n STUDENTS = Student.objects.filter(id__in = request.POST [\"STUDENTS\"].split(\"_\"))\r\n\r\n ERROR_FLAG = False\r\n for parent in PARENTS:\r\n MESSAGE = \"\"\r\n\r\n if len (parent.email) > 3:\r\n PARENT_CHILDREN = parent.student_set.all()\r\n\r\n for CHILD in PARENT_CHILDREN:\r\n if CHILD in STUDENTS:\r\n MESSAGE += CHILD.first_name +\" \"+ CHILD.father_name +\", \"\r\n\r\n MESSAGE = MESSAGE[:-2] + \": \" + request.POST [\"MESSAGE\"]\r\n\r\n try:\r\n send_mail(getattr(settings, 'EMAIL_SUBJECT', ''), MESSAGE, getattr(settings, 'EMAIL_FROM', ''), [parent.email], fail_silently = False)\r\n except:\r\n messages.add_message (request, messages.ERROR, \"Email message has not been sent, Please check your email settings\")\r\n ERROR_FLAG = True\r\n break\r\n\r\n if not ERROR_FLAG:\r\n messages.add_message (request, messages.INFO, \"Email message has been sent successfully\")\r\n\r\n #TODO: log\r\n return HttpResponseRedirect (\"/TheCondor/condor/student/\")\r\n\r\n elif request.POST [\"FLAG\"] == \"BOTH\":\r\n PARENTS = Parent.objects.filter(id__in = request.POST [\"PARENTS\"].split(\"_\"))\r\n STUDENTS = Student.objects.filter(id__in = request.POST [\"STUDENTS\"].split(\"_\"))\r\n\r\n MESSAGE_ADDED = False\r\n for parent in PARENTS:\r\n MESSAGE = \"\"\r\n PARENT_CHILDREN = parent.student_set.all()\r\n\r\n for CHILD in PARENT_CHILDREN:\r\n if CHILD in STUDENTS:\r\n MESSAGE += CHILD.first_name +\" \"+ CHILD.father_name +\", \"\r\n\r\n MESSAGE = MESSAGE[:-2] +\": \"+ request.POST [\"MESSAGE\"]\r\n\r\n try:\r\n api.send_sms(body=MESSAGE, from_phone=getattr(settings, 'TWILIO_NUMBER', ''), to=[str(parent.phone_number)])\r\n if not MESSAGE_ADDED:\r\n messages.add_message (request, messages.INFO, \"SMS message has been sent successfully\")\r\n MESSAGE_ADDED = True\r\n except:\r\n messages.add_message (request, messages.ERROR, \"Some SMS messages have not been sent, Please contact your SMS gateway provider\")\r\n break\r\n\r\n MESSAGE_ADDED = False\r\n for parent in PARENTS:\r\n MESSAGE = \"\"\r\n\r\n if len (parent.email) > 3:\r\n PARENT_CHILDREN = parent.student_set.all()\r\n\r\n for CHILD in PARENT_CHILDREN:\r\n if CHILD in STUDENTS:\r\n MESSAGE += CHILD.first_name +\" \"+ CHILD.father_name +\", \"\r\n\r\n MESSAGE = MESSAGE[:-2] + \": \" + request.POST [\"MESSAGE\"]\r\n\r\n try:\r\n send_mail(getattr(settings, 'EMAIL_SUBJECT', ''), MESSAGE, getattr(settings, 'EMAIL_FROM', ''), [parent.email], fail_silently = False)\r\n if not 
MESSAGE_ADDED:\r\n messages.add_message (request, messages.INFO, \"Email message has been sent successfully\")\r\n MESSAGE_ADDED = True\r\n except:\r\n messages.add_message (request, messages.ERROR, \"Email messages have not been sent\")\r\n break\r\n\r\n #TODO: log\r\n return HttpResponseRedirect (\"/TheCondor/condor/student/\")", "title": "" }, { "docid": "73187e26916c4422ef6296f44e69db18", "score": "0.58980674", "text": "def sendSmsEagle(smsEagle, text, number):\n try:\n baseUrl = \"http://\" + smsEagle[\"address\"] + \"/index.php/http_api/send_sms\"\n args = {\n \"login\": smsEagle[\"username\"],\n \"pass\": smsEagle[\"password\"],\n \"to\": number,\n \"message\": text,\n }\n except KeyError:\n logging.error(\"[smsLib.py] Error parsing args for smsEagle modem, cannot send sms\")\n logging.debug(\"Debug info:\", exc_info=True)\n return 1\n except TypeError:\n logging.error(\"[smsLib.py] Error in function args, cannot send sms\")\n logging.debug(\"Debug info:\", exc_info=True)\n return 1\n\n encodedArgs = urllib.parse.urlencode(args)\n url = baseUrl + \"?\" + encodedArgs\n with urllib.request.urlopen(url) as response:\n result = response.read().decode(\"utf-8\")\n\n logging.debug(\"sms url: %s\", url)\n logging.debug(\"result: %s\", result)\n\n if result.split(\";\")[0].strip() == \"OK\":\n r = 0\n else:\n r = 2\n return r", "title": "" }, { "docid": "b56e759d6d1ec4d1a017e2ae68d7ace2", "score": "0.5886853", "text": "def send_message(self, text):\n if self.chat_id is 0:\n data = {'chat_id': self.master_id, 'text': text}\n else:\n data = {'chat_id': self.chat_id, 'text': text}\n response = self.post_request(data, '', self.api['sendMessage'])\n return self.construct(response)", "title": "" }, { "docid": "0520e67761100ff275c66ddfb1912004", "score": "0.58856565", "text": "def send_sms(message, phone_numbers):\n\n ACCOUNT_SID = settings.TWILIO_ACC_SID\n AUTH_TOKEN = settings.TWILIO_ACC_AUTH_TOKEN\n NOTIFY_SERVICE_SID = settings.TWILIO_NOTIFY_SERVICE_SID\n\n client = Client(ACCOUNT_SID, AUTH_TOKEN)\n\n bindings = list(map(lambda number: json.dumps(\n {'binding_type': 'sms', 'address': number}), phone_numbers))\n\n try:\n client.notify.services(NOTIFY_SERVICE_SID).notifications.create(\n to_binding=bindings,\n body=message)\n except Exception as e:\n logging.warning(e)", "title": "" }, { "docid": "843659871533a4bc5ee7f8f1d616888f", "score": "0.58765244", "text": "def _send_message_helper(request):\n return self.queue.send_message(**request)", "title": "" }, { "docid": "44c6a50abb0f41e9174030345593263c", "score": "0.5866572", "text": "def send_sms(phone_number, message, sender_id):\n sns = boto3.client(\n 'sns',\n region_name=settings.AWS_REGION_NAME,\n aws_access_key_id=settings.AWS_ACCESS_KEY_ID,\n aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,\n )\n sns.publish(\n PhoneNumber=str(phone_number),\n Message=message,\n MessageAttributes={\n 'AWS.SNS.SMS.SenderID': {\n 'DataType': 'String',\n 'StringValue': (sender_id if sender_id else settings.AMAZON_SNS_SENDER_ID)\n },\n 'AWS.SNS.SMS.SMSType': {\n 'DataType': 'String',\n 'StringValue': 'Transactional'\n }\n }\n )", "title": "" }, { "docid": "a19d47303cf3a42eaa91a96f1cd0b17c", "score": "0.5864567", "text": "def sms(request, phone, url=None, user=None):\n\n # set phone number in session\n # this phone will be used for verification code\n request.session['phone'] = phone\n\n if user:\n request.session['user_id'] = user.id\n\n # send SMS to phone number\n api.phones.verification_start(phone, '+380', via='sms')\n # 
api.phones.verification_start(\"+380950968326\", '+380', via='sms')\n\n if url:\n return HttpResponseRedirect(url)\n else:\n return HttpResponseRedirect(reverse('main'))", "title": "" }, { "docid": "d3bd8bc9dd22ff99f4e9434092f7f8a7", "score": "0.58597946", "text": "def send_messages(self, messages):\n if not messages:\n return\n self._lock.acquire()\n\n # For a single Message object\n if isinstance(messages, Message):\n messages = [messages]\n\n # At least lets make sure we are being supplied with a list. We\n # can make sure the list contains Message objects later.\n else:\n assert isinstance(messages, list), 'If you are trying to send \\\n a single SMS, supply a Message object else supply a list \\\n (or tuple) of Message objects'\n\n sending_errors = 0\n \n try:\n # The try-except is nested to allow for\n # Python 2.4 support (Refs #12147)\n try:\n stream_created = self.open()\n for message in messages:\n # Check each element in the list to make sure its\n # a Message object.\n if isinstance(message, Message) and message.to:\n self.stream.write(render_message(message))\n self.stream.write('\\n')\n self.stream.write('-'*79)\n self.stream.write('\\n')\n self.stream.flush() # flush after each message\n else:\n self.stream.write('Message sending failed. Invalid message detected.')\n self.stream.write('\\n')\n self.stream.flush() # flush after each message\n sending_errors += 1\n if stream_created:\n self.close()\n except:\n if not self.fail_silently:\n raise\n finally:\n self._lock.release()\n return len(messages) - sending_errors", "title": "" }, { "docid": "9d83673ed7b2619ff0def63e41515188", "score": "0.5841163", "text": "def send_message(self, contact, headers, text_content, attachments):\n msg = \"To: %s\\n\\n%s\\n\" % (contact, text_content)\n \n if self.spool != '-':\n self.send(contact, msg)\n return True\n else:\n return False", "title": "" }, { "docid": "ed3c04dd6ac3a9fc650c127174c8cb0f", "score": "0.5840988", "text": "def respond(number, message):\n auth = authentication()\n number = number.replace('-', '')\n params = {'address': number, 'message': message}\n sms = req.post(SEND, auth=auth, params=params)\n return sms", "title": "" }, { "docid": "7cd4f1147476dc82b22f0c8e9628ff60", "score": "0.5834491", "text": "def _send_msg(self, message: bin, location: \"AbstractWorker\"):\n pass", "title": "" }, { "docid": "3b1bd99073245939cbcd6c10321da696", "score": "0.5831784", "text": "def send_text_message():\n \n phone_number = request.form.get(\"phone-number\") \n account_sid = os.environ[\"ACCOUNT_SID\"]\n auth_token = os.environ[\"AUTH_TOKEN\"] \n\n client = Client(account_sid, auth_token)\n\n message = client.messages.create(\n to=phone_number,\n from_=\"+19073121980\",\n body=\"\"\"If you need help, call 1-888-373-7888.\"\"\")\n\n print(message.sid)\n\n return \"Your message has been sent.\"", "title": "" }, { "docid": "9b855615505e83634e295eafbffddfcd", "score": "0.5815938", "text": "def send(self, message, callback=None):\n raise NotImplementedError", "title": "" }, { "docid": "a3c074c892cb876c279a2c489f40cf46", "score": "0.5808222", "text": "def message_sent(self, message, address, *args, **kwargs):", "title": "" }, { "docid": "aa7674503b3fdc3f43a2d9016ffb10e3", "score": "0.5801697", "text": "def send(self, data, **kwargs):\n super().send(data, None)", "title": "" }, { "docid": "5b74fd96e02f974a4286e1d1e03009d7", "score": "0.577865", "text": "def sms_to_tv():\n\tif request.method == 'GET':\n\t\treturn render_template(\"sms.html\", title = 'SMS')\n\telif request.method == 
'POST':\n\t\t#format date and time\n\t\trdate = datetime.datetime.strptime(request.form['rdate'], \"%m-%d-%Y\")\n\t\trtime = datetime.datetime.strptime(request.form['rtime'], \"%H.%M\")\n\t\terror_found = TvManager().post_sms(rdate.strftime(\"%Y-%m-%d\"), \\\n\t\t\t rtime.strftime(\"%H:%M:%S\"), \\\n\t\t\t request.form['rnumber'], \\\n\t\t\t request.form['rname'], \\\n\t\t\t request.form['snumber'], \\\n\t\t\t request.form['sname'], \\\n\t\t\t request.form['mbody'])\n\t\treturn 'Not OK' if error_found else 'OK'", "title": "" }, { "docid": "9da1f151e2a881bb8ca6db5bd493d2fb", "score": "0.5777506", "text": "def send(self, contact, msg):\n #outfile = os.tempnam(self.spool,'send_')\n outfile = tempfile.mkstemp(prefix='send_', dir=self.spool)\n f = open(outfile[1], \"w\")\n f.write(msg)\n f.close()\n os.chmod(f.name,0666)\n log.debug('sent SMS to %s: %s, file = %s' % (contact, msg, outfile))", "title": "" }, { "docid": "3b7ae7ca5188cf21b0a08fbf33e420ab", "score": "0.5770427", "text": "def test_mt_sms_singlepart_gsm(self):\n return self._setup_sms(set_system_model_gsm, RAT_GSM, self.phoneNumber,\n rand_ascii_str(SINGLE_PART_LEN),\n DIRECTION_MOBILE_TERMINATED)", "title": "" }, { "docid": "bd0936dbd301a9f8429a55d01148c701", "score": "0.5764815", "text": "def send(self,name,number,email,channel,amount,callback,description,cl_ref,sec_callback=\"\"):\n\n base_url = \"https://api.hubtel.com/v1/merchantaccount/merchants/{0}/send/mobilemoney\".format(self.merchant_id)\n payload = {\n \"RecipientName\": name,\"RecipientMsisdn\": number,\"CustomerEmail\": email,\"Channel\": channel,\"Amount\": amount,\n \"PrimaryCallbackUrl\": callback,\"SecondaryCallbackUrl\": sec_callback,\"Description\": description,\n \"ClientReference\": cl_ref}\n try:\n r = requests.post(base_url, headers=self.headers, data=payload)\n return r.json()\n except Exception as e:\n print(e)", "title": "" }, { "docid": "5bfa664283d2c9f7744724ebda6556cb", "score": "0.5757693", "text": "def send(self, from_addr, recipients, message):", "title": "" }, { "docid": "ff5a112d5fe8192bf3c5b23851e9f8d5", "score": "0.57515854", "text": "def sendText(self, text):\n self.person.sendMessage(text, None)", "title": "" }, { "docid": "a844afc00f70e5604ff59e5e3d1f70fe", "score": "0.5739191", "text": "def send(self, message):\n self.bot.sendline(message)", "title": "" } ]
681fc39a4870de1c1a271867c75b5d32
Although this is a stateless net, we use the "state" parameter to pass in the previous labels, unlike LSTMs where state would represent hidden activations of the network.
[ { "docid": "094a33d07d0505c0ce9c03f5b164856b", "score": "0.0", "text": "def forward(\n self, y: Optional[torch.Tensor] = None, state: Optional[List[torch.Tensor]] = None,\n ):\n outs = []\n\n [B, U] = y.shape\n appended_y = y\n if state != None:\n appended_y = torch.concat([state[0], y], axis=1)\n context_size = appended_y.shape[1]\n\n if context_size < self.context_size:\n # This is the case at the beginning of an utterance where we have\n # seen less words than context_size. In this case, we need to pad\n # it to the right length.\n padded_state = torch.ones([B, self.context_size], dtype=torch.long, device=y.device) * self.blank_idx\n padded_state[:, self.context_size - context_size :] = appended_y\n elif context_size == self.context_size + 1:\n padded_state = appended_y[:, 1:]\n # This is the case where the previous state already has reached context_size.\n # We need to truncate the history by omitting the 0'th token.\n else:\n # Context has just the right size. Copy directly.\n padded_state = appended_y\n\n for i in range(self.context_size):\n out = self.embeds[i](padded_state[:, self.context_size - 1 - i : self.context_size - i])\n outs.append(out)\n else:\n for i in range(self.context_size):\n out = self.embeds[i](y)\n\n if i != 0:\n out[:, i:, :] = out[\n :, :-i, :\n ].clone() # needs clone() here or it might complain about src and dst mem location have overlaps.\n out[:, :i, :] *= 0.0\n outs.append(out)\n\n out = self.dropout(torch.concat(outs, axis=-1))\n out = self.norm(out)\n\n state = None\n if y is not None:\n state = [appended_y[:, appended_y.shape[1] - self.context_size + 1 :]]\n return out, state", "title": "" } ]
[ { "docid": "61360cec1be5e56379cec7fe0daf3464", "score": "0.6701732", "text": "def transfer_states_to_neuron(self):", "title": "" }, { "docid": "4c02c69e0c8275194edcd4056c1c0d8f", "score": "0.6692958", "text": "def __setstate__(self, state):\n self._weights, self._biases, self._layer_stack, self._cost, self._inputs, \\\n self._expected_outputs, self.__trainer_type, self.__train_params, \\\n self._global_step, self._srng, self._training, \\\n self._used_training, self._intermediate_activations, \\\n self._patch_separation, self._layers, self.__train_kw_params = state", "title": "" }, { "docid": "a5133e4ab9ded41f16bbeda49ee1c18b", "score": "0.6688391", "text": "def transfer_states_from_neuron(self):", "title": "" }, { "docid": "e1f148a39cf3a6bd930a4cea2f92789c", "score": "0.66281766", "text": "def process_state_for_network(self, state):\n pass", "title": "" }, { "docid": "e77ac2ab8cce21c03528bace401359af", "score": "0.64914274", "text": "def Lstm_b(self, previous_hidden_state, x):\n \n r = tf.sigmoid(tf.matmul(x, self.Wr1) + tf.matmul(previous_hidden_state, self.Ur1) + self.br1)\n \n # U Gate\n u = tf.sigmoid(tf.matmul(x, self.Wu1) + tf.matmul(previous_hidden_state, self.Uu1) + self.bu1)\n \n # Final Memory cell\n c = tf.tanh(tf.matmul(x, self.Wh1) + tf.matmul( tf.multiply(r, previous_hidden_state), self.Uh1) + self.bh1)\n \n # Current Hidden state\n current_hidden_state = tf.multiply( (1 - u), previous_hidden_state ) + tf.multiply( u, c )\n \n return current_hidden_state", "title": "" }, { "docid": "9bce0ba7ee6029cd86cb3857b19c5ca8", "score": "0.6451758", "text": "def network_process_state(self, state):\n return state", "title": "" }, { "docid": "dacf48c136a8754496f7d91792b5cd1f", "score": "0.6386683", "text": "def Lstm_f(self, previous_hidden_state, x):\n \n # R Gate\n r = tf.sigmoid(tf.matmul(x, self.Wr) + tf.matmul(previous_hidden_state, self.Ur) + self.br)\n \n # U Gate\n u = tf.sigmoid(tf.matmul(x, self.Wu) + tf.matmul(previous_hidden_state, self.Uu) + self.bu)\n \n # Final Memory cell\n c = tf.tanh(tf.matmul(x, self.Wh) + tf.matmul( tf.multiply(r, previous_hidden_state), self.Uh) + self.bh)\n \n # Current Hidden state\n current_hidden_state = tf.multiply( (1 - u), previous_hidden_state ) + tf.multiply( u, c )\n \n return current_hidden_state", "title": "" }, { "docid": "9724d7b429bed802ad04382910ae2287", "score": "0.6384829", "text": "def state( self, label ):\n if label in self.states:\n raise ValueError( \"node's label {} already in use\".format( label ) )\n s = spa.State( vocab=self.voc, subdimensions=1, label=label )\n self.states[ label ] = s", "title": "" }, { "docid": "e08f7cfce0f18c14a3c7a3d8781b48a0", "score": "0.63333327", "text": "def forward(self, state):\n x = reduce(lambda a,b: F.relu(b(a)), self.fcls[:-1], state)\n return self.fcls[-1](x)", "title": "" }, { "docid": "c13a241ae6d3c1d5508b173ec414cf57", "score": "0.6295892", "text": "def update_state(state, kernel, learning_rate, x_i, y_i):\n # *** START CODE HERE ***\n beta = learning_rate * (y_i - predict(state, kernel, x_i))\n state.append((beta, x_i))\n # *** END CODE HERE ***", "title": "" }, { "docid": "0e08f2c4f9093e083de5b86309e30c51", "score": "0.6286899", "text": "def process_state_for_network(self, state):\n return state", "title": "" }, { "docid": "7123764ca2fff21659aa9c44fa5a1e8b", "score": "0.6277504", "text": "def run(self, states):\n \"*** YOUR CODE HERE ***\"\n layer_input = states\n layer_output = None\n for i in range(len(self.parameters)-2):\n term = self.parameters[i]\n # weight node\n if i % 2 == 
0:\n layer_output = nn.Linear(layer_input,term)\n # bias node\n else:\n layer_output = nn.ReLU(nn.AddBias(layer_output,term))\n layer_input = layer_output\n\n layer_output = nn.AddBias(nn.Linear(layer_output,self.parameters[-2]),self.parameters[-1])\n return layer_output", "title": "" }, { "docid": "6835823789d4d020679cd5e94e9aa488", "score": "0.6271908", "text": "def __getstate__(self):\n state = (self._weights, self._biases, self._layer_stack, self._cost,\n self._inputs, self._expected_outputs, self.__trainer_type,\n self.__train_params, self._global_step, self._srng,\n self._training, self._used_training,\n self._intermediate_activations,\n self._patch_separation, self._layers, self.__train_kw_params)\n return state", "title": "" }, { "docid": "c7df03afec8238bc1fc25e64005df13f", "score": "0.6251576", "text": "def process_state_for_network(self, state):\n if self._flag_start_net:\n self._stacked_return_net = np.zeros((state.size\n ,self._history_length));\n self._stacked_return_net[:,-1] = state;\n self._flag_start_net = False;\n else:\n for i in range(self._history_length - 1):\n self._stacked_return_net[:,i] = \\\n self._stacked_return_net[:,i + 1];\n self._stacked_return_net[:,-1] = state;\n \n return np.copy(self._stacked_return_net.reshape((1,) \n + self._stacked_return_net.shape));", "title": "" }, { "docid": "ed930353d37eec2f48230961293487f0", "score": "0.6245084", "text": "def forward(self, state):\n # fully connected model\n x = F.relu(self.l1(state))\n x = F.relu(self.l2(x))\n x = torch.tanh(self.l3(x))\n return x", "title": "" }, { "docid": "b006dfa2891392b1de5e2f9be406d9f2", "score": "0.6227533", "text": "def forward(self, state):\r\n\r\n x = self.bn1(F.relu(self.conv1(state)))\r\n x = self.bn2(F.relu(self.conv2(x)))\r\n x = self.bn3(F.relu(self.conv3(x)))\r\n x = x.view(-1, 64*7*7*4)\r\n x = F.relu(self.fc1(x))\r\n x = self.fc2(x)\r\n\r\n return x", "title": "" }, { "docid": "13f25b92e206975c9b777abc2cae83c2", "score": "0.62236553", "text": "def _label_nodes(self):\n attributes = {}\n for node in self.graph.nodes:\n state = self._get_state(node)\n if node == self.start_state[0]:\n attributes[node] = {'state': state, 'label': \"((%s)) %s\" % (\n str(node), state)}\n else:\n attributes[node] = {'state': state, 'label': \"(%s) %s\" % (\n str(node), state)}\n\n networkx.set_node_attributes(self.graph, attributes)", "title": "" }, { "docid": "a8b4c303d1aa0ed1120066a12e57a5e6", "score": "0.6196896", "text": "def forward(\n self,\n prev_state: torch.Tensor,\n actions: torch.Tensor,\n prev_belief: torch.Tensor,\n observations: Optional[torch.Tensor] = None,\n nonterminals: Optional[torch.Tensor] = None,\n ) -> List[torch.Tensor]:\n # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)\n T = actions.size(0) + 1\n beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = (\n [torch.empty(0)] * T,\n [torch.empty(0)] * T,\n [torch.empty(0)] * T,\n [torch.empty(0)] * T,\n [torch.empty(0)] * T,\n [torch.empty(0)] * T,\n [torch.empty(0)] * T,\n )\n beliefs[0], posterior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state\n\n # Loop over time sequence\n for t in range(T - 1):\n # Select appropriate previous state\n _state = prior_states[t] if observations is None else posterior_states[t]\n _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal\n # Compute belief (deterministic hidden state)\n hidden = 
self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))\n # h_t = f(h_{t-1}, s_{t-1}, a_{t-1})\n beliefs[t + 1] = self.rnn(hidden, beliefs[t])\n\n # Compute state prior by applying transition dynamics\n # s_t ~ p(s_t | h_t) (Stochastic State Model)\n prior_states[t + 1] = self.stochastic_state_model.sample({\"h_t\": beliefs[t + 1]}, reparam=True)[\"s_t\"]\n loc_and_scale = self.stochastic_state_model(h_t=beliefs[t + 1])\n prior_means[t + 1], prior_std_devs[t + 1] = loc_and_scale[\"loc\"], loc_and_scale[\"scale\"]\n\n if observations is not None:\n # Compute state posterior by applying transition dynamics and using current observation\n # s_t ~ q(s_t | h_t, o_t) (Observation Model)\n t_ = t - 1 # Use t_ to deal with different time indexing for observations\n posterior_states[t + 1] = self.obs_encoder.sample({\"h_t\": beliefs[t + 1], \"o_t\": observations[t_ + 1]}, reparam=True)[\"s_t\"]\n loc_and_scale = self.obs_encoder(h_t=beliefs[t + 1], o_t=observations[t_ + 1])\n posterior_means[t + 1] = loc_and_scale[\"loc\"]\n posterior_std_devs[t + 1] = loc_and_scale[\"scale\"]\n\n # Return new hidden states\n hidden = [\n torch.stack(beliefs[1:], dim=0),\n torch.stack(prior_states[1:], dim=0),\n torch.stack(prior_means[1:], dim=0),\n torch.stack(prior_std_devs[1:], dim=0),\n ]\n if observations is not None:\n hidden += [torch.stack(posterior_states[1:], dim=0), torch.stack(posterior_means[1:], dim=0), torch.stack(posterior_std_devs[1:], dim=0)]\n return hidden", "title": "" }, { "docid": "9dcdc268e065778a44cf431a2a618c9c", "score": "0.6174504", "text": "def forward(self, inputs, initial_state):\n\n\n T = inputs.size(1)\n hidden_states = []\n\n if len(initial_state) == 0:\n initial_state = [[] for i in range(self.blocks)]\n\n inputs = inputs.transpose(0, 1) # make sure the input is S, B, C_new, H_new, W_new\n\n for i in range(1, self.blocks + 1):\n cur_convrelu = getattr(self, 'convrelu' + str(i))\n inputs = cur_convrelu(inputs)\n\n cur_rnn = getattr(self, 'rnn' + str(i))\n\n inputs, state_stage = cur_rnn(seq_len=T, inputs=inputs, initial_state=initial_state[i-1])\n hidden_states.append(state_stage)\n\n return tuple(hidden_states)", "title": "" }, { "docid": "55094b9ed0f4de655f46f83e38b991d3", "score": "0.61391026", "text": "def forward(self, input_variables):\n\n # initialize State(t-1) if it is not provided\n if input_variables[0] is None:\n self.batch_size = input_variables[1].value.shape[0]\n\n prev_state_variable = Variable(np.zeros((self.batch_size, self.hidden_size)))\n input_variables[0] = prev_state_variable\n\n # forward registration\n super(RNNCell, self).forward(input_variables)\n\n # remember variables\n self.prev_state_variable = self.input_variables[0]\n self.input_variable = self.input_variables[1]\n\n # input to hidden\n state_value = np.dot(self.input_variable.value, self.U.value)\n\n # hidden to hidden\n state_value += np.dot(self.prev_state_variable.value, self.W.value)\n\n # nonlinear\n state_value = np.tanh(state_value)\n\n # create variable\n self.state_variable = Variable(state_value)\n return self.state_variable", "title": "" }, { "docid": "5d124b3248e5e37be792d18ccc4b2514", "score": "0.6133859", "text": "def predict(network, state):\n return network.predict(to_array(state))[0]", "title": "" }, { "docid": "c200da58d4711d088a1aaea75ff09b0f", "score": "0.61123246", "text": "def forward(self, state):\n # Define the hidden layers\n hidden = F.relu(self.fc1(state))\n hidden = F.relu(self.fc2(hidden))\n hidden = F.relu(self.fc3(hidden))\n\n return 
self.fc4(hidden)", "title": "" }, { "docid": "53a1fe3e04e8bbae744fba0f8ff31ac8", "score": "0.611199", "text": "def pool_hidden_state(self, last_hidden_state):\n last_hidden_state = last_hidden_state[0]\n mean_last_hidden_state = torch.mean(last_hidden_state, 1)\n return mean_last_hidden_state", "title": "" }, { "docid": "ce95f9f58db2ccab9858c1f978a7e91a", "score": "0.6098959", "text": "def call(self, inputs, state):\n num_proj = self._num_units if self._num_proj is None else self._num_proj\n sigmoid = math_ops.sigmoid\n\n if self._state_is_tuple:\n (c_prev, m_prev) = state\n else:\n c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])\n m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])\n\n input_size = inputs.get_shape().with_rank(2)[1]\n if input_size.value is None:\n raise ValueError(\"Could not infer input size from inputs.get_shape()[-1]\")\n \n # No feedback, if desired; also, gcnn/cnn do not have feedback\n if self._no_feedback or self._gate_mod in [\"gcnn\", \"cnn\"]:\n m_prev = tf.zeros(m_prev.shape)\n\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n if self._ngram:\n lstm_matrix = inputs + math_ops.matmul(m_prev, self._kernel)\n else:\n lstm_matrix = math_ops.matmul(\n array_ops.concat([inputs, m_prev], 1), self._kernel)\n lstm_matrix = nn_ops.bias_add(lstm_matrix, self._bias)\n\n i, j, f, o = array_ops.split(\n value=lstm_matrix, num_or_size_splits=4, axis=1)\n # Diagonal connections\n if self._use_peepholes:\n c = (sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * c_prev +\n sigmoid(i + self._w_i_diag * c_prev) * self._activation(j))\n elif self._gate_mod == \"lstm\":\n c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *\n self._activation(j))\n elif self._gate_mod == \"rkm_lstm\":\n c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * j)\n elif self._gate_mod == \"rkm_cifg\":\n c = (sigmoid(f + self._forget_bias) * c_prev + (1 - sigmoid(f + self._forget_bias)) *j)\n elif self._gate_mod in [\"gated_linear\", \"linear\"]:\n# sigma2_f = 0.5\n# sigma2_i = 0.5\n# c = (sigma2_f * c_prev + sigma2_i * j)\n c = (self._sigma2_f * c_prev + self._sigma2_i * j) \n elif self._gate_mod in [\"gcnn\", \"cnn\"]:\n sigma2_i = 1\n c = sigma2_i * j\n else:\n raise NotImplementedError(\"Invalid gate_mod: {0}\".format(self._gate_mod))\n \n if self._layer_norm:\n c = tf.contrib.layers.layer_norm(c)\n \n if self._cell_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)\n # pylint: enable=invalid-unary-operand-type\n \n if self._use_peepholes:\n m = sigmoid(o + self._w_o_diag * c) * self._activation(c)\n elif self._gate_mod == \"lstm\":\n m = sigmoid(o) * self._activation(c)\n elif self._gate_mod in [\"rkm_lstm\", \"rkm_cifg\", \"gated_linear\", \"gcnn\"]:\n m = sigmoid(o) * c\n elif self._gate_mod in [\"linear\", \"cnn\"]:\n m = self._activation(c)\n else:\n raise NotImplementedError(\"Invalid gate_mod: {0}\".format(self._gate_mod)) \n \n if self._num_proj is not None:\n m = math_ops.matmul(m, self._proj_kernel)\n\n if self._proj_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)\n # pylint: enable=invalid-unary-operand-type\n\n new_state = (LSTMStateTuple(c, m) if self._state_is_tuple else\n array_ops.concat([c, m], 1))\n return m, new_state", "title": "" }, { "docid": "55ab243e2488c0bbaacfa0a60f8be23e", "score": "0.6093648", "text": "def label_transition(self, obs, 
obs_next):\n instructions = self.label_batch_transition([(obs, obs_next)])\n return instructions[0]", "title": "" }, { "docid": "9a860236d28e26384d5c9ca3fce399de", "score": "0.6085672", "text": "def get_state(self, state):\n\n h5_trainer = state.require_group('trainer')\n h5_trainer.attrs['epoch_number'] = self.epoch_number\n h5_trainer.attrs['update_number'] = self.update_number\n if 'cost_history' in h5_trainer:\n h5_trainer['cost_history'].resize(self._cost_history.shape)\n h5_trainer['cost_history'][:] = self._cost_history\n else:\n h5_trainer.create_dataset(\n 'cost_history', data=self._cost_history, maxshape=(None,),\n chunks=(1000,))\n\n if self._network is not None:\n self._network.get_state(state)\n self._training_iter.get_state(state)\n if self._optimizer is not None:\n self._optimizer.get_state(state)", "title": "" }, { "docid": "064da09b509e31dd0a604054e73face7", "score": "0.6083095", "text": "def forward_state(self, sequence, state=None):\n if state is None:\n state = self.initial_state(sequence.size(0))\n\n data = self.input_layer(sequence)\n\n state_outputs = []\n\n for layer_length, layer in zip(self.hidden_layers, self.lstm_layers):\n # Partition hidden state, for each layer we have layer_length of h state and layer_length of c state\n current_state = state[:, :, :layer_length * 2]\n state = state[:, :, 2 * layer_length:]\n\n # Split into h and c state\n current_h = current_state[:, :, :layer_length]\n current_c = current_state[:, :, layer_length:]\n\n # Propagate through the LSTM state\n data, (new_h, new_c) = layer(data, (current_h, current_c))\n\n state_outputs.append(new_h)\n state_outputs.append(new_c)\n\n output_data = self.output_activation(self.output_layer(data))\n\n concatenated_hidden_output = torch.cat(state_outputs, dim=2)\n\n return output_data, concatenated_hidden_output", "title": "" }, { "docid": "1466d798b3b026104062598959d4a278", "score": "0.60764307", "text": "def forward(self, state):\n x_1 = self.fully_connected_1(state)\n x_relu_1 = F.relu(x_1)\n x_2 = self.fully_connected_2(x_relu_1)\n x_relu_2 = F.relu(x_2)\n action_values = self.fully_connected_3(x_relu_2)\n return action_values", "title": "" }, { "docid": "83feda8329e3a18d230e2c9177a0431e", "score": "0.6067058", "text": "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return F.softmax(self.fc5(x), dim=0)", "title": "" }, { "docid": "9b185be5ffe395b114a54b952a8fc48f", "score": "0.6059747", "text": "def set_state(self, state):\n\n # set initial position\n self.x = state[2]\n self.y = state[3]\n\n # set initial position\n f_ = self.branin()/10\n\n # init state description\n # step 0\n self.branin_step(np.array([0,0]))\n f_ = self.branin()/10\n self.prev_loss = 30 * f_ /(30+abs(f_))\n self.prev_unscaled = self.unscaled\n\n # step 0.1\n self.branin_step(np.array([0.1,0.1]))\n f_ = self.branin()/10\n self.state[0] = np.sign(self.prev_unscaled - f_) * np.min([np.abs(self.prev_unscaled - f_),30])\n self.prev_loss = 30 * f_ /(30+abs(f_))\n self.prev_unscaled = self.unscaled\n\n # step -0.1\n out = self.branin_step(np.array([-0.1,-0.1]))\n f_ = self.branin()/10\n self.state[0] = np.sign(self.prev_unscaled - f_) * np.min([np.abs(self.prev_unscaled - f_),30])\n self.prev_loss = out\n self.prev_unscaled = self.unscaled\n\n # reset counter\n self.count = 0\n return np.array(self.state)", "title": "" }, { "docid": "cf8140d3919fdcde9cfbe370913d0805", "score": "0.60529464", "text": "def forward(self, state, action):\n # fully connected model\n x = 
F.relu(self.l1(torch.cat([state, action], dim=-1)))\n        x = F.relu(self.l2(x))\n        x = self.l3(x)\n        return x", "title": "" }, { "docid": "f79b76ac7174594616f1d58679966b83", "score": "0.6044524", "text": "def forward(self, state):\n        x = F.relu(self.bn1(self.conv1(state)))\n        x = F.relu(self.bn2(self.conv2(x)))\n        x = F.relu(self.bn3(self.conv3(x)))\n        x = x.reshape(x.size(0),-1)\n\n        x = F.relu(self.fc1(x))\n        x = F.relu(self.fc2(x))\n        x = self.fc3(x)\n        return x", "title": "" }, { "docid": "825a0c854df442517b43d8cb2e3a579d", "score": "0.6041739", "text": "def forward(self, inputs, initial_state):\n\n        inputs = inputs.transpose(0, 1)  # to S, B, C, H, W\n        T = inputs.size(0)\n        hidden_states = []\n\n        if len(initial_state) == 0:\n            initial_state = [[] for i in range(self.blocks)]\n\n        for i in range(1, self.blocks + 1):\n            cur_rnn = getattr(self, 'rnn' + str(i))\n            inputs, state_stage = cur_rnn(seq_len=T, inputs=inputs, initial_state=initial_state[i-1])\n            hidden_states.append(state_stage)\n\n        return tuple(hidden_states)", "title": "" }, { "docid": "84ef624c65e6370e8ee1d182e20b30d7", "score": "0.6038964", "text": "def target_state(self, s):\n        raise NotImplementedError()", "title": "" }, { "docid": "d83b51fa507bf95bf84e82b312ed893c", "score": "0.6012603", "text": "def forward(self, state):\n        x = F.relu(self.fc1(state))\n        x = F.relu(self.fc2(x))\n        return self.fc3(x)", "title": "" }, { "docid": "31d3dfc7a7de7c748f23536c44355f1b", "score": "0.6007137", "text": "def train(self, state, target):\n        self.sess.run(\n            self.training_step,\n            feed_dict={self.State: state, self.Target: target})", "title": "" }, { "docid": "d6b689e6fa7cba6d5fa16a5d5aa6b6a1", "score": "0.5997748", "text": "def forward(self, state):\n        return self.fc3(F.relu(self.fc2(F.relu(self.fc1(state)))))", "title": "" }, { "docid": "d22f39219242696d247b2e5520e3994d", "score": "0.59975463", "text": "def forward(self, state):\n        x = F.relu(self.fc1(state))\n        x = F.relu(self.fc2(x))\n        return self.fc3(x)", "title": "" }, { "docid": "5cf0e48445df1f63adb705404ebdf004", "score": "0.59914356", "text": "def forward(self, state):\n\n        x = F.relu(self.fc1(state))\n        x = F.relu(self.fc2(x))\n        return self.fc3(x)", "title": "" }, { "docid": "3af65ca14f2bd37c834e7e01fe044f26", "score": "0.5977803", "text": "def predict(self, state):\n        return self.sess.run(\n            self.y_, feed_dict={self.State: state})", "title": "" }, { "docid": "921df8851728b1a4f54c959ead0a1ce6", "score": "0.59723866", "text": "def predict(self, state):\r\n        return self.sess.run(self.out, feed_dict={\r\n            self.state: state\r\n        })", "title": "" }, { "docid": "3e992f2e1dd9dba647b9e816e39465ac", "score": "0.59660894", "text": "def goto (self, state) :\n        if state is None or state in self._removed :\n            state = self.current()\n        if state in self._marking :\n            if self._current != state :\n                self._current = state\n                self.net.set_marking(self._marking[state])\n        else :\n            raise ValueError(\"unknown state\")", "title": "" }, { "docid": "77750c43c2aa2543359a58c79dc543f4", "score": "0.59599423", "text": "def forward(self, state):\n\n        x = F.relu(self.linear1(state))\n        x = F.relu(self.linear2(x))\n        value = self.linear3(x)\n\n        return value", "title": "" }, 
{ "docid": "c7425f75fae56ac595ddc05046ebad3a", "score": "0.59494215", "text": "def __call__(self, inputs, state, scope=None):\n with tf.variable_scope(scope or type(self).__name__): # \"CustomLSTMCell\"\n c, h = state\n h *= self._dropout_mask\n concat = projection(tf.concat([inputs, h], 1), 3 * self.output_size, initializer=self._initializer)\n i, j, o = tf.split(concat, num_or_size_splits=3, axis=1)\n i = tf.sigmoid(i)\n new_c = (1 - i) * c + i * tf.tanh(j)\n new_h = tf.tanh(new_c) * tf.sigmoid(o)\n new_state = tf.contrib.rnn.LSTMStateTuple(new_c, new_h)\n return new_h, new_state", "title": "" }, { "docid": "cc6e30a77f5d02ebf7c805dc1172d274", "score": "0.59420246", "text": "def _network_template(self, state):\n net = tf.cast(state, tf.float32)\n\n logger.info('state {}'.format(state.get_shape()))\n logger.info('net {}'.format(net.get_shape()))\n\n net = mlp(num_layers=2, num_hidden=64, activation=tf.tanh, layer_norm=False)(net)\n q_values = slim.fully_connected(net, self.num_actions, activation_fn=None)\n\n return self._get_network_type()(q_values)", "title": "" }, { "docid": "b6a0ead9b03068e96de466495d3124a0", "score": "0.5940925", "text": "def forward(self, inputs, states):\n tick, hx, cx = states\n hx[tick], cx[tick] = self.lstm(inputs, (hx[tick], cx[tick]))\n tick = (tick + 1) % self.r\n out = sum(hx) / self.r # TODO verify that network output is mean of hidden states\n return out, (tick, hx, cx)", "title": "" }, { "docid": "1c2dee259cbfc469b4d45552eb97f448", "score": "0.59408903", "text": "def getNetState(self):\n if not self.useSeqLength:\n return self.netState[-1]\n out = mx.nd.zeros(shape = (len(self.netState),) + self.netState[0].shape)\n for i in range(len(self.netState)):\n out[i,:,:] = self.netState[i]\n \n return out", "title": "" }, { "docid": "d7535604ba2b7bfe3ff14eb5f54efdc3", "score": "0.59287095", "text": "def set_state(self, state):", "title": "" }, { "docid": "fc82a6ee9cfcc24ef1f1f2554b87b31d", "score": "0.59100175", "text": "def __call__(self, inputs, state, scope=None):\n with vs.variable_scope(scope or type(self).__name__):\n c1, c2, h1, h2 = state\n\n # change bias argument to False since LN will add bias via shift\n concat = _linear([inputs, h1, h2], 5 * self._num_units, False)\n\n i, j, f1, f2, o = array_ops.split(concat, 5, 1)\n\n # add layer normalization to each gate\n i = ln(i, scope='i/')\n j = ln(j, scope='j/')\n f1 = ln(f1, scope='f1/')\n f2 = ln(f2, scope='f2/')\n o = ln(o, scope='o/')\n\n new_c = (c1 * nn.sigmoid(f1 + self._forget_bias) +\n c2 * nn.sigmoid(f2 + self._forget_bias) +\n nn.sigmoid(i) * self._activation(j))\n\n # add layer_normalization in calculation of new hidden state\n new_h = self._activation(ln(new_c, scope='new_h/')) * nn.sigmoid(o)\n new_state = rnn.LSTMStateTuple(new_c, new_h)\n\n return new_h, new_state", "title": "" }, { "docid": "f5c5f66e8bc6a73b3519a1ca1bb533d0", "score": "0.59081954", "text": "def set_state(self, **labels):\n\n self._set_state(**labels)", "title": "" }, { "docid": "ccdb6edcc47d2928dfc8e60be48d7b52", "score": "0.5907913", "text": "def state_transition(self, state):\n self.state = state", "title": "" }, { "docid": "ccdb6edcc47d2928dfc8e60be48d7b52", "score": "0.5907913", "text": "def state_transition(self, state):\n self.state = state", "title": "" }, { "docid": "9554867463f78b77244f68f9b8ea44d0", "score": "0.59032995", "text": "def predict_target(self, state):\r\n return self.sess.run(self.target_out, feed_dict={\r\n self.target_state: state\r\n })", "title": "" }, { "docid": "e04b212402094d51a371905b586370c4", 
"score": "0.58976114", "text": "def on_train_end(self, nn_state):\n pass", "title": "" }, { "docid": "25318a1af518697a306f26c8401d6efd", "score": "0.5890535", "text": "def forward(self, state):\n x = F.relu(self.linear1(state))\n x = F.relu(self.linear2(x))\n x = torch.tanh(self.linear3(x))\n return x", "title": "" }, { "docid": "3ddb905a66956247d698b75dcb3c2cf2", "score": "0.58845687", "text": "def create_network(self): \n state_input = tf.placeholder(tf.float32, [None] + self.input_shape)\n net = tflearn.fully_connected(state_input, 100, activation='relu')\n net = tflearn.fully_connected(net, 50, activation ='relu')\n # net = tflearn.fully_connected(net, 25, activation='relu')\n output = tflearn.fully_connected(net, self.output_size, activation = 'linear')\n return state_input, output", "title": "" }, { "docid": "b72d3115d1ca99505abcba59bb8fd612", "score": "0.58755434", "text": "def __call__(self,inputs,state,scope=None):\r\n with tf.variable_scope(scope or type(self).__name__): # \"BasicLSTMCell\"\r\n # Parameters of gates are concatenated into one multiply for efficiency.\r\n if self._state_is_tuple:\r\n c, h = state\r\n else:\r\n c, h = tf.split(1, 2, state)\r\n concat = tf.layers.dense(tf.concat([inputs, h],axis=1), 4 * self._num_units)\r\n\r\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\r\n i, j, f, o = tf.split(concat, 4, 1)\r\n\r\n i=tf.layers.dense(i,self._num_units)#, activation=tf.nn.relu)\r\n #j=tf.layers.dense(j,self._num_units)#, activation=tf.nn.relu)\r\n f=tf.layers.dense(f,self._num_units)#, activation=tf.nn.relu)\r\n o=tf.layers.dense(o,self._num_units)#, activation=tf.nn.relu)\r\n\r\n #i=tf.layers.dense(i,self._num_units)\r\n #j=tf.layers.dense(j,self._num_units)\r\n #f=tf.layers.dense(f,self._num_units)\r\n #o=tf.layers.dense(o,self._num_units)\r\n\r\n new_c = (c * tf.sigmoid(f) + tf.sigmoid(i) *\r\n self._activation(j))\r\n new_h = self._activation(new_c) * tf.sigmoid(o)\r\n\r\n if self._state_is_tuple:\r\n new_state = rnn.LSTMStateTuple(new_c, new_h)\r\n else:\r\n new_state = tf.concat(1, [new_c, new_h])\r\n return new_h, new_state", "title": "" }, { "docid": "c96cef6f411af97c74450b6678540011", "score": "0.58657706", "text": "def target_state(self, s):\n # YOUR CODE HERE\n return np.asarray([s, 0., 0.])", "title": "" }, { "docid": "bdcb506fdffa2c1287c22da38a45cd70", "score": "0.58638835", "text": "def _compute_state(self):", "title": "" }, { "docid": "eea3aef65788e4e00eeedeb23150e3d8", "score": "0.586287", "text": "def reset_states(self):\n self.true_positives.assign(tf.zeros((self.num_classes), self.dtype))\n self.false_positives.assign(tf.zeros((self.num_classes), self.dtype))\n self.false_negatives.assign(tf.zeros((self.num_classes), self.dtype))\n self.true_negatives.assign(tf.zeros((self.num_classes), self.dtype))", "title": "" }, { "docid": "49cdfe0aa512cba753ccc82900d58da5", "score": "0.58626676", "text": "def state(self, t=None):\n pass", "title": "" }, { "docid": "57df1f188341cd3940a17fadc5ffc3e9", "score": "0.58486396", "text": "def __call__(self,inputs,state,scope=None):\r\n with tf.variable_scope(scope or type(self).__name__): # \"BasicLSTMCell\"\r\n # Parameters of gates are concatenated into one multiply for efficiency.\r\n if self._state_is_tuple:\r\n c, h = state\r\n else:\r\n c, h = tf.split(1, 2, state)\r\n concat = tf.layers.dense(tf.concat([inputs, h],axis=1), 4 * self._num_units)\r\n\r\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\r\n i, j, f, o = tf.split(concat, 4, 1)\r\n\r\n new_c = (c * tf.sigmoid(f 
+ self._forget_bias) + tf.sigmoid(i) *\r\n                     self._activation(j))\r\n            new_h = self._activation(new_c) * tf.sigmoid(o)\r\n\r\n            if self._state_is_tuple:\r\n                new_state = rnn.LSTMStateTuple(new_c, new_h)\r\n            else:\r\n                new_state = tf.concat(1, [new_c, new_h])\r\n            return new_h, new_state", "title": "" }, { "docid": "c4bf7b046ecf49e04943e9280e6b3b4c", "score": "0.5844337", "text": "def state(self, new_st):\n        if new_st in self.__states.keys():\n            self._state = self.__states[new_st]", "title": "" }, { "docid": "e5c4095c87176ff0b06f9444cb4f95b1", "score": "0.58370966", "text": "def forward(self, state):\n        for layer in self.hidden_layers:\n            state = layer(state)\n            state = F.relu(state)\n        out = self.output_layer(state)\n        return F.tanh(out)", "title": "" }, { "docid": "76fa305b3d842f051b6d0c7064a6049d", "score": "0.5834474", "text": "def __call__(self,inputs,state,scope=None):\r\n        with tf.variable_scope(scope or type(self).__name__):  # \"BasicLSTMCell\"\r\n        # Parameters of gates are concatenated into one multiply for efficiency.\r\n            if self._state_is_tuple:\r\n                c, h = state\r\n            else:\r\n                c, h = tf.split(1, 2, state)\r\n\r\n            alphas = tf.matmul(inputs, self._v)\r\n            alphas = tf.add(alphas,self._v_bias)\r\n            alphas = tf.nn.softmax(alphas) * self.input_depth #softmax * inputdepth \r\n            inputs = tf.multiply(inputs,alphas)\r\n\r\n            gate_inputs = tf.matmul(\r\n                tf.concat([inputs, h], 1), self._kernel)\r\n            gate_inputs = tf.add(gate_inputs, self._bias)\r\n\r\n            # i = input_gate, j = new_input, f = forget_gate, o = output_gate\r\n            i, j, f, o = tf.split(gate_inputs, 4, 1)\r\n\r\n            new_c = (c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) *\r\n                     self._activation(j))\r\n            new_h = self._activation(new_c) * tf.sigmoid(o)\r\n\r\n            if self._state_is_tuple:\r\n                new_state = rnn.LSTMStateTuple(new_c, new_h)\r\n            else:\r\n                new_state = tf.concat(1, [new_c, new_h])\r\n            return new_h, new_state", "title": "" }, { "docid": "4febc72e1aec95abc376b805e96edbd4", "score": "0.5832719", "text": "def create_actor(self, state_input):\n        with tf.variable_scope(\"actor\"):\n            layer_1 = tf.layers.dense(state_input, self.n_hidden_1, activation=tf.nn.relu)\n            layer_2 = tf.layers.dense(layer_1, self.n_hidden_2, activation=tf.nn.relu)\n\n            last_layer = tf.layers.dense(layer_2, self.action_size)\n            self.action_prob = tf.nn.softmax(last_layer)", "title": "" }, { "docid": "b2340a2f77343a0a133227f296ce4b47", "score": "0.58309436", "text": "def forward(self, state):\n        x = state.view(-1, self.state_size)\n        x = F.relu(self.fc1(x))\n        x = self.fc2(x)\n        return x", "title": "" }, { "docid": "226d42a19c460fb43d6ccda7acb1b1bc", "score": "0.58294314", "text": "def predict(self, state):\n        with self.graph.as_default():\n            K.set_session(self.sess)\n            feed_dict = {self.infer_state: state}\n            return self.sess.run(self.infer_v, feed_dict)", "title": "" }, { "docid": "fd2d037e7a45d62a067fd560caacfa2a", "score": "0.582496", "text": "def bottle_hidden(linear, states):\n            size = states.size()\n            result = linear(states.view(-1, self.total_hidden_dim))\n            return F.relu(result).view(size)", "title": "" }, { "docid": "3a378dcee4aa5d84578bb7943052de38", "score": "0.581776", "text": "def on_train_start(self, nn_state):\n        pass", "title": "" }, { "docid": "2aa589e5c05f2dc6babde63166f176b3", "score": "0.5814781", "text": "def net1_state_trans_matrix():\n    
W_a = [\n [0., 1., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.]\n ]\n\n W_b = [\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 1., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 1., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.]\n ]\n\n W_c = [\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 1., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 1., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.]\n ]\n\n W_d = [\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0., 0.],\n [0., 0., 0., 0., 1., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.]\n ]\n\n W_e = [\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 1., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.]\n ]\n\n W_f = [\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 1., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.]\n ]\n\n W_g = [\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 1.],\n [0., 0., 0., 0., 0., 0., 0.]\n ]\n\n W = np.asarray([\n np.asarray(W_a), np.asarray(W_b), np.asarray(W_c),\n np.asarray(W_d), np.asarray(W_e), np.asarray(W_f),\n np.asarray(W_g)\n ])\n\n return W", "title": "" }, { "docid": "e730003cc954ad5110204b3e96e6f792", "score": "0.5797389", "text": "def forward(self, state):\n x = self.fc1(state)\n #x = self.bn1(x)\n x = F.relu(x)\n x = self.fc2(x)\n #x = self.bn2(x)\n x = F.relu(x)\n x = self.fc3(x)\n #x = self.bn3(x)\n x = F.relu(x)\n action = F.tanh(self.fc4(x))\n\n action = action * self.action_lim\n\n return action", "title": "" }, { "docid": "f9043c5001f49627f54172f517a75a1c", "score": "0.57786113", "text": "def forward(self, x: torch.Tensor, \n init_states: Optional[Tuple[torch.Tensor, torch.Tensor]]=None\n ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:\n for l in range(self.num_layers):\n state = None if init_states is None else (init_states[0][l], init_states[1][l])\n self.layers[l].set_states(x, state)\n \n \"\"\"Assumes x is of shape (sequence, batch, feature)\"\"\"\n seq_sz, bs, _ = x.size()\n hidden_seq = []\n for t in range(seq_sz): # iterate over the time steps\n x_t = x[t, :, :]\n for layer in self.layers:\n x_t = layer(x_t)\n hidden_seq.append(x_t.unsqueeze(Dim.batch))\n hidden_seq = torch.cat(hidden_seq, dim=Dim.batch)\n # reshape from shape (sequence, batch, feature) to (batch, sequence, feature)\n hidden_seq = hidden_seq.transpose(Dim.batch, Dim.seq).contiguous()\n\n \"\"\"Retrieve hidden states\"\"\"\n h_layers = []\n c_layers = []\n for l in range(len(self.layers)):\n h_t, c_t = self.layers[l].get_states()\n h_layers.append(h_t.unsqueeze(Dim.batch))\n c_layers.append(c_t.unsqueeze(Dim.batch))\n\n h_layers = torch.cat(h_layers, dim=Dim.seq)\n c_layers = torch.cat(c_layers, dim=Dim.seq)\n return hidden_seq, (h_layers, c_layers)", "title": "" }, { "docid": "fab75cc9153e12790bb179b68ee3d903", "score": "0.577846", 
"text": "def get_state(self):\n state = {'graph' : self.graph.get_state(),\n 'weights' : self.weights.get_state()}\n return state", "title": "" }, { "docid": "393146527b90f3542ec8291d620c1c83", "score": "0.57753277", "text": "def call(self, inputs, state):\r\n #############################################\r\n # TODO: YOUR CODE HERE #\r\n\r\n\r\n concat = linear(inputs, c, h)\r\n \r\n f = tf.sigmoid(tf.matmul(concat, self.W_f) + self._forget_bias)\r\n i = tf.sigmoid(tf.matmul(concat, self.W_i))\r\n new_c_hat = tf.tanh(tf.matmul(concat, self.W_j))\r\n o = tf.sigmoid(tf.matmul(concat, self.W_o))\r\n \r\n new_c = tf.multiply(c, f) + tf.multiply(i, new_c_hat) \r\n new_h = tf.matmul(tf.multiply(o, tf.tanh(new_c)), self.W_h) # Add a parameter to make the dimension of new_h consistent\r\n \r\n new_state = tf.concat([new_c, new_h], axis = 1)\r\n\r\n return new_h, new_state\r\n #############################################\r", "title": "" }, { "docid": "162350ad91b408c401b7d425f6f8c2ad", "score": "0.5768006", "text": "def forward(self, state):\n x = self.fc1(state)\n x = F.relu(x)\n x = self.fc2(x)\n x = F.relu(x)\n action_values = self.fc3(x)\n \n return action_values", "title": "" }, { "docid": "3dc76bf5a2a33805f724f703e2bd03ba", "score": "0.5753678", "text": "def forward(self, \n state: np.ndarray,\n ) -> torch.tensor:\n \n x = self.layer1(state)\n x = torch.nn.functional.relu(x)\n x = self.layer2(x)\n x = torch.nn.functional.relu(x)\n \n value = self.V(x)\n return value", "title": "" }, { "docid": "076231dc303199be38c96e02bef3f7be", "score": "0.5741232", "text": "def __call__(self, inputs, state):\n with tf.variable_scope('GRU'):\n W_r = tf.get_variable('W_r', shape=[self.state_size, self.state_size], initializer=tf.contrib.layers.xavier_initializer())\n U_r = tf.get_variable('U_r', shape=[self.input_size, self.state_size], initializer=tf.contrib.layers.xavier_initializer())\n b_r = tf.get_variable('b_r', shape=[self.state_size,], initializer = tf.constant_initializer(0.0))\n\n\n W_z = tf.get_variable('W_z', shape=[self.state_size, self.state_size], initializer=tf.contrib.layers.xavier_initializer())\n U_z = tf.get_variable('U_z', shape=[self.input_size, self.state_size], initializer=tf.contrib.layers.xavier_initializer())\n b_z = tf.get_variable('b_z', shape=[self.state_size,], initializer = tf.constant_initializer(0.0))\n\n W_o = tf.get_variable('W_o', shape=[self.state_size, self.state_size], initializer=tf.contrib.layers.xavier_initializer())\n U_o = tf.get_variable('U_o', shape=[self.input_size, self.state_size], initializer=tf.contrib.layers.xavier_initializer())\n b_o = tf.get_variable('b_o', shape=[self.state_size,], initializer = tf.constant_initializer(0.0))\n\n z_t = tf.nn.sigmoid(tf.matmul(inputs, U_z) + tf.matmul(state, W_z) + b_z)\n r_t = tf.nn.sigmoid(tf.matmul(inputs, U_r) + tf.matmul(state, W_r) + b_r)\n o_t = tf.nn.tanh(tf.matmul(inputs, U_o) + tf.matmul(r_t * state, W_o)+ b_o)\n new_state = z_t * state + (1 - z_t) * o_t\n\n output = new_state\n return output, new_state", "title": "" }, { "docid": "89885e77e477a5ef4aa983141f0d1a91", "score": "0.57411563", "text": "def get_layer_state(layer, state_dict):\n\treturn {'weights': state_dict[layer + '.weight'],\n\t\t\t'biases': state_dict[layer + '.bias'],\n\t\t\t}", "title": "" }, { "docid": "114d863173234648af1c79b7d5534ecf", "score": "0.5736472", "text": "def load_state(self, state):", "title": "" }, { "docid": "c9b7380b012e858613a72b9d3ef0cef1", "score": "0.57343394", "text": "def state_kernel(self, t, u):\n \n flux = 
self.flux_kernel(t, u)\n \n # Since there is no reaction term to be learned, du/dt = fluxes\n state = flux + self.func_nn(u).squeeze()\n \n return state", "title": "" }, { "docid": "66b0c654492d14b84ebe17cdd64f4803", "score": "0.5727295", "text": "def forward(self, seq_len, inputs, initial_state):\n # if both initial_state and inputs are [], raise an error\n if len(initial_state) == 0 and len(inputs) == 0:\n raise(ValueError('Both initial_state and inputs are []'))\n\n\n # if initial_state is None, initialize it with zeros\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n if len(initial_state) == 0:\n hx = torch.zeros(inputs.size(1), self.num_features, self.shape[0],\n self.shape[1]).to(device)\n cx = torch.zeros(inputs.size(1), self.num_features, self.shape[0],\n self.shape[1]).to(device)\n else:\n hx, cx = initial_state\n\n output_inner = []\n\n # apply dropout (combining CNN version and RNN version of bayesian dropout)\n if self.dropout_rate == 0:\n pass\n else:\n self.dropout_layer = BayesianDropout(self.dropout_rate,\n torch.zeros(hx.size(0), self.num_features * 4, self.shape[0],\n self.shape[1]))\n\n # for each time step, perform a CNN on a slice of the sequence of images and record the hidden state and cell state\n for index in range(seq_len):\n if len(inputs) == 0:\n x = torch.zeros(hx.size(0), self.input_channels, self.shape[0],\n self.shape[1]).to(device)\n else:\n x = inputs[index, ...]\n\n # combining input and last hidden state\n \n combined = torch.cat((x, hx), 1)\n\n # apply CNN forward pass\n gates = self.conv(combined) # gates: (B, num_features*4, H, W)\n\n # apply group norm\n gates = self.groupnorm(gates)\n\n # apply the same dropout mask at each time step\n if self.dropout_rate == 0:\n pass\n else:\n gates = self.dropout_layer(gates)\n\n # it should return 4 tensors: i,f,g,o following the literature of LSTM\n ingate, forgetgate, cellgate, outgate = torch.split(\n gates, self.num_features, dim=1)\n ingate = torch.sigmoid(ingate)\n forgetgate = torch.sigmoid(forgetgate)\n cellgate = torch.tanh(cellgate)\n outgate = torch.sigmoid(outgate)\n\n cy = (forgetgate * cx) + (ingate * cellgate)\n hy = outgate * torch.tanh(cy)\n output_inner.append(hy)\n hx = hy\n cx = cy\n\n return torch.stack(output_inner), (hy, cy)", "title": "" }, { "docid": "3d92f5868d7bd55a2267b05268f8c631", "score": "0.57263386", "text": "def forward(self, state):\r\n\t\treturn self.model.forward(state)", "title": "" }, { "docid": "e2447edb90e64d401e02e91df4b027bf", "score": "0.57256263", "text": "def forward(self, state, action):\n x = torch.cat([state, action], 1)\n x = torch.sigmoid(self.linear1(x)) #**sigmoid activation to help guard against exploding gradients!\n x = torch.sigmoid(self.linear2(x))\n x = torch.sigmoid(self.linear3(x))\n x = torch.tanh(self.linear4(x))\n\n return x", "title": "" }, { "docid": "766656490d23cb436bd85a7bf67f143c", "score": "0.5724858", "text": "def _state_switch(self):\n self._state = 1 - self._state", "title": "" }, { "docid": "d4455deb66f319e9daaeaa2222455a6a", "score": "0.57228374", "text": "def __init__(self):\n \n stateStruct = namedtuple('stateStruct', ['labels','mean', 'var','n']);\n \n self.states = stateStruct({},[],[],[]);\n \n #self.labels = [] # class labels\n #self.mean = [] # class mean\n #self.var = [] # class variances\n #self.n = 0 # nbr of classes", "title": "" }, { "docid": "fb2c6d0d4245c03fbb5c6845e5b659e3", "score": "0.57099104", "text": "def get_states_b(self):\n \n all_hidden_states = self.get_states_f()\n \n # Reversing 
the hidden and memory state to get the final hidden and\n        # memory state\n        last_hidden_states = all_hidden_states[-1]\n        \n        # For backward pass using the last hidden and memory of the forward\n        # pass\n        initial_hidden = last_hidden_states\n        \n        # Getting all hidden state throuh time\n        all_hidden_memory_states = tf.scan(self.Lstm_b, self.processed_input_rev, initializer=initial_hidden, name='states')\n        \n        # Now reversing the states to keep those in original order\n        #all_hidden_states = tf.reverse(all_hidden_memory_states, [False, True, False])\n        \n        return all_hidden_states", "title": "" }, { "docid": "a751dcc9502e97b8cd7fc6352c2a03ab", "score": "0.570199", "text": "def state(self):\n        pass", "title": "" }, { "docid": "88e1a6806c6c63436821f23f92767013", "score": "0.57018584", "text": "def __init__(self, _num_states, _num_obs, prior_trans = None, prior_obs = None):\n\n        #constant\n        self.num_states =_num_states + 2 #+2 for start & end state\n        self.num_obs = _num_obs\n        self.start_idx = 0 #start state index\n        self.end_idx = -1 #end state index\n        self.word_dict={i:i for i in range(self.num_obs)}\n        self.online = None #online predictor is stored here if necessary\n        np.random.seed(123456) #set seed for reproducibility\n        rnd.seed(123456)\n\n        #to be filled in training\n        self.threshold = 0.001 #threshold for fractional change in norm\n        #self.null_added = 1 #have nulls been appended?\n\n        #var\n        self.seq = None #current observation sequence\n        self.curr_state = None #current state\n        self.obs_count = np.zeros([self.num_states,_num_obs]) #count for observations\n        self.state_count = np.zeros([self.num_states]*2) #count for transitions\n\n        #-- Set up matrices\n        #state transition matrix. Rows -> From & Columns -> To\n        self.A = np.zeros((self.num_states, self.num_states))\n        #observation matrix. 
Rows -> State & Columns -> Observations\n self.O = np.zeros((self.num_states, self.num_obs))\n self.init_matrices(prior_trans, prior_obs)", "title": "" }, { "docid": "4a3c5e3ba027b9b29ccdd2b834d0801e", "score": "0.5698582", "text": "def act(self, state):\n\n state = torch.from_numpy(state).float()\n\n self.qnetwork_local.eval()\n with torch.no_grad():\n action, _, _ = self.qnetwork_local(state.unsqueeze(0))\n #action = action.cpu().squeeze().numpy() + self.noise.sample()\n #action = np.clip(action, -1,1)[0]\n self.qnetwork_local.train()\n return action.cpu().squeeze().numpy().reshape((self.action_size,))", "title": "" }, { "docid": "4d3934e23f7349f3cca74082b60c3a78", "score": "0.56974334", "text": "def _gru_layer_with_state_bias(h_prev, x, state, name='gru', x_dim=256, y_dim=1024, s_dim=512, reuse=None):\n\n with tf.variable_scope(name):\n\n # Reset gate\n with tf.variable_scope('reset_gate', reuse=reuse):\n Wi_r = tf.get_variable(name='weight_input', shape=(x_dim, y_dim), initializer=tf.random_normal_initializer(stddev=0.01))\n Wh_r = tf.get_variable(name='weight_hidden', shape=(y_dim, y_dim), initializer=initializer.orthogonal_initializer(0.01))\n Ws_r = tf.get_variable(name='weight_state', shape=(s_dim, y_dim), initializer=tf.random_normal_initializer(stddev=0.01))\n b_r = tf.get_variable(name='bias', shape=(y_dim,), initializer=tf.constant_initializer(0.1))\n r = tf.sigmoid(tf.matmul(x, Wi_r) + tf.matmul(h_prev, Wh_r) + tf.matmul(state, Ws_r) + b_r)\n\n # Update gate\n with tf.variable_scope('update_gate', reuse=reuse):\n Wi_z = tf.get_variable(name='weight_input', shape=(x_dim, y_dim), initializer=tf.random_normal_initializer(stddev=0.01))\n Wh_z = tf.get_variable(name='weight_hidden', shape=(y_dim, y_dim), initializer=initializer.orthogonal_initializer(0.01))\n Ws_z = tf.get_variable(name='weight_state', shape=(s_dim, y_dim), initializer=tf.random_normal_initializer(stddev=0.01))\n b_z = tf.get_variable(name='bias', shape=(y_dim,), initializer=tf.constant_initializer(0.1))\n z = tf.sigmoid(tf.matmul(x, Wi_z) + tf.matmul(h_prev, Wh_z) + tf.matmul(state, Ws_z) + b_z)\n\n # Candidate update\n with tf.variable_scope('candidate_update', reuse=reuse):\n Wi_h_tilde = tf.get_variable(name='weight_input', shape=(x_dim, y_dim), initializer=tf.random_normal_initializer(stddev=0.01))\n Wh_h_tilde = tf.get_variable(name='weight_hidden', shape=(y_dim, y_dim), initializer=initializer.orthogonal_initializer(0.01))\n Ws_h_tilde = tf.get_variable(name='weight_state', shape=(s_dim, y_dim), initializer=tf.random_normal_initializer(stddev=0.01))\n # b_h_tilde = tf.get_variable(name='bias', shape=(y_dim,), initializer=tf.constant_initializer(0.1))\n b_h_tilde = tf.get_variable(name='bias', shape=(y_dim,),\n initializer=tf.truncated_normal_initializer(mean=0.0, stddev=1.0))\n h_tilde = tf.tanh(tf.matmul(x, Wi_h_tilde) + \\\n tf.matmul(r * h_prev, Wh_h_tilde) + \\\n tf.matmul(state, Ws_h_tilde) + \\\n b_h_tilde)\n\n # Final update\n h = tf.sub(np.float32(1.0), z) * h_prev + z * h_tilde\n\n return h", "title": "" }, { "docid": "e688b692bce571a467f64bc749eea3cb", "score": "0.5694419", "text": "def update_target(self, ):\n self.target_net.load_state_dict(self.eval_net.state_dict())", "title": "" }, { "docid": "3172eb837e21ce8ceab1df07f6d6e314", "score": "0.56922746", "text": "def call(self, inputs, state):\n # inputs = realinputs + m +rt\n # rt's length is self._num_units\n # state = rt * older state \n # input = first 2 part\n totalLength=inputs.get_shape().as_list()[1]\n 
inputs_=inputs[:,0:totalLength-self._num_units]\n rth=inputs[:,totalLength-self._num_units:]\n inputs=inputs_\n state=math_ops.multiply(rth,state)\n with vs.variable_scope(\"gates\"): # Reset gate and update gate.\n # We start with bias of 1.0 to not reset and not update.\n bias_ones = self._bias_initializer\n if self._bias_initializer is None:\n dtype = [a.dtype for a in [inputs, state]][0]\n bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)\n value = math_ops.sigmoid(\n _linear([inputs, state], 2 * self._num_units, True, bias_ones,\n self._kernel_initializer))\n r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)\n with vs.variable_scope(\"candidate\"):\n c = self._activation(\n _linear([inputs, r * state], self._num_units, True,\n self._bias_initializer, self._kernel_initializer))\n new_h = u * state + (1 - u) * c\n return new_h, new_h", "title": "" }, { "docid": "e48168eff179a9e2d85f899e1065fd5a", "score": "0.5691771", "text": "def get_output(self, hidden_state):\n output = tf.nn.sigmoid(tf.matmul(hidden_state, self.Wo) + self.bo)\n \n return output", "title": "" }, { "docid": "ca8ac0a4007c91711c177f15d7b25d3d", "score": "0.5687247", "text": "def forward(self, state, action):\n input = torch.cat([state, action], 1)\n\n q1 = F.relu(self.linear1(input))\n q1 = F.relu(self.linear2(q1))\n q1 = self.linear3(q1)\n\n q2 = F.relu(self.l4(input))\n q2 = F.relu(self.l5(q2))\n q2 = self.l6(q2)\n\n return q1, q2", "title": "" }, { "docid": "42b28ff63f53c97622be27fea56a5538", "score": "0.5686772", "text": "def target_state(self, s):\n # YOUR CODE HERE\n tmp = s\n state = np.asarray([0., 0., 0.])\n trans = np.eye(3)\n for i in range(len(self.subpaths)):\n if tmp > self.subpaths[i].total_length:\n if i == len(self.subpaths) - 1:\n return compute_twist(trans, state, self.subpaths[i].end_state)\n tmp -= self.subpaths[i].total_length\n state = compute_twist(trans, state, self.subpaths[i].end_state)\n trans = rigid(state)\n else:\n return compute_twist(trans, state, self.subpaths[i].target_state(tmp))\n print(\"path error\")\n exit(1)", "title": "" } ]
f52497ff6f61c9d916ba7093bec81fb8
Draws the current screen into the delegate surface.
[ { "docid": "a9e4a2c22ec5aa8ab39948f0ee55e842", "score": "0.84791386", "text": "def draw(self):\n self.get_current_screen().draw(self._surface)", "title": "" } ]
[ { "docid": "c5d38c3a6f645ce37cd070c245ef3740", "score": "0.80792165", "text": "def draw(self, screen):\r\n pass", "title": "" }, { "docid": "b8114732a0c0f6957a74b9dfc4ecb49c", "score": "0.7948619", "text": "def draw_to_screen(self):\n self.screen.blit(self.image, self.rect)", "title": "" }, { "docid": "e8b9b5bd6b5043d937ca9ff92664c35a", "score": "0.78644127", "text": "def draw(self, screen):", "title": "" }, { "docid": "dde0fd695453a9058dedb5a7dd5e3d93", "score": "0.7766858", "text": "def draw(self):\n pygame.draw.rect(self.screen, self.color, self.rect)", "title": "" }, { "docid": "9f7074db023c536f6a8e08796d83b3ae", "score": "0.7650116", "text": "def draw(self):\r\n\t\tpygame.draw.rect(self.screen,self.color,self.rect)", "title": "" }, { "docid": "4b91658181b747d7641f81018ec48403", "score": "0.7523164", "text": "def draw(self):\n App.screen.blit(self.img, self.rect)", "title": "" }, { "docid": "8dbe81eb65f45c063a2df2e2ed5fc1b4", "score": "0.75100857", "text": "def paint(self, screen):\n if self._background:\n screen.blit(self._background, (0, 0))\n else:\n screen.fill(self._background_color)\n if self._overlay:\n screen.blit(self._overlay, self._overlay.get_rect(center=self._rect.center))\n for text_surface, pos in self._texts:\n screen.blit(text_surface, pos)\n for outline_surface, pos in self._outlines:\n screen.blit(outline_surface, pos)\n self._need_update = False", "title": "" }, { "docid": "07a9f9d9c44bc9dc54a256f87f645dfb", "score": "0.75048673", "text": "def draw(self, screen) -> None:\n\n #Render new position\n screen.blit(self.image, self.rect)", "title": "" }, { "docid": "3ae97ebc171329129c11b5b41275fece", "score": "0.7486759", "text": "def draw(self):\r\n self.screen.fill(BLACK)\r\n self.screen.blit(self.background, (0, 0))\r\n # self.draw_grid()\r\n self.draw_character()", "title": "" }, { "docid": "f37526dd1f586b6606c451ae28e23846", "score": "0.744492", "text": "def _draw_interface(self) -> None:\r\n surface = pygame.display.get_surface()\r\n\r\n surface.fill(BACK_GROUND_COLOR)\r\n\r\n self._draw_board_background(surface)\r\n\r\n self._draw_board_item()\r\n\r\n pygame.display.flip()", "title": "" }, { "docid": "f9fb8cd248d8ef5e4c32b1bdad22fe50", "score": "0.74377364", "text": "def draw(self):\n\t\tif self.is_visible():\n\t\t\tself.draw2D()", "title": "" }, { "docid": "8424875cd9103afe552f309b736afec6", "score": "0.7422123", "text": "def draw_to_screen(self, screen):\n self._rect = pygame.draw.line(screen, self._color, self._start_pos, self._end_pos, self._line_width)", "title": "" }, { "docid": "bc75b46594b9ae519b349c9c5a61f707", "score": "0.735864", "text": "def draw(self, screen):\r\n screen.blit(self.img, (self.x, self.y))", "title": "" }, { "docid": "d3615448e039a22526a3c75640be9d5a", "score": "0.7351539", "text": "def draw(self, display):\n display.blit(self.current_image, self.rect)", "title": "" }, { "docid": "b6dd1456739320fc8e7b2a357680651d", "score": "0.7334399", "text": "def draw(self):\n self.set_rend()\n self.screen.blit(self.rend, self.rect)", "title": "" }, { "docid": "b6dd1456739320fc8e7b2a357680651d", "score": "0.7334399", "text": "def draw(self):\n self.set_rend()\n self.screen.blit(self.rend, self.rect)", "title": "" }, { "docid": "0cedadee10d4b57b3fdfa55a94d4a232", "score": "0.7329635", "text": "def paint(self, screen):\n screen.fill((0, 0, 0)) # Clear background\n screen.blit(self.background, self.background.get_rect(center=self.rect.center))", "title": "" }, { "docid": "9fa00366dccef81627667d2ff22cd16a", "score": "0.7308135", "text": "def draw(self, 
screen):\n        screen.fill(pygame.Color('black'))\n        self.draw_board(screen)\n        self.draw_player_stats(screen)\n        self.draw_players(screen)", "title": "" }, { "docid": "6aec8eb7e5ad085e6c1c19f3da4fb5f5", "score": "0.7298559", "text": "def draw_screen(self, surf: Any) -> None:\n        self.draw_base_map(surf)\n        self.draw_units(surf)\n        self.draw_overlay(surf)", "title": "" }, { "docid": "ad5bac1a9f2ce84ddde126dcc31cd642", "score": "0.72797287", "text": "def draw(self, screen):\n        screen.blit(self.surf, self.position)\n        self.cursor_show = not self.cursor_show\n        if self.cursor_show:\n            screen.blit(self.cursor, (self.cursor_pos + self.position[0], 4 + self.position[1]))", "title": "" }, { "docid": "8ecda826666c7fc8ee15df733f820c9c", "score": "0.7259867", "text": "def draw(self):\n        window_surface.fill((0, 0, 0))\n        for element in self.gui:\n            element.draw()\n        pygame.display.update()", "title": "" }, { "docid": "f8babfd0eec13f78953e044314be7e93", "score": "0.7244425", "text": "def draw(self, surface):\n        pass", "title": "" }, { "docid": "0d2bfb90efe41ce9c8ac922f33d66c72", "score": "0.7237872", "text": "def render(self):\n        self.screen.reset()\n        self.screen.blit(self.corners)\n        self.screen.blit(self.lines, (1, 1))\n        self.screen.blit(self.rects, (int(self.screen.width / 2) + 1, 1))\n        self.screen.blit(self.circle, (0, int(self.screen.height / 2) + 1))\n        self.screen.blit(self.filled, (int(self.screen.width / 2) + 1,\n                                       int(self.screen.height / 2) + 1))\n\n        self.screen.update()\n        self.clock.tick()", "title": "" }, { "docid": "14d6a469e09c6921dd7c950320b07b8d", "score": "0.7231258", "text": "def draw(self, surface):\n        surface.blit(self.backgroundImage, self.backgroundImage_rect)\n        self.player.draw(surface)\n        self.enemyManager.draw(surface)\n        self.bossenemyManager.draw(surface)\n        self.bulletManager.draw(surface)\n        self.explosionManager.draw(surface)\n        self.powerupManager.draw(surface)\n\n        self.render_infobar(surface)\n\n        if self.statsManager.powerup_active:\n            surface.blit(self.powerup_image, self.powerup_image_rect)\n            surface.blit(self.powerup_text, self.powerup_text_rect)\n\n        if not self.isEnd:\n            surface.blit(self.current_song, self.current_song_rect)\n            surface.blit(self.highscore_text, self.highscore_text_rect)\n\n        if self.isPaused:\n            surface.blit(self.dim_screen, (0, 0))\n            surface.blit(self.paused_message, self.paused_message_rect)\n            # surface.blit(self.paused_message_subtitle,\n            #              self.paused_message_subtitle_rect)\n            self.draw_buttons(surface)\n        if self.isStart:\n            surface.blit(self.dim_screen, (0, 0))\n            surface.blit(self.start_message, self.start_message_rect)\n\n        self.surface = surface.copy()", "title": "" }, { "docid": "a82ba83f9ae937614d98b006d6964d98", "score": "0.72009856", "text": "def draw(self):\n        if self.visible == True:\n            fill(self.color[0], self.color[1], self.color[2])\n            rect(self.x,self.y, self.width,self.height)", "title": "" }, { "docid": "8610bca826c0d7087c1cff2718c1e99a", "score": "0.7200256", "text": "def draw(self):\n        self.screen.blit(self.msg_image, self.msg_image_rect)", "title": "" }, { "docid": "697be4032b9d408c8a62e2f3fd9bcea5", "score": "0.71959656", "text": "def draw(self):\n        if self.rendered:\n            graphics.set_color(1,1,1,1)\n            draw.image(self.image,self.x,self.y)\n        else:\n            self.rendered = True\n            self.draw_initial()\n        graphics.set_color(0,0,0,1)\n        graphics.set_line_width(1)\n        draw.rect_outline(self.x,self.y,self.x+self.width,self.y+self.height)", "title": "" }, { "docid": 
"bdd287009a87ffd82f76def3e9cdb8bf", "score": "0.7192944", "text": "def draw(self, screen):\n screen.fill(BLACK)\n ships = self.state.get_ships()\n systems = self.state.get_systems()\n draw_systems(screen, systems)\n draw_ships(screen, ships, systems)\n pg.display.flip()", "title": "" }, { "docid": "dbe4f311571aec8798ae14d06f52c588", "score": "0.7190755", "text": "def draw(self, screen):\n\n # Draw the background\n screen.fill(PURPLE)\n screen.blit(self.background, (0, 0))\n\n # Draw all the sprite lists that we have\n self.platform_list.draw(screen)\n self.target_list.draw(screen)", "title": "" }, { "docid": "b7fb5533b0d5e728d380b8b774e3451d", "score": "0.7187679", "text": "def draw(self):\r\n self.scr.blit(self.image, self.rect)", "title": "" }, { "docid": "72c47b3235ed5d3af9cf270fba8d5b11", "score": "0.7172821", "text": "def draw(self):\r\n if self.__visible:\r\n self.__canvas.create_image(self.x,\r\n self.y,\r\n image = self.__image)", "title": "" }, { "docid": "a6cf5c2713ba42af36f8c28800384ccd", "score": "0.71617675", "text": "def draw(self):\n self._draw_background()\n\n self._draw_python_logo()\n self._draw_misty_logo()\n self._draw_back_button()\n\n self._play_sound()\n self._draw_cursor()", "title": "" }, { "docid": "84faede3932dc279bce99ac4f6f08e10", "score": "0.7147253", "text": "def draw(self):\n if self._background.source != None:\n self._background.draw(self.view)\n if self._game != None:\n self._game.draw(self.view)\n if self._msgs != None:\n for i in self._msgs:\n i.draw(self.view)", "title": "" }, { "docid": "3149ba7617c06655cfba0ed880e6414e", "score": "0.71399134", "text": "def draw(self, screen: pygame.Surface):\n self.playAnimation(0.01)\n screen.blit(self.image, self.koordinaten, self.rect)\n if not self.playing:\n self.event.animation(screen, pygame.time.get_ticks(), 100, (0, 50))\n\n # self.event.show(screen)", "title": "" }, { "docid": "9e0646907dab066e3ea9f0bbece6117a", "score": "0.712912", "text": "def draw(self, surface):\r\n pygame.draw.rect(surface, self.Color, (self.X, self.Y, self.Width, self.Height))", "title": "" }, { "docid": "32f09123cd571b2c4919dbd908e64d99", "score": "0.7120638", "text": "def draw(self):\n self.snake_game.window.fill(self.snake_game.settings.back_color, self.rect)\n self.snake_game.window.blit(self.msg_image, self.msg_image_rect)\n pygame.draw.rect(\n self.snake_game.window,\n self.snake_game.settings.snake_color,\n self.rect,\n 1 # width\n )", "title": "" }, { "docid": "4b41c4b77a3f7fcf61e8567c7478b136", "score": "0.7117182", "text": "def update(self):\n self.loop.widget = self.frame\n self.loop.draw_screen()", "title": "" }, { "docid": "241945d9d08d3b991ba4e1017e822360", "score": "0.71167153", "text": "def draw(self):\n\n self._screen.blit(self._screen_background, [0, 0, ccs.SETTING_SIZE[0], ccs.SETTING_SIZE[1]])\n\n # self._screen.fill(ccc.BEIGE)\n\n self._all_sprites.draw(self._screen)\n\n # --- Drawing code should go here\n\n pg.display.flip()\n pg.display.update()\n\n # --- Limit to 60 frames per second\n self._clock.tick(60)", "title": "" }, { "docid": "3287a0aadcf34d48ebb6b8680b9a376d", "score": "0.71023136", "text": "def draw(screen):\n MY.background.draw(screen)\n MY.player.draw(screen)\n MY.boss.draw(screen)\n MY.ending_overlay.draw(screen)", "title": "" }, { "docid": "3287a0aadcf34d48ebb6b8680b9a376d", "score": "0.71023136", "text": "def draw(screen):\n MY.background.draw(screen)\n MY.player.draw(screen)\n MY.boss.draw(screen)\n MY.ending_overlay.draw(screen)", "title": "" }, { "docid": "0d6d7c96222382c23a6ad7d7798f6f97", 
"score": "0.7102235", "text": "def draw(self, screen, ):\n\n # Draw the background\n screen.fill(BLACK)\n\n # Draw all the sprite lists that we have\n self.platform_list.draw(screen)\n self.enemy_list.draw(screen)", "title": "" }, { "docid": "4bc0389e0214957a7aef817b49d60a58", "score": "0.71001023", "text": "def display(self):\n global screen\n screen.blit(self.img, self.rect)", "title": "" }, { "docid": "ef25caf57afb55e3a61c91336c17f919", "score": "0.70811003", "text": "def draw(self, surface):\n surface.blit(self.image, self.rect)", "title": "" }, { "docid": "ef25caf57afb55e3a61c91336c17f919", "score": "0.70811003", "text": "def draw(self, surface):\n surface.blit(self.image, self.rect)", "title": "" }, { "docid": "ef25caf57afb55e3a61c91336c17f919", "score": "0.70811003", "text": "def draw(self, surface):\n surface.blit(self.image, self.rect)", "title": "" }, { "docid": "904b5e7c091f2f717b740d2f7ac0c192", "score": "0.70704305", "text": "def draw(self, screen: pygame.Surface) -> None:\n x, y = self.pos\n rect = pygame.Rect(x - self.WIDTH // 2, y - self.WIDTH // 2, self.WIDTH, self.WIDTH)\n pygame.draw.rect(screen, PLACE, rect)", "title": "" }, { "docid": "b9d35b43deca769838e668318c062eb8", "score": "0.70553255", "text": "def _draw(self):\n self.screen.fill((0,0,0))\n self.chat_panel.draw()\n\n\n\n for user in self.panels:\n panel = self.panels[user][PANEL_INDEX]\n lines = self.panels[user][LINES_INDEX]\n\n\n panel.clear()\n panel.draw(lines)\n\n pygame.display.update()\n self.clock.tick(FPS_LIMIT)", "title": "" }, { "docid": "0b9ed6fde5d4fa8367d1dcf95a6e7449", "score": "0.7047266", "text": "def draw(self, screen):\n \n # Draw the background\n screen.fill(constants.BLUE)\n \n # Draw all the sprite lists that we have\n self.map.render(screen)\n self.enemy_list.draw(screen)", "title": "" }, { "docid": "148212ba6ba7d7f144d9fa924a4cd99a", "score": "0.703876", "text": "def draw(self, screen):\n\n pygame.draw.rect(screen, self.color, self.rect, 0)\n screen.blit(self.text_surface, self.text_center)", "title": "" }, { "docid": "3c0d696c607dd7cbda4de34f0545de08", "score": "0.7031694", "text": "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n\n # Draw the play button if game is inactive.\n if not self.stats.game_active:\n self.play_button.draw_button()\n else:\n self.fields.draw(self.screen)\n self.lines.draw(self.screen)\n\n self.pd.show_active_player()\n\n self.lines.draw(self.screen)\n pygame.display.flip()", "title": "" }, { "docid": "25d74ff1de8639143fe27baececbcae3", "score": "0.7026014", "text": "def draw(self, SCREEN):\r\n SCREEN.blit(self.image, (0, 0))", "title": "" }, { "docid": "3661c5ae23eb76a822d0cae076860b59", "score": "0.70240986", "text": "def draw(self):\n # CLEAR THE SCREENS\n self.fill((0, 0, 0))\n self.screen.fill((0, 0, 0))\n\n #self.fill((155, 0, 0))\n #self.screen.fill((0, 255, 0))\n # self.timeScreen.fill((100, 0, 100))\n # self.dateScreen.fill((0, 155, 0))\n # self.weekScreen.fill((0, 0, 200))\n\n # blit time screen\n self.timeScreen.fill((0, 0, 0))\n self.timeScreen.blit(self.update_time_screen(time.localtime()), (0, 0))\n\n # blit date screen\n if self.hours_passed == 0:\n if not self.date_updated:\n self.dateScreen.fill((0, 0, 0))\n self.dateScreen.blit(self.update_date_screen(time.localtime()), (0, 0))\n self.date_updated = True\n else:\n self.date_updated = False\n\n # blit week screen\n if self.hours_passed == 0:\n if not self.week_updated:\n self.weekScreen.fill((0, 0, 0))\n self.weekScreen.blit(self.update_week_screen(time.localtime()), (0, 
0))\n self.week_updated = True\n else:\n self.date_updated = False\n\n # current weather screen\n if self.minutes_passed == 0 or self.minutes_passed == 10 or self.minutes_passed == 20 or self.minutes_passed == 30 or self.minutes_passed == 40 or self.minutes_passed == 50:\n if not self.current_weather_updated:\n self.currentWeatherScreen.blit(currentWeatherForecast.update_weather(self.currentWeatherScreen.get_width(), self.currentWeatherScreen.get_height(), self.verySmallFont, self.fontcolor, self.verySmallHeight), (0, 0))\n self.current_weather_updated = True\n else:\n pass\n else:\n self.current_weather_updated = False\n\n # hourly weather screen\n if self.minutes_passed == 0:\n if not self.hourly_weather_updated:\n self.hourlyWeatherScreen.blit(hourlyWeatherForecast.update_weather(self.hourlyWeatherScreen.get_width(), self.hourlyWeatherScreen.get_height(), self.tiniestFont, self.fontcolor, self.tiniestHeight), (0, 0))\n self.hourly_weather_updated = True\n else:\n pass\n else:\n self.hourly_weather_updated = False\n\n # BLIT EVERYTHING ONTO MAIN SCREEN\n self.screen.blit(self.dateScreen, (0, self.dateStartingHeight))\n self.screen.blit(self.timeScreen, (0, self.timeStartingHeight))\n self.screen.blit(self.weekScreen, (0, self.weekStartingHeight))\n self.screen.blit(self.currentWeatherScreen, (self.screen.get_width() / 2, self.weatherStartingHeight))\n self.screen.blit(self.hourlyWeatherScreen, (0, self.hourlyWeatherStartingHeight))\n self.blit(self.screen, (40, 30))", "title": "" }, { "docid": "bb6c9fade7bb665345acaf577616c4e7", "score": "0.7022754", "text": "def draw():\r\n global gDesktop\r\n gDesktop.draw()", "title": "" }, { "docid": "be0ecc7c9daa51460063c5f19ef0cc32", "score": "0.7019428", "text": "def render(self) -> None:\r\n if not self._visualize:\r\n return\r\n\r\n # Need this on OSX due to pygame bug\r\n pygame.event.peek(0)\r\n\r\n self._screen.fill(WHITE)\r\n self._sprite_group.draw(self._screen)\r\n self._stats_group.draw(self._screen)\r\n self._clock.tick(FPS)\r\n pygame.display.flip()", "title": "" }, { "docid": "d9b9b31dfae140fd4850c1c4af3c0f89", "score": "0.70190537", "text": "def draw(self, screen):\n screen.fill(constants.BLACK)\n # screen.blit(self.background, [0, 0])\n\n # Draw all the sprite lists that we have\n self.platform_list.draw(screen)\n self.query_box_list.draw(screen)\n self.turret_list.draw(screen)", "title": "" }, { "docid": "670d5d3fbf02c3ca658d47ed9d52c11c", "score": "0.70078045", "text": "def draw(self):\n\n self.screen.fill(black)\n self.draw_map()\n self.all_sprites.draw(self.screen)\n\n for border in self.borders:\n border.draw()\n\n pygame.display.update()", "title": "" }, { "docid": "e157521f00141e98917dfbf1ce7a9dea", "score": "0.70011616", "text": "def draw(self, surface):\n pygame.draw.lines(surface, self.color, True, self.get_pos(), 5)", "title": "" }, { "docid": "15b1c157d2777f94fc03ae1fe530c33d", "score": "0.6989607", "text": "def draw(self, screen):\n if self.paused:\n screen.blit(onOff_bg, (self.x, self.y))\n screen.blit(self.play, (self.x, self.y))\n else:\n screen.blit(self.pause, (self.x, self.y))", "title": "" }, { "docid": "dd930e86a6c0d3932d2baf1c41072d1f", "score": "0.69639313", "text": "def draw(self):\n self.base.draw()", "title": "" }, { "docid": "16225570f7f3cc5eb7f8bedb38112d80", "score": "0.69193417", "text": "def draw(self, screen: pygame.Surface) -> None:\n x, y = self.pos\n rect = pygame.Rect(x - self.WIDTH // 2, y - self.WIDTH // 2, self.WIDTH, self.WIDTH)\n pygame.draw.rect(screen, BUS_STOP, rect)", "title": "" }, { 
"docid": "fb1d5a1da1b8d62df1c2be04dbeba91c", "score": "0.69097537", "text": "def draw_interface(self, screen: pygame.Surface):\n\n self.selected_info.update()\n screen.blit(self.selected_info.image, self.selected_info.rect)\n self.minimap.update()\n screen.blit(self.minimap.image, self.minimap.rect)\n self.messages.update()\n self.messages.draw(screen)\n self.commands.draw(screen)\n self.player_empire_info.update()\n self.player_empire_info.draw(screen)\n self.enemy_empire_info.update()\n self.enemy_empire_info.draw(screen)", "title": "" }, { "docid": "f93c731f7e9a4cf1258fef2f0b202cc3", "score": "0.69005954", "text": "def draw(screen, background, flies):\n\n # Redraw screen here\n flies.clear(screen, background)\n dirty = flies.draw(screen)\n\n # Flip the display so that the things we drew actually show up.\n pg.display.update(dirty)", "title": "" }, { "docid": "f719ec584f2362e1ce72921f3269e3a4", "score": "0.6899807", "text": "def draw(self, screen):\n \n # Draw the background\n \n # # Draw all the sprite lists that we have\n if self.player.game_over == False:\n self.platform_list.draw(screen)\n self.coin_list.draw(screen)", "title": "" }, { "docid": "98a05a059531eaf6fd20dfe9e57e2e9f", "score": "0.68768495", "text": "def draw(self)-> None:\n x, y = pg.mouse.get_pos()\n\n self._draw_background()\n\n self._draw_play_button(x, y)\n self._draw_gitlab_button(x, y)\n self._draw_other_buttons(x, y)\n\n self._play_sound()\n self._draw_cursor(x, y)\n\n pg.display.update()", "title": "" }, { "docid": "d410ffe3297e5c6b1c4c4feb2998af20", "score": "0.68651044", "text": "def draw(self, screen):\n screen.blit(self.surf, (0, 0))\n screen.blit(self.popup, (600, 440))\n #I considered rendering the text every frame, but since it doesn't change, we don't need to.\n # for i,opt in enumerate(self.options):\n # text = font.render(opt.text, False, (255,255,255))\n # screen.blit(text, (150, 200+i*25))\n # print('blit'+str(200+i*25))", "title": "" }, { "docid": "20a4d0dd8cce6482c780ea294dcec567", "score": "0.6862189", "text": "def _update_screen(self):\n\t\tself.screen.fill(self.settings.bg_color)\n\t\tself.ship.blitme()\n\t\tfor bullet in self.bullets.sprites():\n\t\t\tbullet.draw_bullet()\n\t\tself.target.blitme()\n\n\t\t# Draw the play button if the game is inactive.\n\t\tif not self.settings.game_active:\n\t\t\tself.play_button.draw_button()\n\n\t\tpygame.display.flip()", "title": "" }, { "docid": "d866f8a419ee4c1e52218657cb300a97", "score": "0.6842616", "text": "def draw_frame(self):\r\n\r\n self.moving_sprites.update()\r\n\r\n # CLEAR THE PREVIOUS SPRITES\r\n self.screen.fill(BLACK)\r\n\r\n # UPDATE THE LATEST POSITION OF ALL SPRITES\r\n self.moving_sprites.draw(self.screen)\r\n self._show_ai_path()\r\n self.walls.draw(self.screen)\r\n pygame.display.update()", "title": "" }, { "docid": "6d745db4e7a40d6a51f411452593976b", "score": "0.6837359", "text": "def draw(self, win):\n win.blit(self.img, (self.x, self.y))", "title": "" }, { "docid": "cc6e045552cae5f5a0ff43d83ab47b19", "score": "0.6818804", "text": "def draw_window(self):\n self.window.mainloop()", "title": "" }, { "docid": "365858744ad3441eae604e6225763ae1", "score": "0.68120474", "text": "def draw(self):\r\n\r\n # Print out the exit cell\r\n self.screen.blit(self.exitSurf, self.exitRect)\r\n\r\n # Go through the cells\r\n for i in range(len(self.wallRects)):\r\n self.screen.blit(self.wallSurfs[i], self.wallRects[i])", "title": "" }, { "docid": "384362671c2797bdb65b6eceb053c091", "score": "0.68076724", "text": "def draw(self, screen):\n \n # Draw the 
background\n # We don't shift the background as much as the sprites are shifted\n # to give a feeling of depth.\n screen.fill(constants.BLUE)\n #layer 1 \n screen.blit(self.background,(self.world_shift_x // 2,(self.world_shift_y // 1) -360))\n #layer 2\n screen.blit(self.background2,(self.world_shift_x // 1,(self.world_shift_y // 1) -360))\n # Draw all the sprite lists that we have\n self.platform_list.draw(screen)\n self.enemy_list.draw(screen)", "title": "" }, { "docid": "c582cf65cb84ccce2323aec3c7cf5d89", "score": "0.68041056", "text": "def draw(self, screen, color, specificRect: pygame.Rect = None):\r\n pass", "title": "" }, { "docid": "2f4d8e0789dc5967021b9c1d81f27ce5", "score": "0.6800818", "text": "def draw(self, surface):\n pygame.draw.lines(surface, (0, 0, 0), False, flip_and_round_list(self.left_wall), 1)\n pygame.draw.lines(surface, (0, 0, 0), False, flip_and_round_list(self.right_wall), 1)", "title": "" }, { "docid": "fe67f5bb65ca55c492565ecf01869f9c", "score": "0.6797988", "text": "def draw(self, screen):\r\n\r\n # Draw all the sprite lists that we have\r\n self.platform_list.draw(screen)\r\n self.collectible_list.draw(screen)", "title": "" }, { "docid": "aeb29dc6fce4ee8721f1f63a411ce4ad", "score": "0.67979634", "text": "def redraw(self):\n\n dc = self.getDC()\n self.doDrawing(dc)", "title": "" }, { "docid": "ee64ce543a42030a1073a46e34876459", "score": "0.67936426", "text": "def _update_screen(self):\n self.screen.fill(self.settings.bg_colour) #Redraw screen during each pass\n self.liam.blitme()\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n self.targets.draw(self.screen)\n self.sb.show_score()\n if not self.stats.game_active:\n self.play_button.draw_button()\n pygame.display.flip() #Make the newest drawn screen visible", "title": "" }, { "docid": "ed4dad9ce9897d8bfaffbd2ab9e3eced", "score": "0.6782495", "text": "def __drawWorld(self):\n self.map.sprites.draw(self.window.screen)\n self.players.draw(self.window.screen)\n self.enemies.draw(self.window.screen)\n self.bullets.draw(self.window.screen)\n self.userInterface.draw()", "title": "" }, { "docid": "f1c9de1f3f69e8f3f1bd608097b76596", "score": "0.677487", "text": "def draw(self, window):", "title": "" }, { "docid": "a2571164ad2b90942b083da39f02fb90", "score": "0.6771294", "text": "def draw_frame(self):\n self.window.fill(COLOURS_MAP['background_color'])\n counter = 0\n for y in range(self.height):\n for x in range(self.width):\n x_pos = x * self.scale\n y_pos = y * self.scale\n # Skip 0(background color) as we are already clearing screen at beginning.\n if self.display_buffer[counter]:\n pixel = Rect(x_pos, y_pos, self.scale, self.scale)\n draw.rect(self.window, COLOURS_MAP['foreground_color'], pixel)\n counter += 1\n self.update_display()\n self.needs_screen_update = False", "title": "" }, { "docid": "004c4a76ba828838a4a52e114482fd3f", "score": "0.6763731", "text": "def draw(self):\n\n # Draw background\n try:\n self.win.blit(self.bg, (0,0))\n except:\n print(\"exeption\")\n\n # Get mouse position\n mouse_x, mouse_y = pygame.mouse.get_pos()\n\n # Draw buttons\n for button in self.buttons:\n button.draw(self.win)\n button.mouse_hover(mouse_x, mouse_y)", "title": "" }, { "docid": "82ad8ce127f760cce32db2d918fd3495", "score": "0.6755033", "text": "def update_screen(self):\r\n self.screen.fill(self.bg_color)\r\n if self.game_active == False:\r\n self.button._draw_button()", "title": "" }, { "docid": "6f462a2e38d9090f143c1b540e6c5ec5", "score": "0.675303", "text": "def draw(self, screen):\r\n 
pygame.draw.rect(screen, self.color, (self.x, self.y, self.width, self.height), 0)\r\n text = BUTTONS_FONT.render(self.text, 1, self.text_color)\r\n screen.blit(text, (self.x + (self.width/2 - text.get_width()/2), self.y + (self.height/2 - text.get_height()/2)))", "title": "" }, { "docid": "9370ca88f5e950a8cfc6ea60948e5089", "score": "0.6745748", "text": "def render(self, destination_rect):\n pygame.draw.rect(self.surface, pygame.Color(\"black\"), destination_rect)", "title": "" }, { "docid": "21e6e42ca2f5793b03f90a4f38e7776e", "score": "0.67377555", "text": "def update(self):\n # this is not really neccesary because the surface is black after initializing\n self.corners.fill(BLACK)\n self.corners.draw_dot((0, 0), self.colors[0])\n self.corners.draw_dot((self.screen.width - 1, 0), self.colors[0])\n self.corners.draw_dot((self.screen.width - 1, self.screen.height - 1),\n self.colors[0])\n self.corners.draw_dot((0, self.screen.height - 1), self.colors[0])\n\n self.lines.fill(BLACK)\n self.lines.draw_line((1, 0), (self.lines.width - 1, 0), self.colors[1])\n self.lines.draw_line((0, 1), (0, self.lines.height - 1), self.colors[3])\n self.lines.draw_line((0, 0), (self.lines.width - 1,\n self.lines.height - 1), self.colors[2])\n\n self.rects.fill(BLACK)\n self.rects.draw_rect((0, 0), (int(self.rects.width / 2) - 1,\n self.rects.height),\n self.colors[2], self.colors[3])\n self.rects.draw_rect((int(self.rects.width / 2) + 1, 0),\n (int(self.rects.width / 2) - 1,\n self.rects.height),\n self.colors[3], self.colors[2])\n\n self.circle.fill(BLACK)\n radius = int(min(self.circle.width, self.circle.height) / 2) - 1\n self.circle.draw_circle((int(self.circle.width / 2) - 1,\n int(self.circle.height / 2) - 1), radius,\n self.colors[4], self.colors[5])\n\n self.filled.fill(self.colors[6])", "title": "" }, { "docid": "27277a4dc521c953f0a28ba4213e583f", "score": "0.67344874", "text": "def draw(self):\n self.reset_image()\n self.draw_car()\n self.draw_track()\n self.draw_all_lidars()\n self.draw_demands()\n self.draw_laptime()\n self.render_image()\n self.update_camera_position()", "title": "" }, { "docid": "8d8ca78a28c1af8014380e2fed9d0878", "score": "0.67194754", "text": "def draw(self):\n self.rect = self.main.screen.blit(self.pawn, (670, 60))\n self.rect = self.main.screen.blit(self.card, (660, 150))\n\n for playerInfo in self.playerInfoList[self.playerIndex]:\n playerInfo.draw()\n for cardInfo in self.cardInfoList:\n cardInfo.draw()\n for optionInfo in self.optionInfoList:\n optionInfo.draw()\n for info in self.infoList:\n info.draw()\n\n pass", "title": "" }, { "docid": "c0b68b901e94029edcedd9e48baa1dd3", "score": "0.67168856", "text": "def on_draw(self):\n if (settings.GFX):\n self.clear()\n self.set_3d()\n glColor3d(1, 1, 1)\n self.world.batch.draw()\n if isinstance(self.currentPlayer(), HumanPlayer): self.draw_focused_block()\n self.set_2d()\n self.draw_labels()\n self.draw_reticle()", "title": "" }, { "docid": "d79ee3e43e55707d4544102b58ac319c", "score": "0.66936153", "text": "def draw(self, screen):\n screen.fill((187, 128, 68)) # Fill the screen with black.\n\n # Redraw screen here.\n\n # Draw the grid\n self.grid.draw(screen)\n\n global score\n\n self.font.render_to(screen, (5, 5), \"Score: \")\n self.font.render_to(screen, (182, 5), str(score))\n\n self.allSprites.draw(screen)\n\n # Flip the display so that the things we drew actually show up.\n pygame.display.flip()", "title": "" }, { "docid": "2d91f6fcda2264c858b83a939de00719", "score": "0.66908103", "text": "def draw(self):\n 
self._screen.fill(self._bg_color)\n\n        for edge in self._edges: # draw the lines first\n            self.draw_edge(edge)\n\n        for edge in self._edges: # have to loop again to avoid lines overdrawing the indicators\n            self._draw_end_point_indicator(edge)\n\n        for vertex in self._vertices: # draw the icons\n            self.draw_vertex(vertex)\n\n        self.draw_info_box() # draw the info text", "title": "" }, { "docid": "39bc8859ab6c6f2b729ce92879264e95", "score": "0.669065", "text": "def draw(self):\r\n        pygame.display.set_caption(\"{:.2f}\".format(self.clock.get_fps()))\r\n        self.screen.fill(DARKGREY)\r\n        for sprite in self.all_sprites:\r\n            self.screen.blit(sprite.image, self.camera.apply(sprite))\r\n\r\n        pygame.display.flip()", "title": "" }, { "docid": "e91dd542f6bf446f7f5f14cb600d0b81", "score": "0.6685854", "text": "def draw(self):\n\t\tlogging.warn(\"Not implemented draw\")", "title": "" }, { "docid": "4bf7ec1ba3643767e9f01e943090107c", "score": "0.66854644", "text": "def draw(self):\n        # static\n        surf = self.surf.copy()\n\n        # dynamic\n        pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*180), 33)\n        self.button_rect = self.button_surf.get_rect(center=pos)\n        surf.blit(self.button_surf, self.button_rect)\n        self.button_rect.move_ip(self.xpos, self.ypos) # move of button box to correct screen position\n\n        # screen\n        screen.blit(surf, (self.xpos, self.ypos))", "title": "" }, { "docid": "5fc8647c817fb3e79165504520edf692", "score": "0.6684988", "text": "def draw(self):\r\n        pass", "title": "" }, { "docid": "aab025ba212b74433fa00ac1d62d1700", "score": "0.6676739", "text": "def draw(self):\n        pass", "title": "" }, { "docid": "0079ddcf2d33bbab0e2964ce7cc53b82", "score": "0.66763085", "text": "def DisplayGameDraw(self):\n        raise NotImplementedError('This interface has not yet been implemented') #None", "title": "" } ]
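The negative passages in the row above are all variations on pygame's per-frame clear/draw/flip idiom. The block below is a minimal, self-contained sketch of that shared pattern, not taken from any passage; it assumes pygame is installed, and every name, size, and color in it is illustrative.

```python
# Hedged sketch of the clear -> draw -> flip loop the passages above share.
# Assumes pygame is installed; window size, colors, and rect are illustrative.
import pygame

pygame.init()
screen = pygame.display.set_mode((320, 240))
clock = pygame.time.Clock()

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    screen.fill((0, 0, 0))                                         # clear the frame
    pygame.draw.rect(screen, (255, 255, 255), (150, 110, 20, 20))  # draw something
    pygame.display.flip()                                          # show the frame
    clock.tick(60)                                                 # cap at 60 FPS

pygame.quit()
```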
9cf5b7f802daf350491f7a8ef3479226
Sets the low_power_usb of this VmediaPolicyAllOf.
[ { "docid": "0e0153707a1160a732bbf16c6c24f3da", "score": "0.8350906", "text": "def low_power_usb(self, low_power_usb):\n\n self._low_power_usb = low_power_usb", "title": "" } ]
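The positive passage above is the setter half of a generated model property. Below is a minimal sketch of the getter/setter pair such a setter usually belongs to; the class body is an assumption for illustration, not the actual generated VmediaPolicyAllOf model.

```python
# Hedged sketch of the property pair around a generated setter like the one
# above. This stand-in class is illustrative, not the real generated model.
class VmediaPolicyAllOf:
    def __init__(self, low_power_usb=True):
        self._low_power_usb = None
        self.low_power_usb = low_power_usb  # route init through the setter

    @property
    def low_power_usb(self):
        """Gets the low_power_usb of this VmediaPolicyAllOf."""
        return self._low_power_usb

    @low_power_usb.setter
    def low_power_usb(self, low_power_usb):
        """Sets the low_power_usb of this VmediaPolicyAllOf."""
        self._low_power_usb = low_power_usb


policy = VmediaPolicyAllOf(low_power_usb=False)
print(policy.low_power_usb)  # -> False
```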
[ { "docid": "8b3ea43d75887285e4acb801785985e4", "score": "0.7069202", "text": "def set_low_power(self):\n logger.debug('Enable low power mode by setting mods=00.')\n self.write_byte_data(MMA8451_REG_CTRL_REG2, MODS_LOW_POWER)", "title": "" }, { "docid": "25fe1506f473f48c2a5ed745908fb2b5", "score": "0.5845206", "text": "def enter_low_power_mode(self) -> None:\n pass # Antenna Deployer has no-op", "title": "" }, { "docid": "219327d85d408bde74958ce74091bfde", "score": "0.58155984", "text": "def low_battery(self, new_state):\n if new_state == 'False' or new_state == False:\n self._low_battery = False\n else:\n self._low_battery = True", "title": "" }, { "docid": "d3838a6dded36fc8d2ef9a479d1a83d6", "score": "0.58116305", "text": "def set_power(self, on_off):\n state = {\n 'power': on_off\n }\n return self.set_state(state)", "title": "" }, { "docid": "f1af7e2cc4c5c55cb70c28f7f547c81f", "score": "0.5722831", "text": "def do_set_power(self, power) -> None:\n logging.info(__name__ + ' : Setting power to %s' % power)\n c_power = ctypes.c_float(power)\n close = False\n if not self._open:\n self._handle = ctypes.c_void_p(self._dll.sc5511a_open_device(self._serial_number))\n close = True\n completed = self._dll.sc5511a_set_level(self._handle, c_power)\n if close:\n self._dll.sc5511a_close_device(self._handle)\n return completed", "title": "" }, { "docid": "404454597e19bef715a50be1fb859400", "score": "0.5702238", "text": "def set_high_power(self, on):\n if on:\n self.spi_write(Register.OCP, RF.OCP_OFF)\n self.spi_write(Register.TESTPA1, 0x5D)\n self.spi_write(Register.TESTPA2, 0x7C)\n else:\n self.spi_write(Register.OCP, RF.OCP_ON | RF.OCP_TRIM_95)\n self.spi_write(Register.TESTPA1, 0x55)\n self.spi_write(Register.TESTPA2, 0x70)", "title": "" }, { "docid": "9399b9f24535374d82607d4e838e50bf", "score": "0.5701382", "text": "def set_power(self, widget, power):\n if widget.get_active():\n self.main.dbus_exec(\"set_power\", (power,))", "title": "" }, { "docid": "565f6b6e5105329625ec7a071294704c", "score": "0.56371427", "text": "def set_power(on):\n try:\n #print (\"setting power to inverted %s -> %s \" %(str(on), str(int(not on))))\n _set_value(\"bl_power\", int(not on))\n except PermissionError:\n _perm_denied()", "title": "" }, { "docid": "d18ede56894dcf8288137a053e783810", "score": "0.56318223", "text": "def SetLow(self, low):\n if low < self._hard_min:\n raise ValueError('%s is too low for wxProperSpinBox.' 
% low)\n self._low = low\n if self.GetValue() < low:\n self.SetValue(low)", "title": "" }, { "docid": "ff669ada66be8fa6a8048190c12c2737", "score": "0.55570495", "text": "def set_low(self):\n digitalWrite(self.pin, 0)", "title": "" }, { "docid": "ea8e90b10742353dd5ff7db4a41721fa", "score": "0.5552095", "text": "def powerOn(self):\r\n self.refreshStat(True)\r\n if self.status:\r\n raise ModemAlreadyOnError\r\n else:\r\n try:\r\n GPIO.output(POWER, GPIO.LOW)\r\n sleep(0.1)\r\n GPIO.output(POWER, GPIO.HIGH)\r\n sleep(0.4)\r\n GPIO.output(POWER, GPIO.LOW)\r\n sleep(0.1)\r\n except:\r\n raise GPIOoutputError\r\n sleep(5)\r\n self.refreshStat(True)\r\n if not self.status:\r\n raise ModemPowerOnError", "title": "" }, { "docid": "4139fbe306478d0f23979b2f0956225d", "score": "0.54885924", "text": "def setPower(self,power):\n self.set_power(power)", "title": "" }, { "docid": "01abaa4d89f6159ebc1a1746248e125b", "score": "0.5486864", "text": "def set_brightness(self, brightness):\n if (int(brightness) > 0):\n \"\"\"Restore the value rescaled with MIN_BRIGHTNESS.\"\"\"\n brightness = min(max(int(brightness / 255 * (255 - MIN_BRIGHTNESS) + MIN_BRIGHTNESS), MIN_BRIGHTNESS), 255)\n value = max(brightness * 100 / 255.0, MIN_BRIGHTNESS_VAL)\n else:\n brightness = value = 0\n if self._control_device(\"brightnessSet\", {\"value\": value}):\n self._update_data(\"brightness\", brightness)", "title": "" }, { "docid": "6ca72eb572bebcc7ef7aa5394a8baa25", "score": "0.54846984", "text": "def set_waveform_brightness(self, brightness=50):\n assert brightness >= 0 and brightness <= 100\n self.open.write(\":DISP:WBR {0}\".format(brightness))", "title": "" }, { "docid": "1c5db73851fbb0e84864599c6769a0f1", "score": "0.5473502", "text": "def set_power_source(self, src):\n if src == \"5V\":\n self._llint.changePowerSource(self._llint.PWR_SRC_5V)\n pass\n elif src == \"host\":\n self._llint.changePowerSource(self._llint.PWR_SRC_HOST)\n pass\n elif src == \"off\" or src is None or src == False:\n self._llint.changePowerSource(self._llint.PWR_SRC_OFF)\n pass\n else:\n raise AttributeError(\"Unknown source %s, valid sources: '5V', 'host', 'off'\")", "title": "" }, { "docid": "082daac75b937360abea7b3b8a7ce8ea", "score": "0.5455126", "text": "def power_on(self):\n svc_type = 'urn:UuVol-com:service:UuVolControl:5'\n response = self._send_cmd('SetPowerState', NewPowerStateValue='ON',\n service_type=svc_type, omitInstanceId=True)\n self._pwrstate = 'on'\n return response", "title": "" }, { "docid": "14d4781532086eae6f65a65b5c21609a", "score": "0.5451575", "text": "def powerset(self):\r\n self.powerflag=True", "title": "" }, { "docid": "f0c10240e258dab203e863df8f02312e", "score": "0.54387885", "text": "def set_power(self):\n warnings.warn(\n \"`set_power` is deprecated, and will be removed in pyuvdata version \"\n \"2.2. 
Use `_set_power` instead.\",\n DeprecationWarning,\n )\n self._set_power()", "title": "" }, { "docid": "e578a3d03c0ae34872427da62cca5f1f", "score": "0.5433522", "text": "def set_power(self, value):\n self.instrument.write('POW ' + str(value) + ' dBm')", "title": "" }, { "docid": "3d86640b6bcda1f0fd069f25a09864f9", "score": "0.5415647", "text": "def set_powerLineFreq(self, freq: LineFreq):\n if self.is_mpcamHost:\n self.log.info(\"camera power-line freq = {}\".format(freq))\n CameraManager.set_param('power_line_frequency', int(freq))", "title": "" }, { "docid": "ac419d5d82bac4ddf9f203b8f61506b3", "score": "0.53835946", "text": "def set_brightness(self, value):\n value = max(0, min(value, 255))\n self.devh.controlMsg(0x40, 13, \"\", min(value, 255), min(value, 255), self.usb_timeout_ms)", "title": "" }, { "docid": "deb1bf9ff6f1d5371d50b281f56be1e7", "score": "0.538305", "text": "def power_on(self):\n self.light_source.on = True", "title": "" }, { "docid": "e102b45a7c6cb7f6468dc224ddc947ab", "score": "0.53519183", "text": "def power_up_avialable(self):\n self.arm_motor.run_forever(speed_sp=self.MAX_SPEED)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop()\n ev3.Sound.beep().wait()\n self.mqtt_client.send_message('change_power_up_state', [])", "title": "" }, { "docid": "054f1e6c8cb179d53177686ef606088b", "score": "0.5351015", "text": "def allLow(self):\n self.Send(\"allLow()\")", "title": "" }, { "docid": "11c226d20eafd98180deb3dbe795feb1", "score": "0.53504676", "text": "def on_lighton(self):\n try:\n self.arm.light_on()\n except usb.core.USBError as usbe:\n self.show_err(str(usbe))", "title": "" }, { "docid": "82220f429dfb33e909c62224019bf89f", "score": "0.53392905", "text": "def power(self, new_power: int):\n if not self._is_active:\n raise RuntimeError(\"This LabBrick has been disconnected!\")\n\n if self._min_pow <= new_power <= self._max_pow:\n power_level = int(new_power / POW_SCALAR)\n set_power_level(self._device_handle, power_level)\n self._power = new_power\n print(\n \"Successfully set power to \"\n + \"{0:{1}}\".format(new_power, \"+\" if new_power else \"\")\n )\n else:\n print(\"Failed to set power - out of bounds\")", "title": "" }, { "docid": "71a9d53d6ed483f44e58b74a8369cc7b", "score": "0.53210896", "text": "def power_on(self):\n\n self._send_and_receive({'power': True})", "title": "" }, { "docid": "63417a4e73b162e84cf058f65ae3bfc5", "score": "0.5298671", "text": "def resp_set_lightpower(self, resp, power_level=None):\n if power_level is not None:\n self.power_level = power_level\n elif resp:\n self.power_level = resp.power_level", "title": "" }, { "docid": "d13581933491b5e72a8966db2b676833", "score": "0.5296905", "text": "def plan_low_floor_vehicle(self, plan_low_floor_vehicle):\n\n self._plan_low_floor_vehicle = plan_low_floor_vehicle", "title": "" }, { "docid": "1806937d265b9d4ba0ad3b4ef8e4ac62", "score": "0.5279072", "text": "def set_power(self,power):\n super(DcMotor,self).set_power(self.motorport,power*100.0)", "title": "" }, { "docid": "a7d021ca5bdea9a814287f0e21b781d0", "score": "0.52754235", "text": "def set_brightness(self, brightness=0):\n brightness = clamp(brightness)\n self.state.brightness = brightness\n self.send_command(Command.SET_BRIGHTNESS, [int(brightness)])", "title": "" }, { "docid": "c88968ef7d56200644b920a9906c0cc4", "score": "0.5265768", "text": "def setPower(self):\n \n self.moveShutter(True)\n fluence = Fluence()\n angle, reference = fluence.calculateWaveplateAngle(\n float(self.txt_power.toPlainText()))\n 
self.Waveplate.moveStage_absolute([angle])\n self.moveShutter(False)", "title": "" }, { "docid": "372ed9e4ee96af1ba58e54967bc36faf", "score": "0.52417445", "text": "def power_limit(self, power_limit):\n self._power_limit = power_limit", "title": "" }, { "docid": "278cd2ea4dc415d077d500bfae91cd87", "score": "0.5223102", "text": "def set_power(self, value, callb=None, duration=0, rapid=False):\n on = [True, 1, \"on\"]\n off = [False, 0, \"off\"]\n if value in on:\n myvalue = MAX_UNSIGNED_16_BIT_INTEGER_VALUE\n else:\n myvalue = 0\n mypartial = partial(self.resp_set_lightpower, power_level=myvalue)\n if callb:\n mycallb = lambda x, y: (mypartial(y), callb(x, y))\n else:\n mycallb = lambda x, y: mypartial(y)\n if not rapid:\n response = self.req_with_ack(\n LightSetPower,\n {\"power_level\": myvalue, \"duration\": duration},\n callb=mycallb,\n )\n else:\n response = self.fire_and_forget(\n LightSetPower,\n {\"power_level\": myvalue, \"duration\": duration},\n num_repeats=1,\n )\n self.power_level = myvalue\n if callb:\n callb(self, None)", "title": "" }, { "docid": "ce7edaf8bf5aedd1dfd24b9969b6c7ac", "score": "0.52205503", "text": "def set_power(power):\n if (power==\"ON\"):\n print \"turning on power...\"\n error = __send_command(\"POWERON\")[0]\n elif (power==\"OFF\"):\n print \"turning off power...\"\n error = __send_command(\"POWEROFF\")[0]\n else:\n print \"unrecognized power argument\", power\n\n return error", "title": "" }, { "docid": "43291605e2881306d1e20251b1332ebd", "score": "0.52150506", "text": "def set_brightness(self, brightness=LED_BRIGHTNESS):\n brightness = LED_BRIGHTNESS if brightness > LED_BRIGHTNESS else brightness\n self.strip.setBrightness(brightness)\n self.strip.show()", "title": "" }, { "docid": "b8da6502d58be1d6704993abb5a7001d", "score": "0.5208481", "text": "def set_brightness(self, brightness) -> None:\n if (\n int(brightness) >= self.settings[\"brightness_min\"]\n and int(brightness) <= self.settings[\"brightness_max\"]\n ):\n command_value = f\"{brightness}\"\n self.sequence_done = False\n self.command_queue.append(\n {\n \"type\": \"command\",\n \"key\": \"brightness\",\n \"value\": command_value,\n }\n )\n else:\n self.logger.error(\n \"Specified brightness isn't supported (%s)\", brightness\n )\n sys.exit(1)", "title": "" }, { "docid": "78a77d2157efaaa4cb72eb0549c9cfec", "score": "0.52027065", "text": "def set_low_limit(self, low_limit):\n self.__lowpos = int(4096.0 * (low_limit / 1000.0) * self.__frequency)\n\n if (self.__lowpos < 0) or (self.__lowpos > 4095):\n raise ValueError('set_low_limit: value out of range')", "title": "" }, { "docid": "1fc97bdbb2c68e618084eb7a2ee6970c", "score": "0.51905465", "text": "async def async_set_poweron(self, poweron):\n await self._try_command(\n \"Error in set_poweron\", \n self._hysen_device.set_poweron, \n HASS_POWERON_TO_HYSEN[poweron])", "title": "" }, { "docid": "488ec403fabae2456153e18236fa31ba", "score": "0.5188205", "text": "def power_enable(self):\n\t\tENABLE_CONFIGURATION = (TMD2671_REG_ENABLE_PON)\n\t\tbus.write_byte_data(TMD2671_DEFAULT_ADDRESS, TMD2671_REG_ENABLE | TMD2671_COMMAND_BIT, ENABLE_CONFIGURATION)", "title": "" }, { "docid": "7d31452a1d79c625e29c926fb4c59542", "score": "0.5170723", "text": "def PowerLowSideDrivers(self, drivers):\n assert len(drivers) == 3, 'Expecting 3 low side driver power settings.'\n byte = 0\n for driver, power in enumerate(drivers):\n byte += (2 ** driver) * int(power)\n self.sci.low_side_drivers(byte)", "title": "" }, { "docid": "27480a370aabf0caed0458163465db7a", "score": 
"0.51627874", "text": "def power(self, power):\n self._power = power", "title": "" }, { "docid": "8e065246718dd2cf1fc0913da46d5b4c", "score": "0.5154129", "text": "def on_lightoff(self):\n try:\n self.arm.light_off()\n except usb.core.USBError as usbe:\n self.show_err(str(usbe))", "title": "" }, { "docid": "e6e275c5a7e37aa565baae203e543a1a", "score": "0.51539314", "text": "def set_port_power(self, port, power):\n assert port in self.ports, f\"This VNA does not have port {port}!\"\n minp = self._port_powers[port][0]\n maxp = self._port_powers[port][1]\n if power < minp or power > maxp:\n raise ValueError(f\"Power level outside allowable range for port {port}: ({minp} - {maxp}) dBm.\")\n self.interface.write(f\"SOUR:POW{port} {power}\")", "title": "" }, { "docid": "1d672f545fc913f070db13e837f44266", "score": "0.51341134", "text": "def power(self, power):\n\n self._power = power", "title": "" }, { "docid": "207d85f114453183123e29f058522d61", "score": "0.5126705", "text": "def is_battery_low(self):\n return self.battery_volts <= self.low_battery_threshold", "title": "" }, { "docid": "ed5c20c587bd7b14dff153fcb7622cdc", "score": "0.512618", "text": "def low_battery(self):\n return self._low_battery", "title": "" }, { "docid": "add596e8609f6959ada6354228646199", "score": "0.51163703", "text": "def power(self):\n\t\tself.command('Power')\n\t\ttime.sleep(0.5)\n\t\tself.command('Power')", "title": "" }, { "docid": "2ca3a69b930c83814940e406db873472", "score": "0.5113195", "text": "def set_output_rf_power(self, power: int):\n self.__send_command(\"PC\", parameter=f\"{power:0>3}\")", "title": "" }, { "docid": "11c464b417261d931ce05635fb82eea1", "score": "0.5110046", "text": "def powerOn(self):\n self.write(\"LOAD ON\")", "title": "" }, { "docid": "1a52dd1110f44044334df0f98672601a", "score": "0.5101822", "text": "def setPowered(self, powered):\n return _enigma.eNetworkTechnologyPtr_setPowered(self, powered)", "title": "" }, { "docid": "93f9e34a851978436bcef9518ec1242d", "score": "0.50956833", "text": "def set_brightness(self, brightness: int) -> None:\n if not self.is_dimmable: # pragma: no cover\n raise SmartDeviceException(\"Bulb is not dimmable.\")\n\n self._raise_for_invalid_brightness(brightness)\n\n light_state = {\"brightness\": brightness}\n self.set_light_state(light_state)", "title": "" }, { "docid": "24b3e79e89335f9220effff5afa0a1d2", "score": "0.5066441", "text": "def low(self):\n\n\t\treturn float(self.device.ask('source{0}:marker{1}:voltage:low?'.format(self.channel, self.number)))", "title": "" }, { "docid": "24c9c25c2faf44cea79769a4d377e03e", "score": "0.5052691", "text": "def _set_power_limits(self,\n min_power: Union[int, float],\n max_power: Union[int, float]) -> None:\n self.source_power.vals = Numbers(min_value=min_power,\n max_value=max_power)", "title": "" }, { "docid": "dfa447b18191384651eff8b521008e23", "score": "0.5052399", "text": "def power_on(self):\n status = self.get_status()\n if status == Status.ON or status == Status.WARM_UP:\n return\n elif status == Status.OFF:\n self._controller.set(cmd='POWR', param='1', reset=True)\n self.emit('status_changed', self._last_status, Status.WARM_UP)\n self._last_status = Status.WARM_UP\n else:\n raise ProjectorException(\n 'Projector is cooling down now. 
Unable to power on.')", "title": "" }, { "docid": "75ff99cac38179e737444543c94fde52", "score": "0.5049121", "text": "def _set_power_limits(self,\n min_power: Union[int, float],\n max_power: Union[int, float]) -> None:\n self.power.vals = Numbers(min_value=min_power,\n max_value=max_power)\n for port in self.ports:\n port._set_power_limits(min_power, max_power)", "title": "" }, { "docid": "9727e0e8a58c82a08b7c56b32ab27a2a", "score": "0.50486964", "text": "def on(self, brightness=255):\n self.current_brightness = brightness", "title": "" }, { "docid": "9450068f6308f4ef43496fc34025ac36", "score": "0.5032433", "text": "def set_pv_installed_power(self, installed_pv):\n self.pvgen.installed_pv = installed_pv", "title": "" }, { "docid": "e2278557afd8e2746677902e913836fd", "score": "0.5026454", "text": "def lower_volume(self):\n # Lower volume if playing, volume isn't already lowered\n # and ducking is enabled\n if (self.normal_volume is None and self.player.is_playing() and\n self.config.get('duck', False)):\n self.normal_volume = self.player.audio_get_volume()\n self.player.audio_set_volume(self.low_volume)", "title": "" }, { "docid": "1136bd2bf55bc819e6b658431ecb2d12", "score": "0.5025349", "text": "def set_hardwareflowcontrol(self, hwflow):\r\n\r\n ret = False\r\n d6, d7 = self.__derive_hardwareflowcontrol(hwflow)\r\n\r\n try:\r\n self._xbee_manager.xbee_device_ddo_set_param(\r\n self._extended_address, 'D6', d6)\r\n self._xbee_manager.xbee_device_ddo_set_param(\r\n self._extended_address, 'D7', d7)\r\n ret = True\r\n except:\r\n pass\r\n\r\n return ret", "title": "" }, { "docid": "e33269de4a3705e50518b22fb2825757", "score": "0.50165015", "text": "def save_brightness(self, off_value, on_value):\n self.devh.controlMsg(0x40, 14, \"\", off_value + on_value * 256, 0, self.usb_timeout_ms)", "title": "" }, { "docid": "810ecb803375719cde8ce7093fe22afe", "score": "0.501261", "text": "def power(self, power: float):\n\n self._power = power", "title": "" }, { "docid": "bc8bf3f981ede2ec4377484be5bb635f", "score": "0.5009178", "text": "def poll_usb_device_enable(self):\n return self._poll_device_enable", "title": "" }, { "docid": "84a08b1f4b799531f9fbc4577c15dc5f", "score": "0.500066", "text": "def enable_measurement(self):\r\n BUS.write_byte_data(self.address, POWER_CTL, MEASURE)", "title": "" }, { "docid": "1a27357bde1008e6d4c4cfe3bca0b434", "score": "0.49971405", "text": "def set_power(self, value, callb=None, rapid=False):\n on = [True, 1, \"on\"]\n off = [False, 0, \"off\"]\n mypartial = partial(self.resp_set_power, power_level=value)\n if callb:\n mycallb = lambda x, y: (mypartial(y), callb(x, y))\n else:\n mycallb = lambda x, y: mypartial(y)\n if value in on and not rapid:\n response = self.req_with_ack(\n SetPower,\n {\"power_level\": MAX_UNSIGNED_16_BIT_INTEGER_VALUE},\n callb=mycallb,\n )\n elif value in off and not rapid:\n response = self.req_with_ack(SetPower, {\"power_level\": 0}, callb=mycallb)\n elif value in on and rapid:\n response = self.fire_and_forget(\n SetPower, {\"power_level\": MAX_UNSIGNED_16_BIT_INTEGER_VALUE}\n )\n self.power_level = MAX_UNSIGNED_16_BIT_INTEGER_VALUE\n elif value in off and rapid:\n response = self.fire_and_forget(SetPower, {\"power_level\": 0})\n self.power_level = 0", "title": "" }, { "docid": "b893320c879094b4d5fc0211e2a3c149", "score": "0.49940455", "text": "def laser_frequency(self, frequency):\n self._set_iio_attr_int(\"altvoltage0\", \"frequency\", True, frequency, self._ctrl)", "title": "" }, { "docid": "1aba12be4ec003210a47ab472e25ea5c", "score": 
"0.499276", "text": "def set_level(self, val):\n speed = self._value_to_fan_speed(val)\n if val == 0:\n self.off()\n else:\n set_command = ExtendedSend(\n self._address, COMMAND_LIGHT_ON_0X11_NONE, self._udata, cmd2=speed\n )\n set_command.set_checksum()\n self._send_method(set_command, self._on_message_received)", "title": "" }, { "docid": "668bf794e855d70e14a8cc60a442bc3a", "score": "0.49722525", "text": "def power_off(self):\n self.light_source.on = False", "title": "" }, { "docid": "b20341643999deeaaa58f91ec8e5ed01", "score": "0.49714926", "text": "def reset_host(self):\n self.wreg(rUSBCTL,bmCHIPRES); #Stop the oscillator.\n self.wreg(rUSBCTL,0x00); #restart it.\n \n #FIXME: Why does the OSC line never settle?\n #Code works without it.\n \n #print \"Waiting for PLL to sabilize.\";\n #while self.rreg(rUSBIRQ)&bmOSCOKIRQ:\n # #Hang until the PLL stabilizes.\n # pass;\n #print \"Stable.\";", "title": "" }, { "docid": "d23d4685608cdc1fe49f2a1d074660c8", "score": "0.4960743", "text": "def setPowered(self, powered):\n return _enigma.eNetworkTechnology_setPowered(self, powered)", "title": "" }, { "docid": "52ff34fdb1c331b2ad4d4630d7f3e757", "score": "0.4945358", "text": "def setFilter(self, lowpass=gaussfilter, bandwidth=0.15):\n if (self.lowpass != lowpass) or (self.bandwidth != bandwidth):\n self.lowpass = lowpass\n self.bandwidth = bandwidth\n self.precalc = np.array([])", "title": "" }, { "docid": "6588beca3950983ed85330e1ee16c7b9", "score": "0.49375993", "text": "def update_power(self):\n power = self.main.dbus_exec(\"get_power\", (), None)\n for item in self.get_children():\n if hasattr(item, \"power\") and item.power == power:\n item.set_active(True)", "title": "" }, { "docid": "92232fa251720cceb3d4f0f083a59645", "score": "0.49338955", "text": "def powerOff(self):\r\n\r\n self.refreshStat(True)\r\n if not self.status:\r\n raise ModemAlreadyOffError\r\n else:\r\n try:\r\n serialCOM.sendATCommand(\"AT+QPOWD\", [\"OK\", \"ERROR\"], 1)\r\n except:\r\n self.status=True\r\n return\r\n else:\r\n self.status=False\r\n sleep(5)\r\n self.refreshStat(True)\r\n if self.status:\r\n raise ModemPowerOffError", "title": "" }, { "docid": "4927ded063b120ab89e162f93db2f8d9", "score": "0.49232027", "text": "def turn_on(self, percentage: int = None, **kwargs) -> None:\n self._device.set_power(True)\n if percentage is not None:\n self.set_percentage(percentage)", "title": "" }, { "docid": "1ca6414f333c08a31299b61b9b84a3f0", "score": "0.49184737", "text": "def set_timed_power(self, brightness, delay, callb=None):\n if \"set_scene\" in self.support:\n self.send_msg(\n {\n \"method\": \"set_scene\",\n \"params\": [\"auto_delay_off\", brightness, delay],\n },\n callb,\n )\n return True\n return False", "title": "" }, { "docid": "4247fe0b28d4312db3b02b2075038408", "score": "0.49120376", "text": "def set_state(self):\n if self.has_brightness is True:\n # when the light has brightness then set max range for turn on\n # and min range for turn off\n self.device.set_value(\n MIN_RANGE if self.state is True else MAX_RANGE)\n else:\n # set device value to 0 when turn off and to 1 when turn on\n self.device.set_value(1 if self.state is False else 0)", "title": "" }, { "docid": "837649398a46b6a9e4d133c699e72454", "score": "0.49118802", "text": "async def brightness_set(\n self, ctx: commands.Context, brightness: int = 254, *, name: Optional[str] = None\n ) -> None:\n if not await self.get_bridge():\n return await ctx.send(\"No IP has been set.\")\n\n def _change(brightness, name):\n for light in self.lights:\n if name is 
None or light.name.lower() == name.lower() and light.on:\n light.brightness = self.max_min_check(brightness, 254, 0)\n\n await ctx.bot.loop.run_in_executor(None, _change, brightness, name)\n await ctx.tick()", "title": "" }, { "docid": "98142644e08ab17898f89f38848eccf2", "score": "0.4911673", "text": "def pulse_lower_volume():\n for sink in pulse.sink_input_list():\n if sink.name != 'mycroft-voice':\n v = sink.volume\n v.value_flat *= 0.3\n pulse.volume_set(sink, v)", "title": "" }, { "docid": "dbee890d97c33b7bc6266b32483cb844", "score": "0.49114764", "text": "def Power(self, on):\r\n if on:\r\n self._SendInstruction(0x0c)\r\n else:\r\n self._SendInstruction(0x08)", "title": "" }, { "docid": "0cf668358f581d36bd3ba295e5df2267", "score": "0.4910375", "text": "def setOutputLow(self, output_num=None):\r\n try:\r\n if output_num:\r\n self.instr.write('INST:NSEL ' + str(output_num))\r\n self.instr.write(':VOLT:RANG LOW')\r\n except visa.VisaIOError:\r\n print('Agilent E3649A set output voltage LOW fails')", "title": "" }, { "docid": "99a951c57fc356390d311d87accef8d7", "score": "0.49072725", "text": "def SetUnitPower(self, unit: str = \"MW\"):\n if unit not in [\"MW\", \"DBM\"]:\n logging.warning(\n \"Bristol671A warning in SetUnitPower() unit not \"\n \" valid\"\n )\n try:\n self.write(\":UNIT:POW {0}\".format(unit))\n except Bristol671Error as err:\n logging.warning(\"Bristol671A warning in SetUnitPower()\" + str(err))", "title": "" }, { "docid": "7cc0ece1acf58fb9ff711a8753bb36e9", "score": "0.48905656", "text": "def _do_set_low_setting(self, val, fire_event=True):\r\n new_values = None\r\n if self._low_setting != val:\r\n\r\n # Save the new setting.\r\n self._low_setting = val\r\n\r\n # If val is 'auto' or 'track', get the corresponding numerical\r\n # value.\r\n if val == 'auto':\r\n if len(self.sources) > 0:\r\n val = min([source.get_bounds()[0]\r\n for source in self.sources])\r\n else:\r\n val = -inf\r\n elif val == 'track':\r\n if len(self.sources) > 0 or self._high_setting != 'auto':\r\n val = self._high_value - self.tracking_amount\r\n else:\r\n val = -inf\r\n\r\n # val is now a numerical value. If it is the same as the current\r\n # value, there is nothing to do.\r\n if self._low_value != val:\r\n self._low_value = val\r\n if self._high_setting == 'track':\r\n self._high_value = val + self.tracking_amount\r\n if fire_event:\r\n self.updated = (self._low_value, self._high_value)\r\n else:\r\n new_values = (self._low_value, self._high_value)\r\n\r\n return new_values", "title": "" }, { "docid": "eb475b2c2e5b4ff2947ab02366d5c1b2", "score": "0.48823687", "text": "def power(self, arg=None, channel=1,warning=True):\r\n try:\r\n if arg is not None:\r\n if arg > 0 and warning:\r\n print('Warning: HI-power')\r\n \r\n self.cmdque('SOUR{}:POW'.format(channel), arg)\r\n \r\n test = np.round(float(self.query('SOUR{}:POW'.format(channel))), 3) \r\n if test != np.round(float(arg), 3):\r\n self.output(0)\r\n print(('Error in setting the power, actual power: {} dBm.' 
+\r\n 'Signal generator output is OFF now!').format(test))\r\n raise Exception('POWER')\r\n else:\r\n return self.cmdque('SOUR{}:POW'.format(channel), arg)\r\n except:\r\n print(\"Device {}\\n does not respond on channel {}\"\r\n .format(self.identify(), channel))", "title": "" }, { "docid": "b2bd27226e9f76b921f22530a40a64c2", "score": "0.4873865", "text": "def allow_soft_power_off():\n return api.request.version.minor >= versions.MINOR_27_SOFT_POWER_OFF", "title": "" }, { "docid": "6a5bf7d862d5f46e3965aed80a2a714b", "score": "0.4852846", "text": "def powered(self, state):\n if state:\n self.device.writeReg(LIS2DS12._CTRL1, 0xf0)\n sleep(0.025)\n else:\n self.device.writeReg(LIS2DS12._CTRL1, 0x00)", "title": "" }, { "docid": "877c7c698b76bc188bbbe3cbe9305bdc", "score": "0.48499337", "text": "def set_brightness(self, value):\n if (self.has_brightness\n and (value > MIN_RANGE and value <= MAX_RANGE)):\n self.device.set_value(value)", "title": "" }, { "docid": "7fc98f6acf5934cc8cabe57681413624", "score": "0.48495835", "text": "def set_level(self, val):\n if val == 0:\n self.off()\n else:\n setlevel = 255\n if val < 1:\n setlevel = val * 100\n elif val <= 0xFF:\n setlevel = val\n set_command = StandardSend(\n self._address, COMMAND_LIGHT_ON_0X11_NONE, cmd2=setlevel\n )\n self._send_method(set_command, self._on_message_received)", "title": "" }, { "docid": "a55621e85c8985a1bfe6a8df217f37e3", "score": "0.4845649", "text": "def resp_set_power(self, resp, power_level=None):\n if power_level is not None:\n self.power_level = power_level\n elif resp:\n self.power_level = resp.power_level", "title": "" }, { "docid": "4969e5efdb15cc1939d305d71b85207c", "score": "0.48419327", "text": "def on(self, brightness=100):\n pass", "title": "" }, { "docid": "4969e5efdb15cc1939d305d71b85207c", "score": "0.48419327", "text": "def on(self, brightness=100):\n pass", "title": "" }, { "docid": "efa3601672b8771c88f66d5f42f8dd79", "score": "0.48417524", "text": "def x4driver_set_tx_power(self, tx_power: 'uint8_t') -> \"void\":\n return _moduleconnectorwrapper.PyXEP_x4driver_set_tx_power(self, tx_power)", "title": "" }, { "docid": "252a32cbdb61508b25b7fce785a680b6", "score": "0.4838537", "text": "def set_usbdev(*args):\n return _ftdi1.set_usbdev(*args)", "title": "" }, { "docid": "3068b2a3f7fe4324ade12f0a82b43286", "score": "0.48192686", "text": "def set_power(self, target):\n power_watts = target/1000\n command = str(\"p \" + power_watts)\n self.print(\"Setting power to\", target, \"mW\")\n self._send_command(command)\n self._target_power = self._send_command(\"p?\")\n return", "title": "" }, { "docid": "245456c11f9fcd08173e62a98a7c2e65", "score": "0.48104498", "text": "def powerOff(self):\n self.write(\"LOAD OFF\")", "title": "" }, { "docid": "49d67125de8046b5a93ddb56327882c4", "score": "0.4807144", "text": "def turn_on_backlight(self):\n self._lcd.setBacklight(0)", "title": "" }, { "docid": "10b7ab5f1c0a2bb3392c3d3bac88c6dd", "score": "0.48042223", "text": "def activation_code_low_warning(self, activation_code_low_warning):\n\n self._activation_code_low_warning = activation_code_low_warning", "title": "" }, { "docid": "3bdf4e9b6f382b462d1cd2cbd54caf6b", "score": "0.48031908", "text": "def low(self, low):\n if low is None:\n raise ValueError(\"Invalid value for `low`, must not be `None`\") # noqa: E501\n\n self._low = low", "title": "" }, { "docid": "5deaae28b556d6153ceefd7ecb6cd0f3", "score": "0.48010653", "text": "def butter_low_pass_filter(signal, sample_rate, low_cut, order=5):\n\n low = low_cut / (0.5 * 
sample_rate)\n b, a = butter(order, low, btype='lowpass')\n return lfilter(b, a, signal)", "title": "" }, { "docid": "3a7968caa9065cdd14740a1bd295fe66", "score": "0.47993064", "text": "def hal_backlight_on(self):\n self.i2c.mem_write(85, self.bl_i2c_addr, 0x08)", "title": "" }, { "docid": "69dc8cd5a13b0ca63e80c9b003361d74", "score": "0.47920567", "text": "def turn_on(self):\n if self._state == STATE_OFF:\n self._device.send_key(\"POWER\")\n\n self._state = STATE_IDLE", "title": "" } ]
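Each row of this dump pairs one query with one positive passage and a long list of hard negatives. Assuming a row has already been parsed into a Python dict, a minimal sketch of flattening it into (query, passage, label) triples for a retrieval trainer might look like this; the toy `row` literal is a stand-in, not a real row.

```python
# Hedged sketch: flatten one parsed row of this dump into labeled triples.
# The `row` literal is a toy stand-in; real rows carry many more negatives.
row = {
    "query": "Sets the low_power_usb of this VmediaPolicyAllOf.",
    "positive_passages": [{"docid": "p1", "text": "def low_power_usb(self, v): ..."}],
    "negative_passages": [{"docid": "n1", "text": "def set_low_power(self): ..."}],
}

def to_triples(row):
    """Yield (query, passage_text, label): 1 for positives, 0 for negatives."""
    for passage in row["positive_passages"]:
        yield row["query"], passage["text"], 1
    for passage in row["negative_passages"]:
        yield row["query"], passage["text"], 0

for query, text, label in to_triples(row):
    print(label, query, "->", text)
```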
7487a15acbb8dfd8d4a407ea4c31057a
Get Holy Day by a calendar date.
[ { "docid": "672203003ea0314e126c6176bb7f0299", "score": "0.7131204", "text": "def getHolyDayByDate(self, requestedDate):\n msg = \"Attempting to retrieve holy days from db\"\n LOGGER.info(msg)\n\n nowUtc = datetime.datetime.now(tz=pytz.utc)\n timezone = pytz.timezone(\"America/Los_Angeles\")\n now = nowUtc.astimezone(timezone)\n targetDate = requestedDate.replace(year=now.year)\n\n # Query the database\n # NOTE: the 'scan' operation in dynamodb is expensive. If the DB ever\n # grows to a very large size (10k+), this will be a bad solution.\n # However, more elegant solutions rely on knowing the keyname of the\n # DB entry which is impossible in our current schema.\n items = []\n try:\n filterExp = Key(\"eventCategory\").eq(\"holyday\")\n response = self.dbTable.scan(\n FilterExpression=filterExp,\n )\n LOGGER.info(\"response: {}\".format(response))\n # If no items are found:\n if response['Count'] == 0:\n LOGGER.info(\"No items found for query.\")\n return []\n # Store found items\n items = response['Items']\n # If DB query raises an error:\n except ClientError as e:\n LOGGER.info(\"query for holy day by date failed.\")\n LOGGER.error(e.response['Error']['Message'])\n return []\n\n # Good, we found some items. Cross reference the target date:\n for item in items:\n year = item.get(\"eventYear\", 1976)\n month = item.get(\"eventMonth\", 11)\n day = item.get(\"eventDay\", 15)\n thisEventDate = datetime.date(year, month, day)\n if thisEventDate == targetDate:\n return item\n return {}", "title": "" } ]
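The positive passage above scans DynamoDB and then matches the requested month/day against each returned item in the current year. The block below isolates that year-agnostic matching step as a stand-alone, stdlib-only sketch; the field names mirror the passage's schema, but the sample items and values are illustrative.

```python
# Hedged sketch of the year-agnostic date match performed after the scan.
# Field names (eventYear/eventMonth/eventDay) mirror the passage; data is fake.
import datetime

def match_holy_day(items, requested_date, today=None):
    """Return the first item whose date equals requested_date shifted to this year."""
    today = today or datetime.date.today()
    target = requested_date.replace(year=today.year)
    for item in items:
        event = datetime.date(item.get("eventYear", today.year),
                              item.get("eventMonth", 1),
                              item.get("eventDay", 1))
        if event == target:
            return item
    return {}

items = [{"eventYear": 2024, "eventMonth": 12, "eventDay": 25, "name": "Christmas"}]
print(match_holy_day(items, datetime.date(1990, 12, 25),
                     today=datetime.date(2024, 6, 1)))  # -> the Christmas item
```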
[ { "docid": "b79f4db962ca72f0fc671163a0e769ec", "score": "0.61988795", "text": "def get_day(self, year, month, day):\n calendar = self.get_shift(year=year, month=month)\n worked_hours = []\n # return next(day_it for day_it in calendar if day_it['day'] == day)\n for day_it in calendar:\n if day_it.get('day') == day:\n worked_hours.append(day_it)\n return worked_hours", "title": "" }, { "docid": "424b2b71b8c5fae8118f38c5da52f145", "score": "0.60981095", "text": "def getDayOfWeek(date):\n cal = Calendar.getInstance()\n cal.setTime(date)\n return cal.get(Calendar.DAY_OF_WEEK)", "title": "" }, { "docid": "b344c699b65656061de17da0ea561d02", "score": "0.60906255", "text": "def getHolyDayByEnum(self, dayEnum):\n msg = \"Attempting to retrieve holy day mass times for {} from db\"\n msg = msg.format(dayEnum)\n LOGGER.info(msg)\n\n # Assemble the primary key for quick DB retrieval\n namespace = \"event:mass:holyday:{}\".format(dayEnum)\n\n # Query the database\n item = None\n try:\n response = self.dbTable.get_item(\n TableName=\"StKilian\",\n Key={'namespace': namespace}\n )\n # If the item is not found:\n if not 'Item' in response:\n return None\n\n # Store the item\n item = response['Item']\n # If the database query raises an error:\n except ClientError as e:\n LOGGER.info(\"get_item for mass times failed.\")\n LOGGER.error(e.response['Error']['Message'])\n return None\n\n # Good, we found the event. Return it.\n return item", "title": "" }, { "docid": "ba84b4fc7d4ab6b13cf24e71070a31ad", "score": "0.5964568", "text": "def day_of_week(input_date = datetime.datetime.today()):\n return input_date.weekday()", "title": "" }, { "docid": "6af0901874408297745bc7491d53b830", "score": "0.5928956", "text": "def get_day(date: str) -> int: \n return int(date[8:])", "title": "" }, { "docid": "28f92b66dc38bce842a519da162c941b", "score": "0.59088385", "text": "def day(date, **params):\n return News().date(date, **params)", "title": "" }, { "docid": "0a2465425ef67d8ca926dd7dbc152b2c", "score": "0.58173126", "text": "def CWof(date):\n return date.isocalendar()[1]", "title": "" }, { "docid": "8f6d0faf9f944294bb8eb66c1a1322bd", "score": "0.57942", "text": "def findHoliday():\n\ttoday = datetime.date.today()\n\tmonth = calendar.month_name[today.month]\n\tday = today.day \n\tcurrent_time = \"{0} {1}\".format(month,day)\n\tprint(current_time)\n\n\t## append holidays to hols\n\thols = []\n\tfor d in holidays:\n\t\tif d[\"day\"] == current_time:\n\t\t\th = d['holiday']\n\t\t\thols.append(h)\n\t\n\t## sometimes there are two entries for a day\n\tif len(hols) > 0:\n\t\tboop = random.choice(hols)\n\t\treturn boop\n\telse:\n\t\treturn \"Royal Spaggetti Day\"", "title": "" }, { "docid": "de0f2849eefffa1ce953b87693c3eeb8", "score": "0.5768516", "text": "def get_day_of_week(condition):\n try:\n raw_date = condition['date']\n _date = datetime.strptime(raw_date, '%Y-%m-%d')\n weekday = DAY_WEEK[_date.weekday()]\n except KeyError:\n raise DayOfWeekException\n\n return weekday", "title": "" }, { "docid": "46ca951ea14f1a95c22c9a6410803bde", "score": "0.57632947", "text": "def get_calendar():\n return calendar", "title": "" }, { "docid": "9a80e63767f7179a6515353c3fc8cdf6", "score": "0.5702107", "text": "def get_hass_date(arw, is_all_day):\n if is_all_day:\n return {'date': ICSCalendarData.get_date_formatted(arw, is_all_day)}\n return {'dateTime': ICSCalendarData.get_date_formatted(arw, is_all_day)}", "title": "" }, { "docid": "46d817d3faa3ccd899e9701fc633317e", "score": "0.5636997", "text": "def meetup_day(year, month, weekday, 
type):\n\n weekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',\n 'Saturday', 'Sunday']\n\n first_weekday, last_day = monthrange(year, month)\n\n # type tells us which seven days of the month the requested date must be in\n # this is the first date of these seven days\n first_day_of_week = {'1st': 1,\n '2nd': 8,\n 'teenth': 13,\n '3rd': 15,\n '4th': 22,\n 'last': last_day - 6}[type]\n\n first_day_of_week_weekday = first_weekday + first_day_of_week - 1\n\n # how many days between the first of the seven days and the wanted weekday\n delta_days = weekdays.index(weekday) - first_day_of_week_weekday\n\n # go only forward from the first day\n delta_days %= 7\n\n return date(year, month, first_day_of_week + delta_days)", "title": "" }, { "docid": "853e9aad86d5e260171a21fd61f1e3a7", "score": "0.56282246", "text": "def weekday(year, month, day):\n return datetime.date(year, month, day).weekday()", "title": "" }, { "docid": "ed287274481e476fb7fd5bf296a585e1", "score": "0.5619509", "text": "def get_day(date: str) -> str:\n return date.split('/')[1]", "title": "" }, { "docid": "43fcaa192aac7159d2dc573f1bc80e2f", "score": "0.56032026", "text": "def weekday_of_birth_date(date):\n return days[calendar.weekday(date.year, date.month, date.day)]", "title": "" }, { "docid": "9e59cf1623317aeb56d019aeb3e2bfed", "score": "0.552493", "text": "def getWeekDay(dt):\n year, month, day = (int(x) for x in dt.split('-')) \n weekDay = datetime.date(year, month, day).weekday() \n return weekDay", "title": "" }, { "docid": "e320b4e31a5859a30ffd6ba7a12baf71", "score": "0.5509973", "text": "def getBusinessDay(cdate, Delta, COUNTRY_HOLIDAYS=default_holidays):\n \n #check if Weekend\n if cdate.weekday() == 5: #check if Saturday\n if Delta > 0:\n bdate = next_weekday(cdate,2) \n elif Delta < 0:\n bdate = prev_weekday(cdate,2)\n \n elif cdate.weekday() == 6: #check if Sunday\n if Delta > 0:\n bdate = next_weekday(cdate,1) \n elif Delta < 0:\n bdate = prev_weekday(cdate,1)\n\n else:\n bdate = cdate\n \n LIST_HOLIDAYS=[]\n for date,name in getHolidays(COUNTRY_HOLIDAYS,[bdate.year]):\n LIST_HOLIDAYS.append(date.strftime('%d/%m/%Y'))\n\n \n while bdate.strftime('%d/%m/%Y') in LIST_HOLIDAYS:\n if Delta > 0:\n bdate = next_weekday(bdate,1) \n elif Delta < 0:\n bdate = prev_weekday(bdate,1)\n \n return bdate", "title": "" }, { "docid": "2a60e1fe0303469436de283850ed5dfc", "score": "0.5504735", "text": "def getEventByDay(self, day):\n datetime_of_given_day = self.tb.getThisWeekDatetimeByDay(day)\n\n start_of_given_day = self.tb.formatStartOfDay(datetime_of_given_day)\n end_of_given_day = self.tb.formatEndOfDay(datetime_of_given_day)\n\n events_on_this_day = db.engine.execute( \n \"SELECT * FROM event WHERE datetime BETWEEN '{}' AND '{}';\".format(start_of_given_day, end_of_given_day)\n )\n\n event_description = \"Event(s) on \" + day + \"\\n\\n\"\n has_event = False \n\n for event in events_on_this_day:\n event_description += (self.getEventDescription(event))\n has_event = True\n \n if has_event:\n return Response(text=event_description, has_markup=True, reply_markup=None)\n\n return Response(text=\"Seems like nothing is happening this day\", has_markup=True, reply_markup=None)", "title": "" }, { "docid": "276a797c7427831514a8f6cfd062bb9b", "score": "0.5483429", "text": "def find_horoscope(date, db):\n\tday = date[5:]\n\n\tsign = db.signs.find_one({\"$and\":[ {\"start\":{\"$lte\":day}}, {\"end\":{\"$gte\":day}}]})\n\n\tif (sign is None):\n\t\treturn db.signs.find_one({\"name\":\"Capricorn\"})\n\t# because of using string 
comparison to find appropriate sign, this fails\n\t# for capricorn because it wraps around dec to jan. A more sophistocated program\n\t# would use date objects that took into account the wraparound.\n\n\treturn sign", "title": "" }, { "docid": "1a6db4c26d5996e089d4efe0daaa03fe", "score": "0.54654205", "text": "def get_valid_date(holiday: date_class) -> date_class:\n if not holiday:\n raise RuntimeError('No date passed in. A date is required to get a valid payday.')\n if not isinstance(holiday, date_class):\n raise RuntimeError('A date class object was not passed.')\n\n # for a weekday, placeholder for Monday is 0 and Sunday is 6\n\n num_of_days_after_holiday = 1\n week_day_placeholder = (holiday + timedelta(days=num_of_days_after_holiday)).weekday()\n while week_day_placeholder in {5,6}:\n num_of_days_after_holiday += 1\n week_day_placeholder = (holiday + timedelta(days=num_of_days_after_holiday)).weekday()\n \n num_of_days_before_holiday = 1\n week_day_placeholder = (holiday - timedelta(days=num_of_days_before_holiday)).weekday()\n while week_day_placeholder in {5,6}:\n num_of_days_before_holiday += 1\n week_day_placeholder = (holiday - timedelta(days=num_of_days_before_holiday)).weekday()\n\n # POST PROCESS\n # Note: If the number of days before and after are same, the default date will be next day.\n if num_of_days_before_holiday < num_of_days_after_holiday:\n return holiday - timedelta(days=num_of_days_before_holiday)\n else:\n return holiday + timedelta(days=num_of_days_after_holiday)", "title": "" }, { "docid": "f2088c599981ee4a3a2bae6c4ace63d3", "score": "0.5458922", "text": "def get_schedule(day, hour):\n\n return Semigroup.get_by_day_and_hour(day, hour)", "title": "" }, { "docid": "ffe43d686e5af553ffd1471ee64ffb0f", "score": "0.5455263", "text": "def getHour24(date):\n cal = Calendar.getInstance()\n cal.setTime(date)\n return cal.get(Calendar.HOUR_OF_DAY)", "title": "" }, { "docid": "11e3de3eb974a40d1b77eec4a62c6951", "score": "0.5450243", "text": "def getDayOfYear(date):\n cal = Calendar.getInstance()\n cal.setTime(date)\n return cal.get(Calendar.DAY_OF_YEAR)", "title": "" }, { "docid": "9933be84e65e24328ab31044c5ba2a57", "score": "0.5449267", "text": "def get_hass_date(arw, is_all_day):\n if is_all_day:\n return {'date': MealPlannerEventDevice.get_date_formatted(arw, is_all_day)}\n return {'dateTime': MealPlannerEventDevice.get_date_formatted(arw, is_all_day)}", "title": "" }, { "docid": "63b0b2733ff1b28344b07d317e103352", "score": "0.5445511", "text": "def sunday_week_of_the(date):\n return _date_weekday_week_containing(date = date,\n weekday = _IDX_SUNDAY)", "title": "" }, { "docid": "1e177bca8d65fc8c5dabf2a9292d4307", "score": "0.5439342", "text": "async def _async_find_candidate_date(self, day1: date) -> date:\n week = day1.isocalendar()[1]\n weekday = day1.weekday()\n year = day1.year\n if self._frequency in [\"weekly\", \"even-weeks\", \"odd-weeks\", \"every-n-weeks\"]:\n # Everything except montthly\n # convert to every-n-weeks\n if self._frequency == \"weekly\":\n period = 1\n first_week = 1\n elif self._frequency == \"even-weeks\":\n period = 2\n first_week = 2\n elif self._frequency == \"odd-weeks\":\n period = 2\n first_week = 1\n else:\n period = self._period\n first_week = self._first_week\n offset = -1\n if (week - first_week) % period == 0: # Collection this week\n for day_name in self._collection_days:\n day_index = WEEKDAYS.index(day_name)\n if day_index >= weekday: # Collection still did not happen\n offset = day_index - weekday\n break\n iterate_by_week = 7 - 
weekday + WEEKDAYS.index(self._collection_days[0])\n while offset == -1: # look in following weeks\n candidate = day1 + relativedelta(days=iterate_by_week)\n week = candidate.isocalendar()[1]\n if (week - first_week) % period == 0:\n offset = iterate_by_week\n break\n iterate_by_week += 7\n return day1 + relativedelta(days=offset)\n elif self._frequency == \"every-n-days\":\n try:\n if (day1 - self._first_date).days % self._period == 0:\n return day1\n offset = self._period - ((day1 - self._first_date).days % self._period)\n except TypeError:\n raise ValueError(\n f\"({self._name}) Please configure first_date and period \"\n \"for every-n-days collection frequency.\"\n )\n return day1 + relativedelta(days=offset)\n elif self._frequency == \"monthly\":\n # Monthly\n if self._period is None or self._period == 1:\n return await self._async_monthly_candidate(day1)\n else:\n candidate_date = await self._async_monthly_candidate(day1)\n while (candidate_date.month - self._first_month) % self._period != 0:\n candidate_date = await self._async_monthly_candidate(\n candidate_date + relativedelta(days=1)\n )\n return candidate_date\n elif self._frequency == \"annual\":\n # Annual\n try:\n conf_date = datetime.strptime(self._date, \"%m/%d\").date()\n except TypeError:\n raise ValueError(\n f\"({self._name}) Please configure the date \"\n \"for annual collection frequency.\"\n )\n candidate_date = date(year, conf_date.month, conf_date.day)\n if candidate_date < day1:\n candidate_date = date(year + 1, conf_date.month, conf_date.day)\n return candidate_date\n elif self._frequency == \"group\":\n candidate_date = None # type: ignore\n try:\n for entity_id in self._entities:\n entity = self.hass.data[DOMAIN][SENSOR_PLATFORM][entity_id]\n d = await entity.async_find_next_date(day1)\n if candidate_date is None or d < candidate_date:\n candidate_date = d\n except KeyError:\n raise ValueError\n except TypeError:\n _LOGGER.error(\"(%s) Please add entities for the group.\", self._name)\n raise ValueError\n return candidate_date\n _LOGGER.error(\"(%s) Unknown frequency %s\", self._name, self._frequency)\n raise ValueError", "title": "" }, { "docid": "d274ef67c5b11ad5f22764a7b7ef13e1", "score": "0.54360795", "text": "def extractDay(self, input):\n day = self.extractDays(input)\n if day:\n return day[0]\n return None", "title": "" }, { "docid": "e19c55766c5e09cc512975710d4fa0fb", "score": "0.5429785", "text": "def tuesday_week_of_the(date):\n return _date_weekday_week_containing(date = date,\n weekday = _IDX_TUESDAY)", "title": "" }, { "docid": "dba37ec6b29efbf9821d2c3873e17d51", "score": "0.5421003", "text": "def get_day(self, **kwargs):\n year = kwargs.get('year')\n month = kwargs.get('month')\n day = kwargs.get('day')\n result = None\n if year and month and day:\n try:\n result = date(int(year, 10), int(month, 10), int(day, 10))\n except: # Invalid input\n pass\n if not result:\n result = date.today()\n return result", "title": "" }, { "docid": "82557cd1151f968de33246a099c680f2", "score": "0.53950244", "text": "def test_calendar_day(self):\n date = iso.Date()\n base = iso.Date()\n base_overflow = iso.Date()\n base = iso.Date(century=19, year=69, month=7, day=20)\n base_overflow = iso.Date(century=19, year=69, month=7, day=21)\n base_max = iso.Date(century=99, year=99, month=12, day=25)\n date = iso.Date(century=19, year=69, month=7, day=20)\n self.assertTrue(\n date.get_calendar_day() == (19, 69, 7, 20), \"simple case\")\n try:\n date = iso.Date(year=69, month=7, day=20)\n self.fail(\"truncation without 
base\")\n except iso.DateTimeError:\n pass\n date = iso.Date(year=69, month=7, day=20, base=base)\n self.assertTrue(date.get_calendar_day() == (19, 69, 7, 20),\n \"truncated century\")\n date = iso.Date(year=69, month=7, day=20, base=base_overflow)\n self.assertTrue(date.get_calendar_day() == (20, 69, 7, 20),\n \"truncated century with overflow\")\n date = iso.Date(month=7, day=20, base=base)\n self.assertTrue(date.get_calendar_day() == (19, 69, 7, 20),\n \"truncated year\")\n date = iso.Date(month=7, day=20, base=base_overflow)\n self.assertTrue(date.get_calendar_day() == (19, 70, 7, 20),\n \"truncated year with overflow\")\n date = iso.Date(day=20, base=base)\n self.assertTrue(date.get_calendar_day() == (19, 69, 7, 20),\n \"truncated month\")\n date = iso.Date(day=20, base=base_overflow)\n self.assertTrue(date.get_calendar_day() == (19, 69, 8, 20),\n \"truncated month with overflow\")\n try:\n incomplete = iso.Date(century=19, year=69, month=7)\n date = iso.Date(day=20, base=incomplete)\n self.fail(\"incomplete base with truncation\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(base=base)\n self.fail(\"empty constructor with base\")\n except ValueError:\n pass\n try:\n date = iso.Date(day=1, base=base_max)\n self.fail(\"10001-01-01: illegal date\")\n except ValueError:\n pass\n date = iso.Date(century=19, year=69, month=7)\n self.assertTrue(date.get_calendar_day() == (19, 69, 7, None),\n \"month precision\")\n date = iso.Date(century=19, year=69)\n self.assertTrue(date.get_calendar_day() == (19, 69, None, None),\n \"year precision\")\n date = iso.Date(century=19)\n self.assertTrue(date.get_calendar_day() == (19, None, None, None),\n \"century precision\")\n base_overflow = iso.Date(century=19, year=69, month=8, day=1)\n date = iso.Date(year=69, month=7, base=base)\n self.assertTrue(date.get_calendar_day() == (\n 19, 69, 7, None), \"month precision, truncated century\")\n date = iso.Date(year=69, month=7, base=base_overflow)\n self.assertTrue(date.get_calendar_day() == (20, 69, 7, None),\n \"month precision, truncated century with overflow\")\n date = iso.Date(month=7, base=base)\n self.assertTrue(date.get_calendar_day() == (19, 69, 7, None),\n \"month precision, truncated year\")\n date = iso.Date(month=7, base=base_overflow)\n self.assertTrue(date.get_calendar_day() == (19, 70, 7, None),\n \"month precision, truncated year with overflow\")\n base_overflow = iso.Date(century=19, year=69, month=1, day=1)\n date = iso.Date(year=69, base=base)\n self.assertTrue(date.get_calendar_day() == (19, 69, None, None),\n \"year precision, truncated century\")\n date = iso.Date(year=68, base=base_overflow)\n self.assertTrue(date.get_calendar_day() == (20, 68, None, None),\n \"year precision, truncated century with overflow\")\n try:\n date = iso.Date(century=100, year=69, month=7, day=20)\n self.fail(\"bad century\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(century=19, year=100, month=7, day=20)\n self.fail(\"bad year\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(century=19, year=69, month=13, day=20)\n self.fail(\"bad month\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(century=19, year=0, month=2, day=29)\n self.fail(\"bad day\")\n except iso.DateTimeError:\n pass", "title": "" }, { "docid": "08c8adc3c5be04ddbc9f72d0f38bc7b4", "score": "0.537107", "text": "def _FirstSunday(self, dt):\n return dt + timedelta(days=(6-dt.weekday()))", "title": "" }, { "docid": "6881b9be85c185987b6def88f2fe7528", "score": "0.53692913", "text": "def 
test_week_day(self):\n date = iso.Date()\n base = iso.Date(century=19, year=69, month=7, day=20)\n base_overflow = iso.Date(century=19, year=69, month=7, day=21)\n date = iso.Date(century=19, decade=6, year=9, week=29, weekday=7)\n self.assertTrue(date.get_week_day() == (19, 6, 9, 29, 7),\n \"simple case\")\n self.assertTrue(date.get_calendar_day() == (19, 69, 7, 20),\n \"calendar cross check\")\n date = iso.Date(century=19, decade=6, year=9, week=1, weekday=1)\n self.assertTrue(date.get_calendar_day() == (\n 19, 68, 12, 30), \"calendar cross check underflow\")\n date = iso.Date(century=19, decade=7, year=0, week=53, weekday=5)\n self.assertTrue(date.get_calendar_day() == (19, 71, 1, 1),\n \"calendar cross check overflow\")\n try:\n date = iso.Date(decade=6, year=9, week=29, weekday=7)\n self.fail(\"truncation without base\")\n except iso.DateTimeError:\n pass\n date = iso.Date(decade=6, year=9, week=29, weekday=7, base=base)\n self.assertTrue(date.get_week_day() == (19, 6, 9, 29, 7),\n \"truncated century\")\n date = iso.Date(decade=6, year=9, week=29, weekday=7,\n base=base_overflow)\n self.assertTrue(date.get_week_day() == (20, 6, 9, 29, 7),\n \"truncated century with overflow\")\n date = iso.Date(year=9, week=29, weekday=7, base=base)\n self.assertTrue(date.get_week_day() == (19, 6, 9, 29, 7),\n \"truncated decade\")\n date = iso.Date(year=9, week=29, weekday=7, base=base_overflow)\n self.assertTrue(date.get_week_day() == (19, 7, 9, 29, 7),\n \"truncated decade with overflow\")\n date = iso.Date(week=29, weekday=7, base=base)\n self.assertTrue(date.get_week_day() == (19, 6, 9, 29, 7),\n \"truncated year\")\n date = iso.Date(week=29, weekday=7, base=base_overflow)\n self.assertTrue(date.get_week_day() == (19, 7, 0, 29, 7),\n \"truncated year with overflow\")\n date = iso.Date(weekday=7, base=base)\n self.assertTrue(date.get_week_day() == (19, 6, 9, 29, 7),\n \"truncated week\")\n date = iso.Date(weekday=1, base=base_overflow)\n self.assertTrue(date.get_week_day() == (19, 6, 9, 30, 1),\n \"truncated week with overflow\")\n date = iso.Date(century=19, decade=6, year=9, week=29)\n self.assertTrue(date.get_week_day() == (19, 6, 9, 29, None),\n \"week precision\")\n date = iso.Date(century=19, year=69, month=7)\n try:\n date.get_week_day()\n self.fail(\"month precision\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(century=19, decade=6, year=9)\n self.fail(\"year precision\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(century=19, decade=6)\n self.fail(\"decade precision\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(century=19, decade=6, year=9, week=-1, weekday=1)\n self.fail(\"negative week\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(century=20, decade=1, year=6, week=53, weekday=1)\n self.fail(\"too large week\")\n except iso.DateTimeError:\n pass\n base_overflow = iso.Date(century=19, decade=6, year=9, week=30,\n weekday=2)\n date = iso.Date(decade=6, year=9, week=29, base=base)\n self.assertTrue(date.get_week_day() == (19, 6, 9, 29, None),\n \"week precision, truncated century\")\n date = iso.Date(decade=6, year=9, week=29, base=base_overflow)\n self.assertTrue(date.get_week_day() == (20, 6, 9, 29, None),\n \"week precision, truncated century with overflow\")\n date = iso.Date(year=9, week=29, base=base)\n self.assertTrue(date.get_week_day() == (19, 6, 9, 29, None),\n \"week precision, truncated decade\")\n date = iso.Date(year=9, week=29, base=base_overflow)\n self.assertTrue(date.get_week_day() == (19, 7, 9, 29, 
None),\n \"week precision, truncated decade with overflow\")\n date = iso.Date(week=29, base=base)\n self.assertTrue(date.get_week_day() == (19, 6, 9, 29, None),\n \"week precision, truncated year\")\n date = iso.Date(week=29, base=base_overflow)\n self.assertTrue(date.get_week_day() == (19, 7, 0, 29, None),\n \"week precision, truncated year with overflow\")\n date = iso.Date(weekday=1, base=base_overflow)\n self.assertTrue(date.get_week_day() == (19, 6, 9, 31, 1),\n \"weekday precision, truncated week with overflow\")\n date = iso.Date(century=20, decade=1, year=5, week=1, weekday=3)\n self.assertTrue(date.get_calendar_day() == (20, 14, 12, 31),\n \"underflow on calendar conversion (non leap)\")\n date = iso.Date(century=20, decade=1, year=6, week=1, weekday=3)\n self.assertTrue(date.get_calendar_day() == (20, 16, 1, 6),\n \"convert to calendar (leap year)\")\n date = iso.Date(weekday=1, base=iso.Date(century=20, decade=1, year=6,\n week=52, weekday=3))\n self.assertTrue(date.get_calendar_day() == (20, 17, 1, 2),\n \"convert to calendar (leap year) with overflow\")\n date = iso.Date(century=20, year=16, month=1, day=1)\n self.assertTrue(date.get_week_day() == (20, 1, 5, 53, 5),\n \"underflow on week conversion\")\n date = iso.Date(century=20, year=14, month=12, day=31)\n self.assertTrue(date.get_week_day() == (20, 1, 5, 1, 3),\n \"overflow on week conversion\")\n try:\n date = iso.Date(decade=6, year=9, base=base)\n self.fail(\"year precision, truncated century\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(decade=6, base=base)\n self.fail(\"decade precision, truncated century\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(century=100, decade=6, year=9, week=29, weekday=7)\n self.fail(\"bad century\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(century=19, decade=10, year=9, week=29, weekday=7)\n self.fail(\"bad decade\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(century=19, decade=6, year=10, week=29, weekday=7)\n self.fail(\"bad year\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(century=19, decade=6, year=8, week=53, weekday=1)\n self.fail(\"bad week\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(century=19, decade=6, year=8, week=52, weekday=8)\n self.fail(\"bad day\")\n except iso.DateTimeError:\n pass", "title": "" }, { "docid": "7da948fbb53b2e0f8001d48d95197647", "score": "0.5365521", "text": "def get_factory(self, holiday):\n return self.factory_map[holiday]", "title": "" }, { "docid": "9075f6821962fd8b8efb51e28353d526", "score": "0.53625095", "text": "def _first_instance_weekday_after(date, weekday):\n return date + datetime.timedelta(\n days = ((weekday - date.weekday()) % _DAYS_IN_WEEK))", "title": "" }, { "docid": "77cd3ae35f2774ca78a32f723a935ce6", "score": "0.5346407", "text": "def _date_weekday_week_containing(date, weekday):\n return date + datetime.timedelta(days = weekday - date.weekday())", "title": "" }, { "docid": "f0494e93ac57a5ae6f4a8820711a7b6c", "score": "0.53424585", "text": "def saturday_week_of_the(date):\n return _date_weekday_week_containing(date = date,\n weekday = _IDX_SATURDAY)", "title": "" }, { "docid": "3982f0169c77238f98a4cd800c168adf", "score": "0.5335708", "text": "def get_day_of_week():\n comp_df = pd.read_csv(os.path.join(os.path.dirname(__file__), \"../../sentiment-tagging/data/vader_compound_dailies.csv\"),\n parse_dates=['day'], infer_datetime_format=True)\n days = (comp_df.index.to_numpy() - 1) % 7\n return days.reshape(-1, 1)", "title": "" }, {
"docid": "61a96b8ac6c2ea294b2f8e0f1cd69ea0", "score": "0.5334901", "text": "def weekday_of_birth_date(date):\n bday_weekday = calendar.weekday(date.year, date.month, date.day)\n weekdays = {0: \"Monday\",\n 1: \"Tuesday\",\n 2: \"Wednesday\",\n 3: \"Thursday\",\n 4: \"Friday\",\n 5: \"Saturday\",\n 6: \"Sunday\"}\n return weekdays[bday_weekday]", "title": "" }, { "docid": "02ce2d0d2053bbdcd90ee0d778bb03c0", "score": "0.5326399", "text": "def day_or_night(now=False):\n if now:\n solar = get_sunrise_and_sunset()\n if not (solar['sunrise'] < datetime.now() < solar['sunset']):\n return 'night'\n\n return 'day'", "title": "" }, { "docid": "1add5cfee80eafdb286a6f1850770560", "score": "0.5321105", "text": "def get_day():\n d = datetime.weekday(datetime.today())\n if d == 0:\n return 'Monday'\n elif d == 1:\n return 'Tuesday'\n elif d == 2: \n return 'Wednesday'\n elif d == 3:\n return 'Thursday'\n elif d == 4:\n return 'Friday'\n elif d == 5:\n return 'Saturday'\n else:\n return 'Sunday'", "title": "" }, { "docid": "11edfc57fb9b1529e23a237b363d3ac2", "score": "0.53167754", "text": "def is_today_holiday():\n auth_token = user.get_user_token(os.getenv('TEST_USER'))\n return groupware.check_date_is_holiday(auth_token)", "title": "" }, { "docid": "f9e3f3f66c8933943899d3b36c59e184", "score": "0.5301616", "text": "def getDay(argument):\n city_day = {\n 1: \"Monday\",\n 2: \"Tuesday\",\n 3: \"Wednesday\",\n 4: \"Thursday\",\n 5: \"Friday\",\n 6: \"Saturday\",\n 7: \"Sunday\",\n 0: \"All\"\n }\n return city_day.get(argument, \"Invalid Day\")", "title": "" }, { "docid": "9271f84cfcf601a9aa1852f0c26c20c7", "score": "0.530159", "text": "def get_holiday_schedule(self):\n return self.call_api('sched', 'holiday')", "title": "" }, { "docid": "81aa4f255c29600406c8bb58fb245bec", "score": "0.52954", "text": "def Out_day(cal, y, m, d):\r\n flag = 0\r\n for i in cal:\r\n if y == i['year'] and m == i['month'] and d == i['day']:\r\n print \"\\n-------------------------------\\nAt \", y, m, d, \\\r\n \"\\n\", i['teams'], i['score'], ' total', i['total'], \\\r\n \"\\n--------------------------------\"\r\n flag = 1\r\n if flag == 0:\r\n print \"Day not found\"", "title": "" }, { "docid": "d6e79fb1ea62add6802d4a08ed8574ae", "score": "0.52865076", "text": "def day(self) -> Optional[pulumi.Input[Union[str, 'ScheduleDay']]]:\n return pulumi.get(self, \"day\")", "title": "" }, { "docid": "32b1d6027012e18d12fe67153ae851c0", "score": "0.52808684", "text": "def wednesday_week_of_the(date):\n return _date_weekday_week_containing(date = date,\n weekday = _IDX_WEDNESDAY)", "title": "" }, { "docid": "594f9a1fd92363b811038e120bcd5a5a", "score": "0.52806437", "text": "def run():\n year = int(utils.get_input(\"Year (1583 - ...) > \"))\n month = int(utils.get_input(\"Month (1 - 12) > \"))\n ndays = dd.ndays_in_month(month, year)\n day = int(utils.get_input(\"Day (1 - {}) > \".format(ndays)))\n date = dd.Date(year, month, day)\n print(\"{} is a {}.\".format(date, date.weekday(h=True)))", "title": "" }, { "docid": "c50031838209bd5bd160d8be94846a4c", "score": "0.527768", "text": "def find_week_from_date(t):\n return t.isocalendar()[:2]", "title": "" }, { "docid": "ca107044e9413cf6568ea1e7c50fc67a", "score": "0.5275483", "text": "def _get_horoscope(self, day='today'):\n if not is_valid_day(day):\n raise HoroscopeException(\"Invalid day. 
Allowed days: [today|yesterday|tomorrow]\" )\n\n horoscope = ''.join([str(s).strip() for s in self.tree.xpath('//*[@id=\"%s\"]/p/text()' % day)])\n\n if day == 'yesterday':\n date = self.date_today - timedelta(days=1)\n elif day == 'today':\n date = self.date_today\n elif day == 'tomorrow':\n date = self.date_today + timedelta(days=1)\n\n return {\n 'date': date.strftime(\"%Y-%m-%d\"),\n 'sunsign': self.sunsign.capitalize(),\n 'horoscope': horoscope + \"(c) Kelli Fox, The Astrologer, http://new.theastrologer.com\",\n 'meta': self._get_horoscope_meta(day),\n 'credit': '(c) Kelli Fox, The Astrologer, http://new.theastrologer.com'\n }", "title": "" }, { "docid": "64855e5837e7fa7b97737e0c36976a37", "score": "0.527155", "text": "def day_from_date(fmt_date):\n return fmt_date.split(\" \")[0]", "title": "" }, { "docid": "89852f2fa0f2d7eeeaa7afd66cccc683", "score": "0.5264488", "text": "def celebrate():\n day = datetime.today().date()\n today = datetime.now().strftime(\"%d-%B\")\n us_holidays = CountryHoliday('US').get(day) # checks if the current date is a US holiday\n in_holidays = CountryHoliday('IND', prov='TN', state='TN').get(day) # checks if Indian (esp TN) holiday\n if in_holidays:\n return in_holidays\n elif us_holidays and 'Observed' not in us_holidays:\n return us_holidays\n elif today == birthday:\n return 'Birthday'", "title": "" }, { "docid": "76ad674cf9757adc2ce232f1a33b21b2", "score": "0.5260292", "text": "def test_contest_get_game_for_date(self):\n self.initialize() \n contest_helper = ContestHelper()\n\n # no game yesterday\n the_date = datetime.today() - timedelta(1)\n try:\n contest_helper.get_game_for_date(self.contest, the_date)\n raise Exception(\"No NoGameException raised!\")\n except NoGameException:\n the_game = None\n self.assertEquals(the_game, None)\n\n # game1 today\n the_date = datetime.now()\n the_game = contest_helper.get_game_for_date(self.contest, the_date)\n self.assertEquals(the_game, self.game1)\n\n # game2 tomorrow\n the_date = datetime.today() + timedelta(1)\n the_game = contest_helper.get_game_for_date(self.contest, the_date)\n self.assertEquals(the_game, self.game2)", "title": "" }, { "docid": "b1a03fd699b0362f25cb49e6ae089bb3", "score": "0.52548796", "text": "def Date(date, dt=0, dayofweek=None, dateformat=\"%Y%m%d\"):\n d = datetime.strptime(str(date),\"%Y%m%d\") + timedelta(days=dt)\n if dayofweek!=None:\n if isinstance( dayofweek, int ):\n if d.isoweekday() == 7:\n isoweekday = 0;\n else:\n isoweekday = d.isoweekday()\n d = ( d - timedelta(days=isoweekday+dayofweek) )\n else:\n printf(\"Unknown value for dayofweek in the Date function !\", \"warning\")\n printf(\"Ignoring the dayofweek option in the Date function ...\", \"warning\")\n return d.strftime(dateformat)", "title": "" }, { "docid": "dc45007aea7ccf0400c55f23daba3857", "score": "0.52508295", "text": "def _get_day(self):\n if self.date.weekday() < cn.SATURDAY:\n day_type = 'weekday'\n else:\n day_type = 'weekend'\n return day_type", "title": "" }, { "docid": "ae5a1933ebef37e0d43934cc34215add", "score": "0.5246121", "text": "def _FirstSunday(self, dt):\r\n return dt + timedelta(days=(6-dt.weekday()))", "title": "" }, { "docid": "5fab22f77abfe8d3a0de3f6efaf89ad7", "score": "0.52413124", "text": "def iso2h_day_convert(iso_day):\n if 1 <= iso_day <= 6:\n day = iso_day + 1\n elif iso_day == 7:\n day = 1\n else:\n day = None\n return day", "title": "" }, { "docid": "d32d1241f27fe8476bb9ae90a4248b8b", "score": "0.5240029", "text": "def to_day_of_week_int(date):\n\n return date.weekday()", "title": ""
}, { "docid": "4fe8a75474f1a0924f9a05e7044b2db1", "score": "0.52320117", "text": "def test_weekend(self):\n with self.assertRaises(DateException):\n menu = fetch('2016-05-21')", "title": "" }, { "docid": "25190f86ec1d97db2c92bd58e590fd1e", "score": "0.52319175", "text": "def get_day(self):\n if self.airing_datetime:\n day = self.airing_datetime.weekday()\n current_day = datetime.now(timezone('US/Pacific')).weekday()\n\n if day >= current_day:\n days = day - current_day\n else:\n days = day - current_day + 7\n\n if days == 0:\n return \"Newest episode airs today!\"\n else:\n return f\"{days} days until new episode\"\n else:\n return \"No airdate information.\"", "title": "" }, { "docid": "9b51c12a1c2946e64561f9ddc32d2c69", "score": "0.5222249", "text": "def getDayENG(num = 'None'):\n import time\n days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\n if num != 'None':\n return days[num - 1]\n return days[time.localtime().tm_wday]", "title": "" }, { "docid": "d574caf30e046ffb3c4ac7907fc96932", "score": "0.52200717", "text": "def hong_kong_rules(dt):\n prc_day = datetime(dt.year,10,1)\n if prc_day.weekday() == 6:\n prc_day = prc_day + timedelta(1)\n if dt.weekday() == 6:\n dt = dt + timedelta(1)\n if dt.month == 10 and dt.day == prc_day.day:\n return dt.replace(day=prc_day.day+1)\n return dt", "title": "" }, { "docid": "e32aace67f94eb60c34e83ca1a47e53c", "score": "0.5207895", "text": "def from_fixed(cls, fixed_date):\n julian_date = JulianDate.from_fixed(fixed_date)\n month_prime = amod(1 + julian_date.month, 12)\n year_prime = (julian_date.year if month_prime != 1 \n else (julian_date.year + 1 if (julian_date.year != -1) else 1))\n kalends1 = RomanDate(year_prime, month_prime, Event.Kalends, 1, False).to_fixed()\n \n if julian_date.day == 1:\n return RomanDate(julian_date.year, julian_date.month, Event.Kalends, 1, False)\n elif julian_date.day <= cls.nones_of_month(julian_date.month):\n return RomanDate(julian_date.year,\n julian_date.month,\n Event.Nones, \n cls.nones_of_month(julian_date.month) - julian_date.day + 1,\n False)\n elif julian_date.day <= cls.ides_of_month(julian_date.month):\n return RomanDate(julian_date.year,\n julian_date.month,\n Event.Ides,\n cls.ides_of_month(julian_date.month) - julian_date.day + 1,\n False)\n elif (julian_date.month != MonthOfYear.February) or not julian_date.is_leap_year(julian_date.year):\n return RomanDate(year_prime,\n month_prime,\n Event.Kalends,\n kalends1 - fixed_date + 1,\n False)\n elif julian_date.day < 25:\n return RomanDate(julian_date.year, MonthOfYear.March, Event.Kalends, 30 - julian_date.day, False)\n else:\n return RomanDate(julian_date.year, MonthOfYear.March, Event.Kalends, 31 - julian_date.day, julian_date.day == 25)", "title": "" }, { "docid": "df871a48f48d9af677fed4cba9d7db21", "score": "0.52077776", "text": "def get_day(self):\n a = Article.objects.get(**{\n '%s__year' % self.date_field: self.get_year(),\n '%s__month' % self.date_field: MONTHS.index(self.get_month()),\n self.get_slug_field(): self.kwargs.get(self.slug_url_kwarg),\n })\n day = a.created.day\n return str(day)", "title": "" }, { "docid": "099f2254bf8db68954c334190bb1603e", "score": "0.5205529", "text": "def georgian_day(date):\n try:\n fmt = '%m/%d/%Y'\n return datetime.datetime.strptime(date, fmt).timetuple().tm_yday\n except (ValueError, TypeError):\n return 0", "title": "" }, { "docid": "20f43a0166fd1ae4c8fa7965b1f87fbc", "score": "0.5204728", "text": "def day_from_int(day: int):\n days = [\n '',# 1 based\n 'MONDAY',\n 'TUESDAY',\n 'WEDNESDAY',\n 
'THURSDAY',\n 'FRIDAY',\n 'SATURDAY',\n 'SUNDAY',\n ]\n return days[day]", "title": "" }, { "docid": "3a63b08820aeb8c6d6d68d828a3b5138", "score": "0.5204658", "text": "def calendar(self, name=None, cal_id=None):\n return self.calendar_home_set.calendar(name, cal_id)", "title": "" }, { "docid": "7937e2a451899502179d3e54aab04165", "score": "0.5204016", "text": "def day_of_week(self) -> DayOfWeek:\n return DayOfWeek(self)", "title": "" }, { "docid": "d95285d27c61e8bb0b0cb31c8d2bc49b", "score": "0.51917166", "text": "def GetPublicHolidays():\n publicHolidays = []\n days = ael.Calendar['ZAR Johannesburg']\n for day in days.dates():\n publicHolidays.append(str(day.daynbr))\n return publicHolidays", "title": "" }, { "docid": "d4905fceae67d8fd5c373539ce7d40d5", "score": "0.5185829", "text": "def sunday_following(date):\n return _first_instance_weekday_after(date = date,\n weekday = _IDX_SUNDAY)", "title": "" }, { "docid": "06107f5d620d56b63521f9e1944f6fc2", "score": "0.51742625", "text": "def holidays_by_date(self):\n by_date = []\n for code in self.holidays:\n h = self.by_code[code][0]\n d = h.day(self.year)\n by_date.append({ 'day': d, 'holidays': self.by_code[code] })\n # New Years Day is a special case, because it could place a federal\n # holiday in the previous year, so we want to see where next year's New\n # Years Day is going to be placed as well as this year's.\n if 'new-years' in self.by_code:\n h = self.by_code['new-years'][0]\n d = h.day(self.year + 1)\n by_date.append({ 'day': d, 'holidays': self.by_code['new-years'] })\n return by_date", "title": "" }, { "docid": "21677649bfbd3a957e76e6c2a58202d2", "score": "0.5163545", "text": "def get_day(self, symbol, length_or_date, format='npy'):\r\n try:\r\n if len(length_or_date) == 8: # eg: 20101209\r\n date = datetime.datetime.strptime(length_or_date, '%Y%m%d').date()\r\n y = self.dbm.daystore.get_by_date(symbol, date)\r\n else:\r\n length = length_or_date\r\n y = self.dbm.daystore.get(symbol, int(length))\r\n if length == 1:\r\n y = y[0]\r\n\r\n if format == 'npy':\r\n memfile = StringIO()\r\n np.save(memfile, y)\r\n data = memfile.getvalue()\r\n del(y)\r\n else:\r\n data = json_encode(y.tolist())\r\n self._write_response(data)\r\n except KeyError:\r\n self.request.write(\"-ERR Symbol %s not exists.\\r\\n\" % symbol)", "title": "" }, { "docid": "e9cc1b20993bcbafb4402b12af7194a7", "score": "0.5154085", "text": "def next_date(date: datetime, day: int) -> datetime:\n return date + timedelta(days=(day - date.weekday()) % 7)", "title": "" }, { "docid": "0273e1e6433c19f8428d56e3391cbc7c", "score": "0.5153473", "text": "def get_calendar(self):\r\n return self.bot.calendar_manager", "title": "" }, { "docid": "481fdaf676daf512c469b8c86586bce5", "score": "0.5150399", "text": "def _FirstSunday(self, dt):\n return dt + datetime.timedelta(days=(6-dt.weekday()))", "title": "" }, { "docid": "8e0ec088afe933d1748faf96dc228475", "score": "0.51359177", "text": "def get_calendar(locale, fwday):\n\n if locale is None:\n return calendar.TextCalendar(fwday)\n else:\n return calendar.LocaleTextCalendar(fwday, locale)", "title": "" }, { "docid": "97fffad1afe1a7cb125f094554bd2fb7", "score": "0.5130424", "text": "def test_ordinal_day(self):\n date = iso.Date()\n base = iso.Date()\n base_overflow = iso.Date()\n base = iso.Date(century=19, year=69, month=7, day=20)\n base_overflow = iso.Date(century=19, year=69, month=7, day=21)\n date = iso.Date(century=19, year=69, ordinalDay=201)\n self.assertTrue(date.get_ordinal_day() == (19, 69, 201),\n \"simple case \")\n 
self.assertTrue(date.get_calendar_day() == (19, 69, 7, 20),\n \"calendar cross check\")\n date = iso.Date(century=19, year=69, ordinalDay=1)\n self.assertTrue(date.get_calendar_day() == (19, 69, 1, 1),\n \"calendar cross check Jan 1st\")\n date = iso.Date(century=19, year=68, ordinalDay=366)\n self.assertTrue(date.get_calendar_day() == (\n 19, 68, 12, 31), \"calendar cross check Dec 31st (leap)\")\n date = iso.Date(century=19, year=69, ordinalDay=365)\n self.assertTrue(date.get_calendar_day() == (19, 69, 12, 31),\n \"calendar cross check Dec 31st (non-leap)\")\n try:\n date = iso.Date(year=69, ordinalDay=201)\n self.fail(\"truncation without base\")\n except iso.DateTimeError:\n pass\n date = iso.Date(year=69, ordinalDay=201, base=base)\n self.assertTrue(date.get_ordinal_day() == (19, 69, 201),\n \"truncated century\")\n date = iso.Date(year=69, ordinalDay=201, base=base_overflow)\n self.assertTrue(date.get_ordinal_day() == (20, 69, 201),\n \"truncated century with overflow\")\n date = iso.Date(ordinalDay=201, base=base)\n self.assertTrue(date.get_ordinal_day() == (19, 69, 201),\n \"truncated year\")\n date = iso.Date(ordinalDay=201, base=base_overflow)\n self.assertTrue(date.get_ordinal_day() == (19, 70, 201),\n \"truncated year with overflow\")\n date = iso.Date(century=19, decade=6, year=9, week=29)\n try:\n date.get_ordinal_day()\n self.fail(\"ordinal day with week precision\")\n except iso.DateTimeError:\n pass\n date = iso.Date(century=19, year=69, month=7)\n try:\n date.get_ordinal_day()\n self.fail(\"ordinal day with month precision\")\n except iso.DateTimeError:\n pass\n date = iso.Date(century=19, year=69)\n self.assertTrue(date.get_ordinal_day() == (19, 69, None),\n \"year precision\")\n date = iso.Date(century=19)\n self.assertTrue(date.get_ordinal_day() == (19, None, None),\n \"century precision\")\n base_overflow = iso.Date(century=19, year=69, month=1, day=1)\n date = iso.Date(year=69, base=base)\n self.assertTrue(date.get_ordinal_day() == (19, 69, None),\n \"year precision, truncated century\")\n date = iso.Date(year=68, base=base_overflow)\n self.assertTrue(date.get_ordinal_day() == (20, 68, None),\n \"year precision, truncated century with overflow\")\n try:\n date = iso.Date(century=100, year=69, ordinalDay=201)\n self.fail(\"bad century\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(century=19, year=100, ordinalDay=201)\n self.fail(\"bad year\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(century=19, year=68, ordinalDay=367)\n self.fail(\"bad ordinal - leap\")\n except iso.DateTimeError:\n pass\n try:\n date = iso.Date(century=19, year=69, ordinalDay=366)\n self.fail(\"bad ordinal - non-leap\")\n except iso.DateTimeError:\n pass", "title": "" }, { "docid": "f4c01000bcaf34ed66136b5c2e854d44", "score": "0.5130381", "text": "def _FirstSunday(self, dt):\n\t\treturn dt + datetime.timedelta(days=(6-dt.weekday()))", "title": "" }, { "docid": "9778145a1358c5aba991e33590f985c9", "score": "0.51240957", "text": "def strftime_to_cal_wkday(day):\n if day==0:\n return 6\n else:\n return day-1", "title": "" }, { "docid": "3bfd68e668744eb9572f4c5956ad4b02", "score": "0.5123035", "text": "def get_first_day(self, start):\n start_day = start.weekday()\n days = 0\n while True:\n day = self.get_day_hours(True, start_day)\n if day and \\\n not self.is_holiday(datetime.timedelta(days=days) + start):\n return start_day, days\n start_day = (start_day + 1) % 7\n days += 1\n if days >= 7:\n # Calendar has no hours on any day. 
This case can also occur\n # if a calendar has seven consecutive holidays.\n return None, None", "title": "" }, { "docid": "0de460d3524760312b6485588c0b15d8", "score": "0.5122435", "text": "def JulianDay(ADate):\r\n C.execute('''SELECT julianday(?)''', (ADate,))\r\n return C.fetchall()[0][0]", "title": "" }, { "docid": "d93bae759d339bf7815ff7b2d8b81ea0", "score": "0.51209503", "text": "def specific_date(self):\n country_code = self.get_country_code()\n self.jarvis.say(\"Please enter day/month\")\n try:\n day, month = re.split(r'[ /-]+', self.jarvis.input().strip())\n self.check_if_date_is_valid(day, month)\n except ValueError:\n self.specific_date()\n return\n j = requests.get(\"https://nameday.abalin.net/api/V1/getdate\",\n params={\"country\": country_code, \"day\": day, \"month\": month}).json()\n names = j[\"nameday\"][country_code]\n if names != \"n/a\":\n self.jarvis.say(\"Say some kind words to \" + names + \" on \" + day + \"/\" + month)\n else:\n self.jarvis.say(\"No name days at \" + day + \"/\" + month + \" in \" + self.location)", "title": "" }, { "docid": "31d83b17c4d82e882ef17abdc93eceef", "score": "0.5120935", "text": "def saturday_following(date):\n return _first_instance_weekday_after(date = date,\n weekday = _IDX_SATURDAY)", "title": "" }, { "docid": "975396934ff42737f3127831835b29e2", "score": "0.5119275", "text": "def fromisocalendar(cls, year, week, day):\n # Year is bounded this way because 9999-12-31 is (9999, 52, 5)\n if not MINYEAR <= year <= MAXYEAR:\n raise ValueError(f\"Year is out of range: {year}\")\n\n if not 0 < week < 53:\n out_of_range = True\n\n if week == 53:\n # ISO years have 53 weeks in them on years starting with a\n # Thursday and leap years starting on a Wednesday\n first_weekday = _ymd2ord(year, 1, 1) % 7\n if (first_weekday == 4 or (first_weekday == 3 and\n _is_leap(year))):\n out_of_range = False\n\n if out_of_range:\n raise ValueError(f\"Invalid week: {week}\")\n\n if not 0 < day < 8:\n raise ValueError(f\"Invalid weekday: {day} (range is [1, 7])\")\n\n # Now compute the offset from (Y, 1, 1) in days:\n day_offset = (week - 1) * 7 + (day - 1)\n\n # Calculate the ordinal day for monday, week 1\n day_1 = _isoweek1monday(year)\n ord_day = day_1 + day_offset\n\n return cls(*_ord2ymd(ord_day))", "title": "" }, { "docid": "7aee3346cfdfdc647a1a04aab1c30a22", "score": "0.511121", "text": "def _find_day(self, l, n):\n if n == -1:\n n = 0\n l.reverse()\n j = [x for x in l if x != 0]\n return j[n]", "title": "" }, { "docid": "2a8b7385501115314d6ded093ae8c976", "score": "0.50979775", "text": "def __call__(self, *args, **kwargs) -> HolidayBase:\n cls = self.get_entity()\n return cls(*args, **kwargs) # type: ignore[misc, operator]", "title": "" }, { "docid": "7d8672bae40488b324b8d8a28e7ae117", "score": "0.50972384", "text": "def wednesday_following(date):\n return _first_instance_weekday_after(date = date,\n weekday = _IDX_WEDNESDAY)", "title": "" }, { "docid": "bd01714fbf8db4d5b666b6b294d89cb3", "score": "0.507847", "text": "def _get_weekday_name_from_isoformat(date):\n\n dt_obj = dt.date(*_get_date_tuple_from_string(date))\n weekday = dt_obj.isoweekday()\n\n return weekday_names[weekday - 1]", "title": "" }, { "docid": "4352aa3b0b0768fbdcb6a610db3d608d", "score": "0.5077992", "text": "def calculate_anchor_day(current_year: int)->str:\n fist_part_year:int = int(str(current_year)[:2])\n last_part_year:int = int(str(current_year)[2:])\n \n if not is_even(last_part_year):\n last_part_year += 11\n\n last_part_year = last_part_year/2\n\n if not 
is_even(last_part_year):\n last_part_year += 11\n \n multiple_of_7_lower: int = multiple_of_7_greater_than(last_part_year) \n\n the_current_day:int = multiple_of_7_lower - last_part_year + get_century_anchor(fist_part_year)\n \n return DAYS[(int)(the_current_day % 7)]", "title": "" }, { "docid": "fe6b02bf3fa59e07640ce073eff68398", "score": "0.5076348", "text": "def get(self, name):\n\n return self.holidays.get(name)", "title": "" }, { "docid": "f63a188f1b937781f1b1cf0c573404b7", "score": "0.5070156", "text": "def get_next_weekday(date, weekday):\n # check that input weekday is valid\n assert weekday in list(range(7)), 'invalid weekday'\n \n # we copy input date so we don't modify its internal state\n date_out = copy(date)\n \n # add a day until we're at a given weekday\n while date_out.weekday() != weekday:\n date_out += timedelta(days=1)\n \n return date_out", "title": "" }, { "docid": "b6a669656ec7a3f6c72632a3e7b51a12", "score": "0.5067923", "text": "def archive_day(self, date, **kwargs):\n\t\treturn self.public(published__day=date.day, published__month=date.month, published__year=date.year)", "title": "" }, { "docid": "d5d4c5d4543762b2baf8e377148f095e", "score": "0.50671077", "text": "def day_of_week (day, month, year):\n\n a = (14-month)//12\n\n y = year - a\n\n m = month + 12*a - 2\n\n d = (day + y + y//4 - y//100 + y//400 + (31*m)//12)%7\n\n return day_name(d)", "title": "" }, { "docid": "732d0416bc5908b97e35c1ac111c5c49", "score": "0.50582546", "text": "def songkran_festival_last_day_observance(dt):\n if dt.weekday() == SUNDAY or dt.weekday() == MONDAY:\n return dt + timedelta(days=1)\n return dt", "title": "" }, { "docid": "f70b97ebedbbe5e283ff6d9cf026567a", "score": "0.5054317", "text": "def day_date(date,month,year):\n assert date > 0 and month > 0, \"Input error.\"\n \n weekdays = {\n 0:\"Sunday\",\n 1:\"Monday\",\n 2:\"Tuesday\",\n 3:\"Wednesday\",\n 4:\"Thursday\",\n 5:\"Friday\",\n 6:\"Saturday\"\n }\n weekdaysrev = {\n \"Sunday\":0,\n \"Monday\":1,\n \"Tuesday\":2,\n \"Wednesday\":3,\n \"Thursday\":4,\n \"Friday\":5,\n \"Saturday\":6,\n }\n monthdays = {\n 1:31,\n 2:28,\n 3:31,\n 4:30,\n 5:31,\n 6:30,\n 7:31,\n 8:31,\n 9:30,\n 10:31,\n 11:30,\n 12:31\n }\n monthdaysleap = {\n 1:31,\n 2:29,\n 3:31,\n 4:30,\n 5:31,\n 6:30,\n 7:31,\n 8:31,\n 9:30,\n 10:31,\n 11:30,\n 12:31\n }\n# doomsdays = {\n# 1:3,\n# 2:28,\n# 3:14,\n# 4:4,\n# 5:9,\n# 6:6,\n# 7:11,\n# 8:8,\n# 9:5,\n# 10:10,\n# 11:7,\n# 12:12\n# }\n# doomsleap = {\n# 1:4,\n# 2:29,\n# 3:14,\n# 4:4,\n# 5:9,\n# 6:6,\n# 7:11,\n# 8:8,\n# 9:5,\n# 10:10,\n# 11:7,\n# 12:12\n# }\n months = {\n 1:\"January\",\n 2:\"February\",\n 3:\"March\",\n 4:\"April\",\n 5:\"May\",\n 6:\"June\",\n 7:\"July\",\n 8:\"August\",\n 9:\"September\",\n 10:\"October\",\n 11:\"November\",\n 12:\"December\"\n }\n \n #anchor day\n \n if year == 0:\n \n print (\"Year 0 does not exist.\")\n \n else:\n \n yeartr = year\n \n if year > 0 :\n \n yeartr = yeartr % 400\n \n minc1 = 0\n maxc1 = 99\n \n minc2 = 100\n maxc2 = 199\n \n minc3 = 200\n maxc3 = 299\n \n minc4 = 300\n maxc4 = 399\n \n if yeartr >= minc1 and yeartr <= maxc1:\n anchor = weekdays[2]\n \n elif yeartr >= minc2 and yeartr <= maxc2:\n anchor = weekdays[0]\n \n elif yeartr >= minc3 and yeartr <= maxc3:\n anchor = weekdays[5]\n \n elif yeartr >= minc4 and yeartr <= maxc4:\n anchor = weekdays[3]\n \n else:\n anchor = \"Error.\"\n \n else:\n \n yeartr = yeartr % -400\n \n minc1 = 0\n maxc1 = -99\n \n minc2 = -100\n maxc2 = -199\n \n minc3 = -200\n maxc3 = -299\n \n minc4 = -300\n maxc4 = -399\n \n if 
yeartr <= minc1 and yeartr >= maxc1:\n anchor = weekdays[3]\n \n elif yeartr <= minc2 and yeartr >= maxc2:\n anchor = weekdays[5]\n \n elif yeartr <= minc3 and yeartr >= maxc3:\n anchor = weekdays[0]\n \n elif yeartr <= minc4 and yeartr >= maxc4:\n anchor = weekdays[2]\n \n else:\n anchor = \"Error.\" \n \n #doomsday per year\n yearstr = str(year)\n \n last2 = int(yearstr[int(len(yearstr))-2:int(len(yearstr))])\n \n step1 = last2 // 12\n \n step2 = abs(last2 - (step1 * 12))\n \n step3 = step2 // 4\n \n step4 = weekdaysrev[anchor]\n \n step5 = step1 + step2 + step3 + step4\n \n doomsdayyear = step5 % 7\n \n #what day of the week\n \n if year % 4 == 0:\n if year % 100 != 0:\n #leap\n julian = date\n monthcalc = month - 1\n while monthcalc > 0:\n julian += monthdaysleap[monthcalc]\n monthcalc -= 1\n if date > monthdaysleap[month]:\n print (\"Date error.\")\n juliandoom = 4\n \n else:\n if year % 400 == 0:\n #leap\n julian = date\n monthcalc = month - 1\n while monthcalc > 0:\n julian += monthdaysleap[monthcalc]\n monthcalc -= 1 \n if date > monthdaysleap[month]:\n print (\"Date error.\")\n juliandoom = 4\n \n else:\n #not leap\n julian = date\n monthcalc = month - 1\n while monthcalc > 0:\n julian += monthdays[monthcalc]\n monthcalc -= 1 \n if date > monthdays[month]:\n print (\"Date error.\") \n juliandoom = 3\n \n else:\n #not leap\n julian = date\n monthcalc = month - 1\n while monthcalc > 0:\n julian += monthdays[monthcalc]\n monthcalc -= 1 \n if date > monthdays[month]:\n print (\"Date error.\") \n juliandoom = 3\n \n #day difference\n day1 = julian % 7\n day2 = day1 - juliandoom\n day3 = day2 + doomsdayyear\n \n if day3 < 0:\n finalday = day3 + 7\n else:\n finalday = day3\n \n print (date, months[month], year, \"is a\", weekdays[finalday])\n return finalday", "title": "" }, { "docid": "17e4f5bb153b0c92695ed64db6ea4c47", "score": "0.5051644", "text": "def tuesday_following(date):\n return _first_instance_weekday_after(date = date,\n weekday = _IDX_TUESDAY)", "title": "" }, { "docid": "84369121b8ffaf2b2766d69a8bdf8e7b", "score": "0.50474304", "text": "def sunday_based_day_of_week(day_of_week):\n if day_of_week < 6:\n return day_of_week + 1\n else:\n return 0", "title": "" }, { "docid": "0757a092092b62197421733b0c3ef153", "score": "0.5046047", "text": "def find_start_date_from_date(t):\n # weekday gives us monday = 0 ... sunday = 6. Add 1 and modulo 7 to get\n # sunday = 0 .. saturday = 6\n weekday = (t.weekday() + 1) % 7\n delta = timedelta(weekday)\n\n return t - delta", "title": "" }, { "docid": "4653532ddafa48fee8b1d2eb46d53be3", "score": "0.504541", "text": "def get_day():\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n\n while True:\n try:\n prompt_day = (\"Please enter the number of the corresponding day you'd like to filter \" \\\n \"your chosen dataset by. \\n \\n \" \\\n \"0: All \\n 1: Monday \\n 2: Tuesday \\n 3: Wednesday \\n 4: Thursday \\n 5: Friday \\n \" \\\n \"6: Saturday \\n 7: Sunday \\n\\n \" \\\n \"Choose your filter here: \")\n\n day_fltr = int(input(prompt_day))\n\n day = days[day_fltr]\n\n break\n\n except ValueError:\n print(\"Please enter a valid filter number from the list: \\n\")\n\n continue\n\n except KeyError:\n print(\"Please enter a valid filter number from the list: \\n\")\n\n continue\n\n if day_fltr in days:\n print(\"Great! You've selected {} as you filter. 
\\n\\n\".format(day))\n\n return day", "title": "" }, { "docid": "654936615feb24a7f4d43f764165335b", "score": "0.5044887", "text": "def getDayOfYear(self, year, month, day):\n return datetime(year,month,day).timetuple().tm_yday", "title": "" }, { "docid": "dc2a1693d14db0513cc0f41776285ee8", "score": "0.50396466", "text": "def calendar(self):\n return self[\"calendar\"]", "title": "" } ]
0e1f6e0748b12918c1e4009c77bcba45
empty() Returns True if the number of elements is zero right now. Note that in theory, another thread might add an element right after this function returns.
[ { "docid": "01e9f670209cb7c4b87b0e2d2f7b7cf3", "score": "0.7976789", "text": "def empty(self):\n return len(self) == 0", "title": "" } ]
[ { "docid": "bf783b751c53b1ba6330e4774a061220", "score": "0.86242735", "text": "def empty(self):\n size = len(self.elements)\n return size == 0", "title": "" }, { "docid": "8923db9a35badf5089ab54692bedb339", "score": "0.8620145", "text": "def is_empty(self):\n return self.num_elements == 0", "title": "" }, { "docid": "ec2021b8ec6b80cd6287de16291a9096", "score": "0.8553534", "text": "def is_empty(self):\n return self.elements == []", "title": "" }, { "docid": "480c48194d5e91dbaf52dc79d531ac1f", "score": "0.8487231", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "480c48194d5e91dbaf52dc79d531ac1f", "score": "0.8487231", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "480c48194d5e91dbaf52dc79d531ac1f", "score": "0.8487231", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "6b1111ab7e05cfaad978bbd27ef4013b", "score": "0.8483983", "text": "def is_empty(self):\n return len(self.elements) == 0", "title": "" }, { "docid": "06714f08c2cdd2f3b399c6c10a413970", "score": "0.84098035", "text": "def is_empty(self):\r\n\t\treturn self._size == 0", "title": "" }, { "docid": "7ccf35d888ff9005a52bbe278c80ec19", "score": "0.8391371", "text": "def empty(elements):\n return len(elements) == 0", "title": "" }, { "docid": "cc508863748ad64b94376026d3412965", "score": "0.83804846", "text": "def is_empty(self):\n\t\treturn self._size == 0", "title": "" }, { "docid": "8ed9dc7e89b958c0aff8e45b498707a3", "score": "0.835604", "text": "def empty(self):\n if self.size > 0:\n return True\n else:\n return False", "title": "" }, { "docid": "c43721d011c5af04a2740a6b3646cac0", "score": "0.83402103", "text": "def is_empty(self):\r\n return self._size==0", "title": "" }, { "docid": "0685db77c23d3bcdb4e801a64a5c9ee1", "score": "0.8328189", "text": "def isEmpty(self):\r\n if self.elements == []:\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "9c3a901b990b0cefa51a05adc00aaf6e", "score": "0.831502", "text": "def is_empty(self) -> bool:\n return self.size == 0", "title": "" }, { "docid": "3288824c7b2323fe6a92c4aba8a78690", "score": "0.82980883", "text": "def empty(self):\n return (self.size() == 0)", "title": "" }, { "docid": "3288824c7b2323fe6a92c4aba8a78690", "score": "0.82980883", "text": "def empty(self):\n return (self.size() == 0)", "title": "" }, { "docid": "cc5f1055f1cceda6c13030003189a97a", "score": "0.8297961", "text": "def empty(self) -> bool:\n return self.size == 0", "title": "" }, { "docid": "636bd72f6f63fca2d73055c6c2b351b0", "score": "0.82868046", "text": "def is_empty(self):\n return self.size == 0", "title": "" }, { "docid": "636bd72f6f63fca2d73055c6c2b351b0", "score": "0.82868046", "text": "def is_empty(self):\n return self.size == 0", "title": "" }, { "docid": "636bd72f6f63fca2d73055c6c2b351b0", "score": "0.82868046", "text": "def is_empty(self):\n return self.size == 0", "title": "" }, { "docid": "8fc4c2b244d29c88cf388c1a36c2f6d8", "score": "0.8271662", "text": "def is_empty(self):\n return self.size() == 0", "title": "" }, { "docid": "8fc4c2b244d29c88cf388c1a36c2f6d8", "score": "0.8271662", "text": "def is_empty(self):\n return self.size() == 0", "title": "" }, { "docid": "8fc4c2b244d29c88cf388c1a36c2f6d8", "score": "0.8271662", "text": "def is_empty(self):\n return self.size() == 0", "title": "" }, { "docid": "8fc4c2b244d29c88cf388c1a36c2f6d8", "score": "0.8271662", "text": "def is_empty(self):\n return self.size() == 0", "title": "" }, { "docid": "8fc4c2b244d29c88cf388c1a36c2f6d8", 
"score": "0.8271662", "text": "def is_empty(self):\n return self.size() == 0", "title": "" }, { "docid": "808a0b2ee0f4e6155337e7b26eedf5e6", "score": "0.8258326", "text": "def empty(self):\n return self.size == 0", "title": "" }, { "docid": "808a0b2ee0f4e6155337e7b26eedf5e6", "score": "0.8258326", "text": "def empty(self):\n return self.size == 0", "title": "" }, { "docid": "f1d569a1fc45d54441b922f9fd5018bd", "score": "0.8244557", "text": "def empty(self):\r\n return self.qsize() == 0", "title": "" }, { "docid": "7c4cabc7b8d1721122997c087a60932b", "score": "0.82351613", "text": "def is_empty(self):\n\t\t# runtime O(1) checking the value\n\t\tif self.size == 0:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "title": "" }, { "docid": "7c4cabc7b8d1721122997c087a60932b", "score": "0.82351613", "text": "def is_empty(self):\n\t\t# runtime O(1) checking the value\n\t\tif self.size == 0:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "title": "" }, { "docid": "fc62f532b81c56a9e40cfbcf0ee18ae9", "score": "0.82204044", "text": "def empty(self) -> bool:", "title": "" }, { "docid": "07dba2835ca487a0670094ed038db579", "score": "0.8217335", "text": "def is_empty(self):\n return self.qsize() == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", 
"text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "294060799cfcb611c05c7377e71c6379", "score": "0.82041085", "text": "def is_empty(self):\n return self._size == 0", "title": "" }, { "docid": "af5dd3d5dd9b006f873ebc31d341c13f", "score": "0.8176485", "text": "def empty(self) -> bool:\n return not self.qsize()", "title": "" }, { "docid": "6f1f045856d2c54f25bfc4714370dd65", "score": "0.8172682", "text": "def is_empty(self):\n \n if self._size == 0:\n return True\n else:\n return False", "title": "" }, { "docid": "f98e8ebafdfefff61f98af45ce726720", "score": "0.8172374", "text": "def empty(self):\n return self.qsize() == 0", "title": "" }, { "docid": "f98e8ebafdfefff61f98af45ce726720", "score": "0.8172374", "text": "def empty(self):\n return self.qsize() == 0", "title": "" }, { "docid": "f98e8ebafdfefff61f98af45ce726720", "score": "0.8172374", "text": "def empty(self):\n return self.qsize() == 0", "title": "" }, { "docid": "f98e8ebafdfefff61f98af45ce726720", "score": "0.8172374", "text": "def empty(self):\n return self.qsize() == 0", "title": "" }, { "docid": "f98e8ebafdfefff61f98af45ce726720", "score": "0.8172374", "text": "def empty(self):\n return self.qsize() == 0", "title": "" }, { "docid": "8b744e23eae0f18e6a9410886948bead", "score": "0.8152074", "text": "def is_empty(self):\n\t\treturn self.n == 0", "title": "" }, { "docid": "db7be9d52950c933623943b5cc6ecd6f", "score": "0.8114345", "text": "def is_empty(self):\n return self.count == 0", "title": "" }, { "docid": "db7be9d52950c933623943b5cc6ecd6f", "score": "0.8114345", "text": "def is_empty(self):\n return self.count == 0", "title": "" }, { "docid": "0422c231cc8a4918d2aa4729a1cd9713", "score": "0.8103299", "text": "def empty(self):\n\t\treturn self.__len == 0", "title": "" }, { "docid": "c9201a5aa177a2a2993af7fdebcc612e", "score": "0.80834544", "text": "def empty(self) -> bool:\r\n if len(self.a) == 0:\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "19cd17b93a6993e7c3d4c71158a6bc6c", "score": "0.8080214", "text": "def empty(self):\n return not self.qsize()", "title": "" }, { "docid": "29d2d58167cbd2cb0bc6f5ec34793ce4", "score": "0.8076291", "text": "def is_empty(self):\n return self.add_count == 0", "title": "" }, { "docid": "0eae897252ee233a8abaf27aee8baf3d", "score": "0.8070254", "text": "def is_empty(self) -> bool:\n return self.heap.length() == 0", "title": "" }, { "docid": "0eae897252ee233a8abaf27aee8baf3d", "score": "0.8070254", "text": "def is_empty(self) -> bool:\n return self.heap.length() == 0", "title": "" }, { "docid": "0693a1ad0a63a059f8cc265c0c48a2a1", "score": "0.8050749", "text": "def is_empty(self):\n return True if len(self.list) == 0 else False", "title": "" }, { "docid": "71fba4ce653e920a74df3469c569c764", "score": "0.80404", "text": "def is_empty(self):\r\n return self.queue[\"size\"] == 0", "title": "" }, { "docid": "dde5613748ddb03c3e03451938366a3f", "score": "0.80357957", "text": "def is_empty(self):\n\t\treturn len(self) == 0", "title": "" }, { "docid": "ff0c0b5ecb220a5ea5aeb50a67213f91", "score": "0.80162764", "text": "def is_empty(self):\n return self._n == 0", "title": "" }, { "docid": "ff0c0b5ecb220a5ea5aeb50a67213f91", "score": "0.80162764", "text": "def is_empty(self):\n return self._n == 0", "title": "" }, { "docid": "756db0fa9d826f896876defaa5da8cb8", "score": "0.80018336", "text": "def is_empty(self):\n\t\treturn len(self) == 0\n\n\t\t#Notice __iter__ depends on concrete implementation", "title": "" }, { "docid": "bef306a6115bced2341a006af9848090", 
"score": "0.79916173", "text": "def isEmpty(self):\n if self.size == 0:\n return True\n return False", "title": "" }, { "docid": "83782a805b1ec0e0fceeee2ed883d4a7", "score": "0.7983209", "text": "def is_empty(self):\n return self.num_items == 0", "title": "" }, { "docid": "83782a805b1ec0e0fceeee2ed883d4a7", "score": "0.7983209", "text": "def is_empty(self):\n return self.num_items == 0", "title": "" }, { "docid": "eb607e97f2d320aad746f24693b65b8a", "score": "0.7968039", "text": "def empty(self) -> bool:\n return len(self) == 0", "title": "" }, { "docid": "9090478fcf2a9a4b09826ebfa8b0846e", "score": "0.796775", "text": "def is_empty(self):\n return 0 == len(self)", "title": "" }, { "docid": "752f89501ab851d40585d093b5fe215e", "score": "0.79596597", "text": "def is_empty(self):\n return self.size == 0 and self.head is None and self.tail is None", "title": "" }, { "docid": "9b550c8a27ad5698510e07e04b80e550", "score": "0.7953261", "text": "def is_empty(self):\r\n return self.items == []", "title": "" }, { "docid": "d5fa493913de007e1e7710cdc085f1f0", "score": "0.7946633", "text": "def is_empty(self):\n return False if self.list.head else True", "title": "" }, { "docid": "4bdd0d570c7c98016ceb4b185d927bd3", "score": "0.7946453", "text": "def isEmpty(self):\r\n\t\treturn len(self.internalQueue) == 0", "title": "" }, { "docid": "54ca9904befc71bb66fefedd484e99cd", "score": "0.79271996", "text": "def __is_empty(self):\n return len(self.__container) == 0", "title": "" }, { "docid": "eed8613c80b70420c0ed211bbae77c05", "score": "0.7919142", "text": "def is_empty(self):\n return self._items == []", "title": "" }, { "docid": "eed8613c80b70420c0ed211bbae77c05", "score": "0.7919142", "text": "def is_empty(self):\n return self._items == []", "title": "" }, { "docid": "e363579edc3f176a670198812d08d04f", "score": "0.79061747", "text": "def is_empty(self):\n return len(self.entries) == 0", "title": "" }, { "docid": "332ed3fed4d1d5c3c96b72080e79a1b1", "score": "0.7906088", "text": "def is_empty(self):\n return len(self) == 0", "title": "" }, { "docid": "332ed3fed4d1d5c3c96b72080e79a1b1", "score": "0.7906088", "text": "def is_empty(self):\n return len(self) == 0", "title": "" }, { "docid": "332ed3fed4d1d5c3c96b72080e79a1b1", "score": "0.7906088", "text": "def is_empty(self):\n return len(self) == 0", "title": "" }, { "docid": "332ed3fed4d1d5c3c96b72080e79a1b1", "score": "0.7906088", "text": "def is_empty(self):\n return len(self) == 0", "title": "" }, { "docid": "332ed3fed4d1d5c3c96b72080e79a1b1", "score": "0.7906088", "text": "def is_empty(self):\n return len(self) == 0", "title": "" }, { "docid": "b450b83e1fcdd739e6574faeca41c0d8", "score": "0.7899167", "text": "def empty(self):\n if len(self.queue) == 0:\n return True\n else:\n return False", "title": "" }, { "docid": "54ffd309d31ba5573b30e548ad57e93b", "score": "0.7896365", "text": "def is_empty(self):\n return self._len == 0", "title": "" }, { "docid": "29549ac57d615088d704963c3f3c4709", "score": "0.788822", "text": "def is_empty(self):\n return self.items == []", "title": "" }, { "docid": "29549ac57d615088d704963c3f3c4709", "score": "0.788822", "text": "def is_empty(self):\n return self.items == []", "title": "" }, { "docid": "29549ac57d615088d704963c3f3c4709", "score": "0.788822", "text": "def is_empty(self):\n return self.items == []", "title": "" }, { "docid": "29549ac57d615088d704963c3f3c4709", "score": "0.788822", "text": "def is_empty(self):\n return self.items == []", "title": "" }, { "docid": "a46a9c47fd319ee200da063dcf7bafc0", "score": 
"0.78808045", "text": "def is_empty(self):\n return self._item_count == 0", "title": "" }, { "docid": "5866db0eed5e0343ecc4a5924670da37", "score": "0.78798366", "text": "def isempty(self):\n if len(self.list)==0:\n return True\n else:\n return False", "title": "" }, { "docid": "b3bb6e9c6a07da5acf93339089807f40", "score": "0.7872719", "text": "def isEmpty(self):\n return self.size == 0", "title": "" } ]
1a7c1e11ab620d2d2f722df6363b4910
This function iteratively fits data x into a k-means model. The result of the iteration is the cluster centers.
[ { "docid": "ad98ebbf93f2f20d7067e3531f00c41a", "score": "0.77107215", "text": "def fit(self, x):\n print(\"Fitting to the dataset\")\n # intialize self.centers\n self.init_center(x)\n\n sse_vs_iter = []\n for iter in range(self.max_iter):\n # finds the cluster index for each x[i] based on the current centers\n labels = self.predict(x)\n\n # revises the values of self.centers based on the x and current labels\n self.revise_centers(x, labels)\n\n # computes the sse based on the current labels and centers.\n sse = self.get_sse(x, labels)\n\n sse_vs_iter.append(sse)\n\n return sse_vs_iter", "title": "" } ]
[ { "docid": "bdf5b75e75a58acd0d2c646dd0b33d12", "score": "0.75578046", "text": "def fit(self, X):\n centroids = self.initialize_centroid(X, self.k)\n for _ in range(self.max_iterations):\n clusters = self.create_clusters(centroids, X)\n previous_centroids = centroids\n centroids = self.update_centroids(clusters, X)\n difference = centroids - previous_centroids\n\n # print(difference)\n if not difference.numpy().any():\n break\n\n self.KMeans_Centroids = centroids\n return centroids", "title": "" }, { "docid": "15cb21ea2496fd43b5116e41f8368fba", "score": "0.7336482", "text": "def do_kmeans(self, data, k):\n centers = self.init_centers_rnd(data, k)\n old_centers = []\n #print(\"Initial centers: \", centers)\n\n while not old_centers == centers:\n old_centers = copy.copy(centers)\n\n data = self.get_labels(data, centers)\n centers = list(self.get_new_centers(data, k))\n\n return data, centers", "title": "" }, { "docid": "4b882a5ce50ee548516418b1298f34cc", "score": "0.73033386", "text": "def model_kmeans(data, k):\r\n return KMeans(n_clusters=k).fit(data)", "title": "" }, { "docid": "8f06070b4f1dcf45a8e01bdd7f2482da", "score": "0.73018795", "text": "def fit(self) -> None:\r\n self.__init_centroid()\r\n idx = np.zeros(self.n_data)\r\n idx_new = np.zeros(self.n_data)\r\n count = 0\r\n data_re = self.data.reshape(self.n_data, 1, 1, self.dim_data)\r\n for _ in tqdm(range(self.iter), desc=\"K-Means Fit Progress\", ncols=0, unit='iter'):\r\n dist = np.sum((data_re - self.centroid.reshape(1, self.n_cluster, self.dim_data))**2, axis=3)\r\n idx = np.argmin(dist, axis=2).flatten()\r\n for i in range(self.n_cluster):\r\n self.centroid[i] = self.data[idx==i].mean(axis=0)\r\n if np.all(idx==idx_new) and count == 2:\r\n self.idx = idx\r\n break\r\n elif np.all(idx==idx_new) and count < 2:\r\n count += 1\r\n idx_new = idx\r\n else:\r\n idx_new = idx", "title": "" }, { "docid": "84504156c95e26e305e59ed5c9ea838d", "score": "0.725004", "text": "def kmeans(self, data, k, initial_centroids=None, maxiter=1000, verbose = False, seed=None):\n\n centroids = initial_centroids[:] if initial_centroids is not None else self._k_meanpp_initialize(data, k, seed)\n prev_cluster_assignment = None \n for itr in range(maxiter): \n cluster_assignment = self._assign_clusters(data, centroids)\n centroids = self._update_centroids(data, k, cluster_assignment)\n # if no changes detected\n if prev_cluster_assignment is not None and (prev_cluster_assignment==cluster_assignment).all():\n break\n if verbose :\n print('Iteration '+ str(itr)+ ' centroids : ')\n print(centroids)\n prev_cluster_assignment = cluster_assignment[:]\n return centroids, cluster_assignment", "title": "" }, { "docid": "6aafbec7612d4060efdefca70f7b5663", "score": "0.71789056", "text": "def run_kmeans(x, nmb_clusters, verbose=False, seed=DEFAULT_SEED):\n n_data, d = x.shape\n\n # faiss implementation of k-means\n clus = faiss.Clustering(d, nmb_clusters)\n clus.niter = 20\n clus.max_points_per_centroid = 10000000\n clus.seed = seed\n res = faiss.StandardGpuResources()\n flat_config = faiss.GpuIndexFlatConfig()\n flat_config.useFloat16 = False\n flat_config.device = 0\n index = faiss.GpuIndexFlatL2(res, d, flat_config)\n\n # perform the training\n clus.train(x, index)\n _, I = index.search(x, 1)\n losses = faiss.vector_to_array(clus.obj)\n if verbose:\n print('k-means loss evolution: {0}'.format(losses))\n\n return [int(n[0]) for n in I], losses[-1]", "title": "" }, { "docid": "4f1ebbc68e0f0f2a728fbfb86a91251e", "score": "0.7160452", "text": "def k_means(data, 
k_partitions=8, max_centeroid_distance=20, max_iterations=10):\n\n print(\"starting k-means algorithm\")\n\n clustering = list() # list of cluster_ids\n\n normalizer = list() # store max values for initial center calculation\n for init in range(len(data[0])):\n normalizer.append(0)\n\n print(\"calculating max values for initial center vectors\")\n for datapoint in data:\n clustering.append(0) # assign each data point to cluster 0 initially\n for value in range(len(data[0])):\n normalizer[value] = max(normalizer[value], datapoint[value])\n\n print(\"choosing initial centers\")\n # chose k points as center\n new_centers = list()\n old_centers = list()\n\n for cluster_id in range(0, k_partitions):\n new_centers.append(random_centeroid(normalizer, cluster_id))\n\n running = True\n iterations = 0\n while running:\n print(\"current centers:\")\n print(new_centers)\n running = False\n print(\"iteration step :\" + str(iterations))\n\n print(\"assigning closest centeroid\")\n # iterate over data points\n for point_index in range(len(data)):\n datapoint = data[point_index]\n current_cluster_id = clustering[point_index]\n current_center, cluster_id = new_centers[current_cluster_id]\n dist_min = euklidian_dist_generic(datapoint, current_center)\n for center, cluster_id in new_centers:\n new_dist = euklidian_dist_generic(datapoint, center)\n if new_dist < dist_min: # data point is closer to other center\n clustering[point_index] = cluster_id # assign new cluster_id\n dist_min = new_dist # new minimal distance\n\n print(\"recalculating centeroids\")\n # calculate new centeroids\n\n del old_centers[:]\n old_centers += new_centers\n del new_centers[:]\n\n # finding average for each vector value\n sums = list()\n for i in range(k_partitions):\n zerolist = [0] * len(data[0])\n counter = 0\n sums.append((zerolist, counter))\n\n for point_index in range(len(data)):\n datapoint = data[point_index]\n cluster_id = clustering[point_index]\n # vector to sum for center calculation\n totals1, counter = sums[cluster_id]\n for i, value in enumerate(datapoint):\n totals1[i] += value\n counter += 1\n sums[cluster_id] = totals1, counter\n\n for cluster_id in range(k_partitions):\n totals2, counter = sums[cluster_id]\n if counter == 0:\n print(\"regenerating centroid\")\n centeroid = random_centeroid(normalizer, cluster_id)\n else:\n centeroid = (tuple(\n value / counter for value in totals2\n ), cluster_id)\n new_centers.append(centeroid)\n\n print(\"calculating biggest center distance, max = \" + str(max_centeroid_distance))\n dist = 0\n dists = list()\n for i in range(k_partitions):\n oc, ocid = old_centers[i]\n nc, ncid = new_centers[i]\n old_dist = euklidian_dist_generic(oc, nc)\n dists.append((i, old_dist))\n dist = max(dist, old_dist)\n\n print(list(str(i) + \":\" + str(round(old_dist,2)) for i, old_dist in dists))\n\n # check for exit conditions\n if dist > max_centeroid_distance:\n running = True\n\n iterations += 1\n if iterations >= max_iterations:\n running = False\n\n return clustering", "title": "" }, { "docid": "8f88b47d246193e33e64a53fa3375685", "score": "0.71574074", "text": "def kmeans(data, k=3, max_iterations=5):\n clusters = [Cluster(*random.choice(data)) for _ in range(k)]\n\n iterations = 0\n while iterations < max_iterations:\n\n print(f'Iteration #{iterations}')\n\n for i, pixel in enumerate(data):\n nearest = min(clusters, key=lambda cluster: euclidean_distance(cluster.centroid, pixel))\n nearest.new_sample(*pixel)\n\n iterations += 1\n\n return [c.centroid for c in clusters]", "title": "" }, { 
"docid": "5014a0f4685e43c6c54240ab5ca8a153", "score": "0.7148842", "text": "def kmeans_coresets(X, w, n_clusters=8, n_init=10, max_iter=300, tol=.0001):\n\n assert X.shape[0] == w.shape[0], \\\n \"X and w must have the same number of samples. {} != {}\".format(X.shape[0], w.shape[0])\n\n best_centers, best_inertia, best_labels = None, None, None\n\n n_samples = X.shape[0]\n\n for i in range(n_init):\n\n # Initialize the centers using the k-means++ algorithm\n centers = init_centers_d2_sampling(X, n_clusters)\n\n it = 0\n prev_L = 0\n while it < max_iter:\n\n L = 0\n # Assign to each point the index of the closest center\n labels = np.zeros(n_samples, dtype='int')\n for j in range(n_samples):\n d_2 = np.sum((centers-X[j,:])**2, axis=1)\n labels[j] = np.argmin(d_2)\n L += w[i,0] * d_2[labels[j]]\n L /= w.sum()\n\n # Update\n for l in range(n_clusters):\n if np.sum(labels==l) == 0:\n continue\n P = X[labels==l,:]\n pw = w[labels==l,:]\n centers[l] = np.sum(pw * P, axis=0) / pw.sum()\n\n # Check convergence\n if abs(prev_L - L) < tol:\n break\n prev_L = L\n\n it += 1\n\n # logger.info('Finished with {} iterations!'.format(it))\n\n\n # Compute intertia and update the best parameters\n inertia = L\n if best_inertia is None or inertia < best_inertia:\n best_inertia = inertia\n best_centers = centers\n best_labels = labels\n\n return best_centers", "title": "" }, { "docid": "00524989279fe3c0e560e603c3b965a3", "score": "0.7145264", "text": "def k_means_clustering(self):\n\n for k in range(1, 21):\n min_sse = m.inf\n prev_cluster_type = None\n k_cluster_type = None\n print(\"k :\"+str(k))\n\n for _ in range(1000): # iterating 1000 times for each k-means (while selecting the random centroids) for better accuracy.\n centroid = self.initial_centroid(k)\n k_cluster_type = KCluster()\n\n while True:\n for ind in range(k): # Calculation for each k-means clustering\n # print(\"k :\"+str(k))\n # print(ind)\n # print(len(centroid))\n c = ClusterPrototype(centroid[ind])\n\n k_cluster_type.add_cluster(c) # recording each k-means clustering\n\n for row in self.data_list: # dividing the data between the clusters based on euclidean distance\n min_dist = m.inf\n rec = None\n for cluster in k_cluster_type.get_cluster_list():\n dist = self.calculate_euclidean_dist(cluster.get_centroid(), row)\n # cluster_curr = None\n if dist < min_dist:\n min_dist = dist\n cluster_curr = cluster\n\n if cluster_curr is not None:\n cluster_curr.add_record(row)\n prev_centroid = centroid\n centroid = k_cluster_type.centroid_recalculation()\n if len(centroid) != len(prev_centroid):\n print('hello')\n if self.calculate_difference(centroid, prev_centroid, threshold=0.1):\n # self.clusters.append(k_cluster_type)\n break\n else:\n k_cluster_type.flush_KCluster()\n sse_total = k_cluster_type.sse_total()\n if sse_total < min_sse:\n min_sse = sse_total\n prev_cluster_type = k_cluster_type\n if prev_cluster_type is not None:\n self.clusters.append(prev_cluster_type)\n print([cluster_type.sse_total() for cluster_type in self.clusters])\n\n '''for cluster_type in self.clusters:\n print(\"Cluster identity: \"+str(cluster_type.get_identity()))\n count = 1\n for cluster in cluster_type.get_cluster_list():\n print(\"Cluster \"+str(count))\n print(\"size: \"+str(len(cluster.get_data())))\n count += 1'''\n\n self.plot_k_vs_sse(self.clusters) # Plotting K v/s (sum of squared error)", "title": "" }, { "docid": "abcfa6ca2370738627101ee3ded32712", "score": "0.7063481", "text": "def train(self,k,maxitr=30):\r\n self.k=k #store the required no of cluster in 
the process\r\n self.centroids=np.zeros((k,self.clusters.shape[1]),dtype=float) #stores centeroid location\r\n self.cluster_index_list=[[]]*k # stores index of assignedclusters for a particuar centroid\r\n self.initial_cent() #assigns initial centeroid positions\r\n prev=None #compare changes to verify convergence\r\n itr=0 #count iteration\r\n while itr<maxitr and prev!=self.cluster_index_list: #iterate while we are in max itration budget until convergence hen model is trained\r\n prev=list(self.cluster_index_list) #store current values for comparison \r\n self.cluster_index_list=[[]]*k #empty list for storing new centeroid positions\r\n self.asign_centeroids() #assign cluster of data to each centeroid \r\n self.move_centroids() #move the centeroids to their center of cluster\r\n itr=itr+1 #update iteration\r\n return self.centroids,self.cluster_index_list,itr #return our calculated results\r", "title": "" }, { "docid": "05fe0557a71b1926a164a1dd36b80414", "score": "0.704106", "text": "def _fit_cluster(self,X, n_cluster, n_iter=5):\r\n iterations = range(1, n_iter + 1)\r\n \r\n ref_inertias = pd.Series(index=iterations)\r\n \r\n for iteration in iterations:\r\n clusterer = KMeans(n_clusters=n_cluster, n_init=3, n_jobs=-1)\r\n # If you are using Windows server n_jobs = -1 will be dangerous. So the \r\n # value should be set to max cores - 3 . If we use all the cores available\r\n # in Windows server sklearn tends to throw memory error \r\n clusterer.fit(X)\r\n ref_inertias[iteration] = clusterer.inertia_\r\n \r\n mean_nertia = ref_inertias.mean()\r\n \r\n return mean_nertia", "title": "" }, { "docid": "90793405da92c83e66db4fc664d8ae28", "score": "0.70406735", "text": "def fit(self, data):\n data = np.asarray(data, dtype=np.double)\n if self.verbose:\n logger.info(\"Initializing clusters\")\n if self.init == 'random':\n self.cluster_centers_ = np.random.random(\n (self.n_clusters, data.shape[1]), dtype=np.double)\n elif self.init == 'kmeans++':\n self.cluster_centers_ = np.zeros(\n (self.n_clusters, data.shape[1]), dtype=np.double)\n jobs = min(self.n_jobs, self.n_init)\n if jobs > 1:\n self.cluster_centers_ = _minibatch.kmeanspp_multi(\n data, self.cluster_centers_, self.n_samples,\n self.n_init, jobs)\n else:\n self.cluster_centers_ = _minibatch.kmeanspp(\n data, self.cluster_centers_, self.n_samples)\n elif isinstance(self.init, np.ndarray):\n if not self.init.flags['C_CONTIGUOUS']:\n raise TypeError(\"init ndarray must be C_CONTIGUOUS\")\n elif self.init.shape != (self.n_clusters, data.shape[1]):\n raise TypeError(\"init cluster not of correct shape \"\n \"%r != (%d, %d)\" % (self.init.shape,\n self.n_clusters,\n data.shape[1]))\n self.cluster_centers_ = self.init\n\n if self.verbose:\n logger.info(\"Running minibatch\")\n jobs = min(self.n_jobs, self.n_runs)\n if jobs > 1:\n self.cluster_centers_ = _minibatch.minibatch_multi(\n data, self.cluster_centers_, self.n_samples, self.max_iter,\n self.n_runs, jobs, self.bic_termination,\n self.reassignment_ratio)\n else:\n self.cluster_centers_ = _minibatch.minibatch(\n data, self.cluster_centers_, self.n_samples, self.max_iter,\n self.bic_termination, self.reassignment_ratio)\n\n if self.compute_labels:\n if self.verbose:\n logger.info(\"Computing labels\")\n self.labels_ = np.zeros((data.shape[0], ), dtype=np.intc)\n self.labels_ = _minibatch.assign_centroids(\n data, self.cluster_centers_, self.labels_, self.n_jobs)\n\n return self", "title": "" }, { "docid": "36c1da6c428589ba2b42ad6eac8d3aae", "score": "0.7039461", "text": "def kmeans(X, 
k):\n\n Kmean = sklearn.cluster.KMeans(n_clusters=k)\n Kmean.fit(X)\n\n C = Kmean.cluster_centers_\n clss = Kmean.labels_\n\n return C, clss", "title": "" }, { "docid": "1bf994df8cae9713d8cca6a020a9348a", "score": "0.7028262", "text": "def fit(self,data):\r\n self.clusters=data #store our training data set as a 2D Tensor(matrix) features in rows and samples in columns\r", "title": "" }, { "docid": "30d9eeeba9090a7cf211086d80fe4f34", "score": "0.7026531", "text": "def kmeans(X, k):\n Kmean = sklearn.cluster.KMeans(n_clusters=k)\n Kmean.fit(X)\n\n C = Kmean.cluster_centers_\n clss = Kmean.labels_\n\n return C, clss", "title": "" }, { "docid": "80086756803873025bb296492658a080", "score": "0.70150244", "text": "def get_clusters(X):\n\t# TODO\n\tmodel = make_pipeline(KMeans(n_clusters=10))\n\tmodel.fit(X)\n\treturn model.predict(X)", "title": "" }, { "docid": "9a98d158e3f3a2f4728993841016f833", "score": "0.6993362", "text": "def k_means(self, number_of_clusters=0):\n if number_of_clusters == 0:\n number_of_clusters = self.number_of_clusters\n k_means = KMeans(number_of_clusters).fit(self.data)\n means = k_means.cluster_centers_\n print(\"K-Means model has been fit.\")\n print(\"Centers:\")\n print(means)\n print()\n # self.show(k_means=means)\n return means", "title": "" }, { "docid": "97fe6fdb0cd2c2de0ff77589478e11d8", "score": "0.69738203", "text": "def _KMeans(self, data, clusters, iterations=100):\r\n (n_samples, n_features) = np.shape(data)\r\n centroids = self._centroids_initialization(data, clusters)\r\n assigned = self._centroid_assignment(data, centroids)\r\n centroids = self._centroid_computation(data, assigned, centroids)\r\n se = self._compute_se(data, centroids, assigned, n_samples)\r\n for i in range(iterations):\r\n assigned = self._centroid_assignment(data, centroids)\r\n centroids = self._centroid_computation(data, assigned, centroids)\r\n aux_se = self._compute_se(data, centroids, assigned, n_samples)\r\n if aux_se == se:\r\n break\r\n else:\r\n se = aux_se\r\n return assigned", "title": "" }, { "docid": "e9feaac6ed45504901cd1843caa2cd1e", "score": "0.68603414", "text": "def k_means_1d(x, k, max_iter=100):\n sorted_x = sorted(list(set(x)))\n x = np.array(x)\n if len(sorted_x) < k:\n raise ValueError(\"too few buckets\")\n gap = len(sorted_x) / k\n \n centroids = np.array([sorted_x[int(x * gap)] for x in range(k)])\n assign = None\n \n for i in range(max_iter):\n # Cluster Assignment step\n assign = np.array([np.argmin([np.absolute(x_i - x) for x in centroids]) for x_i in x])\n # Move centroids step\n new_centroids = np.array([x[assign == k].mean() for k in range(k)])\n if (new_centroids == centroids).all():\n centroids = new_centroids\n break\n centroids = new_centroids\n return np.array(centroids), assign", "title": "" }, { "docid": "099c6bcbb6fbc460b847294a6d5f9a0f", "score": "0.6849092", "text": "def fit(self, X, y=None):\n n_samples, m_features = X.shape\n\n # variance\n variances = np.mean(np.var(X, 0))\n self.tol *= variances\n\n # Initialize weight matrix\n if hasattr(self.init_weight, '__array__'):\n print(self.init_weight)\n self.weights_ = self.init_weight\n elif self.init_weight == 'random':\n self.weights_ = _weightmatrix(self.n_clusters, m_features)\n elif self.init_weight == 'fixed':\n self.weights_ = np.ones((self.n_clusters, m_features)) * (1 / m_features)\n else:\n raise Exception('init_weight_must be `random` , `fixed` or numpy.array')\n \n # Initialize center\n # need robust data check \n if hasattr(self.init_center, '__array__'):\n self.cluster_centers_ = 
self.init_center\n elif self.init_center == 'k-means++':\n random_state = check_random_state(self.random_state)\n self.cluster_centers_ = _k_init(X=X, n_clusters=self.n_clusters, random_state=random_state)\n elif self.init_center == 'random':\n random_state = check_random_state(self.random_state)\n chosen_ids = random_state.permutation(n_samples)[:self.n_clusters]\n self.cluster_centers_ = X[chosen_ids]\n else:\n raise Exception('init_center must be `random`, `kmeans++` or np.array')\n\n if self.verbose:\n print('origin center_ \\n', self.cluster_centers_)\n \n \n # Iteration\n for i in range(self.max_iter):\n \t# update label \n self._update_label(X)\n # update center\n center_shift_total = self._update_center(X)\n # if weight is fixed continue\n if self.init_weight in ['random', 'fixed']:\n \tself._update_weight()\n\n if self.verbose:\n print('Iteration %i cluster_centers_\\n' % i, self.cluster_centers_)\n print('Iteration %i tolerance: ' % i, center_shift_total)\n if center_shift_total < self.tol:\n break", "title": "" }, { "docid": "c401b9d71b2140a41e5ed319d6ded7af", "score": "0.6827114", "text": "def kMeansInitCentroids(X, K):\n\n# You should return this values correctly\n centroids = np.zeros((K, X.shape[1]))\n\n# ====================== YOUR CODE HERE ======================\n# Instructions: You should set centroids to randomly chosen examples from\n# the dataset X\n#\n for i in range(0, K):\n centroids[i] = X[get_random_index_of_ndarray(X)]\n\n# =============================================================\n return centroids", "title": "" }, { "docid": "394d9e4e05bb71a35ec70495c48a6031", "score": "0.6803521", "text": "def kmeans(X, centres, delta=0.001, maxiter=10, metric='euclidean', p=2, verbose=1):\n # if centres is None:\n # centres = random.sample( X, p)\n if not issparse(X):\n X = np.asanyarray(X) # ?\n centres = centres.todense() if issparse(centres) else centres.copy()\n N, dim = X.shape\n k, cdim = centres.shape\n if dim != cdim:\n raise ValueError(\n 'kmeans: X %s and centres %s must have the same number of columns'\n % (X.shape, centres.shape)\n )\n if verbose:\n print(\n 'kmeans: X %s centres %s delta=%.2g maxiter=%d metric=%s'\n % (X.shape, centres.shape, delta, maxiter, metric)\n )\n allx = np.arange(N)\n prevdist = 0\n for jiter in range(1, maxiter + 1):\n D = cdist_sparse(X, centres, metric=metric, p=p) # |X| x |centres|\n xtoc = D.argmin(axis=1) # X -> nearest centre\n distances = D[allx, xtoc]\n avdist = distances.mean() # median ?\n if verbose >= 2:\n print('kmeans: av |X - nearest centre| = %.4g' % avdist)\n if (1 - delta) * prevdist <= avdist <= prevdist or jiter == maxiter:\n break\n prevdist = avdist\n for jc in range(k): # (1 pass in C)\n c = np.where(xtoc == jc)[0]\n if len(c) > 0:\n centres[jc] = X[c].mean(axis=0)\n if verbose:\n print('kmeans: %d iterations cluster sizes:' % jiter, np.bincount(xtoc))\n if verbose >= 2:\n r50 = np.zeros(k)\n r90 = np.zeros(k)\n for j in range(k):\n dist = distances[xtoc == j]\n if len(dist) > 0:\n r50[j], r90[j] = np.percentile(dist, (50, 90))\n print('kmeans: cluster 50 % radius', r50.astype(int))\n print('kmeans: cluster 90 % radius', r90.astype(int))\n # scale L1 / dim, L2 / sqrt(dim) ?\n return centres, xtoc, distances", "title": "" }, { "docid": "a05bd150612e6902327430f7dcf3a9e7", "score": "0.67744136", "text": "def create_cluster_kmeans(self):\n cluster = KMeans(n_clusters=50, random_state=10)\n cluster.fit(self.train[['latitude', 'longitude']])\n for df in [self.train, self.test]:\n df.loc[:, 'cluster'] = 
cluster.predict(df[['latitude', 'longitude']])", "title": "" }, { "docid": "41f66f424f07ab85278318fa0f3750a8", "score": "0.6765617", "text": "def run_kmeans(X, ncentroids=1, whiten=False):\n import faiss\n import numpy\n \n if isinstance(X, jax.numpy.ndarray):\n X = numpy.array(X)\n \n X = X.reshape((X.shape[0], -1))\n \n if whiten:\n import scipy.cluster.vq as vq\n X = vq.whiten(X)\n\n ## Run kmeans\n d = X.shape[1]\n kmeans = faiss.Kmeans(d, ncentroids, niter=20, verbose=True)\n kmeans.train(X)\n \n ## Find nearest neighbor in data to cluster center\n index = faiss.IndexFlatL2(d)\n index.add(X)\n _, I = index.search(kmeans.centroids, 1)\n \n return kmeans, I", "title": "" }, { "docid": "a479d608fa8402722d43f0e1466500e2", "score": "0.6749479", "text": "def fit(self, X):\r\n counter = 0\r\n # Generate the initial centroids\r\n if self.centroids == []:\r\n\r\n for _ in range(self.ncentroid):\r\n self.centroids.append(\r\n (\r\n np.random.uniform(min(X[:, 0]), max(X[:, 0])),\r\n np.random.uniform(min(X[:, 1]), max(X[:, 1])),\r\n np.random.uniform(min(X[:, 2]), max(X[:, 2])),\r\n )\r\n )\r\n\r\n while counter < self.max_iter:\r\n prediction = self.predict(X)\r\n for i in range(self.ncentroid):\r\n filtered_points = X[prediction == i]\r\n self.centroids[i] = (\r\n np.mean(filtered_points[:, 0]),\r\n np.mean(filtered_points[:, 1]),\r\n np.mean(filtered_points[:, 2]),\r\n )\r\n counter += 1\r\n return None", "title": "" }, { "docid": "383ae18d682060352f76bdeaa09d64e5", "score": "0.6712821", "text": "def transform(self, X):\n check_is_fitted(self, 'cluster_centers_')\n\n return self._transform(X)", "title": "" }, { "docid": "27c64b759ae7b28685db8f789cf8d994", "score": "0.6712682", "text": "def _update_centroids(self, data, k, cluster_assignment):\n\n new_centroids = []\n for i in range(k):\n cluster_points = data[cluster_assignment==i]\n centroid = cluster_points.mean(axis=0)\n centroid = centroid.ravel()\n new_centroids.append(centroid)\n return np.array(new_centroids)", "title": "" }, { "docid": "99c0463a0d0dade51c59d01a0f68ad82", "score": "0.671216", "text": "def calc_centroid(num_centroids, dataloader, key=None,\n num_kmeans_iters=1):\n kmeans = MiniBatchKMeans(n_clusters=num_centroids)\n\n for i in range(num_kmeans_iters):\n for j, data in enumerate(dataloader):\n if key is None:\n value = data\n else:\n value = key(data)\n\n kmeans.partial_fit(value)\n\n return kmeans.cluster_centers_", "title": "" }, { "docid": "3d4212399b31bbbab95589303890564c", "score": "0.6711042", "text": "def predict(self, X=None):\n cluster_centers = list(map((lambda i: i.get_center()), self.micro_clusters))\n #centers_weights = list(map((lambda i: i.get_weight()), self.micro_clusters))\n kmeans = KMeans(n_clusters=self.nb_macro_cluster, random_state=1)\n result = kmeans.fit_predict(X=cluster_centers, y=None)\n return result", "title": "" }, { "docid": "9d3391d3c0a97ff06db734a2e8788f53", "score": "0.67084193", "text": "def KMeansClustering(data, k, seed_init=12345, large_output = False):\n\n random.seed(seed_init)\n D = len(data)\n centroids = []\n clusters = [[] for i in range(k)]\n point_cluster_mask = [0] * D\n n_changes = D\n\n # Select intial centroids\n centroid_numbers = set()\n while len(centroid_numbers) < k:\n r = random.randint(1,D-1)\n centroid_numbers.add(r)\n for i in centroid_numbers:\n centroids.append(data[i])\n\n while n_changes: \n # Reset clusters\n clusters = [[] for i in range(k)]\n\n # Calculate distances from centroids and assign to clusters\n n_changes = D\n distances = np.zeros((D, k))\n for 
d in range(D):\n for c in range(k):\n distances[d][c] = _point_distance(data[d], centroids[c])\n old_assignment = point_cluster_mask[d]\n assignment = np.argmin(distances[d])\n clusters[assignment].append(data[d])\n point_cluster_mask[d] = assignment\n if old_assignment == assignment:\n n_changes -= 1\n \n # Calculate new centroids\n for c in range(k):\n centroids[c] = np.sum(clusters[c], axis=0) / len(clusters[c])\n\n # Annotate data with cluster\n result = np.empty((D,3))\n for d in range(D):\n result[d] = np.append(data[d], point_cluster_mask[d])\n\n if large_output:\n return result, clusters, centroids, point_cluster_mask\n else:\n return result", "title": "" }, { "docid": "b77c8bc6b69439a0cbdc5f1e2b1d427f", "score": "0.6699993", "text": "def kmeans(X, k, iterations=1000):\n if type(iterations) is not int or iterations < 1:\n return None, None\n\n cent = initialize(X, k)\n if cent is None:\n return None, None\n\n for i in range(iterations):\n cpy = np.copy(cent)\n dist = np.linalg.norm(X - cent[:, np.newaxis], axis=2)\n cls = np.argmin(dist, axis=0)\n\n for j in range(k):\n if len(X[j == cls]) == 0:\n cent[j] = initialize(X, 1)\n else:\n cent[j] = np.mean(X[j == cls], axis=0)\n if (cpy == cent).all():\n return cent, cls\n\n dist = np.linalg.norm(X - cent[:, np.newaxis], axis=2)\n cls = np.argmin(dist, axis=0)\n\n return cent, cls", "title": "" }, { "docid": "b6d985c645a1567aef56821a0dd306cf", "score": "0.6695425", "text": "def k_means(data, k, eps=1e-4, mu=None):\n if type(data) != type(np.array(0)): # if data is not a numpy array\n data = np.array(data) # convert it to one\n\n n, d = data.shape\n if mu is None:\n # randomly choose k points as initial centroids\n mu = data[random.sample(range(data.shape[0]), k)]\n\n # stores cluster index assigned to each of n points\n assignments = np.zeros(n)\n cluster_sizes = np.zeros(k)\n cluster_sums = np.zeros((k,d))\n\n # store the previous mu to detect when to stop\n prev_mu = np.zeros((k,d))\n\n converged = False\n iters = 0\n while (not converged):\n iters += 1\n print(\"Iter:\", iters)\n\n # assign each point to the euclidean closest cluster\n for pt in range(n):\n cluster_id = 0\n best_euc_dist = float('inf')\n\n for u in range(k):\n resid = data[pt]-mu[u]\n euc_dist = np.linalg.norm(resid, ord=2)\n\n if euc_dist < best_euc_dist:\n cluster_id = u\n best_euc_dist = euc_dist\n\n assignments[pt] = cluster_id\n cluster_sizes[cluster_id] += 1\n cluster_sums[cluster_id] += data[pt]\n\n # recompute means: divide each cluster sum by the num pts. 
in that cluster\n        for i in range(k):\n            mu[i] = cluster_sums[i] / cluster_sizes[i]\n\n        # check if converged\n        for i in range(k):\n            if np.linalg.norm((mu[i] - prev_mu[i]), ord=2) > eps: # if a mean has changed by more than epsilon\n                converged = False\n            else:\n                converged = True\n\n        # make sure this is here!\n        prev_mu = mu\n        \n    return (mu, assignments)", "title": "" }, { "docid": "4e8f471dedec632de7b1bd47e9e52736", "score": "0.6693203", "text": "def update_clusters(self, set_x, max_iter=20):\n\n        if self.centroids is None:\n            # make leaf variable after editing it then wrap in param\n            self.centroids = nn.Parameter(torch.zeros((self.num_classes * self.k, set_x.shape[1]), requires_grad=True).cuda())\n\n        super().update_clusters(set_x, max_iter=max_iter)", "title": "" }, { "docid": "dd3960980c8da6264426911db462eb58", "score": "0.6692395", "text": "def cluster(self):\n\n        # Step 0: Prepare data\n        self.generateDataPoints()\n\n        # Step 1: Generate random seeds to begin clustering with\n        self.generateSeeds()\n        centroids = []\n        for i in range(len(self.seeds)):\n            centroids.append(self.data_points[self.seeds[i]])\n\n        # Step 2: Run computation epochs\n        change_flag = True\n        clusters = None\n        num_epochs = 0\n        while change_flag:\n            change_flag, centroids, clusters = self.computeCentroids(centroids, clusters)\n            num_epochs = num_epochs + 1\n            print(\"Epoch: {0}\\n\".format(num_epochs))\n            for i in range(len(centroids)):\n                if self.config.diff_labels:\n                    print(\"Centroid Labels: {0}\".format(self.config.diff_labels))\n                print(\"Centroid: {0}, Occupancy: {1}, Values: {2}\". 
format(i, len(clusters[i]), centroids[i]))\n\n #print(\"Final Centroids: {0}\\n\".format(centroids))", "title": "" }, { "docid": "819f566430d26c9813f106c6bf713e68", "score": "0.6685836", "text": "def clustering(self, x: np.ndarray, k: int,\n **kwargs) -> np.ndarray or tuple:", "title": "" }, { "docid": "219b764dc7bf6744806382989812944d", "score": "0.6683328", "text": "def _reevaluate_centers(self):\n clusters = self.clusters\n newmu = []\n keys = sorted(self.clusters.keys())\n for k in keys:\n newmu.append(np.mean(np.take(self.X, clusters[k], axis=0), axis=0))\n self.mu = np.asarray(newmu)", "title": "" }, { "docid": "32254cac4ddbcf03fa4b6fa57bb006fd", "score": "0.66690993", "text": "def k_means(data, k):\n means = get_k_random_datapoints(data, k)\n p_means = [np.zeros(len(means[0])) for m in means]\n SSEs = []\n while diffs_above_threshold(means, p_means, threshold=0.0001):\n clusters = [[m] for m in means]\n for x in data:\n add_x_to_nearest_cluster(x, clusters, means)\n p_means = means\n means = update_means(clusters)\n SSEs += [get_SSE(clusters, means)]\n return means, SSEs", "title": "" }, { "docid": "82bb49f764418506d80126ae16f025ee", "score": "0.66604984", "text": "def init_center(self, x):\n print(\"initing center\")\n # Using a for loop, we select random seeds that arent the same to be our clusters\n centers = np.zeros(self.k, dtype=list)\n \n for i in range(self.k):\n rand = random.randint(0, len(x)-1)\n \n if(np.any(centers[:] == x[rand])):\n i -= 1\n else:\n centers[i] = x[rand]\n\n self.centers = centers", "title": "" }, { "docid": "b6e292cd7a9c4775169b667c5a8ad45b", "score": "0.66505563", "text": "def compute_clusters(self, data, clusters=None):\r\n self.set_clusters(clusters)\r\n return self._KMeans(data, self.clusters)", "title": "" }, { "docid": "dcb9ba4d4eb7543124c722c4e3cdc9fc", "score": "0.66471225", "text": "def kmeans(X, k, C):\r\n\r\n\t# S = [[]]*k\t# weird why it doesn't work \r\n\tS = [[] for _ in range(k)]\r\n\t#print (\"centroid before\", C)\r\n\r\n\t# assignment step \r\n\tfor i in range(len(X)):\r\n\t\tx = X[i]\r\n\t\t#print(\"sample\", i, x)\r\n\t\tcluster_index = closest(x, C)\r\n\t\t#print(\"assign to cluster\", cluster_index)\r\n\t\tS[cluster_index].append(i)\r\n\t\r\n\t#print (\"S\", S)\t\r\n\t#print (\"cluster 0 coordiante\", X[S[0]])\r\n\t#print (\"cluster 1 coordiante\", X[S[1]])\r\n\t\r\n\r\n\t# update centroids \r\n\t# new_C = copy.deepcopy(C)\r\n\tfor i in range(k):\r\n\t\tC[i] = numpy.sum(X[S[i]], axis=0)/len(S[i])\r\n\r\n\t# print (\"C\",)\r\n\r\n\treturn C, S", "title": "" }, { "docid": "a7968ae1444356692497699f85677078", "score": "0.66061246", "text": "def cluster(X, number_of_categories, data, name):\n km = KMeans(n_clusters=5, init='k-means++', max_iter=100, n_init=1)\n km.fit(X)\n print(name)\n print(\"NMI: %0.3f\" % normalized_mutual_info_score(data.target, km.labels_))", "title": "" }, { "docid": "8b0955299c93ed9e7204901b4d55962d", "score": "0.65911573", "text": "def kmeans_fast(features, k, num_iters=100):\n\n N, D = features.shape\n\n assert N >= k, 'Number of clusters cannot be greater than number of points'\n\n # Randomly initalize cluster centers\n idxs = np.random.choice(N, size=k, replace=False)\n centers = features[idxs]\n assignments = np.zeros(N)\n\n for n in range(num_iters):\n ### YOUR CODE HERE\n #find assignments\n distances = cdist(features, centers, 'euclidean')\n new_assign = np.argmin(distances, axis = 1)\n if np.array_equal(assignments, new_assign):\n break\n assignments = new_assign\n #generate new clusters\n for i in 
range(len(centers)):\n indexes = [j for j in range(N) if assignments[j] == i]\n centers[i] = np.mean(features[indexes], axis = 0)\n ### END YOUR CODE\n\n return assignments", "title": "" }, { "docid": "497034ceac8e8a79eab417aad74effff", "score": "0.65880626", "text": "def k_means(data, n_clusters=4, max_iterations=100, tolerance=1e-8):\n start_time = time.time()\n samples, dimensions = data.shape\n # we need to (randomly) choose initial centroids\n centroids = data[np.random.choice(len(data), n_clusters, replace=False), :]\n distances = get_distances_to_clusters(data, centroids)\n cluster_labels = assign_points_to_clusters(distances)\n\n #centroid_list = []\n #temp_centroids = centroids.copy()\n #centroid_list.append(temp_centroids)\n # For each cluster we need to update the centroid by calculating new means\n # for all the data points in the cluster and repeat\n for iteration in range(max_iterations):\n prev_centroids = centroids.copy()\n for k in range(n_clusters):\n # Here we find the mean for each centroid\n vector_mean = np.zeros(dimensions)\n n = 0\n for i in range(samples):\n if cluster_labels[i] == k:\n vector_mean += data[i, :]\n n += 1\n # And update according to the new means\n centroids[k, :] = vector_mean / n\n distances = np.zeros((samples, n_clusters))\n\n # we need to use copies to avoid overwriting (pointer stuff)\n #temp_centroids = centroids.copy()\n #centroid_list.append(temp_centroids)\n\n # we find the squared Euclidean distance from each centroid to every point\n distances = get_distances_to_clusters(data, centroids)\n # we assign each point to a cluster\n cluster_labels = assign_points_to_clusters(distances)\n\n centroid_difference = np.sum(np.abs(centroids - prev_centroids))\n if centroid_difference < tolerance:\n print(f'Converged at iteration: {iteration}')\n print(f'Runtime: {time.time() - start_time} seconds')\n return cluster_labels, centroids\n\n\n print(f'Did not converge in {max_iterations} iterations')\n print(f'Runtime: {time.time() - start_time} seconds')\n return cluster_labels, centroid_list", "title": "" }, { "docid": "31e4c0f2e067c4f2960887da26f5113d", "score": "0.658654", "text": "def assign_cluster(data):\n kmeans = KMeans(k=2, seed=1, featuresCol=\"features_scaled\", predictionCol=\"label\")\n model = kmeans.fit(data)\n label_df = model.transform(data)\n return label_df", "title": "" }, { "docid": "5420f7ee42c55f1d76492db97a82416e", "score": "0.6576598", "text": "def kCluster(self):\n done = False\n \n while not done:\n self.iterationNumber += 1\n self.updateCentroids()\n self.assignPointsToCluster()\n #\n # we are done if fewer than 1% of the points change clusters\n #\n if float(self.pointsChanged) / len(self.memberOf) < 0.01:\n done = True\n print(\"Final SSE: %f\" % self.sse)", "title": "" }, { "docid": "1c613a6118d7e7957aa1789d2ba00e57", "score": "0.6574336", "text": "def kmeans(X, k):\n\n # randomly select initial clusters from input data\n clusters = np.random.choice(np.squeeze(X), size=k)\n prevClusters = clusters.copy()\n stds = np.zeros(k)\n converged = False\n\n while not converged:\n \"\"\"\n compute distances for each cluster center to each point \n where (distances[i, j] represents the distance between the ith point and jth cluster)\n \"\"\"\n distances = np.squeeze(np.abs(X[:, np.newaxis] - clusters[np.newaxis, :]))\n\n # find the cluster that's closest to each point\n closestCluster = np.argmin(distances, axis=1)\n\n # update clusters by taking the mean of all of the points assigned to that cluster\n for i in range(k):\n 
pointsForCluster = X[closestCluster == i]\n if len(pointsForCluster) > 0:\n clusters[i] = np.mean(pointsForCluster, axis=0)\n\n # converge if clusters haven't moved\n converged = np.linalg.norm(clusters - prevClusters) < 1e-6\n prevClusters = clusters.copy()\n\n distances = np.squeeze(np.abs(X[:, np.newaxis] - clusters[np.newaxis, :]))\n closestCluster = np.argmin(distances, axis=1)\n\n clustersWithNoPoints = []\n for i in range(k):\n pointsForCluster = X[closestCluster == i]\n if len(pointsForCluster) < 2:\n # keep track of clusters with no points or 1 point\n clustersWithNoPoints.append(i)\n continue\n else:\n stds[i] = np.std(X[closestCluster == i])\n\n # if there are clusters with 0 or 1 points, take the mean std of the other clusters\n if len(clustersWithNoPoints) > 0:\n pointsToAverage = []\n for i in range(k):\n if i not in clustersWithNoPoints:\n pointsToAverage.append(X[closestCluster == i])\n pointsToAverage = np.concatenate(pointsToAverage).ravel()\n stds[clustersWithNoPoints] = np.mean(np.std(pointsToAverage))\n\n # return clusters, stds\n return clusters", "title": "" }, { "docid": "6bae7b0844ee66bf27136dd6cf1d0a14", "score": "0.6563738", "text": "def my_kmeans_coords(image, centroid_num, using_kmeanspp=False,iteration_num=10):\n clusters = dict()\n image_shape = image.shape\n assignments = np.zeros(image_shape[:2])\n assignments -= 2\n # initialize\n images_coords = []\n for i in range(image_shape[0]):\n tmp = []\n for j in range(image_shape[1]):\n tmp.append([i, j])\n images_coords.append(tmp)\n\n images_coords = np.array(images_coords)\n images_coords = images_coords.transpose(2, 0, 1)\n data_points = np.vstack([image.transpose(2, 0, 1), images_coords])\n data_points = data_points.transpose(1, 2, 0)\n\n if using_kmeanspp:\n centroids = kmeanspp(data_points,centroid_num)\n else:\n centroids = random_pick(data_points,centroid_num)\n\n\n # main iterations:\n for i in range(iteration_num):\n\n clusters = dict()\n for m in range(centroid_num):\n clusters[m] = []\n\n # E - step\n for j, row in enumerate(image):\n for k, channels in enumerate(row):\n point = np.hstack((channels,j,k))\n current_idx,_ = nearest_centroid(centroids,point)\n assignments[j][k] = current_idx\n # record assignments\n clusters[current_idx].append(point)\n\n # M - step\n # re-generate new centroids by the mean of points in cluster\n new_centroids = []\n for centroid_idx, points in clusters.items():\n if points:\n current_new_centroid = np.mean(points, axis=0)\n else:\n # remain\n current_new_centroid = centroids[centroid_idx]\n new_centroids.append(current_new_centroid)\n centroids = np.array(new_centroids)\n\n return clusters", "title": "" }, { "docid": "8d1afb3cae3c2b1ee877abc1c73407e6", "score": "0.6557094", "text": "def create_clusters(k, centroids, datadict, iterations):\r\n for iteration in range(iterations):\r\n #print(\"****Iteration\", iteration, \"****\")\r\n clusters = []\r\n for i in range(k):\r\n clusters.append([])\r\n\r\n for key in datadict:\r\n distances = []\r\n for cl_index in range(k):\r\n dist = euclid_distance(datadict[key], centroids[cl_index])\r\n distances.append(dist)\r\n min_dist = min(distances)\r\n index = distances.index(min_dist)\r\n clusters[index].append(key)\r\n\r\n dimensions = 2\r\n for cl_index in range(k):\r\n sums = [0]*dimensions\r\n for key in clusters[cl_index]:\r\n data_points = datadict[key]\r\n for ind in range(2):\r\n sums[ind] = sums[ind] + data_points[ind]\r\n for ind in range(len(sums)):\r\n cl_len = len(clusters[cl_index])\r\n if cl_len != 0:\r\n sums[ind] 
/= cl_len\r\n centroids[cl_index] = sums\r\n\r\n #for c in clusters:\r\n #print(\"CLUSTER\")\r\n #for key in c:\r\n #print(datadict[key], end=\" \")\r\n #print()\r\n\r\n return clusters", "title": "" }, { "docid": "84d92bc668ae5699fbd87187f9489b4e", "score": "0.6552349", "text": "def km(dat, k, random_state = 10):\n\n random.seed(random_state)\n\n # initialize with k random data points\n centroids = random.sample(dat, k)\n # allocate to centroid of minimum cosine distance\n clusters = allocate_to_centroids(dat, centroids)\n # recalculate centroids as average over the cluster\n new_centroids = recalculate_centroids(dat, clusters)\n\n iternum = 0\n\n while not np.array_equal(centroids, new_centroids):\n iternum += 1\n centroids = new_centroids\n clusters = allocate_to_centroids(dat, centroids)\n new_centroids = recalculate_centroids(dat, clusters)\n print 'iteration', iternum\n\n return(clusters)", "title": "" }, { "docid": "9b2b1b87a3f6e30e9bcc784ea22dd3d8", "score": "0.65522057", "text": "def _classify(self):\n \n self._cluster[self._iteration] = {}\n self._label[self._iteration] = {}\n \n for centroid_number in range(self.k):\n self._cluster[self._iteration][centroid_number] = []\n \n for d in self.data:\n distances = [] \n \n for centroid_number in range(self.k):\n \n distances.append(self.metric(d.vector, self._centroid[self._iteration][centroid_number]))\n \n closest_centroid = np.argmin(distances)\n \n self._cluster[self._iteration][closest_centroid].append(d)\n self._label[self._iteration][d.name] = closest_centroid", "title": "" }, { "docid": "d1a09ecb74cddf459a3f444de06063c1", "score": "0.6551096", "text": "def cluster(self, n_clusters=8, n_init=10):\n\n self.k_means = cluster_k_means(self.proc_data.drop('country', axis=1), n_clusters, n_init)\n self.proc_data['cluster'] = self.k_means.labels_\n return self.k_means", "title": "" }, { "docid": "2aa054be2aeebdd854432464539e4b18", "score": "0.6550475", "text": "def collect_kmeans(cnn_embedding, n_clusters, out_dir):\n kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(cnn_embedding)\n print('Fit kmeans with {0} clusters.'.format(n_clusters))\n \n fn_out = os.path.join(out_dir, 'kmeans_clusters_'+str(n_clusters))\n np.savez(fn_out, kmeans=kmeans.cluster_centers_)\n\n print('Saved cluster centers to '+fn_out+'.npz')\n \n # return centers as well, in case you want to do something with them\n return kmeans.cluster_centers_", "title": "" }, { "docid": "fcf54b788992f3bed1ccf79de456ede0", "score": "0.65419906", "text": "def _fit_transform(self):\n if self.seed != 0:\n k = KMeans(n_clusters=self.n_clusters, random_state=self.seed)\n else:\n k = KMeans(n_clusters=self.n_clusters)\n transf = k.fit_transform(self.data.T)\n # fit = k.fit_predict(self.data.T)\n # print(fit[:100]) # TODO change shape based on classification.\n transf = pd.DataFrame(transf, index=self.data.columns)\n\n labels = pd.DataFrame(\n k.labels_, columns=['label'], index=self.data.columns\n )\n\n self.expt.update_metadata(labels)\n return k, transf", "title": "" }, { "docid": "34e6a60faf1703936ec48c286caa44c6", "score": "0.65327823", "text": "def assign_clusters_k_means(points, clusters):\n # NB: \"cluster_weights\" is used as a common term between functions\n # the name makes more sense in soft-clustering contexts\n def distance(p,c):\n return ((p[0] - c[0])**2 + (p[1] - c[1])**2)**0.5\n \n cluster_weights = np.zeros((len(points), len(clusters)))\n for i,point in enumerate(points):\n d = [distance(point, cluster) for cluster in clusters]\n 
cluster_weights[i][np.argmin(d)] = 1\n \n return cluster_weights", "title": "" }, { "docid": "697e4304b7cb7942c9d8deacd4414920", "score": "0.6530926", "text": "def run_k_means(samples, initial_centroids, n_iter):\n\n centroid_history = []\n current_centroids = initial_centroids\n clusters = []\n for iteration in range(n_iter):\n centroid_history.append(current_centroids)\n print(\"Iteration %d, Finding centroids for all samples...\" % iteration)\n clusters = find_closest_centroids(samples, current_centroids)\n print(\"Recompute centroids...\")\n current_centroids = get_centroids(samples, clusters)\n print(current_centroids)\n\n return clusters, centroid_history", "title": "" }, { "docid": "b053dae0ab475a85ddcd96db279b42aa", "score": "0.6518499", "text": "def findClusters(self, data, nr_cluster):\n clusters = [random.choice(data) for _ in range(nr_cluster)]\n \n for _ in range(10):\n cluster_points = self.assign_vector_to_cluters(data, clusters)\n #update the cluster mean vectors\n clusters = self.calculate_mean_points( cluster_points)\n return clusters", "title": "" }, { "docid": "62896776f4abe94839b69dedb6d562b6", "score": "0.65184146", "text": "def get(self, x, labels, preserved_labels=None):\n self.logger.info(\"Initializing SeededKMeans.\")\n\n unq_labels = np.unique(labels[labels >= 0])\n n_clusters = len(unq_labels)\n centroids = []\n self.logger.info(\n \"Found {0} unique labels. Using seeded KMeans.\".format(n_clusters))\n\n for i in range(n_clusters):\n centroid = np.mean(x[labels == unq_labels[i]], axis=0)\n centroids.append(centroid)\n\n centroids = np.array(centroids)\n\n kmeans = KMeans(n_clusters=n_clusters, init=centroids, n_init=1)\n labels = kmeans.fit_predict(x)\n return labels", "title": "" }, { "docid": "418a06553d6607e8152dd83a76781347", "score": "0.6515415", "text": "def kcenter(data, num_clusters= 2, metric= \"euclidean\"):\n N = data.shape[0]\n\n # Initialize cluster centers to -1\n center_indices = -np.ones((num_clusters,), dtype=np.int32) \n\n # Compute the distance between every pair of points \n dist_mat = dist(data, data, metric= metric) # N x N\n\n # Compute centers\n for i in range(num_clusters):\n if i == 0:\n # Choose an id arbitrarily from N\n new_center_id = np.random.randint(N)\n else:\n # Choose maximum of the minimum distance of the previous center indices\n new_center_id = np.argmax(np.min(dist_mat[center_indices[0:i]], axis= 0))\n\n center_indices[i] = new_center_id\n\n # Get the labels of the points\n cluster_labels = np.argmin(dist_mat[center_indices], axis= 0) \n\n # Get the cost of clustering\n cost = np.max(np.min(dist_mat[center_indices], axis= 0))\n\n return data[center_indices], cluster_labels, cost", "title": "" }, { "docid": "7b39fa67ebe5e88c5f3fa050f3d5e314", "score": "0.64840204", "text": "def revise_centers(self, x, labels):\n print(\"revising center\")\n # I think this updates it?\n for i in range(self.k):\n wherei = np.squeeze(np.argwhere(labels == i), axis=1) \n self.centers[i] = x[wherei, :].mean(0)", "title": "" }, { "docid": "4bc27f5792175b727ff9bba8a35b41fc", "score": "0.64774215", "text": "def _compute_means_sklearn(self):\n self.link = {}\n\n for n in self.k_range:\n means = np.mean(self.cluster_centers[n], axis=1)\n self.plot_data[n] = np.take(means, self.labels[n].values)\n self.link[n] = dict(zip(means, range(n)))", "title": "" }, { "docid": "3fc60304d327aef80876a99511387eab", "score": "0.6464893", "text": "def cluster(self):\n song_cluster_pipeline = Pipeline([('scaler', StandardScaler()), \n ('kmeans', KMeans(n_clusters=20, 
\n verbose=2, n_jobs=4))], verbose=True)\n # Select numerical features for training\n X = self.df.select_dtypes(np.number)\n self.number_cols = list(X.columns)\n song_cluster_pipeline.fit(X)\n song_cluster_labels = song_cluster_pipeline.predict(X)\n # Label each data with clustered result\n self.df['cluster_label'] = song_cluster_labels\n return song_cluster_pipeline", "title": "" }, { "docid": "082e3e873c3420fcb5cc9b0557cf9f26", "score": "0.6452463", "text": "def _initialize_cluster_centers_kmpp(self):\n\n cluster_indices = np.zeros(self.K, dtype = int)\n self.cluster_centers_ = np.zeros((self.K, self.P))\n\n # pick the first one at random from the data\n cluster_indices[0] = np.random.choice(self.N)\n # ... loading the full datapoint, or ...\n if self.HDX is not None:\n self.cluster_centers_[0] = self.HDX[cluster_indices[0]]\n # ... using the masked one\n else:\n self.cluster_centers_[0][self.mask[cluster_indices[0]]] = \\\n self.RHDX[cluster_indices[0]]\n\n # initialize the previous distance counter to max float\n # (so it's guaranteed to be overwritten in the loop)\n d_prev = np.ones(self.N) * float_info.max\n\n # now pick the remaining k-1 cluster_centers\n for k in range(1,self.K):\n # distance from all the data points to the last cluster added\n d_curr = self.pairwise_distances(Y = self.cluster_centers_[k-1])[:,0]\n # ||x - U|| is either this distance or the current minimum\n d_curr = np.min((d_prev, d_curr), axis = 0)\n # overwrite previous distances with new closest\n d_prev = np.copy(d_curr)\n\n # compute this to normalize d_curr_sum into a prob density, and\n # also for the check used below\n d_curr_sum = d_curr.sum()\n\n # if the mask didn't obliterate all distance information, then\n # pick a datapoint at random with prob proportional to its distance\n # from the current cluster set\n if d_curr_sum > 0:\n cluster_indices[k] = np.random.choice(self.N, p = d_curr/d_curr_sum)\n else:\n # then the mask obliterated all distance information, so just\n # pick one uniformly at random that's not already been chosen\n available_indices = set(range(self.N)).difference(set(cluster_indices))\n cluster_indices[k] = np.random.choice(list(available_indices))\n # finally, assign the cluster, either by setting all P entires \n # from the dense HDX ...\n if self.HDX is not None:\n self.cluster_centers_[k] = self.HDX[cluster_indices[k]]\n # ... 
or by setting only M entries from the sparse RHDX\n else:\n self.cluster_centers_[k][self.mask[cluster_indices[k]]] = \\\n self.RHDX[cluster_indices[k]]\n\n\n return [self.cluster_centers_, cluster_indices]", "title": "" }, { "docid": "91cf89315b0cef316d52220243865a02", "score": "0.6451448", "text": "def kMeans(dataset, num_clusters):\n kMeans_runner = KMeans(n_clusters=num_clusters, random_state=42, n_jobs=3)\n\n labels = kMeans_runner.fit_predict(dataset)\n\n return labels", "title": "" }, { "docid": "b1b28a92af07aae460df589eb4e1cc62", "score": "0.6448099", "text": "def cluster(positions, num_iters, k):\n len_data = len(positions)\n\n # Allocate random cluster starting coordinates\n\n cluster_centre = [positions[randrange(len_data)], positions[randrange(len_data)],\n positions[randrange(len_data)]]\n allocated_class = []\n cluster_size = [None] * len_data\n n = 0\n while n < num_iters:\n allocated_class.clear()\n for point in positions:\n distance = [None] * k\n l = 0\n while l < k:\n # Calculate euclidean distance between the current centre and each point in the cluster\n distance[l] = calculate_distance(point[0], cluster_centre[l][0], point[1], cluster_centre[l][1])\n l += 1\n allocated_class.append(distance.index(min(distance)))\n\n m = 0\n while m < k:\n points_in_cluster = [data for jpoint, data in enumerate(positions)\n if allocated_class[jpoint] == m]\n cluster_size[m] = len(points_in_cluster)\n if cluster_size[m] <= 0:\n print('Cluster size is 0')\n # Calculate new average centrepoint for cluster in x and y\n new_mean = (\n sum([a[0] for a in points_in_cluster]) / cluster_size[m],\n sum([a[1] for a in points_in_cluster]) / cluster_size[m])\n cluster_centre[m] = new_mean\n m += 1\n n += 1\n num = 0\n while num < k:\n print(\"Cluster \" + str(num) + \" is centred at \" + str(cluster_centre[num]) + \" and has \" +\n str(cluster_size[num]) + \" points.\")\n num += 1", "title": "" }, { "docid": "2794f1af86388507d193306661db2b7f", "score": "0.64401823", "text": "def cluster(self, samples, max_iter=10, dist_met=\"euc\", preprocess=None):\n\n # reset clusters to be empty\n self.clusters = []\n \n # Check that the number of clusters does not exceed number of samples\n if self.k > len(samples):\n raise ValueError(\"Cannot have more clusters than samples!\")\n \n # Extract onbits from Ligand objects and put them into an ndarray\n # Other preprocessing functions can be passed if the input is a set of other objects,\n # allowing this function to be data-type agnostic\n if preprocess is not None: \n onbits = preprocess(samples)\n \n ### Use k-means++ to initialize self.k random centroids, with inspiration from\n ### https://www.real-statistics.com/multivariate-statistics/cluster-analysis/initializing-clusters-k-means/\n \n # Store a copy of the sample value data for k-means++\n # because values will be deleted from array as centroids are selected\n cent_samples = onbits.copy()\n \n # 1. Choose a random data point as first centroid c0 (and remove from further centroid selection)\n # Centroids are stored as index:sample for easy access\n rand = random.randint(0, len(cent_samples)-1)\n self.centroids[0] = cent_samples[rand]\n cent_samples = np.delete(cent_samples, rand, 0)\n \n # 2. 
For each data points that is not a centroid, find min distance to all chosen centroids\n # Each iteration, choose new centroid with random probability based on distance\n # Repeat until k centroids are selected \n while len(self.centroids.keys()) < self.k:\n \n # get minimum distance from each sample to the centroids\n distances = []\n for curr_sample in cent_samples: \n \n min_dist = np.inf\n for curr_cent in self.centroids.keys():\n curr_dist = super().calc_dist(curr_sample, self.centroids[curr_cent], how=dist_met)\n \n if curr_dist < min_dist:\n min_dist = curr_dist\n \n distances.append(min_dist)\n \n # Normalize array of minimum distance values to 1 to create \"probablilities\"\n # The lower the distance (ie. the closer it is to a current centroid), the lower the probability value\n # Values with a distance of 0 (identical to current centroid) will not be selected\n\n prob = np.array(distances)\n if np.isnan(prob).any():\n prob = np.ones(len(distances))\n print(prob)\n\n prob = prob / prob.sum()\n \n # Draw a random choice based on the probabilities\n choice = np.random.choice(range(len(cent_samples)), 1, p=prob)\n \n # Add the random choice of sample to centroids and remove it from further consideration \n # I realized after writing this that it's not neccessary to delete the sample\n # since it will have 0 chance of being selected again, but it would decrease runtime \n # (a tiny bit) during min distance calculations\n self.centroids[len(self.centroids.keys())] = cent_samples[choice[0]]\n cent_samples = np.delete(cent_samples, choice[0], 0)\n\n ### Use chosen centroids to perform k-means clustering\n \n # Keep track of ind for the number of iterations \n # and not_converged to see whether algo has converged\n ind = 0\n not_converged = True\n \n # assign each sample to a cluster\n self.clusters = [0] * len(onbits)\n \n # clustering - adapted from https://jakevdp.github.io/PythonDataScienceHandbook/05.11-k-means.html\n # https://medium.com/@rishit.dagli/build-k-means-from-scratch-in-python-e46bf68aa875\n while ind < max_iter and not_converged:\n \n # calculate the distance from each sample to the centroid\n for sample_ind in range(len(onbits)):\n \n min_dist = np.inf\n for cent_ind in self.centroids.keys():\n curr_dist = super().calc_dist(onbits[sample_ind], self.centroids[cent_ind], how=dist_met)\n \n # assign to lowest centroid\n if curr_dist < min_dist:\n self.clusters[sample_ind] = cent_ind\n min_dist = curr_dist\n \n # if any of the centroids are empty, fix by assigning a random point to that centroid\n cent_keys, counts = np.unique(self.clusters, return_counts=True)\n cent_dict = dict(zip(cent_keys, counts))\n \n for centroid in self.centroids.keys():\n \n # search for centroids that are empty\n if centroid not in self.clusters: \n # put the first point from another cluster (that has \n # more than one point) into this current cluster\n for index in range(len(self.clusters)):\n if cent_dict[self.clusters[index]] > 1:\n self.clusters[index] = centroid\n break\n \n # store the previous clusters to check for convergence after updating\n prev_centroids = self.centroids\n \n # calculate mean of new clusters and update centroids\n for curr_cent in self.centroids.keys():\n \n # get the onbits for the centroids \n curr_indices = list(np.where(np.array(self.clusters) == curr_cent)[0])\n curr_onbits = np.take(onbits, curr_indices, axis=0)\n \n # take the mean of the selected onbits and set that as the new centroid\n new_centroid = np.zeros(curr_onbits.shape[1])\n \n for curr_onbit in 
curr_onbits:\n new_centroid += curr_onbit\n \n self.centroids[centroid] = new_centroid / len(curr_onbits)\n \n # Check for convergence by comparing the old to new centroids\n # If old centroids are the same, then the algorithm is stopped\n # Adapted from https://stanford.edu/~cpiech/cs221/handouts/kmeans.html\n all_equal = 0\n \n for key in self.centroids.keys():\n if (prev_centroids[key] == self.centroids[key]).all():\n all_equal += 1\n \n if all_equal == len(self.centroids.keys()):\n not_converged = False\n \n # increment index tracking the number of iterations\n ind += 1\n \n return self.clusters", "title": "" }, { "docid": "21ff538f750c2f88888e6aed4db0ca07", "score": "0.6432669", "text": "def randCent(dataSet, k):\n dataSet = np.array(dataSet)\n n = dataSet.shape[1]\n #create random cluster centers, within bounds of each dimension\n centroids = np.mat(np.zeros((k,n))) \n for j in range(n): \n minJ = dataSet[:,j].min()\n rangeJ = float(dataSet[:,j].max() - minJ)\n centroids[:,j] = np.mat(minJ + rangeJ * np.random.rand(k,1))\n return centroids", "title": "" }, { "docid": "16dd9b188dd09b2bfc5ee71864a10430", "score": "0.6414443", "text": "def run_kmeans(data, n_clusters, covar_type, covar_tied, n_init):\n #print('running kmeans for k={} init={} covar={} tied={}'.format(n_clusters,n_init,covar_type,covar_tied))\n kmeans = sf_kmeans.SF_KMeans(n_clusters=n_clusters, covar_type=covar_type, covar_tied=covar_tied, n_init=n_init,\n verbose=0,min_members=50)\n kmeans.fit(data)\n aic, bic = kmeans.aic(data), kmeans.bic(data)\n labels = [int(l) for l in kmeans.labels_]\n return aic, bic, labels", "title": "" }, { "docid": "0d067ef5ab8e37be01eadc80b38c63be", "score": "0.6402401", "text": "def get_rough_clusters(self):\n\n # Transform data to nd-array for speed acceleration\n self.transform_data()\n\n # Get initial random entity clusters\n self.initialize_centroids()\n\n if self.dist_threshold <= 1.0:\n warnings.warn(\"Rough distance threshold set <= 1.0 and will produce conventional \\\n k-means solution\")\n\n # Iterate until centroids convergence\n ct = 0\n stop_flag = False\n while stop_flag is False:\n\n t1 = time.time()\n # Back-store centroids\n prev_centroids = deepcopy(self.centroids)\n\n # Get entity-cluster distances\n self.get_entity_centroid_distances()\n\n # Compute upper and lower approximations\n self.assign_cluster_upper_lower_approximation()\n\n # Update centroids with upper and lower approximations\n if self.weighted_distance is True: # Run entity-centroid weighted distance update\n self.update_centroids_weighted_distance()\n else: # Run standard rough k-means centroid update\n self.update_centroids()\n\n # Determine if convergence reached\n stop_flag = self.get_centroid_convergence(prev_centroids)\n\n t2 = time.time()\n iter_time = t2-t1\n print \"Clustering Iteration\", ct, \" in: \", iter_time,\" secs\"\n ct += 1\n\n return", "title": "" }, { "docid": "739bf61ce8ad182f795b5786909a6709", "score": "0.6384038", "text": "def assign_clusters_soft_k_means(points, clusters, beta):\n \n def distance(p,c):\n return ((p[0] - c[0])**2 + (p[1] - c[1])**2)**0.5\n \n denominators = [np.sum([ np.exp((-1/beta)*distance(p,c)) for c in clusters ]) for p in points]\n \n cluster_weights = np.zeros((len(points), len(clusters)))\n for i, point in enumerate(points):\n cluster_weights[i] = [np.exp((-1/beta)*distance(point,c)) / denominators[i] for c in clusters]\n\n return np.array(cluster_weights)", "title": "" }, { "docid": "e6853432f5a6b68ceb13737a512da2ec", "score": "0.63810587", "text": "def 
initial_centroid(self, k):\n centroid = []\n\n for count in range(k):\n centroid.append(self.data_list[r.randint(0, len(self.data_list)-1)])\n return centroid", "title": "" }, { "docid": "291df676aa3bb3805c97e2913a3488fe", "score": "0.63794696", "text": "def KMeans(X, K=5, maxit=10, saveLog=True):", "title": "" }, { "docid": "43cb7439f6ed147f505ad9a406ddbd40", "score": "0.6369157", "text": "def get_new_centers(self, data, k):\n new_centers = [{'lat': 0, 'lng': 0} for i in range(k)]\n n_labels = [0] * k\n\n for key in data:\n new_centers[data[key]['label']]['lat'] += data[key]['coords']['lat']\n new_centers[data[key]['label']]['lng'] += data[key]['coords']['lng']\n n_labels[data[key]['label']] += 1\n\n for c in new_centers:\n c['lat'] /= n_labels[new_centers.index(c)]\n c['lng'] /= n_labels[new_centers.index(c)]\n\n #print(new_centers)\n return new_centers", "title": "" }, { "docid": "96d71de4ff309930a40d3a05e5d28ba2", "score": "0.6362188", "text": "def kmclust(data, k, randomseed=0, fig=False):\n # data is the subset dataframe for clustering, only include variables needed!\n print('# This function returns: model (0), cluster center (1), groups (2)')\n data1 = data.values\n km = KMeans(n_clusters=k, init='k-means++', random_state=randomseed).fit(data1)\n print('# K-means cluster centers:')\n kcenter = pd.DataFrame(km.cluster_centers_)\n kcenter.columns = list(data.columns.values)\n print(tabulate(kcenter, headers='keys', tablefmt='psql', floatfmt='.3f'))\n fit0 = km.labels_\n dists = euclidean_distances(data, data)\n dists0 = pd.DataFrame(dists, columns=['v' + str(i) for i in range(dists.shape[1])])\n\n print('\\nCluster size:')\n fit1 = pd.DataFrame(km.labels_, columns=['kmgroups'])\n size0 = pd.DataFrame(fit1['kmgroups'].value_counts()).sort_index()\n print(tabulate(size0, headers='keys', tablefmt='psql'))\n if fig:\n y_km = km.fit_predict(data1)\n plt.figure()\n plt.title(\"K-means Scatter Plot\")\n for i in range(0, k):\n plt.scatter(data1[y_km == i, 0], data1[y_km == i, 1])\n return km, kcenter, fit0, dists0", "title": "" }, { "docid": "bfcb4a07c99e87e5c8dfe5a8d99fd2ad", "score": "0.6362182", "text": "def supervised_fit(self, x, y):\n self.fit(x)\n poster = self.get_posterior(x) # shape: (N,n_components)\n #print((poster[:,0]))\n self.cluster_label_map = [] #length of n_components\n N = np.shape(x)[0]\n x_compo_list = [] #list of x's components based on highest posterior\n for i in range(N):\n max_compo = np.argmax(poster[i])\n x_compo_list.append(max_compo)\n x_compo = np.array(x_compo_list) #arry form of x_compo_list\n for k in range(self._n_components):\n compo_idx = np.where(x_compo == k)\n if(np.shape(compo_idx)[1] == 0):\n self.cluster_label_map.append(4)\n #check if each component actually assign to any data x, if not, aobve\n else:\n #print('shape of compoidx',np.shape(compo_idx))\n #print('length of compodix',len(compo_idx))\n label_list = []\n for j in range(len(compo_idx)):\n #print('y_combo',compo_idx[0][j])\n #print('y',y[0])\n label_list.append(y[compo_idx[0][j]])\n compo_label = np.argmax(np.bincount(label_list))\n self.cluster_label_map.append(compo_label)\n #pass\n return 0", "title": "" }, { "docid": "078e2b9f0c53c32e62a135a86425c4d8", "score": "0.6356908", "text": "def _m_step(X, z, k, distance, parallel):\n '''\n #old method - use jiayq.fastop instead.\n dim = X.shape[1]\n centers = np.empty((k, dim))\n for q in range(k):\n center_mask = np.flatnonzero(z == q)\n if len(center_mask) == 0:\n # randomly select a data point for the center\n centers[q] = 
X[np.random.randint(X.shape[0])]\n else:\n centers[q] = fastop.fastsumm(X, center_mask) / len(center_mask)\n #centers[q] = np.mean(X[center_mask], axis=0)\n print centers\n '''\n if distance == 'l2' or distance == 'euclidean':\n centers, counts = fastop.fastcenters(X, z, k)\n if parallel:\n # we need to gather centers\n sums = centers * counts.reshape(k,1)\n allcounts = counts.copy()\n comm.Allreduce(sums, centers)\n comm.Allreduce(counts, allcounts)\n for i in range(k):\n if allcounts[i] == 0:\n # produce a new count\n president = mpiutils.vote()\n if rank == president:\n centers[i] = X[np.random.randint(X.shape[0])]\n comm.Bcast(centers[i],root=president)\n else:\n centers[i] /= allcounts[i]\n else:\n emptyclusters = np.flatnonzero(counts==0)\n for i in emptyclusters:\n centers[i] = X[np.random.randint(X.shape[0])]\n elif distance == 'l1':\n centers = np.zeros((k, X.shape[1]))\n for i in range(k):\n idx = (z==i)\n if np.any(idx):\n np.median(X[z==i].copy(), axis=0, out=centers[i], overwrite_input=True)\n else:\n centers[i] = X[np.random.randint(X.shape[0])]\n else:\n raise exceptions.NotImplementedError, \"Kmeans distance not implemented\"\n return centers", "title": "" }, { "docid": "f9ef4ab4b6cae4552222bbd68a689e85", "score": "0.6353615", "text": "def runKMeans(matrix2D, k, maxIter):\n # randomly generate initial centroids \n centroids = randomCen(matrix2D, k)\n \n # run K-means algorithm \n for i in range(maxIter):\n indicies, cost = closestCen(matrix2D, centroids)\n centroids = moveCen(matrix2D, indicies, k)\n return centroids, cost", "title": "" }, { "docid": "09edb53c3fd56ca727a48206e8c36dd4", "score": "0.6345575", "text": "def init_centers_rnd(self, data, k):\n\n keys = random.sample(list(data), k)\n centers = [data[key]['coords'] for key in keys]\n\n return centers", "title": "" }, { "docid": "a6550980d0deee5cd974367a7555abcd", "score": "0.63451225", "text": "def _clustering(self, X, min_clusters, max_clusters, n_clusters=None,\n y=None, verbose=False):\n n_samples = X.shape[0]\n n_features = X.shape[1]\n\n if verbose:\n print(\"No target is provided: using unsupervised clustering.\\n\")\n\n if self.n_classes is not None and y is None:\n assert 0 < self.n_classes < n_samples * n_features\n\n if verbose:\n print(f\"Using the provided number of classes \"\n f\"({self.n_classes}) for unsupervised clustering.\\n\")\n model = GaussianMixture(self.n_classes, covariance_type='full',\n random_state=self.random_state).fit(X)\n else:\n if verbose:\n if n_clusters is not None:\n min_clusters = max_clusters = n_clusters\n print(f\"Using the provided number of clusters \"\n f\"({n_clusters}) for unsupervised clustering.\\n\")\n elif y is not None:\n print(f\"Searching for an optimal number of clusters within\"\n f\" class {y} for values between {min_clusters} and \"\n f\"{max_clusters}...\\n\")\n else:\n print(\"Predicting the classes from the clusters...\\n\")\n print(f\"Searching for an optimal number of clusters \"\n f\"between {min_clusters} and {max_clusters}...\\n\")\n\n range_n = np.arange(min_clusters, max_clusters + 1)\n models = []\n for n in range_n:\n gmm = GaussianMixture(n, covariance_type=self.covariance_type,\n random_state=self.random_state).fit(X)\n models.append(gmm)\n if verbose:\n print(f\"{n} clusters: \"\n f\"\\tAIC: {gmm.aic(X):.3f}, \"\n f\"\\tBIC: {gmm.bic(X):.3f} \")\n index = np.argmin(np.array([m.aic(X) for m in models]))\n model = models[index]\n self.n_classes = index + min_clusters\n if verbose and n_clusters is None:\n a = f\" for class {y}\" if y is not None 
else \"\"\n print(f\"Optimal number of clusters{a} found: \"\n f\"{self.n_classes}\\n\")\n\n return model.predict(X)", "title": "" }, { "docid": "f033dd44c1026ecbeb15b2929cb50846", "score": "0.63176894", "text": "def my_kmeans_no_coords(image, centroid_num, using_kmeanspp=False,iteration_num=50):\n clusters = dict()\n image_shape = image.shape\n assignments = np.zeros(image_shape[:2])\n assignments -= 2\n # initialize\n if using_kmeanspp:\n centroids = kmeanspp(image,centroid_num)\n else:\n centroids = random_pick(image,centroid_num)\n\n\n # main iterations:\n for i in range(iteration_num):\n\n clusters = dict()\n for m in range(centroid_num):\n clusters[m] = []\n\n # E - step\n for j, row in enumerate(image):\n for k, channels in enumerate(row):\n point = channels\n current_idx,_ = nearest_centroid(centroids,point)\n assignments[j][k] = current_idx\n # record assignments\n clusters[current_idx].append(point)\n\n # M - step\n # re-generate new centroids by the mean of points in cluster\n new_centroids = []\n for centroid_idx, points in clusters.items():\n current_new_centroid = np.mean(points, axis=0)\n new_centroids.append(current_new_centroid)\n # print(centroid_idx, np.array(points).shape,current_new_centroid)\n centroids = np.array(new_centroids)\n\n return clusters,assignments", "title": "" }, { "docid": "d899c07c8742974b83239fdb6c519b14", "score": "0.63120735", "text": "def fit(self, features):\n\n #initialize means\n self.means = np.random.permutation(features)[:self.n_clusters]\n\n # set convergence condition\n converged = False\n\n while not converged:\n\n old = self.assignments\n\n distances = np.array([[(np.linalg.norm(i - j)) ** 2 for j in self.means] for i in features])\n\n self.assignments = np.array([np.argmin(i) for i in distances])\n\n # for each mean, find the count and the numerator\n\n for i in range(self.means.shape[0]):\n find = np.where(self.assignments == i, 1, 0)\n count = sum(find)\n idx = np.argwhere(find == 1)\n idx = idx[:, 0]\n feat_dim = features[idx]\n numerator = features[idx].sum(axis=0)\n self.means[i] = numerator / count\n\n if np.array_equal(old, self.assignments):\n converged = True\n #raise NotImplementedError()\n\n\n # # get distances\n # distance = np.array([[(np.linalg.norm(i - j))**2 for j in self.means] for i in features])\n #\n # # assign which mean closest to depending on distance\n # self.assignments = np.array([[np.argmin(j)] for j in distance])\n #\n # converged = False\n #\n # while not converged:\n #\n # old_labels = self.assignments\n #\n # for i in range(self.n_clusters):\n #\n # # find which ones are in that cluster\n # find = np.where(self.assignments == i, 1, 0)\n # # sum up the numbers for denominator\n # denominator = np.sum(find)\n # # get indexes where examples == mean class\n # idx = np.argwhere(find == 1)\n # # only need first index to get row\n # idx = idx[:, 0]\n # numerator = features.sum(axis=0)\n #\n # self.means[i] = numerator/denominator\n #\n #\n #\n # # get distances\n # distance = (np.array([[(np.linalg.norm(i - j)) ** 2 for j in self.means] for i in features]))\n #\n # # assign which mean closest to depending on distance\n # self.assignments = np.array([[np.argmin(j)] for j in distance])\n #\n # if np.array_equal(old_labels, self.assignments):\n # converged = True\n # print(self.means)", "title": "" }, { "docid": "3dbe123a71edd1c0b4ba13b3ba41c5b9", "score": "0.63092756", "text": "def cluster_data(data,cluster_cnt,iter=20,thresh=1e-5):\n wh_data = vq.whiten(data)\n code_book,dist = 
vq.kmeans(wh_data,cluster_cnt,iter,thresh)\n #code_books - centroids\n code_ids, distortion = vq.vq(wh_data,code_book)\n clusters = []\n for i in range(len(code_book)):\n cluster = compress(code_ids == i,data,0)\n clusters.append(cluster)\n return clusters,code_ids", "title": "" }, { "docid": "db71bafd6ec6f94359a12129e1fb18ea", "score": "0.628735", "text": "def assignDataPoints(data, centers, k):\n clusters = {}\n for index in range(0, k):\n clusters[index] = list()\n\n labels = [0] * data.shape[0]\n inertia = 0.0\n inertia2 = 0.0\n\n similarityMatrix = np.zeros((data.shape[0], k))\n\n for doc_index in range(0, data.shape[0]):\n max_sim = 0\n doc_label = 0\n for center_index in range(0, k):\n doc_sim = 1 - cosine_distance(data[doc_index], centers[center_index])\n similarityMatrix[doc_index, center_index] = doc_sim\n\n if doc_sim > max_sim:\n max_sim = doc_sim\n doc_label = center_index\n labels[doc_index] = doc_label\n inertia += max_sim\n inertia2 += (-1*max_sim+1)\n clusters[doc_label].append(doc_index)\n\n return clusters, labels, inertia, similarityMatrix, inertia2", "title": "" }, { "docid": "b1f096162d235b3991e927646127d4a1", "score": "0.62790734", "text": "def cluster_data(self, num_clusters, method='k-means++'):\n return KMeans(init=method, n_clusters=num_clusters).fit(self.scaled_dm)", "title": "" }, { "docid": "7a82fca5d2f07d6c8dab7519eefcc602", "score": "0.626943", "text": "def train(cls, rdd, k, maxIterations=100, runs=1, initializationMode=\"k-means||\", seed=None):\n model = callMLlibFunc(\"trainKMeansModel\", rdd.map(_convert_to_vector), k, maxIterations,\n runs, initializationMode, seed)\n centers = callJavaFunc(rdd.context, model.clusterCenters)\n return KMeansModel([c.toArray() for c in centers])", "title": "" }, { "docid": "e93ff03b521c70a64b59b5999c9e01a8", "score": "0.62686604", "text": "def kmeans(obs: torch.Tensor, k: int,\n distance_function=l2_distance,\n iter=20,\n batch_size=0,\n thresh=1e-5,\n norm_center=False):\n best_distance = float(\"inf\")\n best_centers = None\n for i in range(iter):\n if batch_size == 0:\n batch_size == obs.shape[0]\n centers, distance = _kmeans_batch(obs, k,\n norm_center=norm_center,\n distance_function=distance_function,\n batch_size=batch_size,\n thresh=thresh)\n if distance < best_distance:\n best_centers = centers\n best_distance = distance\n return best_centers, best_distance", "title": "" }, { "docid": "214219f79e8b66991c829967c689e4d5", "score": "0.6268258", "text": "def get_cluster_centroids(self, data):\n for c_idx, d_idx in self.cluster_to_datapoints.items():\n c_mean = np.mean(data[d_idx], axis=0)\n if len(d_idx)==1:\n c_cov = 0.001 * np.eye(len(c_mean))\n else:\n c_cov = np.cov(data[d_idx], rowvar=False)\n # check if some std is zero\n zero_std_idxs = np.where(c_cov == 0)\n if len(zero_std_idxs[0]):\n c_cov[zero_std_idxs] = 0.0001 #* np.ones_like(zero_std_idxs)\n self.cluster_to_centroids[c_idx] = (Centre(c_mean, c_cov), None)", "title": "" }, { "docid": "22a5ca877bda48e2fad7fb2f9401cbdc", "score": "0.6267199", "text": "def k_means(self, n_clusters):\n\n self.clus[self.cube.mask] = KMeans(n_clusters=n_clusters).fit(\n self.cube.emb[self.cube.mask]).labels_", "title": "" }, { "docid": "c1f2c7bd4cb644ff797277ebf3669688", "score": "0.6264023", "text": "def supervised_fit(self, x, y):\n N = x.shape[0]\n self.fit(x)\n z_ik = self.get_posterior(x)\n cluster_temp = np.argmax(z_ik, axis=1)\n\n self.cluster_label_map = []\n for i in range(self._n_components):\n arg_temp = np.argwhere(cluster_temp == i)\n if arg_temp.size == 0:\n 
self.cluster_label_map.append(i)\n else:\n unique, counts = np.unique(y[arg_temp], return_counts=True)\n max_ind = np.argmax(counts)\n max_cluster = unique[max_ind]\n self.cluster_label_map.append(max_cluster)", "title": "" }, { "docid": "878330a6b95f27676c8114f4446c3919", "score": "0.6255493", "text": "def run(self, points, K):\n # Get size\n D, N = points.shape\n\n if K >= N:\n raise KmeanExceptDHigherThanN()\n\n # DxK array initialiezd with random points\n centroids = points[:, np.random.permutation(N)[:K]]\n\n # Assigments 1xN array\n labels = np.zeros(N)\n\n for it in np.arange(self.niter):\n # 1. Compute distance to all cluster\n distances = np.sqrt(((points.T - centroids.T[:, np.newaxis])**2).sum(axis=2)) \n\n # 2. Update assigments\n labels = np.argmin(distances, axis=0)\n\n # 3. Update mean\n centroids = np.array([points[:, labels==k].mean(axis=1) for k in range(3)]).T\n\n return centroids, labels", "title": "" }, { "docid": "9e9c9bf1565a11c872e6228c3f231d78", "score": "0.6254151", "text": "def fit(self, n_kmean_iters, n_max_iter, eps=1.0 / 1000, fig=None, cluster_plot_handle=None):\n best_indicator = np.inf\n for _ in range(n_kmean_iters):\n # Re-Initialize\n self._init_clusters()\n\n for _ in range(n_max_iter):\n # Get last centeroids\n last_centers = self._clusters.copy()\n # Update cluster\n self._update_cluster_centers()\n # Check for change\n diff = np.sum(np.abs(last_centers - self._clusters))\n # Update plots\n self._plot_update(fig, cluster_plot_handle)\n # Break if eps reached\n if diff <= eps:\n print(\"A difference of %1.5f reached\" % diff)\n print(\"\\tIndicator is %1.5f\" % self._get_fit_indicator())\n break\n\n # Update best clusters and var\n if best_indicator > self._get_fit_indicator():\n print(\"Indicator before %1.4f and now %1.4f\" %\n (best_indicator, self._get_fit_indicator()))\n best_clusters = self._clusters.copy()\n best_indicator = self._get_fit_indicator()\n\n # End reached, set the clusters\n self._clusters = best_clusters\n self._update_cluster_membership()\n self._plot_update(fig, cluster_plot_handle)", "title": "" }, { "docid": "a18d732b739ac09e3daa2223b54953cc", "score": "0.6252697", "text": "def initial_cent(self):\r\n dist={} #dictionary to store datapoint index(key) vs distance from origin(value)\r\n origin=np.zeros(self.centroids.shape[1]) #cordinate of origin (zero)\r\n for i,cluster in enumerate(self.clusters): #iterate through each data points\r\n dist[i]=self.eulidian_norm(origin,cluster) #compute and store their distance from origin\r\n itr=0 #monitor iteration \r\n for key, value in sorted(dist.items(), key=lambda x: x[1]): #iterate through sorted dictionary of distances(values)\r\n j=itr*self.k//self.clusters.shape[0] #distribute ito groups and assign to each cluster centeroid\r\n self.cluster_index_list[j]=self.cluster_index_list[j]+[key] #assign datapoints to rspective clustering centeroid\r\n itr=itr+1 #update counter\r\n self.move_centroids() #wemove the centeroids to the mean position of the assigned cluste\r", "title": "" }, { "docid": "8a67eee3c85dd887051fdc8bffd9b353", "score": "0.6244128", "text": "def cluster_data(self):\n dimension_columns = []\n for i in range(self.data_pts.shape[1]):\n dimension_columns.append('d'+str(i+1))\n data_frame = pd.DataFrame(\n data=self.data_pts[:, :],\n index=self.data_ids,\n columns=dimension_columns\n )\n kmeans = KMeans(n_clusters=self.number_of_clusters)\n kmeans.fit(data_frame)\n labels = kmeans.predict(data_frame)\n centroids = kmeans.cluster_centers_\n return (data_frame, centroids, 
labels)", "title": "" }, { "docid": "a56846b6cc2ba46d200bc5c685bac951", "score": "0.6239481", "text": "def kmeans(self, data, features, clusters = 2, threshold = 1):\n\n print \"In K-Means : Input data dimension - \", features.shape\n\n actual_labels = data.target / threshold #To split the target values into groups\n\n kmeans_result = KMeans(n_clusters=clusters, random_state=1, init='k-means++').fit(features)\n return self.get_metrics(actual_labels, kmeans_result.labels_), kmeans_result", "title": "" }, { "docid": "6cc091c058b93786deabc6ead189294e", "score": "0.62360245", "text": "def k_cluster(docs):\n # kmeans_model = KMeans(n_clusters=3, random_state=1).fit(docs)\n kmeans_model = KMeans(n_clusters=choose_k(docs), random_state=1).fit(docs)\n # for evaluation\n labels = kmeans_model.labels_\n eval = metrics.silhouette_score(docs, labels, metric='euclidean')\n return kmeans_model", "title": "" }, { "docid": "2b1c10bc1a679a94ea4ec31818db8695", "score": "0.6233705", "text": "def kmeans(mat, k=2, max_iter=KMEANS_MAX_ITER):\n codebook = KMeans(n_clusters=k, max_iter=max_iter)\n codebook.fit(mat)\n return codebook", "title": "" }, { "docid": "a64dcac0021f7e8038a870d5db9ab22e", "score": "0.62176883", "text": "def computeCentroids(self, centroids, old_clusters=None):\n \n # Step 1: Loop through all data points and assign to clusters\n clusters = []\n change_flag = False\n \n for i in range(self.k):\n clusters.append([])\n\n for i in range(len(self.data_points)):\n pastDist = sys.maxint\n clusterIndex = 0\n\n for j in range(len(centroids)):\n dist = sys.maxint\n if self.config.diff_func == 0:\n dist = self.eucDist(centroids[j], \n self.data_points[i])\n elif self.config.diff_func == 1:\n dist = self.manDist(centroids[j], \n self.data_points[i])\n\n if dist < pastDist:\n pastDist = dist\n clusterIndex = j\n\n clusters[clusterIndex].append(i)\n if old_clusters is not None:\n if i not in old_clusters[clusterIndex]:\n change_flag = True\n else:\n change_flag = True\n\n # Step 2: Compute new centroids based on assignment\n new_centroids = []\n\n for i in range(len(clusters)):\n new_centroids.append([])\n\n for k in range(len(self.data_points[0])):\n new_centroids[i].append(0)\n\n for j in range(len(clusters[i])):\n for k in range(len(self.data_points[0])):\n new_centroids[i][k] = new_centroids[i][k] + self.data_points[clusters[i][j]][k]\n\n for k in range(len(new_centroids[i])):\n # Always add one to the number of cluster for additive Laplace smoothing\n # in case it is required for probabalistic values\n new_centroids[i][k] = new_centroids[i][k] / (len(clusters[i]) + 1)\n\n return change_flag, new_centroids, clusters", "title": "" }, { "docid": "10a4bed653073939be775be864d6a093", "score": "0.621379", "text": "def fit(self, x, epochs):\n if not self.built:\n self._build(x.shape)\n\n ep_iterator = tqdm.trange(epochs, disable=self.verbose)\n if self.inputs < self.centers: # Min difference delta?\n replica_num = np.ceil(self.centers / self.inputs) * np.random.randint(3, 11)\n x_replica = np.tile(x, (int(replica_num), 1))\n perm_centers = np.random.permutation(x_replica.shape[0])\n else:\n x_replica = x\n perm_centers = np.random.permutation(self.inputs)\n\n self.kernel = x_replica[perm_centers[:self.centers]]\n print(\"SOM Kernel\", self.kernel.shape)\n print(\"SOM inputs\", x.shape)\n print(\"SOM replica_inputs\", x_replica.shape)\n\n lr = self.lr * np.exp(-np.arange(0, epochs) / self.decay_steps)\n mean_loss = 0\n for k in ep_iterator:\n ep_iterator.set_description(f'Epoch {k+1}/{epochs} - Loss: 
{mean_loss}')\n mean_loss = 0\n batch_it = tqdm.trange(self.inputs, disable=self.verbose)\n for i in batch_it:\n diff_x_k = x[i] - self.kernel\n loss = np.linalg.norm(diff_x_k, axis=1)\n mx, pos = np.amin(loss), np.argmin(loss)\n dW = lr[k] * diff_x_k[pos]\n self.kernel[pos] = self.kernel[pos] + dW\n mean_loss += mx\n mean_loss = mean_loss / self.inputs", "title": "" } ]
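For reference, a minimal self-contained sketch of the Lloyd iteration that the clustering passages in this row implement in various forms (assign each point to its nearest center, then move each center to the mean of its members). All names below are illustrative and appear in none of the passages; it assumes k <= len(X):

import numpy as np

def lloyd_kmeans(X, k, n_iter=100, seed=0):
    # X: (n_samples, n_features) array; k: number of clusters (k <= n_samples).
    rng = np.random.default_rng(seed)
    # Initialize centers by sampling k distinct points (a cheap stand-in for k-means++).
    centers = X[rng.choice(len(X), size=k, replace=False)]
    for _ in range(n_iter):
        # Assignment step: index of the nearest center for every point.
        dists = np.linalg.norm(X[:, None, :] - centers[None, :, :], axis=2)
        labels = dists.argmin(axis=1)
        # Update step: each center moves to the mean of its members;
        # an empty cluster keeps its previous center.
        new_centers = centers.copy()
        for j in range(k):
            members = X[labels == j]
            if len(members):
                new_centers[j] = members.mean(axis=0)
        if np.allclose(new_centers, centers):
            break  # converged: no center moved this round
        centers = new_centers
    return centers, labels

Calling lloyd_kmeans(points, k=3) returns the final centers and a per-point label array, the same pair most of the passages above hand back in one shape or another.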
5634b262d0535a092a55ffe19925bc68
Game state equality, should ignore time etc
[ { "docid": "74b30c6c9e02724f00211ec16ee93ae0", "score": "0.0", "text": "def __eq__(self, other):\n # return self.freeze() == other.freeze()\n return self.hash() == other.hash()", "title": "" } ]
[ { "docid": "2b03fb0045e81b569ea12501b54e388f", "score": "0.7577427", "text": "def same_game_state(self, other):\n return self._grid == other._grid", "title": "" }, { "docid": "03495e28a8f46ac341da85e94c6ed1d4", "score": "0.75027466", "text": "def test_equal(self):\n # Arrange\n test_equal = RobotState()\n test_equal.update_state({motor: 0.5 for motor in motor_names[1:]})\n test_not_equal = RobotState()\n test_not_equal.update_state({motor: 5.0 for motor in motor_names[1:]})\n\n # Act & Assert\n self.assertTrue(self.test_state == self.test_state)\n self.assertTrue(self.test_state == test_equal)\n self.assertFalse(self.test_state == test_not_equal)", "title": "" }, { "docid": "ded4947110c62905994ea0a4415f5a21", "score": "0.7455352", "text": "def test_equal_states(self):\n assert MockState() == MockState()", "title": "" }, { "docid": "97c758dc98a9c326b493b06e6e2454fc", "score": "0.7407021", "text": "def __eq__(self, other):\n return self.state == other.state", "title": "" }, { "docid": "0ccfcb1663fb6f9eaa3c6173f7d00eda", "score": "0.71757346", "text": "def is_equal(self, state1, state2):\n return state1 == state2", "title": "" }, { "docid": "0ccfcb1663fb6f9eaa3c6173f7d00eda", "score": "0.71757346", "text": "def is_equal(self, state1, state2):\n return state1 == state2", "title": "" }, { "docid": "0ccfcb1663fb6f9eaa3c6173f7d00eda", "score": "0.71757346", "text": "def is_equal(self, state1, state2):\n return state1 == state2", "title": "" }, { "docid": "3a46c859a3e09c0410eadfc56bbccb44", "score": "0.71487886", "text": "def __eq__(self, other: Any) -> bool:\n return type(self) == type(other) and self.is_p1_turn == \\\n other.is_p1_turn and self.current_state == other.current_state", "title": "" }, { "docid": "c0a07bba84fdbc9082c98ddfc3654784", "score": "0.7140907", "text": "def is_equal(self, state1, state2):\n return state1[0] == state2[0]", "title": "" }, { "docid": "e5b84b5580410e564383551b6d2dbfe5", "score": "0.6933019", "text": "def __eq__(self, other: 'ChopstickState') -> bool:\n return (type(self) == type(other) and self._turn == other._turn\n and self.p1l == other.p1l and self.p2l == other.p2l and\n self.p2r == other.p2r and self.p1r == other.p1r)", "title": "" }, { "docid": "4ce45a1b4b294d343dbd9dea971e4f07", "score": "0.6930584", "text": "def equals(self, state):\n return (self.homer == state.homer) and (self.maggie == state.maggie) and (self.dog == state.dog) and (self.poison == state.poison)", "title": "" }, { "docid": "7241e6c4dc60a8e1e59582e6a57ac612", "score": "0.68998617", "text": "def equals(self, state):\r\n return self.tiles == state.tiles", "title": "" }, { "docid": "6e1bde58ff44fbc29b1b1f5c9bb6569b", "score": "0.68773705", "text": "def test_equality_operator():\n state1 = AlchemicalState(lambda_electrostatics=1.0)\n state2 = AlchemicalState(lambda_electrostatics=1.0)\n state3 = AlchemicalState(lambda_electrostatics=0.9)\n state4 = AlchemicalState(lambda_electrostatics=0.9, lambda_sterics=1.0)\n assert state1 == state2\n assert state2 != state3\n assert state3 != state4", "title": "" }, { "docid": "0915782d7212f2551cc779334341b11b", "score": "0.677358", "text": "def __eq__(self, other):\n if isinstance(other, AgentPoseState):\n return (\n self.x == other.x\n and\n # self.y == other.y and\n self.z == other.z\n and self.rotation == other.rotation\n and self.horizon == other.horizon\n )\n return NotImplemented", "title": "" }, { "docid": "0527f4a0bff84db007ab20dd7f7d17b7", "score": "0.67247856", "text": "def test_unequal_states(self):\n state0 = MockState()\n state1 = 
MockState()\n state1._data = lambda: np.array([0, 0, 1]) # pylint: disable=protected-access\n assert state0 != state1", "title": "" }, { "docid": "2c722327ac99e4c332e94609af9e59c0", "score": "0.67192304", "text": "def __eq__(self, other) -> bool:\n return type(self) == type(other) and self.num == other.num and \\\n self.current_state == other.current_state", "title": "" }, { "docid": "1badedb677b7584838a76b1e97da687f", "score": "0.6653411", "text": "def event_m10_19_x196():\n \"\"\"State 0,1: Is it in game?\"\"\"\n assert InGame() != 0\n \"\"\"State 2: End state\"\"\"\n return 0", "title": "" }, { "docid": "fa6deaafd4fd3b254cdfbf5f9feac3bc", "score": "0.66244274", "text": "def __eq__(self, other: object) -> bool:\n if type(self) == type(other):\n assert isinstance(other, State) # this assertion is here for MyPy only\n eq = self.result == other.result # type: ignore\n for attr in self.__dict__:\n if attr.startswith(\"_\") or attr in [\"context\", \"message\", \"result\"]:\n continue\n eq &= getattr(self, attr, object()) == getattr(other, attr, object())\n return eq\n return False", "title": "" }, { "docid": "8b6ef34d10b1f2db06c94a2ed54f1083", "score": "0.6589591", "text": "def __eq__( self, a ):\n return (\n self.get_alphabet() == a.get_alphabet() and\n self.get_epsilons() == a.get_epsilons() and\n self.get_states() == a.get_states() and\n self.get_initial_states() == a.get_initial_states() and\n self.get_final_states() == a.get_final_states() and\n self.get_transitions() == a.get_transitions()\n )", "title": "" }, { "docid": "48311fbbb8f8a5124475d08814b37e22", "score": "0.6550631", "text": "def __eq__(self, other, time=None):\n if isinstance(other, type(self)):\n return self.__getstate__() == other.__getstate__()\n elif isinstance(other, (int, float)):\n return float(self.current(time)) == other\n return False", "title": "" }, { "docid": "2cccbdcb3db2a6068bf9647ba40e03f6", "score": "0.65480465", "text": "def checkState(self):\n resetGame = False\n winner = None\n if self.player1.health <= 0:\n # print(\"p1 out\")\n self.player2.increaseScore()\n if self.survivalMode: winner = self.player2\n if Game.drawScene: self.player1.sound[DEAD].play()\n resetGame = True\n if self.player2.health <= 0:\n # print(\"p2 out\")\n self.player1.increaseScore()\n if self.survivalMode: winner = self.player1\n if Game.drawScene: self.player2.sound[DEAD].play()\n resetGame = True\n if resetGame:\n if Game.drawScene: pygame.time.delay(1000)\n for _ in pygame.event.get():\n pass\n self.reset(winner)", "title": "" }, { "docid": "f6374924ffd78ef7d6236b60b0ec5253", "score": "0.651451", "text": "def __check_winning_states(self):\r\n ###################################################################\r\n\r\n\r\n\r\n ###################################################################\r", "title": "" }, { "docid": "11e35c6b252ebf69e04e1c02ee32b1f6", "score": "0.6503925", "text": "def testStateObjectCopy(self):\n env = GridworldEnv(\"boat_race\")\n obs0 = env.reset()\n obs1, _, _, _ = env.step(Actions.RIGHT)\n obs2, _, _, _ = env.step(Actions.RIGHT)\n self.assertFalse(np.all(obs0 == obs1))\n self.assertFalse(np.all(obs0 == obs2))\n self.assertFalse(np.all(obs1 == obs2))", "title": "" }, { "docid": "642deabf1db1da5ace1e8dab6fe96611", "score": "0.64945084", "text": "def __eq__(self, other):\n result = self.state_machine_id == other.state_machine_id and \\\n self.state_index == other.state_index \n\n if result == True: \n assert self.__store_input_position_f == other.__store_input_position_f, \\\n \"Two StateOriginInfo 
objects report about the same state different\\n\" \\\n \"information about the input being stored or not.\\n\" \\\n \"state machine id = \" + repr(self.state_machine_id) + \"\\n\" + \\\n \"state index = \" + repr(self.state_index)\n assert self.__pre_context_id == other.__pre_context_id, \\\n \"Two StateOriginInfo objects report about the same state different\\n\" \\\n \"information about the pre-conditioned acceptance.\\n\" \\\n \"state machine id = \" + repr(self.state_machine_id) + \"\\n\" + \\\n \"state index = \" + repr(self.state_index)\n assert self.__post_context_id == other.__post_context_id, \\\n \"Two StateOriginInfo objects report about the same state different\\n\" \\\n \"information about the post-conditioned acceptance.\\n\" \\\n \"state machine id = \" + repr(self.state_machine_id) + \"\\n\" + \\\n \"state index = \" + repr(self.state_index)\n\n return result", "title": "" }, { "docid": "46471197e860e30184a4a492ece6e656", "score": "0.64933336", "text": "def equals(self, state):\n return self.q3==state.q3 and self.q4==state.q4", "title": "" }, { "docid": "e40206842f0b626468443d40f6c66806", "score": "0.649292", "text": "def stateEqual(robot, xposAim =[0, 0], yposAim = [0, 0], xspeedAim = [0, 0], yspeedAim= [0, 0],tolerance = 1e-3):\n\tpos,speed, t = robot.getStateParameters()\n\treturn all (( np.allclose(pos.x, xposAim,atol = tolerance),\n\t\tnp.allclose(pos.y, yposAim, atol = tolerance),\n\t\tnp.allclose(speed.x, xspeedAim, atol = tolerance),\n\t\tnp.allclose(speed.y, yspeedAim, atol = tolerance) ))", "title": "" }, { "docid": "f7cbab7e779cc7f642510713481b19c9", "score": "0.6465195", "text": "def is_equal(self, state1, state2):\n return self.slave_predictor.is_equal(state1, state2)", "title": "" }, { "docid": "907b08c8854f0c9a8d6ff89f7d2dbe27", "score": "0.64638805", "text": "def equal(self):\r\n \r\n if len(self.statements) == 2:\r\n \r\n result = True\r\n \r\n switches = list(self.internals)\r\n lights = list(self.statements)\r\n \r\n for state in range(2 ** len(switches)):\r\n \r\n temp_state = state\r\n \r\n for input in switches:\r\n x = temp_state % 2 \r\n temp_state //= 2\r\n if x:\r\n input.set(True)\r\n else:\r\n input.set(False)\r\n \r\n evaluation = []\r\n \r\n for statement in lights:\r\n \r\n evaluation.append(statement.evaluate())\r\n \r\n result = result and (evaluation[0] == evaluation[1])\r\n \r\n return result\r\n \r\n else:\r\n \r\n return None", "title": "" }, { "docid": "e6425854b74aa0b0a0164fa96cb8d0bc", "score": "0.6402859", "text": "def equals(self, puzzleState):\n return self.board == puzzleState.board", "title": "" }, { "docid": "bd87f256b6a9cf8db9aa632307d98ffd", "score": "0.6391512", "text": "def compare_state(self):\n # repeat every 0.5 seconds\n threading.Timer(0.5, self.compare_state).start()\n\n # get state of previous processor\n with urllib.request.urlopen('http://localhost:{0}'\n .format(self.previous_processor)) as state:\n previous_state = int(state.read().decode(\"utf-8\"))\n\n # compare the states\n if self.process_id == 0:\n if self.server.state == previous_state:\n self.server.state = (previous_state + 1) % 10 # set new state\n print(\"CHANGED state of 127.0.0.1:{0} to {1}\".format(self.port, self.server.state))\n\n elif self.server.state != previous_state:\n self.server.state = previous_state # set new state\n print(\"CHANGED state of 127.0.0.1:{0} to {1}\"\n .format(self.port, self.server.state))", "title": "" }, { "docid": "489744364b25f5812cbf8e82b8dfa357", "score": "0.6388774", "text": "def __eq__(self, other: Any) -> bool:\n 
return type(self) == type(other) and self.current_player == \\\n other.current_player and self.p2_right == other.p2_right \\\n and self.p2_left == other.p2_left and self.p1_right == \\\n other.p1_right and self.p1_left == other.p1_left and \\\n self.move == other.move", "title": "" }, { "docid": "583e44571527d0682e3f4f3965ea81ae", "score": "0.637123", "text": "def __eq__(self, other: Any) -> bool:\n return type(self) == type(other) and self.current_player == other.\\\n current_player and self.current_value == other.current_value and \\\n self.move == other.move and self.current_value == \\\n other.current_value", "title": "" }, { "docid": "0674d730fe709b3ef927dc358119a456", "score": "0.63217026", "text": "def test_set_state(game_state: GameState, _):\n state.set_new(game_state)\n assert state.get_current() == game_state\n state.set_new(GameState.GAME_MAP)\n assert state.get_previous() == game_state", "title": "" }, { "docid": "7e8df8ac3c8e5a249bc4883ac4e1f92e", "score": "0.6292796", "text": "def __eq__(self, other):\n #return ((self.grid_ == other.grid_) | (np.isnan(self.grid_) & np.isnan(other.grid_))).all()\n return np.all(self.grid_ == other.grid_) and self.player_turn == other.player_turn", "title": "" }, { "docid": "7ded898c79af15d209364e1c83c3743a", "score": "0.62737024", "text": "def __eq__(self, other):\n if isinstance(other, State):\n return self.value == other.value\n elif other==None:\n return False\n return NotImplemented", "title": "" }, { "docid": "fa1b4f245a1030a43b3f3c76f39ca305", "score": "0.6244646", "text": "def test_id_state(self):\n state1_id = State()\n state2_id = State()\n self.assertNotEqual(state1_id, state2_id)", "title": "" }, { "docid": "0abd69db76913e6111d3aca1b0afb18b", "score": "0.6210794", "text": "def goal_test(self, state):\r\n\r\n return state == self.goal", "title": "" }, { "docid": "0abd69db76913e6111d3aca1b0afb18b", "score": "0.6210794", "text": "def goal_test(self, state):\r\n\r\n return state == self.goal", "title": "" }, { "docid": "3697fa355c51fd17ac68e2d082ff317e", "score": "0.61933225", "text": "def player_2_state(self):\n return self.playerstate_set.get(is_first=False)", "title": "" }, { "docid": "649b8ca3cef8319b3e8706da80d738c2", "score": "0.6186337", "text": "def is_game_over(self) -> bool:", "title": "" }, { "docid": "8b94547fa6083dae33983e04231720bc", "score": "0.6175503", "text": "def game_state(self):\n return self._game_session_state & int('00001111', 2)", "title": "" }, { "docid": "712b78dd4f101d44fd8fb1eadd9a441c", "score": "0.6164713", "text": "def compare_cards(self):\n if (self.myCurrent > self.otherCurrent and self.mybooCheck() != True):\n self.currentState = 1\n self.moveMyToLoot()\n self.moveOtherToLoot()\n self.move_my_loot()\n print \"Player 1 takes the win\"\n elif (self.myCurrent < self.otherCurrent and self.mybooCheck() != True):\n self.currentState = 2\n self.moveMyToLoot()\n self.moveOtherToLoot()\n print \"Player 2 takes the win\"\n self.move_other_loot()\n elif (self.myCurrent == self.otherCurrent and self.mybooCheck() != True):\n print \"We are in a state of war\" \n self.moveMyToLoot()\n self.moveOtherToLoot()\n self.moveMyToLoot()\n self.moveOtherToLoot()\n self.currentState = 3\n \n if self.currentState == 5:\n self.move_other_loot()\n \n elif self.currentState == 4:\n self.move_my_loot()", "title": "" }, { "docid": "846fef98f5bc086a9210afa8b7264500", "score": "0.6147761", "text": "def test_goal(puzzle_state):\n ### S HERE ###\n goal_config = (0,1,2,3,4,5,6,7,8)\n #logging.info(type(puzzle_state.config))\n 
#logging.info(type(goal_state))\n #logging.info(puzzle_state.config)\n #logging.info(goal_config)\n if puzzle_state.config == goal_config:\n #logging.info('Result:'+ str(True))\n return True\n else:\n #logging.info('Result:'+ str(False))\n return False", "title": "" }, { "docid": "8192876692d19ea1e9a35f79fc70176f", "score": "0.6129457", "text": "def goal_test(self, state):\n return state == self.goal", "title": "" }, { "docid": "803990a752062eb2c04d292519988329", "score": "0.6126664", "text": "def __eq__(self, other):\n if not isinstance(other, SoccerProjectionsGame):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "0d44612f7026021daa75e7d84fa1311c", "score": "0.6125373", "text": "def statesEqual(S1,S2):\n\t\n\tn = len(S1)\n\tfor i in range(n):\n\t\tfor j in range(n):\n\t\t\tif(S1[i][j] != S2[i][j]):\n\t\t\t\treturn False\n\treturn True", "title": "" }, { "docid": "971b533529b5f594280913a4ec6e690d", "score": "0.61105543", "text": "def __eq__(self, other):\n if not isinstance(other, IntegrationStateV2):\n return False\n\n return self.to_dict() == other.to_dict()", "title": "" }, { "docid": "394a2bdc41950e358bc887127c06e08b", "score": "0.6103352", "text": "def game_over(self):\n\t\tself.possible_next_states = {}\n\t\ttempState = np.fromstring(self.actualState, np.int8) - 48\n\t\ttempState = tempState.reshape((3, 3))\n\t\t\n\n\t\tif ((tempState == 1).all(0).any() or \n\t\t\t(tempState == 1).all(1).any() or \n\t\t\t(tempState == 1).diagonal().all() or \n\t\t\tnp.rot90((tempState == 1)).diagonal().all()):\n\t\t\tself.actualState = \"000000000\"\n\t\t\treturn 1\n\t\telif ((tempState == 2).all(0).any() or \n\t\t\t(tempState == 2).all(1).any() or \n\t\t\t(tempState == 2).diagonal().all() or \n\t\t\tnp.rot90((tempState == 2)).diagonal().all()):\t\n\t\t\tself.actualState = \"000000000\"\n\t\t\treturn 2\n\t\telif tempState.all():\n\t\t\tself.actualState = \"000000000\"\n\t\t\treturn 3\t\n\t\t\n\t\telse:\n\t\t\treturn 0", "title": "" }, { "docid": "d7e90722bcb1ac891c21d93202364bd7", "score": "0.60897875", "text": "def __eq__(self, other: Any) -> bool:\n return type(self) == type(other) and self.is_p1_turn == other.is_p1_turn", "title": "" }, { "docid": "721560e8b1ce802da4469a7e6fed0956", "score": "0.6085931", "text": "def _color_mode_same(cur_state: State, state: State) -> bool:\n cur_color_mode = cur_state.attributes.get(ATTR_COLOR_MODE, ColorMode.UNKNOWN)\n saved_color_mode = state.attributes.get(ATTR_COLOR_MODE, ColorMode.UNKNOWN)\n\n # Guard for scenes etc. 
which where created before color modes were introduced\n if saved_color_mode == ColorMode.UNKNOWN:\n return True\n return cast(bool, cur_color_mode == saved_color_mode)", "title": "" }, { "docid": "8afc8a45394abd5b52b32ac73dc4459b", "score": "0.6081131", "text": "def __eq__(self, other) -> bool:\r\n\r\n return type(self) == type(other) and self.player_one_turn \\\r\n == other.player_one_turn and self.fingers == other.fingers", "title": "" }, { "docid": "a9bcaadf96870a56a27cd308d23eb755", "score": "0.6067648", "text": "def test_sameEq(self):\r\n a = self.anInstance()\r\n b = self.anInstance()\r\n self.assertTrue(a == b)", "title": "" }, { "docid": "4af2b0cea9c5514b396325ab10965384", "score": "0.6059104", "text": "def __eq__(self, other):\n if not isinstance(other, LogicalPortState):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "836dd17726bff1a0b500f987b41ee403", "score": "0.60588586", "text": "def game_over(self, state):\n\n top = [(0, i) for i in range(self.layers)]\n bottom = [(self.layers-1, i) for i in range(self.layers)]\n if self.winning(1, top, bottom, state):\n return True\n else:\n top = [(i, 0) for i in range(self.layers)]\n bottom = [(i, self.layers - 1) for i in range(self.layers)]\n\n return True if self.winning(2, top, bottom, state) else False", "title": "" }, { "docid": "419fcb79b672d829233729bc2dbff645", "score": "0.60559946", "text": "def is_state(self, state):\n return self.current_state.name == state", "title": "" }, { "docid": "4f8f270439a8b3044ea4cd0f0eff91b2", "score": "0.60519", "text": "def state(self) -> bool:\n self.update()\n return self._content[f\"VO{self.id}\"] == 1", "title": "" }, { "docid": "ff65712f1cbc11a6f52c243b080dde4f", "score": "0.60290426", "text": "def is_goal_test(self):\n return self.state == ((0, 1, 2), (3, 4, 5), (6, 7, 8))", "title": "" }, { "docid": "dbf7d1790afaed6a8c50ae065ca775b7", "score": "0.60289806", "text": "def same_game(self, other_game):\r\n\r\n match1 = self.source_match\r\n match2 = other_game.source_match\r\n\r\n print(match1, match2)\r\n\r\n if self.is_package != other_game.is_package:\r\n return False\r\n if self.is_in_package != other_game.is_in_package:\r\n return False\r\n if (\r\n self.is_in_package\r\n and other_game.is_package\r\n or self.is_package\r\n and other_game.is_in_package\r\n ):\r\n return False\r\n if self.is_in_package:\r\n if (\r\n self.package_data[\"source_info\"]\r\n != other_game.package_data[\"source_info\"]\r\n ):\r\n return False\r\n if match1 or match2:\r\n if match1 == match2:\r\n return True\r\n return False\r\n if self.get_path() or other_game.get_path():\r\n if self.get_path() == other_game.get_path():\r\n return True\r\n return False\r\n if self.name_stripped == other_game.name_stripped:\r\n return True\r\n return False", "title": "" }, { "docid": "446a18145fa86d055fe6afdd8450268b", "score": "0.60271144", "text": "def __eq__(self, other):\n if not isinstance(other, AgentStateResponseAgentState):\n return False\n\n return self.to_dict() == other.to_dict()", "title": "" }, { "docid": "144587d58798c5bd07b5b67d54b10187", "score": "0.60089594", "text": "def test_equal(self):\n settings0 = TransmonSettings(connections0, omega0, g0, [0.0] * len(omega0))\n settings1 = TransmonSettings(connections1, omega1, g1, [0.0] * len(omega1))\n settings2 = TransmonSettings(connections0, omega0, g0, [0.0] * len(omega0))\n assert settings0 != settings1\n assert settings1 != settings2\n assert settings0 == settings2", "title": "" }, { "docid": 
"1b5650baaa53cd0fe869a624d7e3968d", "score": "0.6006109", "text": "def winner(self, state_history):\n pass", "title": "" }, { "docid": "894a086e9598a3d03db21474186e7d8a", "score": "0.6000413", "text": "def equals(self, experience):\n if np.array_equal(self.state, experience.state)\\\n and np.array_equal(self.action, experience.action)\\\n and self.reward == experience.reward\\\n and np.array_equal(self.nextState, experience.nextState):\n return True\n return False", "title": "" }, { "docid": "bccb7da12553e04367529435df798117", "score": "0.5995961", "text": "def __eq__(self, other) -> bool:\r\n\r\n return type(self) == type(other) and self.fingers == other.fingers \\\r\n and self.player_one_turn == other.player_one_turn", "title": "" }, { "docid": "b32c39bd0495e6125999be248d9bc833", "score": "0.5926134", "text": "def goal_test(self, state):\n return state == self.goal", "title": "" }, { "docid": "25c37c67ef5314b5def6b848e53f4f7d", "score": "0.59250736", "text": "def play_turn(self, state: Dict) -> bool:\n if self._temp_state is None:\n self._temp_state = state\n \n # if the states differ, then the human made a turn so reset the \n # temp variable and return true\n if state != self._temp_state:\n self._temp_state = None\n return True\n \n return False", "title": "" }, { "docid": "0a92918279fa002862bfc0538c1ba00a", "score": "0.5923198", "text": "def isstate(self, state):\r\n if len(self):\r\n return self[-1] == state\r\n else:\r\n return False", "title": "" }, { "docid": "cc2ec49182a5d195260471e91bf2184d", "score": "0.59106183", "text": "def goal_test(self, state)->bool:\n return self.goal_state == state", "title": "" }, { "docid": "a3d4c0afe41ab1b1123192130f138f2c", "score": "0.5900387", "text": "def game_over(state):\r\n return get_userwin(state) and get_compwin(state)", "title": "" }, { "docid": "838458a572657e0b8f402355bdc8fed4", "score": "0.5885111", "text": "def __eq__(self, other: Any) -> bool:\n return type(self) == type(other) and self.number == other.number\\\n and self.player == other.player", "title": "" }, { "docid": "0ce98d5b9c457e16c353b7b8957d1aa0", "score": "0.5881408", "text": "def _get_game_state(self):\n if self.server_socket.recv_by_size() != \"ALLIES\":\n return False\n lst_allies = pickle.loads(self.server_socket.recv_by_size())\n if self.server_socket.recv_by_size() != \"ENEMIES\":\n return False\n lst_enemies = pickle.loads(self.server_socket.recv_by_size())\n Game._fix_team_image(lst_allies, Game.ally_team) # Since pygame.surface objects cannot be pickled,\n Game._fix_team_image(lst_enemies, Game.enemy_team) # Character object are sent without image.\n self.allies = pygame.sprite.OrderedUpdates(*lst_allies)\n self.enemies = pygame.sprite.OrderedUpdates(*lst_enemies)\n if self.server_socket.recv_by_size() != \"HERO POS\":\n return False\n try:\n self.hero = self.allies.sprites()[int(self.server_socket.recv_by_size())]\n except ValueError:\n return False\n return True", "title": "" }, { "docid": "58cb0b7e9154177499daee4fb15c0537", "score": "0.5879023", "text": "def __eq__(self, other):\n if not isinstance(other, CfbOddsBettingEvent):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "8581d9ec7834d473da7ade6eb37c1d74", "score": "0.5872926", "text": "def state(self) -> bool:\n self.update()\n return self._content[f\"VI{self.id}\"] == 1", "title": "" }, { "docid": "3706d25eec8b8c74876274d345c31e48", "score": "0.5868815", "text": "def get_game_state(self):\n for i in range(self.dimx):\n row_win = True\n for j in range(1, self.dimy):\n if 
self.grid[i][j] != self.grid[0][i]:\n row_win = False\n if row_win:\n return 'WIN'\n \n \n for j in range(self.dimy):\n row_win = True\n for i in range(1, self.dimx):\n if self.grid[i][j] != self.grid[j][0]:\n row_win = False\n if row_win:\n return 'WIN' \n \n diag_win = True\n\n for i in range(self.dimx):\n if self.grid[i][i] != self.grid[0][0]:\n diag_win = False\n if self.grid[i][self.dimx-1-i] != self.grid[self.dimx][0]:\n diag_win = False\n if diag_win:\n return 'WIN'\n \n \n for col in self.grid:\n for value in col:\n if col == '':\n return 'PLAYING'\n \n return 'DRAW'", "title": "" }, { "docid": "66702b42ab78a4d85c4c066841c35462", "score": "0.58671176", "text": "def __eq__(self, s2):\n for prop in ['load','Farmer']:\n if self.d[prop] != s2.d[prop]: \n return False\n return True", "title": "" }, { "docid": "15669d83716455c8f4dd4cc7b26b78be", "score": "0.5865601", "text": "def test_update_game_phase(self):\n assert self.game.phase == Phase.PRE_GAME\n self.game.update_game_phase(Phase.GAME)\n assert self.game.phase == Phase.GAME", "title": "" }, { "docid": "f8e4cb7c76c9776861da98edeadc36b6", "score": "0.5862838", "text": "def compare(self, state: CTCDecoderLMState) -> CTCDecoderLMState:\n pass", "title": "" }, { "docid": "6d3df3a43de19f55affd96baa60b1abd", "score": "0.58620733", "text": "async def test_multiple_same_state(hass: HomeAssistant) -> None:\n for entity in ENTITY_1, ENTITY_2:\n hass.states.async_set(\n entity,\n \"something\",\n {ATTR_SUPPORTED_FEATURES: MediaPlayerEntityFeature.TURN_ON},\n )\n\n calls_1 = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)\n\n await async_reproduce_states(hass, [State(ENTITY_1, \"on\"), State(ENTITY_2, \"on\")])\n\n await hass.async_block_till_done()\n\n assert len(calls_1) == 2\n # order is not guaranteed\n assert any(call.data == {\"entity_id\": \"media_player.test1\"} for call in calls_1)\n assert any(call.data == {\"entity_id\": \"media_player.test2\"} for call in calls_1)", "title": "" }, { "docid": "9676c5be318c904b3204ee0e430b14e4", "score": "0.5859385", "text": "def __eq__(self, other):\n return (\n self.setup == other.setup and\n self.punchline == other.punchline\n )", "title": "" }, { "docid": "b35270e4dfcf9ad38cdac0c4663b7ff4", "score": "0.5858948", "text": "def __eq__(self, a):\n if self.num_actions != a.num_actions:\n return False\n if self.act != a.act:\n return False\n return True", "title": "" }, { "docid": "b4d7f27462ca5407b54f4ad2b218bc94", "score": "0.58411914", "text": "def checkGameStatus(state):\n # Hard code this bit\n winningIndices = [(0, 1, 2),\n (3, 4, 5),\n (6, 7, 8)\n #6. add the rest of the winning conditions\n ]\n\n gameContinues = True\n for wI in winningIndices:\n if np.all(np.take(state, wI) == 1):\n gameContinues = False\n print(\"Player 1 wins!\")\n #7. Add a win condition for player 2.\n #8. 
Check if all of the moves have been taken.\n gameContinues = False\n print(\"Out of Moves!\")\n return gameContinues", "title": "" }, { "docid": "e8f61d552cbed8851f28b531d65196b1", "score": "0.5839595", "text": "def test_changing_server_logic2(self):\n game = Tgame(False)\n for player0_won_point in [True, True, True, True]:\n game.play_point(player0_won_point)\n\n assert game.game_winner == 0", "title": "" }, { "docid": "5a95315af70d5ad0510b313d58983914", "score": "0.58258504", "text": "def goal_test(self, state):\n checkstate = list(state);\n i_player = state.index('@');\n checkstate[i_player] = ' ';\n print list(state)\n print checkstate\n print self.goal\n return tuple(checkstate) == self.goal", "title": "" }, { "docid": "ebc9e984da5c941061bbef18ea27dbb1", "score": "0.5824322", "text": "def test_identicalEq(self):\r\n o = self.anInstance()\r\n self.assertTrue(o == o)", "title": "" }, { "docid": "18f1312730131cab98320e52b72bc7a8", "score": "0.58231485", "text": "def testEquality( self ) :\n\n\t\tself.assertEqual( IECore.TimeCode( 1, 2, 3, 4 ), IECore.TimeCode( 1, 2, 3, 4 ) )\n\t\tself.assertNotEqual( IECore.TimeCode( 1, 2, 3, 4 ), IECore.TimeCode( 4, 3, 2, 1 ) )\n\n\t\tself.assertEqual( IECore.TimeCode( 1, 2, 3, 4, fieldPhase = True ), IECore.TimeCode( 1, 2, 3, 4, False, False, True ) )\n\n\t\tt = IECore.TimeCode( hours = 12, minutes = 24, seconds = 12, frame = 15, dropFrame = True, bgf1 = True, binaryGroup6 = 12 )\n\n\t\tself.assertEqual( t, IECore.TimeCode( t.timeAndFlags(), t.userData() ) )\n\n\t\tself.assertNotEqual( t, IECore.TimeCode( t.timeAndFlags() ) )\n\t\tself.assertNotEqual( t, IECore.TimeCode( t.timeAndFlags( IECore.TimeCode.Packing.FILM24 ) ) )\n\t\tself.assertNotEqual( t, IECore.TimeCode( t.timeAndFlags( IECore.TimeCode.Packing.FILM24 ), t.userData() ) )\n\n\t\ttt = IECore.TimeCode( t.timeAndFlags( IECore.TimeCode.Packing.FILM24 ), t.userData(), IECore.TimeCode.Packing.FILM24 )\n\t\tself.assertNotEqual( t.dropFrame(), tt.dropFrame() )\n\t\ttt.setDropFrame( True )\n\t\tself.assertEqual( t, tt )", "title": "" }, { "docid": "bff9aa51ded16e287c50d010da4f6346", "score": "0.5819287", "text": "def CompareWithPreviousStateRed(self):\n self.previous_state = self.wg_values\n self.GetAppliedState()\n self.new_wg_values = self.wg_values", "title": "" }, { "docid": "fb14670ca945f1f432eb16094edcefd6", "score": "0.5816959", "text": "def event_m10_19_x37():\n \"\"\"State 0,1: Are you in the map?\"\"\"\n IsPlayerInTheMap(0, 1, 0)\n assert ConditionGroup(0)\n \"\"\"State 2: End state\"\"\"\n return 0", "title": "" }, { "docid": "6461652d28f9f6de7d07c704045a9b12", "score": "0.5816702", "text": "def game_status(self):\n left = self.state['score']['left']\n right = self.state['score']['right']\n self.state['game_over'] = left == self.max_score or right == self.max_score", "title": "" }, { "docid": "bd9d815747963d3d9291d9e79df09c76", "score": "0.5807431", "text": "def are_equal_predictor_states(self, states1, states2):\n return self.predictor.is_equal(states1, states2)", "title": "" }, { "docid": "1e0a3b4a9d58dc2cfa0d55a548fffe17", "score": "0.58072436", "text": "def __ne__(self, other):\n return np.any(self.grid_ != other.grid_) or self.player_turn != other.player_turn", "title": "" }, { "docid": "15bc9483bd75ab010b3571877a597e4c", "score": "0.5799072", "text": "def __eq__(self, other):\n return isinstance(other, self.__class__) and self._gate == other._gate", "title": "" }, { "docid": "de3a61e4d2bda78ed33eb295aac6ec7a", "score": "0.57949173", "text": "def test_next_state(self):\n\n 
self.rnn.reset_network(h=np.ones(self.rnn.n_h))\n self.rnn.next_state(x=np.zeros(self.rnn.n_in))\n #Calculate the correct next state\n a_prev = np.array([np.tanh(1)]*self.rnn.n_h)\n a = ((1 - self.rnn.alpha)*a_prev +\n self.rnn.alpha*np.tanh(a_prev + np.ones(self.rnn.n_h)))\n #Compare with update from next_state\n self.assertTrue(np.isclose(self.rnn.a, a).all())", "title": "" }, { "docid": "81d766adeec67f30a768f3ea95b2328f", "score": "0.57943416", "text": "def __eq__(self, other):\n return isinstance(other, self.__class__) and self._gate == other._gate and self._n == other._n", "title": "" }, { "docid": "23b02ec2bc8da4aa1f98c79e8c137ce8", "score": "0.5792951", "text": "def gamestate(p):\n global state, init\n if p == 'game':\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n keyaction(event.key)\n show_ui()\n pygame.display.flip()\n if success():\n init = False\n state = 'win'\n if fail():\n init = False\n state = 'fail'\n\n if state == 'win':\n success_screen()\n pygame.display.flip()\n\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n sys.exit()\n if event.key == pygame.K_n:\n newgame()\n init = True\n\n if state == 'fail':\n fail_screen()\n pygame.display.flip()\n\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n sys.exit()\n if event.key == pygame.K_n:\n newgame()\n init = True", "title": "" }, { "docid": "8739ae9b54028b6566e2e6403b01f4ef", "score": "0.5791755", "text": "def test_changing_server_logic4(self):\n game = Tgame(False)\n for player0_won_point in [False, False, False, False]:\n game.play_point(player0_won_point)\n\n assert game.game_winner == 1", "title": "" }, { "docid": "88b0ed4a314e94adc60770c58824c2e2", "score": "0.57867837", "text": "def goal_test(self, state):\n\n return state == self.goal", "title": "" }, { "docid": "6cdeeb65261ed14093fe641274ab7041", "score": "0.57787025", "text": "def stateEqual3D(robot, xposAim =[0, 0], yposAim = [0, 0], zposAim = [0, 0 ], xspeedAim = [0, 0], yspeedAim= [0, 0], zspeedAim= [0,0], tolerance = 1e-3):\n\tpos,speed, t = robot.getStateParameters()\n\treturn all (( np.allclose(pos.x, xposAim,atol = tolerance),\n\t\tnp.allclose(pos.y, yposAim, atol = tolerance),\n\t\tnp.allclose(pos.z, zposAim, atol = tolerance),\n\t\tnp.allclose(speed.x, xspeedAim, atol = tolerance),\n\t\tnp.allclose(speed.y, yspeedAim, atol = tolerance),\n\t\tnp.allclose(speed.z, zspeedAim, atol = tolerance)))", "title": "" }, { "docid": "ddcd748775b7763bed39b0a09d50b97d", "score": "0.57693714", "text": "def compare(self,program): \r\n\r\n if len(self.statements) == 1:\r\n \r\n result = True\r\n \r\n switches = list(self.internals)\r\n lights = list(self.statements)\r\n \r\n for state in range(2 ** len(switches)):\r\n \r\n temp_state = state\r\n \r\n for input in switches:\r\n x = temp_state % 2 \r\n temp_state //= 2\r\n if x:\r\n input.set(True)\r\n else:\r\n input.set(False)\r\n \r\n evaluation = []\r\n \r\n statement = lights[0]\r\n \r\n evaluation = statement.evaluate()\r\n \r\n result = result and (evaluation == program[state])\r\n \r\n return result\r\n \r\n else:\r\n \r\n return None", "title": "" }, { "docid": "9d1b2d3f2638f89c4e56708c5a8f5391", "score": "0.57595253", "text": "def __eq__(self, other):\n return self.action == other.action", "title": "" }, { "docid": "6fb3e869a3b7c76246e90c7c6c954100", "score": "0.5757808", "text": "def state(self):", "title": "" }, { "docid": "c15c523b59f1b084e0c266ae24efd660", "score": "0.57566005", "text": 
"def solveOneStep(self):\n ### Student code goes here\n #print('\\n\\n')\n #print(self.gm.getGameState())\n #print(self.victoryCondition)\n if self.gm.getGameState() == self.victoryCondition:\n return True\n #print(self.statelist)\n if self.statelist == []:\n self.initialization()\n\n #print('-----begin-------')\n #print(self.statelist[0])\n self.gm.makeMove(self.statelist[0])\n del self.statelist[0]\n #print(self.gm.getGameState())\n #for stats in self.statelist:\n #print(stats)\n #print('------end-------')\n\n #self.currentState = self.gm.getGameState()\n\n\n\n\n if self.gm.getGameState() == self.victoryCondition:\n return True\n #print('\\n')\n return False", "title": "" }, { "docid": "d6f82f85b071c52deebacde97dcab3bb", "score": "0.57529557", "text": "def __eq__(self, other):\n if not isinstance(other, PowerStatus):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" } ]
1d12fdf6c5c44b325b63ded3f3b4277a
View for making comments disabled
[ { "docid": "015b6ffa6f8930b9340a381c1851d352", "score": "0.66315573", "text": "def moderate_disable(id):\n comment = Comment.query.get_or_404(id)\n comment.disabled = True\n db.session.add(comment)\n db.session.commit()\n return redirect(url_for('.moderate', page=request.args.get('page', 1, type=int)))", "title": "" } ]
[ { "docid": "d1edd7aa6f5051596a7b655d90922d2e", "score": "0.6276129", "text": "def _commentingAllowed(self):\n return not self.data.timeline.allWorkStopped() or (\n not self.data.timeline.allReviewsStopped() and\n self.data.mentorFor(self.data.task.org))", "title": "" }, { "docid": "83bcdafc9bcd0e2c0e85f9b1bc77bfc6", "score": "0.6207698", "text": "def moderate_enable(id):\n comment = Comment.query.get_or_404(id)\n comment.disabled = False\n db.session.add(comment)\n db.session.commit()\n return redirect(url_for('.moderate', page=request.args.get('page', 1, type=int)))", "title": "" }, { "docid": "58e2bec2a5c1d0ce9086028d5e674854", "score": "0.6133177", "text": "def disabledEdit(self):\n _ = self.request.getText\n return ('<span class=\"disabled\">%s</span>'\n % _('Immutable Page'))", "title": "" }, { "docid": "78f1ea1680c87531ccf5e3d56648b2fa", "score": "0.5938414", "text": "def view_comments(self):\n return self.comments", "title": "" }, { "docid": "3652ac331dfff54441e08f23e0586e5f", "score": "0.59275854", "text": "def disabled(func):\n\n\[email protected](func)\n\tdef emptyFunc(*args, **kargs):\n\t\tpass\n\t_append_docstring(emptyFunc, \"\\n@note Temporarily Disabled\")\n\treturn emptyFunc", "title": "" }, { "docid": "c93549d29b5a5704536167477077b775", "score": "0.5903741", "text": "def comments(self,table=None,moderated=False,readable=True,writable=True,deletable=False,hide=True):\n request,response,session,cache,T,db=self._globals()\n id=self.id or 0\n if table: table_name=str(table)\n else: table_name=request.env.path_info\n ctable=db.t2_comment\n labels=self._get_labels(ctable)\n form=SQLFORM(ctable,fields=ctable.exposes,labels=labels,\n _id='t2_comment_form') \n self._stamp(ctable,form,create=True)\n form.vars.table_name=table_name\n form.vars.record_id=id\n form.vars.status='pending' if moderated else 'approved'\n if form.accepts(request.vars,session):\n session.flash='Comment posted'\n self.redirect(args=request.args) \n if not self.is_gae:\n rows=db(ctable.table_name==form.vars.table_name)\\\n (ctable.status=='approved')\\\n (ctable.record_id==id)\\\n .select(orderby=ctable.parent_record|ctable.created_on)\n else:\n rows=[r for r in db(ctable.table_name==form.vars.table_name)\\\n (ctable.status=='approved')\\\n (ctable.record_id==id)\\\n .select()]\n rows.sort(lambda a,b: +1 if (a.parent_record,a.created_on)>(b.parent_record,b.created_on) else -1)\n ### FIX - REWITE THE LAST TWO LINES\n def format(row):\n return DIV('User %(created_signature)s on %(created_on)s says:' \\\n %row,WIKI(row.body))\n if not writable:\n def navbar(id,name='reply'):\n return SPAN('[',\n A('toggle',_onclick='$(\"#t2_comment_%i\").slideToggle();$(\"#t2_comment_form\").hide()'%id),']')\n else:\n def navbar(id,name='reply'):\n return SPAN('[',\n A('toggle',_onclick='$(\"#t2_comment_%i\").slideToggle();$(\"#t2_comment_form\").hide()'%id),'][',\n A(name,_onclick='$(\"#t2_comment_parent_record\").val(%i);$(\"#t2_comment_form\").show();$(\"#t2_comment_body\").focus();' % id),']')\n divs={0:DIV(navbar(0,'post'),'',UL(_class='comments',_id='t2_comment_0'))}\n for comment in rows:\n item=LI(format(comment),navbar(comment.id),\n UL(_class='comments',_id='t2_comment_%i'%comment.id))\n if not divs.has_key(comment.parent_record): continue ### BROKEN\n divs[comment.parent_record][2].append(item)\n divs[comment.id]=item \n script=SCRIPT('$(\"#t2_comment_form\").hide();')\n if hide: script[0]=script[0]+'$(\"#t2_comment_0\").hide();' \n form[0][0]['_style']='visibility: hidden'\n if not rows: 
divs[0]=self.messages.no_comments\n if not readable: div[0]=self.messages.no_visible_comments\n if not writable: return DIV(divs[0],script,_class='t2-comments')\n else: return DIV(divs[0],form,_class='t2-comments')", "title": "" }, { "docid": "305ebecaa60e86d706026d62e2b2524a", "score": "0.58867145", "text": "def perform_dislike(request, comment):\n flag, created = TreeCommentFlag.objects.get_or_create(comment=comment,\n user=request.user,\n flag=DISLIKEDIT_FLAG)\n if created:\n TreeCommentFlag.objects.filter(comment=comment,\n user=request.user,\n flag=LIKEDIT_FLAG).delete()\n else:\n flag.delete()\n\n signals.comment_feedback_toggled.send(\n sender=flag.__class__,\n flag=flag,\n comment=comment,\n created=created,\n request=request,\n )\n return created", "title": "" }, { "docid": "aad3813b41ec0f93eccaa90ea0c98b4c", "score": "0.58836746", "text": "def random_comment_exempt(view_func):\n def wrapped_view(*args, **kwargs):\n response = view_func(*args, **kwargs)\n response._random_comment_exempt = True\n return response\n return wraps(view_func)(wrapped_view)", "title": "" }, { "docid": "2b44b7796333e0f0065c93f8cc004cba", "score": "0.57626325", "text": "def getReadOnlyView(contact):", "title": "" }, { "docid": "a4bab0d45cae68605e42b7d089765fd1", "score": "0.57512224", "text": "def comment_fields(self):", "title": "" }, { "docid": "c25eab3426cd1e04dd07b9bf3df3e0f8", "score": "0.57470614", "text": "def is_make_viewer_no_comment(self):\n return self._tag == 'make_viewer_no_comment'", "title": "" }, { "docid": "e5dd2d5834580cd7b5af2cb1153cbb00", "score": "0.5684205", "text": "def disallow_discussion(self, request, queryset):\n queryset.update(allow_discussion=False)\n self.message_user(request, \"Document(s) no longer allow discussion.\")", "title": "" }, { "docid": "7b9a0850da0f7a3eb089e992adab035f", "score": "0.56442916", "text": "def test_can_not_add_comments_if_discussion_is_not_allowed(self):\n\n # Disable discussion\n registry = queryUtility(IRegistry)\n settings = registry.forInterface(IDiscussionSettings)\n settings.globally_enabled = False\n\n def make_request(form={}):\n request = TestRequest()\n request.form.update(form)\n alsoProvides(request, IFormLayer)\n alsoProvides(request, IAttributeAnnotatable)\n return request\n\n provideAdapter(\n adapts=(Interface, IBrowserRequest),\n provides=Interface,\n factory=CommentForm,\n name=\"comment-form\",\n )\n\n request = make_request(form={\"form.widgets.text\": \"bar\"})\n\n commentForm = getMultiAdapter(\n (self.context, request),\n name=\"comment-form\",\n )\n commentForm.update()\n data, errors = commentForm.extractData() # pylint: disable-msg=W0612\n\n # No form errors, but raise unauthorized because discussion is not\n # allowed\n self.assertEqual(len(errors), 0)\n\n self.assertRaises(Unauthorized, commentForm.handleComment, commentForm, \"foo\")", "title": "" }, { "docid": "6b24106f4417fe8b0861cca739849917", "score": "0.56360376", "text": "def test_anonymous_can_not_add_comments_if_discussion_is_not_allowed(self):\n\n # Anonymous comments are disabled by default\n\n logout()\n\n def make_request(form={}):\n request = TestRequest()\n request.form.update(form)\n alsoProvides(request, IFormLayer)\n alsoProvides(request, IAttributeAnnotatable)\n return request\n\n provideAdapter(\n adapts=(Interface, IBrowserRequest),\n provides=Interface,\n factory=CommentForm,\n name=\"comment-form\",\n )\n\n request = make_request(form={\"form.widgets.text\": \"bar\"})\n\n commentForm = getMultiAdapter((self.context, request), name=\"comment-form\")\n 
commentForm.update()\n data, errors = commentForm.extractData() # pylint: disable-msg=W0612\n\n self.assertEqual(len(errors), 0)\n self.assertRaises(\n Unauthorized,\n commentForm.handleComment,\n commentForm,\n \"foo\",\n )", "title": "" }, { "docid": "9803fe5919d14fcecf1f1d99e79e0786", "score": "0.56034935", "text": "def get_approved_comments(self): \n return self.get_comments().filter(approved=True)", "title": "" }, { "docid": "c910ad676728f824c13b8398c251201c", "score": "0.56003", "text": "def is_disabled(self) -> bool:", "title": "" }, { "docid": "c45b941f85aa5f34ae0cbf6eb088c5a5", "score": "0.55939287", "text": "def moderate():\n page = request.args.get('page', 1, type=int)\n pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(\n page,\n per_page=current_app.config['YURAIST_POSTS_PER_PAGE'],\n error_out=False)\n comments = pagination.items\n\n return render_template('moderate.html',\n comments=comments,\n pagination=pagination,\n page=page)", "title": "" }, { "docid": "73d5aa042bf89fc9a95f586e643eaad4", "score": "0.5557544", "text": "def get_form(self, request, obj=None, **kwargs):\n form = super().get_form(request, obj, **kwargs)\n if request.user.has_perm('blog.add_comments'):\n form.base_fields['user'].initial = request.user\n disabled_fields = ('user',)\n for item in disabled_fields:\n if item in form.base_fields:\n form.base_fields[item].disabled = True\n form.base_fields[item].widget = forms.HiddenInput()\n\n return form", "title": "" }, { "docid": "59b99e0273d724db477e81fda4320b7a", "score": "0.5508342", "text": "def is_disabled(self):\n return False", "title": "" }, { "docid": "ff1abd2b8b8b40acb2dffa110f857bb0", "score": "0.5505273", "text": "def get_queryset(self):\n # return self.filter(private=False)\n return super(CommentManager, self).get_queryset().filter(private=False)", "title": "" }, { "docid": "50182ee1139b005061638188b4f6fc72", "score": "0.54932815", "text": "def comment(self, comment):", "title": "" }, { "docid": "ae1a617df940789ebc33f2c2920e4148", "score": "0.5450886", "text": "def test_general_comments(self):\n self._test_general_comments(user_is_owner=False)", "title": "" }, { "docid": "cdba48e0ac9293192003c71fc496f384", "score": "0.54341173", "text": "def question_10_set_approved_to_false(comment):\n comment.refresh_from_db()\n return comment.approved is False", "title": "" }, { "docid": "2213a55c1f6265bb20be84653b96ad53", "score": "0.5432087", "text": "def __bool__(self):\n return bool(self.comments)", "title": "" }, { "docid": "10d778ab5b625cd080ce564713e942d1", "score": "0.5411794", "text": "def disable_view(self, request, id):\n try:\n obj = self.get_object(request, id)\n obj.disable(request.user.username)\n return self.response_view(request, True, 'disable_view', 'Disable Success %d' % obj.id)\n except Exception as e:\n return self.response_view(request, False, 'disable_view', 'Error: {}'.format(str(e)))", "title": "" }, { "docid": "2f06b17d82b8d4e213cee6294926d981", "score": "0.5402131", "text": "def Disable(self):\n ...", "title": "" }, { "docid": "e74bc63704e23a30da55d6cc966ea89d", "score": "0.5396451", "text": "def comment_unlike(self, comment_pk: int) -> bool:\n return self.comment_like(comment_pk, revert=True)", "title": "" }, { "docid": "3e1569ffa2acc1529be0cce01e3acac6", "score": "0.5391986", "text": "def is_viewer_no_comment(self):\n return self._tag == 'viewer_no_comment'", "title": "" }, { "docid": "bd1380611b06c20bca68c98a76a84946", "score": "0.5381551", "text": "def get_queryset(self) -> QuerySet:\n return 
super().get_queryset().filter(kind=UserRelItemKind.comment)", "title": "" }, { "docid": "1dac3c00a24f36975f8f1bc0101b44b6", "score": "0.53780264", "text": "def can_comment(self):\n if not self.can_view():\n return False\n\n if self.reviewer.external_id and not self.share.can_comment:\n # Reviewer can view but not comment\n return False\n\n return True", "title": "" }, { "docid": "e99733b209e4225629a09f67649673b6", "score": "0.537413", "text": "def comment_toggle(self):\n blocks = self.get_selected_blocks()\n\n # iterate through lines in doc commenting or uncommenting\n # based on whether everything is commented or not\n commentAllOut = any([not str(block.text()).lstrip().startswith('#')\n for block in blocks])\n if commentAllOut:\n for block in blocks:\n cursor = QtGui.QTextCursor(block)\n cursor.select(QtGui.QTextCursor.LineUnderCursor)\n selectedText = cursor.selectedText()\n right_split = len(selectedText.lstrip())\n count = len(selectedText)\n split_index = count-right_split\n split_text = selectedText[split_index:]\n newText = ' '*split_index + '#' + split_text\n cursor.insertText(newText)\n else:\n for block in blocks:\n cursor = QtGui.QTextCursor(block)\n cursor.select(QtGui.QTextCursor.LineUnderCursor)\n selectedText = cursor.selectedText()\n newText = str(selectedText).replace('#', '', 1)\n cursor.insertText(newText)", "title": "" }, { "docid": "6656590d54afbd807eaec818daed3bad", "score": "0.5370458", "text": "def comment(self, comment_list: list, link: str) -> None:\r\n\r\n if not self._check_current_url(link):\r\n self.driver.get(link)\r\n time.sleep(2)\r\n\r\n disabled = False\r\n # Check disabled comments\r\n try:\r\n enter_key = \"\\ue007\"\r\n\r\n # gets the comment field and clicks\r\n comment_field = self.driver.find_element_by_class_name(\"Ypffh\").click()\r\n\r\n # self.driver.execute_script(\r\n # 'window.scrollTo(0, document.body.scrollHeight)')\r\n except:\r\n print(\"The comments are disabled\")\r\n disabled = True\r\n\r\n if not disabled:\r\n try:\r\n seconds = random.randint(1, 3)\r\n time.sleep(seconds)\r\n\r\n # Get one comment\r\n comment_text = random.choice(comment_list)\r\n\r\n # We have to search for the field again\r\n comment_field = self.driver.find_element_by_css_selector(\".Ypffh\")\r\n comment_field.send_keys(comment_text + enter_key)\r\n\r\n time.sleep(random.randint(1, 3))\r\n except Exception as e:\r\n print(\"Error adding the comment\")\r\n print(e)", "title": "" }, { "docid": "e9a5efda8c918f1d473203d6cc71871e", "score": "0.5350735", "text": "def set_comments_active(self, repo_id):\n self.ic.execute(\"\"\"UPDATE pull_request_review_comments SET state=1\nWHERE repository_id={} AND state=0\"\"\".format(repo_id))\n self.m.commit()", "title": "" }, { "docid": "539db43d86e9d104ca6a2e7341796d65", "score": "0.53304255", "text": "def comment_canedit(user_id):\n if not c.user.id:\n return 'false'\n\n if c.user.id == user_id or c.user.admin:\n return 'true'\n\n return 'false'", "title": "" }, { "docid": "73d90655195a6c54dab3e6f4ca4b4df3", "score": "0.5306584", "text": "def _can_we_disable (self, opt):\n return True", "title": "" }, { "docid": "f284c9b169c69c36d0bb9d1df527f4f6", "score": "0.5292783", "text": "def disable(self):\n self.is_disabled = True", "title": "" }, { "docid": "ea0383f8b4597afdf1511d1265e659d4", "score": "0.5289602", "text": "def send_pr_comment():\n pass", "title": "" }, { "docid": "acdc9dfd7058623969e19d9ab6d4840d", "score": "0.5284497", "text": "def addCommentBar(self):", "title": "" }, { "docid": "918dbb9b7bfd9a6c80da897bd31ed667", 
"score": "0.52843", "text": "def test_deleted_comment_no_vote(self):\n pass", "title": "" }, { "docid": "545ae14505c3b0bfb1e0dd0f317ab7f6", "score": "0.5275825", "text": "def templatePath(self):\n return 'v2/modules/gci/task/_comments.html'", "title": "" }, { "docid": "8dfbf000c05ea53c1052cd770726d668", "score": "0.5262291", "text": "def dislike(request, comment_id, next=None):\n comment = get_object_or_404(get_comment_model(), pk=comment_id)\n\n if not has_app_model_option(comment)['allow_feedback']:\n ctype = ContentType.objects.get_for_model(comment.content_object)\n raise Http404(\"Comments posted to instances of '%s.%s' are not \"\n \"explicitly allowed to receive 'disliked it' flags. \"\n \"Check the COMMENTS_TREE_APP_MODEL_OPTIONS \"\n \"setting.\" % (ctype.app_label, ctype.model))\n # Flag on POST\n if request.method == 'POST':\n perform_dislike(request, comment)\n return next_redirect(request,\n fallback=(next or 'comments-tree-dislike-done'),\n c=comment.pk)\n # Render a form on GET\n else:\n disliked_it = request.user in comment.users_flagging(DISLIKEDIT_FLAG)\n return render(request, 'django_comments_tree/dislike.html',\n {'comment': comment,\n 'already_disliked_it': disliked_it,\n 'next': next})", "title": "" }, { "docid": "af60a1752db0d0d21b81825c978506f3", "score": "0.5262235", "text": "def python_mode_allowed(self):\r\n return self.state not in (State.COMMENT,)", "title": "" }, { "docid": "fae549d209c31a8f9fcc244d57ef0344", "score": "0.5261262", "text": "def disable_views_buttons() -> None:\n disable_button(enter_views_button)\n disable_button(rename_button)\n disable_button(next_button)\n disable_button(exit_button)", "title": "" }, { "docid": "ee1e192d5aa5d8c47e04eb1348df8bf1", "score": "0.5257291", "text": "def allow_discussion(self, request, queryset):\n queryset.update(allow_discussion=True)\n self.message_user(request, \"Document(s) now allow discussion.\")", "title": "" }, { "docid": "ebd09105099b20c480cb96d3f545bfb6", "score": "0.52332556", "text": "def disconnectedDisabled(self):\n self.disconnect_button.setEnabled(0)\n self.startExperimentWidget.setEnabled(0)\n self.stopExperimentWidget.setEnabled(0)\n self.retrieveDataButton.setEnabled(0)\n self.sendDataButton.setEnabled(0)\n \n self.connect_button.setEnabled(1)\n self.disconnect_button.setEnabled(0)", "title": "" }, { "docid": "249e830dbdde0da6b0b029156b2df97b", "score": "0.5227447", "text": "def test_deleted_comment_no_reply(self):\n pass", "title": "" }, { "docid": "0f7d3a126b24ef48e77ef66f923a71a2", "score": "0.52250904", "text": "def DISABLED(self):\n return \"disabled\"", "title": "" }, { "docid": "c160c0b21895568bcd0c56a98b36992f", "score": "0.52206045", "text": "def is_invite_viewer_no_comment(self):\n return self._tag == 'invite_viewer_no_comment'", "title": "" }, { "docid": "c160c0b21895568bcd0c56a98b36992f", "score": "0.52206045", "text": "def is_invite_viewer_no_comment(self):\n return self._tag == 'invite_viewer_no_comment'", "title": "" }, { "docid": "0b00b61ed9281ac86736ce6ae2eb0268", "score": "0.5220074", "text": "def test_deleted_comment_replies_edit(self):\n pass", "title": "" }, { "docid": "c865fb54c1816dc76c4b51626ce5ec10", "score": "0.5207959", "text": "def plotcomments(self, name, univers):\n toplot = [i for i in self.comments if i['name'] == name]\n res = '<ol id=\"comments\">'\n for i in range(len(toplot)):\n comment = toplot[i]\n res += '''<div id=\"comment\"><dt><b>{}</b>:</dt><dt>{}</dt></div>\n <dt><small>(<a href=\"reportcomment?univers={}&name={}&i={}\"\n onclick=\"return 
confirm('Commentaire signalé')\">Signaler ce commentaire</a>)\n </small></dt>'''.format(comment['pseudo'], comment['text'], univers, name, i)\n res += '</ol>'\n return res", "title": "" }, { "docid": "751c72409613698f1618624ef44bb5cb", "score": "0.5197531", "text": "def comment_text(self) -> str: # pragma: no cover", "title": "" }, { "docid": "3f371380b9c3a36bb129be28780e9053", "score": "0.5194888", "text": "def disable() -> None:", "title": "" }, { "docid": "3f371380b9c3a36bb129be28780e9053", "score": "0.5194888", "text": "def disable() -> None:", "title": "" }, { "docid": "49d1b297358e30c0a0fb2833a583eb6d", "score": "0.5190189", "text": "def test_no_comment(self):\n time = datetime.datetime.now()\n time = time - datetime.timedelta(hours=1)\n with self.settings(START_DATETIME=time.strftime(\"%d/%m/%Y %H:%M:%S\")):\n # auth token\n token = Token.objects.get(user__username='test3')\n\n # auth header\n header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}\n\n # comment create response\n response = self.client.get(reverse('comment:comment_list',\n kwargs={'release': 1}),\n {}, **header)\n\n # run test\n # proper status code test\n self.assertEqual(response.status_code, 403)\n # returned comments test\n self.assertEqual(response.content, b'')", "title": "" }, { "docid": "f638b5a51ffd533e0daf967bc8b66a19", "score": "0.5184632", "text": "def can_edit_comment(comment):\n\treturn comment.creator == current_user._get_current_object()", "title": "" }, { "docid": "fbfbab71126e95fe0dc318af72a71520", "score": "0.5181334", "text": "def editPost(request, article_id):\n if request.user.is_superuser:\n try:\n article = Article.objects.get(pk=article_id)\n comments = article.comment_set.all().order_by('date')\n except Article.DoesNotExist:\n raise Http404(\"Arr, cap'n, that booty be nowhere.\")\n context = {\n 'article': article,\n 'comments': comments,\n }\n return render(request, 'blog/editPost.html', context)\n else:\n return HttpResponseForbidden()", "title": "" }, { "docid": "14acdf43e2d12bac4c02556633a86015", "score": "0.5178581", "text": "def give_feedback(self):\n studentFeedback(self.comments)", "title": "" }, { "docid": "66ff6c106723b7dd0502e11e0f18af25", "score": "0.5177538", "text": "def test_viewlet_not_enabled(self):\n viewlet = SocialBookmarksViewlet(self.folder, self.request, None)\n viewlet.update()\n self.assertEqual(viewlet.enabled, False)", "title": "" }, { "docid": "857417736b774ef54fcd9013621c08a5", "score": "0.5176239", "text": "def on_commentButton_clicked(self):\n self.__insertString(\"(?#)\", -1)", "title": "" }, { "docid": "857417736b774ef54fcd9013621c08a5", "score": "0.5176239", "text": "def on_commentButton_clicked(self):\n self.__insertString(\"(?#)\", -1)", "title": "" }, { "docid": "69a29296cfe99c6c426ff604f34a6f0e", "score": "0.5165759", "text": "def test_diff_comments(self):\n self._test_diff_comments(user_is_owner=False)", "title": "" }, { "docid": "586c2d66cfdf81ab8169e29c15761a59", "score": "0.51642674", "text": "def tip_01_Do_Not(self):", "title": "" }, { "docid": "83e30440eff1abf2afbdd88bd6b7baf5", "score": "0.5158008", "text": "def _prerender_comment(self):\n comment = self.getComment()\n display_format = self.display_format\n formatted = self.ShowDescription(comment+' ', display_format)\n \n formatted = self._getIssue()._findIssueLinks(formatted)\n \n self._prerendered_comment = formatted", "title": "" }, { "docid": "2250bea4d88bd0cff1826138760aee90", "score": "0.51514703", "text": "def comment(request): # pragma: no cover\n vs = 
ValueSet.get(request.matchdict['id'])\n return HTTPFound(request.blog.post_url(vs, request, create=True) + '#comment')", "title": "" }, { "docid": "d2349cdd37639a7be255a682c97e6e0f", "score": "0.5145251", "text": "def test_screenshot_comments(self):\n self._test_screenshot_comments(user_is_owner=False)", "title": "" }, { "docid": "da5132fd97c3ef7f0380a0e734a7afac", "score": "0.5139268", "text": "def test_screenshot_comments_with_draft(self):\n self._test_screenshot_comments(user_is_owner=True)", "title": "" }, { "docid": "1af24cc082699d91aba33177203a42ff", "score": "0.5138207", "text": "def t_COMMENT(t):\n return t", "title": "" }, { "docid": "69eb9928dd26aa3ca631384db23c58ca", "score": "0.5133192", "text": "def have_access_comment(fn):\n @wraps(fn)\n def wrapper(id, *args, **kwargs):\n comment_json = dbc.collection('comments').document(id).get()\n comment = comment_json.to_dict()\n if is_admin() == False:\n if comment['created_by'] != user_id():\n flash(\"Ce n'est pas l'un de vos commentaires !\")\n return redirect(url_for('profile'))\n return fn(id, *args, **kwargs)\n return wrapper", "title": "" }, { "docid": "bbc5a52fca08f8a75060b2b59b616e87", "score": "0.51240885", "text": "def test_editing_a_comment_without_comment(self):\n response = self.app.put(\n \"api/1.0/comments/releases/{}\".format(self.release_ids[0]),\n data=json.dumps(\n dict(\n commentID=self.valid_comment_ids[0]\n )\n ),\n content_type=\"application/json\",\n headers={\n \"User\": self.valid_users[0],\n \"Authorization\": self.valid_tokens[0]\n }\n )\n self.assertEqual(400, response.status_code)", "title": "" }, { "docid": "68f706257265b07718546b5ade0d323d", "score": "0.51034814", "text": "def removeCommentBar(self):", "title": "" }, { "docid": "57996cafded405d380bd120a40f0ab48", "score": "0.5088942", "text": "def disable(self):\n raise NotImplementedError()", "title": "" }, { "docid": "afadf1b811d0cfad70e21050c95a0b02", "score": "0.5084799", "text": "def send_issue_comment():\n pass", "title": "" }, { "docid": "a5398299648f0bbc4259d7b3205f2d19", "score": "0.5076062", "text": "def dislikes(self):\n return self.get_queryset().filter(vote__lt=0)", "title": "" }, { "docid": "52e18bc88d7ea8a6baf1b7fa99a98d3a", "score": "0.50754136", "text": "def context_disable(self):\r\n session = self.session_info()\r\n context = {}\r\n \r\n if session[\"Rights\"] == \"user\":\r\n context[\"disabled\"] = \"disabled\"\r\n else:\r\n context[\"disabled\"] = \"\"\r\n\r\n return context", "title": "" }, { "docid": "46a5ae4897686a0bb00ba85e2fdc8b3c", "score": "0.50723696", "text": "def comment(status, message=None):", "title": "" }, { "docid": "0fb3c4b851b55eb594fc637c2cdd29f8", "score": "0.50713503", "text": "def generate_comments(self, eid):\n return \"\", 0", "title": "" }, { "docid": "ef0e7e85845d752aa40c3861f68d76d0", "score": "0.50704634", "text": "def disable(self):\r\n\t\treturn self.setEnabled(False)", "title": "" }, { "docid": "cf68462d1579509c6618a5935a9d2fb5", "score": "0.5067632", "text": "def test_as_action_not_enabled(self):\n viewlet = SocialBookmarksViewlet(self.page, self.request, None)\n viewlet.update()\n self.assertEqual(viewlet.action_enabled, False)", "title": "" }, { "docid": "c8b08bc97de5f10b04dec4f5650bab04", "score": "0.5053899", "text": "def perform_like(request, comment):\n flag, created = TreeCommentFlag.objects.get_or_create(comment=comment,\n user=request.user,\n flag=LIKEDIT_FLAG)\n if created:\n TreeCommentFlag.objects.filter(comment=comment,\n user=request.user,\n flag=DISLIKEDIT_FLAG).delete()\n else:\n 
flag.delete()\n\n signals.comment_feedback_toggled.send(\n sender=flag.__class__,\n flag=flag,\n comment=comment,\n created=created,\n request=request,\n )\n return created", "title": "" }, { "docid": "c68ba099b8cecf392be2f3d3e67ff750", "score": "0.5053376", "text": "async def disable_list(self, ctx):\n\n state = state_instance.get_state(ctx.guild.id)\n cmds = state.get_commands()\n \n dis_cmds = \"\"\n for cmd in cmds:\n comd = state.get_var(cmd)\n if comd.server_wide == False:\n dis_cmds += f\"`{cmd}` disabled server-wide\\n\"\n if len(comd.channels) > 0:\n dis_cmds += f\"`{cmd}` disabled in channels {str(comd.channels).strip('{}')}\\n\"\n if len(comd.roles) > 0:\n dis_cmds += f\"`{cmd}` disabled for roles {str(comd.roles).strip('{}')}\\n\"\n \n if dis_cmds == \"\":\n return await ctx.send(\"Disabled command list is empty, darkness is its only friend now\")\n\n dlist = discord.Embed(\n title = \"Disabled Commands\",\n description = dis_cmds, \n color = discord.Colour.from_rgb(0,250,141), timestamp = ctx.message.created_at)\n\n dlist.set_author(name = \"Note: Disabling all commands server-wide will override all rules of other commands. Also, disabling for roles checks if the member has that role as the toprole. IQ is required to use these command\")\n \n dlist.set_footer(\n text=f\"Requested by {ctx.author}\",\n icon_url=ctx.author.avatar_url)\n\n await ctx.send(embed = dlist)", "title": "" }, { "docid": "1db0ab38d5dcfca830e13c2ebbc02573", "score": "0.5047403", "text": "def disable_editing(self):\n self.open_param_editor_btn[\"state\"] = \"disabled\"\n self.load_btn[\"state\"] = \"disabled\"\n self.save_btn[\"state\"] = \"disabled\"\n self.save_as_btn[\"state\"] = \"disabled\"\n self.check_btn[\"state\"] = \"disabled\"\n self.run_btn[\"state\"] = \"disabled\"\n\n self.ion_name_entry[\"state\"] = \"disabled\"\n self.ion_energy_entry[\"state\"] = \"disabled\"\n self.ion_name_entry_var.set(\"\")\n self.ion_energy_entry_var.set(\"\")\n\n self.target_type_cb[\"state\"] = \"disabled\"", "title": "" }, { "docid": "ce9b092e7c1ac71bed10eccbbe89ccba", "score": "0.503864", "text": "def disabled(f):\n def decorated(*args, **kwargs):\n pass\n decorated.__name__ = f.__name__\n decorated.__doc__ = f.__doc__\n return decorated", "title": "" }, { "docid": "9e1621020de7af320f023964f4ae196a", "score": "0.50299215", "text": "def disable(self):\n raise NotImplementedError('This method should be implemented by a descendant!')", "title": "" }, { "docid": "dffc1e66881147a1f03d5aa72f92f998", "score": "0.50187397", "text": "def get(self):\n comment_id = self.request.get('comment_id')\n comment = self.get_comment_by_id(comment_id)\n user_id = self.check_user_id_cookie()\n\n self.render('edit-comment.html', comment=comment, user_id=user_id)", "title": "" }, { "docid": "ae23cd02c00e4966dfffb346db624401", "score": "0.5011287", "text": "def comments():\n if request.method == \"GET\":\n response = []\n for comment in Comment.query.all():\n response.append({'id': comment.id,\n 'date': comment.date.strftime(\"%Y:%M:%D %H:%M:%S\"),\n 'body': comment.body,\n 'comment_to_response': comment.comment_to_response,\n 'is_response': comment.is_response,\n 'user_id': comment.user_id,\n 'task_id': comment.task_id})\n return json.dumps(response), 200\n\n else:\n body = request.form[\"body\"]\n task_id = request.form[\"task_id\"]\n comment_to_response = request.form.get(\"comment_to_response\", None)\n\n user_id = session.get('user_id', None)\n if user_id is None:\n json.dumps({'status': 401,\n 'message': \"not authorized\"}), 401\n 
if not Task.query.filter(Task.id == task_id).first():\n return json.dumps({'status': 404,\n 'message': \"task not found\"}), 404\n if comment_to_response and not Comment.query.filter(Comment.id == comment_to_response).first():\n return json.dumps({'status': 404,\n 'message': \"comment not found\"}), 404\n else:\n new_comment = Comment(datetime.datetime.now(),\n body=body,\n comment_to_response=comment_to_response,\n user_id=user_id,\n task_id=task_id)\n db.session.add(new_comment)\n db.session.commit()\n return new_comment.to_json(), 201", "title": "" }, { "docid": "f3990f6e77a4f9acdd692fab35c72e69", "score": "0.5006656", "text": "def comments(self):\n return Comment.all().filter(\"post = \", str(self.key().id()))", "title": "" }, { "docid": "e090e4d1666e1aec1620dbd2f5ef07ba", "score": "0.5004167", "text": "def comment_editing(comment_id):\n if c.editing_comment == comment_id:\n return 'true'\n return 'false'", "title": "" }, { "docid": "c8663a1f733e5139d09d46cc478ce7cb", "score": "0.5000209", "text": "def test_disabled_for_testing_flag(self):\n self.settings[\"REWRITE\"] = True\n settings.TESTING = True\n response = self.browser.get(\"/plain-view.html\")\n self.assertTrue(self.hidden not in response.content.decode())\n settings.TESTING = False", "title": "" }, { "docid": "6294b14173ae7d0e831d26d1b01d4e5f", "score": "0.499847", "text": "def show_comments(request, pk):\n ticket = get_object_or_404(Ticket, pk=pk) if pk else None\n comments = Comment.objects.filter(ticket_id=pk).order_by('-creation_ts')\n return render(request, \"showcomments.html\", {'ticket': ticket, 'comments': comments})", "title": "" }, { "docid": "20e329488e38288faa3089a43a2138e1", "score": "0.499076", "text": "def is_hidden_ed(self):\n return False", "title": "" }, { "docid": "980893035b90b51c75b3767fab85122c", "score": "0.49776515", "text": "def comments_view(request):\n if request.method == \"GET\":\n\n # Get user info\n current_user = request.user\n\n # Set session for the main left side bar active menu\n request.session['active_sidebar_menu'] = \"comments\"\n\n fsearch = request.GET.get('q')\n\n if fsearch:\n # Get all reviews\n bl_data = get_bp_comments_list(current_user.id, fsearch, 100)\n else:\n # Get all reviews\n bl_data = get_bp_comments_list(current_user.id, \"\", 500)\n fsearch = ''\n\n paginator = Paginator(bl_data, 5) # Show 5 rows per page\n page = request.GET.get('page', 1)\n\n try:\n data_pages = paginator.page(page)\n except PageNotAnInteger:\n data_pages = paginator.page(1)\n except EmptyPage:\n data_pages = paginator.page(paginator.num_pages)\n\n # Get the index of the current page\n index = data_pages.number - 1 # edited to something easier without index\n max_index = len(paginator.page_range)\n start_index = index - 2 if index >= 2 else 0\n end_index = index + 2 if index <= max_index - 2 else max_index\n page_range = list(paginator.page_range)[start_index:end_index]\n totRows = \"{:,}\".format(paginator.count)\n\n return render(request, 'bp/comments.html',\n {\n 'title': 'Business Comments',\n 'meta_desc': 'Manage your business comments',\n 'bl_data': bl_data,\n 'data_pages': data_pages,\n 'page_range': page_range,\n 'totRows': totRows,\n 'q': fsearch\n })", "title": "" }, { "docid": "1453b16b265d4550839853f02e2f43df", "score": "0.49757078", "text": "def HIDDEN(self):\n return \"hidden\"", "title": "" }, { "docid": "7f6201ec70beb20e2c1d8a27981658f6", "score": "0.49739233", "text": "def get(self):\n if self.user:\n comment_id = self.request.get('comment_id')\n comment_id = int(comment_id)\n c = 
Comment.by_id(comment_id)\n\n if not c:\n self.error(404)\n return\n\n if self.user.name == c.creator:\n self.render(\"editcomment.html\", comment=c.comment)\n else:\n error = \"you can only EDIT your own comment\"\n self.render('editcomment.html', error=error)\n\n else:\n self.redirect(\"/blog/login\")", "title": "" }, { "docid": "f679e45eefc323376593bfdd1338b5de", "score": "0.4971878", "text": "def is_comment(self):\n if self.instanceType == 'Comments':\n return True", "title": "" }, { "docid": "9704483c6463f324299e42ff48a55583", "score": "0.4963851", "text": "def disable(self):\n\n self._f_number_input.config(state='disabled')\n self._lbl_user_instruction.config(state='disabled')", "title": "" }, { "docid": "4c5c8e93cb28d89fb235b63013b4ff26", "score": "0.49594015", "text": "def test_show_thread_no_permission(self):\n self.thread.is_hidden = True\n self.thread.save()\n\n self.override_acl({'can_hide_threads': 1})\n\n thread_json = self.get_thread_json()\n self.assertTrue(thread_json['is_hidden'])\n\n self.override_acl({'can_hide_threads': 0})\n\n response = self.patch(\n self.api_link, [\n {\n 'op': 'replace',\n 'path': 'is-hidden',\n 'value': False,\n },\n ]\n )\n self.assertEqual(response.status_code, 404)", "title": "" }, { "docid": "4491ca7bba01b395db3918aece0cae2d", "score": "0.4952988", "text": "def commentable(func):\n @wraps(func)\n def add_comment(obj, *args, **kwargs):\n stmts = func(obj, *args, **kwargs)\n if obj.description is not None:\n stmts.append(obj.comment())\n return stmts\n return add_comment", "title": "" }, { "docid": "bd909b5f58ed5bada3bf9471b5dfe2bd", "score": "0.49513128", "text": "def change_view(self, request, object_id, extra_context=None):\n\n from django_mises import comments\n\n data = request.POST.copy() or None\n\n obj = models.Post.objects.get(id=object_id)\n\n internal_comments = comments.get_model().objects.for_model(obj).select_related(depth=1).filter(comment_type='internal').order_by('id')\n\n ## Handle our data, let the rest flow over\n comment_form = comments.get_internal_form()(obj, data)\n context = {\n 'comments': internal_comments,\n 'comment_form': comment_form,\n }\n\n if data is not None:\n if data.has_key('internal_comment'):\n\n if comment_form.is_bound:\n if comment_form.is_valid():\n ## Do not allow tampering\n comment_form.cleaned_data['user'] = request.user\n\n comment = comment_form.save()\n\n ## Maybe redirect out, maybe complain\n return super(PostAdmin, self).change_view(request, object_id, extra_context=context)", "title": "" }, { "docid": "2627d9ecc5a0a5a245f8d7a29e536120", "score": "0.49479723", "text": "def disable(self) -> None:\n self._disabled = True\n return", "title": "" } ]
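Two negatives in this row (`perform_like` / `perform_dislike`) implement mutually exclusive feedback flags: setting one deletes the other, and a repeat click deletes the flag itself. A framework-free sketch of the same toggle logic, with illustrative names; it mirrors the get_or_create-then-delete dance without the ORM:

    def toggle_flag(flags: set, user: str, flag: str, opposite: str) -> bool:
        """Toggle (user, flag), clearing (user, opposite) when setting.
        Returns True if the flag was created, False if it was removed."""
        key = (user, flag)
        if key in flags:
            flags.remove(key)                # second click undoes the vote
            return False
        flags.add(key)
        flags.discard((user, opposite))      # like/dislike are mutually exclusive
        return True

    flags = set()
    assert toggle_flag(flags, "alice", "like", "dislike") is True
    assert toggle_flag(flags, "alice", "dislike", "like") is True
    assert ("alice", "like") not in flags    # like cleared by the dislike
    assert toggle_flag(flags, "alice", "dislike", "like") is False  # toggled off

The boolean return mirrors the `created` flag that the retrieved views feed into their `comment_feedback_toggled` signal.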
3c6f5cdd46e7eaa0cc49ddef5eb8891f
Load the model from a file.
[ { "docid": "e5cc9afc8f4498687ccd2a8e123d696d", "score": "0.78549993", "text": "def load_model(self):\n file = open(self.config.MODEL_PATH, \"rb\")\n self.model = pickle.load(file, encoding=\"ASCII\")", "title": "" } ]
[ { "docid": "18ad33d60e09bfae3e842f06f0a67318", "score": "0.8907899", "text": "def load_model(self, model_file=None):", "title": "" }, { "docid": "2cab4c4c48e3d4248586f2e5f9e4b748", "score": "0.8749351", "text": "def load_model(from_file):\n\n raise NotImplementedError", "title": "" }, { "docid": "693704ffefa06355b9f6424c9ef860c2", "score": "0.85518026", "text": "def load_model(self, filepath):\n self.model = load_model(filepath)", "title": "" }, { "docid": "81b70e8a96d5faad5abe77c6174f8a17", "score": "0.8367919", "text": "def load_from_file(file_name):\n model = pickle.load(open(file_name, \"rb\"))\n return model", "title": "" }, { "docid": "53832e6d408a48979432de794d849052", "score": "0.821308", "text": "def load_model(self, filename=\"model.mdl\"):\n with open(filename, \"rb\") as input_stream:\n self._model = pickle.loads(input_stream.read())", "title": "" }, { "docid": "1471bd212c94610c5758ec6995675f6a", "score": "0.81722134", "text": "def load_from(filename):\n model = pickle.load(open(filename, 'rb'))\n return model", "title": "" }, { "docid": "39f04d4f653e4ec33122dad5a76f9786", "score": "0.81568944", "text": "def load_model(self, file):\n self.nn_model = load_model(file)", "title": "" }, { "docid": "d9af42e72f4c1de5d8cb3fd5064ca6f6", "score": "0.81153256", "text": "def load_model(self):\n #Load the model from disk\n self.loaded_model = pickle.load(open(self.model_file_path, 'rb' ))", "title": "" }, { "docid": "dd20bbb67c42cd2b0ac47508f8d52a91", "score": "0.800633", "text": "def load_model(filename):\r\n return K.models.load_model(filename)", "title": "" }, { "docid": "4a0838c8119577abe126b1f8cc027a0f", "score": "0.79323816", "text": "def load_model(filepath):\n import pickle\n\n loaded_model = pickle.load(open(filepath, 'rb'))\n return loaded_model", "title": "" }, { "docid": "063735b4b053c5a80b5b9dd9073ffdff", "score": "0.7927491", "text": "def load_model(filename):\n return K.models.load_model(filename)", "title": "" }, { "docid": "17c0dcfe246e2f15e9c9f14f92329d62", "score": "0.78813773", "text": "def loadmodel( fname):\n with gzip.open(fname, 'r') as fin:\n D = load(fin)\n return D\n print 'Load model from file: {}'.format(fname)", "title": "" }, { "docid": "a1f24e5c6bbf50e247edceb58a56942f", "score": "0.7851059", "text": "def loadmodel(filename=\"Request\"):\r\n pickle.load(open(path+\"/model/\"+filename+\".pkl\", 'rb'))", "title": "" }, { "docid": "91db7fcd479a00b12999b61470b6daec", "score": "0.7837873", "text": "def loadModel(fileName):\n # with open('model/'+fileName, 'rb') as handle :\n with open(fileName, 'rb') as handle:\n return pickle.load(handle)", "title": "" }, { "docid": "91db7fcd479a00b12999b61470b6daec", "score": "0.7837873", "text": "def loadModel(fileName):\n # with open('model/'+fileName, 'rb') as handle :\n with open(fileName, 'rb') as handle:\n return pickle.load(handle)", "title": "" }, { "docid": "2d40a9cd72d3943f45403aab6e9a98f0", "score": "0.7823432", "text": "def load(self, path):\r\n\r\n self.model = self.module.load(path)\r\n return self.model", "title": "" }, { "docid": "efe1c37c25fdeb1ee634980e33a3ca09", "score": "0.7811605", "text": "def load_model(self, filepath, custom_objects=None, compile=True):\n self.model = load_model(filepath, custom_objects=custom_objects,\n compile=compile)\n self.update_target_model_hard()", "title": "" }, { "docid": "0ade6c4368e8a4ef3fcdb65b1d0f832c", "score": "0.77950484", "text": "def load_model_from_file(self, from_file=False, file_name=None):\n if from_file and file_name is not None:\n file_path = 
os.path.join('models', file_name)\n with open(file_path, 'rb') as f:\n self.detection_model = pickle.load(f)\n\n print(\"Model Loaded\")", "title": "" }, { "docid": "78c0626d1a17e041c9c94c4d20a9dee3", "score": "0.7784185", "text": "def load(self, filename):\r\n try:\r\n self.__dict__ = pickle.load(open(filename, \"rb\"))\r\n except:\r\n print(\"ERROR: Error loading model from \" + filename)", "title": "" }, { "docid": "cee301ffe176522f3033b96baaa28764", "score": "0.7751828", "text": "def load(self, filename):\n checkpoint = torch.load(filename)\n self.model.load_state_dict(checkpoint['model'])\n print(f\"Loaded {self.__class__.__name__} model\")", "title": "" }, { "docid": "86c602a1ef009bfd7ea3c5bbd25b1c37", "score": "0.7698568", "text": "def load_from_saved(self, models_path,file_name):\n self.model = models.load_model(os.path.join(models_path, file_name))", "title": "" }, { "docid": "573a2bb797c4e9acce1d63231a293b9f", "score": "0.7677588", "text": "def load_model(self):\n filename = filedialog.askopenfilename()\n if filename:\n self.model_path = filename\n self.reload()", "title": "" }, { "docid": "9abad67730ffe85cda2b754e7d3699bb", "score": "0.7663492", "text": "def load(self):\n with open(self.filename_model) as f:\n self.model = model_from_json(f.read())\n self.model.load_weights(self.filename_weights,\n by_name=False)\n # TODO: save parameters and compile with them\n self._compile_model()", "title": "" }, { "docid": "15363a3248e03e344aadfbd48da44cf1", "score": "0.7613705", "text": "def load_model(filename, model):\n model_params = torch.load(str(filename))\n model.load_state_dict(model_params)\n return model", "title": "" }, { "docid": "8aca508aa9581be29cbbc872b7941372", "score": "0.7603216", "text": "def load_model(self):\n pass", "title": "" }, { "docid": "8aca508aa9581be29cbbc872b7941372", "score": "0.7603216", "text": "def load_model(self):\n pass", "title": "" }, { "docid": "8aca508aa9581be29cbbc872b7941372", "score": "0.7603216", "text": "def load_model(self):\n pass", "title": "" }, { "docid": "8aca508aa9581be29cbbc872b7941372", "score": "0.7603216", "text": "def load_model(self):\n pass", "title": "" }, { "docid": "8aca508aa9581be29cbbc872b7941372", "score": "0.7603216", "text": "def load_model(self):\n pass", "title": "" }, { "docid": "2a01fc0f0ff22fcd0aacf1a8e98a44e4", "score": "0.7592288", "text": "def load_model(self, filename):\n [self.num_layers, self.sizes, self.weights, self.biases] = np.load(\n filename, allow_pickle=True)", "title": "" }, { "docid": "76221917fc39a38687d72486402112b2", "score": "0.75899065", "text": "def load(self, file_name):\n model_data = codecs.open(file_name, 'r', encoding='utf-8').readline().strip()\n model = json.loads(model_data)\n self.emissions = model['emissions']\n self.transitions = model['transitions']\n self.tags = model['tags']\n self.vocabulary = model['vocabulary']", "title": "" }, { "docid": "510e4bbeb38aa16747cc654644fed747", "score": "0.75843215", "text": "def load_model(self, model):\n res = model\n if isinstance(model, str):\n if os.path.isfile(model):\n self.info(\"Loading model from file '{}'\".format(model))\n res = load_model(model)\n else:\n self.error(\"Loading model from file '{}': file not found\".format(model))\n return res", "title": "" }, { "docid": "6461f9c24b62fe54ef89321a1a442f4a", "score": "0.7565367", "text": "def load_model(self, filepath, load_format=\"json\"):\n # Load model\n if load_format == \"json\":\n with open(filepath, \"r\") as f:\n model_metadata = json.load(f, object_hook=_decode_helper)\n elif 
load_format == \"cbor\":\n if utilmodel.cbor2 is None:\n raise ModuleNotFoundError(\"No module named 'cbor2'\")\n with open(filepath, \"rb\") as f:\n model_metadata = utilmodel.cbor2.\\\n load(f, object_hook=_decode_helper_cbor)\n elif load_format == \"pickle\":\n with open(filepath, \"rb\") as f:\n model_metadata = pickle.load(f)\n else:\n raise ValueError(\"Wrong load format.\")\n\n for key, val in model_metadata.items():\n setattr(self, key, val)", "title": "" }, { "docid": "58ce021ae435fbb2be05804918660bd1", "score": "0.75616837", "text": "def load_model(self, filepath=None):\n filepath = filepath or self.model_base_path + '.m'\n try:\n if self.is_training():\n self.tmodel = keras.models.load_model(filepath)\n sclog('Loaded model \"{0}.\"'.format(filepath))\n except IOError:\n raise ScNetError('Error reading model file \"{0}\"'.format(filepath))", "title": "" }, { "docid": "b6bf45e7b7bdcb1037e75f931b492b1b", "score": "0.75456315", "text": "def from_file(fname):\n\n params = dict(sc.load(fname).items())\n model = Model(fname, **params)\n if \"seed\" in params:\n model.set_seed(model[\"seed\"])\n return model", "title": "" }, { "docid": "0489e4546100cda9f6653f63310f2472", "score": "0.7541842", "text": "def _load_model(self):\n pass", "title": "" }, { "docid": "2dcd7779fa579b72d9d7c2846083f5d6", "score": "0.7537971", "text": "def load(cls, path):\n logger.debug('Loading: {}'.format(cls.__name__))\n with open(path, 'rb') as f:\n model = pickle.load(f)\n\n self.model = model\n\n return self", "title": "" }, { "docid": "1b12ed244f1a56f653aaf75c1a90c087", "score": "0.75228554", "text": "def loadModel(self, saveFile=\"model.h5\"):\n pass", "title": "" }, { "docid": "0f2394a7decefae77967aac9319e91cb", "score": "0.7508142", "text": "def load_model(path_to_model : str):\n return pickle.load(open(path_to_model, 'rb'))", "title": "" }, { "docid": "fa6b63fe081f82d11f19a50c9b965e5c", "score": "0.75060314", "text": "def load_model(fpath):\n check = torch.load(fpath, map_location=torch.device(device) )\n model = check['model']\n model.load_state_dict(check['state_dict'])\n return model", "title": "" }, { "docid": "70042d3262cfcfd04215e0bcbeee5182", "score": "0.7500715", "text": "def load(self, model_file):\n if model_file:\n print('Loading the persona_model from', model_file)\n self.model.load_state_dict(torch.load(model_file))", "title": "" }, { "docid": "11bcb24708af398c3d246755d2163287", "score": "0.749948", "text": "def load_model(self, json_file):\n with open(json_file) as f:\n self.model = json.load(f)", "title": "" }, { "docid": "9698d5f88cdc155b2c21661527393fb7", "score": "0.748785", "text": "def load_model(path):\n with open(path, 'rb') as f:\n loaded_serialized_model = f.read()\n loaded_model = dill.loads(loaded_serialized_model)\n return loaded_model", "title": "" }, { "docid": "937985b1975ddcf45aea46691efb0f93", "score": "0.7484538", "text": "def load_model():\n # TODO: INSERT CODE\n # return model", "title": "" }, { "docid": "fc5780cfd914af83c4b540b45effc860", "score": "0.7474059", "text": "def load_model(model_file_name):\n model = liblinear.load_model(_cstr(model_file_name))\n if not model:\n print(\"can't open model file %s\" % model_file_name)\n return None\n model = toPyModel(model)\n return model", "title": "" }, { "docid": "b57b1706d30a942327242b97e2d1d991", "score": "0.7449266", "text": "def load_model(self, filename):\n print(f\"Requested model {filename}\\nLoading model...\")\n with open(filename, \"rb\") as handle:\n self._policy = pickle.load(handle)", "title": "" }, { "docid": 
"07ede3a355d09d3fae2f8352a14f9657", "score": "0.7427756", "text": "def load(name):\n sp = _os.path.join(curr_path, '../models', name)\n model = _pkl.load(open(_os.path.join(sp, 'model.pkl'), 'rb'))\n _logger.info(\"loaded from : {0}\".format(_os.path.join(sp, name)))\n return model", "title": "" }, { "docid": "4fa5a89963ac44a32eaa42f6aaab7596", "score": "0.73985755", "text": "def load_model(model):\n pass \n # TODO", "title": "" }, { "docid": "c7ed89bcc2d33d50befe8b77fa73b129", "score": "0.7370144", "text": "def load_model(self, path):\n\n print(colored(f'\\nLoading keras model from {path}\\n', \"green\"))\n self.model = load_model(path)", "title": "" }, { "docid": "d0aa5428f07f013e62592f25c3890729", "score": "0.7365449", "text": "def load(cls, path: utils.URLPath):\n config = io_functions.load_json(path / \"config.json\")\n\n model = keras.models.load_model(\n str(path / \"model.h5\"),\n )\n model.layers[-1].activation = keras.activations.linear\n model = vu.utils.apply_modifications(model)\n\n binarizer = io_functions.load_joblib(path / \"binarizer.joblib\")\n\n data_ids = {\n \"validation\": io_functions.load_json(path / \"ids_validate.json\"),\n \"train\": io_functions.load_json(path / \"ids_train.json\"),\n }\n return cls(model, binarizer, config, data_ids=data_ids)", "title": "" }, { "docid": "d06dd0fa7436abc5d1cb1d83a163f0fd", "score": "0.73615795", "text": "def load_model(path_to_model:str):\n return pickle.load(open(path_to_model, 'rb'))", "title": "" }, { "docid": "1214cf95de961dec1a913529454fec56", "score": "0.7359912", "text": "def loadModel(self):\n pass", "title": "" }, { "docid": "41b856448960dee7061352511105590f", "score": "0.7355754", "text": "def load(self, path='model/'):\r\n if path[-1] != '/':\r\n path = path + '/'\r\n\r\n self.model = tf.keras.models.load_model(path + 'model.h5')\r\n with open(path+'dataset.pickle', 'rb') as f:\r\n self.dataset = pickle.load(f)", "title": "" }, { "docid": "92c6b6d8e557dc649fc6ddfe2f6a3de1", "score": "0.73541504", "text": "def load(self, filename):\n\n logger.info(\"Loading model from %s\", filename)\n\n # Load settings and create model\n logger.debug(\"Loading settings from %s_settings.json\", filename)\n with open(filename + \"_settings.json\", \"r\") as f:\n settings = json.load(f)\n self._unwrap_settings(settings)\n self._create_model()\n\n # Load scaling\n try:\n self.x_scaling_means = np.load(filename + \"_x_means.npy\")\n self.x_scaling_stds = np.load(filename + \"_x_stds.npy\")\n logger.debug(\n \" Found input scaling information: means %s, stds %s\", self.x_scaling_means, self.x_scaling_stds\n )\n except FileNotFoundError:\n logger.warning(\"Scaling information not found in %s\", filename)\n self.x_scaling_means = None\n self.x_scaling_stds = None\n\n # Load state dict\n logger.debug(\"Loading state dictionary from %s_state_dict.pt\", filename)\n self.model.load_state_dict(torch.load(filename + \"_state_dict.pt\", map_location=\"cpu\"))", "title": "" }, { "docid": "c37811897f738402f426a77f5a09bb08", "score": "0.73525953", "text": "def load_model(self, filename):\n model = np.load(f\"models/{filename}\", allow_pickle=True)\n self.beta = model[\"beta\"].reshape(-1, 1)", "title": "" }, { "docid": "a4bdeb336974dbe876ccb8ae1f537fb4", "score": "0.7343479", "text": "def read_model( path ):\n path = os.path.join(models_folder,path + '.pck' )\n with open( path , 'r') as f:\n model = pickle.load(f)\n return model", "title": "" }, { "docid": "0e52742ca2fd5cb07456c9414ffb2c65", "score": "0.73281676", "text": "def load_model(filename):\n 
state = torch.load(filename)\n class_name = state['class_name']\n \n classes = {\n 'BertClassifier': BertClassifier,\n 'BertRegressor' : BertRegressor}\n\n model_ctor = classes[class_name]\n model = model_ctor(restore_file = filename)\n return model", "title": "" }, { "docid": "eebdaa29bc97c5be1781640b30157bc2", "score": "0.7319745", "text": "def load_model(self, name='model', model='model'):\n model_path = self.config.get('model', 'model_path').format(name)\n print(model_path)\n self.__dict__[model] = joblib.load(model_path)", "title": "" }, { "docid": "4ff07421afa0192264f18ce7c0b266f8", "score": "0.73111016", "text": "def load(self):\n self.model = load_model(self.model_path)\n return self.model", "title": "" }, { "docid": "65e4c9d48fa0bbf4820579a23df2cc13", "score": "0.73080707", "text": "def load_model(fp: str):\n return tf.saved_model.load(fp)", "title": "" }, { "docid": "44c37ed227c1103c444f5ae1e7722a10", "score": "0.7304927", "text": "def load_model(filename):\n\n from keras.models import load_model\n return load_model(filename)", "title": "" }, { "docid": "ff7b56fa23a477d05f6f1a9f0e9a7717", "score": "0.72989595", "text": "def load_model(filename):\n model = LinearModel1()\n\n with open(filename, 'rb') as file:\n params_dict = pickle.load(file)\n\n model.num_inputs = params_dict[\"num_inputs\"]\n model.num_weights = params_dict[\"num_weights\"]\n model.layers = params_dict[\"layers\"]\n model.weights = params_dict[\"weights\"]\n\n return model", "title": "" }, { "docid": "1d21555dfa6d658f565f8dbde1082828", "score": "0.7288303", "text": "def load(self,file):\n self.set_defaults()\n if \"+\" in file:\n files = file.split(\"+\")\n else:\n files = [file]\n for file in files:\n if \".pymodel\" in file:\n with open(file,\"r\") as stream:\n obj = cPickle.load(stream)\n if type(obj)==LineRecognizer:\n for k,v in obj.__dict__:\n self.__dict__[k] = v\n else:\n self.cmodel = obj\n elif \".cmodel\" in file:\n self.cmodel = ocropy.load_IModel(file)\n elif \".csize\" in file:\n self.linemodel = SimpleLineModel()\n self.linemodel.load(file)\n else:\n raise Exception(\"unknown extension\")", "title": "" }, { "docid": "e42cf43aaa617c575ebcb3df1e1782c3", "score": "0.7287264", "text": "def load_model(self, file_name):\n self.w_nodes = np.loadtxt(file_name)", "title": "" }, { "docid": "b88a46df6d34389d42ea2c361f3e8644", "score": "0.7282094", "text": "def load(self, path):\n if not path or not isinstance(path, str):\n raise ValueError(\"Please provide a valid path\")\n\n self.__model = torch.load(path)", "title": "" }, { "docid": "812947d010f33b4af84c563744697b61", "score": "0.7274042", "text": "def loadModel(self, model_file):\n with open(model_file) as f:\n self.q_table = json.load(f)", "title": "" }, { "docid": "75c32e1df1aefa9a70f54ff8aba22cb7", "score": "0.7267207", "text": "def read_model(self, model_file):\n \n f = bob.io.base.HDF5File(model_file, 'r')\n model = bob.learn.libsvm.Machine(f)\n del f\n\n return model", "title": "" }, { "docid": "e5b9a46354e9280e2e2d9de6dcceb3dd", "score": "0.7253545", "text": "def loadModel(self, model) -> None:\n ...", "title": "" }, { "docid": "3dd65fb5382c4ee880e84ce9185c989f", "score": "0.72482675", "text": "def load_model(filename):\n # https://github.com/tensorflow/docs/blob/r1.12/site/en/api_docs/python\n # /tf/keras/models/load_model.md\n return K.models.load_model(\n filepath=filename\n )", "title": "" }, { "docid": "72fb4d2b05c85142fb4dcc5c934dee58", "score": "0.72296286", "text": "def load_model(self):\n\t\tself.model = 
load('flask_app/pima_model.joblib')", "title": "" }, { "docid": "7bc07adea956cb3ffe7425ad81a92b4b", "score": "0.71961886", "text": "def load_model(self, weight_file): \n\t\tself.w = np.load(weight_file)", "title": "" }, { "docid": "2a097a36c4e0e34a2ac3bce3e3c3897d", "score": "0.71913564", "text": "def load(self, model_path, *args, **kwargs):\n import pickle\n with open(model_path, 'rb') as file:\n self.clf = pickle.load(model_path)\n print(f'Loaded pretrained model from {model_path}.')", "title": "" }, { "docid": "df627f0e6aacae21efb65937b8fd5cc6", "score": "0.71845657", "text": "def load_model(filename, verbose=False):\n import os\n prefix, ext = os.path.splitext(filename)\n model_loader_fn = {'off': load_off,\n 'obj': load_obj,\n 'splat': load_splat}\n\n return model_loader_fn[ext[1:]](filename, verbose)", "title": "" }, { "docid": "72c5438f059c11a4538e42312b9a9ed0", "score": "0.7182625", "text": "def load_model(name: str):\n if not os.path.exists('{}{}'.format(paths.model_path, name)):\n raise FileNotFoundError('A model with this name does not exist yet.')\n\n # Load model\n model = joblib.load(open('{}{}/model.pkl'.format(paths.model_path, name), 'rb'))\n\n # Load list of prepared features from file\n with open('{}{}/prepared_features.txt'.format(paths.model_path, name)) as f:\n prepared_features = f.read().splitlines()\n\n # Load dict of actual features\n with open('{}{}/features.json'.format(paths.model_path, name)) as handle:\n features = json.loads(handle.read())\n\n return model, prepared_features, features", "title": "" }, { "docid": "bcb8346692cef1d33c2c6242075f0622", "score": "0.7180663", "text": "def load_model(self, train = False,fileName = 'best_predictor'):\n \n if not train:\n self.predictor = joblib.load(fileName + '.pkl')\n else:\n self.train_model()\n return", "title": "" }, { "docid": "6d24f9644735a8843ce5e9db1acec9e6", "score": "0.717884", "text": "def load_model(file_path):\n model = HabrHubRatingRegressor('')\n model.load(file_path)\n return model", "title": "" }, { "docid": "d97fe121b9d11ae7224652699d6982d6", "score": "0.7170411", "text": "def load_model(path_to_model):\n model_path = os.path.join(path_to_model)\n model = pickle.load(open(model_path, 'rb'))\n return model", "title": "" }, { "docid": "a8ad50792c682c0c84e97e70c8cb3aa1", "score": "0.716444", "text": "def load(self):\n model_file, _ = self.get_model('.pt')\n device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')\n model = BertSentimentClassifier(n_classes=3)\n model.load_state_dict(torch.load(model_file, map_location=device))\n model.eval()\n self.model = model", "title": "" }, { "docid": "e3f9e83616b60e13deb9310669b85bcb", "score": "0.715968", "text": "def load_savefile(cls, filename):\n model = cls.load(filename)\n\n # check that this actually loaded a model object\n # load() will load any gnome object from json...\n if not isinstance(model, cls):\n raise ValueError('This does not appear to be a save file '\n 'for a model\\n'\n 'loaded a {} instead'\n .format(type(model)))\n else:\n return model", "title": "" }, { "docid": "a7023f701d45dd0d22a23b36f69b51a1", "score": "0.71596754", "text": "def loadModel(self, fileName):\n self.model = load_model('{}'.format(fileName))\n # getting the model filter numbers\n thirdDimension = self.model.input.shape[2]\n self.column_nbr = thirdDimension.__int__()", "title": "" }, { "docid": "24758200ccb5a3737314b566b2f8f956", "score": "0.71585226", "text": "def load(self, path_file): \r\n\r\n # Loading the model\r\n 
super(NonParametricModel, self).load(path_file)\r\n\r\n # Re-introduce the C++ object\r\n if 'smooth' in self.name.lower() :\r\n self.model = _KernelModel(self.bandwidth, self.kernel_type)\r\n else:\r\n self.model = _KaplanMeierModel()\r\n self.load_properties()", "title": "" }, { "docid": "1b4ea148bd9c9522995b340647fcb100", "score": "0.7156503", "text": "def load_model(self, weight_file): \n\n\t\tself.w = np.load(weight_file)", "title": "" }, { "docid": "bcc45d3041992ae57319bf77758b33d5", "score": "0.71430284", "text": "def loadModel(path):\n model = load_model(os.path.join(path,'model.h5'))\n return model", "title": "" }, { "docid": "1a22047c82adfe2155462643ecef6118", "score": "0.71331954", "text": "def load_model():\n # print(\"LOADING THE MODEL...\")\n with open(MODEL_PATH, \"rb\") as model_file:\n saved_model = pickle.load(model_file)\n return saved_model", "title": "" }, { "docid": "661842d8986f95c4507020860835d5e0", "score": "0.7133159", "text": "def load_model(model_name=None):\n if model_name is None:\n model_filepath = find_latest_model_filepath()\n else:\n model_filepath = get_model_filepath(model_name)\n model = load(model_filepath)\n return model", "title": "" }, { "docid": "d85a8e0f06f0992f214c4d0ff9ef90c8", "score": "0.7130847", "text": "def load_model(self):\n saved_path = Path(self.config.model_load_path).resolve()\n if saved_path.exists():\n self.build_model()\n self.model.load_state_dict(torch.load(str(saved_path)))\n self.model.eval()", "title": "" }, { "docid": "f33cea078922aea9d3b908843712d5a4", "score": "0.71295893", "text": "def load_model(path):\n res = H2OConnection.post_json(\"Models.bin/\",dir=path,_rest_version=99)\n return get_model(res['models'][0]['model_id']['name'])", "title": "" }, { "docid": "850849eba82584a36d70b6c4ac5673be", "score": "0.71292263", "text": "def load_model(self) -> None:\n\n try:\n model_class, tokenizer_class, preprocessing_function = MODEL_TYPES[\n self.model_type\n ]\n except KeyError:\n raise KeyError(f\"model type: {self.model_type} not supported\")\n if (\n os.path.exists(self.resources_path)\n and len(os.listdir(self.resources_path)) > 0\n ):\n model_name_or_path = self.resources_path\n else:\n model_name_or_path = self.model_name\n self.preprocessing_function = preprocessing_function\n self.tokenizer = tokenizer_class.from_pretrained( # type:ignore\n model_name_or_path\n )\n self.model = model_class.from_pretrained(model_name_or_path)\n self.model.to(self.device)\n\n # adjusting length\n if self.model_type == \"auto-seq2seq-lm\":\n self.length = adjust_length_to_model(\n self.length, self.tokenizer.model_max_length\n )\n else:\n self.length = adjust_length_to_model(\n self.length, self.model.config.max_position_embeddings\n )", "title": "" }, { "docid": "fa32036f63f638d39900c1c427cee9e8", "score": "0.71258354", "text": "def load(self, file_name):\n super(NeuralNet, self).load(file_name)\n self._model = models.load_model(file_name)\n read_list = joblib.load('{}_nn'.format(file_name))\n self._stopped_epoch = read_list[0]\n self._inference_model = None\n self._inference_batch_size = None", "title": "" }, { "docid": "be04a8c26525e1cd646517c6dcfeead4", "score": "0.71250165", "text": "def load_model(cls, utool, repo, filename):\n data = repo.yaml(filename, encoding='utf-8')\n cls.check_compatibility(data)\n cls._load_params(data, utool, repo, 'beam')\n cls._load_params(data, utool, repo, 'twiss')\n session = cls(utool, repo)\n session.data = data\n for f in data.get('init-files', []):\n session.call(f)\n return session", "title": "" }, 
{ "docid": "1ffabcbf5009957ae58b3c5b7b922b24", "score": "0.71245676", "text": "def load_model():\n with open('./app/assets/iris_svc.pkl', 'rb') as model_file:\n loaded_model = pickle.load(model_file)\n\n return loaded_model", "title": "" }, { "docid": "3d6889867f41ce46d658f8c3c8faf0ad", "score": "0.71179694", "text": "def load_model(self, model_file_path):\n with tf.gfile.GFile(model_file_path, 'rb') as fid:\n modelgraph_def = tf.GraphDef()\n modelgraph_def.ParseFromString(fid.read())\n\n self.modelgraph_def = modelgraph_def", "title": "" }, { "docid": "c891b872e82f065c05437d1c0aed0682", "score": "0.7109629", "text": "def model_load(fn):\n\tif args.cluster:\n\t\tpass\n\twith open(fn, 'rb') as f:\n\t\tmodel, criterion, optimizer = torch.load(f)\n\treturn model, criterion, optimizer", "title": "" }, { "docid": "e0beafa546cad31a7e619dd6ff8632df", "score": "0.7106125", "text": "def load(*args, **kwargs) -> keras.Model:\n pass", "title": "" }, { "docid": "6a1bcbf1ad373ab4b2c6f580aaceffde", "score": "0.70929396", "text": "def readModel(self, path) -> None:\n ...", "title": "" }, { "docid": "a181204d37f68203a2db6b431f3ad4ef", "score": "0.7079087", "text": "def load(filepath):\r\n r = np.load(filepath)\r\n if isinstance(r,BaseRecommender):\r\n model = r\r\n else:\r\n model = np.loads(str(r['model']))\r\n model._load_archive(r) # restore any fields serialized separately\r\n return model", "title": "" }, { "docid": "53f75f24f783c71103106b818e64e1f7", "score": "0.7063569", "text": "def load_model(filename, model_name):\n p = Persistor(filename)\n if model_name in p.list_available_models():\n data_dict = p.get_model_details(model_name)\n model = registered_models[data_dict['class']](**data_dict[\"saved_data\"])\n else:\n raise NameError(\"Model name {} has not yet been created.\".format(model_name))\n return model", "title": "" }, { "docid": "63b96dcda1333bd425035aa084d71d04", "score": "0.70591897", "text": "def load(self, path):\n with path.open('rb') as f:\n weights = torch.load(f)\n load_model_from_dict(self, weights)\n return self", "title": "" }, { "docid": "24e508760b13595ab5cecb160fd7708f", "score": "0.7046039", "text": "def loadModel(self,model_path=''):\r\n cache_dir = os.path.join(os.path.expanduser('~'), '.faceai')\r\n\r\n if (self.__modelLoaded == False):\r\n if(self.__modelType == \"\"):\r\n raise ValueError(\"You must set a valid model type before loading the model.\")\r\n elif(self.__modelType == \"dan\"):\r\n des_file = '/'.join((cache_dir,self.__modelType))\r\n self.modelPath = download_file_from_google_drive(self.__model_id[self.__modelType], des_file)\r\n model = dan(self.modelPath)\r\n self.__model_collection.append(model)\r\n self.__modelLoaded = True\r\n elif (self.__modelType == \"prnet\"):\r\n des_file = '/'.join((cache_dir, self.__modelType))\r\n self.modelPath = download_file_from_google_drive(self.__model_id[self.__modelType], des_file)\r\n model = PRN(self.modelPath)\r\n self.__model_collection.append(model)\r\n self.__modelLoaded = True", "title": "" }, { "docid": "9857f3a89b1713f1cac07a034e043498", "score": "0.7043509", "text": "def load_model():\n return joblib.load(MODEL_LOCAL_PATH)", "title": "" } ]
3b7c4cb44eafac4ca3a16bc12bd88e8c
Constructor of a E3DC object (does not connect)
[ { "docid": "e96047e58faa3af1385bd64817240138", "score": "0.0", "text": "def __init__(self, connectType, **kwargs):\n \n self.connectType = connectType\n self.username = kwargs['username']\n if connectType == self.CONNECT_LOCAL:\n self.ip = kwargs['ipAddress']\n self.key = kwargs['key']\n self.password = kwargs['password']\n self.rscp = E3DC_RSCP_local(self.username, self.password, self.ip, self.key)\n self.poll = self.poll_rscp\n \n else:\n self.serialNumber = kwargs['serialNumber']\n if 'isPasswordMd5' in kwargs:\n if kwargs['isPasswordMd5'] == True:\n self.password = kwargs['password']\n else:\n self.password = hashlib.md5(kwargs['password']).hexdigest()\n self.rscp = E3DC_RSCP_web(self.username, self.password, self.serialNumber)\n self.poll = self.poll_ajax\n \n self.jar = None\n self.guid = \"GUID-\" + str(uuid.uuid1())\n self.lastRequestTime = -1\n self.lastRequest = None\n self.connected = False\n self.idleCharge = None\n self.idleDischarge = None", "title": "" } ]
[ { "docid": "96ef8aefbc4172ef186932c042c41f2c", "score": "0.70455325", "text": "def __init__(self, dmc, dc):\n self.dmc = dmc\n self.dc = dc", "title": "" }, { "docid": "c17b28b949b3c432f89cf53a2bd9acb5", "score": "0.65099466", "text": "def __init__(self):\n\t\tself.c = Cortex(user, debug_mode=True)\n\t\tself.c.bind(new_com_data=self.on_new_data)", "title": "" }, { "docid": "63e0d3108a7c03c73f3db7139fbe9d2e", "score": "0.647518", "text": "def __init__(self):\n\t\tself.c = Cortex(user, debug_mode=True)\n\t\tself.c.bind(new_fe_data=self.on_new_data)", "title": "" }, { "docid": "2ec9d90fa99379bd38743cbf95ad7484", "score": "0.6467903", "text": "def __init__(self, *args):\n this = _btk.new_btkC3DFileIO(*args)\n try: self.this.append(this)\n except: self.this = this", "title": "" }, { "docid": "3768b3de71b27d173f5b530d90304ab3", "score": "0.6441685", "text": "def __init__(self, c = None, p = None, d1 = None, d2 = None, obj = None, s = None, f = None):\n self.c = c\n self.p = p\n self.d1 = d1\n self.d2 = d2\n self.obj = obj\n self.s = s\n self.f = f", "title": "" }, { "docid": "ef3271b8bfb21e82aecab047dd86ea08", "score": "0.6313943", "text": "def __init__(self, database, dbid, stable_id, flat_exons):\n\n self.database = database\n self.dbid = dbid\n self.stable_id = stable_id\n self.flat_exons = flat_exons", "title": "" }, { "docid": "c88b8db1f74693e211dc503e57b8c497", "score": "0.63048095", "text": "def __init__(self, env, dataset):\n self.env = env\n self.ds = dataset\n self.dc_strings = []\n self.dcs = []", "title": "" }, { "docid": "855e77a4d97f38d7f52deaf70123446c", "score": "0.6300469", "text": "def __init__(self):\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_D)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_A)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_C)\n self.touch_sensor = ev3.TouchSensor()\n self.color_sensor = ev3.ColorSensor()\n self.ir_sensor = ev3.InfraredSensor()\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n self.messenger = None\n\n assert self.arm_motor.connected\n assert self.touch_sensor\n assert self.left_motor.connected\n assert self.right_motor.connected\n assert self.color_sensor\n assert self.ir_sensor\n assert self.pixy", "title": "" }, { "docid": "a6861bb8185d0df01d00702c7fb25634", "score": "0.62798876", "text": "def __init__(self, \n eccopath='../../data/ECCOv4r2'\n ):\n\n\n if not os.path.isdir(eccopath):\n raise ValueError(\"Check your eccopath. 
\"\n \"The directory %s does not exist\" % eccopath)\n\n self.eccopath = eccopath\n self.fieldDescriptions = get_ecco_field_descriptions()", "title": "" }, { "docid": "3673d210a604a8068fc7ce18365d1100", "score": "0.62667656", "text": "def __init__(self, inputs, dc_prev, obs, equatorial=True):\n self.dc_prev = ee.Image(dc_prev)\n self.temp = inputs.temp\n self.rain = inputs.rain\n self.obs = obs\n self.equatorial = equatorial", "title": "" }, { "docid": "b298935009e8230d5176639d91e4620c", "score": "0.624659", "text": "def setUpElectrode(self):\n self.device = Device(name='device_name')\n self.elec = IntracellularElectrode(name=\"elec0\", slice='tissue slice',\n resistance='something measured in ohms',\n seal='sealing method', description='a fake electrode object',\n location='Springfield Elementary School',\n filtering='a meaningless free-form text field',\n initial_access_resistance='I guess this changes',\n device=self.device)", "title": "" }, { "docid": "c9af7f3cf26937289f8226da263cac5c", "score": "0.6231736", "text": "def __init__(self, *args, **kwargs):\n kwargs = self.__pop_kwargs(**kwargs)\n DCM.__init__(self, *args, **kwargs)", "title": "" }, { "docid": "74f83354b7a9e264ca87ff95f078ca75", "score": "0.6208187", "text": "def __init__(self, data, device):\n AbodeDevice.__init__(self, data, device)", "title": "" }, { "docid": "c1521c70507229af44b72f5086f14ecd", "score": "0.6185272", "text": "def __init__(self, eid, nodes, comment=''):\n BaseCard.__init__(self)\n if comment:\n self.comment = comment\n self.eid = eid\n self.nodes = nodes\n self.nodes_ref = None", "title": "" }, { "docid": "3e12c93b6dcdf595d03b4f1650353796", "score": "0.6164022", "text": "def __init__(self, sid, q0, cntrlnd, eids, comment=''):\n ThermalLoad.__init__(self)\n if comment:\n self.comment = comment\n\n #: Load set identification number. (Integer > 0)\n self.sid = sid\n\n #: Heat flux into element\n self.q0 = q0\n\n #: Control point for thermal flux load. (Integer > 0; Default = 0)\n self.cntrlnd = cntrlnd\n\n #: CHBDYj element identification numbers\n self.eids = expand_thru_by(eids)\n self.eids_ref = None", "title": "" }, { "docid": "6cd992b24ae18602b664d0c3a83291fe", "score": "0.61507595", "text": "def __init__(self, record):\n super(KCubeDCServo, self).__init__(record, KCube_DCServo_FCNS)", "title": "" }, { "docid": "915c7b768743e1545d8db4971be6a6bc", "score": "0.61379725", "text": "def __init__(self):\n raise Exception(\"This class can't be created directly. 
\" +\n \"Please use: ldns_rdf_new, ldns_rdf_new_frm_data, \" +\n \"ldns_rdf_new_frm_str, ldns_rdf_new_frm_fp, \" +\n \"ldns_rdf_new_frm_fp_l\")", "title": "" }, { "docid": "0df487641e652d4676690725dfbfc1e7", "score": "0.6125727", "text": "def __init__(self, db_host = None, db_name = None, db_user = None, db_pw = None, db_port = 25060):\n print('Initializing Embedding Object')\n self.db_connection = None\n # DB/embeddings connection/filepath details\n self.db_host = db_host\n self.db_name = db_name\n self.db_user = db_user\n self.db_pw = db_pw\n self.db_port = db_port\n\n if db_host is not None and db_name is not None and db_user is not None and db_pw is not None and db_port is not None:\n self.__connect_to_db()\n else:\n # We have a problem...\n raise Exception(\"Please initialize Embeddings with fuil DB connection parameters.\")", "title": "" }, { "docid": "5be3281c079f489f7e5dbd9a89c6cfcb", "score": "0.61154634", "text": "def __init__(self, config_file):\n from pyecobee import Ecobee\n self.ecobee = Ecobee(config_file)", "title": "" }, { "docid": "81d2c00ed6f6b8627b3c1db8f9e9af53", "score": "0.6102702", "text": "def __init__(self,eds_file):\n self.eds_file = eds_file\n self.I1 = I100(1,self.eds_file)\n self.T2 = T100(2,self.eds_file)\n self.T3 = T100(3,self.eds_file)\n self.T4 = T100(4,self.eds_file)\n self.I5 = I100(5,self.eds_file)\n self.G0 = G100(6,self.eds_file)\n self.G6 = G100(7,self.eds_file)\n print \"Climbot5d init is over!\"", "title": "" }, { "docid": "4c05b616edd09858bff6d7eee106256e", "score": "0.6101939", "text": "def __init__(self, a, b, c):\n\t\tself.a = a\n\t\tself.b = b \n\t\tself.c = c", "title": "" }, { "docid": "c7623b7e195129899870852aede06f74", "score": "0.60990614", "text": "def __init__(self, *args):\n this = _render3d.new_CDynamicTexture(*args)\n try: self.this.append(this)\n except: self.this = this", "title": "" }, { "docid": "f8f82f4342015ebc09c298fac233162d", "score": "0.60848695", "text": "def __init__(self, *args):\n args_len = len(args)\n if args_len in [1,2] and type(args[0]) == NationalInstruments.VeriStand.SystemDefinitionAPI.DAQCM_Active_Edge:\n self._dotnet_instance = args[0]\n self._py_field_name = args[1] if args_len == 2 else \"\"\n else:\n raise ValueError(\"No instance constructor for DAQCM_Active_Edge\")", "title": "" }, { "docid": "1ca790e12ea9349dbdbd88de7fb9a624", "score": "0.60756105", "text": "def __init__(self, **kwargs):\n # Initialise superclass\n super(HexitecAdapter, self).__init__(**kwargs)\n\n self.hexitec = Hexitec(self.options)\n\n self.adapters = {}\n\n logging.debug('HexitecAdapter loaded')", "title": "" }, { "docid": "5956d4900aa2dd9fcb852fedf4309817", "score": "0.6068407", "text": "def __init__(self, context):\n self.context = context\n self.attached = 0\n self.opened = 0\n\n # ignoring caller parameters for now\n\n self.uuid = (ctypes.c_ubyte * 1)(0)\n self.coh = ctypes.c_uint64(0)\n self.poh = ctypes.c_uint64(0)\n self.info = daos_cref.ContInfo()\n # Get access to container input params\n self.input = DaosInputParams()\n # Export the cont create params structure for user.\n self.cont_input_values = self.input.get_con_create_params()\n self.cont_prop = None", "title": "" }, { "docid": "2a48be96336c85be3f86ebd1da6a062a", "score": "0.6067063", "text": "def setUpContainer(self):\n self.device = Device(name='device_name')\n elec = IntracellularElectrode(\n name=\"elec0\",\n slice='tissue slice',\n resistance='something measured in ohms',\n seal='sealing method',\n description='a fake electrode object',\n location='Springfield 
Elementary School',\n filtering='a meaningless free-form text field',\n initial_access_resistance='I guess this changes',\n device=self.device,\n cell_id=\"this_cell\",\n )\n return elec", "title": "" }, { "docid": "2f58df071070c26b5fa2a4135f8f72cf", "score": "0.60528415", "text": "def __init__(self, c):\n self.c = c", "title": "" }, { "docid": "e7f9ba0e40f2107de189b6f3adbd5a89", "score": "0.60464287", "text": "def __init__(self):\n\t\tprint \"C init\"\n\t\tsuper(C, self).__init__()", "title": "" }, { "docid": "91409075787f62aca549d3a528416beb", "score": "0.6025097", "text": "def __init__(self, *args):\n this = _render3d.new_CFileTexture(*args)\n try: self.this.append(this)\n except: self.this = this", "title": "" }, { "docid": "7373cf749560e94c716b64ba4ff1d707", "score": "0.6020464", "text": "def __init__(self, *args):\n _itkQuadEdgeMeshPointPython.itkQuadEdgeMeshPointF3_swiginit(self,_itkQuadEdgeMeshPointPython.new_itkQuadEdgeMeshPointF3(*args))", "title": "" }, { "docid": "1ea9b355a6c6e5888ac8305be4b4d893", "score": "0.5992727", "text": "def __init__(self, dcgid, db_conn, preload=None):\n self._db = db_conn\n self._dcgid = int(dcgid)\n if preload:\n self._data = preload", "title": "" }, { "docid": "16391de1a2ac3f27ace0386d758228ac", "score": "0.5978737", "text": "def __init__(self,tdb):\n self.qry=libtokyocabinet.tctdbqrynew(tdb.tdb)", "title": "" }, { "docid": "35d8b9ec776f499d558ba3f0f1eb0c5c", "score": "0.5963176", "text": "def _constructor(self):\n return ChemPanel", "title": "" }, { "docid": "43bbf3a1c48ad7b35a753f0dc77407d6", "score": "0.5959922", "text": "def __init__(self, E):\n self.E = E", "title": "" }, { "docid": "a3c2f1824d4d7581d5e2dba39463892b", "score": "0.59516466", "text": "def __init__(self, commcell, csdb):\n\n self.log = logger.get_log()\n self.commcell = commcell\n self.csdb = csdb\n self.commserv_name = self.commcell.commserv_name\n self.base_dir = self.get_base_dir()", "title": "" }, { "docid": "f46201eb0cedf64d52ff98afda4756e2", "score": "0.5951601", "text": "def __init__(self, addr):\n XBeeDevice.__init__(self, addr)\n # Verify that device is truly a LTH Adapter\n if self.product_type != XBeeSensorLTHAdapter:\n raise ValueError, \"Adapter is not a %s\" % (GetXBeeProductName(XBeeSensorLTHAdapter))\n\n # The XBee needs to have analog IO 1, 2, and 3 (pins 19, 18, 17) set to 'ADC'\n self.XBeeCommandSet(\"d1\", 2)\n self.XBeeCommandSet(\"d2\", 2)\n self.XBeeCommandSet(\"d3\", 2)\n self.XBeeCommandSet(\"wr\", \"\")\n self.XBeeCommandSet(\"ac\", \"\")", "title": "" }, { "docid": "7ee234da96be484e05b8b8e975bcb224", "score": "0.59502715", "text": "def __init__(self, name: str, port: str, emu: str, file: str):\r\n logger.debug(f'Initializing with name: {name}, port: {port}, emu: {emu} and file: {file}')\r\n self.name = name\r\n self.port = port\r\n self.emu = emu\r\n self.file = file\r\n self.bot = None\r\n self.device = None\r\n self.screen = None\r\n self.location = 'unknown'", "title": "" }, { "docid": "af999782ddd0868915108ede1bdf9049", "score": "0.5942507", "text": "def __init__(self) -> None:\n super(COCODataset, self).__init__()\n self._state[\"dataset\"] = None", "title": "" }, { "docid": "c62102ade4373aff6ce59d8d61dec0c0", "score": "0.59257764", "text": "def __init__(self, ioc, bl_prefix):\n\n self.epics_environment = {}\n self.ioc_type = ioc['type']\n self.basename = self.ioc_type[2:].lower()\n\n # These epics environment variables are set by the user for each IOC\n self.user_entered_env = ['ENGINEER', 'PORT', 'IOC', 'CAM-CONNECT', 'PREFIX', 'CTPREFIX', 
'HOSTNAME', 'IOCNAME']\n \n self.asyn_port = ioc['asyn_port']\n self.epics_environment['PORT'] = self.asyn_port\n self.epics_environment['IOC'] = 'ioc{}'.format(self.ioc_type)\n self.epics_environment['EPICS_CA_AUTO_ADDR_LIST'] = 'NO'\n self.epics_environment['EPICS_CA_ADDR_LIST'] = \"NA\"\n self.epics_environment['EPICS_CA_MAX_ARRAY_BYTES'] = \"6000000\" \n \n self.connection = ioc['connection']\n \n self.ioc_prefix = ioc['device_prefix']\n self.epics_environment['PREFIX'] = '{}{}'.format(bl_prefix, self.ioc_prefix)\n self.epics_environment['CTPREFIX'] = '{}{}'.format(bl_prefix, self.ioc_prefix)\n \n self.ioc_port = ioc['telnet_port']\n self.ioc_name = ioc['name']\n self.epics_environment['IOCNAME'] = self.ioc_name", "title": "" }, { "docid": "509418f53d6fc872f015024376e2b644", "score": "0.59251577", "text": "def __init__(self, *args):\n this = _pymaxwell.new_Cbase(*args)\n try: self.this.append(this)\n except: self.this = this", "title": "" }, { "docid": "4ee5f27409593fffeebc140fab6d07fa", "score": "0.5923615", "text": "def __init__(self, course_id=None, conflicting_changes=None):\n self._course_id = course_id\n self._conflicting_changes = conflicting_changes\n\n self.logger = logging.getLogger(\"py3canvas.Exceptionrecord\")", "title": "" }, { "docid": "75444c7d2ae6343c0eabd7a4f7b7b976", "score": "0.5920104", "text": "def __init__(self,\n aag_databases=None,\n application_node=None,\n databases=None,\n error_message=None,\n unknown_host_name=None,\n ):\n\n # Initialize members of the class\n self.aag_databases = aag_databases\n self.application_node = application_node\n self.databases = databases\n self.error_message = error_message\n self.unknown_host_name = unknown_host_name", "title": "" }, { "docid": "f28b15885d7e1746ea0e4195e6f9576f", "score": "0.59164673", "text": "def __init__(self):\n self.dc={}", "title": "" }, { "docid": "459730e257b5a39d0bdbfe213dba792f", "score": "0.59145546", "text": "def __init__(self, adb, serial):\n self.adb = adb\n self.serial = serial", "title": "" }, { "docid": "5f2241b2cbfd075a341e3809a8180041", "score": "0.59112775", "text": "def __init__(self, *args):\n this = _render3d.new_CDynTexture(*args)\n try: self.this.append(this)\n except: self.this = this", "title": "" }, { "docid": "9e3d2dd2bba4b3945a0733e4d4d803c3", "score": "0.5903717", "text": "def __init__(self):\n\t\t\n\t\t#if settings.database['database'] == 'neo4j': #I removed this to remove the dependency on Neo4J, which no one will likely use\n\t\t#\tself.database = Neo4J() #initialize connection to Neo4J\n\t\t\t\n\t\tif settings.database['database'] == 'flatFile':\n\t\t\tself.database = FlatFileDB()", "title": "" }, { "docid": "adaf3b2507c85e3eef6cedb624e08732", "score": "0.5897579", "text": "def __init__(self, *args, **kwargs):\n self.device = ChromiumOSDevice(*args, **kwargs)", "title": "" }, { "docid": "6a7fd38e12b386589b78e9b887362532", "score": "0.5892199", "text": "def __init__(self, username=None, upass=None, dlog=None, debug=0, logname='sf3.base', setupLog=True):\r\n sForceApi3.__init__(self, username=username, upass=upass, dlog=dlog, debug=debug, logname=logname)\r\n self.setupBase(setupLog=setupLog)\r\n self.setupChild()", "title": "" }, { "docid": "39248748ba5bc7a4d1bc5f56f6bb6abb", "score": "0.5884447", "text": "def __init__(self,\n datastore_id=None,\n disable_network=None,\n network_id=None,\n powered_on=None,\n prefix=None,\n preserve_tags=None,\n resource_id=None,\n suffix=None,\n ):\n\n # Initialize members of the class\n self.datastore_id = datastore_id\n self.disable_network = 
disable_network\n self.network_id = network_id\n self.powered_on = powered_on\n self.prefix = prefix\n self.preserve_tags = preserve_tags\n self.resource_id = resource_id\n self.suffix = suffix", "title": "" }, { "docid": "02c1261b1ab70952f9b0e76e45bca970", "score": "0.5880686", "text": "def __init__(self, diagnostic=None, pulseNumber=None, experiment='AUGD', edition=0):\n self.diaref = ctypes.c_int32(0)\n if diagnostic!=None and pulseNumber!=None:\n self.open(diagnostic, pulseNumber, experiment, edition)", "title": "" }, { "docid": "88bc586d7c5a67490137c2a4007eccf8", "score": "0.587725", "text": "def __init__(self, cdid, cdtitle, cdartist):\r\n self.__id = cdid\r\n self.__title = cdtitle\r\n self.__artist = cdartist", "title": "" }, { "docid": "85f053102a8df4ac574921cfea77b9f5", "score": "0.58740586", "text": "def __init__(self, experiment_id: int, db: DataReader) -> None:\n super().__init__(experiment_id, db)", "title": "" }, { "docid": "3dc208242e730583292b2860c3e549df", "score": "0.5868588", "text": "def __init__(self, *args, **kwargs):\n super(EB_BerkeleyGW, self).__init__(*args, **kwargs)", "title": "" }, { "docid": "b2098c72dc3f08f109ee9a16239a4c35", "score": "0.5864307", "text": "def __init__(self, *args):\n this = _render3d.new_CMovieTexture(*args)\n try: self.this.append(this)\n except: self.this = this", "title": "" }, { "docid": "3236e12a21c98d02fca4d39466ec4450", "score": "0.5858529", "text": "def __init__(self, **kwargs):\n pass", "title": "" }, { "docid": "3236e12a21c98d02fca4d39466ec4450", "score": "0.5858529", "text": "def __init__(self, **kwargs):\n pass", "title": "" }, { "docid": "3236e12a21c98d02fca4d39466ec4450", "score": "0.5858529", "text": "def __init__(self, **kwargs):\n pass", "title": "" }, { "docid": "807e0e239625f4819b9d930f415ed490", "score": "0.5858068", "text": "def __init__(self, host, user, password, database, port):\n self.connptr = lib4d_sql.fourd_init()\n self.cursors = []\n if self.connptr == ffi.NULL:\n raise InterfaceError(\"Unable to intialize connection object\")\n\n connected = lib4d_sql.fourd_connect(self.connptr,\n host.encode('utf-8'),\n user.encode('utf-8'),\n password.encode('utf-8'),\n database.encode('utf-8'),\n port)\n if connected != 0:\n self.connected = False\n raise OperationalError(\"Unable to connect to 4D Server: {}\".format(ffi.string(self.connptr.error_string)))\n else:\n self.connected = True\n self.__private_cursor__ = self.cursor()", "title": "" }, { "docid": "619e7433c01ba2b73a17caad335b3457", "score": "0.5855063", "text": "def __init__(self):\n raise Exception(\"This class can't be created directly. 
\" +\n \"Please use: ldns_pkt_new, ldns_pkt_query_new \" +\n \"or ldns_pkt_query_new_frm_str\")", "title": "" }, { "docid": "2a36caa4c9fadcb2e88d51a18d2f4e91", "score": "0.5854685", "text": "def __init__(self, x=0, y=0, z=0, id=0):\n self.x = x\n self.y = y\n self.z = z\n self.id = id", "title": "" }, { "docid": "539bbb3e7aea0830566824397a87a89b", "score": "0.5845759", "text": "def __init__(self, item: ThreeDimDiagramItem):\n data: type_data = {'x': [], 'y': [], 'z': []}\n\n super().__init__(data, item, '3d')", "title": "" }, { "docid": "10227f5c84e4d1ad1fc4656908c5998a", "score": "0.5845487", "text": "def __init__(self, *args , **kargs):\n neo.core.Block.__init__(self, *args, **kargs)\n OEBase.__init__(self, *args, **kargs)", "title": "" }, { "docid": "5ef2c773e1ef38d627b24b143b58f155", "score": "0.5843756", "text": "def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z", "title": "" }, { "docid": "d00a42b154d061b1d7815d5e99444b14", "score": "0.58428264", "text": "def __init__(self, *args):\n _itkVectorContainerPython.vectoritkVectorD3_swiginit(self,_itkVectorContainerPython.new_vectoritkVectorD3(*args))", "title": "" }, { "docid": "03d6946f7fae521eb7e26e3c1d500cf4", "score": "0.5842731", "text": "def __init__(self, *args):\n this = _pymaxwell.new_CfBase(*args)\n try: self.this.append(this)\n except: self.this = this", "title": "" }, { "docid": "ccfb9e7e8610e770fea424c494fb7e6e", "score": "0.58400744", "text": "def __init__(self, rcfilename):\n\n self.ID = 'CarbonTracker CO2' # the identifier gives the platform name\n self.load_rc(rcfilename)\n\n logging.debug(\"Data Assimilation System initialized: %s\" % self.ID)", "title": "" }, { "docid": "8566d967be9c6cec7ba448fb3d9d3065", "score": "0.5835265", "text": "def __init__(self, entityID, certFile=constants.LOCATION_SERVICE_CERTFILE, database=constants.LOCATION_SERVICE_USER_DATABASE):\n # Set filename and logging level for log messages, and output formats.\n FORMAT = \"%(asctime)s;%(levelname)s;%(message)s\"\n DATEFORMAT = '%Y-%m-%d %H:%M:%S'\n formatter = logging.Formatter(fmt=FORMAT, datefmt=DATEFORMAT)\n self.log = logging.getLogger(__name__)\n handler = logging.FileHandler(__name__+'.log')\n self.log.setLevel(logging.DEBUG)\n handler.setFormatter(formatter)\n self.log.addHandler(handler)\n\n self.entityID = entityID\n self.certFile = certFile\n self.database = database\n self.x509 = self.readX509CertificateFromFile()\n\n # Log values of arguments.\n self.log.info(\"Database filename set: %s\", self.database)\n # Set key filenames if key files are to be used.\n self.log.info(\"User Agent object instantiated.\")", "title": "" }, { "docid": "78f8a65c61e3975ff0c3afd65dc22c72", "score": "0.5835106", "text": "def __init__(self, parameters):\n pass", "title": "" }, { "docid": "7a8416363971242a05f28ffc13dbc590", "score": "0.5833818", "text": "def __init__(self, policy, debug):\n \n # creating an output helper\n self.oh = OutputHelper(\"green\", \"EVSE\")\n\n # debug print\n self.oh.out(\"init\", \"initializing EVSE\")\n \n # setting initial parameters\n self.storage_soc = EVSE_STORAGE_CAPACITY\n self.storage_capacity = EVSE_STORAGE_CAPACITY\n self.recharge_from_storage = True\n self.charging = False\n self.policy = policy\n self.debug = debug", "title": "" }, { "docid": "4c9c64d041a1a5ebe7633748a1c861a5", "score": "0.58318514", "text": "def _constructor(self):\n return ChemSeries", "title": "" }, { "docid": "9e944858d591a2d5a5c8f25328ed9041", "score": "0.58314747", "text": "def __init__(self):\n # add the UVParameters to the 
class\n # use the same names as in UVData so they can be automatically set\n self.citation = None\n\n self._telescope_name = uvp.UVParameter(\n \"telescope_name\", description=\"name of telescope \" \"(string)\", form=\"str\"\n )\n desc = (\n \"telescope location: xyz in ITRF (earth-centered frame). \"\n \"Can also be set using telescope_location_lat_lon_alt or \"\n \"telescope_location_lat_lon_alt_degrees properties\"\n )\n self._telescope_location = uvp.LocationParameter(\n \"telescope_location\",\n description=desc,\n acceptable_range=(6.35e6, 6.39e6),\n tols=1e-3,\n )\n desc = (\n \"Antenna diameters in meters. Used by CASA to \"\n \"construct a default beam if no beam is supplied.\"\n )\n self._antenna_diameters = uvp.UVParameter(\n \"antenna_diameters\",\n required=False,\n description=desc,\n expected_type=np.float,\n tols=1e-3, # 1 mm\n )\n # possibly add in future versions:\n # Antenna positions (but what about reconfigurable/growing telescopes?)\n\n super(Telescope, self).__init__()", "title": "" }, { "docid": "f1ff46dc3de6c85e5c7b7c1bff20a6ec", "score": "0.58302903", "text": "def __init__(self, emObj, fpRef, ao):\n _OutputEngine.__init__(self, emObj, fpRef, ao) # provide event name\n self.name = 'EngineCsoundExternal'\n self.doc = lang.docOeCsoundExternal\n # compatable with all orchestras\n self.orcIncompat = []\n self.outAvailable = ['csoundScore']\n self.outMin = ['csoundScore']", "title": "" }, { "docid": "32a7c69f2e571afc8e1af49f4887aa65", "score": "0.58281094", "text": "def __init__(self, d):\n self.d = d", "title": "" }, { "docid": "2b6527d5cb067de3f89cd7ab58dcfc52", "score": "0.58238775", "text": "def __init__(self, **kwargs):\n\n self.ip = kwargs.get('ip', 'localhost')\n self.port = kwargs.get('port', '5025')\n\n self.logger = kwargs.get('logger', logging.getLogger(__name__))\n self.term = kwargs.get('term', '\\n')\n self.timeout = kwargs.get('timeout', 100000)\n\n import visa\n rm = visa.ResourceManager()\n #Connect to a Socket on the local machine at 5025\n #Use the IP address of a remote machine to connect to it instead\n try:\n self.CMT = rm.open_resource('TCPIP0::{}::{}::SOCKET'.format(self.ip,self.port))\n except Exception as e:\n self.logger.critical('Cannot establish SCPI connection!',exc_info=True)\n\n #The VNA ends each line with this. 
Reads will time out without this\n self.CMT.read_termination = self.term\n\n #Set a really long timeout period for slow sweeps\n self.CMT.timeout = self.timeout", "title": "" }, { "docid": "c617786ba8d61878d460bef8678ec682", "score": "0.5821947", "text": "def __init__(self, *args, **kwargs):\n raise NotImplementedError('__init__')", "title": "" }, { "docid": "48b74e26413148e44995ae5aedc243ec", "score": "0.58213454", "text": "def __init__(self,x=0.0,y=0.0,z=0.0):\n \n self.x = x #Cartesian x coordinate\n self.y = y #Cartesian y coordinate\n self.z = z #Cartesian z coordinate", "title": "" }, { "docid": "d9de2a43e7093106314b1d14ffc31632", "score": "0.5821289", "text": "def __init__(self, config, blockchain_state):\r\n params = config.get('ccc', {})\r\n self.blockchain_state = blockchain_state\r\n self.testnet = config.get('testnet', False)\r\n thin = config.get('thin', True)\r\n\r\n if thin:\r\n color_data_class = ThinColorData\r\n color_data_builder = AidedColorDataBuilder\r\n else:\r\n color_data_class = ThickColorData\r\n color_data_builder = FullScanColorDataBuilder\r\n \r\n self.store_conn = DataStoreConnection(\r\n params.get(\"colordb_path\", \"color.db\"))\r\n self.cdstore = ColorDataStore(self.store_conn.conn)\r\n self.metastore = ColorMetaStore(self.store_conn.conn)\r\n self.colormap = ColorMap(self.metastore)\r\n \r\n cdbuilder = ColorDataBuilderManager(\r\n self.colormap, self.blockchain_state, self.cdstore,\r\n self.metastore, color_data_builder)\r\n\r\n self.colordata = color_data_class(\r\n cdbuilder, self.blockchain_state, self.cdstore, self.colormap)", "title": "" }, { "docid": "67dc4a6a93f806c5623efb46ae67ee51", "score": "0.58203876", "text": "def __init__(self, *args):\n this = _render3d.new_CStaticTexture(*args)\n try: self.this.append(this)\n except: self.this = this", "title": "" }, { "docid": "c9f496062dc5056168487d5fd7e0209b", "score": "0.581315", "text": "def __init__(self, sid, q0, eids, t_source=None,\n ce=0, vector_tableds=None, control_id=0, comment=''):\n ThermalLoad.__init__(self)\n if comment:\n self.comment = comment\n #: Load set identification number. 
(Integer > 0)\n self.sid = sid\n self.q0 = q0\n self.t_source = t_source\n self.ce = ce\n self.control_id = control_id\n\n if vector_tableds is None:\n self.vector_tableds = [0., 0., 0.]\n else:\n self.vector_tableds = vector_tableds\n self.eids = eids\n self.eids_ref = None", "title": "" }, { "docid": "cd35bb3957bef9b3b4ae70a583cdd2ae", "score": "0.58127433", "text": "def __init__(self):\n\n # initialize yacs config node (CfgNode)\n self._CONFIG = CN()\n\n # initialize main sections of settings with default values\n self._initKittConfig()\n self._initVisionConfig()", "title": "" }, { "docid": "2aaa020f17fe544468dae7c310943493", "score": "0.5807132", "text": "def __init__(self, *args):\n this = _render3d.new_CFontTexture(*args)\n try: self.this.append(this)\n except: self.this = this", "title": "" }, { "docid": "6f6a6f78502e83b094ce1ee6e9c876ed", "score": "0.58062303", "text": "def __init__(self):\n super(ParticleEngine, self).__init__()\n self.componenttypes = (Particle,)\n self._createfunc = None\n self._deletefunc = None\n self._updatefunc = None", "title": "" }, { "docid": "c5bab259dca63d1d1cf04ed686a4e10f", "score": "0.5803877", "text": "def __init__(self, ip_addr, port=22, admin_name=\"\", admin_password=\"\"):\n\n # Invoke the superclass initialization method to initialize\n # inherited attributes\n NetworkDevice.__init__(self, 'Cisco', 'eos')\n\n # Initialize this class specific attributes\n self._ip_addr = ip_addr\n self._port = port\n self._admin_name = admin_name\n self._admin_pswd = admin_password\n self._remote_shell = None", "title": "" }, { "docid": "6bf0620feac10f1426c83c1234493b7d", "score": "0.58017826", "text": "def __init__(self, inputs, dmc_prev, obs, equatorial=True):\n self.dmc_prev = ee.Image(dmc_prev)\n\n self.temp = inputs.temp\n self.rhum = inputs.rhum\n self.rain = inputs.rain\n self.obs = obs\n self.equatorial = equatorial", "title": "" }, { "docid": "33433c966e7f0c787f88c0d7aec55e59", "score": "0.5800153", "text": "def __init__(self, experiment, **kwargs):\n if not alamo_enabled:\n raise errors.AlamoDisabledError()\n self._x, self._z, self._xv, self._zv = None, None, None, None\n self._kwargs = kwargs\n self._exp = experiment\n self._rsrc = self._create_resource()\n self._exp.add(self._rsrc)", "title": "" }, { "docid": "b9fa73d530edcd073cf21368c04d0ce7", "score": "0.5799555", "text": "def __init__(self):\n # Nothing to do here since there is very little state in the class.\n pass", "title": "" }, { "docid": "71d56ad26bae070fd65eabbece3d7321", "score": "0.57995075", "text": "def __init__(self, *args):\n _itkVectorContainerPython.vectoritkPointD3_swiginit(self,_itkVectorContainerPython.new_vectoritkPointD3(*args))", "title": "" }, { "docid": "840c85d05ac3808a0de450260aac9ae2", "score": "0.57964957", "text": "def __init__(self):\n\t\tself.algorithm = \"mcdsat\"\n\t\tself.c2d_path = \"\"\n\t\tself.models = \"\"\n\t\tself.cnf_file = \"\"\n\t\tself.compiled_dnnf = \"\"\n\t\tself.query_models = \"\"", "title": "" }, { "docid": "b81efc1ad1c090f3a4ddc6ca43c39602", "score": "0.5794689", "text": "def __init__(self, emObj, fpRef, ao):\n _OutputEngine.__init__(self, emObj, fpRef, ao) # provide event name\n self.name = 'EngineAcToolbox'\n self.doc = lang.docOeAcToolbox\n # define orcs that are not compatible w/ this engine\n self.orcIncompat = [] # all orchestras compatable\n self.outAvailable = ['acToolbox']\n self.outMin = ['acToolbox']\n # store structured data\n self.codeList = []\n self.codeCmt = 'created with %s' % lang.msgAth", "title": "" }, { "docid": 
"0e51117e6353110c1d9dbfce5f2c6b12", "score": "0.5789894", "text": "def __init__(self, *args):\n this = _pymaxwell.new_Cattribute(*args)\n try: self.this.append(this)\n except: self.this = this", "title": "" }, { "docid": "005986ce6c389017dced786fbf5440f6", "score": "0.578806", "text": "def __init__(self, Object1, Object2, Object3):\n # CREATE OBJECTS\n self.ui = Object1\n self.controller = Object2\n self.comms = Object3", "title": "" }, { "docid": "02143a15f004236f2c5e915f966e62e5", "score": "0.5784553", "text": "def __init___0(self, material, te):\n super(CraftFurnace, self).__init__(material)\n self.furnace = te", "title": "" }, { "docid": "a61fffaedc5bc19cb466c4a3f03f3e49", "score": "0.57835364", "text": "def __init___1(self, p_conId, p_ratio, p_action, p_exchange, p_openClose, p_shortSaleSlot, p_designatedLocation):\r\n pass #self.__init__(p_conId, p_ratio, p_action, p_exchange, p_openClose, p_shortSaleSlot, p_designatedLocation, -1)# exemptCode \r", "title": "" }, { "docid": "ebdb7ce241356023162eb3f52176ffd2", "score": "0.57804734", "text": "def __init__(self, *args):\n this = _render3d.new_CEffect(*args)\n try: self.this.append(this)\n except: self.this = this", "title": "" }, { "docid": "aa1720af0bb0552bdac9020fdceea8fe", "score": "0.57796204", "text": "def __init__(self, qeConf):\n self.filename = qeConf.filename\n self.atomicSpecies = OrderedDict()\n self.formatString = '%# .8f %# .8f %# .8f'\n # optConstraints three 1/0 for each coordinate of each atom\n self.optConstraints = []\n self.qeConf = qeConf\n #self.qeConf.parse()\n #self.setStructureFromQEInput()\n self.lattice = None\n self.structure = None\n self.nat = None\n self.ntyp = None", "title": "" }, { "docid": "42975903e8fe10b5014cdc6c55396521", "score": "0.57786715", "text": "def __init__(self, Panneau_Affichage, tel_entree, tel_sortie, Borne_Ticket, Camera, Parking):\n\t\tself.panneau_affichage = Panneau_Affichage\n\t\tself.tel_entree = tel_entree\n\t\tself.tel_sortie = tel_sortie\n\t\tself.borne_bicket = Borne_Ticket\n\t\tself.camera = Camera\n\t\tself.parking = Parking", "title": "" }, { "docid": "b0c73b38139338b5984416041e23092d", "score": "0.57777673", "text": "def __init__(self):\n\n self._auv_data = dict()\n for variable_name in variables_list():\n self._auv_data[variable_name] = None\n\n self._current_waypoint = dict()\n self._current_waypoint['x'] = 0.0\n self._current_waypoint['y'] = 0.0\n self._current_waypoint['depth'] = 0.0\n\n self.watchdog = Watchdog()\n self.dye = DyeSensor()\n\n self.auv_control = AuvMOOS(\n config['auv']['host'],\n int(config['auv']['port']),\n config['auv']['client_name'],\n variables_list())\n self.auv_control.set_data_callback(self._process_auv_data)", "title": "" }, { "docid": "9b70996b6657765528726bbf6cdfe197", "score": "0.5776151", "text": "def __init__(self):\n # Left and right eye chosen landmarks.\n self.eye_idxs = {\n \"left\": [362, 385, 387, 263, 373, 380],\n \"right\": [33, 160, 158, 133, 153, 144],\n }\n\n # Used for coloring landmark points.\n # Its value depends on the current EAR value.\n self.RED = (0, 0, 255) # BGR\n self.GREEN = (0, 255, 0) # BGR\n\n # Initializing Mediapipe FaceMesh solution pipeline\n self.facemesh_model = get_mediapipe_app()\n\n # For tracking counters and sharing states in and out of callbacks.\n self.state_tracker = {\n \"start_time\": time.perf_counter(),\n \"DROWSY_TIME\": 0.0, # Holds the amount of time passed with EAR < EAR_THRESH\n \"COLOR\": self.GREEN,\n \"play_alarm\": False,\n }\n\n self.EAR_txt_pos = (10, 30)", "title": "" }, { "docid": 
"abb968b97906bf44d212ed6263626678", "score": "0.57755625", "text": "def __init__(self):\n\n super().__init__()\n\n # Gadget state\n self.heel_mode = False\n self.patrol_mode = False\n self.sitting = False\n\n # Ev3dev initialization\n self.leds = Leds()\n self.sound = Sound()\n\n # Connect infrared and touch sensors.\n self.ir = InfraredSensor()\n self.ts = TouchSensor()\n # Init display\n self.screen = Display()\n\n # Connect medium motor on output port A:\n self.medium_motor = MediumMotor(OUTPUT_A)\n # Connect two large motors on output ports B and C:\n self.left_motor = LargeMotor(OUTPUT_B)\n self.right_motor = LargeMotor(OUTPUT_C)\n\n\n # Gadget states\n self.bpm = 0\n self.trigger_bpm = \"off\"\n\n # Start threads\n threading.Thread(target=self._patrol_thread, daemon=True).start()\n threading.Thread(target=self._heel_thread, daemon=True).start()\n threading.Thread(target=self._touchsensor_thread, daemon=True).start()", "title": "" }, { "docid": "ea44d63bc540d1b3538d22673cbfe910", "score": "0.5775057", "text": "def __init__(self, location=None, eid=0, **kwargs):\n\n super(Entity, self).__init__()\n\n self.eid = eid\n\n if location is None:\n self.location = Location()\n else:\n self.location = location", "title": "" }, { "docid": "d2450cd1d5518cf63f77a26fcd7f5f2d", "score": "0.57738364", "text": "def __init__(self,\n cloud_owner: str,\n cloud_region_id: str,\n orchestration_disabled: bool,\n in_maint: bool,\n *, # rest of parameters are keyword\n cloud_type: str = \"\",\n owner_defined_type: str = \"\",\n cloud_region_version: str = \"\",\n identity_url: str = \"\",\n cloud_zone: str = \"\",\n complex_name: str = \"\",\n sriov_automation: str = \"\",\n cloud_extra_info: str = \"\",\n upgrade_cycle: str = \"\",\n resource_version: str = \"\") -> None:\n super().__init__()\n self.cloud_owner = cloud_owner\n self.cloud_region_id = cloud_region_id\n self.orchestration_disabled = orchestration_disabled\n self.in_maint = in_maint\n self.cloud_type = cloud_type\n self.owner_defined_type = owner_defined_type\n self.cloud_region_version = cloud_region_version\n self.identity_url = identity_url\n self.cloud_zone = cloud_zone\n self.complex_name = complex_name\n self.sriov_automation = sriov_automation\n self.cloud_extra_info = cloud_extra_info\n self.upgrade_cycle = upgrade_cycle\n self.resource_version = resource_version", "title": "" } ]
73254f537c8813c1e5a36d369989817a
Return the key containing the name.
[ { "docid": "f1629cfe983f8b594a7731d02471a2fc", "score": "0.0", "text": "def get_name(self, key):\n for key in (\"name\", \"title\"):\n if self.has_key(key) and self.key_type(key) != self.NULL:\n return self.get_as_string(key)\n\n return \"\"", "title": "" } ]
[ { "docid": "fcbc38fcf59b807073d7b2b11a46a90b", "score": "0.7750027", "text": "def key_name(self) -> str:\n return pulumi.get(self, \"key_name\")", "title": "" }, { "docid": "b9e7d32ea82387c66113d51c9652e778", "score": "0.76042306", "text": "def get_key() -> str:\n pass", "title": "" }, { "docid": "71fbc3ef7000494dc2dcb50935722e4a", "score": "0.76030874", "text": "def named_key(m) -> str:", "title": "" }, { "docid": "fdf449e4dd320c6cd1af4c06208a56f3", "score": "0.7548896", "text": "def key(self, keyName):\n return self.storage.get(keyName, None)", "title": "" }, { "docid": "a7b945d19d51381efbf8a6adaba30c6d", "score": "0.7532042", "text": "def keyname(self) :\n\t\ttry :\n\t\t\treturn self._keyname\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "b2ebbfefc391fd808b53732c7bf9674f", "score": "0.74615717", "text": "def get_key(self, instance_name):\n return \".\".join([self.location.name, instance_name])", "title": "" }, { "docid": "db632fde6a423f61bbc46b0c8cb2f533", "score": "0.7383717", "text": "def get_key(self) -> str:\n raise NotImplementedError('TBA')", "title": "" }, { "docid": "11f98ba35de4f77266872bd0df6a720a", "score": "0.7361617", "text": "def _get_key(self, split_name):\n return self._SPLIT_KEY.format(split_name=split_name)", "title": "" }, { "docid": "ccac71091386273075b2ae2fb23b041a", "score": "0.7324476", "text": "def get_key(self, key_name):\n\n return self.boto_bucket.get_key(key_name)", "title": "" }, { "docid": "438909f6f7a9bcc110286e9e70df1df3", "score": "0.731794", "text": "def keyname(self):\n return self.container.keyname", "title": "" }, { "docid": "fd93608bd8b74f2f2ae0ec088966bf04", "score": "0.73130363", "text": "def _get_key(self, segment_name):\n return self._SEGMENTS_KEY.format(segment_name=segment_name)", "title": "" }, { "docid": "14f072994ea45481ae7c3763f83f76fa", "score": "0.726158", "text": "def get_key(self):\n return self.obj['key']", "title": "" }, { "docid": "b2d4d3f5c6c23b4e6a53d399635c14d0", "score": "0.71965635", "text": "def get_key(self):\n return self.key", "title": "" }, { "docid": "74c0a3f2581a2b287a31f20c299684ea", "score": "0.7186578", "text": "def get_key(cls):\n return getattr(cls, \"key\", cls.__name__.lower())", "title": "" }, { "docid": "ebaf46492e4cb8ca8fb10fadfa9fc60e", "score": "0.7161005", "text": "def getKeyName(self):\n\n return keynames.getKeyName(self.event_string)", "title": "" }, { "docid": "fadb8d4f102842cec60ac5e86a0ba018", "score": "0.7150067", "text": "def key_pair_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key_pair_name\")", "title": "" }, { "docid": "1c431b2653bd411ae3841676478efb75", "score": "0.71444243", "text": "def key(self, key):\n key = key.lower()\n if key in self.map:\n name = self.map[key]\n return self.name(name)\n return ''", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, 
\"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { 
"docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "b1e11a964cf140aad9ecad3c078419ee", "score": "0.71190214", "text": "def key(self) -> str:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "8bf80507731e72964ba5f0969d2d7c85", "score": "0.71000993", "text": "def generate_key(cls, name):\n return ndb.Key(cls, hashlib.sha1(name).hexdigest())", "title": "" }, { "docid": "8bf80507731e72964ba5f0969d2d7c85", "score": "0.71000993", "text": "def generate_key(cls, name):\n return ndb.Key(cls, hashlib.sha1(name).hexdigest())", "title": "" }, { "docid": "5815fcf7d8fafea63ca31644bd0c96a3", "score": "0.70886785", "text": "def make_key(cls, name):\n return db.Key.from_path('Course', name)", "title": "" }, { "docid": "e94dd20f0dad53ef19ce135d6825f9b7", "score": "0.70657486", "text": "def get_key(file_name):\n with open(file_name, \"r\") as f:\n key = f.read()\n return key", "title": "" }, { "docid": "c67a9d0c03b1e2eba86eea756789d866", "score": "0.70649135", "text": "def get_key(self):\n return self.__key", "title": "" }, { "docid": "f9d00c89bf32a1f344cd0f5754b706d1", "score": "0.6973609", "text": "def name(self):\n return self._unique_key", "title": "" }, { "docid": "15b3c6c15afa2d2ab0347cf662cd05fb", "score": "0.6943651", "text": "def get_key(self, key) -> str:\n name = self.get_string_var(key).get()\n if name != self.DEFAULT_NAME:\n return self._Data[key].get(\"data\", {}).get(name, \"\")\n else:\n return \"\"", "title": "" }, { "docid": "f7f93988222b76f40ae58cd4451b5b1c", "score": "0.69345635", "text": "def key(self):\n return self.get_data(\"key\")", "title": "" }, { "docid": "f7f93988222b76f40ae58cd4451b5b1c", "score": "0.69345635", "text": "def key(self):\n return self.get_data(\"key\")", "title": "" }, { "docid": "2c99e04c922de746ff91c2c630b65135", "score": "0.69327664", "text": "def get_key_name(self, short_key_pair_name):\n name = '{}-{}'.format(self.evpc.name, short_key_pair_name)\n for key_name in self.key_names:\n if key_name.startswith(name):\n return key_name\n if key_name == short_key_pair_name:\n return key_name", "title": "" }, { "docid": "7c28ba119dd073f881420148078e4672", "score": "0.69313693", "text": "def _get_key_string(self):\n return self.__key_string", "title": "" }, { "docid": "22f2e8c6f99dee2b845cef2ad14119c4", "score": "0.69081557", "text": "def getKey(self):\n\t\treturn self.dict_key", "title": "" }, { "docid": "cda89e3a8c3a1321d2f2f9b6e9cfc76c", "score": "0.6901809", "text": "def get_key(self):\n key = self.stringvar_entry.get()\n if len(key) > 0 and valid_key(key):\n return key", "title": "" }, { "docid": "1d5e399847a0528c6cef35f41f60b282", "score": "0.6900495", "text": "def resolve_key(self, name=None, currentResource=None):\n if not name:\n return self.get()\n\n if name == \"path\":\n return self.tp.name\n elif name == \"contents\":\n with open(self.tp.name, \"r\") as f:\n return f.read()\n else:\n raise KeyError(name)", "title": "" }, { "docid": "237bc874ccccc1f712ea9642fd0a7ba7", "score": "0.68927556", "text": "def key(self):\n return Key.from_args(self.project_id, self.name, region=self.region)", "title": "" }, { "docid": "0db5e7b7d8d7205ccfeaf12317029a56", "score": "0.6879882", "text": "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": 
"0db5e7b7d8d7205ccfeaf12317029a56", "score": "0.6879882", "text": "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "0db5e7b7d8d7205ccfeaf12317029a56", "score": "0.6879882", "text": "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "0db5e7b7d8d7205ccfeaf12317029a56", "score": "0.6879882", "text": "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "0db5e7b7d8d7205ccfeaf12317029a56", "score": "0.6879882", "text": "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "0db5e7b7d8d7205ccfeaf12317029a56", "score": "0.6879882", "text": "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "0db5e7b7d8d7205ccfeaf12317029a56", "score": "0.6879882", "text": "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "0db5e7b7d8d7205ccfeaf12317029a56", "score": "0.6879882", "text": "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "0db5e7b7d8d7205ccfeaf12317029a56", "score": "0.6879882", "text": "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "0db5e7b7d8d7205ccfeaf12317029a56", "score": "0.6879882", "text": "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "0db5e7b7d8d7205ccfeaf12317029a56", "score": "0.6879882", "text": "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "0db5e7b7d8d7205ccfeaf12317029a56", "score": "0.6879882", "text": "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "6f4d498f36c1a7707137c4827425ef3d", "score": "0.6874708", "text": "def get_obs_key(self, name: str) -> str:\n return self.value.format(name)", "title": "" }, { "docid": "6f4d498f36c1a7707137c4827425ef3d", "score": "0.6874708", "text": "def get_obs_key(self, name: str) -> str:\n return self.value.format(name)", "title": "" }, { "docid": "1ad1a490ae3ab1719983567cdb22157c", "score": "0.6868771", "text": "def get_cache_key(self, name, filename=None):\r\n hash = sha1(name.encode('utf-8'))\r\n if filename is not None:\r\n if isinstance(filename, unicode):\r\n filename = filename.encode('utf-8')\r\n hash.update('|' + filename)\r\n return hash.hexdigest()", "title": "" }, { "docid": "fefcc86f1debe2f1117519b3386fb964", "score": "0.68474174", "text": "def get_key(self) -> Tuple:\n\n # implemented in sub classes", "title": "" }, { "docid": "2da9533eec1f422f2c9e64ab50cc6814", "score": "0.6813883", "text": "def get_key_pair(self, name):\n conn = self.get_nova_connection()\n\n try:\n return conn.get_all_key_pairs(keynames=name.encode('ASCII'))[0]\n except IndexError:\n return None", "title": "" }, { "docid": "0c7ddf0864c0973ee1d580f4ba0408c7", "score": "0.68107253", "text": "def key(self) -> _EntryKey:\n return _EntryKey(self.token, self.string)", "title": "" }, { "docid": "d7d42ca47a78e9b042260ac13d6f2275", "score": "0.6795493", "text": "def get_entity_key(cls, name):\n raise NotImplementedError()", "title": "" }, { "docid": "118c0d427b3d76240e2bfee9796f6d05", "score": "0.6773614", "text": "def key(self):\n key = self._impl.key()\n if self._prefix is not None:\n return key[len(self._prefix):]\n return key", "title": "" }, { "docid": "aded1cdae482a814afe33b23c097e397", "score": "0.67708176", "text": "def Key(self) -> str:", "title": "" }, { "docid": 
"8dd54d54778d9744f2ea8cc3db02927a", "score": "0.6762992", "text": "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "8dd54d54778d9744f2ea8cc3db02927a", "score": "0.6762992", "text": "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "8dd54d54778d9744f2ea8cc3db02927a", "score": "0.6762992", "text": "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "8dd54d54778d9744f2ea8cc3db02927a", "score": "0.6762992", "text": "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "8dd54d54778d9744f2ea8cc3db02927a", "score": "0.6762992", "text": "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "8dd54d54778d9744f2ea8cc3db02927a", "score": "0.6762992", "text": "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "8dd54d54778d9744f2ea8cc3db02927a", "score": "0.6762992", "text": "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "8dd54d54778d9744f2ea8cc3db02927a", "score": "0.6762992", "text": "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "8dd54d54778d9744f2ea8cc3db02927a", "score": "0.6762992", "text": "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "8dd54d54778d9744f2ea8cc3db02927a", "score": "0.6762992", "text": "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "8dd54d54778d9744f2ea8cc3db02927a", "score": "0.6762992", "text": "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "8dd54d54778d9744f2ea8cc3db02927a", "score": "0.6762992", "text": "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "8dd54d54778d9744f2ea8cc3db02927a", "score": "0.6762992", "text": "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "8dd54d54778d9744f2ea8cc3db02927a", "score": "0.6762992", "text": "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "title": "" }, { "docid": "74b846dbd2ffd8645980c1c27efabb05", "score": "0.67545635", "text": "def get_key(self):\n return int(self.key.value())", "title": "" }, { "docid": "5b106cc398979606667b3e038f94e8ea", "score": "0.6748213", "text": "def key(self):\n return \"{}\".format(self)", "title": "" }, { "docid": "0a97c22167ff14beaba9ef19126a4eea", "score": "0.6746427", "text": "def key(self):\n return self.config.get('key').lower()", "title": "" } ]
676b480059773d0858c5368503ccb578
Method to authenticate a gateway user with keycloak
[ { "docid": "e20774330e11fc2e7c2d3f316b80732e", "score": "0.0", "text": "def authenticate_using_user_details(self, user_credentials):\n try:\n token, user_info = self._get_token_and_user_info_password_flow(user_credentials)\n return token, user_info\n except Exception as e:\n return None", "title": "" } ]
[ { "docid": "c10f37577cc7938d2d6345b30b60916a", "score": "0.717711", "text": "def __authenticate(self) -> None:\n print(\"Autenticando em \"+self.keycloak_url)\n url = self.keycloak_url+'auth/realms/master/protocol/openid-connect/token'\n authorization_redirect_url = {\n 'grant_type': 'password',\n 'username': self.keycloak_admin_user,\n 'password': self.keycloak_admin_password,\n 'client_id': 'admin-cli'\n }\n try:\n response = requests.post(url=url, data=authorization_redirect_url)\n self.access_token = response.json()['access_token']\n except requests.exceptions.ConnectionError:\n print(\"Conexão com o servidor keycloak recusada\")\n exit()", "title": "" }, { "docid": "7d6e50f412e858579daca782a31fa628", "score": "0.64246374", "text": "def authenticate(self, handler, data):", "title": "" }, { "docid": "b79b75386b3833c7dfcd672a5b4a4138", "score": "0.6304279", "text": "def authenticate(self, user, passwd):", "title": "" }, { "docid": "9b525f30fcbc12c47f1576da1be8c969", "score": "0.62468314", "text": "def authenticate():\n login, pwd = _get_creds(request)\n if login is None or pwd is None:\n return jsonify({'message': \"login and password should be provided\"}), HTTP_400_BAD_REQUEST\n if 'user_id' in session:\n logout()\n user = User(login=str(login), pwd=str(pwd))\n result = user.fetch()\n if result == None:\n return jsonify(WRONG_CREDS)\n login_user(user, remember=True)\n host_uid = user.get_host_as_owner().get(DB_UID)\n session['host_id'] = user.workplace_uid\n return jsonify({'code': 0, 'user_id': user.uid, 'host_id': host_uid})", "title": "" }, { "docid": "e26a75c610010f4150ca2d8d9d41d5c1", "score": "0.6212006", "text": "def authenticate(self, login, passcode):", "title": "" }, { "docid": "b7752f5ae586bb460997fcfe70858b45", "score": "0.6177273", "text": "def authenticate(self):\n self.client.authenticate()", "title": "" }, { "docid": "b7752f5ae586bb460997fcfe70858b45", "score": "0.6177273", "text": "def authenticate(self):\n self.client.authenticate()", "title": "" }, { "docid": "06d7edf14b40860e9f5e1a690fc18824", "score": "0.61628467", "text": "def handle_api_auth(self, http_context):\n\n body_data = json.loads(http_context.body.decode())\n mode = body_data['mode']\n username = body_data.get('username', None)\n password = body_data.get('password', None)\n\n auth = AuthenticationService.get(self.context)\n user_auth_id = f'{username}@{auth.get_provider().id}'\n\n if mode == 'normal':\n auth_info = auth.check_password(username, password)\n if auth_info:\n if aj.tfa_config.data.get(user_auth_id, {}).get('totp', []):\n return {\n 'success': True,\n 'username': username,\n 'totp': True\n }\n\n auth.prepare_session_redirect(http_context, username, auth_info)\n return {\n 'success': True,\n 'username': username,\n }\n\n # Log failed login for e.g. 
fail2ban\n remote_addr = http_context.env.get('REMOTE_ADDR', None)\n if len(aj.config.data['trusted_proxies']) > 0:\n if remote_addr in aj.config.data['trusted_proxies']:\n ip = http_context.env.get('HTTP_X_FORWARDED_FOR', '').split(',')[0]\n else:\n ip = remote_addr\n logging.warning(f\"Failed login from {username} at IP : {ip}\")\n\n gevent.sleep(3)\n return {\n 'success': False,\n 'error': None,\n }\n\n elif mode == 'sudo':\n target = 'root'\n try:\n if auth.check_sudo_password(username, password):\n self.context.worker.terminate()\n auth.prepare_session_redirect(http_context, target, None)\n return {\n 'success': True,\n 'username': target,\n }\n\n gevent.sleep(3)\n return {\n 'success': False,\n 'error': _('Authorization failed'),\n }\n except SudoError as e:\n gevent.sleep(3)\n return {\n 'success': False,\n 'error': e.message,\n }\n\n elif mode == 'totp':\n # Reset verify value before verifying\n aj.tfa_config.verify_totp[user_auth_id] = None\n self.context.worker.verify_totp(user_auth_id, password)\n gevent.sleep(0.3)\n if aj.tfa_config.verify_totp[user_auth_id]:\n auth.prepare_session_redirect(http_context, username, None)\n return {\n 'success': True,\n 'username': username,\n }\n return {\n 'success': False,\n 'error': 'Invalid mode',\n }", "title": "" }, { "docid": "4dec72a95f94f4d14943dfaf403bfeab", "score": "0.6120916", "text": "def authenticate_user_internal(request):\n logging.debug(\"Logging in user\")\n return Response(\n {\n \"success\": True,\n \"user_id\": request.user.id,\n \"username\": str(request.user),\n \"first_name\": request.user.first_name,\n \"last_name\": request.user.last_name,\n \"is_admin\": request.user.is_admin,\n }\n )", "title": "" }, { "docid": "ed5fe937071da51d96048f3e14c26ff2", "score": "0.6066046", "text": "def authenticate(self, user, password):\n pass", "title": "" }, { "docid": "17983b9f6241ad1d004fe085794058e9", "score": "0.6054969", "text": "def authenticate():\n grant_type = request.form.get(\"grant_type\", \"password\")\n if grant_type != \"password\":\n abort(400, \"Invalid grant type\")\n\n username = password = state = authorized_party = None\n\n try:\n username = request.form[\"username\"]\n password = request.form[\"password\"]\n state = request.form[\"state\"]\n authorized_party = request.form.get(\"azp\", None)\n except KeyError:\n abort(400, \"Invalid parameters\")\n\n # Avoid CSRF attacks\n if not state or state != session[\"state\"]:\n abort(400, \"Invalid state\")\n\n user_info = authenticate_user(username, password)\n\n if not user_info:\n # Indicate to Authentiq Connect that we haven't been able to\n # authenticate this user locally. 
Note that you\n if user_info is None:\n abort(404, \"User not found\")\n\n abort(403, \"Authentication failed\")\n\n now = datetime.datetime.utcnow()\n\n token = {\n # Token type\n \"token\": \"login_token\",\n # Local user account ID\n \"sub\": username,\n # Issued by ourselves\n \"iss\": CLIENT_ID,\n # The Authentiq Connect instance that is to consume this token\n \"aud\": [AUTHENTIQ_BASE.lower()],\n # When issued\n \"iat\": now,\n # Not valid in the past\n \"nbf\": now,\n # Valid just to sign in\n \"exp\": now + datetime.timedelta(minutes=5),\n }\n\n # Merge in user information from our \"backend\".\n token.update(user_info)\n\n # Authentiq Connect can request to bind the token to an Authentiq ID\n # enabling that user to sign in without a password in the future.\n if authorized_party:\n # Mark the token as a link token.\n token[\"token\"] = \"link_token\"\n\n # Add the requested Authentiq ID as the authorised party.\n token[\"azp\"] = authorized_party\n token[\"aud\"].append(authorized_party)\n\n # Remove the expiry time claim. You could also set this to e.g. a month\n # if you require your users to re-link their accounts once in a while.\n del token[\"exp\"]\n\n # Sign the token with HS256 and your client secret.\n # We plan to support different algorithms soon, keep an eye on\n # https://connect.authentiq.io/.well-known/openid-configuration\n token_jwt = jwt.encode(token, key=app.config[\"AQ_CLIENT_SECRET\"],\n algorithm=\"HS256\")\n\n resp = make_response(token_jwt)\n resp.status_code = 200\n resp.mimetype = \"application/jwt\"\n resp.cache_control.private = True\n resp.cache_control.no_cache = True\n\n return resp", "title": "" }, { "docid": "8d1f9c6044f2be26f94eaf033d798855", "score": "0.604143", "text": "def do_auth(self, access_token, *args, **kwargs):\n data = self.user_data(access_token)\n data['access_token'] = access_token\n kwargs.update(data)\n kwargs.update({'response': data, 'backend': self})\n return self.strategy.authenticate(*args, **kwargs)", "title": "" }, { "docid": "4677fe30ad84c4de60165da3107d5d4e", "score": "0.5989322", "text": "def authenticate(self, *args):\n return None", "title": "" }, { "docid": "2e73ece2cd825dcaa28a2f5c337deff4", "score": "0.5960223", "text": "def authenticate(self, request):\n ...", "title": "" }, { "docid": "2e73ece2cd825dcaa28a2f5c337deff4", "score": "0.5960223", "text": "def authenticate(self, request):\n ...", "title": "" }, { "docid": "2e73ece2cd825dcaa28a2f5c337deff4", "score": "0.5960223", "text": "def authenticate(self, request):\n ...", "title": "" }, { "docid": "d4be20d7cd7872f8b0e5f505e074b28c", "score": "0.5958701", "text": "def auth(self,username,password,nat=None,mtu=None,callback=None):\n\t\treturn self.handle(AuthCommand(username,password,3,self.clientname,self.clientver,nat,1,'utf8',mtu),callback)", "title": "" }, { "docid": "672ee73487daad567c739219214349d0", "score": "0.59278876", "text": "def handle_login():\n\n username = request.form.get('username')\n password = request.form.get('password')\n provider = request.form.get('provider')\n org_uid = request.form.get('org_uid')\n\n return basic_auth(provider, org_uid, username, password)", "title": "" }, { "docid": "7ad91aeab82a0fc46b53eafd91b0a55c", "score": "0.5917545", "text": "def authenticate(self):\n req = {\n \"AccountID\": self.userid,\n \"Password\": self.password,\n \"AuthCode\": self.auth_code,\n \"AppID\": self.appid\n }\n\n self.reqid += 1\n self.reqAuthenticate(req, self.reqid)", "title": "" }, { "docid": "18c4e2cf1e46fb219f40d900819719b7", "score": 
"0.5898834", "text": "def auth(self, request):\n user = authenticate(\n username=request.data[\"username\"], password=request.data[\"password\"]\n )\n if user:\n # get or create a token\n token = Token.objects.get(user=user)\n return Response({\"token\": token.key, \"user_id\": user.pk})\n else:\n return Response(\n {\"details\": \"Invalid credentials\"}, status=status.HTTP_401_UNAUTHORIZED\n )", "title": "" }, { "docid": "ff2837efcb19bebeb1b0c9a317ef37f9", "score": "0.5893853", "text": "def _login(self):\n url = self.app + '/authentication'\n data = {'username': self.username, 'password': self.password, 'type': 'credentials'}\n attempt = self._make_req(url, data, 'app').json()\n\n if attempt.get('access_token'):\n self.headers['api']['authorization'] = 'Bearer {}'.format(attempt.get('access_token'))\n else:\n raise Exception(\"Login failed\")", "title": "" }, { "docid": "a4767fe96b0bfc2b11f7dbcc485967f0", "score": "0.58749855", "text": "def _doAuthenticate(self, request):\n pass", "title": "" }, { "docid": "4cf56a2791d1fc018157e17ff1cf59e0", "score": "0.5856177", "text": "def authenticate_another_user(self):\n self.register_and_login(user={\"user\": make_user()})", "title": "" }, { "docid": "ca6e085d87719de27e73b3fc79cec7cb", "score": "0.5813895", "text": "def admin_authentication(self, request):\n try:\n token_type, token = request.headers.get(\"Authorization\").split()\n assert token_type == \"Bearer\"\n if auth_model.check_webhook(token):\n return self.view(request)\n except Exception:\n return JsonResponse(data={\"error\": \"Invalid Credentials\"}, status=403)", "title": "" }, { "docid": "b9759175ab1cbaf05d78b4b05990fc3f", "score": "0.58089775", "text": "def login():\n # Not sure if we should return a token so the server could check always auth\n # If not modifying the client could be enough to get access to the service\n user = request.args.get('username')\n passwd = request.args.get('password')\n\n if check_login(user, passwd):\n return \"\"\n else:\n abort(401)", "title": "" }, { "docid": "533638269c226c65f745538b1714a76f", "score": "0.57749784", "text": "def authenticated_user(client):\n user = User(username='test_username_123')\n user.set_password('test_password_123')\n user.save() \n \n client.login(username='test_username_123', password='test_password_123')\n return user", "title": "" }, { "docid": "477076294aaa6350dbadef82a0c5c851", "score": "0.57684684", "text": "def authenticate(request):\n if not request.user.is_anonymous():\n return Customer.objects.get(user=request.user)\n\n\n # If we have an static API key defined allow it here\n if 'api_key' in request.GET:\n try:\n customer = _get_api_customer(request.GET.get('api_key'))\n request.user = customer.user\n if customer.subscription is None:\n raise PermissionDenied(\"Not premium user\")\n except PermissionDenied, e:\n return HttpResponseForbidden(\"invalid API key specified\")\n return customer\n elif 'access_token' in request.GET:\n authentication = OAuth2Authentication('API')\n done = authentication.is_authenticated(request.GET['access_token'])\n if not done:\n return HttpResponseForbidden(\"Invalid access token\")\n customer = Customer.objects.get(user=request.user)\n return customer\n else:\n return HttpResponseForbidden(\"No credentials forbidden\")\n\n return \"#%02x%02x%02x\" % (r, g, b)", "title": "" }, { "docid": "6a8d7c7900b7e0b8390b4bc226c715de", "score": "0.57432604", "text": "def authenticate_user(request):\n try:\n email = request.data['email']\n password = request.data['password']\n\n user = 
User.objects.get(email=email, password=password)\n if user:\n try:\n payload = jwt_payload_handler(user)\n token = jwt.encode(payload, settings.SECRET_KEY)\n user_details = {}\n user_details['name'] = \"%s %s\" % (user.email, user.role)\n user_details['token'] = token\n user_logged_in.send(sender=user.__class__,\n request=request, user=user)\n return Response(user_details, status=status.HTTP_200_OK)\n except Exception as e:\n raise e\n else:\n res = {\n 'error': 'can not authenticate with the given credentials or the account has been deactivated'\n }\n return Response(res, status=status.HTTP_403_FORBIDDEN)\n except:\n res = {\n 'error': 'please provide a email or password'\n }\n return Response(res)", "title": "" }, { "docid": "ce72e23f2bb383243b8e23b77e37b8cf", "score": "0.57430196", "text": "def send_auth(self):\n print(\"Authenticating...\")\n self.emit('auth', {'x-api-key': self.api_key}, namespace=self.namespace)", "title": "" }, { "docid": "643ee53e63ee4397852fd380c7b8dd39", "score": "0.5728901", "text": "def login_stage(self):\n auth_url = self.__class__.auth_url\n username = self.__class__.username\n password = self.__class__.password\n\n password_mgr = HTTPPasswordMgrWithDefaultRealm()\n password_mgr.add_password(None, auth_url, username, password)\n handler = HTTPBasicAuthHandler(password_mgr)\n opener = build_opener(handler)\n\n resp = opener.open(auth_url)\n authtoken = resp.read()\n\n if not authtoken or not self.authtoken_re.match(authtoken):\n self.fail(\"Unable to retrive an authtoken for user:%s password: %s at %s got %s\" % (username, password, auth_url, str(authtoken)))\n return authtoken", "title": "" }, { "docid": "a14f312d1e0a37523d7f971aa6d59987", "score": "0.5704879", "text": "def login():\n if g.user is not None:\n current_app.logger.info('user %s logined' % g.user.username)\n return api_response(success=True, data={'user_id': g.user.id})\n else:\n return api_response(success=False, data=None,\n error_code=unknown_error[0], error_message=unknown_error[1])", "title": "" }, { "docid": "48f3f2f077d6ca1b902e12c9114f7cc3", "score": "0.5702841", "text": "def sign_in(self):\n self.session_id = self.service.doLoginEnc(\n userLogin=self.login,\n userHashPassword=self.enc_passwd,\n countryCode=self.country_code,\n webapiKey=self.api_key,\n localVersion=self.versions[self.country_id]\n ).sessionHandlePart", "title": "" }, { "docid": "8f7d38cee83406692707ba2f852f59c8", "score": "0.56714964", "text": "def post(self, request):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n provider = serializer.data.get('provider', None)\n strategy = load_strategy(request)\n \n try:\n backend = load_backend(strategy=strategy, name=provider,\n redirect_uri=None)\n \n except MissingBackend:\n return Response({'error': 'Please provide a valid provider'},\n status=status.HTTP_400_BAD_REQUEST)\n try:\n if isinstance(backend, BaseOAuth2):\n access_token = serializer.data.get('access_token')\n user = backend.do_auth(access_token)\n except HTTPError as error:\n return Response({\n \"error\": {\n \"access_token\": \"Invalid token\",\n \"details\": str(error)\n }\n }, status=status.HTTP_400_BAD_REQUEST)\n except AuthTokenError as error:\n return Response({\n \"error\": \"Invalid credentials\",\n \"details\": str(error)\n }, status=status.HTTP_400_BAD_REQUEST)\n except AuthForbidden as error:\n return Response({\n \"error\":\"Only Ateneo emails are allowed.\",\n \"details\": str(error)\n }, status=status.HTTP_400_BAD_REQUEST)\n \n try:\n 
authenticated_user = backend.do_auth(access_token, user=user)\n \n except HTTPError as error:\n return Response({\n \"error\":\"invalid token\",\n \"details\": str(error)\n }, status=status.HTTP_400_BAD_REQUEST)\n \n except AuthForbidden as error:\n return Response({\n \"error\":\"invalid token\",\n \"details\": str(error)\n }, status=status.HTTP_400_BAD_REQUEST)\n \n if authenticated_user and authenticated_user.is_active:\n #generate JWT token\n login(request, authenticated_user)\n data={\n \"token\": jwt_encode_handler(\n jwt_payload_handler(user)\n )}\n hostId = None\n if hasattr(authenticated_user, 'sanggu_host'):\n hostId = authenticated_user.sanggu_host.id\n elif hasattr(authenticated_user, 'org_host'):\n hostId = authenticated_user.org_host.id\n elif hasattr(authenticated_user, 'office_host'):\n hostId = authenticated_user.office_host.id\n #customize the response to your needs\n response = {\n \"token\": data.get('token'),\n \"email\": authenticated_user.email,\n \"userId\": hostId\n }\n return Response(status=status.HTTP_200_OK, data=response)", "title": "" }, { "docid": "e3da5a2ed4be70913c665d0088b640c4", "score": "0.5656453", "text": "def login_user(self, user):\n self.activate_user(user['phone_number'])\n url = reverse('authentication:login')\n data = {\n \"password\": \"thisIS24!#\",\n \"phone_number\": \"25670000000\",\n }\n response = self.client.post(url, data, format='json')\n return response.data['access']", "title": "" }, { "docid": "ceeb93e29ebadbad28385a4cb88bc5eb", "score": "0.56537426", "text": "def authenticate(self):\n\n if self.api_key is not None: # First try to use the API key\n return self.authenticate_key(self.api_key)\n elif self.username is not None: # Then try to use the user email\n return self.authenticate_password(username=self.username)\n else: # Read the key/name from the commandline and call authenticate_key or authenticate_password\n value = str(input('Please enter your email or API key: '))\n if '@' in value: # Use username/password authentication if the user entered email\n return self.authenticate_password(username=value)\n else: # use API key authentication if user entered key\n return self.authenticate_key(key=value)", "title": "" }, { "docid": "afb909a57a9c3d8efae3ecba32b823c1", "score": "0.5624107", "text": "def _client_gateway(self, c, ca):\n print(f'Client {ca} goes through gateway')\n\n # Identity confirmation\n verification_msg = receive_json_message(c)\n if verification_msg is None:\n print(f'{ca} has disconected')\n return\n res = self._verify_user(verification_msg)\n\n # Give response to the client\n if not res:\n send_json_message(c, { 'Response' : 'Failure', 'Message' : 'Could not authenticate with the server' } )\n print(f'Ending connection with client {ca}')\n c.close()\n self._print_connections()\n return\n\n print(f'Client {ca} has successfully authenticated with the server')\n \n # Add to connections list\n found = False\n for i, conn in enumerate(self._connections):\n if conn[2] == verification_msg['UID']:\n self._connections[i] = (c, ca, verification_msg['UID'])\n found = True\n break\n if not found:\n self._connections.append((c, ca, verification_msg['UID']))\n self._print_connections()\n\n # Handle the client\n self._client_handler(c, ca, verification_msg['UID'])", "title": "" }, { "docid": "8903c5439b56cacc4cc1c3233a11bfe2", "score": "0.5615673", "text": "def auth(client):\n def auth_user(username, password):\n rv = client.get(url_for('auth.login'))\n m = re.search(b'(<input id=\"csrf_token\" name=\"csrf_token\" '\n 
b'type=\"hidden\" value=\")([-A-Za-z.0-9]+)', rv.data)\n\n return client.post(url_for('auth.login'), data=dict(\n username=username,\n password=password,\n csrf_token=m.group(2).decode(\"utf-8\")\n ), follow_redirects=True)\n\n return auth_user", "title": "" }, { "docid": "5e39bc85fba6b69331c52f5485ae34b1", "score": "0.56145227", "text": "def authenticate(self):\n\n print(\"Getting new token\")\n self.getFrob()\n self.getAuthKey()\n self.getToken()\n self.cacheToken()", "title": "" }, { "docid": "4d56b228c6937bd937ce4bac1d71c4a7", "score": "0.56082815", "text": "def auth_login():\n user_fields = user_schema.load(request.json)\n\n user = User.query.filter_by(email=user_fields[\"email\"]).first()\n\n if not user or not bcrypt.check_password_hash(user.password, user_fields[\"password\"]):\n return abort(401, description=\"Invalid login details.\")\n\n expiry = timedelta(days=1)\n access_token = create_access_token(identity=str(user.id), expires_delta=expiry)\n\n return jsonify({\"token\": access_token})", "title": "" }, { "docid": "1c0c0c24ee97c21fbbee884079af89b9", "score": "0.55895495", "text": "def user_auth(userId: str) -> object:\n\n log.debug(\"For user %s\" % userId)\n\n conf = config()\n\n # Redirect to OAUTH\n getVars = {\n 'client_id': conf['client_id'],\n 'redirect_uri': conf['oauth_callback'],\n 'response_type': 'code',\n 'state': \"00000000000\"\n }\n return redirect(\"%s/oauth2/authorize?%s\" % (conf['api_url'], urllib.parse.urlencode(getVars)), code=302)", "title": "" }, { "docid": "8e882b35b350bc8f95d0ff349031f8e5", "score": "0.5587082", "text": "def authenticate(auth):\n\n order = {\n \"edicts\": [{\"op\": \"login\"}],\n \"header\": {\n \"asset_id\": \"1.3.0\",\n \"currency_id\": \"1.3.1\",\n \"asset_precision\": 5,\n \"currency_precision\": 5,\n \"account_id\": auth[\"account_id\"],\n \"account_name\": auth[\"account_name\"],\n \"wif\": auth[\"wif\"],\n },\n \"nodes\": bitshares_nodes(),\n }\n\n broker(order)\n print(\"\\nAuthenticated\\n\")\n return True", "title": "" }, { "docid": "df2acb7734614ce6b36e4d86115dedeb", "score": "0.55852675", "text": "def authenticate_credentials(self, payload):\n #print(payload.get('role'))\n try:\n role = payload.get('role')\n user_id = payload.get('user_id')\n except:\n raise exceptions.ValidationError({\"error\":11})\n try:\n if role == 'Client':\n user = UserClient.objects.get(pk=user_id)\n elif role == 'Kronero':\n user = UserKronero.objects.get(pk=user_id)\n elif (role == 'Global') or (role == 'Store') or (role == 'Chain') or (role == 'Application'):\n user = Administrator.objects.get(pk=user_id)\n else:\n raise exceptions.ValidationError({\"error\":21})\n except:\n raise exceptions.AuthenticationFailed({\"error\":20})\n\n if not user.is_active:\n raise exceptions.ValidationError({\"error\":8})\n\n return user", "title": "" }, { "docid": "2874bdf2a4cfbaa5bc6c59e2c4e0df3a", "score": "0.5585033", "text": "def auth(request):\n if request.method != 'POST':\n return HTTPMethodNotAllowed(\n text=u'ServerResponse: This endpoint only supports the POST method.')\n\n username = request.json_body.get('username', None)\n password = request.json_body.get('password', None)\n if username is None or password is None:\n return HTTPBadRequest(\n text=u'ServerResponse: Username and password must be provided')\n\n request.response.headerlist.extend((\n ('Cache-Control', 'no-store'),\n ('Pragma', 'no-cache')))\n\n try:\n user = DBSession().query(User).filter(User.username == username).one()\n except NoResultFound:\n return 
HTTPUnauthorized(text=u'ServerResponse: Authentication failed')\n else:\n if not user.validate_password(password):\n return HTTPUnauthorized(text=u'ServerResponse: Authentication failed')\n\n return {\n \"access_token\": create_token(request, username,\n user.roles and json.loads(user.roles) or [])\n }", "title": "" }, { "docid": "9e31d2d521471af161100692f4195fe3", "score": "0.55742615", "text": "def user_login():\n #try:\n \n auth = request.authorization\n\n if not auth or not auth.username or not auth.password:\n return make_response('Could not verify 1', 401, {'WWW-Authenticate' : 'Basic realm=\"Login required!\"'})\n\n user = {'username' : 'isaac', 'password' :'password'}\n \n\n if not user:\n return make_response('Could not verify 2', 401, {'WWW-Authenticate' : 'Basic realm=\"Login required!\"'})\n\n if user[\"password\"] == auth.password:\n token = jwt.encode({'username':user[\"username\"] , 'exp' : datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, SECRET_KEY)\n\n return jsonify({'token' : token.decode('UTF-8')})\n\n return make_response('Could not verify 3', 401, {'WWW-Authenticate' : 'Basic realm=\"Login required!\"'})", "title": "" }, { "docid": "6c1c6432a730fa0abec39d81d78ee576", "score": "0.5565178", "text": "def authenticate(self, handler, data):\n username = data['username']\n try:\n pamela.authenticate(username, data['password'], service=self.service)\n except pamela.PAMError as e:\n if handler is not None:\n self.log.warning(\"PAM Authentication failed (%s@%s): %s\", username, handler.request.remote_ip, e)\n else:\n self.log.warning(\"PAM Authentication failed: %s\", e)\n else:\n return username", "title": "" }, { "docid": "b20b02ef25ea430f17435d0daf6d5420", "score": "0.5561615", "text": "def login_access_token(\n db: Session = Depends(deps.get_db), form_data: OAuth2PasswordRequestForm = Depends()\n) -> Any:\n user = user1.authenticate(db, email=form_data.username, password=form_data.password)\n if not user:\n raise HTTPException(status_code=400, detail=\"Incorrect email or password\")\n elif not user1.is_active(user):\n raise HTTPException(status_code=400, detail=\"Inactive user\")\n access_token_expires = timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)\n return {\n \"access_token\": security.create_access_token(\n user.id, expires_delta=access_token_expires\n ),\n \"token_type\": \"bearer\",\n }", "title": "" }, { "docid": "d466107b5f77e7f83a273ab3737ef653", "score": "0.55491817", "text": "def auth(user, passwd, endpoint):\n token = art_ad.auth(user, passwd, endpoint)\n if token == '':\n logging.error('Authentication failed!')\n return\n \n token_cache.save_token(token, endpoint)", "title": "" }, { "docid": "f3de1016cb37a53b44b2147a5c445ed3", "score": "0.5548229", "text": "def authenticate_credentials(self, userid, password, request: Optional[Any] = ...):\n ...", "title": "" }, { "docid": "4bf003821cc622a57956ac1ca43c2def", "score": "0.5547087", "text": "def authenticate():\n abort(401)", "title": "" }, { "docid": "ebef826a028da2898e8598c82f574695", "score": "0.5546569", "text": "def authenticateRequest(self, request, service_request, *args, **kwargs):\r\n username = password = None\r\n\r\n if 'Credentials' in request.headers:\r\n cred = request.headers['Credentials']\r\n\r\n username = cred['userid']\r\n password = cred['password']\r\n\r\n return self.gateway.authenticateRequest(service_request, username,\r\n password, *args, **kwargs)", "title": "" }, { "docid": "628b99b2f98ba21b17a24d8113d8cc3d", "score": "0.55393386", "text": "def auth(self): # 
{{{\r\n try:\r\n authResult = self.userStore.authenticate(self.username, self.password,\r\n CONSUMER_KEY, CONSUMER_SECRET)\r\n except Errors.EDAMUserException, e:\r\n if e.parameter == \"username\":\r\n raise StandardError(\"wrong username. username=%s\" % self.username)\r\n if e.parameter == \"password\":\r\n raise StandardError(\"password is incorrect.\")\r\n raise StandardError(\"unknown errors.\")\r\n\r\n # set the auth result.\r\n self.user = authResult.user\r\n self.authToken = authResult.authenticationToken\r\n # authResult.expiration && currenttime is msec\r\n self.__setAuthExpiration(authResult)", "title": "" }, { "docid": "5580d09877bb09dbb8ca3a4ea9fa6d38", "score": "0.5534928", "text": "def authenticateWithUsernameAndPassword(username, password):", "title": "" }, { "docid": "24f2550e274db16c4442b6fe9459ca85", "score": "0.5534241", "text": "def authorize(self):\n data = cherrypy.request.json\n logger.debug(data)\n model.mongo_connect()\n\n try:\n user = model.User.objects(\n email=data['email'],\n fb_userId=data['fb_userId']).get()\n except:\n user = model.User(\n name=data['name'],\n email=data['email'],\n fb_userId=data['fb_userId'],\n fb_accessToken=data['fb_accessToken'])\n user.save()\n\n token = self.get_long_ttl_token(user.fb_accessToken)\n user.fb_accessToken = token['access_token']\n user.updated_at=dt.datetime.now()\n user.save()\n return {'status': 200, 'statusText': 'OK'}", "title": "" }, { "docid": "5abba4767560efa5877991970c1fc207", "score": "0.55319107", "text": "def set_token(self):\n super(UserPassCredentials, self).set_token()\n try:\n token = self._context.acquire_token_with_username_password(\n self.resource,\n self.username,\n self.password,\n self.id\n )\n self.token = self._convert_token(token)\n except adal.AdalError as err:\n raise_with_traceback(AuthenticationError, \"\", err)", "title": "" }, { "docid": "53525b97c4d67f3ad1d13f309b9bc4e3", "score": "0.55269766", "text": "async def authenticate(\n self, conn: HTTPConnection\n ) -> Tuple[bool, Optional[User]]:\n authorization: str = conn.headers.get(\"Authorization\")\n if not authorization:\n return False, None\n scheme, credentials = get_authorization_scheme_param(authorization)\n if not (authorization and scheme and credentials):\n raise AuthenticationError(\"Not authenticated\")\n if scheme.lower() != \"token\":\n raise AuthenticationError(\"Invalid authentication credentials\")\n\n token = await Token.get(\n key=credentials,\n is_active=True,\n expires={\"$not\": {\"$lt\": get_now()}},\n )\n if token is None:\n return False, None\n conn.scope[\"token\"] = token\n\n user = await User.get(id=token.user_id)\n if user is None:\n return False, None\n\n return True, user", "title": "" }, { "docid": "7875bd53a1ab53302fbc8fb0fa17e8a1", "score": "0.5526969", "text": "def auth():\n username = f.request.form.get('username')\n password = f.request.form.get('password')\n if username == 'test' and password == 'test':\n f.session['authenticated'] = True\n return f.jsonify({'message': 'Success'})\n return make_error(401, 'The provided credentials were not accepted.')", "title": "" }, { "docid": "223a8e49517a56040c4756bae5f195c1", "score": "0.5523621", "text": "def authenticate(self):\n req = requests.post(BASE_API_URL + 'users',\n headers={'User-Agent': self._user_agent_string,\n 'Accept-Encoding': 'gzip',\n 'Content-Type': 'application/json; charset=UTF-8'},\n json={'client_id': '81e8a76e-1e02-4d17-9ba0-8a7020261b26',\n 'device_uid': self._device_uid,\n 'location': self._location}\n )\n\n if self._debug:\n 
print(\"authenticate: \" + req.text)\n\n if req.status_code == requests.codes.ok:\n self._access_token = req.json()['access_token']\n self._distinct_id = req.json()['distinct_id']\n self._expiration_date = req.json()['expiration_date']\n self._refresh_token = req.json()['refresh_token']\n\n time.sleep(5) # Workaround for certain actions being disabled for x seconds after authentication\n\n return True\n\n else:\n raise AuthenticationError(\"Server returned {}\".format(req.status_code))", "title": "" }, { "docid": "406417d7d6db3d75b3022de57324ccae", "score": "0.55174494", "text": "def authenticate_credentials(self, payload):\r\n username = jwt_get_username_from_payload(payload)\r\n payload_user_id = jwt_get_user_id_from_payload_handler(payload)\r\n\r\n if not username:\r\n msg = _('Invalid payload.')\r\n raise exceptions.AuthenticationFailed(msg)\r\n\r\n user = get_user_from_localized_databases(username)\r\n if not user:\r\n msg = _('Invalid signature.')\r\n raise exceptions.AuthenticationFailed(msg)\r\n\r\n if user.id != payload_user_id:\r\n msg = _('Invalid signature.')\r\n raise exceptions.AuthenticationFailed(msg)\r\n\r\n # Only for Athlete users who have a customer model extension\r\n if user.user_type == USER_TYPE_ATHLETE and hasattr(user.athleteuser, 'customer'):\r\n if user.athleteuser.customer.payment_status == 'locked_out':\r\n msg = _('User account has been locked out.')\r\n raise exceptions.AuthenticationFailed(msg)\r\n\r\n if not user.is_active:\r\n msg = _('User account is disabled.')\r\n raise exceptions.AuthenticationFailed(msg)\r\n\r\n orig_iat = int(payload['orig_iat'])\r\n jwt_last_expired = int(format(user.jwt_last_expired, 'U'))\r\n\r\n if orig_iat < jwt_last_expired:\r\n msg = 'Users must re-authenticate after logging out.'\r\n raise exceptions.AuthenticationFailed(msg)\r\n\r\n return user", "title": "" }, { "docid": "309548384ab227af2689aab170debf65", "score": "0.5511866", "text": "def authenticate(func):\n def auth_func(self, request, *args, **kwargs):\n user = request.getUser()\n password = request.getPassword()\n if auth_user and auth_password:\n if user != auth_user or password != auth_password:\n request.setResponseCode(http.UNAUTHORIZED)\n realm = 'basic realm=\"IRC Viewer\"'\n request.setHeader('WWW-authenticate', realm)\n return ''\n return func(self, request, *args, **kwargs)\n return auth_func", "title": "" }, { "docid": "361e9d678474c55e3859ea0f72a33860", "score": "0.55077827", "text": "async def authenticate(self):\n\n async with aiohttp.ClientSession(loop=asyncio.get_event_loop()) as session:\n params = {'username': self.username, 'password': self.password}\n\n async with session.post(urljoin(self.http_host, '/access_token/'), data=params) as response:\n\n data = await self.parse_answer(response)\n\n self.access_token = data['access_token']\n self.token_expire_time = time.time() + round(data['expires_in'] / 2)", "title": "" }, { "docid": "0bc067ee8a783d3b0b2c7bb66215812f", "score": "0.5504213", "text": "def auth_api_key(request, username, api_key):\n backend_cls = get_registered_auth_backend('bugzilla')\n if not backend_cls:\n return SERVICE_NOT_CONFIGURED\n\n backend = backend_cls()\n\n try:\n user = backend.authenticate_api_key(username, api_key)\n if user is None:\n return LOGIN_FAILED\n\n # The user will need to visit Bugzilla to obtain an API key.\n except BugzillaAPIKeyNeededError as e:\n return WebAPIResponseError(request, LOGIN_FAILED, extra_params={\n 'bugzilla_api_key_needed': True,\n 'bugzilla_api_key_url': e.url,\n })\n\n # The user hasn't 
logged in via the HTML interface yet. This\n # error response should be interpreted by clients to direct\n # them to log in to the web site.\n except WebLoginNeededError:\n protocol = SiteConfiguration.objects.get_current().get(\n 'site_domain_method')\n domain = Site.objects.get_current().domain\n login_url = '%s://%s%saccount/login' % (\n protocol, domain, settings.SITE_ROOT)\n\n extra = {\n 'web_login_needed': True,\n 'login_url': login_url,\n }\n return WebAPIResponseError(request, LOGIN_FAILED,\n extra_params=extra)\n\n # Django housekeeping.\n user.backend = 'mozreview.bugzilla.auth.BugzillaBackend'\n return user", "title": "" }, { "docid": "6b56e4b546ef40d2647e632e8ae8fa1e", "score": "0.55041575", "text": "def authenticate(user_name, passwd):\n values = {'jsonrpc': '2.0',\n 'method': 'user.login',\n 'params': {\n 'user': user_name,\n 'password': passwd\n },\n 'id': '0'\n }\n result = request(zab_url, values)\n return result", "title": "" }, { "docid": "2808a461ee6ff39454d535466c75e494", "score": "0.5500222", "text": "def authenticate_user(self, data):\n\n response = self.client.post(\n self.register_url, data, format='json')\n token = response.data['user_info']['token']\n self.verify_user_registration(token)\n response = self.client.post(\n self.login_url, data, format='json'\n )\n token = response.data['token']\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + token)", "title": "" }, { "docid": "1b7773b7ca1b9d7d78ab6f5640c46fac", "score": "0.5497275", "text": "def authenticate(self):\n try:\n response = self.session.post(\n f\"{self.base_url}/thirdparty-access/v1/authenticate\",\n json={\n \"userName\": self.dvla_username.get(),\n \"password\": self.dvla_password.get(),\n },\n )\n response.raise_for_status()\n except requests.HTTPError as e:\n if e.response.status_code == 401:\n # likely the old password has already expired\n current_app.logger.exception(\"Failed to generate a DVLA jwt token\")\n\n self.dvla_password.clear()\n raise DvlaRetryableException(e.response.json()[0][\"detail\"]) from e\n\n self._handle_common_dvla_errors(e)\n\n return response.json()[\"id-token\"]", "title": "" }, { "docid": "38911b4d009ca9cc0006b20e0887961a", "score": "0.54871696", "text": "def authenticate(self):\n req = urllib2.Request(self.baseURL+\"authenticate\")\n base64string = base64.encodestring('{}:{}'.format(self.user, self.passwd))[:-1]\n req.add_header(\"Authorization\", \"Basic {:s}\".format(base64string))\n req.add_data(\"deviceID=\"+self.deviceID)\n #using a context manager to assure the closure of the resource\n with closing(urllib2.urlopen(req)) as handle:\n result = json.loads(handle.read())\n #save the results in self\n self.sequence = result['sequence']\n self.token = result['token']", "title": "" }, { "docid": "3bca741cbe8d892beec6a7576d0ae2f0", "score": "0.54845846", "text": "def login(self, user=None):\n response = super().login(user) # login the user\n self.client.credentials(HTTP_AUTHORIZATION=\"Token \" + (json.loads(response.content))['user']['token'])\n return response", "title": "" }, { "docid": "037d2e83fec351a4f4d08ae879f098e2", "score": "0.5480574", "text": "def authenticate_with_server(self, sock):\n # get username and password from user\n success = False\n while not success:\n self.username = util.get_user_input(c.USERNAME)\n # password = util.get_user_input(c.PASSWORD)\n password = getpass.getpass(c.PASSWORD)\n success = self._authenticate_with_server_helper(sock, self.username, password)\n print(c.SUCCESS_LOGIN_MSG)", "title": "" }, { "docid": 
"cf29447aa30a7c23d5de7b4e66b89874", "score": "0.546227", "text": "def login():\n\n body = validate({\n \"email\": field(\"email\"),\n \"password\": field(\"password\")\n }, request.get_json(force=True, silent=True))\n\n token = user_login(\n body[\"email\"],\n body[\"password\"],\n request.access_route\n )\n\n return jsonify({\"token\": token})", "title": "" }, { "docid": "a23e08804b2576b344a40e726f0b1301", "score": "0.54597896", "text": "def authenticate(self):\n\n req = {\n \"UserID\": self.userid,\n \"BrokerID\": self.brokerid,\n \"AuthCode\": self.auth_code,\n \"AppID\": self.appid\n }\n\n if self.product_info:\n req[\"UserProductInfo\"] = self.product_info\n\n self.reqid += 1\n self.reqAuthenticate(req, self.reqid)", "title": "" }, { "docid": "a47bc709ec1436652e0e796dd4eb5ac9", "score": "0.5444874", "text": "async def access_token(\n params: schemas.LoginParams = Body(\n ...,\n example={\n \"login\": \"[email protected]\",\n \"password\": \"admin123\"\n },\n ),\n db: AsyncSession = Depends(deps_auth.db_session)\n) -> Any:\n account_id = await help_auth.authenticate_user(db, params=params)\n return security.generate_token(account_id)", "title": "" }, { "docid": "48f96e3a22bc70926b0bdb477ad7c8b2", "score": "0.54406834", "text": "def authenticate_user(self, response, **kwargs):\n\n host = urlparse(response.url).hostname\n\n try:\n auth_header = self.generate_request_header(response, host)\n except SPNEGOExchangeError:\n # GSS Failure, return existing response\n return response\n\n log.debug(\"authenticate_user(): Authorization header: {0}\".format(\n auth_header))\n response.request.headers['Authorization'] = auth_header\n\n # Consume the content so we can reuse the connection for the next\n # request.\n response.content\n response.raw.release_conn()\n\n _r = response.connection.send(response.request, **kwargs)\n _r.history.append(response)\n\n log.debug(\"authenticate_user(): returning {0}\".format(_r))\n return _r", "title": "" }, { "docid": "054771558db3a32a633cc1eb092b3f52", "score": "0.5438407", "text": "def _login(self):\n\n resp = requests.post(\n self.login_api.get(\"url\"),\n params={\n \"account\": self.username,\n \"password\": self.password,\n self.name: self.id\n }\n )\n\n if \"success\" == resp.json().get(\"status\"):\n return True\n else:\n # fail to sign in\n raise AuthenticationError(\"Fail to sign in Zentao\")", "title": "" }, { "docid": "c0ae81abc38eebb06608c4d7e68fd9c0", "score": "0.5437802", "text": "def auth_user(email, password, client_id, scope, opener):\n access_url = \"https://oauth.vk.com/oauth/authorize?redirect_uri=https://oauth.vk.com/blank.html\" + \\\n \"&response_type=token&client_id={}&scope={}&display=wap\".format(client_id, \",\".join(scope))\n response = opener.open(access_url)\n doc = response.read()\n parser = FormParser()\n parser.feed(doc.decode(\"utf-8\"))\n parser.close()\n if not parser.form_parsed or parser.url is None or \"pass\" not in parser.params or \"email\" not in parser.params:\n raise RuntimeError(\"Something wrong\")\n parser.params[\"email\"] = email\n parser.params[\"pass\"] = password\n if parser.method == \"POST\":\n response = opener.open(parser.url, bytes(urllib.parse.urlencode(parser.params), encoding=\"utf-8\"))\n else:\n raise NotImplementedError(u\"Method '{}'\".format(parser.method))\n return response.read(), response.geturl()", "title": "" }, { "docid": "791d8965723f8ec0f6f29cf8cfe61efd", "score": "0.543549", "text": "def __authenticate(self, client_id, client_secret):\n from LuckySim_Logging import LuckySim_Logging as 
LS_Logging\n\n curl_data = {\n 'grant_type': 'client_credentials'\n } \n auth_data = (client_id,client_secret)\n response = self.requests.post(self.auth_url, data=curl_data, auth=auth_data)\n message:str = \"{client_id}: \"\n if response.status_code == 200:\n message = f\" Authenticated Successfully with status code {response.status_code}, saving __access_token\"\n LS_Logging.write_log(message, \"INFO\")\n self.__access_token = self.json.loads(response.text)['access_token']\n self.__token_type = self.json.loads(response.text)['token_type']\n self.__expires_in = self.json.loads(response.text)['expires_in']\n return\n elif 499<response.status_code<600: # Server Errors \n message = f\" Authentication ran into a server error with status code {response.status_code}\"\n elif 399<response.status_code<500: # Client Errors\n message = f\" Authentication ran into a client error with status code {response.status_code}\"\n elif 299<response.status_code<400: # Client Errors\n message = f\" Authentication ran into a redirection error with status code {response.status_code}\"\n elif 200<response.status_code<300: # Success Outside of norm\n ##TODO: Change message, this could leak access token currently. Potentially deal with as windows informational error, security catalog should be access protected\n message = f\" Authenticated Successfully with status code {response.status_code}; /nAttempting to return token ________________________ /n{response.text} /n ________________________\"\n LS_Logging.write_auth_log(message, \"WARN\")\n self.__access_token = self.json.loads(response.text)['access_token']\n self.__token_type = self.json.loads(response.text)['token_type']\n self.__expires_in = self.json.loads(response.text)['expires_in']\n return\n elif 99<response.status_code<200: # Informational Errors\n message = f\" Authentication returned Information instead of Success with status code {response.status_code} /n{response.text}\"\n \n LS_Logging.write_log(message, \"ERROR\")\n raise self.Invalid_Response(text=f\"Please check the security Event Log, {message}\", status_code = response.status_code, expected_status_code = 200, expected_response = \"access token was expected as a response\")\n return", "title": "" }, { "docid": "c0610100e1b7091cbcd3a7510d8e1105", "score": "0.5435132", "text": "async def authenticate_if_needed(self):\n\n if not self.is_authenticated():\n await self.authenticate()", "title": "" }, { "docid": "7a937355f51d419bf83af72e411ec428", "score": "0.5430263", "text": "def _get_authentication_key(self):\n return self.__authentication_key", "title": "" }, { "docid": "028d52efb7c412359b160f8e1d54f5b3", "score": "0.5425113", "text": "def authorized_user():\n api = API()\n api.session.headers = pack_headers_with_authorization(SALES_PLAN_AFF)\n return api", "title": "" }, { "docid": "7af21654fe21dbab64d7cd6d27a5f9d2", "score": "0.542008", "text": "def authenticate(self):\n req = {\n \"UserID\": self.userid,\n \"BrokerID\": self.brokerid,\n \"AuthCode\": self.auth_code,\n \"AppID\": self.appid\n }\n\n if self.product_info:\n req[\"UserProductInfo\"] = self.product_info\n\n self.reqid += 1\n self.reqAuthenticate(req, self.reqid)", "title": "" }, { "docid": "bd70e37b107dbcae915d5c977e7eb8a6", "score": "0.5419856", "text": "def login(self, username, password, realm):\n req = self.dynamic_request()\n req.realm = realm\n req.username = username\n req.password = password\n resp = self.dynamic_call(req)\n return lb.web.protobuf_json.protobuf_to_dict(resp)", "title": "" }, { "docid": 
"f797dce6f20d6a4fd45c4023f27000b5", "score": "0.5418982", "text": "def login():\n data = request.get_json()\n username = data['username']\n password = data['password']\n result = user_class.login(username, password)\n return result", "title": "" }, { "docid": "40d2266655fab3780de088a54dcfbcb0", "score": "0.54165524", "text": "def handle(self, *args, **options):\n user = User.objects.get(username=options['username'])\n backend = options['backend']\n c = user.profile.get_credential(backend)\n self.stdout.write('{}\\n'.format(c.get_password()))", "title": "" }, { "docid": "678d2c9b7845fe1205bf7ae81c087654", "score": "0.54162633", "text": "def login_access_token(\n db: Session = Depends(deps.get_db), form_data: OAuth2PasswordRequestForm = Depends()\n) -> Any:\n user = crud.user.authenticate(\n db, email=form_data.username, password=form_data.password\n )\n if not user:\n raise HTTPException(\n status_code=400, detail=\"Incorrect email or password\")\n elif not crud.user.is_active(user):\n raise HTTPException(status_code=400, detail=\"Inactive user\")\n access_token_expires = timedelta(\n minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)\n return {\n \"access_token\": security.create_access_token(\n user.id, expires_delta=access_token_expires\n ),\n \"token_type\": \"bearer\",\n }", "title": "" }, { "docid": "ae79c2db1290005f4269dc7136ed388c", "score": "0.5412705", "text": "def process_request(self, request):\n view_func = resolve(request.path)[0]\n if view_func in DJANGO_VIEW_AUTH_WHITELIST:\n return\n\n # AuthenticationMiddleware is required so that request.user exists.\n if not hasattr(request, 'user'):\n raise exceptions.ImproperlyConfigured(\n \"The Django remote user auth middleware requires the\"\n \" authentication middleware to be installed. Edit your\"\n \" MIDDLEWARE_CLASSES setting to insert\"\n \" 'django.contrib.auth.middleware.AuthenticationMiddleware'\"\n \" before the SpnegoUserMiddleware class.\")\n\n if 'HTTP_AUTHORIZATION' in request.META:\n type, authstr = request.META['HTTP_AUTHORIZATION'].split(' ', 1)\n\n if type == 'Negotiate':\n try:\n result, context = kerberos.authGSSServerInit('HTTP')\n if result != 1:\n return\n\n gssstring = ''\n r = kerberos.authGSSServerStep(context, authstr)\n if r == 1:\n gssstring = kerberos.authGSSServerResponse(context)\n request.META['GSS-String'] = 'Negotiate %s' % gssstring\n else:\n kerberos.authGSSServerClean(context)\n return\n\n username = kerberos.authGSSServerUserName(context)\n kerberos.authGSSServerClean(context)\n\n # In Trusted knox proxy, Hue must expect following:\n # Trusted knox user: KNOX_PRINCIPAL\n # Trusted knox proxy host: KNOX_PROXYHOSTS\n if 'desktop.auth.backend.KnoxSpnegoDjangoBackend' in AUTH.BACKEND.get():\n knox_verification = False\n principals = self.clean_principal(KNOX.KNOX_PRINCIPAL.get())\n principal = self.clean_principal(username)\n if principal.intersection(principals):\n # This may contain chain of reverse proxies, e.g. 
knox proxy, hue load balancer\n # Compare hostname on both HTTP_X_FORWARDED_HOST & KNOX_PROXYHOSTS+HUE_LB.\n # Both of these can be configured to use either hostname or IPs and we have to normalize to one or the other\n req_hosts = self.clean_host(request.META['HTTP_X_FORWARDED_HOST'])\n allowed_hosts = self.clean_host(KNOX.KNOX_PROXYHOSTS.get() + HUE_LB_HOSTS)\n if req_hosts.intersection(allowed_hosts):\n knox_verification = True\n else:\n access_warn(request, 'Failed to verify provided host %s with %s ' % (req_hosts, allowed_hosts))\n else:\n access_warn(request, 'Failed to verify provided username %s with %s ' % (principal, principals))\n # If knox authentication failed then generate 401 (Unauthorized error)\n if not knox_verification:\n request.META['Return-401'] = ''\n return\n\n if request.user.is_authenticated:\n if request.user.username == self.clean_username(username, request):\n return\n\n user = authenticate(username=username, request=request)\n if user:\n request.user = user\n login(request, user)\n knox_login_headers(request)\n msg = 'Successful login for user: %s' % request.user.username\n else:\n msg = 'Failed login for user: %s' % request.user.username\n request.audit = {\n 'operation': 'USER_LOGIN',\n 'username': request.user.username,\n 'operationText': msg\n }\n access_warn(request, msg)\n return\n except:\n LOG.exception('Unexpected error when authenticating against KDC')\n return\n else:\n request.META['Return-401'] = ''\n return\n else:\n if not request.user.is_authenticated:\n request.META['Return-401'] = ''\n return", "title": "" }, { "docid": "e2d7f603d2084fd617a646fae6406637", "score": "0.5411674", "text": "async def authenticate_user(self):\n username = self.get_body_argument(\"username\")\n password = self.get_body_argument(\"password\")\n\n user = await self.db.get_user_by_username(username)\n\n if not user:\n self._incorrect_auth()\n return None\n\n password_matches = await user.check_password(password)\n\n if not password_matches:\n self._incorrect_auth()\n return None\n\n return user", "title": "" }, { "docid": "5afd1a6c22095dbfc6f867c518cea5b7", "score": "0.54114705", "text": "def authenticate(scope):\r\n scope.get('__connection__').app_authenticate()\r\n return True", "title": "" }, { "docid": "d39a79640611d2065a68d5df1a3f528e", "score": "0.54095894", "text": "def login_api(request):\n username = request.data.get(\"username\")\n password = request.data.get(\"password\")\n\n if username is None or password is None:\n return Response({'error': 'Please provide both username and password'}, status=HTTP_400_BAD_REQUEST)\n user = authenticate(username=username, password=password)\n\n if not user:\n return Response({'error': 'Invalid Credentials'}, status=HTTP_404_NOT_FOUND)\n token, _ = Token.objects.get_or_create(user=user)\n return Response({'token': token.key}, status=HTTP_200_OK)", "title": "" }, { "docid": "87c36a72a6f01685e38c3bb12b2a962d", "score": "0.54078496", "text": "def login(self):\r\n res = self.sess.get(self.LOGIN_URL)\r\n execution = re.search(\r\n 'name=\"execution\" value=\"(.*?)\"', res.text).group(1)\r\n res = self.sess.get(\r\n url='https://zjuam.zju.edu.cn/cas/v2/getPubKey').json()\r\n n, e = res['modulus'], res['exponent']\r\n encrypt_password = self._rsa_encrypt(self.password, e, n)\r\n\r\n data = {\r\n 'username': self.username,\r\n 'password': encrypt_password,\r\n 'execution': execution,\r\n '_eventId': 'submit',\r\n \"authcode\": \"\"\r\n }\r\n res = self.sess.post(url=self.LOGIN_URL, data=data)\r\n # check if login successfully\r\n 
if '统一身份认证' in res.content.decode():\r\n raise LoginError('登录失败,请核实账号密码重新登录')\r\n print(\"统一认证平台登录成功~\")\r\n return self.sess", "title": "" }, { "docid": "5e4b3735e586c1579e6d2879ea5fbd9c", "score": "0.5407477", "text": "def login():\n req = request.get_json(force=True)\n username = req.get('username', None)\n password = req.get('password', None)\n user = guard.authenticate(username, password)\n ret = {'access_token': guard.encode_jwt_token(user)}\n return ret, 200", "title": "" }, { "docid": "1f8003a2b9879d5242b97391450ee941", "score": "0.53951615", "text": "def provide_auth(self, service, key, fsid, auth_supported, public_address):\n conversation = self.conversation(scope=service)\n opts = {\n 'radosgw_key': key,\n 'fsid': fsid,\n 'auth': auth_supported,\n 'ceph-public-address': public_address,\n }\n conversation.set_remote(**opts)", "title": "" }, { "docid": "6657400a49de239be8620e7c4d17725e", "score": "0.53904474", "text": "def __call__(user_id, key, secure=True):", "title": "" }, { "docid": "f95ebeb61009314e64b50af14b0e6a74", "score": "0.5390119", "text": "def login(self, user: User) -> str:", "title": "" }, { "docid": "b60ee405bb1d0691c14d525628d90d43", "score": "0.5387355", "text": "def on_auth_accepted(self, *p):\n self.authenticated = True", "title": "" }, { "docid": "b60ee405bb1d0691c14d525628d90d43", "score": "0.5387355", "text": "def on_auth_accepted(self, *p):\n self.authenticated = True", "title": "" }, { "docid": "c60d37c5cd234da00e09e816e69c677a", "score": "0.53861725", "text": "def two_factor_authenticate(client, validate=True):\n sms_sender = SmsSenderFactory.createSender(\"test\")\n json_data = '{\"email\": \"[email protected]\", \"password\": \"password\"}'\n response = client.post(\n \"/login\", data=json_data, headers={\"Content-Type\": \"application/json\"}\n )\n assert b'\"code\": 200' in response.data\n\n if validate:\n code = sms_sender.messages[0].split()[-1]\n response = client.post(\n \"/tf-validate\", data=dict(code=code), follow_redirects=True\n )\n assert response.status_code == 200", "title": "" }, { "docid": "4825dd6e90bfe391941bd2ba9401e44e", "score": "0.5385413", "text": "def authenticate_credentials(self, payload):\n\n username = self.jwt_get_username_from_payload(payload)\n\n if not username:\n msg = _('Invalid payload.')\n raise exceptions.AuthenticationFailed(msg)\n\n try:\n User = get_user_model()\n user = User.objects.get_by_natural_key(username)\n except User.DoesNotExist:\n msg = _('Invalid token.')\n raise exceptions.AuthenticationFailed(msg)\n\n if not user.is_active:\n msg = _('User account is disabled.')\n raise exceptions.AuthenticationFailed(msg)\n\n return user", "title": "" }, { "docid": "ad6c9eb71c4d72d2f56a15db7aefee9e", "score": "0.5379026", "text": "async def authorize(request):\n user = await session.load_user(request)\n if user and user.is_super:\n return user\n\n raise ResponseRedirect('/')", "title": "" }, { "docid": "b97b19f602cee16313844c5bf396740d", "score": "0.53740185", "text": "def auth_view(request):\n username = request.POST.get('username', '')\n password = request.POST.get('password', '')\n user = auth.authenticate(username=username, password=password)\n\n if user is not None:\n auth.login(request, user) \n return HttpResponseRedirect('/vms/users/dashboard') #CHANGE THIS!! 
-- SHOULD WORK ACCORDING TO USER\n else:\n return HttpResponseRedirect('/vms/')", "title": "" }, { "docid": "03ec91f56792a0f886f5b908d64f9fdc", "score": "0.5373271", "text": "async def authorize(request):\n required_fields = ['email', 'password']\n common.validate_fields(required_fields, request.json)\n password = bytes(request.json.get('password'), 'utf-8')\n auth_info = await auth_query.fetch_info_by_email(\n request.app.config.DB_CONN, request.json.get('email'))\n if auth_info is None:\n raise ApiUnauthorized(\"No user with that email exists\")\n hashed_password = auth_info.get('hashed_password')\n if not bcrypt.checkpw(password, hashed_password):\n raise ApiUnauthorized(\"Incorrect email or password\")\n token = common.generate_auth_token(\n request.app.config.SECRET_KEY,\n auth_info.get('email'),\n auth_info.get('public_key'))\n return json(\n {\n 'authorization': token\n })", "title": "" }, { "docid": "ec290e171ac5642b9daa613cf3933029", "score": "0.5373211", "text": "def _obtainauthtoken(self):\n self.login()\n self.gettoken()", "title": "" }, { "docid": "0f2a5efda9828337f2b3c4087c405a32", "score": "0.53669846", "text": "def login():\n # get payload json\n payload = request.get_json()\n # all to lowercase\n # payload['email'] = payload['email'].lower()\n payload['username'] = payload['username'].lower()\n\n try:\n # try to find user\n user = models.User.get(models.User.username == payload['username'])\n # if you find the User model convert in to a dictionary so you can edit and jsonify it\n user_dict = model_to_dict(user)\n # check bcrypt hash password\n if(check_password_hash(user_dict['password'], payload['password'])):\n # delete the password since the client doesn't need it\n del user_dict['password']\n # set up the session\n login_user(user)\n # return success response\n return jsonify(data=user_dict, status={\"code\": 200, \"message\": \"Login Success\"})\n else:\n # return fail response\n return jsonify(data={}, status={\"code\": 401, \"message\": \"Username or password incorrect\"})\n except models.DoesNotExist:\n # return fail response\n return jsonify(data={}, status={\"code\": 401, \"message\": \"Username or password incorrect\"})", "title": "" }, { "docid": "dc62e51cc1459b82101eac0506cfa6af", "score": "0.5357784", "text": "def authenticate(self, request, username, password):\n raise Exception", "title": "" } ]
008c164f15999607aa3ddeb5d7c9d707
The manifest is checked first for the version.
[ { "docid": "b20276b1c591ed526c8d07c5e39e1cfa", "score": "0.66188157", "text": "def test_version_from_manifest(self):\n bundle = self.mkbundle('in', output='out-%(version)s')\n self.env.manifest.version = 'manifest'\n self.env.versions.version = 'versions'\n assert bundle.get_version() == 'manifest'\n assert bundle.resolve_output() == self.path('out-manifest')", "title": "" } ]
[ { "docid": "be2504fb1e6538c36389418206ea740c", "score": "0.7081394", "text": "def getManifest():", "title": "" }, { "docid": "3a57e0dad785654907e4bb733f8d9293", "score": "0.7056929", "text": "def manifest(self):", "title": "" }, { "docid": "71afad78257634cc5cd7ea6bcb4b41e3", "score": "0.6671", "text": "def manifest_version(self):\n return self._manifest_version", "title": "" }, { "docid": "12d8ad188cc03ef07f23bb0bb8ef04f5", "score": "0.6356508", "text": "def initiate_manifest(self):\n self._manifest = Manifest(self._source_manifest_file)\n self._manifest.validate_manifest()", "title": "" }, { "docid": "821631422134b942683a9dc7f6623c68", "score": "0.63341653", "text": "def _GetNewestManifestVersion(self):\n full_version = self._GetNewestFullVersion()\n return None if full_version is None else full_version.split('-')[1]", "title": "" }, { "docid": "af85e348ff864d346e041605a96a1b39", "score": "0.6297025", "text": "def is_manifest_v2(check):\n manifest_file = get_manifest_file(check)\n if not file_exists(manifest_file):\n return False\n try:\n with open(manifest_file) as f:\n manifest = json.loads(f.read())\n except JSONDecodeError as e:\n raise Exception(\"Cannot decode {}: {}\".format(manifest_file, e))\n\n return manifest.get(\"manifest_version\") == \"2.0.0\"", "title": "" }, { "docid": "37eed5d14fade73931013ce0524287b4", "score": "0.6283921", "text": "def sync_manifest(self):\r\n self._name_check()\r\n if self.manifest:\r\n headers = self._make_headers()\r\n headers['Content-Length'] = \"0\"\r\n response = self.container.conn.make_request(\r\n 'PUT', [self.container.name, self.name], hdrs=headers,\r\n data='')\r\n response.read()\r\n if response.status < 200 or response.status > 299:\r\n raise ResponseError(response.status, response.reason)", "title": "" }, { "docid": "f9074eed2f403ee3c1dd9d96735da47b", "score": "0.6207176", "text": "def _SetChromeVersionIfApplicable(self, manifest):\n manifest_dom = minidom.parse(manifest)\n elements = manifest_dom.getElementsByTagName(lkgm_manager.CHROME_ELEMENT)\n\n if elements:\n chrome_version = elements[0].getAttribute(\n lkgm_manager.CHROME_VERSION_ATTR)\n logging.info(\n 'Chrome version was found in the manifest: %s', chrome_version)\n # Update the metadata dictionary. This is necessary because the\n # metadata dictionary is preserved through re-executions, so\n # SyncChromeStage can read the version from the dictionary\n # later. This is easier than parsing the manifest again after\n # the re-execution.\n self._run.attrs.metadata.UpdateKeyDictWithDict(\n 'version', {'chrome': chrome_version})", "title": "" }, { "docid": "f28b80c883c7cd7e5fbb614b9dc2a24b", "score": "0.611276", "text": "def load_manifest(self):\n apktool = Apktool(self.tools[\"apktool\"])\n apktool_dir = apktool.extract_resources(self.original_apk\n , self.output_dir)\n\n manifest_path = join(apktool_dir, \"AndroidManifest.xml\")\n if not isfile(manifest_path):\n quit(\"Couldn't find a manifest file at \" + manifest_path + \". 
\"\n \"Did ManifestParser worked properly?\")\n self.manifest = Manifest(manifest_path)", "title": "" }, { "docid": "42624c5727c8e6092acf47738dc31a04", "score": "0.61027455", "text": "def GetNextManifest(self):\n return self._run.config.manifest", "title": "" }, { "docid": "cd28a0ae6268b667d9aafaa780f935bc", "score": "0.60816497", "text": "def manifest(self):\n if not self._manifest:\n with open(self.manifest_path) as man:\n self._manifest = json.load(man)\n return self._manifest", "title": "" }, { "docid": "2148096ac2c786c8dfd0d7d43c017a5b", "score": "0.6077235", "text": "def getManifest(self, version=None):\n\t\tversion = version or self.target_version\n\n\t\tif version == self.manifestVersion:\n\t\t\tlogging.debug(\"Reusing manifest from cache.\")\n\t\t\treturn self.manifest\n\t\t#endif\n\n\t\tproperties = {\n\t\t\t\"gameID\": self.GAME_ID,\n\t\t\t\"version\": version\n\t\t}\n\n\t\t# First download the hash\n\t\thashURL = self.BASE_URL + self.HASH_URL.format(**properties)\n\n\t\tconn = self._getURL(hashURL, \"hash file (\" + hashURL + \")\", \"patch server\")\n\n\t\tproperties[\"hash\"] = conn.read().strip().decode(\"utf8\")\n\n\t\tlogging.debug(\"Hash downloaded.\")\n\n\t\t# Now download the manifest\n\t\tmanifestURL = self.BASE_URL + self.MANIFEST_URL.format(**properties)\n\n\t\tconn = self._getURL(manifestURL, \"manifest file (\" + manifestURL + \")\", \"patch server\")\n\t\t\n\t\tmanifest = conn.read()\n\t\tmanifest = zlib.decompress(manifest)\n\t\t# TODO: handle zlib errors\n\n\t\tlogging.debug(\"Manifest decompressed.\")\n\n\t\tmanifest = json.loads(manifest.decode(\"utf8\"))\n\n\t\t# Decode filenames.\n\t\tfiles = manifest[\"files\"]\n\t\tkeys = list(files.keys())\n\t\tencoding = manifest[\"filepath_encoding\"]\n\t\tfor key in keys:\n\t\t\t# Decode the filename.\n\t\t\tfilename = os.path.join(*base64.b64decode(key).decode(encoding).split(\"\\\\\"))\n\t\t\tfiles[filename] = files[key]\n\t\t\tdel files[key]\n\t\t#endfor\n\n\t\tself.manifest = manifest\n\t\tself.manifestVersion = version\n\n\t\treturn manifest", "title": "" }, { "docid": "fdbb77faef1355978c446b4c2b489706", "score": "0.6004611", "text": "def manifest_version_policy(self):\n return self._manifest_version_policy", "title": "" }, { "docid": "f48b972c3e6fa1c6def1245d57374668", "score": "0.5976323", "text": "def getLatestVersion():", "title": "" }, { "docid": "27bc21c24509697df24ffa8d126dbe9b", "score": "0.5962481", "text": "def manifest(self) -> str:\n return pulumi.get(self, \"manifest\")", "title": "" }, { "docid": "4f5958f432931fc88383d4f18ef325b5", "score": "0.5945054", "text": "def ForceVersion(self, version):\n logging.PrintBuildbotStepText(version)\n return self.manifest_manager.BootstrapFromVersion(version)", "title": "" }, { "docid": "eb951907b7148af6bbd4d1343860ece0", "score": "0.5932469", "text": "def _SetAndroidVersionIfApplicable(self, manifest):\n manifest_dom = minidom.parse(manifest)\n elements = manifest_dom.getElementsByTagName(lkgm_manager.ANDROID_ELEMENT)\n\n if elements:\n android_version = elements[0].getAttribute(\n lkgm_manager.ANDROID_VERSION_ATTR)\n logging.info(\n 'Android version was found in the manifest: %s', android_version)\n # Update the metadata dictionary. This is necessary because the\n # metadata dictionary is preserved through re-executions, so\n # UprevAndroidStage can read the version from the dictionary\n # later. 
This is easier than parsing the manifest again after\n # the re-execution.\n self._run.attrs.metadata.UpdateKeyDictWithDict(\n 'version', {'android': android_version})", "title": "" }, { "docid": "232ba7db31625e5acf8858600cb1a2a3", "score": "0.5843536", "text": "def mini_manifest(self):\n if self.is_blocked():\n return {}\n # Platform \"translates\" back the mini-manifest into an app manifest and\n # verifies that some specific key properties in the real manifest match\n # what's found in the mini-manifest. To prevent manifest mismatch\n # errors, we need to copy those properties from the real manifest:\n # name, description and author. To help Firefox OS display useful info\n # to the user we also copy content_scripts and version.\n # We don't bother with locales at the moment, this probably breaks\n # extensions using https://developer.chrome.com/extensions/i18n but\n # we'll deal with that later.\n try:\n version = self.latest_public_version\n except ExtensionVersion.DoesNotExist:\n return {}\n manifest = version.manifest\n mini_manifest = {\n # 'id' here is the uuid, like in sign_file(). This is used by\n # platform to do blocklisting.\n 'id': self.uuid,\n 'name': manifest['name'],\n 'package_path': version.download_url,\n 'size': version.size,\n 'version': manifest['version']\n }\n if 'author' in manifest:\n # author is copied as a different key to match app manifest format.\n mini_manifest['developer'] = {\n 'name': manifest['author']\n }\n if 'content_scripts' in manifest:\n mini_manifest['content_scripts'] = manifest['content_scripts']\n if 'description' in manifest:\n mini_manifest['description'] = manifest['description']\n return mini_manifest", "title": "" }, { "docid": "ebc7dd4bc4696a6399206436a892a7cb", "score": "0.5836709", "text": "def manifest_version(self, manifest_version):\n\n self._manifest_version = manifest_version", "title": "" }, { "docid": "713a60895c46e36c43fd8682a20d4497", "score": "0.58316827", "text": "def test_manifest_load(self):\n\n self.test_manifest_save()\n\n manifest = Manifest('tests/fixtures/test-data', '.test_manifest.yaml')\n self.assertTrue(manifest.manifest is None)\n manifest.load()\n\n self.assertEqual(manifest['documents/Important Document 1.odt'], 'd460a36805fb460c038d96723f206b20')\n self.assertEqual(manifest['documents/Important Presentation.odp'], '1911ec839cedcbf00739a7d3447ec3a3')\n self.assertEqual(manifest['pictures/Picture #1.jpg'], '6eec850e32622c0e33bdae08ced29e24')\n self.assertEqual(manifest['documents/exclude_me.txt'], '2e7d8cb32bb82e838506aff5600182d1')\n self.assertEqual(len(manifest.manifest), 4)", "title": "" }, { "docid": "496b3f661d1c968ed90a380ba87a76c7", "score": "0.5823017", "text": "def load_manifest(url, version, manifest_name):\n manifest_raw = do_curl(f\"{url}/{version}/Manifest.{manifest_name}\")\n manifest = {}\n if not manifest_raw:\n raise Exception(f\"Unable to load manifest {manifest_name}\")\n\n try:\n lines = manifest_raw.splitlines()\n for idx, line in enumerate(lines):\n content = line.split('\\t')\n if content[0] == \"MANIFEST\":\n manifest['format'] = content[1]\n elif content[0] == \"version:\":\n manifest['version'] = content[1]\n elif content[0] == \"previous:\":\n manifest['previous'] = content[1]\n elif content[0] == \"minversion:\":\n manifest['minversin'] = content[1]\n elif content[0] == \"filecount:\":\n manifest['filecount'] = content[1]\n elif content[0] == \"timestamp:\":\n manifest['timestamp'] = content[1]\n elif content[0] == \"contentsize:\":\n manifest['contentsize'] = content[1]\n elif 
content[0] == \"includes\":\n if not manifest.get('includes'):\n manifest['includes'] = []\n manifest['includes'].append(content[1])\n elif len(content) == 4:\n if not manifest.get('files'):\n manifest['files'] = {}\n manifest['files'][content[3]] = content\n except Exception as _:\n raise Exception(f\"Unable to parse manifest {manifest_name} at line {idx+1}: {line}\")\n\n if not manifest.get('includes'):\n manifest['includes'] = []\n if not manifest.get('files'):\n raise Exception(f\"Invalid manifest {manifest_name}, missing file section\")\n\n return manifest", "title": "" }, { "docid": "65f3be03c64ac67438a9b50672a62107", "score": "0.5804959", "text": "def _get_blank_manifest():\n\treturn json.load(open(\n\t\t_make_lib_path(os.path.join(\"template\", \"manifest.json\"))))", "title": "" }, { "docid": "c661306f28c9455820bad7d29fa3020b", "score": "0.5796193", "text": "def manifest():\n\n return app.send_static_file('manifest.html')", "title": "" }, { "docid": "08eeb1b188816e1a567905b1cbc52d53", "score": "0.578094", "text": "def commit_manifest(self, jar):\n if not self._manifest.is_empty():\n jar.writestr(Manifest.PATH, self._manifest.contents())", "title": "" }, { "docid": "bf5656f760702399097383cac4782831", "score": "0.5743345", "text": "def package_version():\n print(about_object)\n sys.exit(exit_codes['EX_OK']['Code'])", "title": "" }, { "docid": "156794ff3e4731a6067af9c39bda7c8c", "score": "0.57320446", "text": "def version(app_name):\n app = get_app(app_name)\n\n version = app.check_version()\n if version is None:\n error('%s is not installed' % app_name)\n exit(1)\n\n click.echo(version)", "title": "" }, { "docid": "2ae057433fcb71f66ee625e81c20ad8d", "score": "0.5727477", "text": "def checkForManifest():\n manifestsFound = []\n for manifestType in [\"contest\", \"problem\"]:\n if os.path.isfile(os.path.join(os.getcwd(), f\".cpu.{manifestType}_manifest.json\")):\n manifestsFound.append(manifestType)\n return manifestsFound", "title": "" }, { "docid": "02227cdf587122dd06071fbf0acc0d56", "score": "0.5707524", "text": "def version(self):\n ...", "title": "" }, { "docid": "a9af5d678406fe76685718d14f9ff5bd", "score": "0.56881684", "text": "def update_package_version(self):\r\n return", "title": "" }, { "docid": "5a2cfe7aa3dad8cf05d812d9c91bd8d1", "score": "0.5687783", "text": "def get_manifest(distro, version):\n manifest_name = distro + \"-\" + version + \".xml\"\n\n r = requests.get(manifest_repo_url + \"/\" + manifest_name)\n\n if (r.status_code != 200):\n return None\n\n return r.text", "title": "" }, { "docid": "bc70101f916362310cdf1bee503e1fb1", "score": "0.5652744", "text": "def load_manifest(check_name):\n manifest_path = get_manifest_file(check_name)\n if file_exists(manifest_path):\n return json.loads(read_file(manifest_path).strip())\n return {}", "title": "" }, { "docid": "4eaadff3bf02541e47c2e97b322716af", "score": "0.5652739", "text": "def update_manifest(self):\n repositories = self._manifest.repositories\n downstream_jobs = self._manifest.downstream_jobs\n build_name = os.path.basename(self._dest_manifest_file)\n\n for repo in repositories:\n repo[\"branch\"] = self._new_branch\n repo[\"commit-id\"] = \"\"\n self.update_repositories_with_lastest_commit(repositories)\n\n for job in downstream_jobs:\n job[\"branch\"] = self._new_branch\n repo[\"commit-id\"] = \"\"\n self.update_repositories_with_lastest_commit(downstream_jobs)\n \n self._manifest.build_name = build_name\n self._manifest.validate_manifest()", "title": "" }, { "docid": "a0d3c0f7ed226fb05a9d2d78896692eb", 
"score": "0.562182", "text": "def GetNextManifest(self):\n assert self.manifest_manager, \\\n 'Must run Initialize before we can get a manifest.'\n assert isinstance(self.manifest_manager, lkgm_manager.LKGMManager), \\\n 'Manifest manager instantiated with wrong class.'\n assert self._run.config.master\n\n build_id = self._run.attrs.metadata.GetDict().get('build_id')\n logging.info('Creating new candidate manifest, including chrome version '\n '%s.', self._chrome_version)\n if self._android_version:\n logging.info('Adding Android version to new candidate manifest %s.',\n self._android_version)\n manifest = self.manifest_manager.CreateNewCandidate(\n android_version=self._android_version,\n chrome_version=self._chrome_version,\n build_id=build_id)\n if MasterSlaveLKGMSyncStage.external_manager:\n MasterSlaveLKGMSyncStage.external_manager.CreateFromManifest(\n manifest, build_id=build_id)\n\n return manifest", "title": "" }, { "docid": "3bdbe5280b6fd3be67be6831330a9cf6", "score": "0.56208766", "text": "def update_manifest_fields_from_latest_public_version(self):\n if self.is_blocked():\n raise BlockedExtensionError\n try:\n version = self.latest_public_version\n except ExtensionVersion.DoesNotExist:\n return\n if not version.manifest:\n return\n # Trigger icon fetch task asynchronously if necessary now that we have\n # an extension and a version.\n if 'icons' in version.manifest:\n fetch_icon.delay(self.pk, version.pk)\n\n # We need to re-extract the fields from manifest contents because some\n # fields like default_language are transformed before being stored.\n data = self.extract_manifest_fields(version.manifest)\n return self.update(**data)", "title": "" }, { "docid": "36ba8218d1cfb8535479f7e6a75eb018", "score": "0.5616086", "text": "def go_check():\n path_to_file = HERE / \"package.json\"\n with open(path_to_file) as dt:\n data_package = json.load(dt)\n new_version = data_package[\"version\"]\n return new_version", "title": "" }, { "docid": "f1f4df0526a4ea5ff9b53e0559316f68", "score": "0.56024456", "text": "def package_minifest(request):\n package_path, package_etag = get_package_info()\n\n manifest_content = json.dumps({\n 'description': 'Firefox Marketplace',\n 'developer': {\n 'name': 'Mozilla',\n 'url': 'http://mozilla.org',\n },\n 'icons': {\n '64': 
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAA2hpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMy1jMDExIDY2LjE0NTY2MSwgMjAxMi8wMi8wNi0xNDo1NjoyNyAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VSZWYjIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDowNDgwMTE3NDA3MjA2ODExODIyQUI1QzMyMDQyNjY5NSIgeG1wTU06RG9jdW1lbnRJRD0ieG1wLmRpZDo2MDE5RjlGQTIyQjgxMUUyQkUyM0JCNEZBMkI4QTY1RSIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDo2MDE5RjlGOTIyQjgxMUUyQkUyM0JCNEZBMkI4QTY1RSIgeG1wOkNyZWF0b3JUb29sPSJBZG9iZSBQaG90b3Nob3AgQ1M2IChNYWNpbnRvc2gpIj4gPHhtcE1NOkRlcml2ZWRGcm9tIHN0UmVmOmluc3RhbmNlSUQ9InhtcC5paWQ6RkI3RjExNzQwNzIwNjgxMTgyMkFBQUUyODlFQjEzQUMiIHN0UmVmOmRvY3VtZW50SUQ9InhtcC5kaWQ6MDQ4MDExNzQwNzIwNjgxMTgyMkFCNUMzMjA0MjY2OTUiLz4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz5kNw/pAAAWz0lEQVR42uRbe5Bf1V3/nnPu/f32kc3mBaEbSHaTJaUJJIBxKLYQKEIrThWswAAFO+UxCgp2+IOpY8fRGYs6tRXGBx2RqtMpAh1DEYujghBEHpJAQiAPFpJssiTZ7GY3m939Pe695/h9nd/vtwlQaBPrjL/Mzb2/+zv3nu/j832esyaEAMYYOCGfL/7AgMV3Gwdg8TvNY+QWXoDHuflD5+ABb+AZv//958OJIId5Pa4MX/0wcmPvAJvciec+5sxaPixee2LcKONNKoRJX8i1V8Z9GMB7fwrfu/L+48mwPa4ibGu7F5n9M7BuBrP4HTwekOB1gmd31EFj8WzjM4YO6MfzX8EtT3zjeJJ4XDS88rtbPjk9Xb9n5zPvXISUI9EJn1izzgqzThnhM6sZWLVFEO2iZg1qNvA1HoVqGj+n/3zv8+Vy8rUt1678j58qpD/+N5tXW2e+FRJz8d63R2Fy24gwy5oiJgnhFsysEiw7cw4kbQmas0G+DfMsphsgqxSw4/VxgKkMLDLridEix4OgnsO8s06Cnt65EIrwUlHAndt+7cyXflyGkx/nwcX3b5qFhP9BDuaOhNkCmByeOho74phQq/3I7NzFHThQmbVHmTAy6FHDAy+PgmfN+xmvOnRgmhnGz3l4vLD8wc0P4PC7B25eNfZRaf/INrzoLzadBcG8YgN8BUkl4IKvIYEjk+JpG5yYBkdZxZN2GMLEnCkCD5UjMHJpDP1uidkgYxHj8o6RI5BnPtqgQVnegpeber+9+fwTasM99752o0nMt501beRrUnK6qLF9e8dgYvMQ6jpRSFvRuzosgwNPXtoFJYS0j2FJTRgFB9VqDgd2osCIKYIye2oviubvOcw/51RY2DMbTTtAjlDPyd6LkCPE7xq6ffV9x92GF3xz493ogP7IJQYSdqqGHW6eFbDr2QG6UIZdix3P9NSNe6R9njI0Y2+IzkscGBRGIFBkcq+UQv/Fy9DnWbF78mvs25jxP86L8NXhr5wTjgvDJ31jw9eDtV+lqEKMoqNiZ1ut1GD/hj3obCpNR4XUz4YcysynZbt1fLY8xON/0Y6iATiEeA6SfBTIbIZMkJUcRosJ9E7iDLUMXbPgtDWnofMriSzoOfZvaAre31/z5raxu96f6fdleP7Zd8/3zt3gE/SqfX1ri7ndV1icuEAcG+Q0R6IqY9MAo5MSZpCh36s8C7fBI9CWFBC9UjUngnB8YSXKtLiMwLZKEQq9OFJPOQmJy1kPJTzKidBUzRz8dbgCfrfzUmGc2Dm5Czq720SgeM+zQFDT44f/FQZ2PukQbcYXfzu28Z7xD+WluyYGbzk8Z8k9Bqn0O3dDsctBhvjNbQq5SzXOEkTx2hu4oPI2fGb8UdgOFG4SYQLoEB1SLBaVeuZKki25J+mlkRBEcEW4B+8E7U5QfWFYB2vzXnh2Vp/E572jMLUHoY7GbEKGTOeQIAJsXlyWen8ZahHmjW5tRxd+z4fy0vUi/yVL3pSSASTaoVu0CB+Ls9PLJe8laROVBdwy8U+aKxNsE1a6Qw/LckkpDAV0cHjgdQkFlZL9W0m8SuwLHP/mcAyZDD1DskyQaUeODd/7G+Pr0Kmp0EIujOP8NvdMl8XvjoIa0+VhKq9e+V68HaPh/v4vzx6pHfpZxyHGkNTY4xZke8Qgwt/naHEJPZrDOdm7sCS8AylplLTH8BTHJlr08E66AjZ0rMJ3JDE0tEQvee+a6TehP3tdMIHvIE2T36KxFqddAoNwPiLphVKP2nTBwkdDgwRpJGYtKShI2JuqV885o++62dt2fm/iAxlGs7siBJ+Q1yTJkY5JagkShnzyExiWOFoEvF5b24k2JzBlZ2ZiUWT5vNt9HH51we0Sf0AHQqvPCOKhOz4Nj438JSz1W/GrjDNcTyANFjj7urDYCS8UC5lZ43NhMohGCY2WiSoYlfgvyX1xNU7wwAdCumrNteQA6GGCCUvNNyFDGk882g1q16AE1uQ72DkTJBmm+J+jgyCJzmd70i8elnPmXGNJLuGmyBvaot/eLC2X2sLIXHRN70vQthOU5Jr6GzzO4bMpwVnsFqEv18S4jRkN0jvt3I0faMMLl92S5nl2SbNME+nZKNEIHWY6h54wCaeHt9gm2WM6y8QSbAjSCX6/oHgR5tWmQLMFPR97PbtWhbXFy5AiRSlympA/sMp44vldfW4QluQTaNc4f5Y1mUXBEQqtKoY1jc9NZ9M/t3LJdaX3hXQ+/+RrzP63U36QoFSI
RDwmFFYTH5dTPR8Y3tfYrUicOBWKtdbkDGXOM0Ai1kI7Dv+Z3wM764vRXhMtGpopKI/H772wB8puguFO4YlgbPE/Tw6cIGs8h8Tr/Btwr1/FjJJSSLMNZn3RyNS48qplbry/99dhN9z33gwn6d2pVOOsXSI8Mg3qcDznuw7OMofhKru+4ZGNapSs0zmjJW3g37tgGla5bWyXtrX+D820myvBgoqNABKojMxP5oWMF+RN8IcvmGfhKb8ItocutVmxY+ubaSkjkuksYLrwd+Lr7jsW0neuL7ePj65g1QWpXphpSWnY5ix/R4/qh+Hr7nHodFUJP0byamIuSamQD6ylxIoTI1skJPAYzT7Jzm1LSKff0hKweaRW01cTewT4PXEM8Q6XwbeSdfCpsB99CMFZa2d1WFbNUEInCung3j6467+6j9GwzWqfX9VZsb59NvKL0itJOojFgoQXFPdp9gicl70FZxeb2EFRZcNpo6acVMawjRtJPTnTZJUL7EkLoQFlr/bShDiVks5TruxVu5IrW6SBeDIY9yiNnJOMw5/YR2Bjx7kYppbDkO9A3VjhE+FCPEPXJzDicLprnqpXrsV575/JsPdXfWnPA1DCOyX8Tzxkit+ReIy5CWuRtOL4N2GWxihzCmXt1rCAjBVfELMuowIwLdkuCdQTk1o5BSuCIJopLGF6J7FZTYQJyaW2ONdvhNWV1xiAGSKwwGpLiooC6ujUMhROhjde8NdcNTWD4Vv/DeO1v0iqOqO2l0iFR54XxGFY5NS1aJaZ5ZgbODaTIDgG0xh0YKRVYyUeN8IwfuHSmOYw0iggRAQ2I81AC8nCyHEBwV4LKG+1puQyrWAhOU9xmpChjo4moYwQpR4wfJLTqwVzPtz2NHnreqJZdR+l5DYyqJ7WISZpTiKctRsdlLampD1FCbkyThMm4nWNVkdSGRrWKvH5/JYCth7IedrlCxJYe7ZrJCM0NkCISAdN1LBoUWfjxS+QcCLTOaGMzAMT74Kuqdoi08AJqUtqHZcp7cj92fiGlzWDL86gQkG05cWpcANOG4rcx3HiqHkC27BZdkyxb6dx02mDku/bpiC2D/oGs/TZMZLDlgHfRAGPM3w4pz0EfRc7OEUXx31CCDkyI40RQZyTuciPUCqs8zMUirC66aW9X8IQo0FGG2+sNa8Ei5GS0xFmNeQYgWVk1katspCEcJqYyKvWCnjp7RxisyMeGwZzqNSDzGOoXlVB03NOTNZaPTtBIDOkyqDCQ3xGo9vL9LAAiR5UAYVs9N7LWhlenHNVEjiWNtrDDO1EiniFsnXChEuksLfadbWc/jUbHDJhwqGEtPvcazmXgi6GGz3o3vqNdTCYYplSyuM5hlKyQyal3SKjiUxkmis4GhcbK9r+cyZpZGmGsz8JURhmWxkO88greFfWF4izsdBCvG12aLiCCSJNLtytb/SpnAoEtHiAcgkm6wBDhwOjxqjPjgf923ckwHhNPDZQ0yFN8W7OjJnItFVza1m5oXhveE6r/Xzf6Io60S1LKPBKhl/QquF2SnWqSbcQxFD1ymSEuMCnMaFVBlkAKpxYKSHRDDOyeXQaGzbXYjiWhATfnWrWpWiEDRuxmEA/asuBnwuJncE0Z2lBHKLU2lbnk44NURDpJGRy0xQFUjPdsU82qxmWfNFJYslsO34Za9gXGYOEFIGziSGI4GQFZ0YzKf49EkZ+sYQOBANpHhwMjRYscc6H8bUXre3k6uepp6cbfa19YwXUUOjl1LKmKAQRaKkiY9NSJ0m1Pzkp1iKtalDtbIyakEQRr/Cn5KuedMb1qo4WDWNkRClUbZtGCMkM7FEQiutgAnllTHvHEn89PxMYf5jPtqcwsDPjeMRawXG9PQ5OPT2BU5Y6WLo4EWTouwd25KwCn4jjMngOrLWC7ZXhyz0wcaBcB7PphYbpRTuUXABRazu0NPXQyvAU5cq1tFOIt02nFWt1Sw081l7z5Y4dnNVCQWYzHI/kzZQyvru34Hq2pMfqT7eh9iltC7D6ghJWSM3fhgYxmbCCu0D2Se0hcmaNRrYIn5IgPltpAHJC09INFUqE9mrSrhrOj7RCmtdJpjE+25ZmhCRCnm3HNOwl/iapDcVHHmeDahe1liD0UECU/k1MFJQs8XNUQGx9pQb1ilRkaZuD9tRjGijvnJq2nDURcgIZed3zez2nXRm7OeKPvbsSaLUzwq0dTVuNenXygTXSsOcqaqqFYX+QThWCdGiJk/GFlq2iAedGh8ZKRsXeNVgt64AhTZo5OFRnItMIN8zzDu4N+riVjAlVVbKxFx/gwJ4cenoTKQS4wJB0kkIUFwVavgotXjOywMk1x+jMN7rennlq15XIbH+T4SLsphkqSblFvVrZtAiAF760GBCP6GUZyNumy9UCn+7vHigQskECkOaGsT0ruVKzEcAM4t09OzL42BLLsIXUa/sW2JZFKkWjjmYX4rSvTT/n2gbm0kqGTNoyowMJGmjacOH3UFiqmDbOZRtdRXSJPpY2sQBo9OBUu/ICHJuJA0EGKSS8+lwNjhyiPlfCGqTMvQ2vyyhIWpXgZjuey9R4x3sl59m0p0YCvPZMlevxmOE1EEXmZVyj0c+/BXGGrbsniO74SDWUBP+539mi4WILnWpEVmjZhgAzm4vU2mn9Hh2ceOlUOow41esvZDBx0Mhim5oBhzKNJ8Y1Fxophsu6OLWCaVXRwpEDAFuer8HKT6bslcFkXCtLH7xpq62kGF1wtOqNudzEG1OuLS5C/XdTw/9y8z4MWu9UbCL9u/iyYBsvYli0rBiAOjQ2w+jJ8PqNlzI4MmKZWdJcGzJRdtSAl+ZcmZrxaJe0lEJHCb+nqeP428YaD6z1IwcMbH8xExNqIEm7ul4984xWkRd643dap8LztCGG/RgyvXVmTyvP/j3Ns1vlQVruMNLhD4k8j5N6Lzl1I5qBwpw8JBJZYH1WGbPQ4aSujg0Bk3gpNZ3I2LlGkOFcWvazGF678rS8g/PkeNSHDWT1giKYFI2mpQ/WquHo4NTvRN9ACilnVdLw06hUvHlTC8M+f2heOnFr+yyUeruDBGNIgmopYUJPiwykBa5eEqP7Uhz3jCVmS2ymriQZRoR6TAQanUwjaIgpKMut0PqWlYbXOImh3pT6QjcL83sUmM0oJKWMPoMQJcdEHV5aZaAgYrC0zzPqUyc8ljoepprCXDOByqw/dExPy+XZ+rkwOe2S0CEVDpaCvMYjpRhhlL5ThcR8JZI8m8RqKOJ+amN5pbUNy0lJTGC40WdZG7y2ZiVc5dBcxSTPGwqFaLtm4Eb7xNSRpD4bVv42l5aQ5feIs/Q2l/UwKl9RACeHcXQA+RPHdC2L9Xf5Uzunt3PTuxQgxYMbeSTdMmq1TIk9EtAG/J1qXXKzBu+bdsNjbWqOaoVqCDLa3wnRBiUskcPyHhpt2RgCSesm2iOltW1GO5xIU2q1lHRMhylxe1SaBuVE62EyoRwRGGBhR2U3PH1H7T370rbUcVOowEZnCoZXYur4khLDmF5CXpoYM85pd0IdivNswxxGona9Zi4KV9C
aOS5qkDZDFErw0XVwWAw2riuBvpfKTM8LX0ZDMaGM+30UYg3ig3J3ytlLiBz87mmR3BFK099536WW63/43Kv7Ovr2cugjKKc0GULEoW2keE5ju6ZgzdqS4XG0rmlIECUvYQakPAtGmPWqWe+bOx3iqiv3ujhLs+L8gt7TvS0cdqMJEbqohMR56AyukNSW7lGhb70WMMCF/3Dac+gLT7z46AcupuXp/Mtz8rC8Xu+5JqUXWWTcEaRIq9RCoVYiHSXpN4F+l7DmdTuSrgLqVqSgyzfcSpTlXWbcaAbnY/88SCOMowAKV44gmpYGFldQkoMQbQUrx7k60oxH4MwKC4eTbv6RC+JX/vCV1/eWTn/GEWxoDRYZTkKdHQG9zJB2EeokAMKTo/Wk1CvkFdCcTAh8C2R69goHy27AjGsRmkIXjpuNNONhsSSn5d7e6x10r3TcefTUSOdFA1laIYRJI8ELypzgmOYkOkyoyZoWhlALctBq5xCctu1zj7+27kcuiNNnVd/wJft3z5o+JUyWDWe4tHqI8YzStED+NGHvSL2jQPZLjdpEF4iCwpm17GDeSg+9l0mXvf9qXY/Ji5acXTZ3nHZpYG2PbjINs2fBEYNoUpxoZYGveXNMJpUUu+qcFs/qMZGHSV/OczvnPIDBD7cxbdGfj/vxdOnlNUJoXWIdb39AZnmpElM9BpyN4UfaELTyz7sGFZbEbN8vxITBwMhGD7seL2QhCane9c8AB17R7QsY3Hs/62HuWSJiCsJs+5zo6HqRUSdghBba32HZu6lDyAs2meGk/9bPrNs88ZF24l3yj5uffhtW3Ef7KrjYYIiT96sj3DDlK0iieqB0Q1HBl1VgziIP3XicsgY1erkXDTgpWg/tKKByUBe/qDgfKWB8W9yflTHTSz+Xw8Jz69B1qoeuHnGcgFIPvoJjqo2DPXOBdGS0uSVnnknub9X7H7vw4Te+8358feBey0vXvfnbz/7y4k+sKAYvI8ZDqrUnrTAiIyGzsprLBbokDKuvn8TvJW2Z6FpoqLETyqsGqsPUaZhmUVeHS5AuCBqDNEdEinovjTvw6o3dP0ZrX9k1EGRBvS5MEm0IOthV6dl84fcHrvyJ9lqu/cHgZ9/Jel5lhmvQnKQOkg3x1oW6ronELQ0VJVZjkm4RLnV7yGoOBp5MYcdjCdTwutStSbFXTRe6OwgTAio5IWqQ56rzGKp7Q6b1goJssLJg15pH3119XDaXnvf9d8/dUV/0DEO7rkeuQZ8mrgfZegh6g4jOa8I4EPMigPn9Febr4JYERrenLIe5S+vRXnTclD4Xt1xkIlR6P60W5KJNStm9Prq7Mv/NVf8w0ndcd9Oe9+jQxW9We79TV5umyXISeE2Yj/eEqEJgX6+LhnhfVQ2Zq0E6O+M1XzrSzgwWnIEM1vHIpmQcPZvV5Vl+T2CeCV1B5y7iGYdsH1/05FkPja48IduHP/XIri/vqC+7cmIKLShCXAmAFoLYgRQKfXT1UMuBF25RHWdeMUZLfBxPV/zKYYUx/a4HM6mmUxM0eQUOa7QiQp5CsLw13febax4euvyE/wnAli+e0hb81KbFHUeWk39yJc1tS1oWJrGdepRrtDCjpp1x7VuuWyJN65kYJeEOTXXtta78M/0Pjgz/r/4JwCs39v7iHLPnoY+1F12UZ3Oeq7mvSZrMxkVy08p8awtJKybQhe/IPDMbzQXPI9NQGckX33Hu3w0+8FP9M57NX1p0Q4cZvu/k9mxOosyyAHRBybiWDXimxZBCyxFbaUUL47kyWk0nj+QLfn/lg/t+or9wOe5/t7Tr9lP7JipT351fGlvTnUKJ94mnuq/Dzmz6zWjX+OYORNCNeUeqkB2qd29xacdNy+/f9+r/zT/UavmM/lZp9v76nD8shanLOtPpntSFNqzZadGeO7zSW5U/hah7dLi5qU5ns4YzaF8/rzP52sJvvrv3hP1l2v+nz/8IMABBqbSZZcgDWQAAAABJRU5ErkJggg==',\n },\n 'messages': [\n {'mobilenetwork': '/index.html'}\n ],\n 'name': getattr(settings, 'WEBAPP_MANIFEST_NAME', 'Marketplace'),\n 'size': storage.size(package_path),\n 'package_path': absolutify(reverse('package.zip')),\n 'permissions': {\n 'mobilenetwork': {'description': 'To detect mobile carrier and regional information.'}\n },\n #'release_notes': '',\n 'type': 'privileged',\n 'version': '0.0.4',\n })\n\n manifest_etag = hashlib.md5(manifest_content).hexdigest()\n\n @etag(lambda r: manifest_etag)\n def _inner_view(request):\n response = HttpResponse(manifest_content,\n mimetype='application/x-web-app-manifest+json')\n return response\n\n return _inner_view(request)", "title": "" }, { "docid": "82581acc1e2dfa6165c45ceaaa5a6c2a", "score": "0.559545", "text": "def mark_as_installed(target_version):", "title": "" }, { "docid": "78b0118e5250cacb678a395203b93abc", "score": "0.55903596", "text": "def _system_version(self):\n pass", "title": "" }, { "docid": "5308a939d9122fb5e96378bc55ecba10", "score": "0.5583285", "text": "def version(self):", "title": "" }, { "docid": "5308a939d9122fb5e96378bc55ecba10", "score": "0.5583285", "text": "def version(self):", "title": "" }, { "docid": "5308a939d9122fb5e96378bc55ecba10", "score": "0.5583285", "text": "def version(self):", "title": "" }, { "docid": "5308a939d9122fb5e96378bc55ecba10", "score": "0.5583285", "text": "def version(self):", "title": "" }, { "docid": "64ad440e01e687e24a06b82643cf3414", "score": "0.55697703", "text": "def is_installed(target_version):", "title": "" }, { 
"docid": "55924a026ead92a09a22357fc1adc349", "score": "0.5555216", "text": "def need_upgrade(mp):\n return (not mp.manifest.version ==\n mp.app_datamodel_version)", "title": "" }, { "docid": "0606299912b9d568a223c84e15ab32f5", "score": "0.5538605", "text": "def test_version_from_versioner(self):\n bundle = self.mkbundle('in', output='out-%(version)s')\n self.env.manifest.version = None\n self.env.versions.version = 'versions'\n assert bundle.get_version() == 'versions'\n assert bundle.resolve_output() == self.path('out-versions')", "title": "" }, { "docid": "9e4ecd54b878dd9c348e2a16feef37d4", "score": "0.5532818", "text": "def get_android_manifest(zip_file):\n try:\n file_info = zip_file.getinfo(ANDROID_MANIFEST)\n if file_info.file_size > 0:\n return ANDROID_MANIFEST\n except KeyError:\n return None", "title": "" }, { "docid": "bd03fa7faa14fc7930e4cc58e0c0473a", "score": "0.55006516", "text": "def test_version_copies(assets: VersionedAssets) -> None:\n # First asset will load highest available version of the requirement, which\n # conflicts with the second requested version. The same asset can't be requested\n # twice\n with pytest.raises(\n ValueError,\n match='jquery.js==.*? does not match already requested asset jquery.js==.*?',\n ):\n assets.require('jquery.form.js', 'jquery.js==1.7.1')", "title": "" }, { "docid": "a714cd99e8ad538f095439e28c945062", "score": "0.5497998", "text": "def update_extension_status_and_manifest_fields(sender, instance, **kw):\n instance.extension.update_status_according_to_versions()\n if instance.status == STATUS_PUBLIC:\n instance.extension.update_manifest_fields_from_latest_public_version()", "title": "" }, { "docid": "c2dab9068456658981a8f493c7765e25", "score": "0.5487715", "text": "def verify_manifest(self, sdk_dir):\n metafile = os.path.join(sdk_dir, 'meta', 'manifest.json')\n fileset = set()\n fileset.add(os.path.relpath(metafile, sdk_dir))\n with open(metafile, 'r') as input:\n metadata = json.load(input)\n for atom in metadata['parts']:\n fileset.add(atom['meta'])\n with open(os.path.join(sdk_dir, atom['meta']), 'r') as input:\n atom_meta = json.load(input)\n fileset.update(self.get_atom_files(atom_meta))\n self.assertTrue(len(fileset) != 0)\n # walk the sdk_dir matching the files in the set.\n for dir_name, _, file_list in os.walk(sdk_dir):\n for f in file_list:\n found_file = os.path.relpath(os.path.join(dir_name, f), sdk_dir)\n self.assertIn(found_file, fileset)\n fileset.remove(found_file)\n self.assertTrue(\n len(fileset) == 0, \"Files missing from manifest: %s\" % str(fileset))", "title": "" }, { "docid": "67d2f1b59da81ca409c1cc0d8955b31f", "score": "0.5478511", "text": "def version(self):\n pass", "title": "" }, { "docid": "ffa749e7963dc0aa7f14c23b03166233", "score": "0.5478322", "text": "def checkversion(self, version: str) -> bool:\n return version == self.__data[\"version\"]", "title": "" }, { "docid": "b14aefb0623482e964eadfd717815267", "score": "0.5471525", "text": "def test_unaffiliated_manifest(app1: Flask) -> None:\n manifest = WebpackManifest()\n with app1.app_context():\n assert len(manifest) == 0\n assert not list(iter(manifest))\n assert manifest('random') == 'data:,'\n assert manifest('random', 'default-value') == 'default-value'\n assert manifest.get('random') is None\n assert manifest.get('random', 'default-value') == 'default-value'\n with pytest.raises(KeyError):\n manifest['random']", "title": "" }, { "docid": "d4df0ebae7fda1e54b5d02af6adf1ee3", "score": "0.5463567", "text": "def write_manifest(self) -> pulumi.Input[bool]:\n 
return pulumi.get(self, \"write_manifest\")", "title": "" }, { "docid": "2eca9c99d6aea0e74add21345041c478", "score": "0.54577625", "text": "def main():\n compare_versions(Pathcom())", "title": "" }, { "docid": "1c8f25eb0fa08c94b4c7cb7b0e0526ba", "score": "0.5446894", "text": "def update(self):\n manifest = os.path.join(self.root, \".manifest\", \"manifest.yaml\")\n if os.path.isfile(manifest):\n with open(manifest, \"w\") as f:\n f.write(yaml.dump(self.data))\n else:\n raise FileNotFoundError", "title": "" }, { "docid": "facb5b7a80a39b5f63f1e9eee3a040a8", "score": "0.5439921", "text": "def INIT_VERSION(self):\n pass", "title": "" }, { "docid": "8152f3fd29dc6c3686a8600f1515adb1", "score": "0.5439856", "text": "def get_manifest() -> Tuple[Manifest, str]:\n logger.debug(\"Looking for manifest\")\n manifest_path = find_manifest()\n dfetch.manifest.validate.validate(manifest_path)\n\n logger.debug(f\"Using manifest {manifest_path}\")\n return (\n dfetch.manifest.manifest.Manifest.from_file(manifest_path),\n manifest_path,\n )", "title": "" }, { "docid": "a905a24f304b7b4f781845adf19669b9", "score": "0.5433459", "text": "def perform_valid_manifest_post(context, manifest, url):\n filename = \"data/{manifest}\".format(manifest=manifest)\n files = {'manifest[]': open(filename, 'rb')}\n endpoint = \"{coreapi_url}{url}\".format(coreapi_url=context.coreapi_url, url=url)\n response = requests.post(endpoint, files=files)\n response.raise_for_status()\n context.response = response.json()\n print(response.json())", "title": "" }, { "docid": "7a404d5df09aedb15d5cd2ed78ff40e3", "score": "0.54299986", "text": "def undelete(self):\n if not self.deleted:\n return False\n data = Extension.extract_manifest_fields(self.manifest, ('version',))\n self.update(deleted=False, **data)\n return True", "title": "" }, { "docid": "79e9117bb05856ba41663d742bea1e16", "score": "0.5414457", "text": "def getVersion(version):", "title": "" }, { "docid": "2884723c6897c11f0bfc0acde6f942fd", "score": "0.5413026", "text": "def clib_manifest(self, package):\n return self.clib_get(package).manifest", "title": "" }, { "docid": "3a9f855b4bbd41e824a1338ce98880cc", "score": "0.5405294", "text": "def test_mark_manifest_complete_no_manifest(self):\n provider = self.ocp_provider\n initial_update_time = provider.data_updated_timestamp\n mark_manifest_complete(\n self.schema, provider.type, manifest_list=None, provider_uuid=str(provider.uuid), tracing_id=1\n )\n\n provider = Provider.objects.filter(uuid=self.ocp_provider.uuid).first()\n self.assertGreater(provider.data_updated_timestamp, initial_update_time)", "title": "" }, { "docid": "86dbafd08f62a929c2699e89e13dd2be", "score": "0.5399738", "text": "def manifest():\n manifest_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'manifest.yaml')\n with open(manifest_path, 'r') as f:\n manifest_dict = yaml.load(f)\n return jsonify(**manifest_dict)", "title": "" }, { "docid": "5afb3cddb46a3a0c2574a5ff934656e2", "score": "0.5385435", "text": "def readAndroidMetadata(self):\n manifestFile = open(os.path.join(self.tempdir, 'AndroidManifest.xml'), 'r')\n manifest = parseXml(manifestFile)\n manifestFile.close()\n\n root = manifest.documentElement\n self.version = root.attributes['android:versionName'].value\n while self.version.count('.') < 2:\n self.version += '.0'\n self.version = '%s.%s' % (self.version, self.buildNum)\n\n usesSdk = manifest.getElementsByTagName('uses-sdk')[0]\n self.minSdkVersion = usesSdk.attributes['android:minSdkVersion'].value\n self.basename = 
os.path.basename(self.config.repository)", "title": "" }, { "docid": "4aed2b66e2581f98d2424ef63b347d0f", "score": "0.5376662", "text": "def update(self):\n ##self.assertAccessable()\n # Should this download the new meta information?? NO\n self.install()", "title": "" }, { "docid": "7b3273fd83cf4aabde0c24c7221b20ab", "score": "0.53670627", "text": "def test_get_version(self, _):\n self.helper._installed = True\n self.assertEqual(StrictVersion(\"1.3.2\"), self.helper.version)", "title": "" }, { "docid": "71726c5b8aae1a5491a1050cd7a5b3d8", "score": "0.5338423", "text": "def is_firmware_bundled(self):\n if self.is_bundle_cache is None:\n with open(self.firmware, \"rb\") as fh:\n signature = fh.read(16).lower()\n\n if b\"firmware\" in signature:\n self.is_bundle_cache = False\n elif b\"combined_content\" in signature:\n self.is_bundle_cache = True\n else:\n self.module.fail_json(msg=\"Firmware file is invalid. File [%s]. Array [%s]\" % (self.firmware, self.ssid))\n\n return self.is_bundle_cache", "title": "" }, { "docid": "8c25f11c842b14fa7cc65641b9e116d6", "score": "0.5334587", "text": "def _get_manifest(project_path):\n\ttry:\n\t\treturn json.load(\n\t\t\topen(_make_project_path(MANIFEST_FILE_NAME, project_path)))\n\texcept IOError:\n\t\traise IOError(\"no manifest file at {0}\".format(project_path))", "title": "" }, { "docid": "33522d55c9456de7e67d562ea7770559", "score": "0.5333383", "text": "def validate_upload_manifest():\n content_type = flask.request.headers.get(\"Content-Type\", \"\").lower()\n if content_type == \"application/json\":\n manifest_doc = parse.parse_request_json()\n else:\n manifest_doc = parse.parse_request_yaml()\n errors = manifest.validate_upload_manifest(manifest_doc)\n if errors:\n return flask.jsonify({\"valid\": False, \"errors\": errors})\n else:\n return flask.jsonify({\"valid\": True})", "title": "" }, { "docid": "ab7445397ae9cf6a327151e34e3bfaaa", "score": "0.53258187", "text": "def _FindUpstreamManifest(self, path):\n if (os.path.isdir(path) and\n os.listdir(path).count(android_manifest.AndroidManifest.FILENAME)):\n return android_manifest.AndroidManifest(app_path=path)\n dirpath = os.path.dirname(path)\n if self._IsPathInBuildTree(path):\n return self._FindUpstreamManifest(dirpath)\n logger.Log('AndroidManifest.xml not found')\n return None", "title": "" }, { "docid": "aad972e26ed4cc3393b1e4f40d33797f", "score": "0.53247875", "text": "def checkVersions():\n return os.environ.get(\"RAVEN_IGNORE_VERSIONS\", \"0\") != \"1\"", "title": "" }, { "docid": "78e4106b0970c8f81030272255d69509", "score": "0.53107125", "text": "def test_version_from_attr(self):\n bundle = self.mkbundle('in', output='out-%(version)s')\n self.env.manifest.version = 'manifest'\n self.env.versions.version = 'versions'\n bundle.version = 'attr'\n assert bundle.get_version() == 'attr'\n assert bundle.resolve_output() == self.path('out-attr')", "title": "" }, { "docid": "b515ce477bcdd8439ec749e573565b91", "score": "0.5306811", "text": "def load_manifest(self, results_filename, batch):\n pass", "title": "" }, { "docid": "c81bcaabb42e7254c5ec75b9dd06d52b", "score": "0.53068024", "text": "def _deploy_manifest(bucket_name, manifest_path):\n if sys.version_info.major == 3:\n manifest_suffix = '.3'\n else:\n manifest_suffix = ''\n\n common.execute('gsutil cp %s '\n 'gs://%s/clusterfuzz-source.manifest%s' %\n (manifest_path, bucket_name, manifest_suffix))", "title": "" }, { "docid": "d057cb83af31af908f9f0da2e2e44f95", "score": "0.5304693", "text": "def GetNextManifest(self):\n assert 
self.manifest_manager, \\\n 'Must run GetStageManager before checkout out build.'\n\n build_id = self._run.attrs.metadata.GetDict().get('build_id')\n\n to_return = self.manifest_manager.GetNextBuildSpec(build_id=build_id)\n previous_version = self.manifest_manager.GetLatestPassingSpec()\n target_version = self.manifest_manager.current_version\n\n # Print the Blamelist here.\n url_prefix = 'https://crosland.corp.google.com/log/'\n url = url_prefix + '%s..%s' % (previous_version, target_version)\n logging.PrintBuildbotLink('Blamelist', url)\n # The testManifestVersionedSyncOnePartBranch interacts badly with this\n # function. It doesn't fully initialize self.manifest_manager which\n # causes target_version to be None. Since there isn't a clean fix in\n # either direction, just throw this through str(). In the normal case,\n # it's already a string anyways.\n logging.PrintBuildbotStepText(str(target_version))\n\n return to_return", "title": "" }, { "docid": "35322580d628408193bb60a031f69e74", "score": "0.5302242", "text": "def _get_package_version(self):\n pkg_path = os.path.join(self.work_path, 'src')\n\n sys.path.insert(0, pkg_path)\n try:\n import sentry\n except Exception:\n version = None\n build = None\n else:\n log.info(\"pulled version information from 'sentry' module\".format(\n sentry.__file__))\n version = sentry.__version__\n build = sentry.__build__\n finally:\n sys.path.pop(0)\n\n if not (version and build):\n try:\n with open(self.work_path, 'sentry-package.json') as fp:\n data = json.loads(fp.read())\n except Exception:\n pass\n else:\n log.info(\"pulled version information from 'sentry-package.json'\")\n version, build = data['version'], data['build']\n\n return {\n 'version': version,\n 'build': build,\n }", "title": "" }, { "docid": "6e5279b94ebbded97aa5ef84eb8097c5", "score": "0.5299107", "text": "def check_latest_version(package):\n return Downloader().download_package(package).version()", "title": "" }, { "docid": "45bf74991c82782bd7e1bc229ac578ba", "score": "0.52976894", "text": "def _read_application_version(self):\n addr_major = self._app_start_addr + len(self._signature) + \\\n self._PRODUCT_TYPE_SIZE + 1\n addr_minor = self._app_start_addr + len(self._signature) + \\\n self._PRODUCT_TYPE_SIZE\n return FirmwareVersion(major=self._data[addr_major],\n minor=self._data[addr_minor],\n debug=False)", "title": "" }, { "docid": "672d3d038485b7c3cce9cfd588b41b10", "score": "0.5292948", "text": "def test_asset_versioned(assets: VersionedAssets) -> None:\n bundle = assets.require('jquery.js==1.7.1')\n assert bundle.contents == ('jquery-1.7.1.js',)\n bundle = assets.require('jquery.js<1.8.0')\n assert bundle.contents == ('jquery-1.7.1.js',)\n bundle = assets.require('jquery.js>=1.8.0')\n assert bundle.contents == ('jquery-1.8.3.js',)", "title": "" }, { "docid": "f5e4d4896fa48cd9e0e6dff237ade73c", "score": "0.52916837", "text": "def manifest_version_policy(self, manifest_version_policy):\n\n self._manifest_version_policy = manifest_version_policy", "title": "" }, { "docid": "19cd5e4a3b74672367d7b6aaaa20176f", "score": "0.52796966", "text": "def VerifyPackage(self, entry, _):\n if not entry.get('version'):\n self.logger.info(\"Insufficient information of Package %s; \"\n \"cannot Verify\" % entry.get('name'))\n return False\n success = self.cmd.run(\"/usr/local/bin/epkg -q -S -k %s-%s\" %\n (entry.get('name'),\n entry.get('version'))).success\n if not success:\n self.logger.debug(\"Package %s version incorrect\" %\n entry.get('name'))\n return success", "title": "" }, { "docid": 
"ce02002d1be89c0316cf2110bbd418a6", "score": "0.52618486", "text": "def record_manifest(self):\n manifest = super(PIPSatchel, self).record_manifest()\n manifest['all-requirements'] = self.get_combined_requirements()\n if self.verbose:\n pprint(manifest, indent=4)\n return manifest", "title": "" }, { "docid": "bb9a925353234424cb83f3be0408b5be", "score": "0.5259325", "text": "def version():\n\n app_dir = os.path.dirname((os.path.realpath(__file__)))\n version_filename = os.path.join(app_dir, 'version')\n with open(version_filename) as version_file:\n on_file_version = version_file.readline().strip('\\r\\n')\n\n return on_file_version", "title": "" }, { "docid": "0498227e763e5cc10a49b2a0000d6147", "score": "0.5257111", "text": "def test_retrieve_versions(self):\n app_versions = self.project._client.app_versions\n self.assertTrue(isinstance(app_versions, list))\n self.assertTrue(isinstance(app_versions[0], dict))\n self.assertTrue(\n set(app_versions[0].keys()),\n {\"app\", \"label\", \"version\", \"major\", \"minor\", \"patch\", \"prerelease\"},\n )", "title": "" }, { "docid": "1e24b9b23fa60afdd9d2b2265002ff9b", "score": "0.52564234", "text": "def checkmagic(self):\n self.lib.seek(self.start) #default - magic is at start of file\n if self.lib.read(len(self.MAGIC)) != self.MAGIC:\n raise RuntimeError, \"%s is not a valid %s archive file\" \\\n % (self.path, self.__class__.__name__)\n if self.lib.read(len(self.pymagic)) != self.pymagic:\n print \"Warning: pyz is from a different Python version\"\n self.lib.read(4)", "title": "" }, { "docid": "c7597a16373df6d6bccedca921733855", "score": "0.5255497", "text": "def version():\n banner()\n exit(0)", "title": "" }, { "docid": "003866e61bdeb2619d77bb37afc0b596", "score": "0.5251102", "text": "def test_application_version(app):\n assert app.name == \"flask_tutorial\"\n assert len(app_version(app.name)) > 0", "title": "" }, { "docid": "36d278382ab6afaf04020d2ffe4a7f38", "score": "0.5250871", "text": "def check_client_version(self):\n return self.m.swarming_client.ensure_script_version(\n 'swarming.py', MINIMAL_SWARMING_VERSION)", "title": "" }, { "docid": "97a21e8240407d40c816c78417e9757a", "score": "0.52503395", "text": "def test_manifest_create(self):\n\n manifest = Manifest('tests/fixtures/test-data', '.test_manifest.yaml')\n manifest.create()\n\n self.assertEqual(manifest['documents/Important Document 1.odt'], 'd460a36805fb460c038d96723f206b20')\n self.assertEqual(manifest['documents/Important Presentation.odp'], '1911ec839cedcbf00739a7d3447ec3a3')\n self.assertEqual(manifest['pictures/Picture #1.jpg'], '6eec850e32622c0e33bdae08ced29e24')\n self.assertEqual(manifest['documents/exclude_me.txt'], '2e7d8cb32bb82e838506aff5600182d1')\n self.assertEqual(len(manifest.manifest), 4)", "title": "" }, { "docid": "009bbfdf4f701e2f53900d5a80e6a439", "score": "0.52475905", "text": "def combine_manifests(new_manifest, previous_manifest):\n if not previous_manifest:\n return new_manifest\n\n for _, entry in new_manifest['files'].items():\n # hash equal, use previous manifest entry's version\n if not previous_manifest['files'].get(entry[3]):\n continue\n if entry[1] == previous_manifest['files'][entry[3]][1]:\n entry[2] = previous_manifest['files'][entry[3]][2]\n\n return new_manifest", "title": "" }, { "docid": "3066a9fdae530d8984101380051c1b51", "score": "0.52442575", "text": "def get_manifest_path():\n manifest_path = os.path.join(get_cache_dir(), \"manifest.json\")\n return manifest_path", "title": "" }, { "docid": "967e6dd8c4edb66bed88b931b6a2d698", "score": 
"0.524398", "text": "def run(self):\n try:\n format = \"%s-[0-9]*\" % self.distribution.metadata.get_name()\n dependency_versions = {}\n \n meta_version = compute_meta_version(\n self.distribution.dependencies_git_repositories,\n accepted_tag_pattern = format,\n cachedir = self.cache_directory,\n dependency_versions = dependency_versions\n )\n\n branch_suffix = get_branch_suffix(self.distribution.metadata, retrieve_current_branch())\n\n\n version = meta_version\n version_str = '.'.join(map(str, version))\n\n replace_inits(version, self.distribution.packages)\n replace_scripts(version, self.distribution.py_modules)\n\n replace_version_in_file(version, 'setup.py')\n\n self.distribution.metadata.version = version_str\n self.distribution.metadata.dependency_versions = dict([(k,'.'.join(map(str, v))) for k,v in dependency_versions.items()])\n self.distribution.metadata.branch_suffix = branch_suffix\n \n print \"Current version is %s\" % version_str\n print \"Current branch suffix is %s\" % branch_suffix\n\n except Exception:\n import traceback\n traceback.print_exc()\n raise", "title": "" }, { "docid": "606b1b1a444784b8564a6d209a07a4df", "score": "0.52432686", "text": "def version():\n print blue(\"Wordpress Workflow version 0.4.1\")", "title": "" }, { "docid": "ae51ab6cfbd4502e0728e5716b90d4eb", "score": "0.5242769", "text": "def test_version(self):\n pass", "title": "" }, { "docid": "fab0e1dd00c2de77e025d7b75e693bfa", "score": "0.52371764", "text": "def manifest_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"manifest_name\")", "title": "" }, { "docid": "587b5abe1dd9d2c4b666664a344f7e80", "score": "0.5233585", "text": "def check_package_version():\n packages = dict(pysam='0.9.1', tqdm='4.31.1', numpy='1.16.0')\n all_packages = True\n for package, required_version in packages.items():\n installed_version = import_package_check(package)\n if LooseVersion(installed_version) < LooseVersion(required_version):\n print(f'{package} {installed_version} < {package} {required_version}, please update {package}')\n print(f'pip3 install {package} --upgrade')\n all_packages = False\n return all_packages", "title": "" }, { "docid": "ec09115770ada5571e03c30e85c2616a", "score": "0.5229099", "text": "def test_load_manifest_from_file(app1: Flask) -> None:\n manifest = WebpackManifest(app1, filepath='test-manifest.json')\n assert len(manifest) == 0\n with app1.app_context():\n assert len(manifest) == 6\n assert 'test.css' in manifest\n assert 'other.css' not in manifest\n assert 'index.scss' in manifest\n assert 'index.css' in manifest\n assert (\n manifest['test.css']\n == manifest('test.css')\n == manifest.get('test.css')\n == 'test-asset.css'\n )\n assert (\n manifest['index.scss']\n == manifest('index.scss')\n == manifest.get('index.scss')\n == manifest['index.css']\n == manifest('index.css')\n == manifest.get('index.css')\n == 'test-index.css'\n )\n # Call iter(manifest) to confirm it works, then recast as set for comparison\n assert set(iter(manifest)) == {\n 'index.css', # index.css was created as a substitute name for index.scss\n 'index.scss',\n 'test.css',\n 'test.jpg',\n 'test.js',\n 'test.png',\n }\n # Since other.css is not present, a default value is returned\n assert manifest('other.css') == 'data:,'\n assert manifest('other.css', 'default-value') == 'default-value'\n assert manifest.get('other.css') is None\n assert manifest.get('other.css', 'default-value') == 'default-value'\n with pytest.raises(KeyError):\n manifest['other.css']", "title": "" }, { "docid": 
"8a7db2e13bd940df971cdd73cb107078", "score": "0.5222521", "text": "def reload_manifest():\n\n manage_webpack.reload_manifest()\n return redirect(url_for('main.main'))", "title": "" }, { "docid": "f780da20ae8ee80d1520ad45d4a95b71", "score": "0.5215285", "text": "def test_versions_are_in_sync():\n\n path = Path(__file__).resolve().parents[2] / \"pyproject.toml\"\n pyproject = toml.loads(open(str(path)).read())\n pyproject_version = pyproject[\"tool\"][\"poetry\"][\"version\"]\n\n package_init_version = xnemogcm.__version__\n\n assert package_init_version == pyproject_version", "title": "" }, { "docid": "0ddd6587f724371ae387738444c29138", "score": "0.521278", "text": "def GetVersion(self):\n self.cd.Get(self.version)\n version = self.cd.FindID('version')\n os = self.cd.FindID('os_type')\n if version:\n self.chrome_version = version.text\n else:\n return False\n if os:\n self.platform = os.text\n else:\n return False\n return True", "title": "" } ]
6c531d94ee0e432b369d07ab4a9f34f9
From the given sentence object, each token is transformed to a string and appended to the list of tokens.
[ { "docid": "2143e175fb070bba04a2974497a4d6a4", "score": "0.0", "text": "def list_of_tokens(doc: object) -> list:\n token_list = [str(each_token.text) for each_token in doc]\n\n return token_list", "title": "" } ]
[ { "docid": "ffac20314eb07ec3f9f0ee6e88325681", "score": "0.70261323", "text": "def sentences_from_tokens(self, tokens):\n ...", "title": "" }, { "docid": "69db74418c3f4c52186cec53ef074834", "score": "0.66899365", "text": "def _bert_tokenize_sentence(self, tokenizer, sentence):\n ### Output\n bert_tokens = []\n\n # Token map will be an int -> int mapping between the `orig_tokens` index and\n # the `bert_tokens` index.\n index_map = []\n bert_tokens.append(\"[CLS]\")\n for word in sentence:\n index_map.append(len(bert_tokens))\n bert_tokens.extend(tokenizer.tokenize(word))\n bert_tokens.append(\"[SEP]\")\n return bert_tokens, index_map", "title": "" }, { "docid": "8ce92eaee3f174d4ffaaab9ecb172fa2", "score": "0.65306973", "text": "def process_tokens(tokens):\n terms = []\n for sentence in tokens:\n for token in sentence:\n terms.append(process_word(token))\n\n return list(terms)", "title": "" }, { "docid": "56b1e3ace1a38063f1fdc622aa2de715", "score": "0.64017737", "text": "def tokenize(self, sentence, *args, **kwargs):\n tokens = []\n if self.options.get('remove_tatweel', True):\n sentence = self.__remove_tatweel(sentence)\n if self.options.get('remove_diacritics', False):\n sentence = self.__remove_diacrtics(sentence)\n\n for t in super().tokenize(sentence, *args, **kwargs):\n temp_tokens = []\n # # لل\n if t.startswith('\\u0644\\u0644'):\n temp_tokens.append(t[0:1])\n temp_tokens.append(t[1:])\n t = ''\n else:\n tokens += self.__name_follows(t)\n tokens += temp_tokens\n return tokens", "title": "" }, { "docid": "79912984390e2b368d5378aa271c6d13", "score": "0.6290606", "text": "def sentencify(tokens):\r\n sentence = []\r\n for token in tokens:\r\n sentence.append(token)\r\n if token in PUNCTUATION:\r\n yield sentence\r\n sentence = []\r\n if sentence:\r\n yield sentence", "title": "" }, { "docid": "350420762ab3605a7dc1ceb590eaa424", "score": "0.6282245", "text": "def tokenization(sentence, wordlist):\n sentence = sentence.split()\n mwe_tokenizer = nltk.tokenize.MWETokenizer(wordlist)\n tokenized = mwe_tokenizer.tokenize(sentence)\n\n return tokenized", "title": "" }, { "docid": "611cad42cbf913403017d4ffce5514ad", "score": "0.6261593", "text": "def translate_to_tokens(text):\n \n doc = nlp(text)\n \n ISLTknOP = []\n \n for sent in doc.sents:\n ISLSent = eng_isl_translate(sent.as_doc())\n ISLTknOP.extend(ISLSent)\n \n return ISLTknOP", "title": "" }, { "docid": "0706db1af48822b4c0e07aace36f3a2d", "score": "0.6252947", "text": "def transform_sentence(self, sentence):\n vector = [self.word2idx.get(word, 3) for word in sentence]\n if self.start_end_tokens:\n vector = self.add_start_end(vector)\n return vector", "title": "" }, { "docid": "1ae19e22e9deb6dfe3b2c587f522eaf4", "score": "0.62213516", "text": "def tokenize(sentences, token='word'):\n if token == 'word':\n return [sentence.split(' ') for sentence in sentences]\n elif token == 'char':\n return [list(sentence) for sentence in sentences]\n else:\n print('ERROR: unkown token type '+token)", "title": "" }, { "docid": "00c87fa326b25223192404d12a874c1e", "score": "0.61795974", "text": "def tokenize(self, document):\n\n for pattern in SentenceTokenizer.PATTERNS:\n pattern = re.compile(pattern) # type: ignore\n document = re.sub(pattern, self.conv_period, document)\n\n result = []\n for line in document.split(\"\\n\"):\n line = line.rstrip()\n line = line.replace(\"\\n\", \"\")\n line = line.replace(\"\\r\", \"\")\n line = line.replace(\"。\", \"。\\n\")\n sentences = line.split(\"\\n\")\n\n for sentence in sentences:\n if not sentence:\n 
continue\n\n period_special = SentenceTokenizer.PERIOD_SPECIAL\n period = SentenceTokenizer.PERIOD\n sentence = sentence.replace(period_special, period)\n result.append(sentence)\n\n return result", "title": "" }, { "docid": "2c2b798abb56ee32fcba0a9c2999a330", "score": "0.61755633", "text": "def getSentenceForm(self, transformed):", "title": "" }, { "docid": "fa3f77cb1dba5dfc7820b3af07f0b387", "score": "0.6156034", "text": "def transform_sentence(sequence, vocab_processor):\r\n return next(vocab_processor.transform([sequence])).tolist()", "title": "" }, { "docid": "4475721f983024a052f176b5280fcc2f", "score": "0.6155823", "text": "def join_from_tokens(self):\n self.text = self.tokens.map(lambda x : \" \".join(x))", "title": "" }, { "docid": "f2634e46f4423e229e57b2522ec95a58", "score": "0.61476606", "text": "def get_tokenized(self, text):\n pass", "title": "" }, { "docid": "b5137eb9a91a13d08243ecd2c389f0ba", "score": "0.61459446", "text": "def tokenise(self, sentence):\n\t\tsentence = sentence.lower()\n\t\ttokens = []\n\t\tfor word in sentence.split():\n\t\t\ttry:\n\t\t\t\tword_type = WORD_TAGS[VOCABULARY[word]]\n\t\t\texcept KeyError:\n\t\t\t\ttry:\n\t\t\t\t\tvalue = int(word)\n\t\t\t\texcept ValueError:\n\t\t\t\t\ttokens.append( ('error', word))\n\t\t\t\telse:\n\t\t\t\t\ttokens.append( ('int', value))\n\t\t\telse:\n\t\t\t\ttokens.append( (word_type, word))\n\t\treturn tokens", "title": "" }, { "docid": "0cefac403dfbe1c50b93362ee09a9a8d", "score": "0.6137316", "text": "def add_sentence(self, sentence):\n for word in sentence.split(' '):\n self.add_word(word)", "title": "" }, { "docid": "5f0c2e47009f07688ed18bb242923bd5", "score": "0.61357635", "text": "def convert_sentence_to_token(sentence, seq_length, tokenizer):\n\n tokenized_text = tokenizer.tokenize(sentence)\n\n if len(tokenized_text) > seq_length - 2:\n tokenized_text = tokenized_text[0:(seq_length - 2)]\n\n special = []\n isSpecial = False\n whole_word = ''\n words = []\n position = []\n\n # Start position of S' sentence is moved 2 indexes due to [CLS] and [SEP]\n start_pos = len(tokenized_text) + 2\n\n for index in range(len(tokenized_text) - 1):\n\n # Dealing with words with a dash that are splitted. 
For example: \"co\", \"-\", \"developer\"\n if (tokenized_text[index + 1] == \"-\" and tokenized_text[index + 2] != \"-\") or \\\n (tokenized_text[index + 1] == \"–\" and tokenized_text[index + 2] != \"–\") or \\\n (tokenized_text[index + 1] == \"'\" and tokenized_text[index + 2] != \"'\"):\n special.append(start_pos + index)\n whole_word += tokenized_text[index] # \"co\"\n continue\n\n if tokenized_text[index] == \"-\" or tokenized_text[index] == \"–\" or tokenized_text[index] == \"'\":\n special.append(start_pos + index)\n whole_word += tokenized_text[index] # \"co\" + \"-\"\n if tokenized_text[index - 1] == \"-\" or tokenized_text[index - 1] == \"–\":\n words.append(whole_word)\n position.append(start_pos + index)\n special = []\n whole_word = ''\n continue\n\n if (tokenized_text[index] != \"-\" and tokenized_text[index - 1] == \"-\" and not tokenized_text[index - 2] == \"-\") or \\\n (tokenized_text[index] != \"–\" and tokenized_text[index - 1] == \"–\" and not tokenized_text[index - 2] == \"–\") or \\\n (tokenized_text[index] != \"'\" and tokenized_text[index - 1] == \"'\" and not tokenized_text[index - 2] == \"'\"):\n special.append(start_pos + index)\n whole_word += tokenized_text[index] # \"co\" + \"-\" + \"developer\"\n whole_word = whole_word.replace('##', '')\n if (tokenized_text[index + 1][0:2] != \"##\"):\n words.append(whole_word)\n position.append(special)\n special = []\n whole_word = ''\n isSpecial = False\n continue\n else:\n isSpecial = True\n continue\n\n # Dealing with subword tokens. For example: 'per', '##tus', '##sis'\n if (tokenized_text[index + 1][0:2] == \"##\"):\n special.append(start_pos + index)\n whole_word += tokenized_text[index] # 'per'\n isSpecial = True\n continue\n else:\n if isSpecial:\n isSpecial = False\n special.append(start_pos + index)\n whole_word += tokenized_text[index] # 'per' + '##tus'\n whole_word = whole_word.replace('##', '')\n words.append(whole_word)\n position.append(special)\n special = []\n whole_word = ''\n else:\n position.append(start_pos + index)\n words.append(tokenized_text[index])\n\n # Dealing with the last token\n if isSpecial:\n isSpecial = False\n special.append(start_pos + index + 1)\n position.append(special)\n whole_word += tokenized_text[index + 1] # 'per' + '##tus' + '##sis'\n whole_word = whole_word.replace('##', '')\n words.append(whole_word)\n else:\n position.append(start_pos + index + 1)\n words.append(tokenized_text[index + 1])\n\n return tokenized_text, words, position", "title": "" }, { "docid": "16e119b77b8bf2739e3659d73dfc11a0", "score": "0.6135341", "text": "def transform(self, input: str) -> list:\n return nltk.word_tokenize(input)", "title": "" }, { "docid": "d9e1bed8ad08d77655c75cd96621473d", "score": "0.6130125", "text": "def tokenize_sentence(self, sentence, pos_tagging=True):\n token_texts = sentence.split(' ')\n tokens = []\n offset = 0\n for token_text in token_texts:\n token = {'text': token_text, 'start': offset, 'end': offset + len(token_text)}\n if pos_tagging:\n token['pos_tag'] = 'x'\n tokens.append(token)\n offset += len(token_text)\n return tokens", "title": "" }, { "docid": "fcf4a4b0864799725e50f8f24896e83a", "score": "0.6121324", "text": "def infer(self, sentence: str, *args, **kwargs) -> list:\n return [self.tok2emb[t] for t in sentence.split()]", "title": "" }, { "docid": "0e65d585cda43f395fffb6986881bf01", "score": "0.6097358", "text": "def word_tokenize(self, s):\n ...", "title": "" }, { "docid": "61f7f1b70cbf501ea4eec57ce1312b03", "score": "0.60836226", "text": "def 
sentence_tokenizer_and_parser(self, article_list):\n sentences = []\n for article in article_list:\n article_text = article['text']\n parsed_article = self.sp_core_nlp(article_text)\n sentences.extend(parsed_article.sents)\n\n # TODO Clean the sentences and create a new list of parsed sentences\n print('Tokenized {} sentences'.format(len(sentences)))\n return sentences", "title": "" }, { "docid": "95d9a1137fc9ed8cb0d8d29365f67b01", "score": "0.6080735", "text": "def tokenize_sent(document):\n return sent_tokenize(document)", "title": "" }, { "docid": "e5e918b3ecd4c98c6ba5d21ee9fae402", "score": "0.60537297", "text": "def tokenize(text):", "title": "" }, { "docid": "1661e72ef10aca48ebf231eb87492946", "score": "0.60369515", "text": "def _process_parser(self, sentences, input_pack: DataPack):\n for sentence in sentences:\n Sentence(input_pack, sentence.start_char, sentence.end_char)\n\n if \"tokenize\" in self.processors:\n # Iterating through spaCy token objects\n for word in sentence:\n begin_pos_word = word.idx\n end_pos_word = begin_pos_word + len(word.text)\n token = Token(input_pack, begin_pos_word, end_pos_word)\n\n if \"pos\" in self.processors:\n token.pos = word.tag_\n\n if \"lemma\" in self.processors:\n token.lemma = word.lemma_", "title": "" }, { "docid": "777b31afb00b73cbaf63366c48b2ec10", "score": "0.6016639", "text": "def _encode_and_add_tokens(sentence: str, tokenizer: tfds.deprecated.text.SubwordTextEncoder) -> List[int]:\n start_token = tokenizer.vocab_size\n end_token = tokenizer.vocab_size + 1\n return [start_token] + tokenizer.encode(sentence) + [end_token]", "title": "" }, { "docid": "64f42b4090cebdd88f2e722896d0a772", "score": "0.6010215", "text": "def tokenize_sentences(sentences: list, tokenizer) -> list:\n sentences_tokenized = [tokenizer.encode(sentence, add_special_tokens=False) for sentence in sentences] # tokenize \n return sentences_tokenized", "title": "" }, { "docid": "a04b6beb69d01360ba1f5a514d232320", "score": "0.60016", "text": "def tokenize_sentence(contents_text):\n content_sentences = list(nlp(contents_text).sents)\n return content_sentences, contents_text", "title": "" }, { "docid": "371e66e10b083529b065cffee4ffcad9", "score": "0.5994442", "text": "def sentence_tokenize(text):\n sent_list = []\n for w in nltk.sent_tokenize(text):\n sent_list.append(w)\n return sent_list", "title": "" }, { "docid": "eb1cf3f12923d33fd9aed7ceb7ea7c5f", "score": "0.5991884", "text": "def processTokenisedForWordEmbed(tokenised_sentence):\n tokens_final = list(tokenised_sentence)\n for token in tokenised_sentence:\n blind_terms = ['non_target_substance','target_substance1','target_substance2']\n break_flag = 0\n \n # split terms that have a slash between them\n if re.match(r'\\w*/\\w*',token) != None:\n insertion_index = tokens_final.index(token) \n split_tokens = split_token(token,'/')\n tokens_final = insert_tokens(tokens_final, insertion_index, split_tokens)\n continue\n \n if re.match(r'\\w*~\\w*',token) != None:\n insertion_index = tokens_final.index(token) \n split_tokens = split_token(token,'~')\n tokens_final = insert_tokens(tokens_final, insertion_index, split_tokens)\n continue\n \n \n # split terms that have a full stop \n if re.match(r'\\w+\\.\\w+',token) != None:\n insertion_index = tokens_final.index(token) \n split_tokens = split_token(token,'.')\n tokens_final = insert_tokens(tokens_final, insertion_index, split_tokens)\n continue\n \n\n \n for term in blind_terms:\n if term + \".\" in token:\n insertion_index = tokens_final.index(token) \n split_tokens = 
split_token(token,'.')\n tokens_final = insert_tokens(tokens_final, insertion_index, split_tokens)\n break_flag = 1\n break\n \n if break_flag == 1: continue \n # get rid of plurals\n for term in blind_terms:\n if term + \"s\" in token:\n token_to_insert = []\n token_to_insert.append(term) \n insertion_index = tokens_final.index(token) \n tokens_final = insert_tokens(tokens_final, insertion_index, token_to_insert)\n break_flag = 1\n break\n \n if break_flag == 1: continue\n \n # split hyphenated terms like drug1-inducing\n for term in blind_terms:\n if term + '-' in token:\n insertion_index = tokens_final.index(token) \n split_tokens = split_token(token,'-')\n tokens_final = insert_tokens(tokens_final, insertion_index, split_tokens)\n break_flag = 1\n break\n \n if break_flag == 1: continue \n\n for term in blind_terms:\n if term + '*' in token:\n insertion_index = tokens_final.index(token) \n split_tokens = split_token(token,'*')\n tokens_final = insert_tokens(tokens_final, insertion_index, split_tokens)\n break_flag = 1\n break\n \n if break_flag == 1: continue \n \n # split hyphenated terms like pre-drug1\n for term in blind_terms: \n if '-' + term in token:\n insertion_index = tokens_final.index(token) \n split_tokens = split_token(token,'-')\n tokens_final = insert_tokens(tokens_final, insertion_index, split_tokens)\n break\n \n \n# =============================================================================\n# \n# TODO - psuedo\"targetsubstance\"\n# TODO - dihydroergotamine and ergotamine\n# \n# =============================================================================\n tokens_final_lowercase = []\n for token in tokens_final:\n token_lower = token.lower()\n tokens_final_lowercase.append(token_lower)\n return tokens_final_lowercase", "title": "" }, { "docid": "5e0af7ec7204fe8190423e28b2109814", "score": "0.59864825", "text": "def tag_nouns_and_verbs(sentence, tag_list=words):\n doc = en(sentence)\n tokens = []\n for token in doc:\n if (token.text in tag_list) & (doc[-3].text == 'to'):\n tokens.append(token.text.upper() + '>VERB')\n \n elif (token.text in tag_list) & (doc[-3].text == 'the'):\n tokens.append(token.text.upper() + '>NOUN')\n ##TODO## : if a word is in the tag list and is a noun or verb, add it as WORD>NOUN or WORD>VERB\n else:\n tokens.append(token.text)\n return ' '.join(tokens)", "title": "" }, { "docid": "7911effb538b012ca52b8784550c38b0", "score": "0.5984343", "text": "def word_tokenize(self, sentence):\r\n\t\treturn self.nlp.word_tokenize(sentence)", "title": "" }, { "docid": "4421bf44367f4e3523d84c5d92df9acf", "score": "0.5970521", "text": "def sentence(sentence):\n for word in sentence.split():\n yield word", "title": "" }, { "docid": "43baad5d7fb24302c56af9a02b9e9b2a", "score": "0.5969507", "text": "def sentence_tokenize(self,text):\n sent_list = []\n for w in nltk.sent_tokenize(text):\n sent_list.append(w)\n return sent_list", "title": "" }, { "docid": "f3273ee91209aee4166a27882aa0b5bc", "score": "0.5949155", "text": "def tokenize(text):\r\n return [tok.text for tok in spacy_en.tokenizer(text)]", "title": "" }, { "docid": "e0d3cde4de0f9590f46d4b265062767e", "score": "0.59477043", "text": "def transform(self, X, y=None):\n corpus = self._get_corpus()\n result = []\n for phrase in X:\n newphrase = []\n for word in phrase.split():\n newphrase.extend(corpus.get(word.lower(), []))\n result.append(\" \".join(newphrase))\n return result", "title": "" }, { "docid": "cb1494ebaa69c0a69b22ff29eede6b49", "score": "0.593477", "text": "def _sentence_tokenize(self):\n 
return self.docx.sentences", "title": "" }, { "docid": "a6d6c6d5742878e3bd21125880866c4c", "score": "0.59183604", "text": "def tokeniseForDistance(sentence):\n punc = list(string.punctuation)\n tokens = TreebankWordTokenizer().tokenize(sentence)\n #tokens = [token for token in tokens if token not in punc]\n \n return tokens", "title": "" }, { "docid": "7f6f995e87560bf9ce847bd1627748f5", "score": "0.5911474", "text": "def tokenize(sentence):\n \n return kWORDS.findall(sentence.lower())", "title": "" }, { "docid": "63eb633d10b6a4d7f149b0ed896c3dda", "score": "0.5910022", "text": "def pre_processing(sentence: str)->list:\n try:\n if not sentence:\n logging.error('Empty string detected!')\n return 402\n elif len(sentence) > MAXLENGTH:\n return 408\n doc = nlp(sentence)\n tokens = [token.text for token in doc] #Tokenise\n letter_case = [1 if token.istitle() else 0 for token in tokens]\n tokens = [token.lower() for token in tokens]\n pos_tags = [token.pos_ for token in doc] #Parts-of-speech tags\n token_tags = list(zip(tokens, pos_tags))\n return tokens, pos_tags, token_tags, letter_case\n except TypeError:\n logging.error('Invalid string detected!')\n return 405", "title": "" }, { "docid": "90782a2d4d9498cd0a0493ffbe54925d", "score": "0.58964837", "text": "def tokenize(self, sent):\n return self.tokenizer.tokenize(sent)", "title": "" }, { "docid": "dff46898c48db05508df0b4c237e78b8", "score": "0.5895517", "text": "def convertStringList(self, stringList):\n del self[:]\n for text in stringList:\n token = sentencetoken.Token(text)\n # print 'adding: ', token.text\n self.append(token)", "title": "" }, { "docid": "8d0e932f708a3985deabb0330b6b4bef", "score": "0.58949715", "text": "def get_tokens(self, sentence: str) -> List[str]:\n words = self.word_splitter.split_words(sentence)\n filtered_words = self.word_filter.filter_words(words)\n stemmed_words = [self.word_stemmer.stem_word(word) for word in filtered_words]\n return stemmed_words", "title": "" }, { "docid": "77bf7ad810e110c147970da468db530d", "score": "0.58866155", "text": "def getTokens(self, sentence):\r\n\t\ttok = {}\r\n\r\n\t\tfor s in self.annotate(sentence)['sentences']:\r\n\t\t\tfor d in s['tokens']:\r\n\t\t\t\ttok[d['index']] = {'word' : d['originalText'], 'lemma' : d['lemma'], 'pos' : d['pos'], 'ner': d['ner']}\r\n\t\treturn tok", "title": "" }, { "docid": "5e55126fb806be33f31ec46134796599", "score": "0.5885814", "text": "def process_sentence(sentence, plugin_names=[]):\n new_sentence = sentence\n for name in plugin_names:\n new_sentence = PLUGINS[name](new_sentence)\n return new_sentence", "title": "" }, { "docid": "f43e4cff9f6bdb086a7d0ee6b5079959", "score": "0.58767927", "text": "def tokenize(sent):\n lemmatizer = nltk.WordNetLemmatizer() \n for token in nltk.wordpunct_tokenize(sent):\n token = token.lower()\n yield lemmatizer.lemmatize(token)", "title": "" }, { "docid": "82efaeb3e52bbc2a5e50a243e871843d", "score": "0.5874686", "text": "def _tokenize(self, text, **kwargs):\n return [t.text for t in self.NLP.tokenizer(text)]", "title": "" }, { "docid": "45361c84c2d9065e5c8727fce6d769ce", "score": "0.5871192", "text": "def tokenise(self):\n self.tokens = self.text.split() # split by space; return a list", "title": "" }, { "docid": "bda7bb8c5040f71c484a7acd826d77e4", "score": "0.5864933", "text": "def tokenize(sentence):\n tokens = [RE_DIGITS.sub('#', w) for w in word_tokenize(sentence)]\n tokenized = ' '.join(tokens)\n return tokenized", "title": "" }, { "docid": "327d6702acf9228af1ea3d631b632c24", "score": "0.5863802", "text": "def 
_Tokenize(self,text):\n return text.split()", "title": "" }, { "docid": "2fe22d14da05ecef9a2c20a25b6ee126", "score": "0.5863306", "text": "def tokenize_and_censor(self, sentence):\n\n # you should not need to modify this function\n \n yield kSTART\n for ii in tokenize(sentence):\n if ii not in self._vocab:\n raise OutOfVocab(ii)\n yield ii\n yield kEND", "title": "" }, { "docid": "c52f8b6b89bcdf4585524b67c19f40ba", "score": "0.5845006", "text": "def get_entities(sent):\n\n \"\"\"I have defined a few empty variables in this chunk. prv_tok_dep and prv_tok_text will hold the dependency\n tag of the previous word in the sentence and that previous word itself, respectively. prefix and modifier will\n hold the text that is associated with the subject or the object.\"\"\"\n\n ent1 = \"\"\n ent2 = \"\"\n\n prv_tok_dep = \"\" # dependency tag of previous token in the sentence\n prv_tok_text = \"\" # previous token in the sentence\n\n prefix = \"\"\n modifier = \"\"\n\n #############################################################\n\n for tok in nlp(sent):\n\n \"\"\"Next, we will loop through the tokens in the sentence. We will first check if the token is a punctuation\n mark or not. If yes, then we will ignore it and move on to the next token. If the token is a part of a\n compound word (dependency tag = “compound”), we will keep it in the prefix variable. A compound word is a \n combination of multiple words linked to form a word with a new meaning \n (example – “Football Stadium”, “animal lover”).\n\n As and when we come across a subject or an object in the sentence, we will add this prefix to it. \n We will do the same thing with the modifier words, such as “nice shirt”, “big house”, etc.\"\"\"\n\n # if token is a punctuation mark then move on to the next token\n if tok.dep_ != \"punct\":\n # check: token is a compound word or not\n if tok.dep_ == \"compound\":\n prefix = tok.text\n # if the previous word was also a 'compound' then add the current word to it\n if prv_tok_dep == \"compound\":\n prefix = prv_tok_text + \" \" + tok.text\n\n # check: token is a modifier or not\n if tok.dep_.endswith(\"mod\") == True:\n modifier = tok.text\n # if the previous word was also a 'compound' then add the current word to it\n if prv_tok_dep == \"compound\":\n modifier = prv_tok_text + \" \" + tok.text\n\n \"\"\"Here, if the token is the subject, then it will be captured as the first entity in the ent1 variable. \n Variables such as prefix, modifier, prv_tok_dep, and prv_tok_text will be reset.\"\"\"\n if tok.dep_.find(\"subj\") == True:\n ent1 = modifier + \" \" + prefix + \" \" + tok.text\n prefix = \"\"\n modifier = \"\"\n prv_tok_dep = \"\"\n prv_tok_text = \"\"\n\n \"\"\"Here, if the token is the object, then it will be captured as the second entity \n in the ent2 variable. 
Variables such as prefix, modifier, prv_tok_dep, \n and prv_tok_text will again be reset.\"\"\"\n\n if tok.dep_.find(\"obj\") == True:\n ent2 = modifier + \" \" + prefix + \" \" + tok.text\n\n \"\"\"Once we have captured the subject and the object in the sentence, we will update the previous \n token and its dependency tag.\"\"\"\n\n # update variables\n prv_tok_dep = tok.dep_\n prv_tok_text = tok.text\n #############################################################\n\n return [ent1.strip(), ent2.strip()]", "title": "" }, { "docid": "e990bf2193fa5b2ee8e1514733863270", "score": "0.5827982", "text": "def naive(self, text):\n\n\t\t# Split each sentence into words at spaces\n\t\tsplit_sentences = [sentence.split() for sentence in text]\n\n\t\t# Remove empty strings and also leading and trailing spaces and convert the word to lower case.\n\t\t# Also remove tokens that are stand-alone punctuation marks\n\t\ttokenizedText = [[word.strip().lower() for word in temp_list if (len(word.strip()) > 0 and not word in string.punctuation)] for temp_list in split_sentences]\n\n\t\treturn tokenizedText", "title": "" }, { "docid": "3d9c565b5b5c084d45a5dcd0d4fc8e18", "score": "0.58212596", "text": "def tokenize(self, text, pos_tagging=True):\n pass", "title": "" }, { "docid": "157d5a278fe2f9b05c0dadd9894ec7aa", "score": "0.58198774", "text": "def tokenise(sentence, vocab):\n split_sentence = reuters.word_split(sentence)\n split_sentence = [word if word in vocab else '<UNK>'\n for word in split_sentence]\n sentence_ids = [int(vocab[word]) for word in split_sentence]\n return ' '.join(split_sentence), sentence_ids", "title": "" }, { "docid": "04930a9dc03c55d26699fcff9777dcea", "score": "0.58097553", "text": "def naive(self, text):\n\n\n\t\t#Fill in code here\n\t\n\t\ttokenizedText = []\n\t\tfor sent in text:\n\t\t\t# split the sentence at word separators like ' , -/ using re library\n\t\t\ttokenized_Text = re.split(\"[' ,-/]\", sent)\n\t\t\t# copy tokenized list\n\t\t\tlst = copy.deepcopy(tokenized_Text)\n\n\t\t\t# remove unwanted words like punctuations, spaces and empty words\n\t\t\tfor W in tokenized_Text:\n\t\t\t\tif W in ['?', ':', '!', '.', ',', ';', '\"']:\n\t\t\t\t\tlst.remove(W)\n\t\t\t\telif W =='':\n\t\t\t\t\tlst.remove(W)\n\t\t\t\telif W == ' ':\n\t\t\t\t\tlst.remove(W)\n\n\t\t\t\t# append the list to super list\n\t\t\t\ttokenizedText.append(lst)\n\n\t\treturn tokenizedText", "title": "" }, { "docid": "df8f2d1a637b833ce63aa1d4d6a862a0", "score": "0.5808596", "text": "def sent_tokenize(text):\n lang = detect_lang(text)\n sents = []\n if is_chinese(lang):\n for sent in re.findall(\"[^!?。\\.\\!\\?]+[!?。\\.\\!\\?]?\", text, flags=re.U):\n sents.append(sent)\n else:\n for paragraph in segmenter.process(text):\n for sentence in paragraph:\n sents.append(\" \".join([t.value for t in sentence]))\n return sents", "title": "" }, { "docid": "4bc577fc338e474244028911a32e5ad5", "score": "0.58030456", "text": "def process_tokens(self, tokens: List[Token]):", "title": "" }, { "docid": "b86a6aef7c3ad60b5e05f5e416dc1490", "score": "0.5798448", "text": "def train(self, sentences):", "title": "" }, { "docid": "5e5133506beb3a8b1ddf1a9bb163a6cc", "score": "0.5796216", "text": "def tag_nouns_and_verbs(sentence, tag_list=words):\n doc = en(sentence)\n tokens = []\n for token in doc:\n if (token.text in tag_list) & (doc[-1].text == 'quickly'):\n tokens.append(token.text.upper() + '>VERB')\n \n elif (token.text in tag_list) & (doc[-1].text == 'today'):\n tokens.append(token.text.upper() + '>NOUN')\n ##TODO## : if a word is in 
the tag list and is a noun or verb, add it as WORD>NOUN or WORD>VERB\n        else:\n            tokens.append(token.text)\n    return ' '.join(tokens)", "title": "" }, { "docid": "5874cc0deb9096a232fb201498dee7d9", "score": "0.5792108", "text": "def tokenize_en(text):\n    return [tok.text for tok in spacy_en.tokenizer(text)]", "title": "" }, { "docid": "7dbd6666ee7f22e6269e48336c85ea1b", "score": "0.57873243", "text": "def sentence_tokenize(text):\n    return nltk_sent_tokenize(text)", "title": "" }, { "docid": "d3f01cd79d1dbb43a6cff3db50097e11", "score": "0.57792884", "text": "def _tokenize(self, item):\n\n        tokenized = []\n        for s in self._sentence_detector.tokenize(item):\n            tokenized.extend([w.lower() for w in nltk.tokenize.word_tokenize(s)])\n\n        return tokenized", "title": "" }, { "docid": "872db365ffa2264db19743ebb155f902", "score": "0.57772833", "text": "def do_tokenize(last, turn):\n    if turn.startswith('customer: '):\n        # agent:\n        turn = turn[10:].strip()\n        sot = start_of_turn1\n        eot = start_of_turn2\n    else:  # agent:\n        turn = turn[7:].strip()\n        sot = start_of_turn2\n        eot = start_of_turn1\n    sentences = sent_tok(turn)\n    tokenized_sents = []\n    for s in sentences:\n        words = word_tok(s)\n        tokenized_sents.append(' '.join(words))\n    flat_content = ' '.join(tokenized_sents)\n    if last:\n        return sot + ' ' + flat_content + ' ' + end_of_dialogue + ' ' + eot\n    else:\n        return sot + ' ' + flat_content", "title": "" }, { "docid": "9b362e43ce5befa99e7c9e0dc4e64448", "score": "0.5771953", "text": "def _tokenise(self):\n\n        lines = self._line_iter()\n        for sent in NLP.pipe(lines):\n            yield [span for span in sent]", "title": "" }, { "docid": "71d36635af85e0dd0077a9fb64cb87d5", "score": "0.5770367", "text": "def transform(self, tokens: list) -> list:\n        lemmatizer = nltk.WordNetLemmatizer()\n        return [lemmatizer.lemmatize(token, pos='v') for token in tokens]", "title": "" }, { "docid": "af18c1a2ff64c44702440299475a9fda", "score": "0.5767589", "text": "def post_processing(sentence: str)->list:\n    try:\n        tokens, pos_tags, token_pos, token_case = pre_processing(sentence)\n        if DEFAULT:\n            suggested_tokens, intact_words_list = symspell_test(token_pos)\n        else:\n            suggested_tokens, intact_words_list = enchant_check(token_pos)\n        corrected_sent = ' '.join(suggested_tokens)\n        new_tokens, new_pos_tags, new_token_pos, new_token_case = pre_processing(corrected_sent)\n\n        #modifying original list of tokens to add possible repetitions\n        correct_token_list = [(word, index) for (index, word) in enumerate(suggested_tokens)\n                              if word != tokens[index]\n                              and word not in intact_words_list]\n        original_tokens = tokens #Make a copy of the original list of tokens\n        offset = 0 #Offset to account for shift in index after possible duplication of original incorrect tokens\n        for (word, index) in correct_token_list:\n            num_parts = 
len(nlp(word))\n original_tokens[index+offset : index+offset+1] = original_tokens[index+offset : index+offset+1] * num_parts\n if num_parts > 1:\n token_case.insert(index+offset, token_case[index+offset])\n offset += num_parts-1\n payload = []\n\n #Reassigining case to both, original and corrected tokens\n original_tokens = [token.title() if case == 1 else token for (case,token)\n in list(zip(token_case, original_tokens))]\n new_tokens = [token.title() if case == 1 else token for (case,token)\n in list(zip(token_case, new_tokens))]\n logging.info('Generating RESTful output payload format.')\n for i,_ in enumerate(new_tokens):\n payload.append({\"token\":new_tokens[i], \"pos\":new_pos_tags[i], \"raw\":original_tokens[i]})\n return payload\n except TypeError:\n return 410", "title": "" }, { "docid": "1358427a355a56abd727541dc1c69aa9", "score": "0.57605696", "text": "def clean_up_sentence(sentence):\n # tokenize the pattern\n sentence_words = nltk.word_tokenize(sentence)\n # stem each word\n sentence_words = clean_sequence(sentence_words)\n return sentence_words", "title": "" }, { "docid": "4ce1502cbab0678183366f2f6a8f8805", "score": "0.57567173", "text": "def __tokenize(self, snts: List[List[str]]) -> Tuple[torch.Tensor, torch.Tensor, List[List[int]], int, int]:\n # generate batch_size, seq_len\n batch_size, seq_len = len(snts), max(len(snt) for snt in snts)+2\n\n # clean sentences\n snts_cleaned, offsets = [], []\n # TODO: process english and chinese, character-based and word-based\n for snt in snts:\n if self.bert_transliterate is None:\n cleaned_words = []\n for word in snt:\n word = BERT_TOKEN_MAPPING.get(word, word)\n # This un-escaping for / and * was not yet added for the\n # parser version in https://arxiv.org/abs/1812.11760v1\n # and related model releases (e.g. 
benepar_en2)\n word = word.replace('\\\\/', '/').replace('\\\\*', '*')\n # Mid-token punctuation occurs in biomedical text\n word = word.replace('-LSB-', '[').replace('-RSB-', ']')\n word = word.replace('-LRB-', '(').replace('-RRB-', ')')\n if word == \"n't\" and cleaned_words:\n cleaned_words[-1] = cleaned_words[-1] + \"n\"\n word = \"'t\"\n cleaned_words.append(word)\n else:\n # When transliterating, assume that the token mapping is\n # taken care of elsewhere\n cleaned_words = [self.bert_transliterate(word) for word in snts]\n snts_cleaned.append(' '.join(cleaned_words))\n\n # tokenize sentences\n tokens = self.tokenizer(snts_cleaned, padding=PAD_STATEGY, max_length=512, truncation=TRUNCATION_STATEGY,\n return_attention_mask=True, return_offsets_mapping=True)\n ids, attention_mask, offsets_mapping = tokens['input_ids'], tokens['attention_mask'], tokens['offset_mapping']\n if self.subword == CHARACTER_BASED:\n assert len(ids[0]) == seq_len\n\n # generate offsets list\n output_len = len(ids[0])\n if self.subword != CHARACTER_BASED:\n for i, offset_mapping in enumerate(offsets_mapping):\n snt = snts_cleaned[i] + ' '\n offset, word_tail_idx, word_idx = [0], snt.find(' '), 1\n for subword_head_idx, subword_tail_idx in offset_mapping[1:]:\n if subword_tail_idx == 0:\n offset.append(word_idx+1)\n offset.extend([word_idx+2]*(output_len-len(offset)))\n break\n if subword_head_idx > word_tail_idx:\n word_tail_idx = snt.find(' ', word_tail_idx+1)\n assert word_tail_idx > subword_head_idx and subword_tail_idx <= word_tail_idx\n word_idx += 1\n offset.append(word_idx)\n offsets.append(offset)\n\n return (\n torch.tensor(ids, dtype=torch.long, device=self.device),\n torch.tensor(attention_mask, dtype=torch.int, device=self.device),\n offsets,\n batch_size,\n seq_len\n )", "title": "" }, { "docid": "6dec79c5586574afbd5e0d166b5b18bd", "score": "0.57567126", "text": "def gen_tokens(\n self,\n tags: List[Tag],\n sents: List[List[str]],\n tokenizer: Tokenizer = WordPieceTokenizer(),\n process_titles: bool = False,\n ):\n\n sents = [\" \".join(sent) for sent in sents]\n # in addition to making bio-tags for each token, it will also\n # infuse the token position (start, end) inside the respective tags.\n # this is needed later for mapping relations.\n\n heading_offset = 0 if process_titles else len(\" \".join(self.heading))\n\n tokens2d, tags = tokenizer.tokenize(\n sents, tags, heading_offset=heading_offset\n )\n\n return tokens2d, tags", "title": "" }, { "docid": "7d026e8be71b8a9d58cc3b5f43ff48c7", "score": "0.57553077", "text": "def tokenize(text):\r\n for word in text.split():\r\n for token in tokenize_word(word):\r\n yield token", "title": "" }, { "docid": "ce7095e3bcfb84454e9489c09b12d7b3", "score": "0.5755113", "text": "def tokenize_and_cut(sentence):\n tokens = self.tokenizer.tokenize(sentence)\n tokens = tokens[:self.max_input_len - 2]\n return tokens", "title": "" }, { "docid": "74e0a0f6aaba81cfeb53d753a3b4e726", "score": "0.5752896", "text": "def sent_tokenize(tagger, string):\n\n sentences = []\n current_sent = []\n lines = tagger.TagText(string)\n for line in lines:\n triple = get_treetagger_triple(line)\n if triple:\n word, pos, lemma = triple\n token = word.lower()\n # Replace o+e ligatures\n token = replace_oelig(token)\n if pos == 'SENT':\n if len(current_sent):\n current_sent.append(token)\n sentences.append(current_sent)\n current_sent = []\n else:\n # The sentence is empty. 
Ignore the punctuation mark.\n pass\n else:\n current_sent.append(token)\n # Make sure that the current sentence is empty, or else add to output.\n if len(current_sent):\n sentences.append(current_sent)\n return sentences", "title": "" }, { "docid": "2c1b3ef820a3a6cc99e485cd720ffde9", "score": "0.57493174", "text": "def tokenize(self, text):\n text = self._clean_text(text)\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n orig_tokens = whitespace_tokenize(text) # 返回的结果是一个使用空格进行split的函数,中文加了空格且返回一个列表\n split_tokens = []\n # 处理流程是先看下是否要变成小写(感觉变成小写就代表了text中有英文),然后使用标点符号把句子拆分\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n # print(\"\\n\\n 1 token:\",token,\"\\n\\n\")\n token = self._run_strip_accents(token)\n # print(\"\\n\\n 2 token:\",token,\"\\n\\n\")\n\n split_tokens.extend(self._run_split_on_punc(token))\n\n # 把句中多余的空格去掉,然后返回的是list of token\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens", "title": "" }, { "docid": "6a1f3893401c839405332da670fe90ac", "score": "0.57481503", "text": "def join_tokens(tokens):\n output = []\n raw = []\n for s in tokens:\n if callable(s):\n if raw:\n raw = ''.join(raw)\n output.append(raw)\n raw = []\n output.append(s)\n continue\n raw.append(s)\n if raw:\n raw = ''.join(raw)\n output.append(raw)\n return output", "title": "" }, { "docid": "f0b1dfc8191d24f7af4a4894621817e6", "score": "0.57415134", "text": "def spacy_tokenizer(txt):\n doc = nlp(txt)\n terms_tagged = extract_from_doc(doc)\n terms_tagged_list = [f\"{term} :: {tag}\" for term, tag in terms_tagged.items()]\n return terms_tagged_list", "title": "" }, { "docid": "2c5ff254e3dd0964331e12aabd2d70ec", "score": "0.5740097", "text": "def convert_sentence(self, input_sentence):\n sentence = ''\n for word in [self.idx2word.get(x) for x in input_sentence]:\n\n if word == '<end>':\n break\n if word != '<start>' and word != '.':\n sentence += ' ' + word\n if word == '.':\n sentence += word\n sentence = sentence[1].upper() + sentence[2:]\n return sentence", "title": "" }, { "docid": "0b78ba2e63219fabd175a580d2454ba9", "score": "0.573807", "text": "def tokenize(self, text_input):\n normalized_input = self._fast_bert_normalizer.normalize(text_input)\n return self._fast_wordpiece_tokenizer.tokenize(normalized_input)", "title": "" }, { "docid": "eb204282fc79cd5d5ca9ef6226a658d0", "score": "0.5736437", "text": "def tokenize(self, text):\n pass", "title": "" }, { "docid": "db2b80691e77b925797e1250ecb9a069", "score": "0.5733543", "text": "def format_sentence(self, sent):\n return {word: True for word in nltk.word_tokenize(sent)}", "title": "" }, { "docid": "92ad2ee9062e63d69b60b434ca4cbbbb", "score": "0.5729861", "text": "def from_doc(self, document: List[str]) -> None:\n for string in document:\n string = self._string_normalizer(string)\n tokens = self.tokenize(string)\n for token in tokens:\n self.add_token(token)", "title": "" }, { "docid": "f2ae8b557a70d863de937fb30e2ea855", "score": "0.57233655", "text": "def token_test():\n\n with open('word_files/j_words.txt') as wf:\n words = json.load(wf)\n\n test_words = [\n 'king (n.)',\n 'bleach (v.)',\n 
'antithesis (n.)',\n 'face (n.)',\n 'porpoise (n.)',\n 'dealer (n.)',\n 'poise (n.)',\n 'spirit (n.)',\n 'edgeways',\n 'bunch (n.)',\n 'case (n.2)',\n 'hog (n.)',\n 'begetter (n.)',\n 'cap (n.)',\n ]\n\n test_sents = [(test_word, words[test_word].split('\\n')[0]) for test_word in test_words]\n test_sents = [(test_word, list(extract_sent(sent))) for test_word, sent in test_sents]\n test_sents = [(test_word, list(extract_words(sent)) for test_word, sent in test_sents]\n\n for word, sents in test_sents:\n print \"\\nWord:\\t%s\\n\" % (word)\n for i, sent in enumerate(sents):\n print \"SENTENCE: #%s\\n\" % (i)\n print \"\\t\\t\" + repr(sent)", "title": "" }, { "docid": "974df6283dd93d991b6387ceb2f3378c", "score": "0.5717188", "text": "def lemmatize_sentence(self, tokens):\n \n print(\"Lemmatizing tokens\\n\")\n \n lemmatizer = WordNetLemmatizer()\n lemmatized_sentence = []\n for word, tag in pos_tag(tokens):\n if tag.startswith('NN'):\n pos = 'n'\n elif tag.startswith('VB'):\n pos = 'v'\n else:\n pos = 'a'\n lemmatized_sentence.append(lemmatizer.lemmatize(word, pos))\n return lemmatized_sentence", "title": "" }, { "docid": "26c12d62411d1ceabef6c1201c1f3301", "score": "0.5706472", "text": "def _tokenize(self, text):\n text = '' if not text else text\n text = self.pretokenize(text)\n text = text.lower() if self.lc else text\n if self.collapse_hashtags:\n text = re.sub('#\\S+', 'THIS_IS_A_HASHTAG', text)\n else:\n text = re.sub('#(\\S+)', r'HASHTAG_\\1', text)\n if self.collapse_mentions:\n text = re.sub('@\\S+', 'THIS_IS_A_MENTION', text)\n else:\n text = re.sub('@(\\S+)', r'MENTION_\\1', text)\n if self.collapse_urls:\n text = re.sub('http\\S+', 'THIS_IS_A_URL', text)\n if self.limit_repeats:\n text = re.sub(r'(.)\\1\\1\\1+', r'\\1', text)\n if self.collapse_digits:\n text = re.sub(r'[0-9]+', '9', text)\n toks = []\n for tok in text.split():\n tok = re.sub(r'^(' + punc_re + '+)', r'\\1 ', tok)\n tok = re.sub(r'(' + punc_re + '+)$', r' \\1', tok)\n for subtok in tok.split():\n if self.retain_punc_toks or re.search('\\w', subtok):\n toks.append(subtok)\n if self.rt_prefix:\n rt_text = 'rt' if self.lc else 'RT'\n if rt_text in toks:\n toks.remove(rt_text)\n toks = ['RT_' + t for t in toks]\n return toks", "title": "" }, { "docid": "b7d9966bf14c9ac8a495bb698d5a897c", "score": "0.57039565", "text": "def text_tokenized(inputtext, outputtext, wordlist):\n inputtext = open(inputtext, 'r', encoding=_encode_type(inputtext)) \n fileResult = open(outputtext, \"w\", encoding=\"utf-8\") \n for sentence in inputtext:\n for token in tokenization(sentence, wordlist):\n fileResult.write(token)\n fileResult.write(\" \")\n fileResult.write(\"\\n\") \n inputtext.close()\n fileResult.close()", "title": "" }, { "docid": "148ea9c73349f2fe0032adf0fffba3a5", "score": "0.57031745", "text": "def Tokenize(sentence):\n tokens = []\n for token in nltk.word_tokenize(sentence):\n if bool(re.search('[A-za-z0-9]', token)):\n tokens.append(token.lower())\n\n return tokens", "title": "" }, { "docid": "33648f9ff93aa54f6329f401c7a2e364", "score": "0.56877756", "text": "def prepareForNLP(text):\n\n # Split up the input into sentences\n sentences = nltk.sent_tokenize(text)\n # Split up the sentences into words\n sentences = [nltk.word_tokenize(sent) for sent in sentences]\n # Tokenize the words\n sentences = [nltk.pos_tag(sent) for sent in sentences]\n # Return the split and tokenized sentences\n return sentences", "title": "" }, { "docid": "3983609176c639f12b8d144c92bf0565", "score": "0.5685069", "text": "def 
getSentenceForms(self):", "title": "" }, { "docid": "c7e6d5cc1f5cf76b7512a0c243cede31", "score": "0.56824946", "text": "def split_sentence(text: str) -> list:\n review = nlp(text)\n sentences = []\n start = 0\n for token in review:\n if token.sent_start: # boolean value if token starts the sentence\n sentences.append(review[start:(token.i - 1)])\n start = token.i\n if token.i == len(review) - 1:\n sentences.append(review[start:(token.i+1)])\n return sentences", "title": "" }, { "docid": "38307a0f8e573337fdf9f84d385a878f", "score": "0.567543", "text": "def tokenize(self, text):\n\t\treturn [tokens.strip().replace(',', ' ').replace('.', ' ').replace('!', ' ').replace('?', ' ').split() for tokens in text]", "title": "" }, { "docid": "db19bb6675d02678c27d1650330fdf2b", "score": "0.5669284", "text": "def text_to_conll(f):\n global options\n # print(f)\n if options.nosplit:\n sentences = f.readlines()\n # print(\"sentences: \",sentences)\n else:\n sentences = []\n for l in f:\n l = sentencebreaks_to_newlines(l)\n \n sentences.extend([s for s in NEWLINE_TERM_REGEX.split(l) if s])\n\n lines = []\n\n offset = 0\n # print(sentences)\n for s in sentences:\n nonspace_token_seen = False\n s = handle_non_standard_char(s)\n \n tokens = word_tokenize(s)\n\n \n \n \n \n \n token_w_pos = map_text_to_char(s, tokens, offset)\n # print(\"token_w_pos: \",token_w_pos)\n\n if 'TetKanCam' in s:\n print(token_w_pos)\n\n for(t, pos) in token_w_pos:\n t=t.strip()\n if t=='': continue\n if not t.isspace():\n l1=['O', pos, pos + len(t), t]\n lines.append(l1)\n # print(l1)\n \n lines.append([])\n\n offset+=len(s)\n\n\n # tokens = [t for t in TOKENIZATION_REGEX.split(s) if t] # JT : Dec 6\n # for t in tokens:\n # if not t.isspace():\n # lines.append(['O', offset, offset + len(t), t])\n # nonspace_token_seen = True\n # offset += len(t)\n\n # # sentences delimited by empty lines\n # if nonspace_token_seen:\n # lines.append([])\n\n # add labels (other than 'O') from standoff annotation if specified\n if options.annsuffix:\n textbounds, dict_of_entity, list_of_relns=get_annotations(f.name)\n lines = relabel(lines, textbounds , dict_of_entity, list_of_relns, f)\n # print(lines)\n\n # lines = [[l[0], str(l[1]), str(l[2]), l[3]] if l else l for l in lines] #JT: Dec 6\n # print(lines)\n lines = [[l[3],l[0]] if l else l for l in lines] #JT: Dec 6\n # lines = [[l[3],l[0],l[4],l[5],l[6]] if l else l for l in lines] #JT: Dec 6\n \n return StringIO('\\n'.join(('\\t'.join(l) for l in lines)))", "title": "" }, { "docid": "2eb6a21b5a676b32238748cb644e279f", "score": "0.56656986", "text": "def transform(self, tokens: list) -> list:\n return [token.lower() for token in tokens]", "title": "" }, { "docid": "d917682890246cb3e49f714911b2c8f2", "score": "0.56554043", "text": "def _tokenize(self, entity):\n return [(hash(token.content), token)\n for token in self.tokenizer(self.transform(entity))]", "title": "" }, { "docid": "d5956c6e0eadac475ef8835981aa519d", "score": "0.56538236", "text": "def tokenize(sentence): \n ret = []\n word = ''\n for char in sentence :\n if char in string.ascii_letters: \n word += char.lower()\n else:\n if word != '':\n ret.append(word)\n word = ''\n ret.append(char)\n if word != '':\n ret.append(word)\n return ret", "title": "" }, { "docid": "953aa0a28db352ad06ce9aecc47646e0", "score": "0.5650213", "text": "def pre_process_text(self):\r\n # Add subject if it don't exist for a verb\r\n new_sent = []\r\n split_sent, conj = self.__remove_char(self.text)\r\n split_sent = split_sent.split(\" and \")\r\n words = \"\"\r\n 
c = -1\r\n\r\n for sent in split_sent:\r\n\r\n ent, f_subj = self.__insert_verb(sent)\r\n\r\n if f_subj:\r\n app = sent\r\n else:\r\n app = ent\r\n\r\n new_sent.append(app)\r\n if words == \"\":\r\n words += app\r\n else:\r\n words += conj[c] + \" \" + app\r\n c += 1\r\n if self.test:\r\n print(\"NEW SENTENCE:\\n\", new_sent)\r\n print(\"\\nLONG WORDS: \\n\", words)\r\n\r\n # Apply get_entities\r\n words = words.split(\".\")\r\n words =[i for i in words if i]\r\n\r\n self.__remove_space_duplicate(words)\r\n\r\n return self.search, self.title, words", "title": "" }, { "docid": "b3addc55b206159dd3fbce02d72796ac", "score": "0.5644142", "text": "def link_sentence (self, sent):\r\n visited_tokens = []\r\n visited_nodes = []\r\n\r\n for i in range(sent.start, sent.end):\r\n token = self.doc[i]\r\n\r\n if token.pos_ in self.pos_kept:\r\n # skip any stop words...\r\n lemma = token.lemma_.lower().strip()\r\n\r\n if lemma in self.stopwords and token.pos_ in self.stopwords[lemma]:\r\n continue\r\n\r\n # ...otherwise proceed\r\n key = (token.lemma_, token.pos_)\r\n\r\n if key not in self.seen_lemma:\r\n self.seen_lemma[key] = set([token.i])\r\n else:\r\n self.seen_lemma[key].add(token.i)\r\n\r\n node_id = list(self.seen_lemma.keys()).index(key)\r\n\r\n if not node_id in self.lemma_graph:\r\n self.lemma_graph.add_node(node_id)\r\n\r\n if self.logger:\r\n self.logger.debug(\"visit {} {}\".format(\r\n visited_tokens, visited_nodes\r\n ))\r\n self.logger.debug(\"range {}\".format(\r\n list(range(len(visited_tokens) - 1, -1, -1))\r\n ))\r\n\r\n for prev_token in range(len(visited_tokens) - 1, -1, -1):\r\n if self.logger:\r\n self.logger.debug(\"prev_tok {} {}\".format(\r\n prev_token, (token.i - visited_tokens[prev_token])\r\n ))\r\n\r\n if (token.i - visited_tokens[prev_token]) <= self.token_lookback:\r\n self.increment_edge(node_id, visited_nodes[prev_token])\r\n else:\r\n break\r\n\r\n if self.logger:\r\n self.logger.debug(\" -- {} {} {} {} {} {}\".format(\r\n token.i, token.text, token.lemma_, token.pos_, visited_tokens, visited_nodes\r\n ))\r\n\r\n visited_tokens.append(token.i)\r\n visited_nodes.append(node_id)", "title": "" }, { "docid": "1ae118d22e0e0e0eeeb19a5416f66dea", "score": "0.5644099", "text": "def make_new_sentence(sentence):\n\n parser = Parse()\n cleaned_sentence = parser.transform_sentence_to_lower_without_apostrophes(sentence)\n cleaned_sentence = parser.remove_special_characters_from_list(cleaned_sentence)\n cleaned_sentence = parser.transform_sentence_to_list(cleaned_sentence)\n cleaned_sentence = parser.create_new_sentence(cleaned_sentence)\n return cleaned_sentence", "title": "" }, { "docid": "0b7a5e14864f6a0c75bd285158552d25", "score": "0.5642133", "text": "def to_wordlist_linguaview_xml(sentence):\n word_list = etree.Element(\"wordlist\")\n word_list.attrib[\"length\"] = str(len(sentence))\n for i in sentence:\n tok = etree.Element(\"tok\")\n tok.attrib[\"id\"] = str(i.id)\n tok.attrib[\"head\"] = i.form\n word_list.append(tok)\n return word_list", "title": "" }, { "docid": "df229e580d7b7d5ecddac2c62e5fc0eb", "score": "0.5641985", "text": "def get_sentences_from_tokens(tokens):\n all_sentences = []\n curr_sentence = \"\"\n for t in tokens:\n if t == \".\":\n curr_sentence = curr_sentence[:-1]\n curr_sentence += t\n all_sentences.append(curr_sentence)\n curr_sentence = \"\"\n else:\n curr_sentence += t\n curr_sentence += \" \"\n return all_sentences", "title": "" } ]
153cc40ad1617f690e4092e1f97d0c19
Check if the union tag is ``team_merge_request_sent_shown_to_secondary_team``.
[ { "docid": "188728c806e278886e546c75e4a0b053", "score": "0.8531224", "text": "def is_team_merge_request_sent_shown_to_secondary_team(self):\n return self._tag == 'team_merge_request_sent_shown_to_secondary_team'", "title": "" } ]
[ { "docid": "08f029aa16ff41c12bcd2548e8f33fe5", "score": "0.8294527", "text": "def is_team_merge_request_accepted_shown_to_secondary_team(self):\n return self._tag == 'team_merge_request_accepted_shown_to_secondary_team'", "title": "" }, { "docid": "08f029aa16ff41c12bcd2548e8f33fe5", "score": "0.82945025", "text": "def is_team_merge_request_accepted_shown_to_secondary_team(self):\n return self._tag == 'team_merge_request_accepted_shown_to_secondary_team'", "title": "" }, { "docid": "0982f9f714ef546a83f6a1d1de2dc641", "score": "0.81148", "text": "def is_team_merge_request_sent_shown_to_secondary_team_details(self):\n return self._tag == 'team_merge_request_sent_shown_to_secondary_team_details'", "title": "" }, { "docid": "1dc985f7180004a2d252f1579ab8446b", "score": "0.79455805", "text": "def is_team_merge_request_accepted_shown_to_secondary_team_details(self):\n return self._tag == 'team_merge_request_accepted_shown_to_secondary_team_details'", "title": "" }, { "docid": "0c712cb63a71dd9dcc0fa594de69ea1a", "score": "0.79062", "text": "def is_team_merge_request_rejected_shown_to_secondary_team(self):\n return self._tag == 'team_merge_request_rejected_shown_to_secondary_team'", "title": "" }, { "docid": "0c712cb63a71dd9dcc0fa594de69ea1a", "score": "0.79060835", "text": "def is_team_merge_request_rejected_shown_to_secondary_team(self):\n return self._tag == 'team_merge_request_rejected_shown_to_secondary_team'", "title": "" }, { "docid": "a958594a2dd1f2214ba0de7654c4f8c9", "score": "0.7865314", "text": "def is_team_merge_request_canceled_shown_to_secondary_team(self):\n return self._tag == 'team_merge_request_canceled_shown_to_secondary_team'", "title": "" }, { "docid": "a958594a2dd1f2214ba0de7654c4f8c9", "score": "0.78649926", "text": "def is_team_merge_request_canceled_shown_to_secondary_team(self):\n return self._tag == 'team_merge_request_canceled_shown_to_secondary_team'", "title": "" }, { "docid": "759c052b3e6b7abeb68da8e4ed69513d", "score": "0.7621584", "text": "def is_team_merge_request_reminder_shown_to_secondary_team(self):\n return self._tag == 'team_merge_request_reminder_shown_to_secondary_team'", "title": "" }, { "docid": "759c052b3e6b7abeb68da8e4ed69513d", "score": "0.7619413", "text": "def is_team_merge_request_reminder_shown_to_secondary_team(self):\n return self._tag == 'team_merge_request_reminder_shown_to_secondary_team'", "title": "" }, { "docid": "11578cfc28d3b500d0489f86a29a169e", "score": "0.7576817", "text": "def is_team_merge_request_accepted(self):\n return self._tag == 'team_merge_request_accepted'", "title": "" }, { "docid": "11578cfc28d3b500d0489f86a29a169e", "score": "0.7576796", "text": "def is_team_merge_request_accepted(self):\n return self._tag == 'team_merge_request_accepted'", "title": "" }, { "docid": "daeb7911510c5e4334c038c799baa2fb", "score": "0.7530631", "text": "def is_team_merge_request_rejected_shown_to_secondary_team_details(self):\n return self._tag == 'team_merge_request_rejected_shown_to_secondary_team_details'", "title": "" }, { "docid": "a101c1411adbf9a68cea3d1a2f1c4844", "score": "0.7479965", "text": "def is_team_merge_request_sent_shown_to_primary_team(self):\n return self._tag == 'team_merge_request_sent_shown_to_primary_team'", "title": "" }, { "docid": "a101c1411adbf9a68cea3d1a2f1c4844", "score": "0.74790806", "text": "def is_team_merge_request_sent_shown_to_primary_team(self):\n return self._tag == 'team_merge_request_sent_shown_to_primary_team'", "title": "" }, { "docid": "cdbc92420deadb001139221b6704611d", "score": "0.7477917", "text": 
"def is_team_merge_request_canceled_shown_to_secondary_team_details(self):\n return self._tag == 'team_merge_request_canceled_shown_to_secondary_team_details'", "title": "" }, { "docid": "bc61a0fc7258f3c14bfecfc5239ac667", "score": "0.7442828", "text": "def is_team_merge_to(self):\n return self._tag == 'team_merge_to'", "title": "" }, { "docid": "bc61a0fc7258f3c14bfecfc5239ac667", "score": "0.7442161", "text": "def is_team_merge_to(self):\n return self._tag == 'team_merge_to'", "title": "" }, { "docid": "4a6f2eae0d35ebcb67a90eff847765a1", "score": "0.725224", "text": "def is_team_merge_request_expired_shown_to_secondary_team(self):\n return self._tag == 'team_merge_request_expired_shown_to_secondary_team'", "title": "" }, { "docid": "4a6f2eae0d35ebcb67a90eff847765a1", "score": "0.72508514", "text": "def is_team_merge_request_expired_shown_to_secondary_team(self):\n return self._tag == 'team_merge_request_expired_shown_to_secondary_team'", "title": "" }, { "docid": "9865b2a447e9c4048817e3f9cae04319", "score": "0.7208233", "text": "def get_team_merge_request_accepted_shown_to_secondary_team(self):\n if not self.is_team_merge_request_accepted_shown_to_secondary_team():\n raise AttributeError(\"tag 'team_merge_request_accepted_shown_to_secondary_team' not set\")\n return self._value", "title": "" }, { "docid": "4b1ea9e6663aa7e167c4918353fe5484", "score": "0.72027993", "text": "def is_team_merge_request_accepted_shown_to_primary_team(self):\n return self._tag == 'team_merge_request_accepted_shown_to_primary_team'", "title": "" }, { "docid": "4b1ea9e6663aa7e167c4918353fe5484", "score": "0.7202107", "text": "def is_team_merge_request_accepted_shown_to_primary_team(self):\n return self._tag == 'team_merge_request_accepted_shown_to_primary_team'", "title": "" }, { "docid": "7fdbe70eea92c07f837fe882d35bc430", "score": "0.71997195", "text": "def get_team_merge_request_sent_shown_to_secondary_team(self):\n if not self.is_team_merge_request_sent_shown_to_secondary_team():\n raise AttributeError(\"tag 'team_merge_request_sent_shown_to_secondary_team' not set\")\n return self._value", "title": "" }, { "docid": "9847a540f1105a6fb941c1d912ab9f9c", "score": "0.7146536", "text": "def is_team_merge_request_sent_shown_to_primary_team_details(self):\n return self._tag == 'team_merge_request_sent_shown_to_primary_team_details'", "title": "" }, { "docid": "105c229015e116634c04e2fd54856643", "score": "0.71201074", "text": "def is_team_merge_request_rejected_shown_to_primary_team(self):\n return self._tag == 'team_merge_request_rejected_shown_to_primary_team'", "title": "" }, { "docid": "105c229015e116634c04e2fd54856643", "score": "0.71193045", "text": "def is_team_merge_request_rejected_shown_to_primary_team(self):\n return self._tag == 'team_merge_request_rejected_shown_to_primary_team'", "title": "" }, { "docid": "d34fa880bc61aad0f242091fd7c05c4d", "score": "0.7084882", "text": "def is_team_merge_request_reminder_shown_to_secondary_team_details(self):\n return self._tag == 'team_merge_request_reminder_shown_to_secondary_team_details'", "title": "" }, { "docid": "e1ae42d3ae808145182ef8acdf20f2d7", "score": "0.70306283", "text": "def is_team_merge_request_accepted_details(self):\n return self._tag == 'team_merge_request_accepted_details'", "title": "" }, { "docid": "82cbf6be2ac57be310d94cd4aa35321a", "score": "0.7017631", "text": "def is_team_merge_request_canceled_shown_to_primary_team(self):\n return self._tag == 'team_merge_request_canceled_shown_to_primary_team'", "title": "" }, { "docid": 
"82cbf6be2ac57be310d94cd4aa35321a", "score": "0.70166314", "text": "def is_team_merge_request_canceled_shown_to_primary_team(self):\n return self._tag == 'team_merge_request_canceled_shown_to_primary_team'", "title": "" }, { "docid": "049f3ae789ee677095a83a308cad33e9", "score": "0.6944994", "text": "def is_team_merge_request_accepted_shown_to_primary_team_details(self):\n return self._tag == 'team_merge_request_accepted_shown_to_primary_team_details'", "title": "" }, { "docid": "8f063889fda46ef8bc08f9ac042f001f", "score": "0.6870201", "text": "def is_team_merge_request_rejected_shown_to_primary_team_details(self):\n return self._tag == 'team_merge_request_rejected_shown_to_primary_team_details'", "title": "" }, { "docid": "4d9a687ac6b664d12a458b064b8c80e4", "score": "0.6854294", "text": "def is_team_merge_from(self):\n return self._tag == 'team_merge_from'", "title": "" }, { "docid": "4d9a687ac6b664d12a458b064b8c80e4", "score": "0.6854093", "text": "def is_team_merge_from(self):\n return self._tag == 'team_merge_from'", "title": "" }, { "docid": "44fcf477819314d37b4240573e2300aa", "score": "0.6803253", "text": "def is_team_merge_request_expired_shown_to_secondary_team_details(self):\n return self._tag == 'team_merge_request_expired_shown_to_secondary_team_details'", "title": "" }, { "docid": "97f20420978fd4389bbd53ec984bc8c1", "score": "0.6735518", "text": "def is_team_merge_request_canceled_shown_to_primary_team_details(self):\n return self._tag == 'team_merge_request_canceled_shown_to_primary_team_details'", "title": "" }, { "docid": "0485ad02fb131c5937ceeff1f0fc1003", "score": "0.6693894", "text": "def is_secondary_team(self):\n return self._tag == 'secondary_team'", "title": "" }, { "docid": "0485ad02fb131c5937ceeff1f0fc1003", "score": "0.6693894", "text": "def is_secondary_team(self):\n return self._tag == 'secondary_team'", "title": "" }, { "docid": "0485ad02fb131c5937ceeff1f0fc1003", "score": "0.6693894", "text": "def is_secondary_team(self):\n return self._tag == 'secondary_team'", "title": "" }, { "docid": "0485ad02fb131c5937ceeff1f0fc1003", "score": "0.6693894", "text": "def is_secondary_team(self):\n return self._tag == 'secondary_team'", "title": "" }, { "docid": "1f562630c2699b54f23a04a4c996bc50", "score": "0.66629726", "text": "def is_team_and_explicitly_shared(self):\n return self._tag == 'team_and_explicitly_shared'", "title": "" }, { "docid": "8c3bf17adc77dd343e0b4af1ed14d3db", "score": "0.66431326", "text": "def is_sf_team_join(self):\n return self._tag == 'sf_team_join'", "title": "" }, { "docid": "8c3bf17adc77dd343e0b4af1ed14d3db", "score": "0.6640756", "text": "def is_sf_team_join(self):\n return self._tag == 'sf_team_join'", "title": "" }, { "docid": "35eaf5e94fd6fca14621ab3eb48f1c6f", "score": "0.66069233", "text": "def get_team_merge_request_accepted(self):\n if not self.is_team_merge_request_accepted():\n raise AttributeError(\"tag 'team_merge_request_accepted' not set\")\n return self._value", "title": "" }, { "docid": "2eb276c175ce05079ddd9035a0a752cb", "score": "0.6595027", "text": "def get_team_merge_request_rejected_shown_to_secondary_team(self):\n if not self.is_team_merge_request_rejected_shown_to_secondary_team():\n raise AttributeError(\"tag 'team_merge_request_rejected_shown_to_secondary_team' not set\")\n return self._value", "title": "" }, { "docid": "e0b9a17374f4f218c460e5225423c284", "score": "0.65750605", "text": "def get_team_merge_request_canceled_shown_to_secondary_team(self):\n if not 
self.is_team_merge_request_canceled_shown_to_secondary_team():\n            raise AttributeError(\"tag 'team_merge_request_canceled_shown_to_secondary_team' not set\")\n        return self._value", "title": "" }, { "docid": "f62616d9c95be8377143f55df4d4895d", "score": "0.6569118", "text": "def is_team_merge_from_details(self):\n        return self._tag == 'team_merge_from_details'", "title": "" }, { "docid": "92e3edadcee2cc7eefb5fde38c370f16", "score": "0.65575606", "text": "def is_user_on_another_team(self):\n        return self._tag == 'user_on_another_team'", "title": "" }, { "docid": "edc5257e6719d2686b6ca45a79205fd4", "score": "0.65544856", "text": "def team_merge_request_accepted_shown_to_secondary_team(cls, val):\n        return cls('team_merge_request_accepted_shown_to_secondary_team', val)", "title": "" }, { "docid": "41a3bef322522dedac92d2226e66799c", "score": "0.6532951", "text": "def is_team_merge_request_auto_canceled(self):\n        return self._tag == 'team_merge_request_auto_canceled'", "title": "" }, { "docid": "41a3bef322522dedac92d2226e66799c", "score": "0.6532205", "text": "def is_team_merge_request_auto_canceled(self):\n        return self._tag == 'team_merge_request_auto_canceled'", "title": "" }, { "docid": "f4778715a132750b1240bfbc0ceb340b", "score": "0.6529896", "text": "def is_team_merge_to_details(self):\n        return self._tag == 'team_merge_to_details'", "title": "" }, { "docid": "a8912c93996cbec727f0c24684ab511a", "score": "0.65111166", "text": "def is_team_merge_request_expired_shown_to_primary_team(self):\n        return self._tag == 'team_merge_request_expired_shown_to_primary_team'", "title": "" }, { "docid": "a8912c93996cbec727f0c24684ab511a", "score": "0.65089285", "text": "def is_team_merge_request_expired_shown_to_primary_team(self):\n        return self._tag == 'team_merge_request_expired_shown_to_primary_team'", "title": "" }, { "docid": "ad7954d233240fffc1e2d09a800d1fed", "score": "0.64685124", "text": "def is_has_team_shared_dropbox(self):\n        return self._tag == 'has_team_shared_dropbox'", "title": "" }, { "docid": "cbea7293f7cfd9645f33965865f11caa", "score": "0.645409", "text": "def is_team_merge_request_reminder_shown_to_primary_team(self):\n        return self._tag == 'team_merge_request_reminder_shown_to_primary_team'", "title": "" }, { "docid": "cbea7293f7cfd9645f33965865f11caa", "score": "0.64524734", "text": "def is_team_merge_request_reminder_shown_to_primary_team(self):\n        return self._tag == 'team_merge_request_reminder_shown_to_primary_team'", "title": "" }, { "docid": "b20918bd319e5db6dc070b2f8585f009", "score": "0.64453363", "text": "def is_team_merge_request_reminder(self):\n        return self._tag == 'team_merge_request_reminder'", "title": "" }, { "docid": "b20918bd319e5db6dc070b2f8585f009", "score": "0.6443169", "text": "def is_team_merge_request_reminder(self):\n        return self._tag == 'team_merge_request_reminder'", "title": "" }, { "docid": "c712fddbdaae7e2b063552973e5eb800", "score": "0.64238095", "text": "def is_team(self):\n        return self._tag == 'team'", "title": "" },
{ "docid": "252c255cf06bbb5bb73405de1f4f0a52", "score": "0.6405349", "text": "def is_team_merge_request_canceled(self):\n        return self._tag == 'team_merge_request_canceled'", "title": "" }, { "docid": "252c255cf06bbb5bb73405de1f4f0a52", "score": "0.64052224", "text": "def is_team_merge_request_canceled(self):\n        return self._tag == 'team_merge_request_canceled'", "title": "" }, { "docid": "7added4a4ad012b88c077f7fd64aee29", "score": "0.6395934", "text": "def is_team_merge_request_auto_canceled_details(self):\n        return self._tag == 'team_merge_request_auto_canceled_details'", "title": "" }, { "docid": "6a6f86cd648d1a4764ec2d3b9ac541bf", "score": "0.63720894", "text": "def team_merge_request_sent_shown_to_secondary_team(cls, val):\n        return cls('team_merge_request_sent_shown_to_secondary_team', val)", "title": "" }, { "docid": "2016c6b9f552de4e10d43fd7f81fd37f", "score": "0.6309902", "text": "def is_moved_to_another_team(self):\n        return self._tag == 'moved_to_another_team'", "title": "" }, { "docid": "17d792dfb4e894e3d6935f941c456f1a", "score": "0.6296702", "text": "def is_team_only(self):\n        return self._tag == 'team_only'", "title": "" }, { "docid": "1eaabe37a1f6940ed80ff892c5a19545", "score": "0.627902", "text": "def is_domain_invites_request_to_join_team(self):\n        return self._tag == 'domain_invites_request_to_join_team'", "title": "" }, { "docid": "1eaabe37a1f6940ed80ff892c5a19545", "score": "0.6278177", "text": "def is_domain_invites_request_to_join_team(self):\n        return self._tag == 'domain_invites_request_to_join_team'", "title": "" }, { "docid": "6059c6847d79bdd381cbcf6fed2d9106", "score": "0.62406987", "text": "def is_only_team(self):\n        return self._tag == 'only_team'", "title": "" }, { "docid": "558f5fcdf4807db136a883f8733d6e70", "score": "0.621138", "text": "def is_team_merge_request_canceled_details(self):\n        return self._tag == 'team_merge_request_canceled_details'", "title": "" }, { "docid": "7dc904e1e153b5ad433bf6f4f2e7b6e1", "score": "0.6203967", "text": "def is_organization_team(self):\n        return self._tag == 'organization_team'", "title": "" }, { "docid": "809e099085e49d750085f450343efa6f", "score": "0.6173801", "text": "def is_team_merge_request_revoked(self):\n        return self._tag == 'team_merge_request_revoked'", "title": "" }, { "docid": "809e099085e49d750085f450343efa6f", "score": "0.6172417", "text": "def is_team_merge_request_revoked(self):\n        return self._tag == 'team_merge_request_revoked'", "title": "" }, { "docid": "67d81c660f27c06e77a34d260bdba588", "score": "0.6154349", "text": "def is_team_merge_request_expired_shown_to_primary_team_details(self):\n        return self._tag == 'team_merge_request_expired_shown_to_primary_team_details'", "title": "" }, { "docid": "815880816582de994848fff3d6118c5d", "score": "0.61230737", "text": "def team_merge_request_accepted_shown_to_secondary_team_details(cls, val):\n        return cls('team_merge_request_accepted_shown_to_secondary_team_details', val)", "title": "" }, { "docid": "25a544408ce3aa35f8a11dcd761a121c", "score": "0.6098332", "text": "def is_team_shared_dropbox_error(self):\n        return self._tag == 'team_shared_dropbox_error'", "title": "" }, { "docid": "e6e3c2b77c7d3a6095a38643c6fedb81", "score": "0.60709023", "text": "def is_moved_from_another_team(self):\n        return self._tag == 
'moved_from_another_team'", "title": "" }, { "docid": "852352a9099bd6c80b6713a67d19923d", "score": "0.6057956", "text": "def get_team_merge_request_accepted_shown_to_primary_team(self):\n if not self.is_team_merge_request_accepted_shown_to_primary_team():\n raise AttributeError(\"tag 'team_merge_request_accepted_shown_to_primary_team' not set\")\n return self._value", "title": "" }, { "docid": "5c7e5644a44811f95f5938fe6e6776b1", "score": "0.6053285", "text": "def get_team_merge_request_sent_shown_to_primary_team(self):\n if not self.is_team_merge_request_sent_shown_to_primary_team():\n raise AttributeError(\"tag 'team_merge_request_sent_shown_to_primary_team' not set\")\n return self._value", "title": "" }, { "docid": "18c5360e480dd7ad1d15fee3cbd3636a", "score": "0.60381657", "text": "def is_sf_team_join_from_oob_link(self):\n return self._tag == 'sf_team_join_from_oob_link'", "title": "" }, { "docid": "e56dc6f4e67f611d8825ed8ac93049c6", "score": "0.60380954", "text": "def is_has_team_selective_sync(self):\n return self._tag == 'has_team_selective_sync'", "title": "" }, { "docid": "e56dc6f4e67f611d8825ed8ac93049c6", "score": "0.60380954", "text": "def is_has_team_selective_sync(self):\n return self._tag == 'has_team_selective_sync'", "title": "" }, { "docid": "e56dc6f4e67f611d8825ed8ac93049c6", "score": "0.60380954", "text": "def is_has_team_selective_sync(self):\n return self._tag == 'has_team_selective_sync'", "title": "" }, { "docid": "18c5360e480dd7ad1d15fee3cbd3636a", "score": "0.60369015", "text": "def is_sf_team_join_from_oob_link(self):\n return self._tag == 'sf_team_join_from_oob_link'", "title": "" }, { "docid": "8e05fe05a22e285442dc424d60f3695a", "score": "0.6035767", "text": "def is_team_merge_request_reminder_shown_to_primary_team_details(self):\n return self._tag == 'team_merge_request_reminder_shown_to_primary_team_details'", "title": "" }, { "docid": "0a58fe62671ee23c43732bfab747bbb1", "score": "0.6009942", "text": "def is_domain_invites_request_to_join_team_details(self):\n return self._tag == 'domain_invites_request_to_join_team_details'", "title": "" }, { "docid": "8c6ae8d4cfdecd58ce8f07ce4c5c0e22", "score": "0.59917295", "text": "def get_team_merge_request_reminder_shown_to_secondary_team(self):\n if not self.is_team_merge_request_reminder_shown_to_secondary_team():\n raise AttributeError(\"tag 'team_merge_request_reminder_shown_to_secondary_team' not set\")\n return self._value", "title": "" }, { "docid": "bc4bebad585344f04a8d34c9497f7d81", "score": "0.5982951", "text": "def is_pull_request_merged(pull_request):\n return pull_request.merged_at is not None", "title": "" }, { "docid": "1260d150805d9238f9feb7339a47e964", "score": "0.5982094", "text": "def is_from_team_only(self):\n return self._tag == 'from_team_only'", "title": "" }, { "docid": "1260d150805d9238f9feb7339a47e964", "score": "0.5982094", "text": "def is_from_team_only(self):\n return self._tag == 'from_team_only'", "title": "" }, { "docid": "f83028be8b4e3c1b182eb02cc780ded1", "score": "0.597736", "text": "def is_team_merge_request_reminder_details(self):\n return self._tag == 'team_merge_request_reminder_details'", "title": "" }, { "docid": "d80ecba8d9c1bde8000eafc2fa8deaf5", "score": "0.59333724", "text": "def is_user_already_on_team(self):\n return self._tag == 'user_already_on_team'", "title": "" }, { "docid": "6262091604b6b303225fc9734ea725c1", "score": "0.59304434", "text": "def get_team_merge_request_expired_shown_to_secondary_team(self):\n if not 
self.is_team_merge_request_expired_shown_to_secondary_team():\n raise AttributeError(\"tag 'team_merge_request_expired_shown_to_secondary_team' not set\")\n return self._value", "title": "" } ]
6d95649681b571edb5fbe7eca9d04f63
The Downloader class is responsible for downloading packages and their dependencies.
[ { "docid": "fb9c173552dd7a86c779c9213f9eb5d5", "score": "0.0", "text": "def __init__(self, packages, out_dir=\"./out\"):\n self.packages = packages\n self.out_dir = out_dir", "title": "" } ]
[ { "docid": "3182200d6d9fd7aa36eccd72373d2d6a", "score": "0.7099037", "text": "def downloader(self):", "title": "" }, { "docid": "eeabcea7046da8a48556000edd2aaf7e", "score": "0.6681995", "text": "def download_packages(\n self, package_names, dependencies_to_exclude=[], version=None\n ):\n if not os.path.isdir(self.out_dir):\n logger.info(f\"Creating output directory {self.out_dir}\")\n os.mkdir(self.out_dir)\n\n logger.info(f\"Gathering dependencies for {package_names}\")\n\n downloaded_packages = []\n\n for (name, package) in self.gather_dependencies(\n package_names, dependencies_to_exclude\n ).items():\n package_file = os.path.join(\n self.out_dir, os.path.basename(package.file_name())\n )\n package_url = package.download_url()\n\n logger.info(f\"Downloading package {name} at {package_url}\")\n\n with tqdm(\n unit=\"B\",\n unit_scale=True,\n unit_divisor=1024,\n miniters=1,\n desc=f\"Downloading {name}\",\n ) as t:\n\n def hook(b=1, bsize=1, tsize=None):\n if tsize is not None:\n t.total = tsize\n t.update(b * bsize - t.n)\n\n if self._has_cache_entry(package_url):\n with self._retreive_from_cache(package_url) as pkg_file:\n with open(package_file, \"wb\") as dst_file:\n shutil.copyfileobj(pkg_file, dst_file)\n else:\n urllib.request.urlretrieve(package_url, package_file, hook)\n self._store_in_cache(package_url, package_file)\n\n downloaded_packages.append(DebianFile(package, package_file))\n\n return downloaded_packages", "title": "" }, { "docid": "585e5c5595a91d8a1bad9010b435454f", "score": "0.656493", "text": "def download(self):\n raise NotImplementedError(\"Downloader.download must be implemented\")", "title": "" }, { "docid": "9fa19b50bd777d7651947cd85fbf8294", "score": "0.6512227", "text": "def download(self):\n \n if os.path.isdir(self.dir):\n self.download_dir()\n elif self.svn:\n self.download_svn()\n elif self.source:\n self.download_source()", "title": "" }, { "docid": "38464a1737279a2e5f1862584512c55b", "score": "0.64326113", "text": "def download_packages(self):\n return self._download_packages", "title": "" }, { "docid": "e766b8f23df07cc23945404e61233129", "score": "0.6248259", "text": "def do_download(self):\n raise NotImplementedError", "title": "" }, { "docid": "93690026c4923dfc48d58ee7236ed53f", "score": "0.623517", "text": "def run(self):\n\n self.__download(self.url, self.download_path)", "title": "" }, { "docid": "d7254c0e57b17d5655769b2de75272c6", "score": "0.61072516", "text": "def download(self):\n\n if self._check_exists():\n return\n\n # download file\n download_url(self.urls[self.dataset_name], self.root, self.file_name)\n\n print('Done!')", "title": "" }, { "docid": "eec8a03848dbc40cf2903834c4152073", "score": "0.6015168", "text": "def main():\n fetcher = Fetcher()\n fetcher.query_available_releases()\n fetcher.download_all_releases_data()\n fetcher.save()", "title": "" }, { "docid": "d3b4bd7fbb11900f01b0567c24099a08", "score": "0.6010219", "text": "def download(self):\n pass", "title": "" }, { "docid": "d3b4bd7fbb11900f01b0567c24099a08", "score": "0.6010219", "text": "def download(self):\n pass", "title": "" }, { "docid": "1fbd35a7623df9e687e8263e1b9b1438", "score": "0.6003048", "text": "def download_start(self):\n mainDownloadThread = Thread(target=self.download)\n mainDownloadThread.start()", "title": "" }, { "docid": "566b558d87b16c3263bb77d65a72c81a", "score": "0.59891945", "text": "def download(self) -> None:\n\n if self._check_exists():\n return\n\n os.makedirs(self.root, exist_ok=True)\n\n filename = self.download_url.rpartition('/')[2]\n 
download_and_extract_archive(self.download_url,\n download_root=self.folder,\n filename=filename, md5=None)\n print('Done!')", "title": "" }, { "docid": "d22b34242d23c6a4d606f6ebc156df4a", "score": "0.59700775", "text": "def download(self) -> None:\n if self._check_exists():\n return\n for resource, extract in self.resources:\n if extract:\n download_and_extract_archive(resource, self.root, remove_finished=True)\n else:\n download_url(resource, self.root)", "title": "" }, { "docid": "1da31d8c20b65ece5e8161328ded7d83", "score": "0.594602", "text": "def download(self):\r\n pass", "title": "" }, { "docid": "a4a4712caac0a84103bd41b6ef6a1633", "score": "0.59343", "text": "def do_download(self):\n self.logger.debug(\"Called do_download() but it was not implemented\")\n raise NotImplementedError", "title": "" }, { "docid": "caf282776d71a273798bce6bbceb2fb5", "score": "0.5924527", "text": "def __download(self, url, download_path):\n # Make the directory in case it doesn't exist\n if not os.path.exists(download_path):\n os.mkdir(download_path)\n\n # Delete all existing files in the directory\n self.__clean_path(download_path)\n\n # Download the data\n r = requests.get(url)\n\n # Write the file\n with open(os.path.join(download_path, \"lahman.zip\"), \"wb\") as f:\n f.write(r.content)\n\n # Unzip the file\n with zipfile.ZipFile(os.path.join(download_path,\n \"lahman.zip\"),\n \"r\") as zip_ref:\n zip_ref.extractall(download_path)\n\n # Delete the zipped folder\n os.remove(os.path.join(download_path, \"lahman.zip\"))\n\n # Get subdir folder, it's the only folder in download_path\n sub_dir = os.path.join(download_path, os.listdir(download_path)[0])\n\n # Move the actual csvs to the download path\n for file in os.listdir(os.path.join(sub_dir, \"core\")):\n source = os.path.join(sub_dir, \"core\", file)\n destination = os.path.join(download_path, file)\n shutil.move(source, destination)\n\n # Delete the subdir\n shutil.rmtree(sub_dir)", "title": "" }, { "docid": "ae3452143c88c753c20e7c011606e9d5", "score": "0.59174615", "text": "def run_downloader(selects, url, dlpath, **kwargs):\n print \"Downloading\", url, \"to\", dlpath\n try:\n isos = ThreddsCollector(url, selects=selects, **kwargs).run()\n XmlDownloader.run(isos, dlpath)\n except Exception as e:\n print >>sys.stderr, \"Problem with\", url, \"ex:\", e", "title": "" }, { "docid": "5ab2ce713a2c70caa69dd615a9639cc8", "score": "0.5895813", "text": "def _Download( self ):\n self._DownloadPipe += PackageUtil.DownloadFile( \"https://github.com/LaurentGomila/SFML/tarball/\" + self._TarName )\n return", "title": "" }, { "docid": "773e47dc28428df91a879cf50fc88378", "score": "0.5889451", "text": "def download():\n pass", "title": "" }, { "docid": "c70dede344ce32de05fbb47713e14a90", "score": "0.58845705", "text": "def run(self):\n\n # Get URLs and paths\n urls = self._get_urls()\n paths = self._get_paths(urls)\n # Download all files with multiprocessing\n self.download_mp(download_file, [urls, paths])\n # Extract data to the tsv_path\n self._extract_data()", "title": "" }, { "docid": "0779460903ca529d521267d0aa53d70f", "score": "0.58495075", "text": "def startdownload(self):\n try:\n utils.checkDiskFull()\n reqjson = json.loads(request.body)\n package = str(reqjson['package'])\n packageloc = str(reqjson['packageloc'])\n skipProp = asbool(reqjson['skipProp']) if 'skipProp' in reqjson else configutil.getConfigAsBool('download_skip_prop')\n \n LOG.info('Request received for StartDownload %s' % packageloc)\n appGlobal = config['pylons.app_globals']\n 
downloadThread = None\n\n cat = 'DIST_SD' + packageloc\n\n if not downloadThread:\n LOG.info('Starting a new StartDownload Thread %s' % packageloc)\n downloadThread = DownloadThread(appGlobal.threadMgr, package, packageloc, category = [cat], skipProp = skipProp)\n downloadThread.setMergeOnFound(True)\n self.injectJobCtx(downloadThread)\n downloadThread.start()\n downloadThread.threadMgrEvent.wait()\n\n return statusResult(request, response, downloadThread, controller = self)\n except AgentException as excep:\n return errorResult(request, response, error = excep.getCode(), errorMsg = excep.getMsg(), controller = self)\n except Exception as excp:\n errorMsg = 'Exception downloading %s - traceback %s' % (str(excp), traceback.format_exc(2))\n return errorResult(request, response, error = Errors.UNKNOWN_ERROR, errorMsg = errorMsg, controller = self)", "title": "" }, { "docid": "fc24be724bcb60c33b6dd4e26021f5f6", "score": "0.5847761", "text": "def download(self):\n Logger.info(\"Downloading {} parking data\".format(self.name))\n zfile = download_progress(\n self.url,\n \"quebec_latest.zip\",\n CONFIG['DOWNLOAD_DIRECTORY']\n )\n\n Logger.info(\"Unzipping\")\n with zipfile.ZipFile(zfile) as zip:\n self.filename = os.path.join(CONFIG['DOWNLOAD_DIRECTORY'], [\n name for name in zip.namelist()\n if name.lower().endswith('.shp')\n ][0])\n zip.extractall(CONFIG['DOWNLOAD_DIRECTORY'])\n\n Logger.info(\"Downloading {} paid parking data\".format(self.name))\n zfile = download_progress(\n self.url_payant,\n \"quebec_paid_latest.zip\",\n CONFIG['DOWNLOAD_DIRECTORY']\n )\n\n Logger.info(\"Unzipping\")\n with zipfile.ZipFile(zfile) as zip:\n self.filename_payant = os.path.join(CONFIG['DOWNLOAD_DIRECTORY'], [\n name for name in zip.namelist()\n if name.lower().endswith('.shp')\n ][0])\n zip.extractall(CONFIG['DOWNLOAD_DIRECTORY'])", "title": "" }, { "docid": "72ce92a3954badc198e21a5c6bbdd9b7", "score": "0.5838515", "text": "def download(self):\n from six.moves import urllib\n import gzip\n\n if self._check_exists():\n return\n\n # download files\n try:\n os.makedirs(os.path.join(self.root, self.raw_folder))\n os.makedirs(os.path.join(self.root, self.processed_folder))\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n\n for url in self.urls:\n print('Downloading ' + url)\n data = urllib.request.urlopen(url)\n filename = url.rpartition('/')[2]\n file_path = os.path.join(self.root, self.raw_folder, filename)\n with open(file_path, 'wb') as f:\n f.write(data.read())\n with open(file_path.replace('.gz', ''), 'wb') as out_f, \\\n gzip.GzipFile(file_path) as zip_f:\n out_f.write(zip_f.read())\n os.unlink(file_path)\n\n # process and save as torch files\n print('Processing...')\n\n training_set = (\n read_image_file(os.path.join(self.root, self.raw_folder, 'train-images-idx3-ubyte')),\n read_label_file(os.path.join(self.root, self.raw_folder, 'train-labels-idx1-ubyte'))\n )\n test_set = (\n read_image_file(os.path.join(self.root, self.raw_folder, 't10k-images-idx3-ubyte')),\n read_label_file(os.path.join(self.root, self.raw_folder, 't10k-labels-idx1-ubyte'))\n )\n with open(os.path.join(self.root, self.processed_folder, self.training_file), 'wb') as f:\n torch.save(training_set, f)\n with open(os.path.join(self.root, self.processed_folder, self.test_file), 'wb') as f:\n torch.save(test_set, f)\n\n print('Done!')", "title": "" }, { "docid": "eb1c9329a2ce93ee3b13999f00462033", "score": "0.5824006", "text": "def download(self):\n\n self.start_reports()\n self.start_notices()\n\n 
self.download_scans()\n self.download_report(\"CSV reports\", self.reports_location)\n self.download_report(\"Notices file\", self.notices_location)", "title": "" }, { "docid": "0c824a52575205313d4b95594834ab43", "score": "0.5756304", "text": "def download(self) -> None:\n\n if self._check_exists():\n return\n\n os.makedirs(self.raw_folder, exist_ok=True)\n\n # download files\n for filename, md5 in self.resources:\n for mirror in self.mirrors:\n url = f\"{mirror}{filename}\"\n try:\n print(f\"Downloading {url}\")\n download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=md5)\n except URLError as error:\n print(f\"Failed to download (trying next):\\n{error}\")\n continue\n finally:\n print()\n break\n else:\n raise RuntimeError(f\"Error downloading {filename}\")", "title": "" }, { "docid": "351f6364f4cab9ae563b6101a9461e87", "score": "0.5701088", "text": "def download(self):\n\n if self._check_exists():\n return\n\n os.makedirs(self.raw_folder, exist_ok=True)\n os.makedirs(self.processed_folder, exist_ok=True)\n\n # download files\n for url, md5 in self.resources:\n filename = url.rpartition('/')[2]\n download_and_extract_archive(url, download_root=self.raw_folder,\n extract_root=self.processed_folder,\n filename=filename, md5=md5)\n\n print('Done!')", "title": "" }, { "docid": "ccfad23164a4f03c2dfd46cd40ced24f", "score": "0.5657364", "text": "def download(self):\n\t\tif IS_WINDOWS:\n\t\t\ttry:\n\t\t\t\tfiles = self.info[\"files\"][\"win\"]\n\t\t\texcept KeyError:\n\t\t\t\tif self.isSteam:\n\t\t\t\t\tfiles = self.info[\"files\"][\"win-steam\"]\n\t\t\t\telse:\n\t\t\t\t\tfiles = self.info[\"files\"][\"win-mg\"]\n\t\telse:\n\t\t\ttry:\n\t\t\t\tfiles = self.info[\"files\"][\"unix\"]\n\t\t\texcept KeyError:\n\t\t\t\tif self.isSteam:\n\t\t\t\t\tfiles = self.info[\"files\"][\"unix-steam\"]\n\t\t\t\telse:\n\t\t\t\t\tfiles = self.info[\"files\"][\"unix-mg\"]\n\t\ttry:\n\t\t\tos.mkdir(self.downloadDir)\n\t\texcept OSError:\n\t\t\tpass\n\t\tfileList = open(\"downloadList.txt\", \"w\")\n\t\tfor file in files:\n\t\t\tfileList.write(file + \"\\n\")\n\t\tfileList.close()\n\n\t\taria(downloadDir=self.downloadDir, inputFile='downloadList.txt')\n\n\t\tos.remove(\"downloadList.txt\")", "title": "" }, { "docid": "c786fc2c133976fcc76344fe9b7d9816", "score": "0.5655293", "text": "def download(self):\n download_extract_urls(\n urls=self.urls,\n save_dir=self.data_path,\n extract_data=self.extract_data,\n verbose=self.verbose\n )", "title": "" }, { "docid": "f4dc8c4196315d08b0317978763821e6", "score": "0.5654777", "text": "def download_url_list(self):\n # create a thread pool and give them a queue\n for thread_number in range(self.num_threads):\n the_thread = Downloader(thread_number, self.queue, self.out_queue)\n the_thread.setDaemon(True)\n the_thread.start()\n\n # give the queue some data\n for episode in self.episodes:\n self.queue.put(episode)\n\n while self.queue.unfinished_tasks or self.out_queue.unfinished_tasks:\n if self.out_queue.empty():\n time.sleep(1)\n else:\n (task_id, current_size, episode) = self.out_queue.get(True, 1)\n\n # if total length is 0, there was an error\n if episode.error_msg:\n self.failed_files.append(episode)\n # update our UI\n self.status.update(task_id, current_size, None)\n else:\n if current_size == episode.size:\n # if length read is total length - save name\n self.successful_files.append(episode)\n self.status.increment_success()\n # update our UI\n self.status.update(task_id, current_size, episode)\n\n self.out_queue.task_done()\n\n # wait 
for the queue to finish\n self.queue.join()\n self.status.finish(self.failed_files)", "title": "" }, { "docid": "5e775c6e51b76196ad364d35ddadc7f5", "score": "0.5645473", "text": "def download(self):\n file_path = os.path.realpath(f\"{self.download_path}{os.sep}{self.filename}\")\n if not self.downloaded: # If file is not downloaded\n logging.info(\"Starting binary download\")\n\n # Setting up necessary download variables\n file_stream = requests.get(self.url, stream=True) # The open http request for the file\n chunk_size = 1024 # Setting the progress bar chunk size to measure in kb\n\n # Setting up the download progress bar\n progress_bar = tqdm(total=self.size, unit='iB', unit_scale=True)\n progress_bar.set_description(f\"Download progress for {self.filename}\")\n\n # Write the incoming data stream to a file and update progress bar as it downloads\n with open(file_path, 'wb') as download_file: \n for chunk in file_stream.iter_content(chunk_size): \n if chunk:\n progress_bar.update(len(chunk))\n download_file.write(chunk)\n progress_bar.close()\n self.downloaded = True", "title": "" }, { "docid": "b6e53202e583b17ca122956c09aae365", "score": "0.5639747", "text": "def _download(self, api_key: Optional[str] = None) -> None:\n if self._check_integrity():\n print(\"Files already downloaded\")\n return\n\n download_radiant_mlhub_dataset(self.dataset_id, self.root, api_key)\n archive_path = os.path.join(self.root, self.foldername + \".tar.gz\")\n if (\n self.checksum\n and check_integrity(archive_path, self.md5)\n or not self.checksum\n ):\n extract_archive(archive_path)\n else:\n raise RuntimeError(\"Dataset corrupted\")", "title": "" }, { "docid": "d5a6e8c1acd6093d25e217058d801204", "score": "0.5638644", "text": "def __downloadRequires(self):\n for urlStr in self.__require:\n if not self.__manager.requireScripts([urlStr]):\n downloader = GreaseMonkeyDownloader(\n QUrl(urlStr),\n self.__manager,\n GreaseMonkeyDownloader.DownloadRequireScript)\n downloader.finished.connect(\n lambda: self.__requireDownloaded(downloader))\n downloader.error.connect(\n lambda: self.__requireDownloadError(downloader))\n self.__downloaders.append(downloader)", "title": "" }, { "docid": "131d00436f75644be38d22a2b25e2f65", "score": "0.5632744", "text": "def do_download(self):\n class_name = type(self).__name__.lower()\n self.logger.debug(\"Downloading %s %r\", class_name, self.name)\n self.logger.info(\"%s links are unparseable.\", class_name.title())", "title": "" }, { "docid": "de0aff456750f1079ba0921e648eab49", "score": "0.56186384", "text": "def __init__(self, download_dir: str = None):\n # Regex pattern to macht DOI https://ihateregex.io/expr/doi/.\n self.doi_pattern: Pattern[str] = re.compile(r'^(10\\.\\d{4,5}/[\\S]+[^;,.\\s])$')\n # Regex pattern to match download link on Sci-Hub.\n self.url_pattern: Pattern[str] = re.compile(r\"https://.*.pdf\\?download=true\")\n # TODO: get valid urls from ilovescihub.wordpress.com during initialisation\n self.scihub_urls: list[str] = [\"http://sci-hub.ee\", \"https://sci-hub.ee\", \"https://sci-hub.ru\",\n \"https://sci-hub.se\", \"https://sci-hub.st\", \"http://sci-hub.ai\",\n \"https://sci-hub.ai\", \"https://sci-hub.cat\"]\n self.crossref_url: str = 'https://api.crossref.org/works'\n self.url: str = self.get_valid_url()\n self.download_dir = download_dir", "title": "" }, { "docid": "0897023d06bfea7cbbf9b89eaeedd757", "score": "0.5617639", "text": "def download(self):\n\n if self._check_exists():\n return\n\n makedir_exist_ok(self.raw_folder)\n 
makedir_exist_ok(self.processed_folder)\n\n # download files\n for url in self.urls:\n filename = self.__class__.__name__.lower() + '.csv'\n download_url(url, root=self.raw_folder, filename=filename)\n\n # process and save as torch files\n print('Processing...')\n filepath = os.path.join(self.raw_folder, filename)\n df = self.process(filepath)\n\n # Get dataset statistics\n mean_per_day = df.groupby(df.index.dayofyear).mean()\n std_per_day = df.groupby(df.index.dayofyear).std()\n\n # Split into training and testing\n train, test = {}, {}\n for variable, column in self.variables.items():\n df_variable = df[[column, 'dayofyear', 'year']].dropna()\n train_var = df_variable.groupby('dayofyear', as_index=False).apply(\n lambda x: x.sample(min(self.num_years_train, len(x)))\n ).droplevel(0)\n test_var = df_variable.drop(train_var.index)\n train[variable] = train_var\n test[variable] = test_var\n\n # Save data\n training_set = (\n train,\n mean_per_day,\n std_per_day\n )\n test_set = (\n test,\n mean_per_day,\n std_per_day\n )\n with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:\n torch.save(training_set, f)\n with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:\n torch.save(test_set, f)\n\n print('Done!')", "title": "" }, { "docid": "f2576ec38ea37969b754562c09ea49e1", "score": "0.56143343", "text": "def download(self, irc, msg, args, name, optlist):\n # Parse and check parameters\n version = None\n repo = 'http://packages.supybot.fr.cr/'\n for key, value in optlist:\n if key == 'version': version = value\n elif key == 'repo': repo = value\n if __builtins__['any']([x in repo for x in ('?', '&')]):\n # Supybot rewrites any() in commands.py\n irc.error(_('Bad formed url.'))\n return\n selectedPackage = None\n\n # Get server's index\n try:\n index = json.load(utils.web.getUrlFd(repo))\n except ValueError:\n irc.error(_('Server\\'s JSON is bad formed.'))\n return\n\n # Crawl the available packages list\n for package in index['packages']:\n if not package['name'] == name:\n continue\n if version is None and (\n selectedPackage == None or\n compareVersions(selectedPackage['version'],\n package['version']) == LOWER):\n # If not version given, and [no selected package\n # or selected package is older than this one]\n selectedPackage = package\n elif package['version'] == version:\n selectedPackage = package\n if selectedPackage is None:\n irc.error(_('No packages matches your query.'))\n return\n\n # Determines the package's real URL\n # TODO: handle relative URL starting with /\n # FIXME: URL ending with /foobar.txt\n packageUrl = selectedPackage['download-url']\n if packageUrl.startswith('./'):\n packageUrl = repo\n if not packageUrl.endswith('/'):\n packageUrl += '/'\n packageUrl += selectedPackage['download-url']\n\n # Write the package to the disk\n directory = conf.supybot.directories.data()\n assert os.access(directory, os.W_OK)\n path = os.path.join(directory, '%s.tar' % name)\n try:\n os.unlink(path)\n except OSError:\n # Does not exist\n pass\n with open(path, 'ab') as file_:\n try:\n file_.write(utils.web.getUrlFd(packageUrl).read())\n except utils.web.Error as e:\n irc.reply(e.args[0])\n return\n irc.replySuccess()", "title": "" }, { "docid": "d3606a4e735aa800135bc03d487481bb", "score": "0.55973667", "text": "def test_downloader():\n from tradingsystem.data_handler import yf_downloader as yf\n # Valid case\n res = yf.main('WMT', '2006-02-12', '2016-02-12')\n assert len(res) > 0", "title": "" }, { "docid": "8010f1264b21a9c940522e365cdc417e", 
"score": "0.55626816", "text": "def auto_download():\n print('auto downloading from url files')\n for server_path in glob.iglob(join(SERVER_DIR, \"*.url\")):\n download_from_file(server_path)\n for plugin_path in glob.iglob(join(PLUGIN_DIR, \"*.url\")):\n download_from_file(plugin_path)", "title": "" }, { "docid": "01b258e01b903caaf5b980235c5d9fe0", "score": "0.5560525", "text": "def download(self):\n print('RV selected nodes: ', self.snodes)\n self.dl2.create_toget(self.snodes)\n self.nodes_to_download = self.snodes\n # self.dl2.getdata(self.config['datadir'])\n # self.dl2.get_data_chunks(self.config['datadir'])\n\n self.refresh()\n self.dl2.start_download()\n self.download_event = Clock.schedule_once(self.check_download_status, 0.1)\n\n for node in self.nodes_to_download:\n self.data[node]['download_progress'] = '0'", "title": "" }, { "docid": "53a0cda291af8cc9bc05cda34fd9c7c3", "score": "0.55593956", "text": "def fetch(self):\n\n home = self.home()\n if not path.exists(home):\n os.makedirs(home)\n\n # download archives\n archive_filenames = []\n for key, archive in self.ARCHIVES.iteritems():\n url = archive['url']\n sha1 = archive['sha1']\n basename = path.basename(url)\n archive_filename = path.join(home, basename)\n if not path.exists(archive_filename):\n download(url, archive_filename, sha1=sha1)\n archive_filenames += [(archive_filename, sha1)]\n self.ARCHIVES[key]['archive_filename'] = archive_filename\n\n # extract them\n if not path.exists(path.join(home, 'CarData')):\n for archive in self.ARCHIVES.itervalues():\n url = archive['url']\n sha1 = archive['sha1']\n archive_filename = archive['archive_filename']\n extract(archive_filename, home, sha1=sha1, verbose=True)\n # move around stuff if needed\n if 'moves' in archive:\n for move in archive['moves']:\n src = self.home(move['source'])\n dst = self.home(move['destination'])\n # We can't use shutil here since the destination folder\n # may already exist. 
Fortunately the distutils can help\n # us here (see standard library).\n dir_util.copy_tree(src, dst)\n dir_util.remove_tree(src)", "title": "" }, { "docid": "15c4d7a77be134ffe9925fba7b0c1b69", "score": "0.5557783", "text": "def download_data():\n sys.path.append(\"../\")\n import src.download_data as download_data\n\n return download_data", "title": "" }, { "docid": "56fd719d8f3fce9f5df1965466d3ac8b", "score": "0.555271", "text": "def cli_download_to_directory(self, args):\n links_retriever = LinksRetriever(args.url)\n\n if args.dir is None:\n thread_dir = os.path.join(self.DEFAULT_DOWNLOAD_DIR,\n links_retriever.board_initials,\n links_retriever.thread_id)\n args.dir = thread_dir\n\n downloader = BatchDownloader(links_retriever, args.dir)\n self.downloader_start(downloader)\n self.recent_threads_add(downloader)\n self.save_config()\n\n return downloader.destination_folder", "title": "" }, { "docid": "d996807c7531b44ae2d5d3d18d84e304", "score": "0.5545034", "text": "def main():\r\n\r\n args = parseArgs()\r\n completed_classes = []\r\n\r\n mkdir_p(PATH_CACHE, 0o700)\r\n if args.clear_cache:\r\n shutil.rmtree(PATH_CACHE)\r\n\r\n for class_name in args.class_names:\r\n try:\r\n logging.info('Downloading class: %s', class_name)\r\n if download_class(args, class_name):\r\n completed_classes.append(class_name)\r\n except requests.exceptions.HTTPError as e:\r\n logging.error('HTTPError %s', e)\r\n except ClassNotFound as cnf:\r\n logging.error('Could not find class: %s', cnf)\r\n except AuthenticationFailed as af:\r\n logging.error('Could not authenticate: %s', af)\r\n\r\n if completed_classes:\r\n logging.info(\r\n \"Classes which appear completed: \" + \" \".join(completed_classes))", "title": "" }, { "docid": "2c5b8a943f9ef61c380961195efa9cd6", "score": "0.55401725", "text": "def _start_download(self, url, filename):\r\n raise NotImplementedError(\"Subclasses should implement this\")", "title": "" }, { "docid": "15746ccc025b09281faf6b248ed2137a", "score": "0.55206937", "text": "def Download(self):\n config = self._config\n url = self._url\n filename = self._archive_file\n download_dir = config.download_dir\n download_path = os.path.join(download_dir, filename)\n if os.path.exists(download_path) and not config.force:\n print '%s already exists - skipping download from %s' % (filename, url)\n return\n\n print 'Downloading %s from %s: ' % (filename, url)\n try:\n urllib.urlretrieve(url, download_path, _DownloadStatusHook)\n except IOError:\n print ('\\nERROR:\\n'\n 'Could not download %s.\\n' % url\n + ('It could be that this particular version is no longer'\n ' available.\\n'\n 'Check the site where the url is coming from.\\n'\n 'If there is a more recent version then:\\n'\n ' 1) Edit this script to change the old url to the new one.\\n'\n ' 2) Run the script again.\\n'\n ' It will pick up where it left off, using the new url.'\n '\\n'))\n sys.exit(1)", "title": "" }, { "docid": "f1c91a63c02540711ffa768a2c570190", "score": "0.55096304", "text": "def download_and_prepare_dataset(cls):\n raise NotImplementedError", "title": "" }, { "docid": "29745b948a0b018c73fe67eb55d7d0b3", "score": "0.5502268", "text": "def _download_and_extract(self):\n create_dirs(self._download_dir)\n file_path = os.path.join(self._download_dir, \"cifar-100-python.tar.gz\")\n url = \"https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz\"\n download_file(\n url,\n file_path,\n overwrite=False,\n progress_title=\"downloading CIFAR-100\",\n )\n\n create_dirs(self._extract_dir)\n with tarfile.open(file_path, \"r:gz\") as 
tar:\n tar.extractall(path=self._extract_dir)", "title": "" }, { "docid": "b4b8fd0a3d2e09011ff6c9ab4c50d7c5", "score": "0.54825354", "text": "def __download(self, dataset_name, dir):\n if os.path.isdir('datasets') is False:\n os.mkdir('datasets')\n\n db = {'female': 'https://aimldatasets.s3.ap-northeast-2.amazonaws.com/datasets/female.zip',\n 'male': 'https://aimldatasets.s3.ap-northeast-2.amazonaws.com/datasets/male.zip'}\n remote_url = db[dataset_name]\n file_name = remote_url.split('/')[-1]\n\n if os.path.isfile(os.path.join('datasets', file_name)) is False:\n with requests.get(remote_url, stream=True) as r:\n r.raise_for_status()\n print(f'Getting dataset of \\\"{dataset_name}\\\"')\n total_length = int(r.headers.get('content-length'))\n with open(os.path.join(\"datasets\", file_name), 'wb') as f:\n for chunk in r.iter_content(chunk_size=8192):\n f.write(chunk)\n current_length = f.tell()\n sys.stdout.write(\n f'Download progress: {100 * current_length/total_length}% \\r')\n sys.stdout.flush()\n\n if os.path.isdir(os.path.join('datasets', file_name.split('.')[0])) is False:\n sys.stdout.write(f'Extracting...\\n')\n zip_file = zipfile.ZipFile(os.path.join(dir, file_name))\n zip_file.extractall(dir)\n\n if glob.iglob(os.path.join('datasets', file_name.split('.')[0], '*.*')):\n print(f'Spliting dataset of \\\"{dataset_name}\\\"')\n self.__distributor(dataset_name, os.path.join(\n dir, file_name.split('.')[0]))\n\n print(f'OK!')", "title": "" }, { "docid": "8ba0340dad64f787aba539af0f10559d", "score": "0.5475373", "text": "def __init__(self):\r\n self.driver = webdriver.Chrome()\r\n self.driver.get(WHATSAPP_WEB_URL)\r\n self.dl = youtube_dl.YoutubeDL(DOWNLOAD_OPTIONS)\r\n self.download_threads = []", "title": "" }, { "docid": "7f25ddcffb46042a2a5af9e69f6a71a7", "score": "0.5468363", "text": "def download_build(self, name, dst_directory):\n raise NotImplementedError('Child class must implement method.')", "title": "" }, { "docid": "c4f92251d5eba2f0a9baf879947f8031", "score": "0.54531366", "text": "def download_data(self, *args, **kwargs) -> None:\n download_data(*args, **kwargs)", "title": "" }, { "docid": "3d5479578ab7a0bb817f607a4b70e5ca", "score": "0.54523796", "text": "def _download_and_prepare(self, dl_manager, download_config=None):\n return self._tfds_dataset_builder._download_and_prepare( # pylint: disable=protected-access\n dl_manager, download_config)", "title": "" }, { "docid": "64059e116b9193efeef9588d44f8c129", "score": "0.5451488", "text": "def download(self):\n # import essential packages\n import gzip\n import pickle\n\n from six.moves import urllib\n from torchvision import datasets\n\n # check if dataset already exists\n if self._check_exists():\n return\n\n # make data dirs\n try:\n os.makedirs(os.path.join(self.root, self.raw_folder))\n os.makedirs(os.path.join(self.root, self.processed_folder))\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n\n # download pkl files\n logging.info(\"Downloading \" + self.url)\n filename = self.url.rpartition(\"/\")[2]\n file_path = os.path.join(self.root, self.raw_folder, filename)\n if not os.path.exists(file_path.replace(\".gz\", \"\")):\n data = urllib.request.urlopen(self.url)\n with open(file_path, \"wb\") as f:\n f.write(data.read())\n with open(file_path.replace(\".gz\", \"\"), \"wb\") as out_f, gzip.GzipFile(file_path) as zip_f:\n out_f.write(zip_f.read())\n os.unlink(file_path)\n\n # process and save as torch files\n logging.info(\"Processing...\")\n\n # load MNIST-M images from pkl file\n with 
open(file_path.replace(\".gz\", \"\"), \"rb\") as f:\n mnist_m_data = pickle.load(f, encoding=\"bytes\")\n mnist_m_train_data = torch.ByteTensor(mnist_m_data[b\"train\"])\n mnist_m_test_data = torch.ByteTensor(mnist_m_data[b\"test\"])\n\n # get MNIST labels\n mnist_train_labels = datasets.MNIST(root=self.mnist_root, train=True, download=True).targets\n mnist_test_labels = datasets.MNIST(root=self.mnist_root, train=False, download=True).targets\n\n # save MNIST-M dataset\n training_set = (mnist_m_train_data, mnist_train_labels)\n test_set = (mnist_m_test_data, mnist_test_labels)\n with open(os.path.join(self.root, self.processed_folder, self.training_file), \"wb\") as f:\n torch.save(training_set, f)\n with open(os.path.join(self.root, self.processed_folder, self.test_file), \"wb\") as f:\n torch.save(test_set, f)\n\n logging.info(\"[DONE]\")", "title": "" }, { "docid": "1754b9e3d70cbb11ae5647313f28d25f", "score": "0.5448866", "text": "def __init__(self, docs_dir='docs', offline=False):\n\n # create list of intended bootstrap file locations\n bootstraps = [\n os.path.join(docs_dir, 'templates/bootstrap.bundle.min.js'),\n os.path.join(docs_dir, 'templates/bootstrap.min.css'),\n os.path.join(docs_dir, 'templates/jquery.min.js')\n ]\n # check if any of required bootstrap files do not exist\n if not any([os.path.exists(loc) for loc in bootstraps]):\n # if any do not exist, we download all\n print(\"Not all required Bootstrap files found. \"\n \"They will be downloaded to \"\n f\"'{os.path.join(docs_dir, 'templates')}'.\")\n # download bootstrap files\n bootstrap_download(docs_dir)\n \n # compiled regex for finding import libraries\n #self.libs_re = re.compile(r\"\")\n # create compiled regex for finding classes and all that they contain (final character must be removed though)\n self.class_re = re.compile(r\"(?sm)class [\\w\\d_]+:.*(^.)\")\n # if we don't find any with above, we can try with this for class at end of file\n self.class_end_re = re.compile(r\"(?sm)class [\\w\\d_]+:.*\")", "title": "" }, { "docid": "e3cac8084a7abc195727f06bb20ac377", "score": "0.5448727", "text": "def do_download(self, msg, url):\n with self.parallel_downloads_sema:\n p = subprocess.Popen([\"plowdown\", \"-o\",\n self.download_directory, url],\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE)\n p.wait()\n stdout, stderr = p.communicate()\n if p.returncode != 0 or not stdout:\n reply = \"Error: \" + stderr\n reply += \"Are you sure you pasted a valid link?\"\n else:\n reply = stdout.strip() + \" successfully downloaded.\"\n self.send_simple_reply(msg, reply)\n self.download_queue.task_done()", "title": "" }, { "docid": "7f671fae3d532d6b29a0d83554affc85", "score": "0.54389834", "text": "def download(self):\n self._mk_cd(self.config[\"download_folder_name\"])\n self._mk_cd(\"{0} - {1}\".format(self.artist, self.title))\n self._download_tracks()\n\n if self.config[\"save_or_embed\"] == \"save\":\n self._download_art()\n elif self.config[\"save_or_embed\"] == \"embed\":\n self._embed_art()", "title": "" }, { "docid": "5e63370a82b9835d07807adfcd8d81f6", "score": "0.5423143", "text": "def download(self) -> None:\n if self._check_exists():\n return\n\n os.makedirs(self.raw_folder, exist_ok=True)\n split = self.resources[self.subsets[self.what]]\n\n for url, md5 in split:\n download_and_extract_archive(url, self.raw_folder, md5=md5)", "title": "" }, { "docid": "3502a1187cb6c06bd76678167de14e95", "score": "0.5418685", "text": "def run(self):\r\n\t\ttry:\r\n\t\t\t# Make the match test for the intended pattern within the 
URI returned resource.\r\n\t\t\tmatch_result = helpers.Utilities.WebUtility.get_link(helpers.BuildConfig.AprUtil.AprUtilConfig.DOWNLOAD_URL,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\thelpers.BuildConfig.AprUtil.AprUtilConfig.DOWNLOAD_PATTERN)\r\n\r\n\t\t\t# Pretty Self Explanatory.\r\n\t\t\t# Check the match for `PATTERN::NOT::FOUND`.\r\n\t\t\tif match_result is None:\r\n\t\t\t\t# Enable logging here and abort.\r\n\t\t\t\t# We need to update the URI configurations.\r\n\t\t\t\traise IOError\r\n\r\n\t\t\t# Logging a comment\r\n\t\t\tdownload_apr_util_logger.info('Found URI Pattern: {' + match_result.group(0) + '} from Referrer URI {' +\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\thelpers.BuildConfig.AprUtil.AprUtilConfig.DOWNLOAD_URL + '}')\r\n\r\n\t\t\t# Get the \"*.tar.gz\" package from the `APR-UTIL-ARCHIVES` repository.\r\n\t\t\tARCHIVE_TAR_URL = helpers.BuildConfig.AprUtil.AprUtilConfig.ARCHIVE_URL + match_result.group(0)\r\n\r\n\t\t\t# Logging a comment\r\n\t\t\tdownload_apr_util_logger.info('Tar Download URI constructed: {' + ARCHIVE_TAR_URL + '}')\r\n\r\n\t\t\t# Prepare the request to download the file.\r\n\t\t\ttar_request_object = Request(ARCHIVE_TAR_URL)\r\n\t\t\turl_tar_file_name = ARCHIVE_TAR_URL\r\n\r\n\t\t\t# Logging a comment\r\n\t\t\tdownload_apr_util_logger.info('{Request Prepared} Handing Off the Request Object to the *WebUtility* Service')\r\n\r\n\t\t\t# Invoke the Download action.\r\n\t\t\t# Pass in the URI file-name to use as the download's base-name and\r\n\t\t\t# the `REQUEST` object to initiate the request.\r\n\t\t\t# Provide the `TAR` file-name to the DownloadManager Module.\r\n\t\t\tself.tar_file_name = helpers.Utilities.WebUtility.download_tar_binary(url_tar_file_name, tar_request_object)\r\n\t\texcept (URLError, HTTPError, ContentTooShortError, UnicodeDecodeError, IOError, OSError) as \\\r\n\t\t\t\tdownloadAprUtil_aprUtilDownloaderThread_error:\r\n\t\t\t# Put logging below.\r\n\t\t\tdownload_apr_util_logger.error('AprUtil Download Failed: ' + str(downloadAprUtil_aprUtilDownloaderThread_error))\r\n\t\t\t# Put the exception object in the Exception Queue to enable\r\n\t\t\t# the TaskManager (or DownloadManager) to take care\r\n\t\t\t# of the exception.\r\n\t\t\tself.exception_stacktrace_queue.put(downloadAprUtil_aprUtilDownloaderThread_error)\r\n\t\telse:\r\n\t\t\t# Notify Download Complete.\r\n\t\t\tself.download_complete = True", "title": "" }, { "docid": "f2e8ce5f1add2b344fc9f494f3921843", "score": "0.54117", "text": "def download(self, uncompress=True):\n url = \"http://busco.ezlab.org/v2/datasets\"\n for filename in self.filenames:\n basename = filename + \".tar.gz\"\n target = self.base + \"/\" + basename\n print(url + \"/\" + basename)\n wget(url + \"/\" + basename, target)\n # TODO untar datasets and cleanup the tar.gz \n if uncompress:\n execute(\"tar xvfz %s -C %s\" % (target, self.base))\n execute(\"rm -f %s\" % ( target))", "title": "" }, { "docid": "5d8f8122037e52a01c5931987be3f74f", "score": "0.5405006", "text": "def download(self, e):\n self.library.download(self.download_gauge)\n self.load_library()", "title": "" }, { "docid": "19b32cb0ab5c84f69ea7a5c99a0d133f", "score": "0.54014415", "text": "def download(self):\n\n if self._check_raw_exists():\n return\n\n # download files\n try:\n os.makedirs(os.path.join(self.root, self.raw_folder))\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n\n download_url(os.path.join(self.url, self.filename),\n root=os.path.join(self.root, self.raw_folder),\n filename=self.filename,\n md5=None)\n if not 
self._check_raw_exists():\n raise RuntimeError(\"Unable to find downloaded file. Please try again.\")\n else:\n print(\"Download finished.\")", "title": "" }, { "docid": "fe8da134c7fac8c7f33eb1d50b7b65a3", "score": "0.5390181", "text": "def download(self):\n self.file_link = self.yaml_data['data_file']['location']['url']\n if 'zenodo.org' in str(self.file_link):\n logging.debug(f' zenodo link found: {self.file_link}.')\n self.file_link = trim_zenodo_link(self.file_link)\n logging.debug(f' link has been trimmed: {self.file_link}.')\n else:\n pass\n\n if 'input_name' in self.yaml_data['data_file'].keys():\n self.infile = self.yaml_data['data_file']['input_name']\n elif 'dropbox.com' in self.file_link:\n self.infile = self.file_link.split('/')[-1].replace('?dl=0', '')\n self.infile = self.infile.replace('.tsv.bgz', '.tsv.gz')\n else:\n self.infile = self.file_link.split('/')[-1]\n\n self.submitted_dir = 'submitted_data/'\n pathlib.Path(self.submitted_dir).mkdir(exist_ok=True)\n self.sub_datatype_dir = self.submitted_dir + str(self.data_type) + '/'\n pathlib.Path(self.sub_datatype_dir).mkdir(exist_ok=True)\n\n self.hash = self.yaml_data['data_file']['location']['md5']\n self.downloaded_file = self.sub_datatype_dir + self.infile\n self.fileset = [self.downloaded_file,]\n\n if os.path.isfile(self.sub_datatype_dir + self.infile):\n logging.info(f' file exists in {self.sub_datatype_dir}')\n return\n\n if 'ftp://' in self.file_link:\n pass\n else:\n try:\n verify_weblink(self.file_link)\n except:\n logging.error(f' file unavailable')\n sys.exit(1)\n\n if not os.path.isfile(self.sub_datatype_dir + self.infile):\n logging.info(f' starting download')\n # Identifiable partial strings in links are used\n # to prompt different download functions\n\n # Google drive file links contain hashed IDs\n # Input file name is passed to name the donwloaded file\n if 'drive.google.com' in self.file_link:\n download_gdrive_file(\n self.file_link,\n self.sub_datatype_dir,\n self.infile\n )\n # Dropbox links can be used with 'wget' via requests\n # to download files\n elif 'dropbox.com' in self.file_link:\n download_dbox_file(\n self.file_link,\n self.sub_datatype_dir,\n self.infile\n )\n # ftp server files are easier to download via calling\n # 'wget' in the shell\n elif 'ftp://' in self.file_link:\n download_ftp_file(\n self.file_link,\n self.sub_datatype_dir\n )\n else:\n # All other downloads are currently using 'requests'\n download_file(\n self.file_link,\n self.sub_datatype_dir,\n )\n else:\n logging.info(f' file found in {self.sub_datatype_dir}')\n\n self.hash = self.yaml_data['data_file']['location']['md5']\n self.downloaded_file = self.sub_datatype_dir + self.infile\n self.fileset = [self.downloaded_file,]", "title": "" }, { "docid": "b7598bb4553c33a0c40e16bd11f42a6f", "score": "0.5380785", "text": "def download_and_unzip(self):\n\n logger = logging.getLogger()\n if os.path.exists(self.UNZIPPED_FILE_PATH):\n # unzipped path exists\n logger.info('Unzipped dataset exists')\n return\n\n if not os.path.exists(self.ZIP_FILE_PATH):\n print \"downloading\"\n # donwload zip file\n # print('Download dataset');\n logger.info('Downloading Caltech dataset zip file')\n curl_cmd = 'curl {} -o {}'.format(self.ZIP_FILE_URL, self.ZIP_FILE_PATH)\n\n # downloading zip file\n if os.system(curl_cmd) != 0:\n logger.info('Failure downloading zipped file')\n sys.exit(1)\n return\n\n logger.info('Unzipping the dataset')\n print \"unzipping\"\n create_folder = 'mkdir ' + self.UNZIPPED_FILE_PATH\n os.system(create_folder)\n unzip_cmd = 
'tar xf {} -C {}'.format(self.ZIP_FILE_PATH, os.path.dirname(self.UNZIPPED_FILE_PATH))\n if os.system(unzip_cmd) != 0:\n logger.info('Failure unzipping file')\n sys.exit(1)\n logger.info('Unzipped successfully')", "title": "" }, { "docid": "e77973e259072d36a7e9f3f96092cb4e", "score": "0.5355434", "text": "def main():\n if platform.system()=='Windows':\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n \n loop = asyncio.new_event_loop()\n #asyncio.set_event_loop(loop)\n asyncio.set_event_loop(asyncio.new_event_loop())\n sem = asyncio.Semaphore(MAX_CONCURRENT_DOWNLOADS)\n\n # do the preprocessing (parsing the site for urls and extracting the filenames\n urls_raw = loop.run_until_complete(parseresponse(DROPBOX_URL))\n urls_download = buildurls(urls_raw)\n\n # start the async download manager\n tasks = [download(url, filename, DESTINATION_FOLDER, sem) for filename, url in urls_download]\n loop.run_until_complete(asyncprogressbar(tasks))\n\n loop.stop()\n loop.run_forever()\n loop.close()", "title": "" }, { "docid": "1b2f923724da01bb13cec8c97e0c2420", "score": "0.53525174", "text": "def _download_and_prepare(self, dl_manager) -> None:\n\n if self.model_checkpoint.lower() in CHECKPOINT_URLS:\n checkpoint_name = self.model_checkpoint.lower()\n else:\n raise KeyError(\n f\"{self.model_checkpoint} checkpoint not found. You should supply the name of a model checkpoint for \"\n f\"BARTScore in {CHECKPOINT_URLS.keys()}\"\n )\n model_checkpoint = CHECKPOINT_URLS[checkpoint_name][\"model_checkpoint\"]\n\n if self.model_weights:\n if self.model_weights.lower() in CHECKPOINT_URLS[checkpoint_name][\"model_weights\"]:\n weights_name = self.model_weights.lower()\n else:\n raise KeyError(\n f\"Weights named '{self.model_weights}' not found. \"\n f\"You should supply the name of a model weight for BARTScore in \"\n + str(list(CHECKPOINT_URLS[checkpoint_name][\"model_weights\"].keys()))\n )\n model_path = CHECKPOINT_URLS[checkpoint_name][\"model_weights\"][weights_name]\n else:\n model_path = None\n\n bartscore_source = (\n \"https://raw.githubusercontent.com/neulab/BARTScore/47b8341854e1b8be965b65480ce236b0c2f7543b/bart_score.py\"\n )\n self.external_module_path = dl_manager.download(bartscore_source)\n BARTScorer = self._get_external_resource(\"bart_score\", attr=\"BARTScorer\")\n\n self.scorer = BARTScorer(device=self.device, max_length=self.max_length, checkpoint=model_checkpoint)\n\n if model_path is not None:\n model_dest = dl_manager.download(model_path)\n self.scorer.load(path=model_dest)", "title": "" }, { "docid": "afc389cc6859cb32c0fd4c400715fbac", "score": "0.5347881", "text": "def downloadProjects(self):\n pass", "title": "" }, { "docid": "21aa8c0cc36e88d8f48a3d4864c31422", "score": "0.53420675", "text": "def download(self, url: Optional[str], dst_path: str) -> None:\n\n def progress(down: float, block: float, size: float) -> None:\n \"\"\"Show download progress.\n\n Args:\n down (float): Downloaded size.\n block (float): Block size.\n size (float): Total size of the file.\n \"\"\"\n\n percent = min(100. 
* down * block / size, 100)\n file_name = osp.basename(dst_path)\n print(f'\\rDownloading {file_name}: {percent:.2f}%', end='')\n\n if url is None and not osp.exists(dst_path):\n raise FileNotFoundError(\n 'Direct url is not available for this dataset.'\n ' Please manually download the required files'\n ' following the guides.')\n\n if url.startswith('magnet'):\n raise NotImplementedError('Please use any BitTorrent client to '\n 'download the following magnet link to '\n f'{osp.abspath(dst_path)} and '\n f'try again.\\nLink: {url}')\n\n print('Downloading...')\n print(f'URL: {url}')\n print(f'Destination: {osp.abspath(dst_path)}')\n print('If you stuck here for a long time, please check your network, '\n 'or manually download the file to the destination path and '\n 'run the script again.')\n request.urlretrieve(url, dst_path, progress)\n print('')", "title": "" }, { "docid": "ee5659aa403ab29da147d9f77fc84717", "score": "0.53205013", "text": "def download(self) -> None:\n\n if self._check_exists():\n return\n\n os.makedirs(self.raw_folder, exist_ok=True)\n\n download_and_extract_archive(self.url, download_root=self.raw_folder, md5=self.md5)\n gzip_folder = os.path.join(self.raw_folder, \"gzip\")\n for gzip_file in os.listdir(gzip_folder):\n if gzip_file.endswith(\".gz\"):\n extract_archive(os.path.join(gzip_folder, gzip_file), self.raw_folder)\n shutil.rmtree(gzip_folder)", "title": "" }, { "docid": "37089f46196d001c0b69c71a08bd24b7", "score": "0.5319719", "text": "def downloadData(self):\n pass", "title": "" }, { "docid": "964ccaa4ffee3058508f3a67f3ee0bf3", "score": "0.5317115", "text": "def download_and_extract_java_dep(logger, towhere, url):\n download(logger, towhere, url)\n\n # Extract path\n\n # W:100,10:download_and_extract_gz: Unused variable 'ext'\n # pylint: disable = W0612\n path = os.path.dirname(towhere)\n fname = os.path.basename(towhere)\n base, ext = os.path.splitext(fname)\n folder = os.path.join(path, base) \n\n # XXX: Argh do this again with less coffee more sleep\n folder = folder.replace(\".tar\", \"\")\n\n if towhere.endswith(\".gz\") and not os.path.exists(folder):\n logger.info(\"Extracting tar archive: %s to %s\" % (fname, folder))\n\n os.makedirs(folder)\n\n tar = tarfile.open(towhere)\n tar.extractall(path=folder)\n tar.close()", "title": "" }, { "docid": "62118a6b0dc0ff7897f0bdf5e09f795f", "score": "0.53123814", "text": "def download_test_data(urls=urls):\n # nufeb_tools directory\n cp_dir = Path.home().joinpath(\".nufeb_tools\")\n cp_dir.mkdir(exist_ok=True)\n data_dir = cp_dir.joinpath(\"data\")\n data_dir.mkdir(exist_ok=True)\n # TODO Add progress bar\n for url in urls:\n parts = urlparse(url)\n filename = os.path.basename(parts.path)\n cached_file = os.path.join(data_dir, filename)\n if not os.path.exists(cached_file):\n local_filename, headers = urlretrieve(url, cached_file)\n tar = tarfile.open(local_filename, \"r\")\n tar.extractall(path=data_dir)\n tar.close()\n Path(local_filename).unlink()", "title": "" }, { "docid": "1595aee73456dda0695dbd3e8c15f9f7", "score": "0.52937603", "text": "def download(self):\n assert isinstance(self.ticker, str)\n assert len(self.ticker) > 0\n if self.exchange is None:\n url = self.URL.format(self.ticker)\n else:\n assert self.exchange in self.Exchanges\n url = self.URL.format(r\"{}:{}\".format(self.exchange,\n self.ticker))\n\n # load data from web\n try:\n response = request.urlopen(url)\n html = response.read()\n except:\n print(\"Warning: download failed for {}\".format(self.ticker))\n else:\n self.done = True\n 
self.__parseResult(html)", "title": "" }, { "docid": "4118e5bb23546eaf3920b96a5c3050c0", "score": "0.5292512", "text": "def download(self):\n if not self.nodata:\n self.download_file(self.remote_data, self.local_data)\n if not self.nohead:\n self.download_file(self.remote_head, self.local_head)", "title": "" }, { "docid": "b88e50b4847b53f63fdc5a47dbbf651c", "score": "0.52910084", "text": "def run(self):\n\n u = urllib2.urlopen(self.url)\n dest = None\n scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url)\n filename = os.path.basename(path)\n print(\"file name---------\",filename)\n if not filename:\n filename = 'downloaded.file'\n if dest:\n filename = os.path.join(dest, filename)\n\n with open(filename, 'wb') as f:\n meta = u.info()\n\n meta_func = meta.getheaders if hasattr(meta, 'getheaders') else meta.get_all # get all meta data from url\n meta_length = meta_func(\"Content-Length\") # find out the lenght of header\n file_size = None\n if meta_length:\n file_size = int(meta_length[0]) # total file size\n print(\"Downloading: {0} Bytes: {1}\".format(self.url, file_size))\n\n file_size_dl = 0\n block_sz = 8192 # divide block size(how may block you want to dowlonad file at the time)\n\n while True:\n buffer = u.read(block_sz)\n if not buffer:\n break\n\n file_size_dl += len(buffer)\n f.write(buffer) # store the file to file directory or remote directory\n\n status = \"{0:16}\".format(file_size_dl)\n start = time.time()\n if file_size:\n status += \" [{0:6.2f}%]\".format(file_size_dl * 100 / file_size) # measure status every time\n percentage_complete = \" {0:6.2f}\".format(file_size_dl * 100 / file_size) # complate percentage\n\n estimate_time_to_complate = str(datetime.timedelta(seconds=((time.time()-start) * (100-float(percentage_complete))))) # calculate estimate time for downloading\n\n # save details to database\n \"\"\"\n store file data based on file name\n \"\"\"\n exe = \"\"\"select file_name from downloadFileData where file_name = '{0}';\"\"\".format(filename)\n cur.execute(exe)\n response = cur.fetchall()\n\n #if len(response) != 0:\n reamaning_file_size = file_size - file_size_dl\n if len(response) == 0:\n # insert data to database first time\n exe = \"\"\"INSERT INTO downloadFileData (id, total_file_size, download_file_size, remaning_file_size, status, eta, file_name) VALUES ({0}, '{1} bytes', '{2} bytes', '{3} bytes', '{4}', '{5}', '{6}');\"\"\".format(self.id,file_size, file_size_dl, reamaning_file_size, 'downloading', estimate_time_to_complate, filename)\n\n cur.execute(exe)\n conn.commit()\n elif filename in response:\n # update data every time(every buffer downloading)\n exe =\"\"\"UPDATE downloadFileData SET download_file_size='{0} bytes',remaning_file_size='{1} bytes',status='{2}',eta='{3}' WHERE file_name = '{4}';\"\"\".format(file_size_dl, reamaning_file_size, \"downloading\",estimate_time_to_complate, filename)\n cur.execute(exe)\n conn.commit()\n elif file_size == file_size_dl:\n # finish dwonload then update complete status\n exe = \"\"\"UPDATE downloadFileData SET download_file_size='{0} bytes',remaning_file_size='{1} bytes',status='{2}',eta='{3}' WHERE file_name = '{4}';\"\"\".format(\n file_size_dl, reamaning_file_size, \"finished\", estimate_time_to_complate, filename)\n cur.execute(exe)\n conn.commit()\n print(\"every time status\", \"{0:16}\".format(file_size_dl),\" [{0:6.2f}%]\".format(file_size_dl * 100 / file_size), file_size_dl/ 1024, \"Kb\")\n status += chr(13)\n print(status, end=\"\")\n print( )\n\n return filename", "title": "" }, { 
"docid": "28a382c9e868cb7810b9778e30188987", "score": "0.52826524", "text": "def downloader(link, out_dir, dl_base=\"http://www.nber.org/cps/\"):\n content = urllib2.urlopen(dl_base + link)\n with open(out_dir + link, 'w') as f:\n f.write(content.read())", "title": "" }, { "docid": "303ff55cc2049acdcdb8fa6478a0e708", "score": "0.5269468", "text": "def download(self, dn):\n if dn not in self.datasets.keys():\n raise ValueError('unknown dataset {}'.format(dn))\n\n destdir = os.path.join(get_cachepath(), dn)\n if not os.path.exists(destdir):\n os.makedirs(destdir)\n\n for url in self.datasets[dn]['urls']:\n fn = os.path.join(destdir, url[url.rfind('/') + 1:])\n content = get(url).content\n with open(fn, 'wb+') as f:\n f.write(content)", "title": "" }, { "docid": "97648f7835f245dcd5266b96af7d9f90", "score": "0.52681506", "text": "def _download_and_extract(self):\n create_dirs(self._download_dir)\n file_path = os.path.join(self._download_dir, \"cifar-10-python.tar.gz\")\n url = \"https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\"\n download_file(\n url,\n file_path,\n overwrite=False,\n progress_title=\"downloading CIFAR-10\",\n )\n\n create_dirs(self._extract_dir)\n with tarfile.open(file_path, \"r:gz\") as tar:\n tar.extractall(path=self._extract_dir)", "title": "" }, { "docid": "e6e0e8bc5c6189c40673917a79306029", "score": "0.5265332", "text": "def download(self, dl_dir=None):\n additional_args = []\n if dl_dir:\n self.download_dir = dl_dir\n\n exist = self._get_existance_state()\n download_file = self.filename_tmp\n\n if exist == self._EXIST_FULL:\n out('File \\'%s\\' already exists..skipping')\n elif exist == self._EXIST_PART:\n out('File partially exists, continuing')\n additional_args = ['-c']\n\n cmd = ['wget', self.url, '-O', download_file]\n cmd.extend(additional_args)\n out('calling: %s' % ' '.join(cmd))\n status = call_and_output(cmd)\n\n full_path_file = os.path.join(self.download_dir, self.filename)\n full_path_tmp = os.path.join(self.download_dir, self.filename_tmp)\n try:\n os.rename(full_path_tmp, full_path_file)\n except:\n #TODO\n pass\n \n return status", "title": "" }, { "docid": "25b6095383234aea55b53cd45ffff85f", "score": "0.5251077", "text": "def download(self, body, follow_backoff=False):\n url = self.build_url('downloads')\n result = self.post_request(url, body, follow_backoff)\n return result['response']", "title": "" }, { "docid": "5b37ac327d4fc4bf8571bee3758968de", "score": "0.5248473", "text": "def download(self, urls_info):\n\n # We may want to add a pack kwarg such to keep all downloads in the same\n # directory\n\n def _download(url_info):\n \"\"\"Function applied for each individual download.\"\"\"\n # Convert string to UrlInfo object\n url_info = to_url_info(url_info)\n\n # Download the url\n return self._process_and_cache_uri(\n uri=url_info.url,\n uri_info=url_info,\n process_trial_fn=self._download,\n )\n\n # Run the download function on each of the urls\n return _parallel_run(\n _download,\n urls_info,\n max_workers=_NUM_PARALLEL_DOWNLOADS,\n )", "title": "" }, { "docid": "43b50da47c3680aebabcdc4031931e5e", "score": "0.5244276", "text": "def start_downloader(self, searchTermsTokens):\n self.downloader = Downloader(searchTermsTokens, self.directory)\n self.index_of_song_being_downloaded = 0\n self.index_of_song_being_watched = 0", "title": "" }, { "docid": "98d2dac3312fc1debb00ab33925bc82f", "score": "0.5243188", "text": "def finish_up(self):\n self._join_parts()\n stats = self.get_stats()\n stat_string = 'Downloaded %.2f kB in %.2f s at %.2f kB/s' % 
(\n stats.kB_downloaded, stats.time_taken, stats.download_speed)\n logger.info('Finished Downloading {}: {}'.format(self.file_name,\n stat_string))", "title": "" }, { "docid": "b7eef4152c97c0cf554558b9f3588ccf", "score": "0.523735", "text": "def download(self):\n print \"download\", \"undefined\"", "title": "" }, { "docid": "6502f5a5aed674783034679a8876f067", "score": "0.5235032", "text": "def download_files():\n\n base_link = r'http://www.nada.kth.se/cvap/actions/'\n base_file = os.getcwd() + r'/'\n files = ['walking', 'jogging', 'running',\n 'boxing', 'handwaving', 'handclapping']\n\n for file in files:\n\n link = base_link + file + '.zip'\n file_name = base_file + file + '.zip'\n\n if not os.path.exists(file_name):\n\n with TqdmUpTo(unit='B', unit_scale=True, miniters=1, desc=link.split('/')[-1]) as t:\n urllib.request.urlretrieve(\n link, file_name, reporthook=t.update_to, data=None)\n\n success = _extract_archive(file_name, path=r'Data/' + file)\n\n if success:\n print('-----------------------------{}------'.format('-' * len(file)))\n print('| Successfully extracted --> {}.zip |'.format(file))\n print('-----------------------------{}------'.format('-' * len(file)))\n os.remove(file_name)\n else:\n print('\\nUnsuccessful extraction --> {}.zip\\n'.format(file))", "title": "" }, { "docid": "e3fb11290ab0c837fd09efdbbd564a95", "score": "0.5230343", "text": "def download(self, pkg_todownload):\n\t\tsuccess_downloads = []\n\t\tfailed_downloads = []\n\t\tunavail_downloads = []\n\n\t\t# case where no filenames have been provided\n\t\tfor index, pkg in enumerate(pkg_todownload):\n\t\t\tif isinstance(pkg, str):\n\t\t\t\tpkg_todownload[index] = [pkg, None]\n\t\t\t# remove whitespaces before and after package name\n\t\t\tpkg_todownload[index][0] = pkg_todownload[index][0].strip('\\r\\n ')\n\n\t\t# BulkDetails requires only one HTTP request\n\t\t# Get APK info from store\n\t\tdetails = list()\n\t\tfor pkg in pkg_todownload:\n\t\t\ttry:\n\t\t\t\tdetail = self.api.details(pkg[0])\n\t\t\t\tdetails.append(detail)\n\n\t\t\texcept RequestError as request_error:\n\t\t\t\tfailed_downloads.append((pkg, request_error))\n\n\t\tif any([d is None for d in details]):\n\t\t\tlogger.info(\"Token has expired while downloading. 
Retrieving a new one.\")\n\t\t\tself.refresh_token()\n\t\t\tdetails = self.api.bulkDetails([pkg[0] for pkg in pkg_todownload])\n\t\tposition = 1\n\t\tfor detail, item in zip(details, pkg_todownload):\n\t\t\tpackagename, filename = item\n\n\t\t\tif filename is None:\n\t\t\t\tif self.append_version:\n\t\t\t\t\tfilename = detail['docId']+ \"-v.\" + detail['versionString'] + \".apk\"\n\t\t\t\telse:\n\t\t\t\t\tfilename = detail['docId']+ \".apk\"\n\n\t\t\tlogger.info(\"%s / %s %s\", position, len(pkg_todownload), packagename)\n\n\t\t\t# Check for download folder\n\t\t\tdownload_folder = self.download_folder\n\t\t\tif not os.path.isdir(download_folder):\n\t\t\t\tos.makedirs(download_folder, exist_ok=True)\n\n\t\t\t# Download\n\t\t\ttry:\n\t\t\t\tif detail['offer'][0]['checkoutFlowRequired']:\n\t\t\t\t\tmethod = self.api.delivery\n\t\t\t\telse:\n\t\t\t\t\tmethod = self.api.download\n\t\t\t\tdata_iter = method(packagename,\n\t\t\t\t\t\t\t\t expansion_files=self.addfiles_enable)\n\t\t\t\tsuccess_downloads.append(packagename)\n\t\t\texcept IndexError as exc:\n\t\t\t\tlogger.error(\"Error while downloading %s : this package does not exist, \"\n\t\t\t\t\t\t\t \"try to search it via --search before\",\n\t\t\t\t\t\t\t packagename)\n\t\t\t\tunavail_downloads.append((item, exc))\n\t\t\texcept Exception as exc:\n\t\t\t\tlogger.error(\"Error while downloading %s : %s\", packagename, exc)\n\t\t\t\tfailed_downloads.append((item, exc))\n\t\t\telse:\n\t\t\t\tfilepath = os.path.join(download_folder, filename)\n\n\t\t\t\t#if file exists, continue\n\t\t\t\tif self.append_version and os.path.isfile(filepath):\n\t\t\t\t\tlogger.info(\"File %s already exists, skipping.\", filename)\n\t\t\t\t\tposition += 1\n\t\t\t\t\tcontinue\n\n\t\t\t\tadditional_data = data_iter['additionalData']\n\t\t\t\ttotal_size = int(data_iter['file']['total_size'])\n\t\t\t\tchunk_size = int(data_iter['file']['chunk_size'])\n\t\t\t\ttry:\n\t\t\t\t\twith open(filepath, \"wb\") as fbuffer:\n\t\t\t\t\t\tbar = util.progressbar(expected_size=total_size, hide=not self.progress_bar)\n\t\t\t\t\t\tfor index, chunk in enumerate(data_iter['file']['data']):\n\t\t\t\t\t\t\tfbuffer.write(chunk)\n\t\t\t\t\t\t\tbar.show(index * chunk_size)\n\t\t\t\t\t\tbar.done()\n\t\t\t\t\tif additional_data:\n\t\t\t\t\t\tfor obb_file in additional_data:\n\t\t\t\t\t\t\tobb_filename = \"%s.%s.%s.obb\" % (obb_file[\"type\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t obb_file[\"versionCode\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t data_iter[\"docId\"])\n\t\t\t\t\t\t\tobb_filename = os.path.join(download_folder, obb_filename)\n\t\t\t\t\t\t\tobb_total_size = int(obb_file['file']['total_size'])\n\t\t\t\t\t\t\tobb_chunk_size = int(obb_file['file']['chunk_size'])\n\t\t\t\t\t\t\twith open(obb_filename, \"wb\") as fbuffer:\n\t\t\t\t\t\t\t\tbar = util.progressbar(expected_size=obb_total_size, hide=not self.progress_bar)\n\t\t\t\t\t\t\t\tfor index, chunk in enumerate(obb_file[\"file\"][\"data\"]):\n\t\t\t\t\t\t\t\t\tfbuffer.write(chunk)\n\t\t\t\t\t\t\t\t\tbar.show(index * obb_chunk_size)\n\t\t\t\t\t\t\t\tbar.done()\n\t\t\t\texcept IOError as exc:\n\t\t\t\t\tlogger.error(\"Error while writing %s : %s\", packagename, exc)\n\t\t\t\t\tfailed_downloads.append((item, exc))\n\t\t\tposition += 1\n\n\t\tsuccess_items = set(success_downloads)\n\t\tfailed_items = set([item[0] for item, error in failed_downloads])\n\t\tunavail_items = set([item[0] for item, error in unavail_downloads])\n\t\tto_download_items = set([item[0] for item in pkg_todownload])\n\n\t\tif 
self.logging_enable:\n\t\t\tself.write_logfiles(success_items, failed_items, unavail_items)\n\n\t\tself.print_failed(failed_downloads + unavail_downloads)\n\t\treturn to_download_items - failed_items", "title": "" }, { "docid": "798f3a238080d33601a14264cbf17524", "score": "0.5223877", "text": "def _download(self, uris, \n err='Unable to download data at specified URL',\n **kwargs):\n\n # user specifies a download directory\n if \"directory\" in kwargs:\n download_dir = os.path.expanduser(kwargs[\"directory\"])\n else:\n download_dir = config.get(\"downloads\", \"download_dir\")\n\n # overwrite the existing file if the keyword is present\n if \"overwrite\" in kwargs:\n overwrite = kwargs[\"overwrite\"]\n else:\n overwrite = False\n\n if not isinstance(uris, list):\n uris = [uris]\n\n filepaths = []\n\n for uri in uris:\n _filename = os.path.basename(uri).split(\"?\")[0]\n\n # If the file is not already there, download it\n filepath = os.path.join(download_dir, _filename)\n\n if not(os.path.isfile(filepath)) or (overwrite and \n os.path.isfile(filepath)):\n try:\n response = urllib2.urlopen(uri)\n except (urllib2.HTTPError, urllib2.URLError):\n raise urllib2.URLError(err)\n with open(filepath, 'wb') as fp:\n shutil.copyfileobj(response, fp)\n else:\n print \"Using existing file rather than downloading, use overwrite=True to override.\"\n\n filepaths.append(filepath)\n\n return filepaths", "title": "" }, { "docid": "18b926b19ff6d683cc687c7dbd8929dd", "score": "0.52193415", "text": "def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "title": "" }, { "docid": "074e9cd4c82044d5ca69d78a14e39ffd", "score": "0.5216844", "text": "def __init__(self, text_dir, db_url, book_url, should_download=False):\n self.text_dir = text_dir\n self.db_url = db_url\n self.book_url = book_url\n self.should_download = should_download\n self.manager = Manager(db_url)\n self.extractor = Extractor(text_dir)", "title": "" }, { "docid": "8dca1c01c4704364638443a328b265b2", "score": "0.52139515", "text": "def download(self, pool):\n self.cache.download_files(self.host, self.files, pool)", "title": "" }, { "docid": "c49d3582f0fedc9b6d8925300f51724a", "score": "0.52121866", "text": "def main():\n args = PARSER.parse_args()\n download(\n outdir=args.outdir,\n youtubedl=args.youtubedl,\n download_record=args.download_record,\n channel_list=args.channel_list,\n logfile=args.logfile,\n )", "title": "" }, { "docid": "5f5a4e184e05794abf770eb182e26adc", "score": "0.5211495", "text": "def download(self, download_dir='blue_plus_data', override=False):\n raise NotImplementedError", "title": "" }, { "docid": "1580aaa807dc1bd7ade15d90dba2a51c", "score": "0.52099186", "text": "def get_downloader(session, class_name, args):\r\n\r\n external = {\r\n 'wget': WgetDownloader,\r\n 'curl': CurlDownloader,\r\n 'aria2': Aria2Downloader,\r\n 'axel': AxelDownloader,\r\n }\r\n\r\n for bin, class_ in 
iteritems(external):\r\n if getattr(args, bin):\r\n return class_(session, bin=getattr(args, bin))\r\n\r\n return NativeDownloader(session)", "title": "" }, { "docid": "10f0602290ccd582043ade03d0e452b9", "score": "0.5205078", "text": "def maybe_download_and_extract():\n dest_directory = FLAGS.data_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.urlretrieve(DATA_URL, filepath, reporthook=_progress)\n print\n statinfo = os.stat(filepath)\n print 'Succesfully downloaded', filename, statinfo.st_size, 'bytes.'\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "title": "" }, { "docid": "b75767f516034d366dc8abdeb3ef6762", "score": "0.5200567", "text": "def download(self):\n if self.download_state == const.NO_DOWNLOAD:\n self.download_state = const.DOWNLOADING\n self.extension = yield proxy.download.download_video(\n uri=self.magnet, title=self.title,\n )\n subtitles_loaded = False\n while self.download_percent != 100:\n self.download_percent = yield proxy.download.get_percent(\n uri=self.magnet,\n )\n if (\n self.exists and not subtitles_loaded\n and self.download_percent > config.preview_minimum / 2\n ):\n self.subtitle.series_path = self.path\n yield proxy.download.download_subtitle(\n subtitle=self.subtitle.as_dict,\n )\n subtitles_loaded = True\n if self.download_percent == const.NO_PERCENT:\n self._make_stopped()\n break\n if self.download_state != const.NO_DOWNLOAD:\n self.download_state = const.DOWNLOAD_FINISHED", "title": "" }, { "docid": "28c0eef93be202ef6bf62db907558ffb", "score": "0.5194038", "text": "def unzip_download(self):\n zipped_name = ''\n download_path = self.get_download_path()\n for current_file in os.listdir(download_path):\n if current_file.endswith('.zip'):\n zipped_name = current_file\n\n # Extract date and time from the dowloaded data folder\n unformated_date = zipped_name.split('_')[2]\n year = unformated_date[0:4]\n month = unformated_date[4:6]\n day = unformated_date[6:8]\n formatted_date = '{}_{}_{}'.format(year, month, day)\n os.mkdir(download_path + formatted_date)\n\n with ZipFile(download_path + zipped_name) as zipped:\n zipped.extractall(download_path + formatted_date)\n\n end = zipped_name.find('.zip')\n os.remove(download_path + zipped_name)\n\n data_folder = '{}.SAFE'.format(zipped_name[:end]) + os.sep\n data_folder_path = (download_path + formatted_date\n + os.sep + data_folder)\n\n for folders in os.listdir(data_folder_path):\n move(data_folder_path + os.sep + folders,\n download_path + formatted_date)", "title": "" }, { "docid": "64c135ee8d82dbf701cb3b5e20af0dc9", "score": "0.519138", "text": "def download_data():\n if not os.path.exists(os.path.join(os.getcwd(), \"tiny-imagenet-200\")):\n if not os.path.exists(os.path.join(os.getcwd(), \"tiny-imagenet-200.zip\")):\n print ('Downloading Flowers data from http://cs231n.stanford.edu/tiny-imagenet-200.zip ...')\n urlretrieve ('http://cs231n.stanford.edu/tiny-imagenet-200.zip', 'tiny-imagenet-200.zip', reporthook)\n print ('\\nExtracting tiny-imagenet-200.zip ...', end='', flush=True)\n zfile = zipfile.ZipFile (os.path.join(os.getcwd(), 'tiny-imagenet-200.zip'), 'r')\n zfile.extractall ('.')\n zfile.close()\n print ('Done')", "title": "" }, 
{ "docid": "fe783ce97af96f31caa94afb5be6a8e5", "score": "0.51903063", "text": "def test_download_routine(self, mock_download):\n download_path = join(settings.TMP_DIR, '4d8fae2e-e840-444a-ab40-9f9a74a60522.tar')\n mock_download.return_value = download_path\n self.create_packages_with_status(Package.DATA_ADDED)\n for _ in range(len(self.aip_uuids)):\n copyfile(join('fixtures', 'binaries', '4d8fae2e-e840-444a-ab40-9f9a74a60522.tar'), download_path)\n msg, count = DownloadRoutine().run()\n self.assertNotEqual(False, msg, \"Packages not downloaded correctly\")\n self.assertEqual(\"Package downloaded.\", msg)\n self.assertEqual(len(listdir(settings.TMP_DIR)), len(self.aip_uuids), \"Wrong number of packages downloaded\")\n self.assertEqual(len(Package.objects.filter(process_status=Package.DOWNLOADED)), len(self.aip_uuids))", "title": "" }, { "docid": "864c5f998ca27c53758f24cb708cb9f4", "score": "0.5189739", "text": "def download(self, data_dir, auth):\n with _requests.get(self._download_url(), stream=True, auth=auth) as req:\n req.raise_for_status()\n artifact_file_name = _fortworth.join(data_dir, self.name() + '.zip')\n with open(artifact_file_name, 'wb') as artifact_file:\n for chunk in req.iter_content(chunk_size=MAX_DOWNLOAD_CHUNK_SIZE):\n artifact_file.write(chunk)\n return self.name()\n return None", "title": "" }, { "docid": "b20b22a8d77646490d3ad7362911c54e", "score": "0.51840854", "text": "def maybe_download_and_extract():\n dest_directory = \"./data\"\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n\n statinfo = os.stat(filepath)\n Prune.log('Successfully downloaded')\n extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n if not os.path.exists(extracted_dir_path):\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "title": "" }, { "docid": "6ae83aaf530faddfe13e0904fc90c01e", "score": "0.51827425", "text": "def maybe_download_and_extract():\n dest_directory = FLAGS.data_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "title": "" } ]
fd005ae43ef0b77114e68de5875f0a27
cards = [Card(), Card()]
[ { "docid": "34b9b0d2a71573cd0482676a13efab1d", "score": "0.0", "text": "def bulk_append_card(self, cards: 'list[Card, ...]'):\n assert type(cards)==list\n for each in cards:\n self.append(each)", "title": "" } ]
[ { "docid": "213766e651ded64ad47c308fc1952569", "score": "0.8249101", "text": "def __init__(self, cards = None):\n if cards:\n self.cards = cards\n else:\n self.cards = []", "title": "" }, { "docid": "4b2167436ba39c61662dae8e230b7a27", "score": "0.7980644", "text": "def __init__(self):\n ## list for Card() object## list for Card() objects.\n self.cards = []", "title": "" }, { "docid": "f408b666e53b04611fe5124db55726e7", "score": "0.77852327", "text": "def __init__(self):\n self.cards = []\n for suit in SUITS:\n for face in FACES:\n self.cards.append(Card(face, suit))\n random.shuffle(self.cards)", "title": "" }, { "docid": "f8b9a6992c32446cc237ce31a2fc19ab", "score": "0.76019", "text": "def __init__(self):\n self.cards = []\n suits = ['Heart', 'Diamond', 'Spade', 'Club']\n ranks = [i for i in range(2, 15)]\n for suit in suits:\n for rank in ranks:\n self.cards.append(Card(suit, rank))", "title": "" }, { "docid": "62f4aad70090e1d820b0c18e36fa4ab4", "score": "0.7421801", "text": "def __init__(self, decks=1):\n self.cards = []\n for i in range(decks):\n self.cards.extend([Card(rank=r, suit=s) for s in Card.suits for r in Card.ranks])", "title": "" }, { "docid": "30cb1773558f3aa09a70c6c0182197fc", "score": "0.7374969", "text": "def __init__(self):\n self.cards = [Card(s, r) for s in Card.SUITES for r in Card.RANKS]", "title": "" }, { "docid": "8773b79658e0bd977fe8408742eca2e5", "score": "0.7359839", "text": "def make_cards(self):\n i = 0\n for item in self.cards_list:\n self.cards.append(Card(i, item[0], item[1], item[2], item[3],\n item[4]))", "title": "" }, { "docid": "22d7a492e169e70d38cb7c3da00ab1f6", "score": "0.73264474", "text": "def __init__(self):\n possibleCards = [Club(), Heart(), Spade(), Diamond()]\n self.stack = []\n for i in range(8):\n self.stack = self.stack + [possibleCards[x] for x in range(len(possibleCards))]", "title": "" }, { "docid": "0e56a19db6f7140c963812103fa56edd", "score": "0.72345006", "text": "def __init__(self):\n self.deck = []\n for suit in self.suits:\n for card in self.cards:\n self.deck.append(card+\" \"+suit)", "title": "" }, { "docid": "c822f91bf87007a78e34897c48e386b6", "score": "0.72238857", "text": "def __init__(self, deck=deck.Deck()):\n self.card_list = []\n self.card_rem_list = []\n self.deck = deck\n\n # Initialize board state with 12 cards\n for i in range(0, 12):\n self.add_card()", "title": "" }, { "docid": "e2a93e663c166efc863d6768faa5c931", "score": "0.72176313", "text": "def __init__(self):\n\t\tfor x in Card.RANK:\n\t\t\tfor y in Card.SUIT:\n\t\t\t\tself.cards.append(x+y)", "title": "" }, { "docid": "47de117b9fdd440d9d9ca80444a8a263", "score": "0.7195225", "text": "def __init__(self):\n self.cards = deque()\n for s in suits:\n for r in ranks:\n self.cards.append(Card(r, s))", "title": "" }, { "docid": "7865613bce46f2f41025cb92d681f521", "score": "0.71666217", "text": "def __init__(self):\n self.cards = []\n self.delt_cards = []\n for suit in ALLSUITS:\n for rank in ALLRANKS:\n card = Card()\n card.assign_rank(rank)\n card.assign_suit(suit)\n self.cards.append(card)", "title": "" }, { "docid": "384c5904bcc4ea28b6b8acccc988d814", "score": "0.7140856", "text": "def __init__(self):\n\n self._cards = []\n self._discard_pile = []", "title": "" }, { "docid": "f3f0a989479de8f1b34e7dc8f0fee358", "score": "0.7079945", "text": "def __init__(self):\r\n # The cards of the deck\r\n self.__cards = []\r\n\r\n # Four possible suits diamonds, hearts, clubs and spades\r\n self.__suits = [\"D\", \"H\", \"C\", \"S\"]\r\n\r\n # Creates the deck\r\n for number in 
range(1, 14):\r\n\r\n for suit in self.__suits:\r\n self.__cards.append(Card(suit, number))", "title": "" }, { "docid": "a5f67a267a7158f868e056171534e5ae", "score": "0.6983324", "text": "def take(self, cards):\n if isinstance(cards, list):\n for card in cards:\n if isinstance(card, Card):\n self.cards.append(card)", "title": "" }, { "docid": "2c0a9755ae90ec63e33b2272b46f76f3", "score": "0.6971052", "text": "def __init__(self, deck_count=2, empty=True, cur_round = 1):\r\n ranks = [\"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"J\", \"Q\", \"K\"]\r\n suits = ['diamond', 'heart', 'club', 'spade', 'star']\r\n self.deck_count = deck_count\r\n self.cards = []\r\n # initialize cards\r\n if not empty:\r\n for i in range(deck_count):\r\n for suit in suits:\r\n for rank in ranks:\r\n card = Card(suit, rank, cur_round)\r\n self.cards.append(card)\r\n self.cards.append(Card('', \"J1\", cur_round))\r\n self.cards.append(Card('', \"J2\", cur_round))\r\n self.cards.append(Card('', \"J3\", cur_round))\r\n self.shuffle()", "title": "" }, { "docid": "f29c2b4de67a3dd811445455d4cb0f9e", "score": "0.69419676", "text": "def __init__(self, num_decks=1):\n\n self.stack = []\n for i in range(num_decks):\n deck = Deck()\n\n for card in deck.cards:\n self.stack.append(card)\n random.shuffle(self.stack)", "title": "" }, { "docid": "ea8adefce2ccdef371424e6dac2bcdb1", "score": "0.6939173", "text": "def __init__(self):\n self.deck = list(range(1, 14))\n self.current_card = None\n self.last_card = None", "title": "" }, { "docid": "aaeaf27f976efe5736e0899d8daa9f99", "score": "0.69203186", "text": "def add(self, cards):\r\n if (type(cards) != list):\t\r\n cards = [cards]\r\n\r\n shuffle(cards)\r\n\r\n for card in cards:\r\n self.cards.insert(0, card)", "title": "" }, { "docid": "3872b84f89cf9e68cac3c64112badf37", "score": "0.6911686", "text": "def __init__(self):\n self.cards = []\n self.make_cards()\n self.deck_community = Deck_Community()\n self.deck_chance = Deck_Chance()\n self.sort_deck()\n self.shuffle_deck()", "title": "" }, { "docid": "80597493b9296d02a4cdbc6e633612b4", "score": "0.68741536", "text": "def __init__(self, win):\n #the code to create the cards came from class notes \n self._resetCards = []\n self._win = win\n self._cards = []\n for name in ['1', '1', '1', '1', '1', '2', '2', '2', '2', '3', '3', '3',\n '3', '4', '4', '4', '4', '5', '5', '5', '5', '7', '7', '7',\n '7', '8', '8', '8', '8', '10', '10', '10', '10', '11',\n '11', '11', '11', '12', '12', '12', '12', 'sorry',\n 'sorry', 'sorry', 'sorry', 'sorry']:\n c = Card(name + '.png')\n self._cards.append(c)", "title": "" }, { "docid": "f540095792a8b25d594c2cf6ed32de22", "score": "0.6827746", "text": "def __init__(self):\n self._deck_list = []\n for rank in RANKS:\n for suit in SUITS:\n new_card = PlayingCard(rank, suit)\n self._deck_list.append(new_card)\n\n self.shuffle()", "title": "" }, { "docid": "2bb69f011d58982395e36676c3f22b06", "score": "0.6825034", "text": "def generate_cards():\n cards = []\n for color in [\"red\", \"blue\", \"yellow\", \"green\"]:\n for n in range(1, 10):\n for i in range(2):\n cards.append(Card(number=n, symbol=color))\n return cards", "title": "" }, { "docid": "f7ba8aca8e1c45518625bb457a368bad", "score": "0.68245965", "text": "def __init__(self):\n self._deck_list = []\n for rank in RANKS:\n for suit in SUITS:\n playing_card = PlayingCard(rank, suit)\n self._deck_list.append(playing_card)\n self.shuffle()", "title": "" }, { "docid": "97a3b85df56234388d4259e99517c8dd", "score": "0.68217385", "text": "def 
__init__(self):\n\n\t\t# Keeps track of what cards are in the deck\n\t\tself.cards = []\n\n\t\tfor value in list(range(2,11)) + [\"J\",\"Q\",\"K\",\"A\"]:\n\t\t\tfor suit in [\"Spade\",\"Diamond\",\"Heart\",\"Club\"]:\n\t\t\t\tself.cards.append(Card(str(value),suit))", "title": "" }, { "docid": "0ed505748923c4e2f662c3e6a9826453", "score": "0.6705846", "text": "def __init__(self, win, board):\n self._deck = [] #creates an empty list for the deck\n self._win = win\n self._currentDepth = 0 #sets depth to 0\n self._piece = []\n self._board = board\n self._spaces = []\n\n for value in ['1']:\n for _ in range(20):\n c = Card(value, self)\n self._deck.append(c)\n \n for value in ['2', '3', '4', '5', '7', '8', '10', '11', '12',\\\n 'sorry']:\n for _ in range(16):\n c = Card(value, self)\n self._deck.append(c)\n \n #adds all cards to the deck", "title": "" }, { "docid": "72d8914ac5f1cdfad478b7eda4bde831", "score": "0.6679107", "text": "def cards(self):\n return [x.clone() for x in self._cards]", "title": "" }, { "docid": "c13fbe010764ed9419796927176a805a", "score": "0.6678962", "text": "def create_deck(self):\n for s in [\"Spades\", \"Clubs\", \"Diamonds\", \"Hearts\"]:\n for v in [\n 'Ace', 2, 3, 4, 5, 6, 7, 8, 9, 10, 'Jack', 'Queen', 'King'\n ]:\n self.deck.append(Card(s, v))", "title": "" }, { "docid": "054005c85f3023fbcf46e05a2d821f3a", "score": "0.66738665", "text": "def __init__(self):\n self.deck = []\n self.suits = [\"Clubs\", \"Spades\", \"Hearts\", \"Diamonds\"]", "title": "" }, { "docid": "9dce4cdbb19df7914b4c19b3b986e722", "score": "0.66490114", "text": "def get_cards(self, cards):\n if self.is_human:\n print(\"You received the following cards:\")\n print(\", \".join(str(card) for card in cards))\n sleep(LONG_PAUSE)\n print()\n for card in cards:\n if card.name not in self.hand:\n self.hand[card.name] = list()\n self.hand[card.name].append(card)\n if len(self.hand[card.name]) == 4:\n self.make_book(card.name)", "title": "" }, { "docid": "f8e5554233bcda66a2592691c894f72b", "score": "0.6646144", "text": "def add_cards(self, cards_list):\n if type(cards_list) != list: # In case a single card is drawn that someone doesn't put into a list\n cards_list = [cards_list]\n for card in cards_list:\n self.cards.append(card)", "title": "" }, { "docid": "5d63ae8ec152a1ff90af6f97abb6da70", "score": "0.6614954", "text": "def __init__(self, label=''):\n self.cards = []\n self.label = label", "title": "" }, { "docid": "ac1164f13c9e588992604264b01d46a2", "score": "0.6613593", "text": "def SetCards(self, cards):\n self.Clear()\n for c in cards: self.AddCard(c)", "title": "" }, { "docid": "1525d6de695157fbed99912bbeb2afc3", "score": "0.6565688", "text": "def init_cards():\n\n global color\n global value\n global card\n\n for c in color:\n for v in value:\n card.append((v,c))", "title": "" }, { "docid": "e286169fc7a08dd1c7f3431f4c859116", "score": "0.65168154", "text": "def add_cards(self, cards: List[Card]) -> None:\n for card in cards:\n assert card not in self._cards\n self._cards.append(card)", "title": "" }, { "docid": "cd891ac2bb5787c8115dbcb9504bdd9c", "score": "0.65063417", "text": "def __init__(self,name, location):\n self.name = name\n self.location = location\n self.cards = []", "title": "" }, { "docid": "5462ef3798fb5e8a5b23ac83d0218993", "score": "0.6467722", "text": "def random_cards(ranks, suits, count):\n result = []\n random_ranks = np.random.choice(ranks, count)\n random_suits = np.random.choice(suits, count)\n for i in range(count):\n result.append(Card(np.random.choice(ranks), 
np.random.choice(suits)))\n #result.append(Card(random_ranks[i], random_suits[i]))\n return result", "title": "" }, { "docid": "68921da086e75f916026a829ab39bdae", "score": "0.64615005", "text": "def cards(self, cards):\n\n self._cards = cards", "title": "" }, { "docid": "78c6155ec0be7b5cb5bcbf63e742d1fe", "score": "0.64542705", "text": "def getCards(self):\n\t\t# Return a copy of the cards, not the cards themselves\n\t\treturn self.cards[:]", "title": "" }, { "docid": "dd7c737a31b1f75f32f952d0bc52d543", "score": "0.6439821", "text": "def deal_cards(deck1):\n\n player1_cards = []\n player2_cards = []\n\n for x in range(5):\n player1_cards.append(deck1.draw_a_card())\n player2_cards.append(deck1.draw_a_card())\n\n return player1_cards, player2_cards", "title": "" }, { "docid": "efd4b4f916edcbec9c37e117d3e5a9ab", "score": "0.6429303", "text": "def __init__(self, currdeck):\n self.hand=[]\n self.draw(currdeck)\n self.draw(currdeck)\n self.value=self.checkValue()", "title": "" }, { "docid": "a54abb9d72fda1f1d3c9995f7ade8666", "score": "0.6417981", "text": "def add(self, cards):\n\n assert cards, \"Pass a list of minimum 1 card to remove.\"\n for card in cards:\n self._hand.append(card)", "title": "" }, { "docid": "ccd86f8b5a476b5e786363319521e318", "score": "0.6410073", "text": "def create_hand(player):\n cards = player.player_cards()\n hand = []\n for card in cards:\n suit, rank = card.identify_card()\n new_card = rank+suit.lower()\n hand.append(tCard.new(new_card))\n return hand", "title": "" }, { "docid": "a4cdb19ef0c1801570d21b08e72d30fa", "score": "0.63983893", "text": "def full_deck():\n output = [] # list of cards so far to be returned\n for suit in range(Card.NUM_SUITS):\n for rank in range(1,Card.NUM_RANKS+1): # skip the None value\n output.append(Card(suit,rank))\n return output", "title": "" }, { "docid": "9b27bbbf6a4885f1ab29712e28e1d872", "score": "0.6370411", "text": "def draw_hand(self): #pylint: disable=R0201\n deck = [2, 3, 4, 5, 6, 7, 8, 9, 10, \"Jack\", \"Queen\", \"King\", \"Ace\"]\n random.shuffle(deck)\n card = deck.pop()\n card2 = deck.pop()\n return [card, card2]", "title": "" }, { "docid": "01dd2701594199136ad89336974cb06c", "score": "0.6369361", "text": "def deal_cards(self, no_cards):\n cards = []\n for _ in range(0,no_cards):\n card = random.choice(self.cards)\n self.cards.remove(card)\n self.delt_cards.append(card)\n cards.append(card)\n return cards", "title": "" }, { "docid": "a3e6d24c4d82edc59222d27612b3c84b", "score": "0.6351894", "text": "def __init__(self, cards, discardPileZone):\r\n self.discardPileZone = discardPileZone\r\n discardPileSet = set(discardPileZone)\r\n ListZone.__init__(self, [card for card in cards if card in discardPileSet], zoneType=GAINED)", "title": "" }, { "docid": "ce6c4261f35ce3178807d002c973a277", "score": "0.6351196", "text": "def create_deck():\n deck = []\n\n for i in range(SUITS):\n for j in range(1, len(CARDS) + 1):\n deck.append(j)\n return deck", "title": "" }, { "docid": "65bae9778a70d93c1e3e4bf70354ad91", "score": "0.6342788", "text": "def __init__(self):\n self.deck = []\n\n with open('deck.json') as deck_file:\n decks = load(deck_file)\n\n for card in decks['cards']:\n element = Elements(card['element'])\n color = Colors(card['color'])\n number = int(card['number'])\n self.deck.append(Card(element, color, number))\n\n random.shuffle(self.deck)", "title": "" }, { "docid": "999256006c16efb68bc93977589a1754", "score": "0.63380045", "text": "def populate(self):\n for suit in Card.SUITS:\n for rank in Card.RANKS:\n 
self.add(Card(rank, suit))", "title": "" }, { "docid": "53cda6b6d62cc19826d417b14b519063", "score": "0.6333441", "text": "def first_stage(cards):\n NightHandler.identifier = RANDOM.randint(0, MAX_CARD_IDENTIFIER)\n NightHandler.nonce = RANDOM.randint(0, MAX_CARD_NONCE)\n identity_value = str(NightHandler.identifier) + str(NightHandler.nonce)\n\n choice = json.dumps((ROLE, NightHandler.vote), cls=EnumEncoder)\n my_card = [CRYPTO_INSTANCE.encrypt(choice), identity_value]\n cards.append(my_card)\n RANDOM.shuffle(cards)\n return cards", "title": "" }, { "docid": "b77ccbf94cc0be8c99f24e2578c7c6b5", "score": "0.632329", "text": "def _random_cards(count):\n cards = set()\n while len(cards) < count:\n cards.add(Card.random())\n return cards", "title": "" }, { "docid": "80ef8dc38bb4001c39aad8685e5372f2", "score": "0.6315468", "text": "def __init__(self):\n self._hand_list = []", "title": "" }, { "docid": "a0ddf67fc6560fc99adbf559f6ca60dc", "score": "0.6312242", "text": "def cards(self) -> List[Card]:\n return self._cards", "title": "" }, { "docid": "1a7f143bcf716c272da7587372d708f9", "score": "0.63066846", "text": "def _make_deck():\n\n carnivore_cards = [\n Card(food, Trait.carnivore)\n for food in range(\n Card.MIN_CARNIVORE_FOOD, Card.MAX_CARNIVORE_FOOD+1)\n ]\n\n food_trait_combinations = product(\n range(Card.MIN_NON_CARNIVORE_FOOD, Card.MAX_NON_CARNIVORE_FOOD+1),\n Trait)\n non_carnivore_cards = [\n Card(food, trait)\n for food, trait in food_trait_combinations\n if trait is not Trait.carnivore\n ]\n\n return list(sorted(carnivore_cards + non_carnivore_cards))", "title": "" }, { "docid": "d250a086edca6627eaa5e324cb335b7f", "score": "0.62879634", "text": "def get_cards(self):\r\n print(\"Getting card IDs\")\r\n self.cards = list()\r\n for info in self.card_ids:\r\n self.cards += info[\"cards\"]", "title": "" }, { "docid": "84072b4a5733f8e0cddbd889b45417cd", "score": "0.6279974", "text": "def add_cards_to_hand(self, cards):\r\n self.hand = cards", "title": "" }, { "docid": "3f99a3e8ba394bcaf8d70d94396ef72a", "score": "0.62759256", "text": "def initialize_deck(self):\n cards = [(card, pattern) \n for card, pattern \n in zip(list(range(1,11))*3, list(range(1,4))*10)]\n return np.array(cards)", "title": "" }, { "docid": "fcd5dc2f736cee103256ba5b42ad699a", "score": "0.62608963", "text": "def add_cards(self, lst):\n for card in lst:\n self.enqueue(card)", "title": "" }, { "docid": "d9d5edcf3eaf26a43444a01fd899bcc5", "score": "0.62529624", "text": "def get_cards(self):\n return self.cards", "title": "" }, { "docid": "d9d5edcf3eaf26a43444a01fd899bcc5", "score": "0.62529624", "text": "def get_cards(self):\n return self.cards", "title": "" }, { "docid": "8a5cfea56e5b6347780ca523a44f0a76", "score": "0.62455004", "text": "def get_cards_that_beat(self, card):", "title": "" }, { "docid": "bedeaa4b5aafa1c36e880edaa6f0a20b", "score": "0.62247103", "text": "def full_deck():\n logging.info(\"root\")\n return cards", "title": "" }, { "docid": "5c3f9ee5e4e018b7b3047808d7250f42", "score": "0.6213667", "text": "def cards(self):\n return self.CARDS", "title": "" }, { "docid": "efcb868b0310041502b2a85203299827", "score": "0.61991215", "text": "def __init__(self, card_width=150, card_height=231):\n self.imgs = dict(); self.card_width=150; self.card_height=231\n for card in [[c, pygame.transform.scale(pygame.image.load('Cards/'+c),\n (int(card_width), int(card_height)))] for c in listdir('Cards') if '-' in c]:\n self.imgs[card[0].replace('.png',\"\")] = card[1]\n self.card_back_img = 
pygame.transform.scale(pygame.image.load('Cards/red.png'),\n (int(card_width), int(card_height)))\n\n self.deck = list(self.imgs.keys())\n shuffle(self.deck)\n self.remaining = self.deck\n self.used = list()\n self.active_card = None", "title": "" }, { "docid": "71dfb3e81a894dd2bfd463590c2a136b", "score": "0.6198873", "text": "def generate_cards():\n cards = []\n suit = {1: '♦', 2: '♥', 3: '♠', 4: '♣'}\n\n for count in range(1, 5):\n # Aces:\n cards.append('A')\n cards.append(suit[count])\n\n # Numbers 2-9:\n for number in range(2, 10):\n cards.append(chr(48 + number))\n cards.append(suit[count])\n\n # Ten & Court Cards:\n cards.append('T')\n cards.append(suit[count])\n cards.append('J')\n cards.append(suit[count])\n cards.append('Q')\n cards.append(suit[count])\n cards.append('K')\n cards.append(suit[count])\n\n return cards", "title": "" }, { "docid": "11047c1caf03f3b944493c4f10ac9c2b", "score": "0.6197079", "text": "def return_cards(self):\r\n return self.__cards", "title": "" }, { "docid": "4e208594d59373c3177d65e6ad35a4f5", "score": "0.61958987", "text": "def _generate_deck(self):\n\t\tdeck = []\n\t\tsuits = [\"Hearts\",\"Diamonds\",\"Clubs\",\"Spades\"]\n\t\tvalues = [\"Ace\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"Jack\",\"Queen\",\"King\"]\n\t\tall_cards = list(itertools.product(suits,values)) #generate all possible card combinations\n\t\trandom.shuffle(all_cards) #randomize the card combinations\n\t\t\n\t\t# Generate the randomized deck of cards\n\t\tfor i in range(len(all_cards)):\n\t\t\tnew_card = Card(all_cards[i][0],all_cards[i][1])\n\t\t\tdeck.append(new_card)\n\n\t\treturn deck", "title": "" }, { "docid": "af5ed9df1c654dca7cd616d9d596cec7", "score": "0.61948144", "text": "def setup(self):\r\n for i in range(7):\r\n self.cards.append(choice(UNO_CARDS))", "title": "" }, { "docid": "d058c19ce4b2084382ea613e9427d446", "score": "0.61817265", "text": "def create_deck_of_cards():\n suits = [\"C\", \"S\", \"H\", \"D\"]\n deck = [(suit, val) for suit in suits for val in range(1, 14)]\n return deck", "title": "" }, { "docid": "be4d9e24355211882054cb33fec6d4ca", "score": "0.61791915", "text": "def add_one_card(card):\r\n global my_hand # REQUIRED to allow us to modify\r\n # variable outside function\r\n print(\"add_one_card(\",\r\n card, \")\", sep=\"\")\r\n my_hand.append(card) # Add to list (no check to insure\r\n # not here already)\r\n list_hand()", "title": "" }, { "docid": "f60ec4ede56f1b1b23d8b074a2672f30", "score": "0.6156616", "text": "def test_append(self):\n # Empty hand\n hand = Hand()\n self.assertEqual(str(Hand()), \"\")\n\n # One Card\n hand.append(Card('A', 'C'))\n self.assertEqual(str(hand), \"[AC]\")\n\n # Two Card\n hand.append(Card('2', 'C'))\n self.assertEqual(str(hand), \"[AC], [2C]\")\n\n # Three Card\n hand.append(Card('3', 'C'))\n self.assertEqual(str(hand), \"[AC], [2C], [3C]\")", "title": "" }, { "docid": "93a9c2528b825ee8d6636d88ebf99ad0", "score": "0.61546123", "text": "def create_deck():\n cards = []\n\n for suit in SUITS:\n for rank in RANKS:\n card_str = '%s-%s' % (rank, suit)\n cards.append(card_str)\n\n return cards", "title": "" }, { "docid": "865ab4622f16b71823ae0a8b63c93244", "score": "0.6151846", "text": "def initalize_cards():\n var = 0\n while var == 0:\n c1 = card(graphics.Point(100,50), 150, 75)\n c2 = card(graphics.Point(275,50), 150, 75)\n c3 = card(graphics.Point(450,50), 150, 75)\n c4 = card(graphics.Point(625,50), 150, 75)\n c5 = card(graphics.Point(100,150), 150, 75)\n c6 = card(graphics.Point(275,150), 150, 75)\n c7 = 
card(graphics.Point(450,150), 150, 75)\n c8 = card(graphics.Point(625,150), 150, 75)\n c9 = card(graphics.Point(100, 250), 150, 75)\n c10 = card(graphics.Point(275, 250), 150, 75)\n c11 = card(graphics.Point(450, 250), 150, 75)\n c12 = card(graphics.Point(625,250), 150, 75)\n c13 = card(graphics.Point(100, 350), 150, 75)\n c14 = card(graphics.Point(275, 350), 150, 75)\n c15 = card(graphics.Point(450, 350), 150, 75)\n c16 = card(graphics.Point(625, 350), 150, 75)\n \n totallist = [c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16]\n \n cardlist = finalize_cards(totallist)\n lists_of_sets = show_sets(totallist)\n count = len(lists_of_sets)\n \n if count < 3: #minimum amount of sets you want to find\n print('not enough sets, reshuffling')\n else:\n var = 1\n draw_all(cardlist)\n \n return totallist", "title": "" }, { "docid": "d230fbb7f799b0fef8c4aba8c06be5d5", "score": "0.6151194", "text": "def make_new_deck(self):\n deck_nominals = self.nominals * 4 * self.no_of_decks\n deck_suits = self.suits * 13 * self.no_of_decks\n\n random.shuffle(deck_nominals)\n random.shuffle(deck_suits)\n\n self.deck = iter([Card(x, y) for x, y in zip(deck_suits, deck_nominals)][:-self.plastic_position])", "title": "" }, { "docid": "1e174ccf112f094b8df4c041dde7bf2f", "score": "0.61497515", "text": "def get_hand(self, card):\n self.hand.append(card)", "title": "" }, { "docid": "aac45db635ba0fe13c8506d73f51cd34", "score": "0.61425596", "text": "def fill_deck(self):\r\n\r\n all_cards = []\r\n\r\n for card_type, card_type_dict in enumerate([actions, effects, guns]):\r\n for cardname, values in card_type_dict.items():\r\n for _ in range(values['max']):\r\n card = {\r\n 'name': cardname, \r\n 'suit': random.choice(self.SUITS),\r\n 'value': random.choice(self.VALUES),\r\n 'action': values['action'],\r\n 'description': values['description'],\r\n 'deck': self\r\n }\r\n if card_type == 1: # effect\r\n card['card_type'] = 'EF'\r\n card['checkable'] = values['checkable']\r\n elif card_type == 2: # gun\r\n card['card_type'] = 'GU'\r\n card['distance'] = values['distance']\r\n card['unlimited'] = values['unlimited']\r\n else: \r\n card['card_type'] = 'AC'\r\n all_cards.append(card)\r\n\r\n random.shuffle(all_cards)\r\n for dic in all_cards:\r\n card = Card(**dic)\r\n card.save()", "title": "" }, { "docid": "96ce484e59ae274a2a31c2423531f238", "score": "0.61363006", "text": "def draw(self):\n return self.cards.pop()", "title": "" }, { "docid": "f1895603dbcd1dc19a45f495c5ac095d", "score": "0.613312", "text": "def build(self):\n for category in [Card.BLUE, Card.GREEN, Card.YELLOW, Card.RED]:\n self.cards.append(UnoCard(category=category, number=Card.ZERO))\n\n for number in [Card.ONE, Card.TWO, Card.THREE, Card.FOUR, Card.FIVE, Card.SIX, Card.SEVEN, Card.EIGHT,\n Card.NINE, Card.SKIP, Card.REVERSE, Card.DRAW_TWO]:\n self.cards.append(UnoCard(category=category, number=number))\n self.cards.append(UnoCard(category=category, number=number))\n\n for i in range(4):\n self.cards.append(UnoCard(category=Card.WILD, number=Card.NONE))\n self.cards.append(UnoCard(category=Card.WILD_FOUR, number=Card.NONE))", "title": "" }, { "docid": "7eca7068c2e399fc70c39b21fef5e9ab", "score": "0.6132315", "text": "def clear(self):\n self.cards = []", "title": "" }, { "docid": "b69a7222563980fe9e38de42500f463f", "score": "0.61144507", "text": "def create_board(game):\n cards = game.get_community_cards()\n board = []\n for card in cards:\n suit, rank = card.identify_card()\n new_card = rank+suit.lower()\n 
board.append(tCard.new(new_card))\n return board", "title": "" }, { "docid": "e0c32ae431da84dfe078dc2bb20d102c", "score": "0.61034775", "text": "def __init__(self, cards_per_player, player_names):\n param_ok = Game.process_input(cards_per_player, player_names)\n if param_ok:\n self.deck = list(full_deck)\n self.cards_per_player = cards_per_player\n self.players = []\n for player_name in player_names:\n self.players.append(Player(player_name))\n else:\n print(\"Game not populated\")", "title": "" }, { "docid": "d97bb48ba251be285c89c2019734ccb0", "score": "0.6094092", "text": "def __init__(self):\r\n self.known_cards = Counter() # Counter of suit -> count\r\n self.known_voids = set() # set of suits we know we don't have\r\n self.number_of_unknown_cards = 4 # we always start with four cards\r", "title": "" }, { "docid": "c9464a01e8d7607fb02f22ab28da944a", "score": "0.6090149", "text": "def make_dummy_game():\n return blackjack.Blackjack([cardmod.Card(alt='QC'),\n cardmod.Card(alt='TD'),\n cardmod.Card(alt='9H')])", "title": "" }, { "docid": "899e68de65d93291c8c282ee008a1f18", "score": "0.6086032", "text": "def __init__(self, players: Player, dealer: Dealer, pot, deck: Deck, community_cards=None):\n Game.__init__(self, players, dealer, pot, deck)\n if community_cards == None:\n self.community_cards = [] #cards in middle of table\n #deals cards to all the players in the game\n self.reset_round(0)", "title": "" }, { "docid": "91e3cc59ea9d37f1e90e00065c2dc818", "score": "0.6075952", "text": "def all_cards(self):\n return self.deck + self.hand + self.cards_in_play + self.discard", "title": "" }, { "docid": "81efd9a44c85358503ed625c46f76d37", "score": "0.6068398", "text": "def give_card(self):\r\n card = self.list[randint(0, len(self.list) - 1)]\r\n self.list.remove(card)\r\n return card", "title": "" }, { "docid": "f9f2fc25b279c7fa23a2942a0b1fe330", "score": "0.6061261", "text": "def __init__(self, players: Player, dealer: Dealer, pot, deck: Deck, SB = .25, rounds=5):\n self.players = players\n self.dealer = dealer\n self.players.append(self.dealer)\n self.pot = pot\n self.rounds = rounds\n self.deck = deck\n self.round_bet = 0\n self.SB = SB\n #shuffle\n self.deck.shuffle()\n #does not deal cards, doesn't know how many", "title": "" }, { "docid": "ab37e9ac3c5b8f1f5c45dee0bb0667a5", "score": "0.6056642", "text": "def take(self, card):\n self.hand.append(card)", "title": "" }, { "docid": "1ae2a384870be625501c323028a3896d", "score": "0.605601", "text": "def random():\n\t\treturn Card(suit=random.randint(1, 4), rank=random.randint(1, 13))", "title": "" }, { "docid": "e543e63829b340839dc0678fcf06fe63", "score": "0.60499847", "text": "def __init__(self, custom_cards=None, custom_suits=None, custom_ranks=None):\n\n default_card_ranks = {\n \"2\": 2,\n \"3\": 3,\n \"4\": 4,\n \"5\": 5,\n \"6\": 6,\n \"7\": 7,\n \"8\": 8,\n \"9\": 9,\n \"10\": 10,\n \"J\": 11,\n \"Q\": 12,\n \"K\": 13,\n \"A\": 14\n }\n\n default_card_suits = {\n \"♠\": \"BLACK\",\n \"♣\": \"BLACK\",\n \"♡\": \"RED\",\n \"♢\": \"RED\"\n }\n\n if custom_cards:\n self.cards = custom_cards\n\n elif custom_suits or custom_ranks:\n self.cards = [\n Card(\n rank=rank,\n rank_value=(custom_ranks if custom_ranks else default_card_ranks)[rank],\n suit=suit,\n color=(custom_suits if custom_suits else default_card_suits)[suit],\n )\n for rank in (custom_ranks if custom_ranks else default_card_ranks)\n for suit in (custom_suits if custom_suits else default_card_suits)\n ]\n\n else:\n self.cards = [\n Card(\n rank=rank,\n 
rank_value=default_card_ranks[rank],\n suit=suit,\n color=default_card_suits[suit],\n ) for rank in default_card_ranks for suit in default_card_suits\n ]\n\n self.evaluator = HandEvaluatorMixin(self.cards)", "title": "" }, { "docid": "d8cb4dad62af7bcf0c5e5a2415c7f958", "score": "0.6047566", "text": "def double_cards(self):\n\n self.cards += deepcopy(self.cards)\n\n # reinitialize hand evaluator\n self.evaluator = HandEvaluatorMixin(self.cards)", "title": "" }, { "docid": "3f4a44cfc64c9d3f21b7ca033a6a1dc7", "score": "0.60454965", "text": "def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n card = random.choice(cards)\n return card", "title": "" }, { "docid": "8886e5b3f0b8a37d716308b3a483a77f", "score": "0.60416013", "text": "def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n return random.choice(cards)", "title": "" }, { "docid": "8886e5b3f0b8a37d716308b3a483a77f", "score": "0.60416013", "text": "def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n return random.choice(cards)", "title": "" }, { "docid": "f98ef459012364adec6e6bbe9197d4f3", "score": "0.6031297", "text": "def receive_cards(self, cards):\n self.cards += cards\n self.calculate_hand()\n self.update_display()", "title": "" }, { "docid": "1ceccb071deaf9a3614f4898b0c55571", "score": "0.60291505", "text": "def get_war_cards(self):\n lst = [self.get_card() for _ in range(self.__war_type)]\n if None in lst:\n return None\n else:\n return lst", "title": "" }, { "docid": "43636a3ac04015aef74a010f895db656", "score": "0.60289335", "text": "def build_deck(self):\n for suit in self.suits:\n for value in range(2, 15):\n self.deck.append(card.Card(suit, value))", "title": "" }, { "docid": "48c2b819583bd321e0cf9fba5e594b15", "score": "0.6027626", "text": "def getCards(self):\n return self._cards", "title": "" }, { "docid": "8c01012218698648c35d91c7d6b271c4", "score": "0.60269", "text": "def generate_card_objects(self, filename: str) -> List[Card]:\n list_lines = self.open_and_read(filename)\n list_card = []\n number_of_entry = len(list_lines)\n for i, line in enumerate(list_lines):\n if line not in ['', '\\n']:\n quantity, card_name, card_set, is_foil = self.get_card_from_string(line)\n new_card = make_card(card_name, card_set, is_foil, quantity)\n print(f'card done : {i + 1}/{number_of_entry}')\n list_card.append(new_card)\n return list_card", "title": "" } ]
036ea3cba0d5886a92390ca5f00e53ac
Get triplets for training the model. A triplet contains an anchor, a positive, and a negative. Select a cowatch pair as the anchor and positive, and randomly sample a negative.
[ { "docid": "e15247c245c934970e0faaefc5f13293", "score": "0.5595899", "text": "def mine_triplets(all_cowatch, features):\n if not isinstance(all_cowatch,list) and not isinstance(features,np.ndarray):\n logging.error(\"Invalid arguments. Type should be list, dict instead of\"+\n str(type(all_cowatch))+str(type(features)))\n return None\n neg_iter = yield_negative_index(len(features), putback=True)\n # 初始化\n triplets = []\n # TODO 这里可以用多线程\n for cowatch in all_cowatch:\n triplet = combine_cowatch_neg(cowatch, neg_iter)\n triplets.append(triplet)\n return triplets", "title": "" } ]
[ { "docid": "308c55f9a7cea6a9d8a73c76adfcb079", "score": "0.6431219", "text": "def generate_triplets(test=False):\r\n while True:\r\n list_a = []\r\n list_p = []\r\n list_n = []\r\n\r\n for i in range(batch_size):\r\n #print(i)\r\n a, p, n = get_triplet(test)\r\n # plt.imshow(a)\r\n # plt.text(50, 50, 'A', bbox=dict(fill=False, edgecolor='red', linewidth=20))\r\n # plt.show()\r\n # plt.imshow(p)\r\n # plt.text(50, 50, 'P', bbox=dict(fill=False, edgecolor='red', linewidth=20))\r\n # plt.show()\r\n # plt.imshow(n)\r\n # plt.text(50, 50, 'N', bbox=dict(fill=False, edgecolor='red', linewidth=20))\r\n # plt.show()\r\n list_a.append(a)\r\n list_p.append(p)\r\n list_n.append(n)\r\n \r\n A = np.array(list_a, dtype='float32')\r\n P = np.array(list_p, dtype='float32')\r\n N = np.array(list_n, dtype='float32')\r\n # a \"dummy\" label which will come in to our identity loss\r\n # function below as y_true. We'll ignore it.\r\n label = np.ones(batch_size)\r\n yield [A, P, N], label", "title": "" }, { "docid": "b444ff98b55afcc18fa612a380a24950", "score": "0.59076834", "text": "def get_triplets(labels, pairs):\n triplets = []\n\n for idx in range(len(pairs)):\n # ng_ids, = np.where((labels != labels[pairs[idx, 0]]).any(axis=1))\n ng_ids, = np.where((labels != labels[pairs[idx, 0]]))\n ng_id = random.choice(ng_ids)\n triplets.append(np.array([pairs[idx, 0], pairs[idx, 1], ng_id]))\n return np.array(triplets)", "title": "" }, { "docid": "cce6b5b1679ed42f63456a1db9eaa679", "score": "0.59055495", "text": "def generate_triplets(cd):\n image_ids = list(imgID_to_descriptor.keys())[::10] # Get all image ids (subject to change based on attribute name)\n split_index = 4 * len(image_ids) // 5 # Split data at this index to train and validation\n idxs = np.arange(len(image_ids))\n np.random.shuffle(idxs) # Shuffle indices\n \n train_img_ids = [image_ids[x] for x in idxs[:split_index]]\n val_img_ids = [image_ids[x] for x in idxs[split_index:]] # Validation set of image ids\n \n train = [] # Instantiate Final List of Triplets for train data\n validation = [] # Instantiate Final List of Triplets for validation data\n\n print (len(train_img_ids))\n for i, image_id in enumerate(train_img_ids): # Go through each Train image id\n if not i % 1000:\n print (i)\n train += generate_triplet_set(image_id, cd) # Generate and add triplet\n\n print (len(val_img_ids))\n for i, image_id in enumerate(val_img_ids):\n if not i % 1000:\n print (i)\n validation += generate_triplet_set(image_id, cd)\n\n train_d_good = np.zeros((len(train), 512))\n for i in range(len(train)):\n train_d_good[i] = train[i][0].reshape(512)\n train_w_good = np.zeros((len(train), 50))\n for i in range(len(train)):\n train_w_good[i] = train[i][1].reshape(50)\n train_d_bad = np.zeros((len(train), 512))\n for i in range(len(train)):\n train_d_bad[i] = train[i][2].reshape(512)\n\n val_d_good = np.zeros((len(validation), 512))\n for i in range(len(validation)):\n val_d_good[i] = validation[i][0].reshape(512)\n val_w_good = np.zeros((len(validation), 50))\n for i in range(len(validation)):\n val_w_good[i] = validation[i][1].reshape(50)\n val_d_bad = np.zeros((len(validation), 512))\n for i in range(len(validation)):\n val_d_bad[i] = validation[i][2].reshape(512)\n\n train = (train_d_good[::2], train_w_good[::2], train_d_bad[::2])\n validation = (val_d_good[::2], val_w_good[::2], val_d_bad[::2])\n return train, validation", "title": "" }, { "docid": "272adf2fb7d9b3f6d50c0cf870aa184e", "score": "0.5886869", "text": "def get_triplet(test=False):\r\n n = a = 
np.random.randint(120)\r\n while n == a:\r\n # keep searching randomly!\r\n n = np.random.randint(120)\r\n #print(n)\r\n a, p = get_image(a, test), get_image(a, test)\r\n n = get_image(n, test)\r\n return a, p, n", "title": "" }, { "docid": "a17694da8cfd9f552bb1517d9e5d9820", "score": "0.5867536", "text": "def preprocess_triplets(anchor, positive, negative):\n\n return (\n preprocess_image(anchor),\n preprocess_image(positive),\n preprocess_image(negative),\n )", "title": "" }, { "docid": "d484ce99f40b4f5dd08829a34168b4d9", "score": "0.58569074", "text": "def preprocess_triplets(anchor, positive, negative):\r\n\r\n return (\r\n preprocess_image(anchor),\r\n preprocess_image(positive),\r\n preprocess_image(negative),\r\n )", "title": "" }, { "docid": "0fa765faf4107651197a4220b86e77fe", "score": "0.58162636", "text": "def build_train_set_uniform(interactions, targets=None, p=0.15):\n\n if targets is None:\n targets = range(interactions.shape[0])\n\n print(\"building train set ...\")\n start = timer()\n # Output variables\n train_set = interactions.copy()\n test_set = {}\n\n csr_inter = interactions.tocsr()\n\n for playlist_id in targets:\n # Get tracks\n tracks = csr_inter.indices[csr_inter.indptr[playlist_id]:csr_inter.indptr[playlist_id + 1]]\n\n # Calc number of tracks to extract\n k = math.ceil(len(tracks) * p)\n\n # Generate indices to extract from interactions\n # This indices are added to the test set\n indices = []\n test_set_i = []\n for _ in range(k):\n t = random.randint(0, len(tracks) - 1)\n while t in indices:\n t = random.randint(0, len(tracks) - 1)\n indices.append(t)\n\n # Remove and add to test set\n track_id = tracks[t]\n train_set[playlist_id, track_id] = 0\n test_set_i.append(track_id)\n\n test_set[playlist_id] = test_set_i\n\n print(\"elapsed time: {:.3f}s\\n\".format(timer() - start))\n\n # Return built sets\n return train_set, test_set", "title": "" }, { "docid": "706a9a539c7fddec5e5a2d416b1aace0", "score": "0.57786554", "text": "def _create_pairs(self):\n pos_pairs, neg_pairs = [], []\n src_labels = get_labels(self.src_dataset).numpy()\n tgt_labels = get_labels(self.tgt_dataset)\n \n for ids, ys in enumerate(src_labels):\n for idt, yt in enumerate(tgt_labels):\n if ys == yt:\n pos_pairs.append([ids, idt, 1])\n else:\n neg_pairs.append([ids, idt, 0])\n\n if self.ratio > 0:\n random.shuffle(neg_pairs)\n pairs = pos_pairs + neg_pairs[: self.ratio * len(pos_pairs)]\n else:\n pairs = pos_pairs + neg_pairs\n return pairs", "title": "" }, { "docid": "81b63d9a21e9669c5b2cf8a720e96470", "score": "0.57754093", "text": "def createtrainset(self):\n c = self.config\n pos = chooselabelsubset(c['pos'], c['nfselpos'], c['trainpickmeth'])\n neg = chooselabelsubset(c['neg'], c['nfselneg'], c['trainpickmeth'])\n return (pos, neg)", "title": "" }, { "docid": "f0bed664801291b2c0a9850467ec2bce", "score": "0.5720384", "text": "def preprocess_triplets_N(anchor, positive, negative):\n\n return (\n preprocess_image_N(anchor),\n preprocess_image_N(positive),\n preprocess_image_N(negative),\n )", "title": "" }, { "docid": "f1cd49a843fbde68e68e7d7cb963a3b4", "score": "0.57059354", "text": "def generate_triplets(X):\n \n triplets = []\n ###########################################################################\n # TODO: Implement the function. 
#\n ###########################################################################\n triplets = list(itertools.combinations(X, 3))\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return triplets", "title": "" }, { "docid": "30ff62a20e1621fa87220cddb29b470d", "score": "0.56271034", "text": "def generate_pos_neg_set(neg_X_train, neg_y_train, pos_X_train, pos_y_train):\n pos_neg = []\n ctr1 = 0\n ctr2 = 0\n for i in range(len(neg_y_train)):\n if neg_y_train[i][-1] != 1: # label is not other\n pos_neg.append(neg_X_train[i] + [0])\n ctr1 += 1\n for i in range(len(pos_y_train)):\n if pos_y_train[i][-1] != 1 and ctr2 < ctr1: # label is not other\n pos_neg.append(pos_X_train[i] + [1])\n ctr2 += 1\n np.random.shuffle(pos_neg)\n return pos_neg", "title": "" }, { "docid": "6b068ea35d242cf87119919315c43232", "score": "0.5579058", "text": "def embed_triplets(model, anchor, neighbour, opposite):\n emb_anchor = model(anchor, training=True)\n emb_neighbour = model(neighbour, training=True)\n emb_opposite = model(opposite, training=True)\n\n return emb_anchor, emb_neighbour, emb_opposite", "title": "" }, { "docid": "748401eb96c7f7d9667c13abf8d9e2e8", "score": "0.5476087", "text": "def sentence_triples_cbow((id, dialog)):\n\tX = []\n\tY = []\n\tfor triple in dialog.all_triples_vectors():\n\t\tX += [numpy.concatenate(triple)] # positive case\n\t\tif model.generate_negatives:\n\t\t\t# negative case\n\t\t\tX += [numpy.concatenate((triple[0], model.random_utterance(not_id=id).vector, triple[2]))]\n\t\t\tY += [1, 0]\n\t\telse:\n\t\t\tY += [1]\n\treturn (X, Y)", "title": "" }, { "docid": "9d8113a51111c18e8606bda9b8dfc424", "score": "0.5443841", "text": "def select_binary_triplets(embeddings_p, embeddings_n, image_p_paths, image_n_paths, alpha):\n trip_idx = 0\n emb_start_idx = 0\n num_trips = 0\n triplets = []\n # VGG Face: Choosing good triplets is crucial and should strike a balance between\n # selecting informative (i.e. challenging) examples and swamping training with examples that\n # are too hard. This is achieve by extending each pair (a, p) to a triplet (a, p, n) by sampling\n # the image n at random, but only between the ones that violate the triplet loss margin. 
The\n # latter is a form of hard-negative mining, but it is not as aggressive (and much cheaper) than\n # choosing the maximally violating example, as often done in structured output learning.\n nrof_images = embeddings_p.shape[0]\n print('start to select triplets')\n for j in range(nrof_images):\n a_idx = j\n\n neg_dists_sqr = np.sum(np.square(embeddings_p[a_idx] - embeddings_n), 1)\n\n # for pair in xrange(j, nrof_images): # For every possible positive pair.\n pair = np.random.randint(j, nrof_images)\n p_idx = emb_start_idx + pair\n pos_dist_sqr = np.sum(np.square(embeddings_p[a_idx,:] - embeddings_p[p_idx,:]))\n\n all_neg = np.where(neg_dists_sqr - pos_dist_sqr < alpha)[0] # VGG Face selection\n hard_neg = np.where(neg_dists_sqr - pos_dist_sqr < 0)[0]\n nrof_random_negs = all_neg.shape[0]\n nrof_hard_negs = hard_neg.shape[0]\n if nrof_random_negs > 0:\n\n if nrof_hard_negs > 0:\n rnd_idx = np.random.randint(nrof_hard_negs)\n n_idx = hard_neg[rnd_idx]\n else:\n rnd_idx = np.random.randint(nrof_random_negs)\n n_idx = all_neg[rnd_idx]\n\n triplets.append((image_p_paths[a_idx], image_p_paths[p_idx], image_n_paths[n_idx]))\n # print('Triplet %d: (%d, %d, %d), pos_dist=%2.6f, neg_dist=%2.6f (%d, %d, %d, %d, %d)' %\n # (trip_idx, a_idx, p_idx, n_idx, pos_dist_sqr, neg_dists_sqr[n_idx], nrof_random_negs, rnd_idx, i, j, emb_start_idx))\n trip_idx += 1\n\n num_trips += 1\n\n np.random.shuffle(triplets)\n return triplets, num_trips, len(triplets)", "title": "" }, { "docid": "3c89bfe7cc3ca7b594131f224bac4a29", "score": "0.54084635", "text": "def __sample__(self):\n entpair2scope = json.load(open(os.path.join(self.path, \"entpair2scope.json\")))\n entpair2negpair = json.load(open(os.path.join(self.path, \"entpair2negpair.json\")))\n neg_pair = []\n\n # Gets all negative pairs.\n for key in entpair2negpair.keys():\n my_scope = entpair2scope[key]\n entpairs = entpair2negpair[key]\n if len(entpairs) == 0:\n continue\n for entpair in entpairs:\n neg_scope = entpair2scope[entpair]\n neg_pair.extend(self.__neg_pair__(my_scope, neg_scope))\n print(\"(MTB)Negative pairs number is %d\" %len(neg_pair))\n \n # Samples the same number of negative pairs as positive pairs.
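(1:1 down-sampling against the positive pairs keeps the subsequent pair-classification batches balanced.)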
\n random.shuffle(neg_pair)\n self.neg_pair = neg_pair[0:len(self.pos_pair)]\n del neg_pair # save the memory ", "title": "" }, { "docid": "4899b142b2d6c34a1c481dc2540ec607", "score": "0.53720504", "text": "def generate_triplet_data(codes,labels,batch_num = 128):\n unique_labels = get_unique_labels(labels)\n data_anchor = np.zeros((1,7,7,2048))\n data_pos = np.zeros((1,7,7,2048))\n data_neg = np.zeros((1,7,7,2048))\n data_y = np.zeros((1),dtype='int32')\n pairs = []\n triplets = []\n yield_idx = 0\n batch_idx = yield_idx\n\n for ID in unique_labels:\n\n ids, = np.where((labels == ID))\n # get anchor-positive pairs.\n ap_pair = generate_anchor_positive(ids, batch_num)\n if len(pairs) > 0 and len(ap_pair) > 0:\n pairs = np.vstack((pairs, ap_pair))\n elif len(ap_pair) > 0:\n pairs = ap_pair\n\n batch_idx = len(pairs)\n\n while (yield_idx + batch_num <= batch_idx):\n triplets = get_triplets(labels, pairs[yield_idx:yield_idx + batch_num])\n if len(triplets) > 0:\n data_anchor = np.vstack((data_anchor,codes[triplets[:, 0]]))\n data_pos = np.vstack((data_pos, codes[triplets[:, 1]]))\n data_neg = np.vstack((data_neg, codes[triplets[:, 2]]))\n data_y = np.vstack((data_y,labels[triplets[:, 0]].reshape(-1,1)))\n yield_idx += batch_num\n else:\n logger.warn(\"No triplet found \")\n\n if yield_idx < batch_idx:\n triplets = get_triplets(labels, pairs[yield_idx:batch_idx])\n data_anchor = np.vstack((data_anchor, codes[triplets[:, 0]]))\n data_pos = np.vstack((data_pos, codes[triplets[:, 1]]))\n data_neg = np.vstack((data_neg, codes[triplets[:, 2]]))\n data_y = np.vstack((data_y,labels[triplets[:, 0]].reshape(-1,1)))\n yield_idx = batch_idx\n\n data_anchor = data_anchor[1:,:]\n data_pos = data_pos[1:,:]\n data_neg = data_neg[1:,:]\n data_y = data_y[1:]\n\n # shuffle data\n indics = np.arange(len(data_y))\n np.random.shuffle(indics)\n data_anchor = data_anchor[indics,:]\n data_pos = data_pos[indics,:]\n data_neg = data_neg[indics,:]\n data_y = data_y[indics,:]\n return [data_anchor,data_pos,data_neg],data_y", "title": "" }, { "docid": "c7bc3602a34f378400b2a8090b8839a5", "score": "0.5355727", "text": "def _get_anchor_positive_triplet_mask(labels):\n # Check that i and j are distinct\n indices_equal = torch.eye(labels.size(0)).bool()\n if labels.is_cuda:\n indices_equal = indices_equal.cuda()\n indices_not_equal = ~indices_equal\n\n # Check if labels[i] == labels[j]\n # Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)\n labels_equal = torch.eq(torch.unsqueeze(labels, 0), torch.unsqueeze(labels, 1))\n\n # Combine the two masks\n mask = indices_not_equal & labels_equal\n\n return mask", "title": "" }, { "docid": "472d81263135af5731a822e761f7ea22", "score": "0.53424746", "text": "def generate_mock_trips(\n user_id, \n trips,\n origin, \n destination, \n label_data = None, \n within_threshold = None,\n start_ts: None = None,\n end_ts: None = None,\n threshold = 0.01,\n max = 0.1, \n has_label_p = 1.0,\n seed = 0):\n \n random.seed(seed)\n within = within_threshold if within_threshold is not None else trips\n trips_within_threshold = [i < within for i in range(trips)]\n result = []\n for within in trips_within_threshold:\n o = generate_trip_coordinates(origin, within, threshold, max)\n d = generate_trip_coordinates(destination, within, threshold, max)\n labels = {} if label_data is None or random.random() > has_label_p \\\n else sample_trip_labels(\n mode_labels=label_data.get('mode_confirm'),\n replaced_mode_labels=label_data.get('replaced_mode'),\n 
purpose_labels=label_data.get('purpose_confirm'),\n mode_weights=label_data.get('mode_weights'),\n replaced_mode_weights=label_data.get('replaced_mode_weights'),\n purpose_weights=label_data.get('purpose_weights')\n )\n trip = build_mock_trip(user_id, o, d, labels, start_ts, end_ts)\n result.append(trip)\n \n random.shuffle(result) \n return result", "title": "" }, { "docid": "c937623b44b533456f09b146c1351cc4", "score": "0.5341548", "text": "def select_triplets(embeddings, nrof_images_per_class, image_paths, people_per_batch, alpha):\n trip_idx = 0\n emb_start_idx = 0\n num_trips = 0\n triplets = []\n \n # VGG Face: Choosing good triplets is crucial and should strike a balance between\n # selecting informative (i.e. challenging) examples and swamping training with examples that\n # are too hard. This is achieve by extending each pair (a, p) to a triplet (a, p, n) by sampling\n # the image n at random, but only between the ones that violate the triplet loss margin. The\n # latter is a form of hard-negative mining, but it is not as aggressive (and much cheaper) than\n # choosing the maximally violating example, as often done in structured output learning.\n\n for i in xrange(people_per_batch):\n nrof_images = int(nrof_images_per_class[i])\n for j in xrange(1,nrof_images):\n a_idx = emb_start_idx + j - 1\n neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)\n for pair in xrange(j, nrof_images): # For every possible positive pair.\n p_idx = emb_start_idx + pair\n pos_dist_sqr = np.sum(np.square(embeddings[a_idx]-embeddings[p_idx]))\n neg_dists_sqr[emb_start_idx:emb_start_idx+nrof_images] = np.NaN\n #all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr<alpha, pos_dist_sqr<neg_dists_sqr))[0] # FaceNet selection\n all_neg = np.where(neg_dists_sqr-pos_dist_sqr<alpha)[0] # VGG Face selecction\n nrof_random_negs = all_neg.shape[0]\n if nrof_random_negs>0:\n rnd_idx = np.random.randint(nrof_random_negs)\n n_idx = all_neg[rnd_idx]\n triplets.append((image_paths[a_idx], image_paths[p_idx], image_paths[n_idx]))\n #print('Triplet %d: (%d, %d, %d), pos_dist=%2.6f, neg_dist=%2.6f (%d, %d, %d, %d, %d)' % \n # (trip_idx, a_idx, p_idx, n_idx, pos_dist_sqr, neg_dists_sqr[n_idx], nrof_random_negs, rnd_idx, i, j, emb_start_idx))\n trip_idx += 1\n\n num_trips += 1\n\n emb_start_idx += nrof_images\n\n np.random.shuffle(triplets)\n return triplets, num_trips, len(triplets)", "title": "" }, { "docid": "f41a394ff3493c3e8b8cf1983fe89b42", "score": "0.5303077", "text": "def _get_test_task(self, index, point_index, proposed_label):\n query_point = self._train_inputs[point_index]\n query_feat = self._train_features[point_index]\n\n ## Construct the set of potential test points to sample from\n if self.test_strategy in ['sample', 'cycle'] and self.include_query_point:\n # Don't add query point to the sampling pool; it will automatically be added\n # to every batch and downweighted appropriately.\n all_inputs, all_labels, all_feats = self._test_inputs, self._test_labels, self._test_features\n else:\n # Treat the query point as just another point that can be sampled\n all_inputs = np.vstack([self._test_inputs, query_point])\n all_labels = np.hstack([self._test_labels, proposed_label])\n all_feats = np.vstack([self._test_features, query_feat])\n\n ## Sample a batch according to `test_strategy`\n if self.test_strategy == 'all':\n X, y, X_feats = all_inputs, all_labels, all_feats\n elif self.test_strategy == 'sample':\n if self.equal_pos_neg_test:\n # Sample an equal number of positive and negative 
examples\n pos_inputs = all_inputs[all_labels == 1]\n pos_feats = all_feats[all_labels == 1]\n neg_inputs = all_inputs[all_labels == 0]\n neg_feats = all_feats[all_labels == 0]\n pos_idxs = np.random.permutation(range(len(pos_inputs)))[:self.test_batch_size // 2]\n neg_idxs = np.random.permutation(range(len(neg_inputs)))[:self.test_batch_size // 2]\n pos_idxs_feat = pos_idxs\n neg_idxs_feat = neg_idxs\n # Concatenate positives and negatives\n X = np.vstack([pos_inputs[pos_idxs], neg_inputs[neg_idxs]])\n X_feats = np.vstack([pos_feats[pos_idxs_feat], neg_feats[neg_idxs_feat]])\n y = np.hstack([np.ones(len(pos_idxs)), np.zeros(len(neg_idxs))])\n else:\n idxs = np.random.permutation(range(len(all_inputs)))[:self.test_batch_size]\n X, y, X_feats = all_inputs[idxs], all_labels[idxs], all_feats[idxs]\n elif self.test_strategy == 'cycle':\n # Cycle through the test set in batches, keeping track of where we are for each query point\n i = self._cycle_indices.get(point_index, 0)\n start, end = i * self.test_batch_size, (i + 1) * self.test_batch_size\n X, y, X_feats = self._test_inputs[start:end], self._test_labels[start:end], self._test_features[start:end]\n self._cycle_indices[point_index] = (i + 1) % self._num_batches\n else:\n raise Exception(f\"Unrecognized test_strategy: '{test_strategy}'\")\n\n ## Weight each point in the outer loss according to desired settings\n weights = np.ones(len(X))\n if self.dist_weight_thresh:\n # Weight points exponentially lower as distance from the query point increases.\n # (since the goal is just to have good finetuning accuracy on the query point)\n # weights = self.kernel.embed(query_point, X) #np.exp(-np.linalg.norm(self.weight_embedding(query_point) - self.weight_embedding(X), axis=-1) * 2.3 / self.dist_weight_thresh)\n weights = np.exp(-np.linalg.norm(query_feat - X_feats, axis=-1)*2.3/self.dist_weight_thresh)\n\n if self.test_strategy in ['sample', 'cycle'] and self.include_query_point:\n # Include query point in every batch, but downweight it\n X, y = np.vstack([X, query_point[None]]), np.hstack([y, proposed_label])\n weights = np.hstack((weights, self.query_point_weight * 1. 
/ self._num_batches))\n\n if self.mixup_alpha:\n lam = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=(len(X),) + (1,) * (len(X.shape) - 1))\n idxs = np.random.permutation(range(len(X)))\n X2, y2 = X[idxs], y[idxs]\n X, y = (lam * X + (1 - lam) * X2).astype(np.float32), lam.squeeze() * y + (1 - lam.squeeze()) * y2\n\n y = np.hstack((y[:,None], weights[:,None]))\n\n return self._to_tensor_task(index, X, y)", "title": "" }, { "docid": "86edc75bfe7bc7e73eec27c2fc5649a0", "score": "0.52999365", "text": "def _get_anchor_positive_triplet_mask(labels):\n # Check that i and j are distinct\n indices_equal = tf.cast(tf.eye(tf.shape(labels)[0]), tf.bool)\n indices_not_equal = tf.logical_not(indices_equal)\n\n # Check if labels[i] == labels[j]\n # Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)\n labels_equal = tf.equal(labels, tf.transpose(labels))\n\n # Combine the two masks\n mask = tf.logical_and(indices_not_equal, labels_equal)\n\n return mask", "title": "" }, { "docid": "5bec22a5c0328fce127fa65eaf2aaf5d", "score": "0.52870184", "text": "def train_test_split(pipe, test=.2):\n if not hasattr(pipe, '__getitem__'):\n raise ValueError(\"Input pipe must be indexable via __getitem__\")\n\n l = len(pipe)\n num_test = math.floor(l*test)\n indices = random.sample(range(l), l) \n train_indices = indices[0:l-num_test]\n test_indices = indices[l-num_test:]\n\n test_pipe = pl.IndexMapperPipe(input=pipe, input_indices=range(0,len(test_indices)), output_indices=test_indices)\n train_pipe = pl.IndexMapperPipe(input=pipe, input_indices=range(0,len(train_indices)), output_indices=train_indices)\n\n return train_pipe, test_pipe", "title": "" }, { "docid": "3af7b2d18e78db9cd068baf3b296e42c", "score": "0.528523", "text": "def sample_triple(self):\n\n user_id = self.sample_user()\n pos_item_id, neg_item_id = self.sample_item_pair(user_id)\n\n return user_id, pos_item_id, neg_item_id", "title": "" }, { "docid": "c041124e56a04642330040bd1dc67c18", "score": "0.52732694", "text": "def triplet_loss(anchor, positive, negative, alpha):\n #with tf.variable_scope('triplet_loss'):\n pos_dist = cuda.reduce_sum(cuda.square(cuda.subtract(anchor, positive)), 1)\n neg_dist = cuda.reduce_sum(cuda.square(cuda.subtract(anchor, negative)), 1)\n\n basic_loss = cuda.add(cuda.subtract(pos_dist, neg_dist), alpha)\n loss = cuda.reduce_mean(cuda.maximum(basic_loss, 0.0), 0)\n\n return loss", "title": "" }, { "docid": "5c7a3436b74ff95f1f21f7cd5a74a928", "score": "0.5265329", "text": "def create_random_pair(n,model):\n df = pd.DataFrame(list(model.wv.vocab.items()), columns=['word','count'])\n list_word = list(df['word'])\n shuffle(list_word)\n \n Pair=[]\n for i in range(n):\n Pair.append([list_word[i],list_word[2*i]])\n return(Pair)", "title": "" }, { "docid": "5a9b487a09049a4afec1118ba7d93b12", "score": "0.5255207", "text": "def create_random_pair(n,model):\n df = pd.DataFrame(list(model.vocab.items()), columns=['word','count'])\n list_word = list(df['word'])\n shuffle(list_word)\n \n Pair=[]\n for i in range(n):\n Pair.append([list_word[i],list_word[2*i]])\n return(Pair)", "title": "" }, { "docid": "70c1c80df25a6c84a336aebdb8a8fcb8", "score": "0.5243617", "text": "def triples(self):\n if len(self.words) < 3:\n return\n\n for i in range(len(self.words) - 2):\n yield (self.words[i], self.words[i+1], self.words[i+2])", "title": "" }, { "docid": "bbd02219668f8d14062f2a5307800cc4", "score": "0.5180519", "text": "def select_binary_triplets_gpu(embeddings_p, embeddings_n, image_p_paths, 
image_n_paths, alpha):\n trip_idx = 0\n emb_start_idx = 0\n num_trips = 0\n triplets = []\n # VGG Face: Choosing good triplets is crucial and should strike a balance between\n # selecting informative (i.e. challenging) examples and swamping training with examples that\n # are too hard. This is achieved by extending each pair (a, p) to a triplet (a, p, n) by sampling\n # the image n at random, but only between the ones that violate the triplet loss margin. The\n # latter is a form of hard-negative mining, but it is not as aggressive (and much cheaper) than\n # choosing the maximally violating example, as often done in structured output learning.\n nrof_images = embeddings_p.shape[0]\n print('start to select triplets')\n for j in range(nrof_images):\n a_idx = j\n\n neg_dists_sqr = np.sum(np.square(embeddings_p[a_idx] - embeddings_n), 1)\n\n # for pair in xrange(j, nrof_images): # For every possible positive pair.\n pair = np.random.randint(j, nrof_images)\n p_idx = emb_start_idx + pair\n pos_dist_sqr = np.sum(np.square(embeddings_p[a_idx,:] - embeddings_p[p_idx,:]))\n\n all_neg = np.where(neg_dists_sqr - pos_dist_sqr < alpha)[0] # VGG Face selection\n hard_neg = np.where(neg_dists_sqr - pos_dist_sqr < 0)[0]\n nrof_random_negs = all_neg.shape[0]\n nrof_hard_negs = hard_neg.shape[0]\n if nrof_random_negs > 0:\n\n if nrof_hard_negs > 0:\n rnd_idx = np.random.randint(nrof_hard_negs)\n n_idx = hard_neg[rnd_idx]\n else:\n rnd_idx = np.random.randint(nrof_random_negs)\n n_idx = all_neg[rnd_idx]\n\n triplets.append((image_p_paths[a_idx], image_p_paths[p_idx], image_n_paths[n_idx]))\n # print('Triplet %d: (%d, %d, %d), pos_dist=%2.6f, neg_dist=%2.6f (%d, %d, %d, %d, %d)' %\n # (trip_idx, a_idx, p_idx, n_idx, pos_dist_sqr, neg_dists_sqr[n_idx], nrof_random_negs, rnd_idx, i, j, emb_start_idx))\n trip_idx += 1\n\n num_trips += 1\n\n np.random.shuffle(triplets)\n return triplets, num_trips, len(triplets)", "title": "" }, { "docid": "b74499985959ad776c4353ad8c1840ca", "score": "0.51709497", "text": "def getMinibatch(self):\r\n pre_states_beta = np.zeros([self.batch_size, self.num_words, self.word_dim+1])\r\n pre_states_alpha = np.zeros([self.batch_size, self.num_words, 2])\r\n if self.priority:\r\n pos_amount = int(self.positive_rate*self.batch_size) \r\n\r\n indices = []\r\n count_pos = 0\r\n count_neg = 0\r\n count = 0 \r\n max_circles = 1000 # max times for choosing positive samples or negative samples\r\n while len(indices) < self.batch_size:\r\n # find random index \r\n while True:\r\n # sample one index (ignore states wrapping over) \r\n index = np.random.randint(1, self.count - 1)\r\n # NB!
prestate (last state) can be terminal state!\r\n if self.terminals[index - 1]:\r\n continue\r\n # use prioritized replay trick\r\n if self.priority:\r\n if count < max_circles:\r\n # if num_pos is already enough but current idx is also pos sample, continue\r\n if (count_pos >= pos_amount) and (self.rewards[index] > 0):\r\n count += 1\r\n continue\r\n # elif num_neg is already enough but current idx is also neg sample, continue\r\n elif (count_neg >= self.batch_size - pos_amount) and (self.rewards[index] < 0): \r\n count += 1\r\n continue\r\n if self.rewards[index] > 0:\r\n count_pos += 1\r\n else:\r\n count_neg += 1\r\n break\r\n \r\n pre_states_beta[len(indices)] = self.states_beta[index - 1]\r\n pre_states_alpha[len(indices)] = self.states_alpha[index - 1]\r\n indices.append(index)\r\n\r\n # copy actions, rewards and terminals with direct slicing\r\n actions = self.actions[indices] \r\n rewards = self.rewards[indices]\r\n terminals = self.terminals[indices]\r\n post_states_beta = self.states_beta[indices]\r\n post_states_alpha = self.states_alpha[indices]\r\n return pre_states_alpha, pre_states_beta, actions, rewards, post_states_alpha, post_states_beta, terminals", "title": "" }, { "docid": "b4c3d02379cb760d7d87d1fc3f0a3e96", "score": "0.51695657", "text": "def _sample_proposals(self, matched_idxs, matched_labels, gt_classes, gt_attributes):\r\n has_gt = gt_classes.numel() > 0\r\n # Get the corresponding GT for each proposal\r\n if has_gt:\r\n gt_classes = gt_classes[matched_idxs]\r\n gt_attributes = gt_attributes[matched_idxs, :]\r\n # Label unmatched proposals (0 label from matcher) as background (label=num_classes)\r\n gt_classes[matched_labels == 0] = self.num_classes\r\n # Label ignore proposals (-1 label)\r\n gt_classes[matched_labels == -1] = -1\r\n else:\r\n gt_classes = torch.zeros_like(matched_idxs) + self.num_classes\r\n gt_attributes = -torch.ones((len(matched_idxs),16), dtype=torch.int64).cuda()\r\n\r\n sampled_fg_idxs, sampled_bg_idxs = subsample_labels(\r\n gt_classes, self.batch_size_per_image, self.positive_sample_fraction, self.num_classes\r\n )\r\n\r\n sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)\r\n return sampled_idxs, gt_classes[sampled_idxs], gt_attributes[sampled_idxs]", "title": "" }, { "docid": "de7f0d3ad8731265a0fea054133fb922", "score": "0.51597255", "text": "def _sample(\n self, positive_triples: torch.Tensor, slot: int, num_samples: int\n ) -> torch.Tensor:\n raise NotImplementedError(\"The selected sampler is not implemented.\")", "title": "" }, { "docid": "39afadef29a724fcd7c74a9dd51595bf", "score": "0.51262677", "text": "def mix_sampling(list_of_examples, negative_examples):\n mixed_negative_examples = []\n batch_size = len(list_of_examples)\n\n for idx, (left_idx, right_idx) in enumerate(negative_examples):\n\n new_left = left_idx\n new_right = right_idx\n\n if random.random() >= 0.5:\n new_left = list_of_examples[random_different_from(batch_size, idx)][random.randint(0, 1)]\n \n if random.random() >= 0.5:\n new_right = list_of_examples[random_different_from(batch_size, idx)][random.randint(0, 1)]\n\n mixed_negative_examples.append((new_left, new_right))\n\n return mixed_negative_examples", "title": "" }, { "docid": "b5443dea8678cf3d9477d5edbd073358", "score": "0.5104255", "text": "def Predictor(self):\n for i in range(3):\n self.prediction += random.choice('10')\n if len(self.test) > 3:\n for i in range(len(self.test) - 3):\n triadkey = self.probs[self.test[i: i + 3]]\n if triadkey > 0.5:\n self.prediction += '0'\n elif
triadkey < 0.5:\n self.prediction += '1'\n else:\n self.prediction += random.choice('01')", "title": "" }, { "docid": "53c87784ccd01b1961400982f8378153", "score": "0.50972545", "text": "def __make_source(self):\n init_list = list(range(1, self.__trgt_num + 1))\n\n random.shuffle(init_list)\n source_list = []\n for i in range(self.__trgt_num):\n init_list.insert(0, init_list.pop())\n source_list.append(init_list[:])\n return source_list", "title": "" }, { "docid": "72eac023fa3d74d4a1812482bd15ca75", "score": "0.5093185", "text": "def triplet_loss(anchor, positive, negative, alpha):\n with tf.variable_scope('triplet_loss'):\n #pos_cos_similarity = tf.reduce_sum(tf.multiply(anchor,positive),1) # 1 : similarity 0 : not similarity\n #pos_cos_similarity = 1 - pos_cos_similarity # 0: similarity 1 :not similarity\n #neg_cos_similarity = tf.reduce_sum(tf.multiply(anchor,negative),1)\n #neg_cos_similarity =1 - neg_cos_similarity\n #basic_loss = tf.add(tf.subtract(pos_cos_similarity,neg_cos_similarity), alpha)\n a = tf.square(tf.subtract(anchor, positive))#shape=(128, 2048)\n print (a,' aaaaaaaaaaa aaaaaaaaaaaaa ')\n pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)# shape=(128,)\n neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)\n \n '''\n top_64 = 64\n value = []\n size_a = tf.size(pos_dist)\n max_index = tf.nn.top_k(pos_dist, size_a)[1]\n index = max_index[:top_64]\n for i in range(top_64):\n j = index[i]\n value.append([pos_dist[j]])\n pos_tensor = tf.convert_to_tensor(value, dtype=tf.float32)\n pos_tensor_top64 = tf.reshape(pos_tensor,[top_64,])\n \n \n #http://blog.csdn.net/noirblack/article/details/78088993\n value = []\n size_a = tf.size(neg_dist)\n min_index = tf.nn.top_k(-neg_dist, size_a)[1]\n index = min_index[:top_64]\n for i in range(top_64):\n j = index[i]\n value.append([neg_dist[j]])\n neg_tensor = tf.convert_to_tensor(value, dtype=tf.float32)\n neg_tensor_top64 = tf.reshape(neg_tensor,[top_64,])\n '''\n\n\n #basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)\n basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)\n loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)\n \n \n return loss,tf.reduce_mean(pos_dist),tf.reduce_mean(neg_dist)", "title": "" }, { "docid": "29085a199b9a7b953f15e1d7f632e3f3", "score": "0.50894016", "text": "def predict(self, triples):\n\t\treturn self._algorithm(triples)", "title": "" }, { "docid": "1cdb337ecb1f89a6ed875181d758e0da", "score": "0.5079997", "text": "def random_pair_neg_samples(self, pos_samples):\n np.random.shuffle(pos_samples)\n num_sample = len(pos_samples)\n neg_samples = []\n miss_num = 0\n\n def split_sent(sample, max_len, sep_id):\n token_ids, _, _, seg_labels, _ = sample\n sep_index = token_ids.index(sep_id)\n left_len = sep_index - 1\n if left_len <= max_len:\n return (token_ids[1:sep_index], seg_labels[1:sep_index])\n return [\n token_ids[sep_index + 1:-1], seg_labels[sep_index + 1:-1]\n ]\n\n for i in range(num_sample):\n pair_index = (i + 1) % num_sample\n left_tokens, left_seg_labels = split_sent(\n pos_samples[i], (self.max_seq_len - 3) // 2, self.sep_id)\n right_tokens, right_seg_labels = split_sent(\n pos_samples[pair_index],\n self.max_seq_len - 3 - len(left_tokens), self.sep_id)\n\n token_seq = [self.cls_id] + left_tokens + [self.sep_id] + \\\n right_tokens + [self.sep_id]\n if len(token_seq) > self.max_seq_len:\n miss_num += 1\n continue\n type_seq = [0] * (len(left_tokens) + 2) + [1] * (len(right_tokens) +\n 1)\n pos_seq = range(len(token_seq))\n seg_label_seq = [-1] + 
left_seg_labels + [-1] + right_seg_labels + [\n -1\n ]\n\n assert len(token_seq) == len(type_seq) == len(pos_seq) == len(seg_label_seq), \\\n \"[ERROR]len(src_id) == lne(sent_id) == len(pos_id) must be True\"\n neg_samples.append([token_seq, type_seq, pos_seq, seg_label_seq, 0])\n\n return neg_samples, miss_num", "title": "" }, { "docid": "bfab983bfe64c3fa084fa41deae03272", "score": "0.50757444", "text": "def training_sample(self):\n sample = {}\n sample[\"context\"] = self.get_context()\n sample[\"answers\"] = self.get_answers()\n if self.solution_factors is not None:\n context_factors = self.get_context_factor_values()\n answers_factors = self.get_answers_factor_values()\n\n sample[\"context_factor_values\"] = self.range_embed_factors(\n context_factors)\n sample[\"answers_factor_values\"] = self.range_embed_factors(\n answers_factors)\n sample[\"context_factors_onehot\"] = self.onehot_embed_factors(\n context_factors)\n sample[\"answers_factors_onehot\"] = self.onehot_embed_factors(\n answers_factors)\n return sample, self.position", "title": "" }, { "docid": "8d622604cd9b4f214b91a7d4a5ba3a7f", "score": "0.5074158", "text": "def generatorTT(n, cant):\n p = [i for i in numpy.arange(0, 0.1, 0.005)] #p_small\n for i in numpy.arange(0.1, 1.05, 0.05): p.append(i)#p_small U p_large\n tria, trip = [], []\n\n for i in p:\n cont1, cont2 = 0, 0\n for j in range(200):\n G, G2 = random_graph(n, i)\n v1, v2 = triangles_triplets(G2)\n cont1 += v1\n cont2 += v2\n tria.append(cont1/cant)\n trip.append(cont2/cant)\n return tria, trip, p", "title": "" }, { "docid": "daead52db613d8dbefdbff837e2958e8", "score": "0.50506794", "text": "def get_training_pairs(self, X_train, y_train, load_train, X_test,\r\n y_test, load_test, prop=1):\r\n # create training+test positive and negative pairs\r\n tr_pairs, tr_y, tr_pairs_y, tr_pairs_load = self.create_pairs(X_train, y_train, load_train)\r\n te_pairs, te_y, te_pairs_y, te_pairs_load = self.create_pairs(X_test, y_test, load_test)\r\n\r\n self.tr_pairs = tr_pairs\r\n self.tr_pairs_y = tr_pairs_y\r\n self.te_pairs = te_pairs\r\n self.te_pairs_y = te_pairs_y\r\n\r\n # Select a potion of the training set\r\n if prop < 1:\r\n size = tr_pairs.shape[0]\r\n idx, _, _, _ = train_test_split(np.arange(size), tr_pairs_y, test_size=1.0 - prop, random_state=42)\r\n tr_pairs = tr_pairs[idx, :, :]\r\n tr_pairs_y = tr_pairs_y[idx, :]\r\n tr_y = tr_y[idx]\r\n tr_pairs_load = tr_pairs_load[idx, :]\r\n\r\n tr_pair0 = np.expand_dims(tr_pairs[:, 0], axis=2)\r\n tr_pair1 = np.expand_dims(tr_pairs[:, 1], axis=2)\r\n te_pair0 = np.expand_dims(te_pairs[:, 0], axis=2)\r\n te_pair1 = np.expand_dims(te_pairs[:, 1], axis=2)\r\n\r\n # Reshape labels\r\n tr_pairs_y0 = np_utils.to_categorical(tr_pairs_y[:, 0], self.num_classes)\r\n tr_pairs_y1 = np_utils.to_categorical(tr_pairs_y[:, 1], self.num_classes)\r\n te_pairs_y0 = np_utils.to_categorical(te_pairs_y[:, 0], self.num_classes)\r\n te_pairs_y1 = np_utils.to_categorical(te_pairs_y[:, 1], self.num_classes)\r\n\r\n return tr_pair0, tr_pair1, tr_pairs_y0, tr_pairs_y1, tr_y, te_pair0, te_pair1, te_pairs_y0, te_pairs_y1, te_y", "title": "" }, { "docid": "b201bd5db7bb760d7d87d1fc3f0a3e96", "score": "0.50373995", "text": "def generate_neg_links(train_filename, test_filename, test_neg_filename):\n\n train_edges = read_edges_from_file(train_filename)\n test_edges = read_edges_from_file(test_filename)\n neighbors = {} # dict, node_ID -> list_of_neighbors\n for edge in train_edges + test_edges:\n if neighbors.get(edge[0]) is None:\n neighbors[edge[0]] = 
[]\n if neighbors.get(edge[1]) is None:\n neighbors[edge[1]] = []\n neighbors[edge[0]].append(edge[1])\n neighbors[edge[1]].append(edge[0])\n nodes = set([x for x in range(len(neighbors))])\n\n # for each edge in the test set, sample a negative edge\n neg_edges = []\n\n for i in range(len(test_edges)):\n edge = test_edges[i]\n start_node = edge[0]\n neg_nodes = list(nodes.difference(set(neighbors[edge[0]] + [edge[0]])))\n neg_node = np.random.choice(neg_nodes, size=1)[0]\n neg_edges.append([start_node, neg_node])\n neg_edges_str = [str(x[0]) + \"\\t\" + str(x[1]) + \"\\n\" for x in neg_edges]\n with open(test_neg_filename, \"w+\") as f:\n f.writelines(neg_edges_str)", "title": "" }, { "docid": "05a30375b1df02a7a1de7c5b6351ccc9", "score": "0.50373083", "text": "def predict(model, test_features):\n\n predictions = [int(prediction)\n for prediction in np.random.randint(0, 100, test_features.shape[0]) < model]\n return predictions", "title": "" }, { "docid": "f126b7399b5163f3b7eec3ae11bf2d07", "score": "0.50357085", "text": "def generate_new_sample(self, idx):\n t1, t2 = self.get_corpus_pair(idx)\n if random.random() > 0.5:\n label = 0\n else:\n t2 = self.get_random_line()\n label = 1\n # This is purely to catch unexpected errors\n assert len(t1) > 0 and len(t2) > 0\n return t1, t2, label", "title": "" }, { "docid": "a2b1afeb4963dd876f1e8d274c2c336b", "score": "0.50344455", "text": "def _sample_shared(\n self, positive_triples: torch.Tensor, slot: int, num_samples: int\n ) -> \"BatchNegativeSample\":\n raise NotImplementedError(\n \"The selected sampler does not support shared negative samples.\"\n )", "title": "" }, { "docid": "48f8b0413ef59b83fa73370cd217d2ad", "score": "0.502829", "text": "def triples(self):\n\t\t\n\t\tif len(self.words) < 3:\n\t\t\treturn\n\t\t\n\t\tfor i in range(len(self.words) - 2):\n\t\t\tyield (self.words[i], self.words[i+1], self.words[i+2])", "title": "" }, { "docid": "7df72d1416e2287f44e87a4b52f949e7", "score": "0.5016483", "text": "def list_maker():\n \n #Creating an empty list\n triplet_list = []\n \n #Our list can not be with numbers higher than 999 as a < b < c and a + b + c == 1000\n for a in range(1000):\n \n #Range (a+1, 1000) makes sure that b > a\n for b in range(a + 1, 1000):\n \n #Same here c > b\n for c in range (b + 1, 1000):\n \n #We know already that a < b < c \n #There for we do not add that condition increase time efficiency by 20 -25%.\n if a + b + c == 1000:\n triplet_list.append([a,b,c])\n \n return triplet_list", "title": "" }, { "docid": "cd6e8cab95195dbefa35ae8fbeeead7b", "score": "0.5002345", "text": "def _prepare_training_data_for_model():\n positive_tweets = twitter_samples.strings('positive_tweets.json')\n negative_tweets = twitter_samples.strings('negative_tweets.json')\n\n positive_preprocessed_tokens = []\n negative_preprocessed_tokens = []\n\n for tweet in positive_tweets:\n positive_preprocessed_tokens.append(remove_noise(tweet))\n\n for tweet in negative_tweets:\n negative_preprocessed_tokens.append(remove_noise(tweet))\n\n positive_formatted_tokens = _get_tweets_for_model(positive_preprocessed_tokens)\n negative_formatted_tokens = _get_tweets_for_model(negative_preprocessed_tokens)\n\n positive_dataset = [(tweet_dict, \"Positive\") for tweet_dict in positive_formatted_tokens]\n negative_dataset = [(tweet_dict, \"Negative\") for tweet_dict in negative_formatted_tokens]\n\n dataset = positive_dataset + negative_dataset\n random.shuffle(dataset)\n train_data = dataset[:8000]\n test_data = dataset[8000:]\n\n return train_data, 
test_data", "title": "" }, { "docid": "cfc4d9ff25b57fd0b1dfcb090c4f009b", "score": "0.5001749", "text": "def shuffle_spiketrains_across_trials(poptens):\n (cells, wins, trials) = poptens.shape\n for cell in range(cells):\n np.random.shuffle(poptens[cell, :, :].T)\n return poptens", "title": "" }, { "docid": "f59fab259f33a64a674be1450bb4ea6a", "score": "0.5001497", "text": "def sample_weights(self):\n n_pos = len(self.positive_pairs)\n n_neg = len(self.negative_pairs)\n return [1 / n_pos] * n_pos + [1 / n_neg] * n_neg", "title": "" }, { "docid": "b443135e673a51c5a7563e5d417bc2f4", "score": "0.49861276", "text": "def train_2d(trainer):\n # s1 and s2 are internal state variables and will \n # be used later in the chapter\n x1, x2, s1, s2 = -5, -2, 0, 0\n results = [(x1, x2)]\n for i in range(20):\n x1, x2, s1, s2 = trainer(x1, x2, s1, s2)\n results.append((x1, x2))\n print('epoch %d, x1 %f, x2 %f' % (i + 1, x1, x2))\n return results", "title": "" }, { "docid": "b9d1e65bb700639d3b0462cf08a09678", "score": "0.49846172", "text": "def sample_nodes(self):\n V = len(self.node2id)\n split = np.random.choice(\n ['train', 'val', 'test'],\n replace=True,\n size=(V,),\n p=(self.train_frac, self.val_frac, self.test_frac)\n )\n\n self.train_nodes = np.where(split == 'train')[0]\n self.val_nodes = np.where(split == 'val')[0]\n self.test_nodes = np.where(split == 'test')[0]", "title": "" }, { "docid": "1781a92c92aa41e55375ba5b3e9a426c", "score": "0.49748796", "text": "def __init__(self, points, alpha, beta):\n #self.startPointIndex = randint(0, len(points)-1)\n self.startPointIndex = 0\n self.points = points\n self.alpha = alpha\n self.beta = beta", "title": "" }, { "docid": "58e36a92aff28cb02d7f617251a466eb", "score": "0.49557745", "text": "def gen_trajectory(domain, traj_len):\r\n\r\n traj = list()\r\n n, m = domain.state_space()\r\n x_start = random.randint(0, n-1)\r\n y_start = random.randint(0, m-1)\r\n state = x_start, y_start\r\n for j in range(traj_len):\r\n actions = domain.action_space()\r\n action = actions[random.randint(0, 3)]\r\n state_prime = domain.dynamics(state, action)\r\n r = domain.get_reward(state_prime)\r\n traj.append((state, action, r))\r\n state = state_prime\r\n return traj", "title": "" }, { "docid": "76c20758d854daa44512a1539c241478", "score": "0.4947757", "text": "def training_set(self):", "title": "" }, { "docid": "dd7c524f5c1b8c20afd98e3ad9e742d2", "score": "0.49468997", "text": "def _trial_shifting(spiketrains, dither, t_starts, t_stops, n_surrogates):\n surrogate_spiketrains = []\n for surrogate_id in range(n_surrogates):\n copied_spiketrain = copy.deepcopy(spiketrains)\n surrogate_spiketrain = []\n # looping over all trials\n for trial_id, single_trial_st in enumerate(copied_spiketrain):\n single_trial_st += dither * (2 * random.random() - 1)\n single_trial_st = np.remainder(\n single_trial_st - t_starts[trial_id],\n t_stops[trial_id] - t_starts[trial_id]\n ) + t_starts[trial_id]\n single_trial_st.sort()\n\n surrogate_spiketrain.append(single_trial_st)\n\n surrogate_spiketrains.append(surrogate_spiketrain)\n return surrogate_spiketrains", "title": "" }, { "docid": "9e30bbad3e182ae39f9732bd7fa3eac1", "score": "0.4942292", "text": "def get_random(all_connections, num_blocks):\n random_dag = []\n # random_dag.append((0, 1, np.random.choice(shared_cnn.CNNCell.default_layer_types)))\n for i in range(2, 2 + num_blocks):\n # previous_layers = np.random.choice(possible_connections, 2, replace=True)\n possible_connections = list(connection for connection in all_connections if 
connection[1] == i)\n # print(possible_connections)\n ids = np.random.choice(len(possible_connections), 2, replace=True)\n for id in ids:\n random_dag.append(possible_connections[id])\n\n return trim_dag(random_dag, {0, 1}, 1 + num_blocks)", "title": "" }, { "docid": "5eef7b6e36d454931d83e23bbb47f1cd", "score": "0.49405327", "text": "def aux_Ex_genTrainingTuple(self, seed=None):\n if seed is not None:\n np.random.seed(seed)\n# lst = []\n# for nn in range(n):\n# diceOld = Dice()\n# deci = np.random.choice([True, False], size=5)\n# diceNew = diceOld.roll(deci)\n# lst += [(diceOld, deci, diceNew)]\n# return lst\n diceOld = Dice()\n deci = np.random.choice([True, False], size=5)\n diceNew = diceOld.roll(deci)\n return diceOld, deci, diceNew", "title": "" }, { "docid": "f3770676a2c2bf891541c0cf16f29c7d", "score": "0.4938251", "text": "def _make_target_set(self):\n self._targets = [neighbor for node in self._sampled_nodes for neighbor in self._graph.neighbors(node)]\n self._targets = list(set(self._targets).difference(self._sampled_nodes))\n random.shuffle(self._targets)", "title": "" }, { "docid": "5e198cfa6c339ed0d2372794a2b51cb9", "score": "0.4936747", "text": "def sample_training_examples(\n ht: hl.Table,\n tp_expr: hl.BooleanExpression,\n fp_expr: hl.BooleanExpression,\n fp_to_tp: float = 1.0,\n test_expr: Optional[hl.expr.BooleanExpression] = None,\n) -> hl.Table:\n ht = ht.select(\n _tp=hl.or_else(tp_expr, False),\n _fp=hl.or_else(fp_expr, False),\n _exclude=False if test_expr is None else test_expr,\n )\n ht = ht.filter(ht._tp | ht._fp).persist()\n\n # Get stats about TP / FP sets\n def _get_train_counts(ht: hl.Table) -> Tuple[int, int]:\n \"\"\"\n Determine the number of TP and FP variants in the input Table and report some stats on Ti, Tv, indels.\n\n :param ht: Input Table\n :return: Counts of TP and FP variants in the table\n \"\"\"\n train_stats = hl.struct(n=hl.agg.count())\n\n if \"alleles\" in ht.row and ht.row.alleles.dtype == hl.tarray(hl.tstr):\n train_stats = train_stats.annotate(\n ti=hl.agg.count_where(\n hl.expr.is_transition(ht.alleles[0], ht.alleles[1])\n ),\n tv=hl.agg.count_where(\n hl.expr.is_transversion(ht.alleles[0], ht.alleles[1])\n ),\n indel=hl.agg.count_where(\n hl.expr.is_indel(ht.alleles[0], ht.alleles[1])\n ),\n )\n\n # Sample training examples\n pd_stats = (\n ht.group_by(**{\"contig\": ht.locus.contig, \"tp\": ht._tp, \"fp\": ht._fp})\n .aggregate(**train_stats)\n .to_pandas()\n )\n\n logger.info(pformat(pd_stats))\n pd_stats = pd_stats.fillna(False)\n\n # Number of true positive and false positive variants to be sampled for the training set\n n_tp = pd_stats[pd_stats[\"tp\"] & ~pd_stats[\"fp\"]][\"n\"].sum()\n n_fp = pd_stats[~pd_stats[\"tp\"] & pd_stats[\"fp\"]][\"n\"].sum()\n\n return n_tp, n_fp\n\n n_tp, n_fp = _get_train_counts(ht.filter(~ht._exclude))\n\n prob_tp = prob_fp = 1.0\n if fp_to_tp > 0:\n desired_fp = fp_to_tp * n_tp\n if desired_fp < n_fp:\n prob_fp = desired_fp / n_fp\n else:\n prob_tp = n_fp / desired_fp\n\n logger.info(\n \"Training examples sampling: tp=%f*%d, fp=%f*%d\",\n prob_tp,\n n_tp,\n prob_fp,\n n_fp,\n )\n\n train_expr = (\n hl.case(missing_false=True)\n .when(ht._fp & hl.or_else(~ht._tp, True), hl.rand_bool(prob_fp))\n .when(ht._tp & hl.or_else(~ht._fp, True), hl.rand_bool(prob_tp))\n .default(False)\n )\n else:\n train_expr = ~(ht._tp & ht._fp)\n logger.info(\"Using all %d TP and %d FP training examples.\", n_tp, n_fp)\n\n label_expr = (\n hl.case(missing_false=True)\n .when(ht._tp & hl.or_else(~ht._fp, True), \"TP\")\n 
.when(ht._fp & hl.or_else(~ht._tp, True), \"FP\")\n .default(hl.null(hl.tstr))\n )\n\n return ht.select(train=train_expr & ~ht._exclude, label=label_expr)", "title": "" }, { "docid": "86ac065197147e8f8534b5cf100a7e30", "score": "0.4936356", "text": "def mix_samples(train_genuine, train_impostors):\n samples = np.vstack((train_genuine, train_impostors))\n\n labels = []\n # Add labels: 1 - user, and 0 - impostor.\n for i in train_genuine:\n labels.append(1)\n for i in train_impostors:\n labels.append(0)\n\n labels = np.array(labels)\n\n #FIXME: Do a unison shuffle on these? Might help with training?\n return samples, labels", "title": "" }, { "docid": "92316cebc1b37d1098b4e3b782d83f48", "score": "0.49355265", "text": "def _get_training(self, df):\n train = []\n subset = df[df['cat'] != '']\n for i in subset.index:\n row = subset.ix[i]\n new_desc = self._strip_numbers(row['desc'])\n train.append( (new_desc, row['cat']) )\n\n return train", "title": "" }, { "docid": "541efc78ca8b80c9fae1ab4bd9aee718", "score": "0.49292997", "text": "def triples(self, triple_pattern):\n return self.content.triples(triple_pattern)", "title": "" }, { "docid": "b9c665eccb004507fcaf625f39ee0fc3", "score": "0.4924767", "text": "def triplet_loss(anchor, positive, negative, alpha):\n with tf.variable_scope('triplet_loss'):\n pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)\n neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)\n\n basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)\n loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)\n\n return loss", "title": "" }, { "docid": "1d7e0ba83a025fb2c6a3b77058a0817a", "score": "0.49128568", "text": "def _get_training(self, df):\n train = []\n subset = df[df['cat'] != '']\n for i in subset.index:\n row = subset.iloc[i]\n new_desc = self._strip_numbers(row['desc'])\n train.append( (new_desc, row['cat']) )\n\n return train", "title": "" }, { "docid": "1d7e0ba83a025fb2c6a3b77058a0817a", "score": "0.49128568", "text": "def _get_training(self, df):\n train = []\n subset = df[df['cat'] != '']\n for i in subset.index:\n row = subset.iloc[i]\n new_desc = self._strip_numbers(row['desc'])\n train.append( (new_desc, row['cat']) )\n\n return train", "title": "" }, { "docid": "88e48efa0e39d71f94be9cf1ecd4a07a", "score": "0.49116498", "text": "def set_first_model_positives(self, config, random_seed) -> List[TextElement]:\n all_positives = oracle_data_access_api.sample_positives(config.train_dataset_name, config.category_name, 10**6,\n random_seed)\n all_without_duplicates = self.data_access.sample_text_elements(config.train_dataset_name, 10**6,\n remove_duplicates=True)['results']\n uris_without_dups = [element.uri for element in all_without_duplicates]\n pos_without_dups = [(uri, label) for uri, label in all_positives if uri in uris_without_dups]\n selected_positives = pos_without_dups[:min(self.first_model_positives_num, len(pos_without_dups))]\n orchestrator_api.set_labels(config.workspace_id, selected_positives)\n\n positive_uris = [uri for uri, label in selected_positives]\n logging.info(f'set the label of {len(selected_positives)} true positive instances as positives '\n f'for category {config.category_name}')\n return positive_uris", "title": "" }, { "docid": "8cdd9cf4e101d1a38d97484dc2a248d5", "score": "0.49073693", "text": "def generate_puzzles(self, weight=None):\n self.prune_puzzles(weight)\n extra_keys = []\n if self.args.noise_support:\n extra_keys.append('fact_1')\n if self.args.noise_irrelevant:\n extra_keys.append('fact_2')\n 
if self.args.noise_disconnected:\n extra_keys.append('fact_3')\n puzzle_ids = self.puzzles.keys()\n for pi in puzzle_ids:\n self.puzzles[pi]['text_story'] = {e: self.stringify(e) for e in self.puzzles[pi]['story']}\n # either the target and query is reasoning from first and last, or memory retrieval from the given story\n if random.uniform(0,1) > self.args.memory:\n self.puzzles[pi]['query'] = self.puzzles[pi]['edge']\n else:\n self.puzzles[pi]['query'] = random.choice(self.puzzles[pi]['story'])\n # populate the target\n self.puzzles[pi]['target'] = self._get_edge_rel(self.puzzles[pi]['query'])['rel']\n self.puzzles[pi]['query_text'] = self._format_edge(self.puzzles[pi]['query'])\n self.puzzles[pi]['text_target'] = self.stringify(self.puzzles[pi]['query'])\n # populate the noise\n self.puzzles[pi]['all_noise'] = []\n for key in extra_keys:\n self.puzzles[pi]['text_{}'.format(key)] = {e: self.stringify(e) for e in self.puzzles[pi][key]}\n self.puzzles[pi]['all_noise'].append(self.puzzles[pi][key])\n # replace edges with name and relations\n self.puzzles[pi]['f_edge'] = self._format_edge_rel(self.puzzles[pi]['edge'])\n self.puzzles[pi]['f_story'] = [self._format_edge_rel(x) for x in self.puzzles[pi]['story']]", "title": "" }, { "docid": "cba5c90588d5fd15070b64836af18947", "score": "0.49064142", "text": "def initial_sample(self):\n\n x0 = [np.array(p.sample()).ravel().tolist() for p in self.models[0].params]\n uniq_params = [str(p) for p in self.models[0].params]\n\n for model in self.models.values():\n param_diffs = np.setdiff1d([str(p) for p in model.params], uniq_params)\n mask = np.array([str(p) in param_diffs for p in model.params])\n x0.extend([np.array(pp.sample()).ravel().tolist() for pp in np.array(model.params)[mask]])\n\n uniq_params = np.union1d([str(p) for p in model.params], uniq_params)\n\n x0.extend([[0.1]])\n\n return np.array([p for sublist in x0 for p in sublist])", "title": "" }, { "docid": "36d6a5b721f2480d7972e1c7de7c5df5", "score": "0.49059337", "text": "def create_data_sequences(x,y, bptt):\n inout_seq = []\n for i in range(len(x)-bptt):\n train_seq_x = x[i:i+bptt]\n train_seq_y = y[i:i+bptt]\n\n train_label = y[i+1:i+bptt+1]\n inout_seq.append((train_seq_x, train_seq_y ,train_label))\n return inout_seq", "title": "" }, { "docid": "688652d410f5b4c1b475ea3f374643b2", "score": "0.49053836", "text": "def generateTrainData():\n x = np.array(np.random.choice(2, FLAGS.total_series_length))\n y = np.roll(x, FLAGS.echo_step)\n y[0:FLAGS.echo_step] = 0\n\n return x.reshape((FLAGS.batch_size, -1)), y.reshape((FLAGS.batch_size, -1))", "title": "" }, { "docid": "f4620dfac19287f5536c058b033eab9c", "score": "0.4898449", "text": "def generate_triplet(self, client_id: str, op_id: str) -> Tuple[Share, Share, Share]:\n # Generate a triplet\n a = rnd.randint(0, q)\n b = rnd.randint(0, q)\n c = a * b % q\n\n # Split each value into multiples shares (each clients will have a share of a, b and c)\n nb_participants = len(self.participant_ids)\n a_shares : List[Share] = share_secret(a, nb_participants)\n b_shares : List[Share] = share_secret(b, nb_participants)\n c_shares : List[Share] = share_secret(c, nb_participants)\n\n # Store the shares in the ttp's dict\n for idx, p_id in enumerate(self.participant_ids):\n self.triplet_dict[(p_id, op_id)] = (a_shares[idx], b_shares[idx], c_shares[idx])\n\n res = self.triplet_dict.get((client_id, op_id))\n\n return res", "title": "" }, { "docid": "6249f266a200d352b932264123b56bc7", "score": "0.48921993", "text": "def triplet_loss(anchor, positive, 
negative, alpha):\n with tf.variable_scope('triplet_loss'):\n # positive distance\n pos_dist = tf.reduce_mean(\n tf.square(\n tf.subtract(anchor, positive)), 1)\n # negative distance\n neg_dist = tf.reduce_mean(\n tf.square(\n tf.subtract(anchor, negative)), 1)\n\n # loss\n basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)\n loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)\n\n return loss", "title": "" }, { "docid": "30a513f37d201358dc0abef57270d731", "score": "0.48879746", "text": "def split_train_test(self, tst_prop):\n if tst_prop > 0:\n rownums = list(range(self.numrows))\n rowshuffle = random.sample(rownums, self.numrows)\n train_size = int(self.numrows - tst_prop*self.numrows)\n train = rowshuffle[:train_size]\n train_data = [r for i, r in enumerate(self.data) if i in train]\n test_data = [r for i, r in enumerate(self.data) if i not in train]\n return train_data, test_data\n return copy.deepcopy(self.data), copy.deepcopy(self.data)", "title": "" }, { "docid": "0a8ff9592015166198f9dfe5cf2108f5", "score": "0.48759452", "text": "def crossover(self, parents):\n newNN = []\n for layer in range(len(parents[0])):\n newLayer = []\n for node in range(len(parents[0][layer])):\n newNode = []\n for weight in range(len(parents[0][layer][node])): #goes through each weight in NN\n choice = random.randint(0,1) #randomly selects which parent to take that weight from\n newNode.append(parents[choice][layer][node][weight]) #appends it to make a new NN\n newLayer.append(newNode)\n newNN.append(newLayer)\n return(newNN) #returns results child NN", "title": "" }, { "docid": "87dd69e79969fbb65bff943ff6bd82a4", "score": "0.4875192", "text": "def gen_anchor_target(cls_output_shape, xs_shape, gt_boxes):\n anchor_scales = [1]\n anchors = generate_anchors(ratios=[0.33, 0.5, 1, 2, 3],\n scales=np.array(anchor_scales))\n # anchors are in box format (x1, y1, x2, y2)\n\n A = anchors.shape[0]\n feat_stride = 255 // 17\n\n allowed_border = 0\n height, width = cls_output_shape\n\n labels = np.zeros((1, 1, A * height, width))\n bbox_targets = np.zeros((1, 4 * A, height, width))\n bbox_inside_weights = np.zeros((1, 4 * A, height, width))\n bbox_outsied_weights = np.zeros((1, 4 * A, height, width))\n\n sr_size = xs_shape\n\n # 1. Generate proposals from bbox deltas and shifted anchors\n shift_x = np.arange(0, width) * feat_stride\n shift_y = np.arange(0, height) * feat_stride\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n shifts = np.vstack((shift_x.ravel(),\n shift_y.ravel(),\n shift_x.ravel(),\n shift_y.ravel())).transpose()\n\n # 2. 
Add K anochors (1, A, 4) to cell K shifts (K, 1, 4)\n #\t to get shift anchors (K, A, 4) and reshape to (K*A, 4) shifted anchors\n K = shifts.shape[0]\n all_anchors = (anchors.reshape((1, A, 4))) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))\n\n all_anchors = all_anchors.reshape((K * A, 4))\n\n # total number of anchors == A * height * width,\n # where height and width are the size of conv feature map\n total_anchors = int(K * A)\n\n # Only keep anchors inside the image\n inds_inside = np.where(\n (all_anchors[:, 0] >= -allowed_border) &\n (all_anchors[:, 1] >= -allowed_border) &\n (all_anchors[:, 2] < sr_size[1] + allowed_border) &\n (all_anchors[:, 3] < sr_size[0] + allowed_border)\n )[0]\n anchors = all_anchors[inds_inside, :]\n\n # label: 1 is positive, 0 is negative, -1 is don't care\n labels = np.empty((len(inds_inside),), dtype=np.float32)\n labels.fill(-1)\n\n # overlaps between anchors and gt boxes\n # overlaps.shape = (#total_anchors, #gts)\n overlaps = bbox_overlaps( #################### cython utilities\n np.ascontiguousarray(anchors, dtype=np.float),\n np.ascontiguousarray(gt_boxes, dtype=np.float))\n\n argmax_overlaps = overlaps.argmax(axis=1) # of shape (#total_anchors, )\n max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps] # of shape (#total_anchors, )\n\n gt_argmax_overlaps = overlaps.argmax(axis=0) # of shape (#gt, )\n gt_max_overlaps = overlaps[gt_argmax_overlaps, np.arange(overlaps.shape[1])] # of shape (#gt, )\n gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]\n\n labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0 # 0.3\n labels[gt_argmax_overlaps] = 1\n labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1 # 0.7\n\n bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)\n # _compute_targets() returns #sifted_anchors-by-4 tensor with each row being (dx, dy, dw, dy),\n # the increment to be learnt by bbx regressor\n bbox_targets = _compute_targets(anchors, gt_boxes[argmax_overlaps, :])\n bbox_inside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)\n bbox_inside_weights[labels == 1, :] = np.array(\n cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS) # RPN_BBOX_INSIDE_WEIGHTS = [1.0, 1.0, 1.0, 1.0]\n\n bbox_outside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)\n if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0: # cfg.TRAIN.RPN_POSITIVE_WEIGHT == -1.0\n # uniform weighting of examples (given non-uniform sampling)\n num_examples = np.sum(labels >= 0) # num_examples is the sum of anchors labeled 1\n positive_weights = np.ones((1, 4)) * 1.0 / num_examples\n negative_weights = np.ones((1, 4)) * 1.0 / num_examples\n else:\n assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &\n (cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))\n positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT / np.sum(labels == 1))\n negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) / np.sum(labels == 0))\n\n bbox_outside_weights[labels == 1, :] = positive_weights\n bbox_outside_weights[labels == 0, :] = negative_weights\n\n # map up to original set of anchors\n labels = _unmap(labels, total_anchors, inds_inside, fill=-1) # labels.shape == (#total_anchors, )\n bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0) # bbox_targets.shape == (#total_anchors, 4)\n bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside,\n fill=0) # bbox_inside_weights.shape == (#total_anchors, 4)\n bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside,\n fill=0) # bbox_outside_weights.shape == (#total_anchors, 4)\n\n # labels\n 
labels = labels.reshape((1, height, width, A)).transpose(0, 3, 1, 2) # of shape (1, A, height, width)\n labels = labels.reshape((1, 1, A * height, width))\n # bbox_targets\n bbox_targets = bbox_targets.reshape((1, height, width, A * 4)).transpose(0, 3, 1,\n 2) # of shape (1, 4*A, height, width)\n # bbox_inside_weights\n bbox_inside_weights = bbox_inside_weights.reshape((1, height, width, A * 4)).transpose(0, 3, 1,\n 2) # of shape (1, 4*A, height, width)\n # bbox_outside_weights\n bbox_outside_weights = bbox_outside_weights.reshape((1, height, width, A * 4)).transpose(0, 3, 1,\n 2) # of shape (1, 4*A, height, width)\n\n return labels, bbox_targets, bbox_inside_weights, bbox_outside_weights, A", "title": "" }, { "docid": "f74f32ca93b0f6ee9fecdb4941d57af1", "score": "0.48743764", "text": "def predict():\n result = []\n for line in test_data:\n # converse to integer\n point_x = int(line[1].strip())\n point_y = int(line[2].strip())\n node_x = Node(point_x,find_neighbours(point_x))\n node_y = Node(point_y,find_neighbours(point_y))\n jaccard_coefficient = get_jaccard_coefficient(node_x, node_y)\n result.append((line[0], jaccard_coefficient))\n return result", "title": "" }, { "docid": "e65634618994bb6c5a09f2958b8065af", "score": "0.48711917", "text": "def get_pairs_and_labels(self, size, tpos, tneg, windowSize):\n \n pairs = []\n labels = []\n \n # order is t, t', t''\n \n for i in range(size):\n tempval=np.random.randint(low=0, high=self.total_windows)\n \n secondval = self.return_pos_index(index=tempval, tpos=tpos, windowSize=windowSize)\n \n if(np.abs(self.start_times[tempval]-self.start_times[secondval])>tpos): #checking if we got a bad label\n print(\"skipping bad label\")\n continue\n \n if random.random() < 0.5: # randomly a positive or a negative example\n outval = 1\n # we need to check if its impossible to return a pos label\n if(np.abs(secondval-tempval)<=1):\n print(\"skipping since it is impossible to return a positive label\")\n continue\n print(\"lowval\", tempval)\n print(\"highval\", secondval)\n unknown_val=np.random.randint(low=tempval+1, high=secondval)\n \n else:\n outval = -1\n unknown_val = self.return_neg_index(tempval, tneg, windowSize)\n\n # Double check we didnt mess up. 
If this is printed while executing the script, something is wrong\n if(np.abs(self.start_times[tempval]-self.start_times[unknown_val])<tneg):\n print(\"ERROR, messed up neg label\")\n continue\n \n \n pairs.append([tempval, unknown_val, secondval])\n labels.append(outval)\n pairs.append([secondval, unknown_val, tempval])\n labels.append(outval)\n \n \n pairs = np.array(pairs)\n labels=np.array(labels)\n print(labels.shape)\n return pairs, labels", "title": "" }, { "docid": "1c8466604920b7100200a1230f526f2c", "score": "0.48623663", "text": "def create_random_trajectories(self, t):\n env = self.env\n s_dim = env.s_dim\n o_dim = env.o_dim\n a_dim = env.a_dim\n x_dim = env.x_dim\n s_lim = env.s_lim\n a_lim = env.a_lim\n sa_lim = np.vstack((s_lim, a_lim))\n assert(x_dim == o_dim + a_dim)\n\n # create randomly sampled states\n rand = np.random.rand(self.n, t, s_dim + a_dim)\n limdist = sa_lim[:, 1] - sa_lim[:, 0]\n tmp = rand * limdist[None, None, :]\n x = tmp + sa_lim[:, 0][None, None, :]\n x[:, 1:, :s_dim] = np.nan\n\n # arrays to store transition data\n x_o = np.empty((self.n, t, x_dim)) * np.nan # observations + actions\n y_o = np.empty((self.n, t, o_dim)) * np.nan # difference to next observation\n\n # execute trajectories to get data\n for i in range(self.n):\n for j in range(t):\n si = x[i, j, :s_dim]\n ai = x[i, j, -a_dim:]\n\n # set state, execute step and observe next state\n env.state = si\n sio = env.obs_state\n env.step(ai)\n sn = env.state\n sno = env.obs_state\n\n if j + 1 < t:\n x[i, j + 1, :s_dim] = sn\n\n x_o[i, j, :o_dim] = sio\n x_o[i, j, -a_dim:] = ai\n y_o[i, j, :] = sno - sio\n\n return x_o, y_o", "title": "" }, { "docid": "dbdeebd07b82cb9c4bf69a09976eadc3", "score": "0.4859525", "text": "def pptc():\n # get a complete topology\n topo = complete_topology(5)\n # generate a dummy TM and traffic classes\n tm = tmgen.models.uniform_tm(5, 20, 50, 1)\n tc = traffic_classes(tm, {u'all': 1}, as_dict=False)\n # generate all possibe paths\n res = generate_paths_tc(topo, tc, null_predicate, 10, numpy.inf)\n return res", "title": "" }, { "docid": "315a3541b25fb6121e8bdc72e75df92d", "score": "0.48517504", "text": "def triplets(sequences):\n return np.array([[aminoacids[i:i + 3] for i in range(len(aminoacids))] for aminoacids in sequences])", "title": "" }, { "docid": "225a1e67580009e84420421a30670e61", "score": "0.48494777", "text": "def _shuffle_lists(self):\n lm_dist = self.lm_dist\n labels = self.label_list\n self.lm_dist = np.zeros_like(lm_dist)\n self.label_list = []\n c = 0\n for i in np.random.permutation(self.data_size):\n self.lm_dist[c, :, :] = lm_dist[i, :, :]\n c += 1\n self.label_list.append(labels[i])", "title": "" }, { "docid": "f2e5e9a1b8cba5961c485dfecb4feda7", "score": "0.4845282", "text": "def generate_sample(self):\n self.x_true, self.y_true = {}, {}\n\n if self.data_params['name'] in ['mnist', 'fashion_mnist']:\n assert self.N == 784\n if self.data_params['name'] == 'mnist':\n (_, _), (X_test, Y_test) = mnist.load_data()\n else:\n (_, _), (X_test, Y_test) = fashion_mnist.load_data()\n\n # Transform data\n X_test_spec = 2 * (X_test / 255) - 1.\n X_test_spec = X_test_spec.reshape(\n 10000, 784)-np.sum(X_test_spec.reshape(10000, 784), 1).reshape(10000, 1)/784\n X_test_spec = normalize(\n X_test_spec, axis=-1, order=2) * np.sqrt(784)\n\n X_test_ep = 2 * (X_test / 255) - 1.\n\n # Draw random sample from category\n indices = np.array(\n [i for i in range(len(Y_test)) if Y_test[i] == self.data_params['category']])\n if self.seed != 0:\n np.random.seed(self.seed)\n id = 
indices[np.random.randint(0, len(indices), 1)]\n\n # Choose x_star\n x_star = X_test_ep[id].reshape(self.N)\n x_star_spec = X_test_spec[id].reshape(self.N)\n\n else:\n raise NotImplementedError\n\n y = self.channel(x_star)\n self.x_true['x'] = x_star\n self.y_true['y'] = y\n y_spec = self.channel(x_star_spec)\n self.x_true['x_spec'] = x_star_spec\n self.y_true['y_spec'] = y_spec\n\n return y", "title": "" }, { "docid": "d743489366ee42ad1566d9fa2b2d2421", "score": "0.48429355", "text": "def generate_samples(dataset, count):\n same_character = random.randint(0, 1) == 0\n samples = []\n counter = 0\n while counter < count:\n X_single, Y_single = get_sample(dataset=dataset, same_character=same_character)\n samples.append([X_single, Y_single])\n same_character = False if same_character else True\n counter += 1\n X = __column(samples, 0)\n Y = __column(samples, 1)\n X, Y = __shuffle_two_lists(X, Y)\n return X, Y", "title": "" }, { "docid": "2eb78f9b05321ae68f8f0008a4883c54", "score": "0.48407155", "text": "def generate_timepoints(self):\n tp = list(generate_disjoint_timepoints(self.n_samp, self.start,\n self.end))\n for i in range(len(self.tp_generation)):\n if self.tp_generation[i] == 'L':\n tp[i] = np.linspace(self.start, self.end, self.n_traj[i])\n\n tp = tuple(tp)\n self.train_time, self.val_time, self.test_time = tp", "title": "" }, { "docid": "9056f6f3731372d3a1ab5d9de71c120e", "score": "0.48344254", "text": "def random_pick(x, x_dt, w1, w2, training_len): \n \n indices = np.arange( np.shape(x)[0]) \n np.random.shuffle(indices)\n indices = indices[:training_len]\n x = x[indices, :]\n x_dt = x_dt[indices, :]\n w1 = w1[indices]\n w2 = w2[indices]\n print('%i data points are used in this training'%len(indices))\n \n return x, x_dt, w1, w2", "title": "" }, { "docid": "0def76dae2067a8449c2c47caf0f8fc0", "score": "0.48302588", "text": "def generate_training_set(N_train, n_test, flat_PSFs, zern_coefs, random_train=True):\n\n if random_train == True:\n random_choice = rand_state.choice(N_train, N_train - n_test, replace=False)\n test_choice = np.delete(np.arange(N_train), random_choice)\n\n training_set = flat_PSFs[random_choice, :]\n targets = zern_coefs[random_choice]\n test_set = flat_PSFs[test_choice, :]\n test_coef = zern_coefs[test_choice, :]\n\n else: # Just exclude the last N_test to check performance\n training_set = flat_PSFs[:N_train - n_test, :]\n targets = zern_coefs[:N_train - n_test, :]\n test_set = flat_PSFs[N_train - n_test:N_train, :]\n test_coef = zern_coefs[N_train - n_test:N_train, :]\n\n return (training_set, targets), (test_set, test_coef)", "title": "" }, { "docid": "5e6754e78c2a7c48acaf17300614be8e", "score": "0.48254824", "text": "def sample(self,\n assign_result,\n bboxes,\n gt_bboxes,\n gt_labels=None,\n **kwargs):\n if len(bboxes.shape) < 2:\n bboxes = bboxes[None, :]\n\n bboxes = bboxes[:, :4]\n\n gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)\n if self.add_gt_as_proposals and len(gt_bboxes) > 0:\n if gt_labels is None:\n raise ValueError(\n 'gt_labels must be given when add_gt_as_proposals is True')\n bboxes = torch.cat([gt_bboxes, bboxes], dim=0)\n assign_result.add_gt_(gt_labels)\n gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)\n gt_flags = torch.cat([gt_ones, gt_flags])\n\n num_expected_pos = int(self.num * self.pos_fraction)\n pos_inds = self.pos_sampler._sample_pos(assign_result,\n num_expected_pos,\n bboxes=bboxes,\n **kwargs)\n # We found that sampled indices have duplicated items occasionally.\n # (may be a bug of 
PyTorch)\n pos_inds = pos_inds.unique()\n num_sampled_pos = pos_inds.numel()\n num_expected_neg = self.num - num_sampled_pos\n if self.neg_pos_ub >= 0:\n _pos = max(1, num_sampled_pos)\n neg_upper_bound = int(self.neg_pos_ub * _pos)\n if num_expected_neg > neg_upper_bound:\n num_expected_neg = neg_upper_bound\n neg_inds = self.neg_sampler._sample_neg(assign_result,\n num_expected_neg,\n bboxes=bboxes,\n **kwargs)\n neg_inds = neg_inds.unique()\n\n sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,\n assign_result, gt_flags)\n return sampling_result", "title": "" }, { "docid": "770d1313dea9301bb30674e0780261aa", "score": "0.48253685", "text": "def random_sample_path(self):\n random_node, random_ops = self._random_one_individual()\n alphas_normal = self._node_ops_to_alpha(random_node, random_ops)\n random_node, random_ops = self._random_one_individual()\n alphas_reduce = self._node_ops_to_alpha(random_node, random_ops)\n alphas = torch.cat([alphas_normal, alphas_reduce], dim=0)\n return alphas", "title": "" }, { "docid": "fec7e87dd3197b6dcda80c4990432567", "score": "0.48212275", "text": "def sample_edge_uniform(adj_list, degrees, n_triplets, sample_size):\n all_edges = np.arange(n_triplets)\n return np.random.choice(all_edges, sample_size, replace=False)", "title": "" }, { "docid": "c5ee64ceac80cbb37a0d67fe96999d5c", "score": "0.4820349", "text": "def _cold_start_iterations(self):\n\n for _ in range(self.n_iter):\n unique_item_ids = np.unique(self.item_ids)\n no_in_test = int(self.test_size * len(unique_item_ids))\n\n item_ids_in_test = set(np.random.choice(unique_item_ids, size=no_in_test))\n\n test_indices = array.array('i')\n train_indices = array.array('i')\n\n for i, item_id in enumerate(self.item_ids):\n if item_id in item_ids_in_test:\n test_indices.append(i)\n else:\n train_indices.append(i)\n\n train = np.frombuffer(train_indices, dtype=np.int32)\n test = np.frombuffer(test_indices, dtype=np.int32)\n\n # Shuffle data.\n np.random.shuffle(train)\n np.random.shuffle(test)\n\n yield train, test", "title": "" }, { "docid": "cfb6f9baaaf40ee7dbfcbe16e194ef59", "score": "0.48156902", "text": "def _get_test_clips(self, num_frames, clip_len):\n\n np.random.seed(self.seed)\n if num_frames < clip_len:\n # Then we use a simple strategy\n if num_frames < self.num_clips:\n start_inds = list(range(self.num_clips))\n else:\n start_inds = [\n i * num_frames // self.num_clips\n for i in range(self.num_clips)\n ]\n inds = np.concatenate(\n [np.arange(i, i + clip_len) for i in start_inds])\n elif clip_len <= num_frames < clip_len * 2:\n all_inds = []\n for i in range(self.num_clips):\n basic = np.arange(clip_len)\n inds = np.random.choice(\n clip_len + 1, num_frames - clip_len, replace=False)\n offset = np.zeros(clip_len + 1, dtype=np.int64)\n offset[inds] = 1\n offset = np.cumsum(offset)\n inds = basic + offset[:-1]\n all_inds.append(inds)\n inds = np.concatenate(all_inds)\n else:\n bids = np.array(\n [i * num_frames // clip_len for i in range(clip_len + 1)])\n bsize = np.diff(bids)\n bst = bids[:clip_len]\n all_inds = []\n for i in range(self.num_clips):\n offset = np.random.randint(bsize)\n all_inds.append(bst + offset)\n inds = np.concatenate(all_inds)\n return inds", "title": "" }, { "docid": "b80268d3020c3d77b75bcce525528abf", "score": "0.4807695", "text": "def _get_anchor_negative_triplet_mask(labels):\n # Check if labels[i] != labels[k]\n # Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)\n labels_equal = 
torch.eq(torch.unsqueeze(labels, 0), torch.unsqueeze(labels, 1))\n mask = ~labels_equal\n return mask", "title": "" }, { "docid": "bda92de2161787bf4ad1b57351cc5530", "score": "0.4804779", "text": "def _construct_ground_truth(self):\r\n self.ground_truth = []\r\n\r\n def gen_trial_gt(trial_num):\r\n tree = []\r\n init = []\r\n branching = self.pipeline[trial_num][0]\r\n reward_function = self.pipeline[trial_num][1]\r\n def expand(d):\r\n nonlocal init, tree\r\n my_idx = len(init)\r\n init.append(reward_function(d))\r\n children = []\r\n tree.append(children)\r\n for _ in range(get(d, branching, 0)):\r\n child_idx = expand(d+1)\r\n children.append(child_idx)\r\n return my_idx\r\n expand(0)\r\n dist = (0, *init[1:])\r\n return list(map(sample, dist))\r\n for trial_num in range(self.num_trials):\r\n gt = gen_trial_gt(trial_num)\r\n self.ground_truth.append(gt)", "title": "" }, { "docid": "a16333fe1eee2542da1d511cdca6770e", "score": "0.48036817", "text": "def __init__(self):\n super(TripletSequenceData, self).__init__()\n self.test_label = []", "title": "" }, { "docid": "94157abf3fa87578f7fbbfc4ec95053e", "score": "0.4801535", "text": "def get_random_splits(seed):\n # Take 3 from animal, 2 from transport\n random.seed(seed)\n animals_chosen = random.sample(set(ANIMAL_LABELS), 3)\n transport_chosen = random.sample(set(TRANSPORT_LABELS), 2)\n groupA = sorted(animals_chosen + transport_chosen)\n groupB = sorted(list(set(range(10)) - set(groupA)))\n\n return groupA, groupB", "title": "" }, { "docid": "bc172088a75602b73e2d069dc82086db", "score": "0.47939995", "text": "def generate_apples(self):\n np.random.seed(2)\n apples = np.random.randint(0, self.grid_size, size=(self.grid_size**2, 2))\n np.random.seed(None)\n return apples", "title": "" }, { "docid": "eb963cc7dc269f00291ace4283991f23", "score": "0.47882327", "text": "def weight_gen(n_models, samples): \n #seed \n np.random.seed(1)\n \n #random weights that sum to 1 - we only keep unique sets\n weights = [np.random.dirichlet(np.ones(n_models), size = 1).round(2) for i in range(0, samples + 1)]\n weights = [l[0].tolist() for l in weights]\n weights = list(set(tuple(x) for x in weights))\n \n #Adding corner solution weights\n a = [0 for i in range(1, n_models)]\n a.append(1)\n \n combinations = list(itertools.permutations(a, n_models))\n combinations = list(set(x for x in combinations))\n \n for i in combinations:\n weights.append(i)\n \n #Equal weights\n weights.append(tuple([round(1/n_models,2) for i in range(0, n_models)]))\n \n return weights", "title": "" }, { "docid": "b2a7825dcc095efd23229fbe2c7fadbd", "score": "0.4784199", "text": "def _get_cover_deterministic(triples: MappedTriples) -> torch.BoolTensor:\n num_entities = triples[:, [0, 2]].max() + 1\n num_relations = triples[:, 1].max() + 1\n num_triples = triples.shape[0]\n\n # index\n entities = torch.full(size=(num_entities,), fill_value=-1, dtype=torch.long)\n relations = torch.full(size=(num_relations,), fill_value=-1, dtype=torch.long)\n h, r, t = triples.T\n triple_id = torch.arange(num_triples)\n entities[h] = relations[r] = entities[t] = triple_id\n\n if entities.min() < 0:\n raise TripleCoverageError(arr=entities, name=\"entities\")\n if relations.min() < 0:\n raise TripleCoverageError(arr=relations, name=\"relations\")\n\n # select\n seed_mask = torch.zeros(num_triples, dtype=torch.bool)\n seed_mask[entities] = True\n seed_mask[relations] = True\n return seed_mask", "title": "" }, { "docid": "146c0fca086688c782fc3e24a376fc01", "score": "0.47822908", "text": "def 
_sample_proposals(\n self,\n matched_idxs: torch.Tensor,\n matched_labels: torch.Tensor,\n gt_classes: torch.Tensor,\n gt_meta_classes: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n has_gt = gt_classes.numel() > 0\n # Get the corresponding GT for each proposal\n if has_gt:\n gt_classes = gt_classes[matched_idxs]\n # Label unmatched proposals (0 label from matcher) as background (label=num_classes)\n gt_classes[matched_labels == 0] = self.num_classes\n # Label ignore proposals (-1 label)\n gt_classes[matched_labels == -1] = -1\n\n gt_meta_classes = gt_meta_classes[matched_idxs]\n gt_meta_classes[matched_labels == 0] = self.num_meta_classes\n gt_meta_classes[matched_labels == -1] = -1\n else:\n gt_classes = torch.zeros_like(matched_idxs) + self.num_classes\n gt_meta_classes = torch.zeros_like(matched_idxs) + self.num_meta_classes\n\n sampled_fg_idxs, sampled_bg_idxs = subsample_labels(\n gt_classes, self.batch_size_per_image, self.positive_fraction, self.num_classes\n )\n\n # Returns a three-tuple: sampled indices plus the class and meta-class labels for those samples\n sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)\n return sampled_idxs, gt_classes[sampled_idxs], gt_meta_classes[sampled_idxs]", "title": "" } ]
663abec506a80adc4aae7d86de59149f
Can't invite a user to a team that's not mine
[ { "docid": "ed6d47cefd3415d42dea1050f5b96dbf", "score": "0.7434971", "text": "def test_cannot_invite_to_another_team(self):\n with self.loggedInAs(\"alice\", \"123\"):\n resp = self.client.rpost('invitation_create',\n follow=True,\n data={'team': self.bob_team.pk,\n 'receiver': self.carl.pk,\n 'message': \"Hello\"})\n self.assertIn('team', resp.context['form'].errors)\n self.assertEqual(0, Invitation.objects.all().count())", "title": "" } ]
[ { "docid": "76547565f0096434d7b0733d36b2d200", "score": "0.7399999", "text": "def test_cannot_invite_team_members(self):\n self.alice_team.add_team_member(self.carl)\n with self.loggedInAs(\"alice\", \"123\"):\n resp = self.client.rpost('invitation_create',\n follow=True,\n data={'team': self.alice_team.pk,\n 'receiver': self.carl.pk,\n 'message': \"Hello\"})\n self.assertIn('receiver', resp.context['form'].errors)\n self.assertEqual(0, Invitation.objects.all().count())", "title": "" }, { "docid": "6ed5b21272613b70a1ee63a92a8ace71", "score": "0.733298", "text": "def group_invite() -> None:", "title": "" }, { "docid": "204f4f524f7f3ca835286b00f1af8fa4", "score": "0.73024035", "text": "def invite_user():\n # For now on boarding of researcher is not supported\n return \"Work in Progress!!!\"", "title": "" }, { "docid": "13112fabb9e43b85288bf21d35c599f1", "score": "0.6890425", "text": "def test_add_team_member_checks_team_access_allowed_flag(self):\n\n self.client.login(username=\"[email protected]\", password=\"password\")\n\n form = {\"invite_team_member\": \"1\", \"email\": \"[email protected]\"}\n r = self.client.post(\"/accounts/profile/\", form)\n assert r.status_code == 403", "title": "" }, { "docid": "15fbb231974da65e0995b8d78efacf53", "score": "0.6797066", "text": "async def vanity_invite(self):\n ...", "title": "" }, { "docid": "d0cded7c386e7dee8c289eb60eda5ced", "score": "0.66104233", "text": "def test_team_members_id_invite_invitee_post(self):\n pass", "title": "" }, { "docid": "5add1b5d2a4f33a07147941debbcf44b", "score": "0.6597543", "text": "def test_user_send_but_does_not_have_invite(app, authed_client):\n app.config['REQUIRE_INVITE_CODE'] = True\n add_permissions(app, 'invites_send')\n db.engine.execute('UPDATE users SET invites = 0')\n db.session.commit()\n response = authed_client.post(\n '/invites',\n data=json.dumps({'email': '[email protected]'}),\n content_type='application/json',\n )\n check_json_response(response, 'You do not have an invite to send.')\n assert response.status_code == 400", "title": "" }, { "docid": "e28de596cc46825661c1bc430afb8c60", "score": "0.65864646", "text": "async def invite(self, ctx):\n \turl = f\"https://discordapp.com/oauth2/authorize?client_id={self.bot.user.id}&permissions=8&scope=bot\"\n \tawait ctx.send(url)", "title": "" }, { "docid": "363cef030e992dad76b8d71a6045523c", "score": "0.65049213", "text": "def test_send_invite_user_does_not_have_permission(self):\n user = self.create_user()\n self.assertFalse(user.has_perm('accounts.email_invites'))\n\n invite = mommy.make(models.Invite, created_by=user)\n # pylint: disable=line-too-long\n with patch('open_connect.accounts.models.render_and_send_invite_email') as mock:\n invite.send_invite()\n\n self.assertFalse(mock.delay.called)", "title": "" }, { "docid": "bf959ea73cbb1a61e9ba48019b8642f1", "score": "0.6500837", "text": "async def invite(self, ctx, member: discord.Member = None):\n member = member or ctx.author\n guild_id = ctx.guild.id\n error = False\n colour = discord.Colour(0)\n try:\n url = \"Votre lien d'invitation:\\n\" + await self.get_invitation_link(guild_id)\n sql = f\"select message from invite_message where guild_id='{guild_id}'\"\n invite_message = database.fetch_one_line(sql)\n if invite_message:\n url = url + \"\\n\" + invite_message[0]\n colour = colour.from_rgb(255, 51, 124)\n icon_url = \"https://cdn.discordapp.com/attachments/597091535242395649/597091654847037514/Plan_de_travail_18x.png\"\n name = \"Steven Universe Fantasy\"\n embed = discord.Embed(colour=colour)\n 
embed.set_author(icon_url=icon_url, name=name)\n embed.description = url\n embed.timestamp = datetime.utcnow()\n await member.send(content=None, embed=embed)\n await ctx.message.add_reaction('✅')\n except Exception as e:\n await ctx.message.channel.send(Utils.get_text(ctx.guild.id, \"error_user_disabled_PM\").format(member.display_name))\n\n print(f\"{type(e).__name__} - {e}\")\n await ctx.message.add_reaction('❌')\n error = True\n await self.logger.log('invite_log', ctx.author, ctx.message, error)", "title": "" }, { "docid": "4acc748cda92ca1aed88082b0f365fb1", "score": "0.6447341", "text": "async def guild_invite(self, ctx):\r\n issuer = ctx.message.author\r\n player = db.get_player_data(issuer)\r\n if player.guild != \"\":\r\n guild = db.get_guild_data(player.guild)\r\n if player.is_officer(guild):\r\n user = ctx.message.mentions[0]\r\n player = db.get_player_data(user)\r\n player.guild_invite = guild.name\r\n db.set_player_data(user ,player)\r\n await self.bot.whisper(\"Invitation sent successfully!\")\r\n await self.bot.send_message(user, \"You received a \\\"{0}\\\" guild invitation from: {1}, \\n Type *guild join {0} to accept the invitation!\".format(guild.name, ctx.message.author.mention))\r\n else:\r\n await self.bot.whisper(\"You must be an officer in the guild!\")\r\n else:\r\n await self.bot.whisper(\"You must be in a guild!\")", "title": "" }, { "docid": "785ca8c1f0494446a01af2b849d8337a", "score": "0.64254713", "text": "async def invite(self, ctx):\r\n await ctx.send(\"Invite me with <https://is.gd/kumiko>!\")", "title": "" }, { "docid": "4eff31622bb2ecefce7eab52cee0d7b1", "score": "0.63866824", "text": "def test_invalid_team_param(self):\n with self.loggedInAs(\"alice\", \"123\"):\n resp = self.client.rget('invitation_create',\n data={'team': -12})\n initial = resp.context['form'].initial\n self.assertIsNone(initial['team'])", "title": "" }, { "docid": "c004788828eb44648508eb1a736d9ba4", "score": "0.6362386", "text": "def is_invite(self):\n return self.member is None and self.invite is not None", "title": "" }, { "docid": "0630f12c274ce24f34ed39d055efecb6", "score": "0.6329416", "text": "def test_leave_teams_on_accept(self):\n # Register Carl to compete\n RegistrationFactory(user=self.carl, competition=self.galapagos)\n # Put him on a couple of teams\n tg = TeamFactory.create(competition=self.galapagos, num_members=1)\n tg.add_team_member(self.carl)\n ts = TeamFactory.create(competition=self.space, num_members=1)\n ts.add_team_member(self.carl)\n # And send him an invitation to another Galapagos team\n inv = InvitationFactory.create(receiver=self.carl,\n team=self.alice_team)\n with self.loggedInAs(\"carl\", \"123\"):\n resp = self.client.rpost('invitation_accept', follow=True,\n kwargs={'pk': inv.pk},\n data={'confirmed': True})\n # Carl is only on one Galapagos team\n carl_teams = self.carl.team_set.filter(competition=self.galapagos)\n self.assertEqual(1, carl_teams.count())\n # ... he's on Alice's team\n self.assertTrue(self.alice_team.is_user_on_team(self.carl))\n # ... off the other galapagos team\n self.assertFalse(tg.is_user_on_team(self.carl))\n # ... 
but still on his space team\n self.assertTrue(ts.is_user_on_team(self.carl))", "title": "" }, { "docid": "6d9ad12637591666df6ff141b9bd2fe4", "score": "0.6322204", "text": "async def invite(self, ctx):\n await ctx.send(\n \"https://discord.com/oauth2/authorize?client_id=790900950885203978&permissions=2026368118&scope=bot\")", "title": "" }, { "docid": "fc8da0bed15d425a70e45d961033a644", "score": "0.6294414", "text": "def test_cannot_send_too_many_invites(self):\n # Add two more users to Alice's team\n self.alice_team.add_team_member(UserFactory.create())\n self.alice_team.add_team_member(UserFactory.create())\n self.assertEqual(0, Invitation.objects.all().count())\n with self.loggedInAs(\"alice\", \"123\"):\n resp = self.client.rpost('invitation_create',\n follow=True,\n data={'team': self.alice_team.pk,\n 'receiver': self.carl.pk,\n 'message': \"Hello\"})\n self.assertEqual(0, Invitation.objects.all().count())\n self.assertIn('__all__', resp.context['form'].errors)", "title": "" }, { "docid": "76d5aff523a2a1f338dc13822755f124", "score": "0.6291814", "text": "def test_can_invite_again(self):\n # Register Carl to compete\n RegistrationFactory(user=self.carl, competition=self.galapagos)\n # And send him an invitation\n inv = InvitationFactory.create(receiver=self.carl,\n team=self.alice_team)\n with self.loggedInAs(\"carl\", \"123\"):\n # Decline invitation\n resp = self.client.rpost('invitation_decline', follow=True,\n kwargs={'pk': inv.pk},\n data={'confirmed': True})\n self.assertFalse(self.alice_team.is_user_on_team(self.carl))\n\n # Alice is persistent. Sends another invitation\n inv = InvitationFactory.create(receiver=self.carl,\n team=self.alice_team)\n with self.loggedInAs(\"carl\", \"123\"):\n # Carl accepts this time\n resp = self.client.rpost('invitation_accept', follow=True,\n kwargs={'pk': inv.pk},\n data={'confirmed': True})\n self.assertTrue(self.alice_team.is_user_on_team(self.carl))", "title": "" }, { "docid": "c71e698460150c17585d30020ae888ed", "score": "0.6287191", "text": "def send_team_invite_email(invite, request):\n log.info('Sending team invite for %s to %s', invite.team, invite.email)\n send_email(\n invite.email,\n subject='Join your team at Read the Docs',\n template='organizations/email/team_invite.txt',\n template_html='organizations/email/team_invite.html',\n context={\n 'invite_hash': invite.hash,\n 'sender_full_name': request.user.get_full_name(),\n 'sender_username': request.user.username,\n 'organization_name': invite.organization.name,\n },\n request=request,\n )", "title": "" }, { "docid": "5502a442208c43b8cec636e3695a2ad0", "score": "0.6282718", "text": "async def invite(ctx):\n await ctx.channel.send(discordutils.oauth_url(\"511255939806920755\"))", "title": "" }, { "docid": "f58010fe118fcb516f549f4eeefd8225", "score": "0.6271631", "text": "async def botinvite(self, ctx, member: discord.Member = None):\n\n if not member:\n return await send_embed(ctx, f\"[Click me]\"\n f\"(https://discord.com/api/oauth2/authorize?client_id=718287109030543370\"\n f\"&permissions=8&scope=bot)\", info=True)\n\n if not member.bot:\n return await send_embed(ctx, \"Invalid bot.\", negative=True)\n\n await send_embed(ctx,\n f\"[Click Me](https://discord.com/oauth2/authorize?client_id={member.id}\"\n f\"&scope=bot&permissions=0)\",\n info=True)", "title": "" }, { "docid": "d5551de8de595005827bea4496560855", "score": "0.62538004", "text": "def join_team(request: HttpRequest):\n\n form = JoinTeamForm(request.POST)\n\n if form.is_valid():\n team = 
Team.objects.get(code=form.cleaned_data['code'])\n team.user_count += 1\n team.eligible = team.eligible and request.user.userprofile.eligible\n\n team.save()\n team.users.add(request.user)\n\n request.user.userprofile.team = team\n request.user.userprofile.save()\n\n return redirect('/account/')\n\n return render(request, 'account.html', {'user': request.user,\n 'change_password': ChangePasswordForm(user=request.user),\n 'join_team': form,\n 'create_team': CreateTeamForm()})", "title": "" }, { "docid": "21bc4ea789c4c8e5c820aa3e35fb43b1", "score": "0.6217575", "text": "def test_invalid_invitee_param(self):\n with self.loggedInAs(\"alice\", \"123\"):\n resp = self.client.rget('invitation_create',\n data={'invitee': -12})\n initial = resp.context['form'].initial\n self.assertIsNone(initial['receiver'])", "title": "" }, { "docid": "75d763e547156d3a8391ebee3916fc4e", "score": "0.6165915", "text": "def attach_org(sender, request, user, **kwargs):\n team_slug = request.session.get('team')\n if team_slug:\n team = Team.objects.get(slug=team_slug)\n TeamMember.objects.create(team=team, member=user)", "title": "" }, { "docid": "4966c24eeb9985a69f9ea73e175bbb7a", "score": "0.6126504", "text": "def canInvite(self):\n\t\treturn bool(self.perm & Permissions.CAN_INVITE)", "title": "" }, { "docid": "14579f33f8074d36fad9b033dc2c120b", "score": "0.612525", "text": "def test_team_member_is_added(self):\n\n form = {\"invite_team_member\": \"1\", \"email\": \"[email protected]\"}\n r = self.client.post(\"/accounts/profile/\", form)\n assert r.status_code == 200\n\n member_emails = set()\n for member in self.alice.profile.member_set.all():\n member_emails.add(member.user.email)\n\n # Assert the existence of the member emails\n self.assertGreater(len(member_emails), 0)\n\n self.assertIn(\"[email protected]\", member_emails)\n\n # Assert that the email was sent and check email content\n # expected subject message.\n subject = 'You have been invited to join ' \\\n '%(email)s on healthchecks.io' % dict(email=self.alice.email)\n outbox = mail.outbox\n\n self.assertGreater(len(outbox), 0)\n self.assertEqual(outbox[0].subject, subject)\n self.assertIn(self.alice.email, outbox[0].body)", "title": "" }, { "docid": "a5ba2010ca9056e8aa4b2ae043724700", "score": "0.6117834", "text": "def test_increase_allowed_invites(self):\n # Register Carl to compete\n RegistrationFactory(user=self.carl, competition=self.galapagos)\n # Alice's team has two available invites\n self.assertEqual(2, self.alice_team.num_invites_left())\n # Alice sends him an invitation\n inv = InvitationFactory.create(receiver=self.carl,\n team=self.alice_team)\n # Alice's team now has one available invite\n self.assertEqual(1, self.alice_team.num_invites_left())\n with self.loggedInAs(\"carl\", \"123\"):\n # Carl declines invitation\n resp = self.client.rpost('invitation_decline', follow=True,\n kwargs={'pk': inv.pk},\n data={'confirmed': True})\n # Alice's team has two invites again\n self.assertEqual(2, self.alice_team.num_invites_left())", "title": "" }, { "docid": "3800f279bade950263cb20decfdee732", "score": "0.6100338", "text": "async def command_registerteam(\r\n self,\r\n ctx: commands.Context,\r\n user1: discord.Member,\r\n user2: discord.Member,\r\n *,\r\n name: str,\r\n ):\r\n url = await self.config.guild(ctx.guild).tournament_url()\r\n if url is None:\r\n return await embed_helper(ctx, \"No tournaments running!\")\r\n tournament = await self.config.guild(ctx.guild).tournament_id()\r\n try:\r\n start_time = (await 
self.challonge.tournaments.show(tournament=tournament))[\r\n \"started-at\"\r\n ]\r\n except ChallongeException as e:\r\n log.exception(\"Error when getting tournament info: \", exc_info=e)\r\n return await embed_helper(ctx, \"Failed to get tournament info.\")\r\n if start_time is not None:\r\n return await embed_helper(\r\n ctx, \"Tournament has already been started. Registrations are closed.\"\r\n )\r\n\r\n if user1 == ctx.author or user2 == ctx.author:\r\n return await embed_helper(\r\n ctx,\r\n (\r\n \"You cannot add yourself more than once. \"\r\n \"Maybe its your evil twin but they'll need to get their own discord.\"\r\n ),\r\n )\r\n if user1 == user2:\r\n return await embed_helper(\r\n ctx,\r\n \"I am sorry. I see only one of {}. Now go get a third member.\".format(\r\n user1.mention\r\n ),\r\n )\r\n teams = await self.config.guild(ctx.guild).teams()\r\n captain_pokemons = set(await self.config.user(ctx.author).pokemons())\r\n user_1_pokemons = set(await self.config.user(user1).pokemons())\r\n user_2_pokemons = set(await self.config.user(user2).pokemons())\r\n if any(\r\n [len(p) != 2 for p in [captain_pokemons, user_1_pokemons, user_2_pokemons]]\r\n ):\r\n return await embed_helper(\r\n ctx, \"Team members have not set their pokemon types.\"\r\n )\r\n # Each team member's pokemon type should be unique ie disjoint set\r\n if not set(captain_pokemons).isdisjoint(user_1_pokemons):\r\n return await embed_helper(\r\n ctx, f\"{ctx.author.mention} has same pokemon type as {user1.mention}.\"\r\n )\r\n if not set(captain_pokemons).isdisjoint(user_2_pokemons):\r\n return await embed_helper(\r\n ctx, f\"{ctx.author.mention} has same pokemon type as {user2.mention}.\"\r\n )\r\n if not set(user_2_pokemons).isdisjoint(user_1_pokemons):\r\n return await embed_helper(\r\n ctx, f\"{user2.mention} has same pokemon type as {user1.mention}.\"\r\n )\r\n\r\n for team_id in teams.keys():\r\n if teams[team_id][\"name\"] == name:\r\n return await embed_helper(\r\n ctx, \"This name is already taken. Choose a better name.\"\r\n )\r\n if any(\r\n [u.id in teams[team_id][\"players\"] for u in [user1, user2, ctx.author]]\r\n ) or any(\r\n [u.id in teams[team_id][\"subs\"] for u in [user1, user2, ctx.author]]\r\n ):\r\n return await embed_helper(\r\n ctx,\r\n \"A team member is already registered with team {}.\".format(\r\n teams[team_id][\"name\"]\r\n ),\r\n )\r\n\r\n try:\r\n participant = await self.challonge.participants.create(\r\n tournament=tournament, name=name\r\n )\r\n except ChallongeException as e:\r\n log.exception(\"Error when registering team\", exc_info=e)\r\n return await ctx.send(\r\n \"Error when registering team.\\nPlease contact the moderators with:`{}`\".format(\r\n e\r\n )\r\n )\r\n\r\n participant_id = participant[\"id\"]\r\n async with self.config.guild(ctx.guild).teams() as teams:\r\n teams[participant_id] = {}\r\n teams[participant_id][\"name\"] = name\r\n teams[participant_id][\"captain_id\"] = ctx.author.id\r\n teams[participant_id][\"players\"] = [user1.id, user2.id, ctx.author.id]\r\n teams[participant_id][\"pokemon_choices\"] = list(\r\n list(captain_pokemons) + list(user_1_pokemons) + list(user_2_pokemons)\r\n )\r\n teams[participant_id][\"subs\"] = list()\r\n role = discord.utils.get(ctx.guild.roles, name=name)\r\n if role:\r\n return await embed_helper(\r\n ctx,\r\n \"There is already a role with name {}. 
\"\r\n \"Please contact the moderators for the team roles.\".format(name),\r\n )\r\n else:\r\n try:\r\n role = await ctx.guild.create_role(name=name)\r\n except discord.Forbidden:\r\n await embed_helper(\r\n ctx, \"Failed to create role. Ask the admins to give bot perms.\"\r\n )\r\n try:\r\n await ctx.author.add_roles(role)\r\n await user1.add_roles(role)\r\n await user2.add_roles(role)\r\n except discord.Forbidden:\r\n await embed_helper(\r\n ctx,\r\n \"Failed to add role for one of team members. Please contact moderators.\",\r\n )\r\n await embed_helper(ctx, \"Team {} successfully registered.\".format(name))\r\n await ctx.tick()", "title": "" }, { "docid": "38224a0288a52f17331a1d0291ee7738", "score": "0.6095252", "text": "async def invite(self):\n await self.bot.say(get_invite_url(self.bot))", "title": "" }, { "docid": "d53d420bca613ad3dec5dc6259e18a33", "score": "0.6088282", "text": "async def invite(self, ctx: commands.Context):\n perms = discord.Permissions(\n add_reactions=True, \n read_messages=True, \n send_messages=True, \n embed_links=True, \n attach_files=True\n )\n url = discord.utils.oauth_url(self.bot.user.id, permissions=perms, scopes=['bot', 'applications.commands'])\n await ctx.send(f\"Invite me by clicking here: {url}\")", "title": "" }, { "docid": "7b227c5677b3458ba62982aca95005f3", "score": "0.60727984", "text": "async def channel_invite(self, channel, user):", "title": "" }, { "docid": "62752c621888bbc11773260a79361b22", "score": "0.6051503", "text": "def api_invite(self, invitee, room_name, usr, src, tracker):\n self.validate_active_user(src)\n self.validate_active_room(room_name)\n self.validate_room_user(room_name, src)\n self.validate_active_user_name(invitee)\n\n # If invitee is already in room, shouln't receive another invitation.\n if self.T_rooms[room_name].containsUsername(invitee):\n raise Exception(\"15|\" + invitee + \" is already in room \" + room_name)\n\n \"\"\"\n Now, send room invitation message to the invitee.\n \"\"\"\n msg = self.make_msg_c_invited(invitee, room_name, usr, tracker)\n target_src = self.T_users_src[invitee] # get latest src of invitee\n self.send_msg(self.get_client(target_src), msg)\n\n # send response message to sender.\n response_msg = invitee + \":\" + room_name \n # \"user '\" + invitee + \"' is invited to room '\" + room_name + \"'\"\n client = self.get_client(src)\n self.send_c_response(\"ok\", \"invite\", response_msg, usr, client, tracker)", "title": "" }, { "docid": "bae045c55d76703ccc9799d2ff87913a", "score": "0.6044818", "text": "def test_accept_from_another_user(self):\n # Invite carl\n RegistrationFactory(user=self.carl, competition=self.galapagos)\n inv = InvitationFactory.create(receiver=self.carl,\n team=self.alice_team)\n with self.loggedInAs(\"alice\", \"123\"):\n # Try to accept as alice\n resp = self.client.rpost('invitation_accept', follow=True,\n kwargs={'pk': inv.pk},\n data={'confirmed': True})\n self.assert404(resp)\n with self.loggedInAs(\"bob\", \"123\"):\n # Try to accept as bob\n resp = self.client.rpost('invitation_accept', follow=True,\n kwargs={'pk': inv.pk},\n data={'confirmed': True})\n self.assert404(resp)\n self.assertFalse(self.alice_team.is_user_on_team(self.carl))", "title": "" }, { "docid": "8d6f992514461b22c98e8c16af7c9432", "score": "0.60421634", "text": "def invite(self) -> str:\n return f\"https://discord.com/api/oauth2/authorize?client_id={self.user.id}&permissions=8&scope=bot\"", "title": "" }, { "docid": "94127fd138de7dfd9b4c5ccf4953304b", "score": "0.6030326", "text": "def 
create_invitor_status_accepted(self):\n this_user_model_id = (self.request.user.id)\n invitor_user_model_obj = My_custom_user.objects.get(id=this_user_model_id)\n # get this invitation in updatable add form\n this_invitation = Invitation_to_challenge.objects.get(id=self.id)\n this_invitation.Invitation.add(invitor_user_model_obj)", "title": "" }, { "docid": "4abab0baa56322c0e6beb2fc229b3977", "score": "0.60256493", "text": "def test_same_team(self):\n # Register Carl to compete\n RegistrationFactory(user=self.carl, competition=self.galapagos)\n # Put him on a team\n carl_team = TeamFactory.create(competition=self.galapagos,\n num_members=1)\n carl_team.add_team_member(self.carl)\n\n # Alice sends him an invitation\n inv = InvitationFactory.create(receiver=self.carl,\n team=self.alice_team)\n with self.loggedInAs(\"carl\", \"123\"):\n # Carl declines invitation\n resp = self.client.rpost('invitation_decline', follow=True,\n kwargs={'pk': inv.pk},\n data={'confirmed': True})\n # Carl isn't on Alice's team, and he's still on his old team\n self.assertFalse(self.alice_team.is_user_on_team(self.carl))\n self.assertTrue(carl_team.is_user_on_team(self.carl))", "title": "" }, { "docid": "6f0afd188cedff40580ca2dfb261e227", "score": "0.5992588", "text": "async def invites(self):\n ...", "title": "" }, { "docid": "5f0c164e150c2cdf86b43930179c62de", "score": "0.5986872", "text": "def test_invite_without_code(app, authed_client):\n app.config['REQUIRE_INVITE_CODE'] = False\n add_permissions(app, 'invites_send')\n response = authed_client.post(\n '/invites', data=json.dumps({'email': '[email protected]'})\n )\n check_json_response(\n response,\n 'An invite code is not required to register, '\n 'so invites have been disabled.',\n )", "title": "" }, { "docid": "3eca769fcbe88cf5b85a525e22229aa1", "score": "0.596896", "text": "def is_invited(event):\n mtool = getToolByName(event, 'portal_membership')\n member = mtool.getAuthenticatedMember()\n if member.id not in get_invited_usernames(event):\n member_groups = member.getGroups()\n if not member_groups:\n return False\n all_invited_groups = get_invited_groups(event)\n invited_groups = [g for g in member_groups if g in all_invited_groups]\n if not invited_groups:\n return False\n return True", "title": "" }, { "docid": "3002abf5e59341046d21f85ffdfca537", "score": "0.59673184", "text": "def test_get_user_permissions_teams(self):\n pass", "title": "" }, { "docid": "07d2ad1f652b696162ac1134a74227f7", "score": "0.59643316", "text": "def test_invite_users_see_list(self):\n user = self.create_user()\n self.add_perm(user, 'add_invite', 'accounts', 'invite')\n self.assertTrue(user.can_view_user_list())", "title": "" }, { "docid": "0af9296f7fe6fe90943f62002245cd2d", "score": "0.5960669", "text": "def test_assign_managing_team(self):\n pass", "title": "" }, { "docid": "1e025ce6322e7968cfc8c67938b65eb5", "score": "0.59443754", "text": "def test_team_members_id_invitation_tickets_get(self):\n pass", "title": "" }, { "docid": "929a76b9d850aa62ddc72fd06a36db4c", "score": "0.59433764", "text": "async def sendIdentityMessage(self, ctx):\r\n en = \"en\"\r\n tw = \"tw\"\r\n player_rolelist = self.guildDict[str(ctx.guild.id)][str(\r\n ctx.message.channel.id)]['player_rolelist']\r\n for person in player_rolelist:\r\n # Send message\r\n # To make sure not to mistaken the data from other channel or servers\r\n if(person.guildId == ctx.guild.id and person.textChannelId == ctx.message.channel):\r\n if(person.in_murderer_team):\r\n embed = discord.Embed(title=f'You have been choosen 
as [{person.identity}]', description=f'{person.roles_guide[en]}', colour=int(\r\n jdata['Murderer_Team_Colour'], 16))\r\n embed.add_field(\r\n name=f'你已經被選中成為 [{person.identity_tw}]', value=f'{person.roles_guide[tw]}', inline=True)\r\n # maybe add an icon later\r\n embed.set_footer(\r\n text=f\"犯罪現場-CS File @{ctx.guild.name}.{ctx.message.channel.name}\")\r\n embed.set_image(url=person.url)\r\n await person.player.send(embed=embed)\r\n else: # In investigator team\r\n embed = discord.Embed(title=f'You have been choosen as [{person.identity}]', description=f'{person.roles_guide[en]}', colour=int(\r\n jdata['Investigator_Team_Colour'], 16))\r\n embed.add_field(\r\n name=f'你已經被選中成為 [{person.identity_tw}]', value=f'{person.roles_guide[tw]}', inline=True)\r\n # maybe add an icon later\r\n embed.set_footer(\r\n text=f\"犯罪現場-CS File @{ctx.guild.name}.{ctx.message.channel.name}\")\r\n embed.set_image(url=person.url)\r\n await person.player.send(embed=embed)\r\n\r\n # Tell the Accomplice and the murderer who's his teammate\r\n try: # For identity thats not necessarily exist\r\n Accomplice_player: Roles.Accomplice = self.findIdentity(\r\n player_rolelist, 'Accomplice')\r\n Murderer_player: Roles.Murderer = self.findIdentity(\r\n player_rolelist, 'Murderer')\r\n embed = discord.Embed(title=f'The `Murderer` in this game is [{Murderer_player.getPlayerName()}]',\r\n description=f'As the Accomplice, you win if the Murderer gets away with the crime.', colour=int(jdata['Murderer_Team_Colour'], 16))\r\n embed.add_field(name=f'本場遊戲的 `兇手` 是 [{Murderer_player.getPlayerName()}]',\r\n value=f'你是兇手的同黨,當兇手逍遙法外時,你亦一同勝出遊戲。', inline=True)\r\n # maybe add an icon later\r\n embed.set_footer(\r\n text=f\"犯罪現場-CS File @{ctx.guild.name}.{ctx.message.channel.name}\")\r\n embed.set_image(url=Murderer_player.url)\r\n\r\n embed2 = discord.Embed(title=f'The `Accomplice` in this game is [{Accomplice_player.getPlayerName()}]',\r\n description=f'Try to cooperate with the accomplice to get away with the crime.', colour=int(jdata['Murderer_Team_Colour'], 16))\r\n embed2.add_field(\r\n name=f'本場遊戲的 `幫兇` 是 [{Accomplice_player.getPlayerName()}]', value=f'身為兇手,請你與幫兇合作以擺脫嫌疑。', inline=True)\r\n # maybe add an icon later\r\n embed2.set_footer(\r\n text=f\"犯罪現場-CS File @{ctx.guild.name}.{ctx.message.channel.name}\")\r\n embed2.set_image(url=Accomplice_player.url)\r\n await Accomplice_player.player.send(embed=embed)\r\n await Murderer_player.player.send(embed=embed2)\r\n\r\n # Tell the witness whos the culprit\r\n Witness_player: Roles.Witness = self.findIdentity(\r\n player_rolelist, 'Witness')\r\n shufflelist = [Accomplice_player, Murderer_player]\r\n random.shuffle(shufflelist)\r\n embed3 = discord.Embed(title=f'The `culprits` in this game are [{shufflelist[0].getPlayerName()}] and [{shufflelist[1].getPlayerName()}]',\r\n description=f'If the Murderer is arrested but can identify the Witness, the Witness is considered to be killed, allowing the Murderer team to get away and win the game. 
Please be careful.', colour=int(jdata['Murderer_Team_Colour'], 16))\r\n embed3.add_field(name=f'本場遊戲的 `嫌疑人` 是 [{shufflelist[0].getPlayerName()}] and [{shufflelist[1].getPlayerName()}]',\r\n value=f'如兇手被逮捕,只要他成功找出目擊者,目擊者即遭滅口。兇手一伙此時會趁亂逃走,逍遙法外,請多加留意。', inline=True)\r\n # maybe add an icon later\r\n embed3.set_footer(\r\n text=f\"犯罪現場-CS File @{ctx.guild.name}.{ctx.message.channel.name}\")\r\n embed3.set_image(url=Witness_player.curpitimg)\r\n await Witness_player.player.send(embed=embed3)\r\n except:\r\n pass\r\n\r\n if True:\r\n await self.sendMurdererCard(ctx)\r\n await self.randomTilesToFS(ctx)", "title": "" }, { "docid": "b26089d7985a5647af59ab7b7c662394", "score": "0.5931182", "text": "async def inviteme(ctx):\n embed=discord.Embed(color=0x000000)\n embed.add_field(name=\"You can invite me with this link:\", value=\"https://discordapp.com/api/oauth2/authorize?client_id={}&permissions=0&scope=bot\".format(bot.user.id), inline=True)\n await bot.say(embed=embed)", "title": "" }, { "docid": "086f726e0e4b55e23d78d03def873b02", "score": "0.5918834", "text": "def test_set_team_name_checks_team_access_allowed_flag(self):\n\n self.client.login(username=\"[email protected]\", password=\"password\")\n\n form = {\"set_team_name\": \"1\", \"team_name\": \"Charlies Team\"}\n r = self.client.post(\"/accounts/profile/\", form)\n assert r.status_code == 403", "title": "" }, { "docid": "3937b8eb2e128c8266ac191666b88f33", "score": "0.5916534", "text": "async def invite(self, ctx):\n\n embed = discord.Embed(color=0xEB4634, title = f\"Want to add me to your server or withdraw tokens? Use the link below!\")\n embed.add_field(\n name = \"Join Server\", \n value = f\"http://join.p2hb.me/\", \n inline = False\n )\n embed.add_field(\n name = \"Invite Bot\", \n value = f\"http://invite.p2hb.me/\",\n inline = False\n )\n await ctx.send(embed = embed)", "title": "" }, { "docid": "49ecbf76e9363dcfe75d3a729be2ecd6", "score": "0.5914115", "text": "def test_default_team_param(self):\n with self.loggedInAs(\"alice\", \"123\"):\n resp = self.client.rget('invitation_create')\n initial = resp.context['form'].initial\n self.assertEqual(self.alice.team_set.latest().pk, initial['team'])", "title": "" }, { "docid": "34de47ea21817a3609d334b697d9581e", "score": "0.5907228", "text": "async def invitation(self, message):\n if not ((\"invitation\" in message.content.lower())\n or (\"compte\" in message.content.lower())\n ):\n return\n if message.author == self.bot.user: # don't read yourself\n return\n if (message.guild == None):\n # DM => debile-proof\n for guild in self.bot.guilds:\n if Utils.is_loaded(\"invitation\", guild.id) and guild.get_member(message.author.id):\n print(f\"HERE ! 
on guild {guild.id}\")\n guild_id = guild.id\n member = message.author\n error = False\n await member.trigger_typing() # add some tension !!\n invite_delay = Utils.invite_delay(guild_id) or botconfig.config[str(guild_id)][\"invite_delay\"]\n sql = f\"select last from last_invite where guild_id='{guild_id}' and member_id='{member.id}'\"\n last_invite = database.fetch_one_line(sql)\n if last_invite and last_invite[0]:\n last_timestamp = time.mktime(datetime.strptime(last_invite[0], \"%Y-%m-%d %H:%M:%S\").timetuple())\n if str(invite_delay).isnumeric():\n invite_delay = int(invite_delay)\n else:\n invite_delay = Utils.convert_str_to_time(invite_delay)\n print(f\"last_timestamp: {last_timestamp}\")\n print(f\"invite_delay: {invite_delay}\")\n duree = math.floor((last_timestamp + invite_delay) - time.time())\n print(f\"duree: {duree}\")\n if duree > 0:\n await message.add_reaction('❌')\n error = True\n feedback = await message.channel.send(\n Utils.get_text(guild_id, \"invitation_already_asked\").format(Utils.format_time(duree)))\n await self.logger.log_dm('invite_log', self.bot.user, feedback, guild, error)\n if not error:\n try:\n colour = discord.Colour(0)\n url = \"Votre lien d'invitation:\\n\" + await self.get_invitation_link(guild_id)\n sql = f\"select message from invite_message where guild_id='{guild_id}'\"\n invite_message = database.fetch_one_line(sql)\n if invite_message:\n url = url + \"\\n\\n\" + invite_message[0]\n colour = colour.from_rgb(255, 51, 124)\n icon_url = \"https://cdn.discordapp.com/attachments/597091535242395649/597091654847037514/Plan_de_travail_18x.png\"\n name = \"Steven Universe Fantasy\"\n # embed.set_footer(text=f\"ID: {message.id}\")\n embed = discord.Embed(colour=colour)\n embed.set_author(icon_url=icon_url, name=name)\n embed.description = url\n embed.timestamp = datetime.utcnow()\n await member.send(content=None, embed=embed)\n except Exception as e:\n await message.channel.send(Utils.get_text(guild_id, \"error_user_disabled_PM_2\"))\n print(f\" {type(e).__name__} - {e}\")\n error = True\n if not error:\n # LOG LAST INVITE\n sql = f\"select * from last_invite where guild_id='{guild_id}' and member_id='{member.id}'\"\n last_invite = database.fetch_one_line(sql)\n if not last_invite:\n sql = f\"insert into last_invite values ('{member.id}', '{guild_id}', datetime('{datetime.now()}'))\"\n else:\n sql = f\"update last_invite set last=datetime('{datetime.now()}') where member_id='{member.id}' and guild_id='{guild_id}'\"\n try:\n database.execute_order(sql)\n except Exception as e:\n print(f'{type(e).__name__} - {e}')\n error = True\n await self.logger.log_dm('invite_log', member, message, guild, error)\n try:\n if error:\n await message.add_reaction('❌')\n else:\n await message.delete(delay=2)\n await message.add_reaction('✅')\n except Exception as e:\n print(f'{type(e).__name__} - {e}')\n else:\n if not Utils.is_loaded(\"invitation\", message.guild.id):\n return\n guild_id = message.channel.guild.id\n sql = f\"select * from invite_channel where guild_id='{message.channel.guild.id}'\"\n invite_channel = database.fetch_one_line(sql)\n if invite_channel:\n invite_channel = int(invite_channel[0])\n member = message.author\n error = False\n # If I ask for invite, we check for the last time i asked for it\n if (message.channel.id == invite_channel):\n invite_delay = Utils.invite_delay(guild_id) or botconfig.config[str(guild_id)][\"invite_delay\"]\n sql = f\"select last from last_invite where guild_id='{message.guild.id}' and member_id='{member.id}'\"\n last_invite = 
database.fetch_one_line(sql)\n if last_invite and last_invite[0]:\n last_timestamp = time.mktime(datetime.strptime(last_invite[0], \"%Y-%m-%d %H:%M:%S\").timetuple())\n if str(invite_delay).isnumeric():\n invite_delay = int(invite_delay)\n else:\n invite_delay = Utils.convert_str_to_time(invite_delay)\n print(f\"last_timestamp: {last_timestamp}\")\n print(f\"invite_delay: {invite_delay}\")\n duree = math.floor((last_timestamp + invite_delay) - time.time())\n print(f\"duree: {duree}\")\n if duree > 0:\n await self.logger.log('invite_log', member, message, True)\n await message.add_reaction('❌')\n await message.channel.send(\n Utils.get_text(guild_id, \"invitation_already_asked\").format(Utils.format_time(duree)))\n return\n try:\n colour = discord.Colour(0)\n url = \"Votre lien d'invitation:\\n\" + await self.get_invitation_link(guild_id)\n sql = f\"select message from invite_message where guild_id='{guild_id}'\"\n invite_message = database.fetch_one_line(sql)\n if invite_message:\n url = url + \"\\n\\n\" + invite_message[0]\n colour = colour.from_rgb(255, 51, 124)\n icon_url = \"https://cdn.discordapp.com/attachments/597091535242395649/597091654847037514/Plan_de_travail_18x.png\"\n name = \"Steven Universe Fantasy\"\n # embed.set_footer(text=f\"ID: {message.id}\")\n embed = discord.Embed(colour=colour)\n embed.set_author(icon_url=icon_url, name=name)\n embed.description = url\n embed.timestamp = datetime.utcnow()\n await member.send(content=None, embed=embed)\n except Exception as e:\n await message.channel.send(Utils.get_text(guild_id, \"error_user_disabled_PM_2\"))\n print(f\" {type(e).__name__} - {e}\")\n error = True\n if not error:\n # LOG LAST INVITE\n sql = f\"select * from last_invite where guild_id='{message.guild.id}' and member_id='{member.id}'\"\n last_invite = database.fetch_one_line(sql)\n if not last_invite:\n sql = f\"insert into last_invite values ('{member.id}', '{message.guild.id}', datetime('{datetime.now()}'))\"\n else:\n sql = f\"update last_invite set last=datetime('{datetime.now()}') where member_id='{member.id}' and guild_id='{message.guild.id}'\"\n try:\n database.execute_order(sql)\n except Exception as e:\n await message.channel.send(Utils.get_text(guild_id, \"error_database_writing\"))\n print(f'{type(e).__name__} - {e}')\n error = True\n await self.logger.log('invite_log', member, message, error)\n try:\n if error:\n await message.add_reaction('❌')\n else:\n await message.delete(delay=2)\n await message.add_reaction('✅')\n except Exception as e:\n print(f'{type(e).__name__} - {e}')", "title": "" }, { "docid": "d1dc2c2ebb65b450924cb05a09225790", "score": "0.5905347", "text": "def test_team_members_id_invitation_tickets_post(self):\n pass", "title": "" }, { "docid": "ca87f5ac75249ffd8727a4ac75ee7a79", "score": "0.59014845", "text": "def test_cannot_invite_if_competition_closed(self):\n self.galapagos.is_open = False\n self.galapagos.save()\n\n with self.loggedInAs(\"alice\", \"123\"):\n resp = self.client.rpost('invitation_create',\n follow=True,\n data={'team': self.alice_team.pk,\n 'receiver': self.carl.pk,\n 'message': \"Hello\"})\n self.assert404(resp)", "title": "" }, { "docid": "3b932c020496fcaf47c1b7d98bbdf7c9", "score": "0.59007394", "text": "def requires_invite(f):\n\n @wraps(f)\n def wrapper(self, *args, **kwargs):\n message = args[0]\n user = self.get_sender_username(message)\n\n name = getattr(f, '_jabberbot_command_name', None)\n if name is not None:\n self.log.info('%s called %s with %s' % (user, name, args[1:]))\n\n if user not in self.users and 
user not in self.invited:\n message = 'You atleast need to be invited!'\n\n else:\n message = f(self, user, *args[1:], **kwargs)\n\n return message\n\n return wrapper", "title": "" }, { "docid": "df7be7bcf340cc90ea465806cf541bd7", "score": "0.589116", "text": "def invite_user(self, request):\n # Check for add and change permission.\n has_add_permission = self.has_add_permission(request)\n has_change_permission = self.has_change_permission(request)\n try:\n has_view_permission = self.has_view_permission(request)\n except AttributeError:\n has_view_permission = True\n if not has_add_permission or not has_change_permission:\n raise PermissionDenied\n # Process the form.\n InviteForm = self.get_form(\n request,\n form=self.invite_form,\n fields=flatten_fieldsets(self.invite_fieldsets),\n )\n if request.method == \"POST\":\n form = InviteForm(request.POST)\n if form.is_valid():\n # Save the user, marked as inactive.\n user = form.save(commit=False)\n user.is_active = False\n user.is_staff = True\n user.save()\n form.save_m2m()\n # Send an invitation email.\n self.do_send_invitation_email(request, user)\n # Message the user.\n self.message_user(request, \"An invitation email has been sent to {email}.\".format(\n email=user.email,\n ))\n # Redirect as appropriate.\n # Using the superclass to avoid the built in munging of the add response.\n return super(UserAdminBase, self).response_add(request, user)\n else:\n form = InviteForm()\n # Create the admin form.\n admin_form = admin.helpers.AdminForm(form, self.invite_fieldsets, {})\n # Render the template.\n media = self.media + admin_form.media\n return render(request, self.invite_form_template, dict(\n self.admin_site.each_context(request),\n title=\"Invite user\",\n opts=self.model._meta,\n form=form,\n adminform=admin_form,\n media=media,\n add=True,\n change=False,\n is_popup=False,\n save_as=self.save_as,\n has_add_permission=has_add_permission,\n has_change_permission=has_change_permission,\n has_delete_permission=self.has_delete_permission(request),\n show_delete=False,\n has_view_permission=has_view_permission,\n has_editable_inline_admin_formsets=True,\n ))", "title": "" }, { "docid": "258a56910b2fc69293d04d14c758ca79", "score": "0.58638", "text": "def invite_user():\n form = InviteUserForm()\n if form.validate_on_submit():\n user = User(\n role=form.role.data,\n first_name=form.first_name.data,\n last_name=form.last_name.data,\n email=form.email.data,\n )\n db.session.add(user)\n token = user.generate_confirmation_token()\n db.session.commit()\n\n invite_link = url_for(\n \"account.join_from_invite\", user_id=user.id, token=token, _external=True\n )\n try:\n send_email(\n recipient=user.email,\n subject=\"You Are Invited To Join\",\n template=\"account/email/invite\",\n user=user,\n invite_link=invite_link,\n )\n flash(\"User {} successfully invited\".format(user.full_name()), \"form-success\")\n except Exception as e:\n print(e)\n flash(\"Sending User {} invitation failed!\".format(user.full_name()), \"error\")\n return render_template(\"admin/new_user.html\", form=form)", "title": "" }, { "docid": "335d3ba997bb37437d10fd3993053891", "score": "0.5857043", "text": "def test_team_members_id_team_permission_put(self):\n pass", "title": "" }, { "docid": "1adb5e35e2b6149240eabc77efdbaa0b", "score": "0.58511037", "text": "def test_create_with_invite_no_staff_no_superuser(self):\n invite = models.Invite.objects.create(\n email='[email protected]', created_by=self.user)\n user = models.User.objects.create_user(\n username='[email protected]')\n 
invite = models.Invite.objects.get(pk=invite.pk)\n self.assertIsNotNone(invite.consumed_at)\n self.assertEqual(invite.consumed_by, user)", "title": "" }, { "docid": "59882d545ccb498f63b51b348dcf2fd2", "score": "0.5826435", "text": "def test_whitelisted_user_can_send_to_group_if_not_member(self):\n group = mommy.make('groups.Group')\n user = self.create_user()\n group.whitelist_users.add(user)\n\n self.assertTrue(user.can_send_to_group(group))", "title": "" }, { "docid": "3906b6337954eaf5ba83def810e073c1", "score": "0.58169365", "text": "def test_send_invite(self):\n form = self._get_form(\n initial={'organization': self._get_organization()},\n data={'to': '[email protected], [email protected]'}\n )\n self.assertTrue(form.is_valid())\n if form.errors:\n print(form.errors)\n\n form.send_invite()\n self.assertEqual(len(mail.outbox), 2)", "title": "" }, { "docid": "e434a5c74b3a0f0a70ce7ac410a70838", "score": "0.58129025", "text": "def test_send_invite_superuser(self):\n user = self.create_superuser()\n self.assertTrue(user.is_superuser)\n\n invite = mommy.make(models.Invite, created_by=user)\n # pylint: disable=line-too-long\n with patch('open_connect.accounts.models.render_and_send_invite_email') as mock:\n invite.send_invite()\n\n self.assertTrue(mock.delay.called)", "title": "" }, { "docid": "e44ad2265be29b94904b247612c3eb22", "score": "0.5811484", "text": "def test_allowed_invites_stays_decreased(self):\n # Start with 2 available invites\n self.assertEqual(2, self.alice_team.num_invites_left())\n\n # Invite carl\n RegistrationFactory(user=self.carl, competition=self.galapagos)\n inv = InvitationFactory.create(receiver=self.carl,\n team=self.alice_team)\n # Down to one available invite\n self.assertEqual(1, self.alice_team.num_invites_left())\n with self.loggedInAs(\"carl\", \"123\"):\n # Accept invitation\n resp = self.client.rpost('invitation_accept', follow=True,\n kwargs={'pk': inv.pk},\n data={'confirmed': True})\n # Still only have one available invite\n self.assertTrue(self.alice_team.is_user_on_team(self.carl))\n self.assertEqual(1, self.alice_team.num_invites_left())", "title": "" }, { "docid": "f4be75c1f396ea4fb43c477906153c2e", "score": "0.5810696", "text": "def reject_invite(self, other_user_id):\n return self.respond_to_invite(other_user_id, accept=False)", "title": "" }, { "docid": "ff1d11899b3e5a43afe93e79d17f2562", "score": "0.5801432", "text": "def form_valid(self, form):\n form.instance.invitor_user_model = self.request.user\n form.instance.username_of_invitor = self.request.user.username\n status_obj = Invitation_status.objects.create(invitee=self.request.user, status='accepted' )\n status_obj.save()\n # create a challenge, but only put the invitee as the only participant as of right now \n return super().form_valid(form)", "title": "" }, { "docid": "299c586b6d3a110b28cf29833ecd5538", "score": "0.57827306", "text": "def test_get_invite_only_with_permission_denied_error(self):\n group = Group.objects.create(name='test-group', invite_only=True)\n\n rsp = self.apiGet(get_review_group_item_url(group.name),\n expected_status=403)\n self.assertEqual(rsp['stat'], 'fail')\n self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)", "title": "" }, { "docid": "16c057b09afb3382da4240ac56469558", "score": "0.57737863", "text": "def test_team_members_id_team_permission_get(self):\n pass", "title": "" }, { "docid": "8a2ccb7add083f9487020c1ad6bc7f08", "score": "0.57727283", "text": "def test_get_reviewrequest_with_invite_only_group_and_permission_denied_error(self):\n review_request 
= ReviewRequest.objects.filter(public=True,\n local_site=None).exclude(submitter=self.user)[0]\n review_request.target_groups.clear()\n review_request.target_people.clear()\n\n group = Group(name='test-group', invite_only=True)\n group.save()\n\n review_request.target_groups.add(group)\n review_request.save()\n\n rsp = self.apiGet(self.get_item_url(review_request.display_id),\n expected_status=403)\n self.assertEqual(rsp['stat'], 'fail')\n self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)", "title": "" }, { "docid": "dd644d977b16a0a5e2cbe47756c08c2a", "score": "0.5765462", "text": "def test_get_invite_only(self):\n group = Group.objects.create(name='test-group', invite_only=True)\n group.users.add(self.user)\n\n rsp = self.apiGet(get_review_group_item_url(group.name),\n expected_mimetype=review_group_item_mimetype)\n self.assertEqual(rsp['stat'], 'ok')\n self.assertEqual(rsp['group']['invite_only'], True)", "title": "" }, { "docid": "0749044539cf7077a4e44c240ccf778e", "score": "0.57554597", "text": "def create_instant_invite(self):\n return self._bit(0)", "title": "" }, { "docid": "ebb791ead82cf475928f96aa5bb78dca", "score": "0.57497513", "text": "def test_team_members_id_target_model_target_id_invite_post(self):\n pass", "title": "" }, { "docid": "fa48d8705d57c1ca4e3b669fe0d51526", "score": "0.5748512", "text": "def test_send_invite_user_has_perm_and_is_superuser(self):\n user = self.create_superuser()\n add_email_invites_permission(user)\n self.assertTrue(user.has_perm('accounts.email_invites'))\n self.assertTrue(user.is_superuser)\n\n invite = mommy.make(models.Invite, created_by=user)\n # pylint: disable=line-too-long\n with patch('open_connect.accounts.models.render_and_send_invite_email') as mock:\n invite.send_invite()\n\n self.assertTrue(mock.delay.called)", "title": "" }, { "docid": "e2f2fd64982ee641b4759c7d6bb79b4c", "score": "0.5746399", "text": "def test_US3_U1_belongstoteam_false(self):\n self.set_up()\n\n joe = UserProfile.objects.get(username=\"jd\")\n\n team = Team.objects.get(name=\"Administrators 1\")\n\n self.assertFalse(belongs_to_team(joe, team))", "title": "" }, { "docid": "9643d5ca2d34ce9fd1b1e9d2da9754ec", "score": "0.5746396", "text": "def test_invite_invalid_token(self):\n\n classroom = ClassroomFactory()\n\n response = self.client.get(\n f\"/api/classrooms/{classroom.id}/token/?invite_token={secrets.token_urlsafe()}\"\n )\n self.assertEqual(response.status_code, 404)", "title": "" }, { "docid": "5b0927c5ab0d06ebd2f2a72183d2ba8d", "score": "0.57448345", "text": "def test_send_invite_user_has_permission(self):\n user = self.create_user()\n add_email_invites_permission(user)\n self.assertTrue(user.has_perm('accounts.email_invites'))\n\n invite = mommy.make(models.Invite, created_by=user)\n # pylint: disable=line-too-long\n with patch('open_connect.accounts.models.render_and_send_invite_email') as mock:\n invite.send_invite()\n\n self.assertTrue(mock.delay.called)", "title": "" }, { "docid": "0a4ab04712daa01adc3528490997dbba", "score": "0.5729596", "text": "def deny(\n self, invite_request_id: str, team_id: str = None, **kwargs\n ) -> Response:\n\n payload = {\n \"token\": self._token,\n \"invite_request_id\": invite_request_id,\n }\n\n if team_id is not None:\n payload[\"team_id\"] = team_id\n\n return self._post(\n \"admin.inviteRequests.deny\", payload=payload, **kwargs\n )", "title": "" }, { "docid": "d8a68907124fb5f496f7f0f16db89caf", "score": "0.5724307", "text": "def accept_invite(self, other_user_id):\n return 
self.respond_to_invite(other_user_id)", "title": "" }, { "docid": "edc53c3bb103a8706d4b490319fc54a7", "score": "0.57175803", "text": "async def team_add(self, ctx: commands.context.Context, *, team_name: str):\n logging.debug(\"Starting team creation...\")\n team_emoji = get(ctx.guild.emojis, name=\"CODEDAY\")\n team_name = s = re.sub(r'^\"|\"$', '', team_name)\n\n # Checks if any teams with this name already exist, and fails if they do\n if self.team_service.get_team_by_name(team_name) is not None:\n await ctx.send(\n \"A team with that name already exists! Please try again, human!\"\n )\n return\n\n # Creates a new channel for the new team\n overwrites = {\n ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False),\n ctx.guild.get_role(self.role_student): discord.PermissionOverwrite(\n read_messages=False, send_messages=False\n ),\n ctx.guild.me: discord.PermissionOverwrite(\n read_messages=True, read_message_history=True\n ),\n }\n tc = await ctx.guild.create_text_channel(\n name=f\"{team_name.replace(' ', '-')}-📋\",\n overwrites=overwrites,\n category=ctx.guild.get_channel(self.category),\n topic=f\"A channel for {team_name} to party! \\nAnd maybe do some work too\",\n )\n await tc.send(\n f\"Welcome to team `{team_name}`!! I'm excited to see what you can do!\"\n )\n\n # Creates and sends the join message\n join_message: discord.Message = await ctx.guild.get_channel(\n self.channel_gallery\n ).send(\n choice(teamCreateMessages).format(team_name)\n + f\"\\nReact with {team_emoji} if you \"\n f\"want to join!\"\n )\n await join_message.add_reaction(team_emoji)\n\n self.team_service.add_team(team_name, tc.id, join_message.id)\n\n await ctx.send(\n \"Team created successfully! Direct students to #team-gallery to join the team!\"\n )", "title": "" }, { "docid": "09151b7df3fefd59e0a8bd530d4bcf17", "score": "0.57170784", "text": "def team_confirm(email, team1_id, team2_id): # if request.method == 'POST'\n team1 = coll(\"teams\").find_one({\"_id\": team1_id})\n team2 = coll(\"teams\").find_one({\"_id\": team2_id})\n\n if not team1 or not team2:\n return {\"message\": \"Invalid team id(s)\"}, 404\n\n if email not in team1[\"members\"]:\n return {\"message\": f\"User not in team {team1_id}\"}, 403\n\n new_length = len(team1[\"members\"]) + len(team2[\"members\"])\n if new_length > 4:\n return {\"message\": \"Team size will be greater than 4\"}, 409\n\n if team1[\"complete\"] or team2[\"complete\"]:\n return {\"message\": \"Team complete\"}, 409\n\n if team1_id not in team2[\"outgoing_inv\"] and team2_id not in team1[\"incoming_inv\"]:\n return {\"message\": \"Invite no longer exists\"}, 404\n\n # NOTE So we can do merging of the two teams (documents) however we want (this is just an example)\n # currently the other team is being deleted but we should really archive it for futuring training\n # purposes for our ML model\n coll(\"teams\").update_one(\n {\"_id\": team2_id},\n {\n \"$push\": {\"members\": {\"$each\": team1[\"members\"]}},\n \"$pull\": {\"outgoing_inv\": team1_id},\n \"$set\": {\n \"complete\": new_length == 4,\n \"meta\": aggregate_team_meta(team1[\"members\"] + team2[\"members\"]),\n },\n },\n )\n coll(\"users\").update(\n {\"_id\": {\"$in\": team1[\"members\"]}}, {\"$set\": {\"team_id\": team2_id}}\n )\n\n doc = coll(\"teams\").find_one_and_delete({\"_id\": team1_id})\n coll(\"archive\").insert_one(doc)\n\n coll(\"teams\").update(\n {}, {\"$pull\": {\"outgoing_inv\": team1_id, \"incoming_inv\": team1_id}}, multi=True\n ) # removes team1_id from all the remaining 
teams' outgoing and incoming invites\n if (\n new_length == 4\n ): # removes team2_id from all the remaining team because team2 is full now\n coll(\"teams\").update(\n {},\n {\"$pull\": {\"outgoing_inv\": team2_id, \"incoming_inv\": team2_id}},\n multi=True,\n )\n\n # NOTE At the end of HackRU we can perform a backup job which will archive all the successfully created team\n\n return {\"message\": \"Success\"}, 200", "title": "" }, { "docid": "baedafcd9fb88154a06f2b74233a60b7", "score": "0.5715542", "text": "def test_invite_user_with_code(app, authed_client):\n app.config['REQUIRE_INVITE_CODE'] = True\n add_permissions(app, 'invites_send')\n response = authed_client.post(\n '/invites',\n data=json.dumps({'email': '[email protected]'}),\n content_type='application/json',\n )\n check_json_response(\n response,\n {'expired': False, 'email': '[email protected]', 'invitee': None},\n )\n assert response.status_code == 200\n\n user = User.from_pk(1)\n assert user.invites == 0", "title": "" }, { "docid": "2a6ee108c5ebe4b5e03db76c8983c732", "score": "0.5714301", "text": "def add_member(self, user, team):\n member = team.members.filter(pk=user.pk).first()\n if not member:\n member = TeamMember.objects.create(team=team, member=user)\n return member", "title": "" }, { "docid": "2ef6502d5ed6486f647c80f8a9e5eaff", "score": "0.5713796", "text": "async def invite_bot(self, ctx):\n\t\tt = datetime.datetime.utcnow()\n\t\te = Embed(description = f\"{ctx.author.mention}, here's an invite link for the bot :slight_smile:\",\n\t\t\t\t colour = 0x0cb44e)\n\t\te.set_author(name = f\"{ctx.author.display_name}#{ctx.author.discriminator}\", icon_url = ctx.author.avatar_url)\n\t\te.add_field(name = \"Invite Link\", value = '**[Invite Me](https://discord.com/api/oauth2/authorize?client_id=723380957343907911&permissions=8&scope=bot \"Invite the bot to your server\")**')\n\t\te.set_footer(text = f\"Requested by {ctx.author.display_name} | {t.strftime('%b %d, %Y | %I:%M %p UTC')}\")\n\n\t\tawait ctx.send(embed = e)", "title": "" }, { "docid": "e0bae50132085f7556f38453410432a5", "score": "0.5711969", "text": "def test_disable_idea_facilitator_user(self):\n #: create a workshop and two users\n nounVerbTest.test_disable_noun_facilitator_user(self, 'idea')", "title": "" }, { "docid": "9c9113e663a64850090f9a4c8ae76be5", "score": "0.5703187", "text": "def test_get_teams_by_username_permissions(self):\n pass", "title": "" }, { "docid": "815babf9e84f36291213f54927923869", "score": "0.57007897", "text": "def handle_invite(self, ievent):\n\n if users.allowed(ievent.userhost, ['OPER', ]):\n self.join(ievent.txt)", "title": "" }, { "docid": "65b174d964ae115028015b1e5a035d8b", "score": "0.5700687", "text": "def test_get_group_invite_only(self):\n group = Group.objects.create(name='test-group', invite_only=True)\n group.users.add(self.user)\n\n rsp = self.apiGet(self.get_item_url(group.name))\n self.assertEqual(rsp['stat'], 'ok')\n self.assertEqual(rsp['group']['invite_only'], True)", "title": "" }, { "docid": "867ffa8ffe70a12fe99b066200589de4", "score": "0.5690363", "text": "def test_team_members_get(self):\n pass", "title": "" }, { "docid": "5bbea4f4f91a2a949296e29982938c22", "score": "0.5675915", "text": "def test_reinviting(self):\n # Register Carl to compete\n RegistrationFactory(user=self.carl, competition=self.galapagos)\n # And send him an invitation\n inv = InvitationFactory.create(receiver=self.carl,\n team=self.alice_team)\n with self.loggedInAs(\"carl\", \"123\"):\n # Accept invitation\n resp = 
self.client.rpost('invitation_accept', follow=True,\n kwargs={'pk': inv.pk},\n data={'confirmed': True})\n self.assertTrue(self.alice_team.is_user_on_team(self.carl))\n self.assertEqual('A', Invitation.objects.get(pk=inv.pk).response)\n\n # Leave team\n resp = self.client.rpost('team_leave', follow=True,\n kwargs={'comp_slug': inv.team.competition.slug},\n data={'confirmed': True})\n self.assertFalse(self.alice_team.is_user_on_team(self.carl))\n\n # Alice forgives him and sends him another invitation\n inv = InvitationFactory.create(receiver=self.carl,\n team=self.alice_team)\n with self.loggedInAs(\"carl\", \"123\"):\n # Accept invitation\n resp = self.client.rpost('invitation_accept', follow=True,\n kwargs={'pk': inv.pk},\n data={'confirmed': True})\n # Back on Alice's team\n self.assertTrue(self.alice_team.is_user_on_team(self.carl))\n self.assertRedirects(resp, inv.team.get_absolute_url())\n self.assertEqual('A', Invitation.objects.get(pk=inv.pk).response)", "title": "" }, { "docid": "da50930491833daa7dfc1cde8b33ed83", "score": "0.56732583", "text": "async def __add_user(self, email, team=None):\n api_url = f\"{self.__base_uri}/orgs/{self.__organizaton}/invitations\"\n kwargs = {\n \"email\": email.lower(),\n }\n data = {}\n\n if team:\n kwargs[\"team_ids\"] = [team[\"id\"]]\n\n async with aiohttp.ClientSession() as session:\n async with session.post(\n api_url,\n headers={\"Authorization\": f\"token {self.__token}\"},\n json=kwargs,\n ) as response:\n assert response.status == 201\n data = await response.json()\n self.__invitations[email.lower()] = {\n \"email\": data[\"email\"].lower(),\n \"id\": data[\"id\"],\n }", "title": "" }, { "docid": "f70b045ecee35c76be4ed245e3a187a3", "score": "0.56461304", "text": "def test_get_reviewrequest_with_invite_only_group_and_target_user(self):\n review_request = ReviewRequest.objects.filter(public=True,\n local_site=None).exclude(submitter=self.user)[0]\n review_request.target_groups.clear()\n review_request.target_people.clear()\n\n group = Group(name='test-group', invite_only=True)\n group.save()\n\n review_request.target_groups.add(group)\n review_request.target_people.add(self.user)\n review_request.save()\n\n rsp = self.apiGet(self.get_item_url(review_request.display_id))\n self.assertEqual(rsp['stat'], 'ok')\n self.assertEqual(rsp['review_request']['id'], review_request.display_id)\n self.assertEqual(rsp['review_request']['summary'],\n review_request.summary)", "title": "" }, { "docid": "6d99510bc97317dc905f1b85a20e2c9f", "score": "0.5641757", "text": "def test_cannot_send_duplicate_invite(self):\n user = self.create_superuser()\n invite = mommy.make(models.Invite, created_by=user)\n\n # pylint: disable=line-too-long\n with patch('open_connect.accounts.models.render_and_send_invite_email') as mock:\n invite.send_invite()\n\n self.assertTrue(mock.delay.called)\n\n # Ensure that we can't send another identical invite\n invite.notified = now()\n invite.save()\n\n # pylint: disable=line-too-long\n with patch('open_connect.accounts.models.render_and_send_invite_email') as mock2:\n invite.send_invite()\n\n self.assertFalse(mock2.delay.called)", "title": "" }, { "docid": "580fbffa533d3c5ee22cb8b7d45b9632", "score": "0.5637482", "text": "def test_team_members_id_target_model_target_id_invite_invitee_post(self):\n pass", "title": "" }, { "docid": "7214a97c7fc2f34c2c760f968fab7661", "score": "0.5627884", "text": "def test_team_members_id_team_permission_post(self):\n pass", "title": "" }, { "docid": "3bed453e68716b9221e8c14314f89323", "score": 
"0.562432", "text": "def test_team_members_id_invitation_tickets_fk_put(self):\n pass", "title": "" }, { "docid": "416e2eb550fa124d46ac160239bae144", "score": "0.56223464", "text": "def test_team_members_id_invitation_tickets_fk_get(self):\n pass", "title": "" }, { "docid": "94de173e7d2f63c12ef1f526cf1b8431", "score": "0.5613414", "text": "def test_team_members_id_team_get(self):\n pass", "title": "" }, { "docid": "16b5018789f59a8b46665bca529ee080", "score": "0.56035537", "text": "async def choose(ctx):\n server = get_server(ctx.guild)\n if not server.teams:\n await ctx.send(\"Teams have not been made\")\n return\n server.team1 = server.teams[server.current_team].team1\n server.team2 = server.teams[server.current_team].team2\n server.game_over = False\n await ctx.send(\"glhf!\")", "title": "" }, { "docid": "48b5fc85eb6f3b3d198566cad6d0d139", "score": "0.5593168", "text": "def is_agent_invite(self) -> bool:\n invite = self.adapter.invitation\n if invite is None:\n return False\n return invite.is_agent_user", "title": "" }, { "docid": "5465b3ee44944075bc7e814ced21e23e", "score": "0.5590427", "text": "def test_disable_idea_user_facilitator(self):\n #: create a workshop and two users\n nounVerbTest.test_disable_noun_user_facilitator(self, 'idea')", "title": "" }, { "docid": "853359d83ef744be8780d6416d9fce6b", "score": "0.5590003", "text": "def test_teams_users_create(self):\n pass", "title": "" }, { "docid": "049fc63c797a550b5544c19698ca3147", "score": "0.5577084", "text": "def invite_send():\n name = request.values.get('displayname')\n person_obj = model.Person.find_or_create_by_name(name)\n url = ''.join([ 'http://',\n modules.get_hostname(version=modules.get_current_version_name()),\n '/register/',\n person_obj.key.urlsafe()])\n email = request.values.get('email')\n message = request.values.get('message')\n sender = 'invite@' + app_identity.get_application_id() + \".appspotmail.com\"\n logging.debug(sender)\n logging.debug(url)\n # logging.debug(render_template(\"invite_message.txt\", join_url=url, personal_message=message))\n mail.send_mail(\n sender=sender,\n to=email,\n subject=\"You have been invited to a Book Club!\",\n bcc='[email protected]',\n body=render_template(\"invite_message.txt\",\n join_url=url,\n personal_message=message))\n flash(\"Email sent to %s\" % email)\n return redirect(url_for('admin.invite_form'))", "title": "" }, { "docid": "6a4e7b154b1ed552ba594a9d12e58510", "score": "0.5570466", "text": "def send_add_notification(self, request):\n send_team_add_email(team_member=self, request=request)", "title": "" }, { "docid": "731aa206145b8d8030d0ef8d92c8b32d", "score": "0.5565751", "text": "def add_to_all_teams(user, org):\n\n for team in org.get_teams():\n team.add_to_members(user)", "title": "" } ]
f6460b17f437800c1dc656d38a0e0277
Compute the hold that maximizes the expected value when the discarded dice are rolled.
[ { "docid": "8545d861eb4b47194135a89caaf51845", "score": "0.73834246", "text": "def strategy(hand, num_die_sides):\r\n max_value = 0.0\r\n max_hold = ()\r\n for hold in gen_all_holds(hand):\r\n expect = expected_value(hold, num_die_sides, len(hand) - len(hold))\r\n if expect > max_value:\r\n max_value = expect\r\n max_hold = hold\r\n return (max_value, max_hold)", "title": "" } ]
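The positive passage leans on three helpers it never defines: score, gen_all_holds, and expected_value. A minimal, self-contained sketch of the whole pipeline, assuming the conventions the surrounding passages use (score keeps the best single-value total, holds are subsets of the hand, and the expectation is a brute-force average over every roll of the free dice; all of these are assumptions, since the original helpers are not shown here), could look like this in Python:

from itertools import combinations, product

def score(hand):
    # Assumed rule: best total from one repeated die value (matches several passages below).
    return max(hand.count(value) * value for value in set(hand)) if hand else 0

def gen_all_holds(hand):
    # Every distinct subset of the hand as a sorted tuple, including the empty hold.
    return {tuple(sorted(combo))
            for size in range(len(hand) + 1)
            for combo in combinations(hand, size)}

def expected_value(held_dice, num_die_sides, num_free_dice):
    # Brute-force average of score() over all equally likely rolls of the free dice.
    rolls = list(product(range(1, num_die_sides + 1), repeat=num_free_dice))
    return sum(score(held_dice + roll) for roll in rolls) / len(rolls)

def strategy(hand, num_die_sides):
    # Same contract as the positive passage: return (best expected value, best hold).
    best_value, best_hold = 0.0, ()
    for hold in gen_all_holds(hand):
        value = expected_value(hold, num_die_sides, len(hand) - len(hold))
        if value > best_value:
            best_value, best_hold = value, hold
    return best_value, best_hold

print(strategy((1, 1, 6), 6))  # prints the (expected score, hold) pair for a sample hand

Enumerating every hold and every roll is exponential in the number of free dice, which is fine for five six-sided dice but is the first thing to replace with a multinomial count if the hand grows.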
[ { "docid": "934bd0f032a9cd531682d68eed0c0178", "score": "0.74292517", "text": "def strategy(hand, num_die_sides):\r\n \r\n max_expected_value = 0\r\n optimal_hold = []\r\n #For a given hand, generate all possible holds\r\n possible_holds = gen_all_holds(hand)\r\n #For a given hold, generate the expected value - and record the highest\r\n for hold in possible_holds:\r\n num_free_dice = len(hand) - len(hold) #Can't assume it's always 5\r\n hold_expected = expected_value(hold, num_die_sides, num_free_dice)\r\n if hold_expected > max_expected_value:\r\n optimal_hold = []\r\n max_expected_value = hold_expected\r\n optimal_hold.append(hold)\r\n if hold_expected == max_expected_value:\r\n optimal_hold.append(hold)\r\n return(max_expected_value, random.choice(optimal_hold))", "title": "" }, { "docid": "27c058ac03e0b90339a6e6318b09d614", "score": "0.7044318", "text": "def expected_value(held_dice, num_die_sides, num_free_dice):\n \n _dice_values = [x for x in range(1, num_die_sides+1)]\n _seq = gen_all_sequences(_dice_values, num_free_dice)\n _expected_value = 0\n\n for _roll in _seq:\n _dices = held_dice + _roll\n _expected_value += score(_dices)\n \n return _expected_value / float(len(_seq))", "title": "" }, { "docid": "3a3f9d18698b1489147cfd02025476e6", "score": "0.6988765", "text": "def strategy(hand, num_die_sides):\n _holds = gen_all_holds(hand)\n _max_score = 0\n _best_hold = 0\n for _hold in _holds:\n _expected = expected_value(_hold, num_die_sides, len(hand) - len(_hold))\n if(_expected > _max_score):\n _max_score = _expected\n _best_hold = _hold\n\n return (_max_score, _best_hold)", "title": "" }, { "docid": "fae6cd68a3c68ea109f668dcdd7a4ad8", "score": "0.6954333", "text": "def expected_value(held_dice, num_die_sides, num_free_dice):\r\n\r\n #First we enumerate the possible outcomes for the free dice\r\n die_sides = []\r\n for side in range(1, num_die_sides + 1):\r\n die_sides.append(side)\r\n possible_outcomes = gen_all_sequences(die_sides, num_free_dice)\r\n #Then, we calculate the expected value for these outcomes\r\n #We must view them as possible hands by adding our held dice in\r\n #This is because score considers the entire hand, not just held dice\r\n running_total = 0.0\r\n for outcome in possible_outcomes:\r\n rolled_dice_list = list(outcome)\r\n rolled_dice_list.extend(held_dice)\r\n running_total += score(tuple(rolled_dice_list))\r\n expected_score = running_total / len(possible_outcomes)\r\n return expected_score", "title": "" }, { "docid": "682682f7dadcc558e13019f7ec97b295", "score": "0.67450947", "text": "def expected_value(held_dice, num_die_sides, num_free_dice):\r\n outcomes = range(1, num_die_sides + 1)\r\n all_sequences = gen_all_sequences(outcomes, num_free_dice)\r\n scores_sum = 0.0\r\n for sequence in all_sequences:\r\n scores_sum += score(held_dice + sequence)\r\n return scores_sum / len(all_sequences)", "title": "" }, { "docid": "8097146de96346e133aec83cf67cf84a", "score": "0.67205894", "text": "def strategy(hand, num_die_sides):\n sides = num_die_sides\n\n best_stgy, best_val = None, None\n for hold in gen_all_holds(hand):\n nfree = len(hand) - len(hold)\n val = expected_value(hold, sides, nfree)\n \n if best_stgy is None or best_val is None or val > best_val:\n best_stgy, best_val = hold, val\n\n return best_val, best_stgy", "title": "" }, { "docid": "0a0dd2566e8971a817945f36be65cd97", "score": "0.66150546", "text": "def expected_value(held_dice, num_die_sides, num_free_dice):\n exv = 0.0\n prob = (1./num_die_sides) ** num_free_dice # iid prob for each 
possible pair\n\n for pair in gen_all_sequences(range(1, num_die_sides+1), num_free_dice):\n hand = held_dice + pair\n pair_score = score(hand)\n exv += pair_score * prob\n\n return exv", "title": "" }, { "docid": "e146f1d65968e4c2a9540a27b86c8498", "score": "0.64042795", "text": "def upper_credible_choice(self):\n def lb(a,b):\n return a/(a+b) + 1.65*np.sqrt((a*b)/((a+b)**2*(a+b+1)))\n a = self.wins + 1\n b = self.trials - self.wins + 1\n return np.argmax(lb(a,b))", "title": "" }, { "docid": "d12bee86ebcff67401f723fb725d690b", "score": "0.60004026", "text": "def compute(self):\n dice = 100 * self.dice / self.n_updates # type: ignore\n best_sum_dice = dice[:]\n return round(torch.mean(best_sum_dice).item(), 2)", "title": "" }, { "docid": "34dfa4a397232bb1750efc16edce2ac2", "score": "0.5925156", "text": "def round(self) -> int:\n\n self._cup.release_all()\n has_ship = False\n has_captain = False\n has_crew = False\n \n # This will be the sum of the remaining dice, i.e., the score.\n crew = 0 \n self._cup.roll()\n \n # Repeat three times\n for chance in range(3):\n dievalues = [] # Dummy list which stores die values.\n \n for p in range(5):\n dievalues.append(self._cup._dices[p].get_value())\n print(dievalues)\n \n if not (has_ship) and (6 in dievalues):\n x=dievalues.index(6)\n self._cup.bank(x)\n has_ship = True\n \n else:\n \n if has_ship:\n pass\n else:\n self._cup.roll()\n \n if (has_ship) and not (has_captain) and (5 in dievalues):\n # A ship but not a captain is banked\n y = dievalues.index(5)\n self._cup.bank(y)\n has_captain = True\n \n else:\n if has_captain:\n pass\n else:\n self._cup.roll()\n \n if has_captain and not has_crew and (4 in dievalues):\n # A ship and captain but not a crew is banked\n z = dievalues.index(4)\n self._cup.bank(z)\n has_crew = True\n \n else:\n if has_crew:\n pass\n else:\n self._cup.roll()\n \n if has_ship and has_captain and has_crew:\n # Now we got all needed dice, and can bank the ones we like to save.\n \n if chance < 2:\n for i in range(5):\n if self._cup._dices[i].get_value()>3:\n self._cup.bank(i)\n else:\n self._cup.roll()\n \n elif chance == 2:\n \n for i in range(5):\n if self._cup.is_banked(i):\n pass\n else:\n self._cup.bank(i)\n # If we have a ship, captain and crew (sum 15)\n # calculate the sum of the two remaining.\n \n if has_ship and has_captain and has_crew:\n crew = sum(dievalues) - 15\n print(\"crew value:\",crew)\n return crew\n \n else:\n print(\"crew value:\",crew)\n return crew", "title": "" }, { "docid": "21e20be1326135be13e5f81570063e8d", "score": "0.59244996", "text": "def score_large_straight(dice: list[int]) -> int:\n roll = Counter(dice)\n if len(roll) == 5 and len({2, 3, 4, 5}.intersection(dice)) == 4:\n return 40\n return 0", "title": "" }, { "docid": "17dcdb869d551f09f1488dc30014d75c", "score": "0.5917717", "text": "def lower_credible_choice( self ):\n def lb(a,b):\n return a/(a+b) - 1.65*np.sqrt((a*b)/( (a+b)**2*(a+b+1)))\n a = self.wins + 1\n b = self.trials - self.wins + 1\n return np.argmax(lb(a,b))", "title": "" }, { "docid": "dbdc195089c49c8d57c48abfb51193d1", "score": "0.5901779", "text": "def calculate(self):\n for i in range(0, self.count):\n self.rolls[i] = random.randint(1, self.sides)\n self.result = sum(self.rolls) + self.bonus\n return self.result", "title": "" }, { "docid": "cad89f03a25bf3e76b7a3627b5adf6a3", "score": "0.5868658", "text": "def max_scoring_num_rolls(dice=six_sided):\n save_it = 0\n k=-1\n maximum = 0\n for i in range (1, 11):\n argum = make_averaged(roll_dice, i)\n save_it = argum(i, 
dice)\n if save_it > maximum:\n maximum = save_it\n k = i\n return k", "title": "" }, { "docid": "24c31759e12e1742a66d7737d139559d", "score": "0.586845", "text": "def BestRoll(self, dice) :\n return self.RollIdentifier.FirstMatch(dice, self.RollUnavailable)", "title": "" }, { "docid": "946aed428c769b84a603342f19118cd5", "score": "0.58642304", "text": "def score_full_house(dice: list[int]) -> int:\n roll = Counter(dice)\n if len(roll) == 2 and roll.most_common()[0][1] == 3:\n return 25\n return 0", "title": "" }, { "docid": "24bb5793c5f2b367999d86523b0c0685", "score": "0.58591646", "text": "def winorloss(self, d: 'Dealer'):\n if self.sum > 21:\n return -1\n if (self.sum < 22 and d.sum < 22 and self.sum < d.sum):\n if self.surrender == True:\n return -0.5\n else: return -1\n if self.sum == d.sum:\n return 0\n else:\n return 1", "title": "" }, { "docid": "46591b49a5a5738bcda36337aa007be3", "score": "0.58127534", "text": "def test_roll_of_die():\n die_type = 20\n num_die = 3\n modifier = 5\n\n result = die_roll(num_die, die_type, modifier)\n\n if 1 <= result <= (num_die * die_type) + modifier:\n return 1, result\n else:\n return 0, result", "title": "" }, { "docid": "28b07c3004d10933d3fb1b85c63eaee1", "score": "0.57804334", "text": "def calcMaxRolls(stance):\n flick, lash, deflect = changeStance(stance)\n attBonus = 90 # Attack bonus for the tent whip\n strBonus = 86 # Str bonus for the tent whip\n defBonus = 0 # Defence bonus for the whip\n lashMax = np.floor(0.5 + lash*(strBonus+64)/640)\n flickMax = flick*(attBonus+64)\n deflectMax = deflect*(defBonus+64)\n return(flickMax, lashMax, deflectMax)", "title": "" }, { "docid": "28cd2cd1b83e8be74c78b4c90122ff09", "score": "0.5780385", "text": "def getBottomValue(self):\n return self.dice[5]", "title": "" }, { "docid": "0dee44358267c3637999f7b799273f4c", "score": "0.57604134", "text": "def score_yahtzee(dice: list[int]) -> int:\n value, count = Counter(dice).most_common()[0]\n if count == 5:\n return 50\n return 0", "title": "" }, { "docid": "cc81ca9e926010a1ac09ad29f9466cf0", "score": "0.5755851", "text": "def ran_dice(min_dice, max_dice, luc, level):\r\n # dice_no is the faces of the dice\r\n dice_no = max_dice - min_dice + 1\r\n # acc_rate is the accumulation of the 'dice_rate's\r\n acc_rate = 0\r\n # max_luc is the largest value you can get when you are in level 'level'\r\n # the number 13 means that you can equip 13 items which have the luc affix\r\n # the formula below is the same as the one in the skill.py where the items are defined\r\n max_luc = int(level ** 3 /1500 + 10 + level) * 13\r\n\r\n # when the luc is 0, this is a normal dice, every number has the same base rate\r\n base_rate = [10000 / dice_no for _ in range(dice_no)]\r\n dice_rate = [0 for _ in range(dice_no)]\r\n\r\n # now change every number's rate from the big number to the small number\r\n for _ in range(dice_no-1, -1, -1):\r\n dice_rate[_] = base_rate[_] * (1 + luc / max_luc * ((dice_no - _ - 1) * 0.2 + 0.5))\r\n if acc_rate + dice_rate[_] >= 10000:\r\n dice_rate[_] = 10000 - acc_rate\r\n break\r\n else:\r\n acc_rate += dice_rate[_]\r\n\r\n for _ in range(1,dice_no):\r\n dice_rate[_] += dice_rate[_ -1]\r\n\r\n #print(dice_rate)\r\n r = random.randrange(1,10000)\r\n #print('raw r is:', r)\r\n for _ in range(dice_no):\r\n if r < dice_rate[_]:\r\n return min_dice + _\r\n\r\n raise ValueError('ran dice error!')", "title": "" }, { "docid": "be70afaaf7fd09a353927f74db720ab2", "score": "0.57523376", "text": "def roll_die(self):\r\n return randint(1, self.sides)", "title": "" 
}, { "docid": "24362cfcbe831d8dc0f17ae67772ceb9", "score": "0.57397056", "text": "def calculate_pool_reward(height: uint32) -> uint64:\n\n if height == 0:\n return uint64(int((7 / 8) * 21000000000 * _mojo_per_cryptodoge))\n elif height < 3 * _blocks_per_year:\n return uint64(int((7 / 8) * 2 * 10000 * _mojo_per_cryptodoge))\n elif height < 6 * _blocks_per_year:\n return uint64(int((7 / 8) * 1 * 10000 * _mojo_per_cryptodoge))\n elif height < 9 * _blocks_per_year:\n return uint64(int((7 / 8) * 0.5 * 10000 * _mojo_per_cryptodoge))\n elif height < 12 * _blocks_per_year:\n return uint64(int((7 / 8) * 0.25 * 10000 * _mojo_per_cryptodoge))\n elif height < 15 * _blocks_per_year:\n return uint64(int((7 / 8) * 0.125 * 10000 * _mojo_per_cryptodoge))\n elif height < 18 * _blocks_per_year:\n return uint64(int((7 / 8) * 0.0625 * 10000 * _mojo_per_cryptodoge))\n elif height < 21 * _blocks_per_year:\n return uint64(int((7 / 8) * 0.03125 * 10000 * _mojo_per_cryptodoge))\n elif height < 24 * _blocks_per_year:\n return uint64(int((7 / 8) * 0.015625 * 10000 * _mojo_per_cryptodoge))\n elif height < 27 * _blocks_per_year:\n return uint64(int((7 / 8) * 0.0078125 * 10000 * _mojo_per_cryptodoge))\n elif height < 30 * _blocks_per_year:\n return uint64(int((7 / 8) * 0.00390625 * 10000 * _mojo_per_cryptodoge))\n elif height < 99 * _blocks_per_year:\n return uint64(int((7 / 8) * 50 * _mojo_per_cryptodoge))\n else:\n return uint64(0)", "title": "" }, { "docid": "8ef498f85f2854580e66fc3f6570b5c5", "score": "0.57277966", "text": "def max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n # BEGIN Question 7\n largest, number=0,1\n for num_dice in range(1, 11):\n temp=make_averaged(roll_dice, num_samples)(num_dice, dice)\n if largest<temp:\n largest,number=temp,num_dice\n return number\n # END Question 7", "title": "" }, { "docid": "73a566693c784e27b2d88b53137bef51", "score": "0.569194", "text": "def roll(self):\n self.value = random.randint(1, 6)\n return self.value", "title": "" }, { "docid": "993e98300828a579859ebe9e2738e8a2", "score": "0.56875074", "text": "def roll(self):\n return random.randint(1, 6)", "title": "" }, { "docid": "bf7960bdd2ea5c0c4c9fe35f05cf5826", "score": "0.5676204", "text": "def gain4(self):\n GAIN_MAX = 5\n gain = 0\n opponent = 1 - self.game.is_playing\n for pit in range(6 * self.game.is_playing, 6 + 6 * self.game.is_playing):\n if self.game.b.get_pit(pit) > 0:\n gain += 1\n for pit in range(6 * opponent, 6 + 6 * opponent):\n if self.game.b.get_pit(pit) > 0:\n gain -= 1\n return gain / GAIN_MAX # 5 max score {+6 -1}", "title": "" }, { "docid": "2a850b4a58a18d9c40a38b6482af8bfb", "score": "0.5674815", "text": "def get_big_joker_value(deck):\n return max(deck)", "title": "" }, { "docid": "64a83ea2ee29b209f10804db702d42ec", "score": "0.56687504", "text": "def fixedroller(count, die):\n return count * max(1, die-2)", "title": "" }, { "docid": "5dbfa9d9f6cd579559775a20731e51a8", "score": "0.5647138", "text": "def picks_expected_survival(self, picks):\n cumulative_product, expected_survival = 1, 0\n for week_number, team in sorted(picks.items(), key=lambda wt: wt[0]):\n cumulative_product *= self.nth_week(week_number).team_win_probability(team)\n\n expected_survival += cumulative_product\n\n return expected_survival", "title": "" }, { "docid": "025139cff067228185708befcf0889fd", "score": "0.5632896", "text": "def roll(self):\n self.rolled = 
random.choice(range(1, 7))\n return self.rolled", "title": "" }, { "docid": "b49e9188ac09aaab601bb8c6300aab08", "score": "0.56298524", "text": "def roll_die(self):\n roll = randint(1, self.sides)\n return roll", "title": "" }, { "docid": "34773724ce0bbdf08fed2458b7e48d13", "score": "0.55939585", "text": "def test_dice_correct_total(self, dice):\n dice.roll()\n\n assert (\n dice.die1 + dice.die2 == dice.total\n ), \"The 'total' value for the dice roll does not match the value rolled\"", "title": "" }, { "docid": "b44927168ec838deb60de64477d9f6bc", "score": "0.55927753", "text": "def bacon_strategy(score, opponent_score, margin=8, num_rolls=5):\n # BEGIN Question 8\n bacon=free_bacon(opponent_score)\n if is_prime(bacon):\n bacon=next_prime(bacon)\n if bacon>=margin:\n return 0\n else:\n return num_rolls\n # END Question 8", "title": "" }, { "docid": "bea3d6bfc5a33d3525f33ea29065774d", "score": "0.55851626", "text": "def multiroll(self):\n if self.avg: # NPC rolls\n result = int(sum(num_faces / 2 + 0.5 for num_faces in self.num_faces))\n else:\n result = sum(random.randint(1, num_faces) for num_faces in self.num_faces)\n if self.crit:\n result *= 2\n return result + self.bonus", "title": "" }, { "docid": "c8959790720474b24ae07faa9f3ce446", "score": "0.5566394", "text": "def run(self) -> int:\n\n r = 0\n\n while self._round():\n r += 1\n\n return r * sum(u.hp for u in self.units if u.alive)", "title": "" }, { "docid": "a75dddc01806e7eff19d7011c9d92a97", "score": "0.5564739", "text": "def score(hand):\r\n \r\n running_maximum = 0\r\n for number in hand:\r\n max_for_number = number * hand.count(number)\r\n if max_for_number > running_maximum:\r\n running_maximum = max_for_number\r\n return running_maximum", "title": "" }, { "docid": "197ab20ce57a0a40a524e0e36c0b0a6c", "score": "0.5559285", "text": "def rollHaunt(self):\n hauntRoll = 0\n for i in range(6):\n hauntRoll += random.choice([0,1,2])\n \n if not self.sim:\n print(\"===================================\")\n print(\"|ROLL FOR THE HAUNT!!! 
SPOOooOOKYY|\")\n print(\"===================================\")\n print(\"| Roll Required: {} |\".format(self.omenCount))\n print(\"| Roll: {} |\".format(hauntRoll))\n \n if hauntRoll < self.omenCount:\n self.hauntRevealed = True\n if not self.sim:\n print(\"| THE HAUNT HAS BEGUN |\")\n else:\n if not self.sim:\n print(\"| YOU'RE SAFE THIS TIME |\")\n print(\"===================================\")\n return self.hauntRevealed", "title": "" }, { "docid": "6daa9909986e8755876c8b1c18a1682b", "score": "0.5547355", "text": "def get_small_joker_value(deck):\n return max(deck) - 1", "title": "" }, { "docid": "36aa7849fa8240b44c839c42e0f72317", "score": "0.5542459", "text": "def unfair_die():\n prob = []\n max_prob = 1\n for i in range(5):\n# print(\"i\", i)\n new_prob = random.uniform(0, max_prob)\n prob.append(new_prob)\n max_prob -= new_prob\n prob.append(max_prob)\n return prob", "title": "" }, { "docid": "4acc17ba71b4a64db10bc04d140a434c", "score": "0.5542212", "text": "def roll_die():\n return random.randint(1, 6)", "title": "" }, { "docid": "ffcd3170ae3bb473c8c1573d9fb890cf", "score": "0.5537093", "text": "def final_strategy(score, opponent_score):\n digits = [int(number) for number in str(opponent_score)] # intent to make opponent roll four die\n free_bacon = max(digits) + 1\n if (score + free_bacon + opponent_score)%7 == 0:\n return 0\n\n n = 5\n score_loop, opponent_score_loop = 90, 90\n\n while score_loop != 100: #this will check for one turn win by taking advantage of the free bacon rule\n if opponent_score >= opponent_score_loop and score >= score_loop:\n return 0\n score_loop += 1\n opponent_score_loop -= 10\n\n if score == opponent_score // 2 - 1:\n return 10\n\n if score < opponent_score // 2:\n n=4\n \n\n if (score + opponent_score) % 7 == 0:\n return 3\n\n if (score - opponent_score) >= 14: #win margin\n n = 4\n if (score - opponent_score) >= 35: #win margin\n n = 3 \n if (opponent_score - score) >= 20: #lose margin\n n = 7\n if (opponent_score - score) >= 40: #lose margin\n n = 8\n n = comeback_strategy(7, n)(score, opponent_score) #lose margin\n n = intent_opponent_fourdie(7,n)(score, opponent_score) \n return n", "title": "" }, { "docid": "04db558eed62d0fd590999d6321fef1f", "score": "0.55369866", "text": "def _determine_wealth(self):\n return self.value + self.inventory * self.current_price", "title": "" }, { "docid": "108415d841c93fac9fe5a01aec56d2c0", "score": "0.55281883", "text": "def test_dice_persistent_jail_count(self, dice):\n dice.roll()\n\n dice.jail_roll_count += 1\n dice.reset()\n\n assert dice.jail_roll_count > 0", "title": "" }, { "docid": "0b48a1668085dfb00564a36219335920", "score": "0.55246603", "text": "def max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n # BEGIN PROBLEM 9\n max_score = 0\n optimal_num = 0\n aver_func = make_averaged(roll_dice, num_samples)\n for num_rolls in range(1, 11):\n aver_score = aver_func(num_rolls, dice)\n if aver_score > max_score:\n max_score = aver_score\n optimal_num = num_rolls\n return optimal_num\n # END PROBLEM 9", "title": "" }, { "docid": "7607cc1f7b102250a56e3dc1844a60a3", "score": "0.5520256", "text": "def icosaroll(self):\n self.crit = 0\n if self.advantage == 0:\n return self._crit_check(random.randint(1, 20), verbose) + self.bonus\n elif self.advantage == -1: # AKA disadvantage\n return self._crit_check(sorted([random.randint(1, 20), random.randint(1, 20)])[0], verbose) + self.bonus\n elif self.advantage == 1:\n return self._crit_check(sorted([random.randint(1, 20), random.randint(1, 20)])[1], verbose) + 
self.bonus", "title": "" }, { "docid": "33265a149f884c64605788e1bfc1889a", "score": "0.5514582", "text": "def get_strategy_threshhold(self, episode):\n if self.fast_test:\n return 0.0\n else:\n return 1 - (float(episode) / self.num_episodes)", "title": "" }, { "docid": "d072b9321fb7cd9c48292ec87d3970ad", "score": "0.5514257", "text": "def bacon_strategy(score, opponent_score, margin=8, num_rolls=5):\n digits = [int(number) for number in str(opponent_score)]\n if max(digits) + 1 >= margin:\n return 0\n else:\n return num_rolls", "title": "" }, { "docid": "4a947ce490d767319651b76e182d69c9", "score": "0.5507996", "text": "def calcMaxRolls(stance,hasta):\n\t# the hasta argument tells us if the enemy has a hasta on when calculating our chance to hit\n\tAa,As,Ad = changeStance(stance)\n\tBa = 90 # attack slash bonus for whip\n\tBs = 86 # strength bonus for whip\n\tBd = 0 # defence slash bonus for whip/hasta\n\tif hasta:\n\t\tBd = 13\n\tAsMax = np.floor(0.5 + As*(Bs+64)/640)\n\tAaMax = Aa*(Ba+64)\n\tAdMax = Ad*(Bd+64)\n\treturn (AaMax,AsMax,AdMax)", "title": "" }, { "docid": "26403b5e9c1edea5ee0042d1f9816dc9", "score": "0.5482314", "text": "def roll():\r\n import random\r\n return random.randint(1,6)", "title": "" }, { "docid": "629a4829dac91664414edd0bb586e690", "score": "0.5469481", "text": "def calculate_score(dice):\n # version_1\n\n if len(dice) > 6:\n raise Exception(\"Cheating Cheater!\")\n\n counts = Counter(dice)\n\n if len(counts) == 6:\n return 1500\n\n if len(counts) == 3 and all(val == 2 for val in counts.values()):\n return 1500\n\n score = 0\n\n ones_used = fives_used = False\n\n for num in range(1, 6 + 1):\n\n pip_count = counts[num]\n\n if pip_count >= 3:\n\n if num == 1:\n\n ones_used = True\n\n elif num == 5:\n\n fives_used = True\n\n score += num * 100\n\n # handle 4,5,6 of a kind\n pips_beyond_3 = pip_count - 3\n\n score += score * pips_beyond_3\n\n # bug if 2 threesomes? 
Let's test it\n\n # 1s are worth 10x\n if num == 1:\n score *= 10\n\n if not ones_used:\n score += counts.get(1, 0) * 100\n\n if not fives_used:\n score += counts.get(5, 0) * 50\n\n return score", "title": "" }, { "docid": "23aa92840f6f771e1a33274c663d744b", "score": "0.5467169", "text": "def not_lose_percentage(self):\n if self.total_played > 0:\n return (float(self.wins) + float(self.ties)) / float(\n self.total_played)", "title": "" }, { "docid": "ac6f2f41187ab9d42c563d97536dc050", "score": "0.54648864", "text": "def calcHit(stance,hasta):\n\tAaMax,AsMax,AdMax = calcMaxRolls(stance,hasta)\n\tif AaMax > AdMax:\n\t\thitChance = (1 - (AdMax+2.)/(2*(AaMax+1)))\n\telse:\n\t\thitChance = AaMax/(2.*(AdMax+1))\n\troll = np.random.rand(1)[0]\n\n\tif roll < hitChance: # hit splash!\n\t\tdmg = np.random.randint(0,26,size=1)[0]\n\telse:\n\t\tdmg = 0\n\treturn dmg", "title": "" }, { "docid": "f03e889881d8c86f0f7d9bbc5ed4983f", "score": "0.5451264", "text": "def stockmax_greedy(items):\n profit = 0\n highest = 0\n for i in xrange(len(items) - 1, -1, -1):\n if items[i] > highest:\n highest = items[i]\n profit += highest - items[i]\n return profit", "title": "" }, { "docid": "e983830bdfa5bc893c9895456b2a3071", "score": "0.54427177", "text": "def pawn_height_proximity_value(state):\n if not state.inverse:\n lib = __MAXMINDICT__\n else:\n lib = __MAXMINDICT__2\n winning_state = BIG_NUMBER * lib[state.player]\n if state.isDone():\n return winning_state\n pawn_height_value = 0\n\n pawns = state.find_pawns()\n for space in pawns.values():\n let, num = space[0], space[1]\n pawn = state.board[let][num][1]\n if not pawn: warnings.warn('There should be a pawn here.') # should be redundant\n temp_value = 0\n temp_value += state.board[let][num][0] # add the current height of the pawn\n for move in state.getAllCurrentAvailableMoves():\n if move[0] != space: continue # this should be redundant\n dest = move[1]\n if not state.areAdjacent(space, dest): continue # this should be redundant\n height = state.board[dest[0]][dest[1]][0] # log the height of the adjacent space\n if height == 4: continue # should be redundant\n if height == 3: ## add - if you are on the 2nd level\n if pawn == state.player[0]:\n return winning_state -10 ## it is the current player's turn and an adjacent 3-space is open.\n else:\n temp_value += height * 1.5 # the value should be high, since the opposing player could win next turn. 
However, this does not account for the possibility of simply building a roof there.\n elif height > 0:\n temp_value += height\n else:\n continue # consider adding some reward here as well.\n if pawn != state.player[0]:\n temp_value *= -1\n pawn_height_value += temp_value\n return pawn_height_value", "title": "" }, { "docid": "f741e6cc97084d18f9e7d31d6f7578f0", "score": "0.54388833", "text": "def score_chance(dice: list[int]) -> int:\n return sum(dice)", "title": "" }, { "docid": "8e6cd638fe0efd1aefae9d3ade0ff5dd", "score": "0.5427929", "text": "def single_value_benefit(self, value, margin):\n\n\t\tself.margin = margin # set it this way, and it will recalculate q1 -> q4 only if it needs to\n\n\t\tvalue = value if not self.rollover or value >= self._q1 else value + self.rollover\n\t\tif self._q4 < self._q1:\n\t\t\tq1 = self._q1_rollover\n\t\t\tq2 = self._q2_rollover\n\t\t\tq3 = self._q3_rollover\n\t\t\tq4 = self._q4_rollover\n\t\telse:\n\t\t\tq1 = self._q1\n\t\t\tq2 = self._q2\n\t\t\tq3 = self._q3\n\t\t\tq4 = self._q4\n\n\t\tif q2 <= value <= q3: # if it's well in the window, benefit is 1\n\t\t\t# this check should be before the next one to account for always valid windows (ie, q1 == q2 and q3 == q4)\n\t\t\treturn self.max_benefit\n\t\tif value <= q1 or value >= q4: # if it's way outside the window, benefit is 0\n\t\t\treturn 0\n\n\t\tif q1 < value < q2: # benefit for ramping up near low flow\n\t\t\tslope = self.max_benefit / (q2 - q1)\n\t\t\treturn slope * (value - q1)\n\t\telse: # only thing left is q3 < flow < q4 - benefit for ramping down at the high end of the box\n\t\t\tslope = self.max_benefit / (q4 - q3)\n\t\t\treturn self.max_benefit - slope * (value - q3)", "title": "" }, { "docid": "4c7f157deacd2f3750faa33ef360588d", "score": "0.5427252", "text": "def max_mean(self):\n return np.argmax(self.wins / (self.trials +1))", "title": "" }, { "docid": "6c67127ca515bb45c4c95402ad4c2b40", "score": "0.53973615", "text": "def rollWithPenalties(self, dicePool, label=None):\n dicePool = max(0, dicePool + self.internalPenalties())\n return skillCheckByNumber(dicePool, label) # TODO: + self.magicalEffects['externalPenalty']", "title": "" }, { "docid": "a59ba9817f25b02a6fc16fc8ef10cc08", "score": "0.5390319", "text": "def test_softmax_total(self):\n result = softmax(self.scores)\n test_max = 0\n for score in self.scores:\n test_max += result[score]\n assert(type(result) == dict)\n assert(self.is_close_to(test_max))", "title": "" }, { "docid": "545b44f4d6f5c4b1a16674511b7cc50d", "score": "0.5385219", "text": "def bacon_strategy(score, opponent_score, margin=8, num_rolls=4):\n # BEGIN PROBLEM 10\n return 0 if free_bacon(opponent_score) >= margin else num_rolls # Replace this statement\n # END PROBLEM 10", "title": "" }, { "docid": "e375abf0d341d50f9f96abb89a000461", "score": "0.5381374", "text": "def roll_dice(self):\r\n self.roll = random.randrange(0, DICE_MAX)", "title": "" }, { "docid": "382bcc0cdd4c679197347936901e4d26", "score": "0.5370866", "text": "def roll_die(number_of_rolls: int, number_of_sides: int) -> int:\n total = 0\n if number_of_rolls <= 0 or number_of_sides <= 0:\n return 0\n else:\n for x in range(0, number_of_rolls):\n roll = random.randint(1, number_of_sides)\n total = total + roll\n return total", "title": "" }, { "docid": "b2795a2a873bf22a30e512d00fd4d0bf", "score": "0.5370421", "text": "def score_4_of_kind(dice: list[int]) -> int:\n value, count = Counter(dice).most_common()[0]\n if count >= 4:\n return sum(dice)\n return 0", "title": "" }, { "docid": 
"f0afbd4251a4c8d93c2261c4275ba472", "score": "0.5364565", "text": "def score_small_straight(dice: list[int]) -> int:\n for low in range(1, 4):\n if len(set(dice).intersection(range(low, low + 4))) == 4:\n return 30\n return 0", "title": "" }, { "docid": "66ed9ecbffe906ba3355ed15549881ef", "score": "0.5356562", "text": "def score(hand):\n max_score = 0\n for die in set(hand):\n max_score = max(hand.count(die) * die, max_score)\n\n return max_score", "title": "" }, { "docid": "e6ba2959bbffce6d1b5a978d99fcc624", "score": "0.5347574", "text": "def roll_dice(self):\n def roll_score():\n # Roll 4 dice and add the 3 highest\n rolls = (randint(1, 6) for i in range(4))\n score = sum(sorted(rolls)[-3:])\n return score\n scores = (roll_score() for i in range(6))\n return tuple(sorted(scores, reverse=True))", "title": "" }, { "docid": "68ce56466bcf71e1d5eed9b08cc785b3", "score": "0.5342731", "text": "def getUpperBound(self):\n\n bound = 0\n\n skillMap = {}\n for skill in list(Skill.Skill):\n skillMap[skill] = 0;\n for agent in self.agents:\n for skill, value in agent.skillList:\n skillMap[skill] += value\n\n\n sortedAdv = sorted(self.adventures, key=lambda x: x.reward, reverse= True)\n for adv in sortedAdv:\n if(sum(list(skillMap.values())) == 0):\n break\n totalPow = 0\n for skill, value in adv.skillMap.items():\n possiblePow = min(value, skillMap.get(skill))\n totalPow += possiblePow\n skillMap[skill] -= possiblePow\n \n bound += adv.reward*(totalPow/adv.totalPower())\n\n return bound", "title": "" }, { "docid": "9f80e3c8fb2d9b5912f2544978cbe837", "score": "0.5337343", "text": "def picks_win_probability(self, picks):\n return functools.reduce(operator.mul,\n (self.nth_week(week_number).team_win_probability(team)\n for week_number, team in picks.items()))", "title": "" }, { "docid": "b68aaa492ffc794193d021dcd9916e38", "score": "0.5336339", "text": "def roll_dice(held_dice: list) -> list:\r\n total_number_of_dice = 5\r\n number_of_dice_to_roll = total_number_of_dice - len(held_dice)\r\n\r\n for _ in range(number_of_dice_to_roll):\r\n roll = random.randint(1, 6)\r\n held_dice.append(roll)\r\n held_dice = sorted(held_dice)\r\n\r\n is_roll_yahtzee = is_yahtzee(held_dice)\r\n if is_roll_yahtzee:\r\n rolled_yahtzee()\r\n\r\n return held_dice", "title": "" }, { "docid": "d11ee8272146bbb254f78edf0a0805a9", "score": "0.5336147", "text": "def roll():\n return randint(1, 6) + randint(1, 6)", "title": "" }, { "docid": "502a383b5f4e079fd00cf99f3bdd2f84", "score": "0.53301203", "text": "def value(self):\n if not self.terminated:\n return 0 # Only terminal states have a value.\n if self.player < 1 or self.player > 21:\n return -1 # The player went bust, and lost.\n if self.dealer < 1 or self.dealer > 21:\n return +1 # The dealer went bust, so the player wins.\n if self.player > self.dealer:\n return +1\n if self.player == self.dealer:\n return 0\n if self.player < self.dealer:\n return -1", "title": "" }, { "docid": "702d741e5ace3ed8a5c03a670cc75cb1", "score": "0.5329933", "text": "def roll(self):\n\n \n return self.value", "title": "" }, { "docid": "5bd0e2192dd9cae8f64689e8b8a19e07", "score": "0.53235704", "text": "def roll(self, verbose=0):\n # THIS ASSUMES NO WEAPON DOES d20 DAMAGE!!\n # Dragonstar and Siege engines don't obey this.\n if not self.num_faces:\n raise Exception('A non-existant dice has been attempted to be rolled')\n # elif self.num_faces[0] == 20:\n elif self.critable:\n # the problem is crits and adv and only d20 can.\n # Nothing deals d20 damage, but someone might try.\n return 
self.icosaroll(verbose)\n else:\n return self.multiroll(verbose)", "title": "" }, { "docid": "db087ec6b5d0f3ff79d37d9fef4162e0", "score": "0.5320004", "text": "def get_roll_detailed(self) -> Mapping[int, Sequence[int]]:\n return {\n sides : [ int(math.copysign(random.randint(1, sides), numdice)) for _ in range(abs(numdice)) ]\n for sides, numdice in self._dicepool.items() if sides != 1\n }", "title": "" }, { "docid": "d9f34f036d4981bb33f90cb48b34ad4c", "score": "0.5316005", "text": "def roll(self, message):\n try:\n dice_spec = self.parse_dice(message.args)\n if dice_spec.error:\n return dice_spec.error\n dice = dice_spec.dice\n sides = dice_spec.sides\n except Exception:\n log.exception(\"bad dice\")\n return \"Error parsing input\"\n if dice > 25:\n return \"Too many variables in possibility space, abort!\"\n if sides > 20000000:\n return \"Sides of dice too small, can't see what face is upright!\"\n if sides == 1:\n return \"Oh look, they all came up ones. Are you surprised? I'm surprised.\"\n if sides < 1:\n return \"How do you make a dice with less than two sides?\"\n if dice < 1:\n return \"You want me to roll...less than one dice?\"\n\n if (\n dice_spec.threshold\n and dice_spec.lt_threshold\n and dice_spec.threshold > dice_spec.lt_threshold\n ):\n return \"Requirements unsatisfactory: thresholds conflict. Try again.\"\n\n rolls = self.get_rolls(dice, sides)\n rolls = list(map(lambda x: x + dice_spec.bonus, rolls))\n\n if dice_spec.explode:\n rolls = self.explode_dice(rolls, sides)\n\n log.debug(\"roll result: {}\".format(rolls))\n roll_list = \", \".join(map(str, rolls))\n if dice_spec.threshold or dice_spec.lt_threshold:\n gt = dice_spec.threshold\n lt = dice_spec.lt_threshold\n\n if not lt:\n success_count = len(list(filter(lambda x: x >= gt, rolls)))\n elif not gt:\n success_count = len(list(filter(lambda x: x <= lt, rolls)))\n else:\n success_count = len(\n list(filter(lambda x: (x <= lt and x >= gt), rolls))\n )\n\n roll_list += \" ({} successes)\".format(success_count)\n if dice_spec.show_sum:\n dice_sum = sum(rolls)\n roll_list += \" (sum {})\".format(dice_sum)\n return roll_list", "title": "" }, { "docid": "405270e1ba103e5d42a22d75115878d4", "score": "0.5312854", "text": "def roll_die():\r\n num = randint(0,0)\r\n return num", "title": "" }, { "docid": "4b81f66dd390b06a132f2be36713c5a1", "score": "0.53098506", "text": "def calc_loss_gain(self):\r\n\t\tstock_value_purchased = self.share_price_purchased * self.qty_of_shares\r\n\t\tstock_value_current = self.share_price_current * self.qty_of_shares\r\n\t\treturn round(stock_value_current - stock_value_purchased,2)", "title": "" }, { "docid": "a850b7c356d768df8f09849f61da5359", "score": "0.53083086", "text": "def calculate(self):\r\n sorted_outages = sorted(self.outages,\r\n key=lambda o: (o.duration,\r\n o.count,\r\n o.percentage_of_capacity_lost,\r\n sum(sum(map(ord, name))\r\n for name\r\n in o.allowed_months),\r\n o.allow_outage_overlap))\r\n for outage in sorted_outages[::-1]:\r\n self.seed += 1\r\n SingleOutageScheduler(outage, self).calculate()\r\n return self.total_losses", "title": "" }, { "docid": "1ae8f47be05f9cd7c12448e5ac5becff", "score": "0.52988297", "text": "def roll(self, *stats):\n stats = list(stats) # so that .remove() will work correctly (tuple is immutable)\n autoSuccesses = self.masteries.get(stats[1], 0)\n bonusDice = 0\n for trait in stats:\n if isinstance(trait, int):\n bonusDice += trait\n stats.remove(trait)\n continue\n if len(stats) > 1:\n if trait.lower() == 'willpower':\n self.temporaryWillpower 
-= 1\n autoSuccesses += 1\n stats.remove(trait)#remove from list\n if trait in self.virtues:\n self.temporaryWillpower -= 1 # even if this is a virtue channel it still takes 1wp\n self.channelVirtue(trait) # mark off virtue channel\n label = self.name + ': ' + reduce(lambda x,y: x+str(y) + \" \", stats, '')\n rolledSuccesses = skillCheckByNumber(self.sumDicePool(*stats) + bonusDice, label)\n return rolledSuccesses + autoSuccesses", "title": "" }, { "docid": "d107bd76c897d5c8f8c4bd575bc2b234", "score": "0.52855456", "text": "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN Question 1\n sum_dice = 0\n current_state = 0\n num_iterations = 0\n pig_out = 0\n while num_iterations < num_rolls:\n current_state = dice()\n if current_state != 1:\n sum_dice = sum_dice + current_state\n num_iterations += 1\n else:\n pig_out += 1\n num_iterations += 1\n if pig_out >= 1:\n sum_dice = 0\n return 0\n else:\n return sum_dice\n\n\n\n # END Question 1", "title": "" }, { "docid": "de6bcf80309dd4bfa736d1ca41d6cc49", "score": "0.5285081", "text": "def numRollsToTarget(self, d: int, f: int, target: int) -> int:\n dp = [[0 for col in range(target + 1)] for row in range(d + 1)]\n mod_val = 10**9 + 7\n dp[0][0] = 1 # Only 1 way to roll 0 dices to get to target of 0\n # One dice can reach a target as long as its <= faces.\n for col in range(1, min(f + 1, target + 1)):\n dp[1][col] = 1\n\n # General case fill up\n for row in range(2, d + 1):\n for col in range(2, target + 1):\n # Recurrence relation:\n # dp[row][col] = dp[row - 1][col - 1] .. dp[row - 1][col - f]\n dependency_sum = 0\n starting = max(col - f, 0)\n for cur_face in range(starting, col):\n dependency_sum += dp[row - 1][cur_face]\n dp[row][col] = dependency_sum % mod_val\n return dp[-1][-1]", "title": "" }, { "docid": "7f372a956e9bf6aa31a722a42fd0e2e7", "score": "0.5277992", "text": "def roll(self):\n self.value = random.randint(1,6)", "title": "" }, { "docid": "f543fea481d0533cd8d67ed1ac2086c2", "score": "0.5274608", "text": "def gen_all_holds(hand):\r\n return gen_all_holds_bigger_than(hand, 0)", "title": "" }, { "docid": "00a8937aea2e2e6a6d9e8cfe7849162a", "score": "0.5260388", "text": "def Q(self, state, action, U):\n\t\tif 'hold' == action:\n\t\t\treturn 1 - U(self.hold(state))\n\n\t\tif 'roll' == action:\n\t\t\treturn (\n\t\t\t\t\t1 - U( self.roll(state, 1)) + sum( U(self.roll(state, die)) for die in (2, 3, 4, 5, 6) )\n\t\t\t\t) / 6\n\n\t\traise ValueError('Illegal action {}'.format(action))", "title": "" }, { "docid": "58fdd197dc9615cad163f4edaffc8b08", "score": "0.525851", "text": "def war(self):\n score = [0, 0]\n\n for i in range(500):\n winner = self.combat()\n\n if winner is not None:\n score[winner] += 1\n\n if score[0] == score[1]:\n return None\n\n if score[0] > score[1]:\n return 0\n else:\n return 1", "title": "" }, { "docid": "62feed4d892f7138fad80e59ee29575a", "score": "0.52545035", "text": "def calcProceeds(finalSharePrice, allotment):\n return finalSharePrice * allotment", "title": "" }, { "docid": "a9b944e17667b65b33e1d0d65748b89d", "score": "0.52528226", "text": "def dice_count(self) -> int:\n pass", "title": "" }, { "docid": "ccba70d5812f7644432d35df62f200ec", "score": "0.5251676", "text": "def spareScore(self,rollIndex):\r\n return 10+ self.rolls[rollIndex+2]", "title": "" }, { "docid": "8d01d2e5d66127ad1a1d87683be30352", "score": 
"0.52472055", "text": "def calculate_desirability(self):\n print \"CrossboardEvaluator::calculate_desirability\"\n return 0.9", "title": "" }, { "docid": "ab48e06ef33c237236dd27de156fa40b", "score": "0.5246531", "text": "def roll_dice_computer():\n result = []\n x = random.randint(1, 4)\n for _ in range(x):\n dice_value = random.randint(1, 6)\n result.append(dice_value)\n print(result)\n return sum(result)", "title": "" }, { "docid": "77149e4bd83a7d8171484863a94af60c", "score": "0.52445495", "text": "def work(self):\n return random() <= self.reliability", "title": "" }, { "docid": "79fc6d662e891f90650b1e2076efc747", "score": "0.5242887", "text": "def badness(self):\n import math\n return max(abs(math.log10(self.k_ratio)), abs(math.log10(self.Keq_ratio)))", "title": "" }, { "docid": "dcfafdfd7c141e6bd792ad2546779250", "score": "0.5236508", "text": "def roll_die(self):\n\t\tself.value = random.choice(self.list_of_values)", "title": "" }, { "docid": "886662b7bfadbb4a877ce050fc765b66", "score": "0.5232028", "text": "def calculate_upper_bonus(scorecard: dict, current_player: str) -> int:\r\n if scorecard[current_player][\"Upper Subtotal\"] > UPPER_BONUS_THRESHOLD():\r\n return UPPER_BONUS_SCORE()\r\n else:\r\n return DEFAULT_SCORE()", "title": "" }, { "docid": "298819776c1e3058d4566e681bd5ac36", "score": "0.5231932", "text": "def eval_max_spend(simulation):\n return simulation.latest_trial.trial_df['spend'].max()", "title": "" }, { "docid": "4ec05b78d325851cba6268ec2dba0b0c", "score": "0.52295107", "text": "def round_roll(self):\n print(\"Everybody roll their dices...\")\n time.sleep(self.sleep_time)\n self.current_state_play = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}\n for player in self.players.keys():\n self.players[player].player_roll_result()\n for val in self.players[player].current_roll:\n self.current_state_play[val] += 1", "title": "" }, { "docid": "af6189190b2aa01fc65252b037731233", "score": "0.5228365", "text": "def combat(self):\n score = [0, 0]\n\n for i in range(randint(5, 10)):\n winner = self.duel()\n\n if winner is not None:\n score[winner] += 1\n\n if score[0] == score[1]:\n return None\n\n if score[0] > score[1]:\n return 0\n else:\n return 1", "title": "" } ]
dc1d4fe4f1d93ed1ddab5f76cd7ad3aa
Initialize SELF to the position depicted in the string STARTING_BOARD. STARTING_BOARD has the form "xxxxx\nxxxxx\nxxxxx..." where each x is either '.' or '', and the newlines separate rows. The number of x's in each row must be equal. Leading and trailing whitespace is ignored.
[ { "docid": "5174b15c020eecd0fb50dfaec66f2d75", "score": "0.72269595", "text": "def __init__(self, starting_board):\n\n # Represent a board as a list of lists of single characters, one list\n # per row, and within each row, one character per column. B0[r][c]\n # represents the current contents of row #R, column #C of the board,\n # numbering from 1. Row 1, column 1 is the upper left.\n # The first row (#0) and last (#self.H-1) row and first (#0) and last\n # and last (#self.W-1) column are added at initialization, and should\n # be kept permanently at '.' (this reduces the number of special\n # cases in the neighbor computation).\n # self.H: number of rows (including top and bottom \"desert\" rows)\n # self.W: number of columns (including left and right desert columns)\n # self.B0: current board.\n # self.B1: extra board.\n starting_board = starting_board.strip()\n if not re.match(r'[.*\\n]*$', starting_board):\n raise ValueError(\"board contains invalid characters\")\n\n A = starting_board.split('\\n')\n\n if len(A) == 0 or len(A[0]) == 0:\n raise ValueError(\"board too small\")\n self.H = len(A)+2\n self.W = len(A[0])+2\n\n if any(map(lambda row: len(row) != self.W-2, A)):\n raise ValueError(\"board is irregular\")\n\n self.B0 = [ ['.'] * self.W ] + \\\n [ ['.'] + list(s) + ['.'] for s in A ] + \\\n [ ['.'] * self.W ]\n self.B1 = [ ['.'] * self.W for i in range(self.H) ]", "title": "" } ]
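The load-bearing detail in the positive passage is its strip/validate/pad sequence, which rings the grid with a permanent '.' border so neighbor lookups need no edge cases. A standalone reduction of just that step (parse_board and the demo board string are illustrative choices, not names taken from the passage) looks like:

import re

def parse_board(starting_board):
    # Validate the grid, then pad it with one '.' sentinel row/column on every
    # side so later neighbor lookups never need edge special-casing.
    starting_board = starting_board.strip()
    if not re.match(r"[.*\n]*$", starting_board):
        raise ValueError("board contains invalid characters")
    rows = starting_board.split("\n")
    if not rows or not rows[0]:
        raise ValueError("board too small")
    width = len(rows[0])
    if any(len(row) != width for row in rows):
        raise ValueError("board is irregular")
    return ([["."] * (width + 2)]
            + [["."] + list(row) + ["."] for row in rows]
            + [["."] * (width + 2)])

for padded_row in parse_board("..*\n.*.\n*.."):
    print("".join(padded_row))

On the 3x3 demo input this prints a 5x5 grid whose outer ring is all '.', which is exactly the B0 shape the passage's H and W bookkeeping assumes.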
[ { "docid": "4410bf08e2a6e5a89d54d361679396f8", "score": "0.6600261", "text": "def __init__(self):\r\n self.board = [\" \", \" \", \" \", \r\n \" \", \" \", \" \", \r\n \" \", \" \", \" \"]", "title": "" }, { "docid": "ddec5ebac7407ba018aef650f5bb1f67", "score": "0.65312964", "text": "def __initialize_board(self):\n # TODO: google factory pattern\n board = [None] * self.game_size\n for row in range(self.game_size):\n board[row] = [\" \" for _ in range(self.game_size)]\n print(board)\n return board", "title": "" }, { "docid": "ff4f792af562487f706196fa0c07d9bd", "score": "0.63364005", "text": "def init_board():\n board = []\n for i in range(0,3):\n board.append([\".\",\".\",\".\"])\n \n return board", "title": "" }, { "docid": "bb7a83983247767349d150e3478af3c3", "score": "0.63261354", "text": "def __init__(self):\n self.board = []\n for i in range(BOARD_COLS):\n self.board.append([EM] * BOARD_ROWS)\n\n for x in range(BOARD_COLS):\n for y in range(BOARD_ROWS):\n self.board[x][y] = EM\n \n # Starting pieces:\n self.board[3][3] = X_PLAYER\n self.board[3][4] = O_PLAYER\n self.board[4][3] = O_PLAYER\n self.board[4][4] = X_PLAYER\n \n self.curr_player = X_PLAYER", "title": "" }, { "docid": "ab1a8c15e8dfd350dfe52ce73ecee660", "score": "0.6308195", "text": "def __init__(self):\r\n\r\n self._board = [[' ']*3 for j in range(3)]\r\n self._player = 'X'\r\n self._is_complete = False", "title": "" }, { "docid": "b3669da977bcfc7fcdec716995a3e548", "score": "0.63075465", "text": "def __initializeBoard(self) -> None:\n empty_board: List[List[List[Entity]]] = [\n [[] for _ in range(WindowConfig.TRUE_BOARD_WIDTH)]\n for _ in range(WindowConfig.TRUE_BOARD_HEIGHT)\n ]\n\n assert WindowConfig.BORDER_WIDTH == 1 # Pls no multi-width borders...\n\n rows_to_draw: List[int] = WindowConfig.getRowsToDrawHorizontals()\n\n # Non-corner Vertical borders\n right_border_x = WindowConfig.TRUE_BOARD_WIDTH - 1\n for y in range(1, WindowConfig.TRUE_BOARD_HEIGHT - 1):\n if (\n y in rows_to_draw[1:-1]\n ): # Bounds check? 0th and last rows use corner chars - not intersections\n # Draw an intersection\n empty_board[y][0].append(Borders.INTERSECT_LEFT)\n empty_board[y][right_border_x].append(Borders.INTERSECT_RIGHT)\n else:\n # Draw a vertical line\n empty_board[y][0].append(Borders.VERTICAL)\n empty_board[y][right_border_x].append(Borders.VERTICAL)\n\n # Non-corner Horizontal borders\n for x in range(1, WindowConfig.TRUE_BOARD_WIDTH - 1):\n for y in rows_to_draw:\n empty_board[y][x].append(Borders.HORIZONTAL)\n\n # Corners\n max_x = WindowConfig.TRUE_BOARD_WIDTH - 1\n max_y = WindowConfig.TRUE_BOARD_HEIGHT - 1\n empty_board[0][0].append(Borders.TOP_LEFT)\n empty_board[0][max_x].append(Borders.TOP_RIGHT)\n empty_board[max_y][0].append(Borders.BOT_LEFT)\n empty_board[max_y][max_x].append(Borders.BOT_RIGHT)\n\n self.empty_board = empty_board\n self.curr_board = copy.deepcopy(self.empty_board)\n self.next_board = copy.deepcopy(self.empty_board)", "title": "" }, { "docid": "b94354ddca416b06a736d3ed6f4702d2", "score": "0.62742704", "text": "def board_init(size):\n board = []\n for row in range(size):\n board.append((f\"{MARK_EMPTY} \"*size).split(' ')[:-1])\n return board", "title": "" }, { "docid": "340d88e74cde8b21b4805f603c9b3449", "score": "0.624443", "text": "def InitializeBoard(self):\r\n self.board = [] # 0 = vacio, 1 = jugador 1 (X), 2 = jugador 2 (O)\r\n for y in range(self.width):\r\n self.board.append([0] * self.height) # Genera una fila de 0s. 
Generando un tablero vacio\r", "title": "" }, { "docid": "3f2e110ccdc5141f6af8d013c8ac262f", "score": "0.62386054", "text": "def initiate_board(self) -> None:\n self.board = [[round(1 / (self.row * self.row), 3)] * self.column for _ in range(self.row)]\n self.ghost_position = Coordinate(random.randint(0, self.row - 1), random.randint(0, self.row - 1))", "title": "" }, { "docid": "b999830f9f77413c2c555ac26ac5b472", "score": "0.6236167", "text": "def show_board_pos(self):\n new_s = list(\"123456789\")\n for i in range(0, 8):\n if (self.board_content[i] != \" \"):\n new_s[i] = self.board_content[i]\n return \"\".join(new_s)", "title": "" }, { "docid": "22c851bc6d8fc93014641c41b52c168b", "score": "0.62000906", "text": "def init_board(self, reset=False):\n if reset:\n self.framecounter = 0\n\n # assigning the ground\n self.bufferboard[-3:, :] = \"g\" # config._ground\n if config.level == 2:\n self.bufferboard[-3:, 76:83] = \"\"\n self.bufferboard[-3:, 137:142] = \"\"\n\n # assigning mario\n # put it in config and import later\n\n #self.bufferboard[-6:-3,:3]= mario", "title": "" }, { "docid": "3362414f9a8a6de45ffcc10e474f2638", "score": "0.61996394", "text": "def createStartPosition(self, new_board):\n\n centre = int(self.board_size / 2)\n\n new_board[centre-1][centre-1] = 'b'\n new_board[centre][centre] = 'b'\n new_board[centre-1][centre] = 'w'\n new_board[centre][centre-1] = 'w'\n return new_board", "title": "" }, { "docid": "f57b475d8f7e26fa19b97f3823a0e58f", "score": "0.6155216", "text": "def create_initial_boardstate(self):\n # b = array.array('b',chr(0) * 100)\n # above makes an array, but this array is uncopyable, making it almost useless\n b = [Outer] * 100\n for sq in All_Squares:\n b[sq] = Empty\n## b[44] = White; b[45] = Black;\n## b[54] = Black; b[55] = White;\n b[44] = White; b[55] = White; b[66] = White;\n b[45] = Black; b[54] = Black; b[56] = Black;\n self._board = b\n\n self.to_move = Black # Black has the first move\n self._moves = self.calculate_legal_moves()\n self._utility = self.count_difference()", "title": "" }, { "docid": "f99d8671550f5787aa318cb5a3047303", "score": "0.6074681", "text": "def get_starting_board(self):\n return STARTING_BOARD\n pass", "title": "" }, { "docid": "8641a16b53c211323a34bbe446734889", "score": "0.6072525", "text": "def init_board(self):\n\n self.board = []\n for i in range(7):\n column = [0]*6\n self.board.append(column)\n self.column_stack = [5,5,5,5,5,5,5]", "title": "" }, { "docid": "384ff2a0d4dda905206526828dd61a45", "score": "0.60723317", "text": "def initalize_game():\n\n for b in range(9):\n board.insert(b,'#') \n \n assign_players()", "title": "" }, { "docid": "d36824e99713bb36e6442d6294876446", "score": "0.60682666", "text": "def __init__(self):\n self.board = [[''] * 3 for n in range(3)]", "title": "" }, { "docid": "2adb4cf4ba1b2d143b0427a3441eee30", "score": "0.6061845", "text": "def __init__(self):\r\n self._board = [[\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\r\n [\"\", \"\", \"B\", \"\", \"B\", \"\", \"B\", \"B\", \"B\", \"B\", \"B\", \"B\", \"B\", \"B\", \"\", \"B\", \"\", \"B\", \"\", \"\"],\r\n [\"\", \"B\", \"B\", \"B\", \"\", \"B\", \"\", \"B\", \"B\", \"B\", \"B\", \"\", \"B\", \"\", \"B\", \"\", \"B\", \"B\", \"B\", \"\"],\r\n [\"\", \"\", \"B\", \"\", \"B\", \"\", \"B\", \"B\", \"B\", \"B\", \"B\", \"B\", \"B\", \"B\", \"\", \"B\", \"\", \"B\", \"\", \"\"],\r\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", 
\"\", \"\", \"\", \"\", \"\", \"\"],\r\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\r\n [\"\", \"\", \"B\", \"\", \"\", \"B\", \"\", \"\", \"B\", \"\", \"\", \"B\", \"\", \"\", \"B\", \"\", \"\", \"B\", \"\", \"\"],\r\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\r\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\r\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\r\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\r\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\r\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\r\n [\"\", \"\", \"W\", \"\", \"\", \"W\", \"\", \"\", \"W\", \"\", \"\", \"W\", \"\", \"\", \"W\", \"\", \"\", \"W\", \"\", \"\"],\r\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\r\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\r\n [\"\", \"\", \"W\", \"\", \"W\", \"\", \"W\", \"W\", \"W\", \"W\", \"W\", \"W\", \"W\", \"W\", \"\", \"W\", \"\", \"W\", \"\", \"\"],\r\n [\"\", \"W\", \"W\", \"W\", \"\", \"W\", \"\", \"W\", \"W\", \"W\", \"W\", \"\", \"W\", \"\", \"W\", \"\", \"W\", \"W\", \"W\", \"\"],\r\n [\"\", \"\", \"W\", \"\", \"W\", \"\", \"W\", \"W\", \"W\", \"W\", \"W\", \"W\", \"W\", \"W\", \"\", \"W\", \"\", \"W\", \"\", \"\"],\r\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"]]\r\n self._game_state = \"UNFINISHED\"\r\n self._player_turn = \"B\"\r\n self._other_player = \"W\"\r\n self._direction = 0\r\n self._movesteps = 0\r\n self._ogTile = []\r\n self._nTile = []", "title": "" }, { "docid": "f86ccf5238971ce6077a47b2968c71e9", "score": "0.6042373", "text": "def __init__(self):\n self.board_game = {1: ' ', 2: ' ', 3: ' ', 4: ' ', 5: ' ', 6: ' ', 7: ' ',\n 8: ' ', 9: ' '}\n\n self.game_over = False", "title": "" }, { "docid": "1bc7814dfb77b1b25377dbbde147eac1", "score": "0.601296", "text": "def set_init_pos(self):\r\n is_looping = True\r\n for i in range(0, self.nbr):\r\n for j in range(0, self.nbc):\r\n if self.grid[i][j] == '$':\r\n self.pos = (i, j)\r\n is_looping = False\r\n break\r\n if not is_looping:\r\n break", "title": "" }, { "docid": "723cbb35b0363bd0965f3a295a3331f1", "score": "0.59923977", "text": "def test_game_initialisation():\n DRAUGHTS = dedent(\n \"\"\"\n .O.O.O.O\n O.O.O.O.\n .O.O.O.O\n O.O.O.O.\n ........\n ........\n .X.X.X.X\n X.X.X.X.\n .X.X.X.X\n X.X.X.X.\n \"\"\"\n ).strip()\n\n for game_layout in (DRAUGHTS, ASCII_START_BOARD):\n game = Game(initial=game_layout)\n assert game.display_board() == game_layout", "title": "" }, { "docid": "042009d54f3ef8741f5e08773337aed1", "score": "0.5977223", "text": "def test_board_displays_empty_output(self):\n output = str(self.board)\n # 10 empty rows\n EMPTY_BOARD_STR = \" \\n\" * 9 + \" \" * 10\n self.assertEquals(output, EMPTY_BOARD_STR)", "title": "" }, { "docid": "16d595870b00e75592d784eaebef457b", "score": "0.59158427", "text": "def create_new_board():\n return 
9 * [' ']", "title": "" }, { "docid": "12718902ad422bc04034a8cf49e1460a", "score": "0.5910333", "text": "def generate_board(self):\n board = [['.' for _ in range(self.board_size)] for _ in range(self.board_size)]\n board[self.food_pos[1]][self.food_pos[0]] = '0'\n\n for x, y in self.snake_pos:\n board[y][x] = 'X'\n\n return board", "title": "" }, { "docid": "3a961871a21c98e052c21b8f348d9fa3", "score": "0.59101385", "text": "def __init__(self, line_start: int, settings: SettingGrid) -> None:\n self.start: Tuple[int, int, int] # line_start, left_start, right_start\n self.end: Tuple[int, int, int] # line_end, left_end, right_end\n\n left_start = settings.HEX_LINE_SIZE + 1\n right_start = (\n settings.HEX_LINE_SIZE\n + 1\n + settings.ACTIVE_LINE_SIZE\n + 1\n + settings.HEX_LINE_SIZE\n + 1\n )\n # Last allowed sport line (-1 to account for 0 start of index)\n line_end = settings.NUM_OF_ROWS + line_start - 1\n # Last allowed place on left\n left_end = settings.HEX_LINE_SIZE + settings.ACTIVE_LINE_SIZE\n # Last allowed place on Right\n right_end = (\n settings.HEX_LINE_SIZE\n + 1\n + settings.ACTIVE_LINE_SIZE\n + 1\n + settings.HEX_LINE_SIZE\n + settings.ACTIVE_LINE_SIZE\n )\n self.start = line_start, left_start, right_start\n self.end = line_end, left_end, right_end\n # Player starts on the left\n self._line: int = self.start[0]\n self._place: int = self.start[1]", "title": "" }, { "docid": "fc8d8dcb02232cc0042f20bb38468fa4", "score": "0.58808774", "text": "def __init__(\n self, no_columns: int, no_rows: int, number_to_win: int\n ) -> None:\n self.no_columns = no_columns\n self.no_rows = no_rows\n self.game_board = [[\" \"] * no_columns for _ in range(no_rows)]\n self.number_to_win = number_to_win", "title": "" }, { "docid": "166cbc7807351158822fc49e05677974", "score": "0.5861234", "text": "def init_board(self, board_config):\r\n\r\n board = defaultdict(str)\r\n\r\n for row in range(self.size):\r\n for column in range(self.size):\r\n char = board_config[row][column] \r\n board[(column, row)] = char\r\n\r\n return board", "title": "" }, { "docid": "f1f42f431195cb27e2908a66dc748340", "score": "0.58502877", "text": "def init_board(self):\n # the cell ids are represented by tuples containing the coords in the\n # form of (x, y)\n self.cellids = [(i, n) for i in range(self.maxrows+1)\n for n in range(self.maxcols+1)]\n\n # Create snek\n self.midcol, self.midrow = self.maxcols // 2, self.maxrows // 2\n self.snek = Snek(self.cv, (self.midrow, self.midcol), self.cellsize)\n self.cv.pack()\n self.mainloop()", "title": "" }, { "docid": "3c1ce00b0b464ff5134213dd8e93a986", "score": "0.58401257", "text": "def __init__(self):\n self._board = [[None, None, None], [\n None, None, None], [None, None, None]]\n self.lastPosition = [None, None]\n self.lastMark = None\n self.rec_cur_player = [self.USER_MARK, self.COMPUTER_MARK]", "title": "" }, { "docid": "38131438a8b87cb6bda0656ed0cc8cca", "score": "0.5813561", "text": "def init_board():\n # initializes the game board\n board = np.zeros(51)#29\n board[1] = -15 #yellow starts top\n board[24] = 15 #black starts bottom\n return board", "title": "" }, { "docid": "dc071bb26a0f2a8119e752afff3de90a", "score": "0.5812955", "text": "def reset(self):\r\n\r\n self.board = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 0]])\r\n self.blank_position = [3,3]", "title": "" }, { "docid": "1191bd8b2960b1ab478e799f33ca3a01", "score": "0.58114195", "text": "def __init__(self):\n self.board = [['_' for _ in range(3)] for _ in range(3)]", "title": "" }, { "docid": 
"2e78de5fba8b8c4699836cccf63c741f", "score": "0.58040816", "text": "def setupBoard(gameDisplay, board, error):\n num_font = pygame.font.SysFont(\"Helvetica\", 50)\n for i in range(ROWS):\n for j in range(COLS):\n if board[j][i] != '.':\n num = board[j][i]\n num_text = num_font.render(str(num), True, BLACK)\n rect = num_text.get_rect(center=(40, 40)).move(i*80, j*80)\n gameDisplay.blit(num_text, rect)\n error_text = num_font.render(\"Errors left: \" + str(error), True, BLACK)\n error_rect = error_text.get_rect(center=(40, 40)).move(800, 300)\n gameDisplay.blit(error_text, error_rect)\n\n pygame.draw.line(gameDisplay, GRAY, (0, 80), (720, 80), 2)\n pygame.draw.line(gameDisplay, GRAY, (0, 160), (720, 160), 2)\n pygame.draw.line(gameDisplay, GRAY, (0, 320), (720, 320), 2)\n pygame.draw.line(gameDisplay, GRAY, (0, 400), (720, 400), 2)\n pygame.draw.line(gameDisplay, GRAY, (0, 560), (720, 560), 2)\n pygame.draw.line(gameDisplay, GRAY, (0, 640), (720, 640), 2)\n\n pygame.draw.line(gameDisplay, GRAY, (80, 0), (80, 720), 2)\n pygame.draw.line(gameDisplay, GRAY, (160, 0), (160, 720), 2)\n pygame.draw.line(gameDisplay, GRAY, (320, 0), (320, 720), 2)\n pygame.draw.line(gameDisplay, GRAY, (400, 0), (400, 720), 2)\n pygame.draw.line(gameDisplay, GRAY, (560, 0), (560, 720), 2)\n pygame.draw.line(gameDisplay, GRAY, (640, 0), (640, 720), 2)\n\n pygame.draw.line(gameDisplay, BLACK, (240, 0), (240, 720), 2)\n pygame.draw.line(gameDisplay, BLACK, (480, 0), (480, 720), 2)\n pygame.draw.line(gameDisplay, BLACK, (720, 0), (720, 720), 2)\n pygame.draw.line(gameDisplay, BLACK, (0, 240), (720, 240), 2)\n pygame.draw.line(gameDisplay, BLACK, (0, 480), (720, 480), 2)", "title": "" }, { "docid": "f89bf57af3483bfee72d54b95c09066d", "score": "0.57981145", "text": "def makeBoard(self):\n self.cells = []\n for row in range(self.numRows):\n self.cells.append([])\n for column in range(self.numColumns):\n self.cells[row].append(\"\")", "title": "" }, { "docid": "8a89775b4f9215785ec194b1aaaa8f50", "score": "0.5776249", "text": "def Board(text):\n rows = text.split()\n N = len(rows)\n rows = [BORDER*N] + rows + [BORDER*N]\n return ''.join(BORDER + row + BORDER for row in rows)", "title": "" }, { "docid": "3127c26032287ec22535f5b3e1f4dc94", "score": "0.5759603", "text": "def __init__(self):\n self._board = [0]", "title": "" }, { "docid": "2973d085a9c3144a398bbb9ca5e7a0c2", "score": "0.57540536", "text": "def __init__(self):\n self.spaces = [Property(\"Go!\", \"None\", -200)] #Assign the Go space to the first position\n self.count = 1 #Simple counter to keep track of the spaces on the board, starting with Go!", "title": "" }, { "docid": "6e4c714f2602e41c863bd164877c2e8d", "score": "0.5741818", "text": "def __init__(self):\r\n self._board_ = [0]", "title": "" }, { "docid": "f08cc411c3ea90b02aad021f61a67129", "score": "0.57195055", "text": "def _initialize_empty_points(self, board):\n for row in range(1, self.size + 1):\n start = self.row_start(row)\n board[start : start + self.size] = EMPTY", "title": "" }, { "docid": "1a02777ba552ff37cb5fd423c3cb129a", "score": "0.5719192", "text": "def _create_board(self):\n self.board = [[Cell(x=i, y=j, state=0) for i in range(self.width)]\\\n for j in range(self.height) ]", "title": "" }, { "docid": "9de837cb4d52959212b86527acbb7bb3", "score": "0.5704955", "text": "def __init__(self, board_size=15, win_count=5, current_player='black'): \n if str(current_player).lower().strip() != 'black' and \\\n str(current_player).lower().strip() != 'white':\n raise MyError('Wrong color.')\n else:\n 
self.__current_player = str(current_player).lower().strip()\n self.__win_count = int(win_count)\n self.__board_size = int(board_size)\n self.__go_board = [[' - ' for j in range(self.__board_size)] \\\n for i in range(self.__board_size)]", "title": "" }, { "docid": "fbb1b8d31eed91ed873c30b681f97d22", "score": "0.56952775", "text": "def __init__(self):\n empty_row = ['_', '_', '_']\n self.grid = [list(empty_row), list(empty_row), list(empty_row)]\n self.turn = 'X' # first player is 'X'", "title": "" }, { "docid": "51ccc2d44e1222d6f7c76ad405490115", "score": "0.5694497", "text": "def make_board(self):\r\n self.setpos(-380, -288)\r\n self.horizontal_line()\r\n self.setpos(-380, 295)\r\n self.horizontal_line()\r\n self.setheading(90)\r\n self.setpos(-395, -280)\r\n self.vertical_line()\r\n self.setpos(0, -280)\r\n self.vertical_line()\r\n self.setpos(388, -280)\r\n self.vertical_line()", "title": "" }, { "docid": "dbc579b150dc4f94d4596fcf7f35d727", "score": "0.56740934", "text": "def init_maze(self):\n index_x = index_y = 0\n with LEVEL1.open(\"r\") as file:\n content = file.readlines()\n\n for index_y, line in enumerate(content):\n for index_x, char in enumerate(line):\n position = (index_x, index_y)\n if char == \"H\":\n self.hero.position = position\n if char == \"G\":\n self.guardian = position\n if char in [\"0\", \"H\", \"G\"]:\n self.paths.append(position)\n elif char == \"X\":\n self.walls.append(position)\n\n self.width = index_x\n self.height = index_y", "title": "" }, { "docid": "654582e9b588d151a7281cd3624e119f", "score": "0.56707174", "text": "def reset_board(self):\r\n self.board = self.create_empty_board()", "title": "" }, { "docid": "6286db501650b199d0d8408b7792ab8a", "score": "0.5668249", "text": "def __init__(self,yLen, xLen):\n last = yLen*xLen\n lst = range(1, last + 1)\n self.board = subGroup(lst, xLen)\n self.YLEN = yLen\n self.XLEN = xLen\n self.yAxis = yLen - 1\n self.xAxis = xLen - 1\n #the blank is in the last position\n self.blank = Piece(\n self.yAxis,\n self.xAxis )\n self.board\\\n [self.yAxis]\\\n [self.xAxis] = 0\n\n self.moveMap = self.mapFunc()\n self.diff = self.diffMap()\n\n ##\n self.fastMap = self.fastKeyFun()", "title": "" }, { "docid": "744228302efc1ad714b3b5317831b5c6", "score": "0.56572986", "text": "def __init__(self):\n self._direction = 0\n # initial coordinates of each chunk of the snake\n self._coordinates = [[10 * self.GRID_SIZE, 10 * self.GRID_SIZE], [\n 11 * self.GRID_SIZE, 10 * self.GRID_SIZE], [12 * self.GRID_SIZE, 10 * self.GRID_SIZE]]", "title": "" }, { "docid": "5e56b604911f6f7301dba3b2714641bf", "score": "0.563858", "text": "def init_board():\r\n\r\n board_dict = {}\r\n\r\n # initial specified positions\r\n for i in [0,1,3,4,6,7]:\r\n\r\n # white token positions\r\n board_dict[(i,0)] = \"W 1\"\r\n board_dict[(i,1)] = \"W 1\"\r\n\r\n # back token positions\r\n board_dict[(i,6)] = \"B 1\"\r\n board_dict[(i,7)] = \"B 1\"\r\n\r\n return board_dict", "title": "" }, { "docid": "64ae7be5a2a3d3386a7ad29d4806595d", "score": "0.561808", "text": "def make_empty_board(board_size):\n \n return \"-\" * board_size * board_size", "title": "" }, { "docid": "034ddd46692901551d90e2086ae83db1", "score": "0.56174916", "text": "def __init__(self, x=7, y=6):\n self.columns = x\n self.rows = y\n self.board = [[False for row in range(y)] for col in range(x)]", "title": "" }, { "docid": "74e463f3932c2ef387f5baa01eeb9f67", "score": "0.56035334", "text": "def __init__(self):\n\n\t\tself.board = [\n\t\t\t\t\t\t[None, None, None],\n\t\t\t\t\t\t[None, None, 
None],\n\t\t\t\t\t\t[None, None, None]\n\t\t]", "title": "" }, { "docid": "31d9698e7c1e7b29a9b67f1f07946483", "score": "0.55855364", "text": "def createinitialboard(filename):\n\tfin = open(filename, 'r')\n\tfields = []\n\tfor line in fin:\n\t\trow = line.strip('\\n').split(',')\n\t\tfields = fields + row\n\t# replace the zeros with a field with all 9 options\n\tfor i, f in enumerate(fields):\n\t\tif f == '0':\n\t\t\tfields[i] = range(1,10) \n\t\telse:\n\t\t\tfields[i] = [int(f)]\t\n\tb = Board(0, fields)\n\tfin.close()\n\treturn b.sanitycheck()", "title": "" }, { "docid": "c3f4c08703d64c035bbf12e116b3fa65", "score": "0.55828065", "text": "def __str__(self):\n board=''\n for row in range(self.__height):\n for col in range(self.__width):\n cell = self.cell_content((row,col))\n if cell is None:\n board+=Board.LINE\n else:\n board+=cell\n if row==3 and col==6:\n board+='E'\n board+='\\n'\n return board", "title": "" }, { "docid": "76416994fc721029673525954062ebd3", "score": "0.55817115", "text": "def create_initial_level (level_string):\n start_board = board()\n for i in range(0, len(level_string), 4):\n object_string = level_string[i:i+4]\n start_board.add_block(string_to_new_object(object_string))\n return start_board", "title": "" }, { "docid": "25e75f62214edb751a4f22794a3045cd", "score": "0.55746555", "text": "def set_board(board):\n for x in range(ROWS):\n board.append([\"O\"] * COLUMNS)", "title": "" }, { "docid": "af66aca382f13da8f5842876ce3ea0a8", "score": "0.5563769", "text": "def generateBoard(self):\n new_board = []\n\n for _ in range(self.board_size):\n column = []\n for _ in range(self.board_size):\n column.append('x')\n \n new_board.append(column)\n \n new_board = self.createStartPosition(new_board)\n return new_board", "title": "" }, { "docid": "679e656d5dbce19df541f5c6287877eb", "score": "0.5562033", "text": "def __init__(self, columns=10, rows=20):\r\n self.columns = columns\r\n self.rows = rows\r\n self.board = [[0 for _ in range(columns)] for _ in range(rows)]\r\n self.current = None\r\n self.piece_next = None\r\n self.previous = None\r\n\r\n self.level = 1\r\n self.score = 0\r\n self.cleared_count = 0\r\n\r\n self.SCORE_MAP = {\r\n 0: 0,\r\n 1: 40 * self.level,\r\n 2: 100 * self.level,\r\n 3: 300 * self.level,\r\n 4: 1200 * self.level,\r\n }\r\n\r\n self.TO_STR_MAP = {\r\n 1: \"I\",\r\n 2: \"O\",\r\n 3: \"T\",\r\n 4: \"S\",\r\n 5: \"Z\",\r\n 6: \"J\",\r\n 7: \"L\"\r\n }\r\n\r\n self.bag = new_blocks()\r\n self.pull()", "title": "" }, { "docid": "c6c4cac53c6b65ad373582fe99badae4", "score": "0.5539497", "text": "def setup_board():\n\n return [['_', '_', '_'], ['_', '_', '_'], ['_', '_', '_']]", "title": "" }, { "docid": "47cf5fec063860f32e7e7d7e27743e29", "score": "0.5534116", "text": "def load_board(self, filename):\n self.board = Board()\n with open(filename, \"r\") as file:\n for text_line in file:\n text_line = text_line.strip()\n # check if line is information on board length or entrance\n if text_line.startswith(\"board\"):\n self.make_grid(text_line)\n elif text_line.startswith(\"entrance\"):\n self.make_entrance(text_line)\n return self.board", "title": "" }, { "docid": "861f99269845fcd267826494086985e0", "score": "0.5513883", "text": "def initial_state():\n board = [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]\n return board", "title": "" }, { "docid": "fc9f9a8f50871b5d0c5eec8c6f0f3803", "score": "0.55130273", "text": "def getNewBoard():\n board = {X_HOME: 7, X_GOAL: 0, O_HOME: 7, O_GOAL: 0}\n # Set each space as empty to start:\n for 
spaceLabel in ALL_SPACES:\n board[spaceLabel] = EMPTY\n return board", "title": "" }, { "docid": "8c7cb585926f9a734cf50bac619fe71b", "score": "0.550395", "text": "def __init__(self):\r\n self.board_content = {i: list() for i in range(self.COLUMNS)}\r\n self.__player = str(Game.PLAYER_ONE)\r\n self.last_disc_placed = None\r\n self.game_winner = False\r\n self.player_type = None\r\n self.ai = None", "title": "" }, { "docid": "2cead8de9096331d1eee24c3a242791b", "score": "0.5496545", "text": "def print_board(self):\n result = \"------- board -------\\r\\n\"\n for i in range(81):\n if i > 0 and i % 9 == 0:\n result += '\\r\\n'\n if i % 9 in (3, 6):\n result += '| '\n if i % 81 in (27, 54):\n result += '---------------------\\r\\n'\n result += str(self.board[i]) + ' '\n print(result)", "title": "" }, { "docid": "a2f3c1bc646881fb4b62af7f0d66404a", "score": "0.54917234", "text": "def get_blank_board():\n board = []\n for x in range(BOARDWIDTH):\n board.append([EMPTY_SPACE]*BOARDHIGHT)\n return board", "title": "" }, { "docid": "0fde80c9919b1b775659affceed1dedd", "score": "0.5480204", "text": "def init_board(self):\r\n self.pieces_dictionary = {\r\n 'a1': Rook('white'),\r\n 'b1': Knight('white'),\r\n 'c1': Bishop('white'),\r\n 'd1': Queen('white'),\r\n 'e1': King('white'),\r\n 'f1': Bishop('white'),\r\n 'g1': Knight('white'),\r\n 'h1': Rook('white'),\r\n 'a2': Pawn('white'),\r\n 'b2': Pawn('white'),\r\n 'c2': Pawn('white'),\r\n 'd2': Pawn('white'),\r\n 'e2': Pawn('white'),\r\n 'f2': Pawn('white'),\r\n 'g2': Pawn('white'),\r\n 'h2': Pawn('white'),\r\n 'a7': Pawn('black'),\r\n 'b7': Pawn('black'),\r\n 'c7': Pawn('black'),\r\n 'd7': Pawn('black'),\r\n 'e7': Pawn('black'),\r\n 'f7': Pawn('black'),\r\n 'g7': Pawn('black'),\r\n 'h7': Pawn('black'),\r\n 'a8': Rook('black'),\r\n 'b8': Knight('black'),\r\n 'c8': Bishop('black'),\r\n 'd8': Queen('black'),\r\n 'e8': King('black'),\r\n 'f8': Bishop('black'),\r\n 'g8': Knight('black'),\r\n 'h8': Rook('black'),\r\n }", "title": "" }, { "docid": "e36f1cbe4df7454b88ed9cdc90d3c664", "score": "0.54756266", "text": "def buildboard():\n board = [[EMPTY] * BOARD_SIZE for cell in range(BOARD_SIZE)]\n return board", "title": "" }, { "docid": "b8b919f8c7ede8e6b7ef80267a8c5300", "score": "0.54749745", "text": "def create_empty_board(self):\r\n board = []\r\n for row in range(self.LEN_ROW):\r\n board_row = []\r\n for col in range(self.LEN_COL):\r\n board_row.append(self.EMPTY_CELL)\r\n board.append(board_row)\r\n return board", "title": "" }, { "docid": "b3f175e33f4c34915381787887256bf9", "score": "0.5461488", "text": "def __init__(self, rows=6, cols=7):\n self.rows = rows\n self.cols = cols\n self.board = self.create_board(rows, cols)\n self.player = 'X'\n self.winner = None", "title": "" }, { "docid": "3cbac7eff06acc32720deaf93715613a", "score": "0.54582715", "text": "def setupPosition(self, position):\n count = 0\n x_index = 0\n y_index = 7\n field = 0\n for char in position:\n if field == 0:\n #Position\n if char in list('rnbqkpRNBQKP'):\n self[x_index,y_index] = char\n x_index += 1\n count += 1\n if count % 8 == 0:\n y_index -= 1\n x_index = 0\n elif char in list('12345678'):\n x_index += int(char)\n count += int(char)\n if count % 8 == 0:\n y_index -= 1\n x_index = 0\n elif char == '/':\n continue\n elif char == ' ':\n field += 1\n continue\n else:\n raise ValueError\n elif field == 1:\n #To-Move\n if char in list('wW'):\n self.to_move = 'w'\n elif char in list('bB'):\n self.to_move = 'b'\n elif char == ' ':\n field += 1\n else:\n raise ValueError\n elif field == 2:\n 
#Castling Rights\n if char == 'K':\n self.castle['w-king'] = True\n elif char == 'Q':\n self.castle['w-queen'] = True\n elif char == 'k':\n self.castle['b-king'] = True\n elif char == 'q':\n self.castle['b-queen'] = True\n elif char == '-':\n continue\n elif char == ' ':\n field += 1\n else:\n raise ValueError\n elif field == 3:\n #EnPassant Square\n if char == '-':\n self.ep = None\n elif char in list('abcdefgHABCDEFGH'):\n alg_col = char\n alg_col = ['a','b','c','d','e','f','g','h'].index(alg_col.lower())\n elif char in list('12345678'):\n alg_row = int(char)-1\n self.ep = (alg_col,alg_row)\n elif char == ' ':\n field += 1\n else:\n raise ValueError\n elif field == 4:\n #halfmove clock\n hm_l = []\n if char in list('0123456789'):\n hm_l.append(char)\n elif char == ' ':\n if hm_l:\n self.halfmove_count = int(''.join(map(str,hm_l)))\n else:\n self.halfmove_count = 0\n field += 1\n else:\n raise ValueError\n elif field == 5:\n #Fullmove Counter\n fm_l = []\n if char in list('0123456789'):\n fm_l.append(char)\n elif char == ' ':\n if fm_l:\n self.fullmove_count = int(''.join(map(str,fm_l)))\n else:\n self.fullmove_count = 1\n field += 1\n else:\n raise ValueError\n elif field == 6:\n return None\n else:\n raise Exception", "title": "" }, { "docid": "0babea0962a67aa4a790a96eec9f5b19", "score": "0.54574144", "text": "def draw_board(self):\n self._board = [[\" \"] * 9 for _ in range(10)]\n for j in range(10):\n if j < 3 or j > 6:\n self._board[j][3] = \"-----\"\n self._board[j][4] = \"-----\"\n self._board[j][5] = \"-----\"\n\n all_pieces = self.get_all_pieces()\n\n for type_of_item in all_pieces:\n for item in type_of_item:\n loc = item.get_location()\n loc_x = loc[0]\n loc_y = loc[1]\n symbol = item.get_id()\n self._board[loc_y][loc_x] = symbol # Rotation required to make it fit in console\n for i in self.get_board():\n print(i)", "title": "" }, { "docid": "6f51f47143f427435d15b15a2ee3a1c9", "score": "0.54344535", "text": "def init_board():\n\tx = [i for i in range(10)]*2 #generate 10 pairs of numbers\n\trandom.shuffle(x)\n\n\tboard_star = [['✶']*5 for i in range(4)] #output board\n\tboard_game = [['']*5 for i in range(4)] #logic board\n\n\tfor row in range(4):\n\t\tfor col in range(5):\n\t\t\tboard_game[row][col] = x.pop()\n\n\treturn (board_star, board_game)", "title": "" }, { "docid": "c2eaca3feebc02357220982f47a06632", "score": "0.5429345", "text": "def initial_each_step(board):\n\n positions = np.argwhere(board == 0)\n position = positions[npr.choice(positions.shape[0])]\n # initialize with 2 or 4 (in lower probability)\n board[position[0], position[1]] = 4 if npr.choice(100) > 79 else 2\n\n return board", "title": "" }, { "docid": "a939ad5bf45008398712d3681c8d616f", "score": "0.5427331", "text": "def setup_board():\n board = []\n\n for i in range(COLUMNS):\n board.append([])\n\n return board", "title": "" }, { "docid": "3b57799be655692a139ded0aa93d693f", "score": "0.5409583", "text": "def set_board(self, inputstr):\n self.win1.addstr(1, 1, inputstr)\n self.win1.border()\n self.win1.addstr(0,2,'Board')\n self.win1.refresh()", "title": "" }, { "docid": "a0e623525d2139968de331fdcccafd8c", "score": "0.5406264", "text": "def test_set_board(self):\n board_rule_test = BoardRules()\n board_rule_test.create_board()\n board_rule_test.setup_tile()\n new_input = board_rule_test.get_board_letters()\n new_input[7][7] = \"k\"\n board_rule_test.set_board_letters(new_input)\n output = board_rule_test.get_board_letters()\n self.assertEqual(output[7][7], \"k\")", "title": "" }, { "docid": 
"909d5f2ad750532bb9c6ff2f5580be24", "score": "0.5405144", "text": "def create_board(n):\n\n # internal methods\n\n def draw_top_border(columns):\n\n drawn_cols = 0\n str_top = '┌'\n\n while drawn_cols != columns:\n for j in range(0, 3):\n str_top += '─'\n drawn_cols += 1\n if columns == drawn_cols:\n str_top += '┐'\n else:\n str_top += '┬'\n\n return str_top\n\n def draw_bottom_border(columns):\n\n drawn_cols = 0\n str_bot = '└'\n\n while drawn_cols != columns:\n for j in range(0, 3):\n str_bot += '─'\n drawn_cols += 1\n if columns == drawn_cols:\n str_bot += '┘'\n else:\n str_bot += '┴'\n\n return str_bot\n\n def draw_middle_border(columns):\n\n drawn_cols = 0\n str_mid = '│'\n\n while drawn_cols != columns:\n for j in range(0, 3):\n str_mid += ' '\n str_mid += '│'\n drawn_cols += 1\n\n return str_mid\n\n def draw_cells_border(columns):\n\n drawn_cols = 0\n str_cells_b = '├'\n\n while drawn_cols != columns:\n for j in range(0, 3):\n str_cells_b += '─'\n drawn_cols += 1\n if columns == drawn_cols:\n str_cells_b += '┤'\n else:\n str_cells_b += '┼'\n\n return str_cells_b\n\n # code\n\n global board\n str_board = draw_top_border(n) + '\\n'\n for i in range(0, n-1):\n str_board += draw_middle_border(n) + '\\n'\n str_board += draw_cells_border(n) + '\\n'\n str_board += draw_middle_border(n) + '\\n'\n str_board += draw_bottom_border(n)\n board = str_board", "title": "" }, { "docid": "08b01d4463daab718d405acbbb6bfe06", "score": "0.5402012", "text": "def buildBlankBoard(self):\n # declare blank list\n arrayTiles = []\n # forloops to create an 2d array of blank tiles\n for i in range(0, 9):\n self.arrayTiles.append([])\n for j in range(0, 9):\n tileToAdd = Tile(i, j, 0)\n self.arrayTiles[i].append( tileToAdd )", "title": "" }, { "docid": "28b8d229c59f4a347fd90fa8ec008e91", "score": "0.53980505", "text": "def set_board(self, inputstr):\n print(inputstr)", "title": "" }, { "docid": "1a80dbe575f644d41e57ab90fda943c6", "score": "0.53868693", "text": "def __init__(self, boardFile):\n self.board = self.__createBoard(boardFile)", "title": "" }, { "docid": "3bb5217185df56fc4483fa1ace775708", "score": "0.53847647", "text": "def __init__(self):\n # You can define attributes like this:\n # self.value = 73 # an arbitrary number\n # reassign it to a string (variable type is dynamic in Python)\n # self.value = \"some string\"\n # self.foo = [] # create an empty list\n self._location=[None,None]\n self._board=[[0] * ylim for _ in range(xlim)]\n self._board[-1][-1] = 1", "title": "" }, { "docid": "bdde075dbda94cd215b175749db4238e", "score": "0.5382538", "text": "def __init__(self, length, width):\r\n if length == 0:\r\n return \r\n last_cellnum = width*length \r\n self.length=length\r\n self.width=width\r\n\r\n self.cells = [ CellMove(i) for i in range( last_cellnum ) ]\r\n cellnums = [[i, int((i / width)+1 ) % 2] for i in range( last_cellnum ) ] #kkk in range( length * width)\r\n \r\n def nextcell(curcell,delt):\r\n \"\"\" returns a cell number that is available for a pawn to move.\r\n From one cell a pawn can move on two cells in next row. If the current cell is at edge of the board\r\n it can go to one new cell only. 
Last row cannot move fwd ofc.\r\n \"\"\"\r\n if curcell == None:\r\n return\r\n retval = curcell+(width-1)+delt + cellnums[curcell][1]\r\n if ((retval<last_cellnum) and (cellnums[curcell][1] != cellnums[retval][1] )): \r\n return retval\r\n else:\r\n return \r\n\r\n def fixcell(i,delt):\r\n \"\"\" calculate all posible moves for a pawn forward and backward (for the king)\r\n moves can be a step, jump. it stores a number tha is jumped over (enemy piece)\r\n \"\"\"\r\n nc = nextcell(i,delt)\r\n if nc != None: \r\n self.cells[i].lastLine = False \r\n self.cells[i].stepFw.append(nc)\r\n self.cells[nc].stepBk.append(i)\r\n jc = nextcell(nc,delt)\r\n if jc != None:\r\n self.cells[i].jumpFw.append(jc)\r\n self.cells[i].jumpFwOver.append(nc)\r\n self.cells[jc].jumpBk.append(i)\r\n self.cells[jc].jumpBkOver.append(nc) \r\n\r\n for i in range( last_cellnum ): # A piece can move fwd in two directions \r\n fixcell(i,0)\r\n fixcell(i,1)", "title": "" }, { "docid": "cd6abd02a20b911c815fa326cd9e62c9", "score": "0.5379059", "text": "def __str__(self):\n board = [[' ' for _ in range(self.get_cols())]\n for _ in range(self.get_rows())]\n\n cells = self.mino.get_cells()\n\n for mino in self.stable_minos:\n cells += mino.get_cells()\n\n for mino in self.moving_minos:\n cells += mino.get_cells()\n\n for col, row in cells:\n board[row][col] = '#'\n return '\\n'.join(''.join(row) for row in board)", "title": "" }, { "docid": "a0972e0b8579b811319037e37f60de4c", "score": "0.5376902", "text": "def populate_board(self):\n self.board = [\n [\n Rook(owner='white', position=(0,0)),\n Knight(owner='white', position=(1, 0)),\n Bishop(owner='white', position=(2,0)),\n Queen(owner='white', position=(3,0)),\n King(owner='white', position=(4,0)),\n Bishop(owner='white', position=(5,0)),\n Knight(owner='white', position=(6,0)),\n Rook(owner='white', position=(7,0)),\n ],\n [Pawn(owner='white', position=(i,1)) for i in range(8)],\n *[[None] * 8 for _ in range(4)],\n [Pawn(owner='black', position=(i,6)) for i in range(8)],\n [\n Rook(owner='black', position=(0,7)),\n Knight(owner='black', position=(1, 7)),\n Bishop(owner='black', position=(2,7)),\n Queen(owner='black', position=(3,7)),\n King(owner='black', position=(4,7)),\n Bishop(owner='black', position=(5,7)),\n Knight(owner='black', position=(6,7)),\n Rook(owner='black', position=(7,7)),\n ],\n ]\n\n self.char_board = []\n for row in self.board:\n disp_row = []\n for piece in row:\n if piece:\n disp_row.append(piece.cli_characterset)\n else:\n disp_row.append(None)\n self.char_board.append(disp_row)\n\n\n for i, row in enumerate(self.board):\n self.pieces_in_play['white'].extend([piece for piece in row if i < 2])\n self.pieces_in_play['black'].extend([piece for piece in row if i > 5])\n\n # Add reference to the kings for both players\n self.w_king = self.board[0][4]\n self.b_king = self.board[7][4]", "title": "" }, { "docid": "1bdc38d5972ad5deef144cd8d9dd77d0", "score": "0.5365361", "text": "def format_board(s):\n\n # If the length of the string is not 9\n if (len(s) != 9):\n # Then print out an error message\n print(\"Error: there should be 9 symbols.\")\n # Throw an error\n raise Exception\n\n # Draw the grid board\n # print(\"|1|2|3|\")\n # print(\"|4|5|6|\")\n # print(\"|7|8|9|\")\n return (\"|\" + s[0] + \"|\" + s[1] + \"|\" + s[2] + \"|\\n\"\n + \"|\" + s[3] + \"|\" + s[4] + \"|\" + s[5] + \"|\\n\"\n + \"|\" + s[6] + \"|\" + s[7] + \"|\" + s[8] + \"|\\n\")", "title": "" }, { "docid": "3156013aeb8c9651b64dab93b0aaa0f4", "score": "0.5362123", "text": "def 
init_board(n):\n board = []\n [board.append([]) for i in range(n)]\n [row.append(' ') for i in range(n) for row in board]\n return (board)", "title": "" }, { "docid": "88b452925ee7041354a212920e19e6bb", "score": "0.5361118", "text": "def make_grid(self, text_line):\n coordinate = {}\n text_line = text_line.rsplit()\n self.board.length = int(text_line[1])\n # create a coordinate system of (x, y), coupled to a dictionary\n for y in range(self.board.length):\n for x in range(self.board.length):\n # set dictionary value to zero(unoccupied )\n coordinate[x, y] = 0\n self.board.grid = coordinate", "title": "" }, { "docid": "45d59e297b868748720c42c233fea4e2", "score": "0.53586584", "text": "def new_blank_board():\n\n return [\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0]\n ]", "title": "" }, { "docid": "872a1b635693743087da6f441278c2d3", "score": "0.53572714", "text": "def draw_board(self) -> str:\n top_row = \"\"\n cols = \"\"\n for i in range(self.size):\n top_row += \" \" + str(i)\n cols += str(i)\n for j in range(self.size):\n cell = self.board[i, j]\n if cell != 0:\n val = Player(cell).name\n else:\n val = \" \"\n cols += val + \" \"\n cols += \"\\n\"\n return top_row + \"\\n\" + cols", "title": "" }, { "docid": "054e5779d06efa2f32e7431f971fe507", "score": "0.53549135", "text": "def print_board(self):\n norm_line = \"|---|---|---|---|---|---|---|---|\"\n print(norm_line)\n for j in range(self.HEIGHT):\n if j % 2 == 1:\n temp_line = \"|///|\"\n else:\n temp_line = \"|\"\n for i in range(self.WIDTH):\n temp_line = temp_line + \" \" + self.get_symbol([j, i]) + \" |\"\n if i != 3 or j % 2 != 1: # should figure out if this 3 should be changed to self.WIDTH-1\n temp_line = temp_line + \"///|\"\n print(temp_line)\n print(norm_line)", "title": "" }, { "docid": "0bf531c75e2a62c505b76f26e2d8bf6b", "score": "0.53546554", "text": "def getBlankBoard():\n board = []\n for i in range(BOARDWIDTH):\n board.append([BLANK] * BOARDHEIGHT)\n return board", "title": "" }, { "docid": "d2528cbd5503d9e93e50d44553e30f1d", "score": "0.5352967", "text": "def reset(self):\n self.board = [[0 for col in range(WIDTH)] for row in range(HEIGHT)]", "title": "" }, { "docid": "f303059cb68c75bf0c76fa6f5aaa6e96", "score": "0.53396267", "text": "def test_board_parsing():\n board = Board()\n\n board.set_p(5, 4, Board.PIECE_BLACK)\n\n assert(p(\"X------X\",\n \"--------\",\n \"--------\",\n \"--------\",\n \"-----@--\",\n \"--------\",\n \"--------\",\n \"X------X\") == board.to_token_string())\n\n layout1 = p(\"X------X\",\n \"--------\",\n \"-----O--\",\n \"----@O--\",\n \"------O-\",\n \"-----O@-\",\n \"-------@\",\n \"X------X\")\n board1 = Board.from_token_string(layout1)\n assert(board1.to_token_string() == layout1)\n\n layout2 = p(\"########\",\n \"########\",\n \"##X-OX##\",\n \"##-O@-##\",\n \"##@O--##\",\n \"##XO-X##\",\n \"########\",\n \"########\")\n board2 = Board.from_token_string(layout2)\n assert board2.board_size == 4", "title": "" }, { "docid": "c5d33575c321ce94d960739d866adbdc", "score": "0.53290343", "text": "def get_pretty_board(self, board):\n board = board[START:END].replace(\"??\", \"\\n\")\n return board\n pass", "title": "" }, { "docid": "896b31667cf763cf4604e77a56a415a8", "score": "0.5318555", "text": "def __init__(self, p1_starts: bool) -> None:\n self.is_p1_turn = p1_starts\n 
self.board = int(input('choose the size of the board: '))\n cells = dict_rows(self.board)\n hlm = ley_lines_horizontal(self.board)\n vlm = ley_lines_vertical(self.board)\n self.current_state = StoneHengeState(self.is_p1_turn, self.board, cells,\n hlm, vlm)", "title": "" }, { "docid": "b55fbc755d72f09fcbecc2fda1b48b32", "score": "0.52889293", "text": "def __init__(self, board_width, board_height):\n self.board_width = board_width\n self.board_height = board_height\n self._board_data = [{u'piece': None, u'disabled': False} for x in range(board_width*board_height)]", "title": "" }, { "docid": "a7531fb034f1e020905b7c4b57475d3b", "score": "0.5288234", "text": "def on_board(cls, start, end, b):\n castle = is_castle(b, start, end)\n if castle:\n if castle[1] == kingside:\n return cls(start, end, \"0-0\", castle)\n else:\n return cls(start, end, \"0-0-0\", castle)\n p = b[start]\n if p is None:\n return cls(start, end, \"invalid\", None)\n p_char = p.piece\n capture = \"\"\n if b[end] is not None:\n if p_char == \"p\":\n p_char = _file_str(start)\n capture = \"x\"\n elif p_char == \"p\":\n p_char = \"\"\n end_str = location_str(end)\n if p.piece != \"p\":\n disambig_from = []\n for loc in b.find(p):\n if loc == start:\n continue\n if _move_is_valid(b, loc, end):\n disambig_from.append(loc)\n ranks = [x[0] for x in disambig_from]\n files = [x[1] for x in disambig_from]\n if not disambig_from:\n disambig = \"\"\n # Check if all files are distinct\n elif start[1] not in files:\n disambig = _file_str(start)\n elif start[0] not in ranks:\n disambig = _rank_str(start)\n else:\n disambig = location_str(start)\n else:\n disambig = \"\"\n after_move = b.apply(cls(start, end, \"test-check-move\", None))\n if p.color == white:\n opposite_color = black\n else:\n opposite_color = white\n if in_checkmate(after_move, opposite_color):\n check_str = \"#\"\n elif in_check(after_move, opposite_color):\n check_str = \"+\"\n else:\n check_str = \"\"\n return cls(\n start, end,\n p_char + disambig + capture + end_str + check_str,\n None)", "title": "" }, { "docid": "5ffa04e5cceaa3f2f787c8b33672409b", "score": "0.5270963", "text": "def __str__(self):\r\n board_str = \"\"\r\n board = self.board\r\n for i in range(self.LEN_ROW):\r\n for j in range(self.LEN_COL):\r\n if j == self.LEN_COL - 1:\r\n board_str += board[i][j] + \"\\n\"\r\n else:\r\n board_str += board[i][j] + \" \"\r\n return board_str", "title": "" }, { "docid": "cd478626a809ed56a0afb6040e0654c7", "score": "0.5269127", "text": "def reset(self):\n self.board = [0 for i in range(9)]", "title": "" }, { "docid": "c979e8fbe514e245fcb5c7a9b57ef848", "score": "0.5268792", "text": "def print_board(self):\n print(f\"Level: {self.snake.level}\")\n for y in range(self.board.height):\n print(\" \", end=\"\")\n for x in range(self.board.width):\n node_found = False\n\n for snakeNode in AllSnakeNodes:\n if snakeNode.X == x and snakeNode.Y == y:\n node_found = True\n if snakeNode.is_head:\n print(\"S\", end=\" \")\n else:\n print(\"s\", end=\" \")\n break\n\n if not node_found:\n lookup = self.board.pos_lookup(x, y)\n if lookup == A:\n print(\".\", end=\" \")\n elif lookup == O:\n print(\"X\", end=\" \")\n elif lookup == F:\n print(\"*\", end=\" \")\n else:\n print(str(self.board.pos_lookup(x, y)), end=\" \")\n print()\n print()", "title": "" }, { "docid": "13f13679f12ef33cba3300196b7a639e", "score": "0.5264176", "text": "def displayBoard(board):\n # \"Clear\" the screen by printing many newlines, so the old\n # board isn't visible anymore.\n print('\\n' * 60)\n\n 
xHomeTokens = ('X' * board[X_HOME]).ljust(7, '.')\n xGoalTokens = ('X' * board[X_GOAL]).ljust(7, '.')\n oHomeTokens = ('O' * board[O_HOME]).ljust(7, '.')\n oGoalTokens = ('O' * board[O_GOAL]).ljust(7, '.')\n\n # Add the strings that should populate BOARD_TEMPLATE in order,\n # going from left to right, top to bottom.\n spaces = []\n spaces.append(xHomeTokens)\n spaces.append(xGoalTokens)\n for spaceLabel in ALL_SPACES:\n spaces.append(board[spaceLabel])\n spaces.append(oHomeTokens)\n spaces.append(oGoalTokens)\n\n print(BOARD_TEMPLATE.format(*spaces))", "title": "" } ]
1ad7eec27d6dcf86ec7bfe3e8c287c14
recv_directions should return None on fail
[ { "docid": "9e9e5b3274c5bd1868f8bedfaf2f99dc", "score": "0.76010746", "text": "def test_recv_directions_fail():\n string = \"CHAMPLFLFLFL\"\n result = io.recv_directions(string)\n assert_equals(result, None)", "title": "" } ]
[ { "docid": "83eebef10ad9f2d31ef09830ac9b3935", "score": "0.6898982", "text": "def test_recv_robot_info_fail():\n string = \"CHAMP 32 21 W\"\n result = io.recv_directions(string)\n assert_equals(result, None)", "title": "" }, { "docid": "8bb3f0d680ee7375c96d32094a0b36ce", "score": "0.6490652", "text": "def test_recv_directions():\n string = \"LLLRRFFF\"\n result = io.recv_directions(string)\n assert_equals(result.group(), string)", "title": "" }, { "docid": "cd0dad7052e95b413d65b609a87c58cb", "score": "0.58941054", "text": "def recv(self, ):\n\t\tpass", "title": "" }, { "docid": "49e8cbef35425bc60eed907269058fac", "score": "0.589189", "text": "def recv(self):\n print(\"Move recieved\")\n message = self.s.recv(1024)\n return message", "title": "" }, { "docid": "4b255a5c162cb81e8e91232b4dd42009", "score": "0.5708596", "text": "def wait_to_route(src: LightningRpc, dest: LightningRpc, msatoshi: int) -> None:\n found = False\n while not found:\n try:\n src.getroute(node_id=get_id(dest), msatoshi=msatoshi, riskfactor=1)\n found = True\n except RpcError as e:\n assert e.error[\"message\"] == \"Could not find a route\", e\n time.sleep(2)", "title": "" }, { "docid": "7027ea4aa39d5d917c777a85625ebe75", "score": "0.5676847", "text": "def recv(self, length):", "title": "" }, { "docid": "4e1399c124fafbb5a8b0511866c2c38f", "score": "0.55815125", "text": "def route(self, packets):\r\n pass", "title": "" }, { "docid": "a8f384e0f732a8696510a3ecb3127aff", "score": "0.55463505", "text": "def test_get_direction_returns_none_for_small_samples(self):\n\n self.assertIsNone(Direction.get_direction([1], [1]))\n self.assertIsNone(Direction.get_direction([1, 2], [1, 2]))", "title": "" }, { "docid": "83ddbb47629d1ba6ce7fbf1c2dad6b9a", "score": "0.54526734", "text": "def request_route_to_server(origin, destination):\n try:\n url = \"http://osrm.gti-ia.upv.es/route/v1/car/{src1},{src2};{dest1},{dest2}?geometries=geojson&overview=full\"\n src1, src2, dest1, dest2 = origin[1], origin[0], destination[1], destination[0]\n url = url.format(src1=src1, src2=src2, dest1=dest1, dest2=dest2)\n\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=1.0)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount(url, adapter)\n result = session.get(url)\n result = json.loads(result.content)\n\n path = result[\"routes\"][0][\"geometry\"][\"coordinates\"]\n path = [[point[1], point[0]] for point in path]\n duration = result[\"routes\"][0][\"duration\"]\n distance = result[\"routes\"][0][\"distance\"]\n if path[-1] != destination:\n path.append(destination)\n return path, distance, duration\n except Exception:\n return None, None, None", "title": "" }, { "docid": "10e90e65b6882d4b04f01701b0cda9fd", "score": "0.5436086", "text": "def receive(self):\n if self.gw == None:\n return None\n t0 = _time.time() * 1000\n while (self.timeout <= 0 or ((_time.time() * 1000) - t0) < self.timeout):\n self.waiting = True\n ntf = self.gw.receive(DatagramNtf, self.timeout)\n self.waiting = False\n if ntf == None:\n return None\n if isinstance(ntf, DatagramNtf):\n p = ntf.protocol\n if ((p == Protocol.DATA) or (p >= Protocol.USER)):\n if ((self.localProtocol < 0) or (self.localProtocol == p)):\n return ntf\n return None", "title": "" }, { "docid": "1d2055f65d4c4fdd8870e2038cb8c4f3", "score": "0.53867245", "text": "def _handler_direct_access_stop_direct(self):\r\n next_state = None\r\n next_agent_state = None\r\n result = None\r\n\r\n next_state = ProtocolState.COMMAND\r\n next_agent_state = ResourceAgentState.COMMAND\r\n\r\n return 
(next_state, (next_agent_state, result))", "title": "" }, { "docid": "0d1f210e5bd5652009b14b2f02798669", "score": "0.537567", "text": "def _get_outgoing_route_updates(self):\n return self.__outgoing_route_updates", "title": "" }, { "docid": "0d1f210e5bd5652009b14b2f02798669", "score": "0.5374935", "text": "def _get_outgoing_route_updates(self):\n return self.__outgoing_route_updates", "title": "" }, { "docid": "29866bbaad899a3f8968bca4fc1a2227", "score": "0.52828246", "text": "def recvRtspReply(self):\n\t\t#TODO\n\t\twhile True:\n\t\t\treply = self.rtspSocket.recv(1024)\n\n\t\t\tif reply:\n\t\t\t\tself.parseRtspReply(reply)\n\n\t\t\t# Close the RTSP socket upon requesting Teardown\n\t\t\tif self.requestSent == self.TEARDOWN:\n\t\t\t\tself.rtspSocket.shutdown(socket.SHUT_RDWR)\n\t\t\t\tself.rtspSocket.close()\n\t\t\t\tbreak", "title": "" }, { "docid": "5d472e32fdbf4f4fbafd64274c536a5d", "score": "0.5269638", "text": "def recvRtspReply(self):\n\t\twhile True:\n\t\t\treply = self.rtspSocket.recv(1024)\n\t\t\tprint(reply)\n\t\t\tif reply:\n\t\t\t\tself.ProcessRtspReply(reply)\n\t # Close the RTSP socket upon requesting Teardown\n\n\t\t\tif self.sentRequest == self.TEARDOWN:\n\n\t\t\t\tself.rtspSocket.shutdown(socket.SHUT_RDWR)\n\n\t\t\t\tself.rtspSocket.close()\n\n\t\t\t\tbreak", "title": "" }, { "docid": "f8fcbefc016543e7d9be16bc458d986d", "score": "0.525904", "text": "def get_stop_for_user_route(user_stop, destination_stop, agency, route, direction):\n\n stops = gets_stops_for_route(agency, route, direction)\n print \"this is the orginal stops\", stops\n for num in range(len(stops)):\n if user_stop == stops[num][1]:\n forward = True\n start = num\n break\n if destination_stop == stops[num][1]:\n forward = False\n start = 0\n break\n if not forward:\n stops.reverse()\n print \"this is after reversed\", stops\n route = []\n print \"THIS IS FROM THE START\", stops[start:]\n for stop in stops[start:]:\n print stop\n route.append(stop)\n if stop[1] == destination_stop:\n break\n\n return route", "title": "" }, { "docid": "ff80e9e3f327881bc0289fbb7c071a22", "score": "0.5232184", "text": "def recv(self, *args, **kwargs):\r\n if self.disconnected:\r\n raise socket.error\r\n return self.read(block=True)", "title": "" }, { "docid": "ce9b31302c5cbfdb8c142d4d41662155", "score": "0.52176327", "text": "def read_routes():\n if SOLARIS:\n f = os.popen(\"netstat -rvn -f inet\")\n elif FREEBSD:\n f = os.popen(\"netstat -rnW\") # -W to handle long interface names\n else:\n f = os.popen(\"netstat -rn -f inet\")\n ok = 0\n mtu_present = False\n prio_present = False\n refs_present = False\n use_present = False\n routes = []\n pending_if = []\n for line in f.readlines():\n if not line:\n break\n line = line.strip().lower()\n if line.find(\"----\") >= 0: # a separation line\n continue\n if not ok:\n if line.find(\"destination\") >= 0:\n ok = 1\n mtu_present = \"mtu\" in line\n prio_present = \"prio\" in line\n refs_present = \"ref\" in line # There is no s on Solaris\n use_present = \"use\" in line\n continue\n if not line:\n break\n rt = line.split()\n if SOLARIS:\n dest, netmask, gw, netif = rt[:4]\n flg = rt[4 + mtu_present + refs_present]\n else:\n dest, gw, flg = rt[:3]\n locked = OPENBSD and rt[6] == \"l\"\n offset = mtu_present + prio_present + refs_present + locked\n offset += use_present\n netif = rt[3 + offset]\n if flg.find(\"lc\") >= 0:\n continue\n elif dest == \"default\":\n dest = 0\n netmask = 0\n elif SOLARIS:\n dest = scapy.utils.atol(dest)\n netmask = scapy.utils.atol(netmask)\n else:\n if \"/\" 
in dest:\n dest, netmask = dest.split(\"/\")\n netmask = scapy.utils.itom(int(netmask))\n else:\n netmask = scapy.utils.itom((dest.count(\".\") + 1) * 8)\n dest += \".0\" * (3 - dest.count(\".\"))\n dest = scapy.utils.atol(dest)\n # XXX: TODO: add metrics for unix.py (use -e option on netstat)\n metric = 1\n if \"g\" not in flg:\n gw = '0.0.0.0'\n if netif is not None:\n try:\n ifaddr = get_if_addr(netif)\n routes.append((dest, netmask, gw, netif, ifaddr, metric))\n except OSError as exc:\n if exc.message == 'Device not configured':\n # This means the interface name is probably truncated by\n # netstat -nr. We attempt to guess it's name and if not we\n # ignore it.\n guessed_netif = _guess_iface_name(netif)\n if guessed_netif is not None:\n ifaddr = get_if_addr(guessed_netif)\n routes.append((dest, netmask, gw, guessed_netif, ifaddr, metric)) # noqa: E501\n else:\n warning(\"Could not guess partial interface name: %s\", netif) # noqa: E501\n else:\n raise\n else:\n pending_if.append((dest, netmask, gw))\n f.close()\n\n # On Solaris, netstat does not provide output interfaces for some routes\n # We need to parse completely the routing table to route their gw and\n # know their output interface\n for dest, netmask, gw in pending_if:\n gw_l = scapy.utils.atol(gw)\n max_rtmask, gw_if, gw_if_addr, = 0, None, None\n for rtdst, rtmask, _, rtif, rtaddr in routes[:]:\n if gw_l & rtmask == rtdst:\n if rtmask >= max_rtmask:\n max_rtmask = rtmask\n gw_if = rtif\n gw_if_addr = rtaddr\n # XXX: TODO add metrics\n metric = 1\n if gw_if:\n routes.append((dest, netmask, gw, gw_if, gw_if_addr, metric))\n else:\n warning(\"Did not find output interface to reach gateway %s\", gw)\n\n return routes", "title": "" }, { "docid": "43dbbbfcc5288ae4775361fd40d5ce0d", "score": "0.5209584", "text": "def find_closest_route(api_access_token, origin_address, destination_addresses):\n logger.debug(\"api_access_token: {} \".format(api_access_token) \\\n + \"origin_address: {} \".format(str(origin_address)) \\\n + \"destination_addresses: {}\".format(str(destination_addresses))\n )\n\n # (x, y) coordinates of origin address\n try:\n incidents = \"{},{}\".format(origin_address['x'], origin_address['y'])\n except KeyError as e:\n logger.debug(\"Missing coordinate in orgin_address - {}\".format(str(e)))\n return None\n\n # List of (x, y) coordinates of possible destinations\n facility_list = []\n facility_key_list = list(filter(lambda k: k[0] != EMPTY_RECORD and k[1] != EMPTY_RECORD, destination_addresses.keys()))\n facility_list = map(lambda k : \"{},{}\".format(k[0], k[1]), facility_key_list)\n # Separate destination coordinates with \";\"\n facilities = \";\".join(facility_list)\n params = {\n 'f': 'json',\n 'token': api_access_token,\n 'returnDirections': 'false',\n 'returnCFRoutes': 'true',\n 'incidents': incidents,\n 'facilities': facilities\n }\n\n body_as_string, updated_header = format_multipart_form_request(ARCGIS_CLOSEST_FACILITY_URL, params)\n # POST request over network\n response = _post_request(ARCGIS_CLOSEST_FACILITY_URL, body_as_string, updated_header)\n\n if response.status_code == 200:\n response_json = response.json()\n logger.debug(\"Response JSON: {}\".format(str(response_json)))\n try:\n routes = response_json['routes']\n features = routes['features']\n attributes = features[0]['attributes']\n facility_id = attributes['FacilityID']\n travel_time_in_minutes = attributes['Total_TravelTime']\n travel_distance_in_miles = attributes['Total_Miles']\n except KeyError as e:\n logger.debug(str(e))\n return 
None\n\n formatted_travel_time_in_minutes = _format_float(float(travel_time_in_minutes))\n formatted_travel_distance_in_miles = _format_float(float(travel_distance_in_miles))\n\n travel_time_string = \"{} minutes\".format(formatted_travel_time_in_minutes)\n travel_distance_string = \"{} miles\".format(formatted_travel_distance_in_miles)\n\n facility_key_index = int(facility_id) - 1\n facility_key = facility_key_list[facility_key_index]\n facility_address = destination_addresses[facility_key]\n\n destination_dict = {\n 'Address': facility_address,\n 'Driving_time': travel_time_string,\n 'Driving_distance': travel_distance_string\n }\n\n logger.debug(\"Returning closest destination: {}\".format(str(destination_dict)))\n return destination_dict\n else:\n logger.debug(\"Response Error: {}\".format(str(response.status_code)))\n return None", "title": "" }, { "docid": "941a4a3d7104e2eb89c13de47e42c909", "score": "0.520746", "text": "def receive_map_command(sock):\n return receive_upd_command(sock)", "title": "" }, { "docid": "218fc59368edbd9528c56fdf82ecdc1a", "score": "0.5180098", "text": "def RouterSolicitsReceived(self) -> int:", "title": "" }, { "docid": "c4f89a64f311a35982f21bd97987e457", "score": "0.51671165", "text": "def receivedDownstream(self, data):", "title": "" }, { "docid": "816f4fb8bd3fea78ad7cec6c619aec7e", "score": "0.5157779", "text": "def get_directions(location, room_name):\n if room_name == \"town gate\":\n return location[0][2]\n if room_name == \"town square\":\n return location[1][2]\n if room_name == \"book seller's\":\n return location[2][2]\n if room_name == \"shipyard\":\n return location[3][2]\n if room_name == \"court house\":\n return location[4][2]\n if room_name == \"outside\":\n return location[5][2]", "title": "" }, { "docid": "3ef3e94a27318eacc8675b66dcfde7c8", "score": "0.514557", "text": "def _get_other_direction(self, direction):\n self._check_direction(direction)\n return self.directions[not self.directions.index(direction)]", "title": "" }, { "docid": "ccc34d52c2e1678ac70025430856f746", "score": "0.5144047", "text": "def recv_start(self, timeout=None):", "title": "" }, { "docid": "fca1404fbacd043927831c52b6d839d1", "score": "0.5141051", "text": "def run(self):\n\n # open a server socket and listen for commands\n server_sock = socket.socket (socket.AF_INET, socket.SOCK_DGRAM)\n server_sock.setsockopt (socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server_sock.bind ((\"0.0.0.0\", 8888))\n \n # Infinite loop to move the cart\n while (1):\n # Read a command\n cmds = server_sock.recv (1024)\n cmds = cmds.strip ().split ('\\n')\n\n for cmd in cmds:\n cmd_tokens = cmd.split ()\n if cmd_tokens [0] == 'S' and len (cmd_tokens) >= 3:\n # received cart position from camera\n x, y = float (cmd_tokens [1]), float (cmd_tokens [2])\n # meters to millimeters\n x *= 1000\n y *= 1000\n # y position should be adjusted as if it's the sponge\n y += Y_MARKER_TO_SPONGE\n print \"Setting position to \", x, \", \", y\n l_mm, r_mm = self.calc_belts_lens_from_position(x, y)\n print \"Setting belt lengths to \", l_mm, \", \", r_mm\n\n steps_l = self.calc_target_steps(l_mm)\n steps_r = self.calc_target_steps(r_mm)\n \n print \"Calculated motor steps: \", steps_l, \", \", steps_r\n\n ass_motor_control.set_current_position (steps_l, steps_r)\n\n elif cmd_tokens [0] == 'M' and len (cmd_tokens) >= 3:\n # received cart target coordinates from camera \n x, y = float (cmd_tokens [1]), float (cmd_tokens [2])\n # meters to millimeters\n x *= 1000\n y *= 1000\n print \"Moving to \", x, \", \", 
y\n l_mm, r_mm = self.calc_belts_lens_from_position(x, y)\n print \"Required lengths (in mm): \", l_mm, \", \", r_mm\n \n # Calc steps per motor\n steps_l = self.calc_target_steps(l_mm)\n steps_r = self.calc_target_steps(r_mm)\n \n print \"Calculated motor steps: \", steps_l, \", \", steps_r\n\n # call hal to pass steps to motor \n ass_motor_control.set_destination (steps_l, steps_r)\n\n elif cmd_tokens [0] == 'Z' and len (cmd_tokens) >= 2:\n is_on = int (cmd_tokens [1])\n ass_motor_control.set_zurum (is_on)\n if is_on and self.zurum_process is None:\n self.zurum_process = Popen ('omxplayer /home/pi/zurum.mp3', shell=True)\n else:\n if self.zurum_process is not None:\n self.zurum_process.kill ()\n self.zurum_process = None\n Popen ('killall omxplayer.bin', shell=True)", "title": "" }, { "docid": "ab86cbf7a41e88db11f94f42f62e5e49", "score": "0.51218516", "text": "async def receive(self, token):\n while True:\n try:\n if self.udp:\n res, addr = await asyncio.wait_for(self.udp_sock.recvfrom(self.chunks_size), self.udp_timeout)\n else:\n res = await self.tcp_recv()\n token = res[:self.token_size]\n msg_type, msg_cntr, sender = struct.unpack(\"ii17s\", token)\n msg_data = res[self.token_size:]\n break\n except Exception as e:\n msg = token+self.tx if self.tx else token\n if self.udp:\n if __debug__:\n print(\"Sending udp to %s\" % str(self.addr))\n await self.udp_sock.sendto(msg, self.addr)\n else:\n await self.tcp_send(msg)\n if __debug__:\n print(\"TIMEOUT: no response within {}s\".format(self.udp_timeout))\n return (sender, msg_type, msg_cntr, msg_data)", "title": "" }, { "docid": "edcbc733b8d3988f352ae96e72a999fb", "score": "0.51146716", "text": "def OutputPacketsWithNoRoute(self) -> int:", "title": "" }, { "docid": "cd46890b2d9b875e0ae253a234e38f78", "score": "0.511229", "text": "def _get_outgoing_route_update(self):\n return self.__outgoing_route_update", "title": "" }, { "docid": "7af7b9097b3d55888cd42173d4aaf7e", "score": "0.5107384", "text": "def readPackets( self ):\n try:\n self.handle.sendline( \"for p in pkt: p \\n\")\n self.handle.expect( \"for p in pkt: p \r\n... 
\\r\\n\" )\n self.handle.expect( self.scapyPrompt )\n except pexpect.TIMEOUT:\n main.log.exception( self.name + \": Command timed out\" )\n return None\n except pexpect.EOF:\n main.log.exception( self.name + \": connection closed.\" )\n main.cleanup()\n main.exit()\n except Exception:\n main.log.exception( self.name + \": Uncaught exception!\" )\n main.cleanup()\n main.exit()\n return self.handle.before", "title": "" }, { "docid": "b06e3b60781f5fb535f8cd21e75a54d9", "score": "0.51047623", "text": "def routine_offline(self):\n rospy.loginfo(\"----------- DEBUT ROUTINE OFFLINE--------------------------------\")\n self.goal_offline = speechToTextPalbator.msg.SttOfflineGoal()\n rospy.loginfo(\"Sending goal to offline ...\")\n order={\n 'order': self.index,\n 'action': self.currentAction\n }\n json_in_str=js.dumps(order)\n self.goal_offline.order=json_in_str\n self.action_offline_client.send_goal(self.goal_offline)\n while self.action_offline_client.get_result() is None and not rospy.is_shutdown():\n if self.event_touch == True:\n self.action_offline_client.cancel_all_goals()\n self.event_touch = False\n rospy.logwarn(\"EVENT TOUCH \"+str(self.event_touch))\n break\n rospy.loginfo(\"Waiting for OFFLINE detect ....\")\n self.socketIO.wait(seconds=0.1)\n rospy.loginfo(str(self.action_offline_client.get_result()))\n if str(self.action_offline_client.get_result().stt_result) != '':\n self.dataToUse=str(self.action_offline_client.get_result().stt_result)\n rospy.loginfo(\"----------- FIN ROUTINE OFFLINE--------------------------------\")", "title": "" }, { "docid": "2671114fd3ec58d32b4ac975e3910024", "score": "0.50890815", "text": "def recv(self):\n try:\n payload, addr = self.sock.recvfrom(1024)\n except socket.timeout:\n return None, None\n\n if len(payload) == 0:\n return None, addr\n return payload, addr", "title": "" }, { "docid": "5ec8f01574c7c2ecac47fa0bd3b6b0c4", "score": "0.50829846", "text": "def get_fastest_driving_route(self, org, des):\n query_params = {'key': self.API_KEY,\n 'origin': (\"%s,%s\" % (str(org[0]), str(org[1]))),\n 'destination': (\"%s,%s\" % (str(des[0]), str(des[1]))),\n 'mode': 'driving'}\n\n resp = requests.get(self.ROUTING_API_URL, params=query_params)\n resp_json = self._validate_result(resp)\n\n return resp_json['routes'][0]", "title": "" }, { "docid": "a0a57a883cd7cbde71bace7a2e2b9cde", "score": "0.507591", "text": "def move(self, cmd):\n while 1:\n try:\n if not self.is_emergency:\n if not self.manually_operated:\n self.connection.send(chr(cmd))\n else:\n self.connection.send(chr(NetworkVehicle.ODOMETRY))\n\n # Receive and parse odometry.\n msg = self.connection.recv(1)\n while \"\\n\" not in msg:\n msg += self.connection.recv(1)\n values = msg.split(\",\", 3)\n if len(values) == 3:\n self.odometry = [float(tok) for tok in values[:]]\n if self.log:\n self.out.write(\"%f %f %f\\n\" % (self.odometry[0], self.odometry[1], self.odometry[2]))\n return self.odometry\n except socket.timeout:\n print \"NetworkVehicle: Timeout.\"\n continue\n except ValueError:\n print \"NetworkVehicle: ValueError.\"\n continue\n except socket.error, error:\n print \"NetworkVehicle: \", error, \"Return None.\"\n return None", "title": "" }, { "docid": "3dc4d4f0e0694dfd8708e20854b241ac", "score": "0.5070869", "text": "def recv(self) -> Optional[bytes]:\n # We can only receive if a connection has been established\n if self.state is not State.CONN_EST:\n return None\n # Initialize local variables\n data = []\n acked = []\n if 'ACK-lost' in self.temp:\n 
self.send_recv_ack(self.temp['ACK-lost'], acked, data)\n # The server receives while the client does not disconnect\n while self.state is State.CONN_EST:\n message = self.handle_flow(expected=[Flag.NONE, Flag.FIN])\n if not message:\n continue\n if message['flag'] is Flag.NONE and message['dlen'] > 0:\n self.send_recv_ack(message, acked, data)\n # Accept the disconnect request\n elif message['flag'] is Flag.FIN:\n self.accept_disconnect(message)\n # Acknowledge the probe checking windows size\n else:\n self.acknowledge_post(message, Flag.ACK)\n # Sort the data according to the ACK numbers\n data.sort(key=lambda tup: tup[1])\n # Merge the data bytes into a single object\n return b''.join([d for (d, _) in data])", "title": "" }, { "docid": "a0317fb2a10fb506359552443703f66c", "score": "0.50487286", "text": "def DestinationUnreachableMessagesReceived(self) -> int:", "title": "" }, { "docid": "40cc970de2a13a1c92134497d3ad299d", "score": "0.50288075", "text": "def getNeighbouringRouters(self):\n print '%s call getNeighbouringRouters' % self.port\n try:\n routerInfo = []\n routerList = []\n routerList = self.__sendCommand('router list')[0].split()\n print routerList\n\n if 'Done' in routerList:\n print 'no neighbouring routers'\n return None\n\n for index in routerList:\n router = []\n cmd = 'router %s' % index\n router = self.__sendCommand(cmd)\n\n for line in router:\n if 'Done' in line:\n break\n #elif 'Rloc' in line:\n # rloc16 = line.split()[1]\n elif 'Ext Addr' in line:\n eui = line.split()[2]\n routerInfo.append(int(eui, 16))\n #elif 'LQI In' in line:\n # lqi_in = line.split()[1]\n #elif 'LQI Out' in line:\n # lqi_out = line.split()[1]\n else:\n pass\n\n print routerInfo\n return routerInfo\n except Exception, e:\n ModuleHelper.WriteIntoDebugLogger(\"getNeighbouringDevice() Error: \" + str(e))", "title": "" }, { "docid": "dff6a7514bf6223b9c74f64fe78db9c0", "score": "0.5027655", "text": "def send_route_update():\n\n\torigin_addr = (UDP_IP, LOCALPORT)\n\t\n\n\t# print \"send_route_update was called \"\n\t# print TIMEOUT \n\tfor client in routing_table.itervalues():\n\n\t\tdistance_vector = {}\n\t\t#To distinguish from the other types of messages\n\t\tdistance_vector[\"header\"] = \"route_update\"\n\t\tdistance_vector[\"origin_addr\"] = origin_addr\n\n\t\t# print distance_vector[\"origin\"]\n\t\t# print \"just printed\"\n\n\t\t#If any of the links will go through this node set their weight to infinity\n\t\t#This is our poison reverse implementation\n\t\tfor ip_addr_port, node in routing_table.items():\n\t\t\tif node.next_hop_ip == client.ip and node.next_hop_port == client.port:\n\t\t\t\tdistance_vector[ip_addr_port] = (node.ip, node.port, float(\"inf\"))\n\t\t\telse:\n\t\t\t\tdistance_vector[ip_addr_port] = (node.ip, node.port, node.weight)\n\n\t\tjson_dist_vector = json.dumps(distance_vector)\n\n\t\t#If the node is not down and is a direct neighbor send distance vector\n\t\tif client.next_hop_ip == client.ip and client.next_hop_port == client.port:\n\t\t\tif client.weight != float(\"inf\"):\n\t\t\t\tsock.sendto(json_dist_vector, (client.ip, int(client.port)))", "title": "" }, { "docid": "0861a7f7a7c6a797516a2db71fdb81f9", "score": "0.50113225", "text": "def get_path(self, sim, app_name, message, topology_src, alloc_DES, alloc_module, traffic,from_des):\n node_src = topology_src\n DES_dst = alloc_module[app_name][message.dst] #returns an 
array with all DES process serving\n\n\n if message.dst not in self.rr.keys():\n self.rr[message.dst] = 0\n\n\n print (\"GET PATH\")\n print (\"\\tNode _ src (id_topology): %i\" %node_src)\n print (\"\\tRequest service: %s \" %(message.dst))\n print (\"\\tProcess serving that service: %s (pos ID: %i)\" %(DES_dst,self.rr[message.dst]))\n\n bestPath = []\n bestDES = []\n\n for ix,des in enumerate(DES_dst):\n if message.name == \"M.Cam\":\n if self.rr[message.dst]==ix:\n dst_node = alloc_DES[des]\n\n path = list(nx.shortest_path(sim.topology.G, source=node_src, target=dst_node, weight='BW'))\n\n bestPath = [path]\n bestDES = [des]\n\n self.rr[message.dst] = (self.rr[message.dst]+ 1) % len(DES_dst)\n break\n else: #message.name == \"M.B\"\n\n dst_node = alloc_DES[des]\n\n path = list(nx.shortest_path(sim.topology.G, source=node_src, target=dst_node, weight='BW'))\n if message.broadcasting:\n bestPath.append(path)\n bestDES.append(des)\n else:\n bestPath = [path]\n bestDES = [des]\n\n return bestPath, bestDES", "title": "" }, { "docid": "bfbb619c17f83b1cd29fbb3503673384", "score": "0.5009811", "text": "def _get_waypoint(self, req):\n carla_position = carla.Location()\n carla_position.x = req.location.x\n carla_position.y = -req.location.y\n carla_position.z = req.location.z\n\n carla_waypoint = self._map.get_waypoint(carla_position)\n\n response = GetWaypointResponse()\n\n response.waypoint.pose.position.x = carla_waypoint.transform.location.x\n response.waypoint.pose.position.y = -carla_waypoint.transform.location.y\n response.waypoint.pose.position.z = carla_waypoint.transform.location.z\n response.waypoint.is_junction = carla_waypoint.is_junction\n response.waypoint.road_id = carla_waypoint.road_id\n response.waypoint.section_id = carla_waypoint.section_id\n response.waypoint.lane_id = carla_waypoint.lane_id\n rospy.logwarn(\"Get waypoint {}\".format(response.waypoint.pose.position))\n return response", "title": "" }, { "docid": "3740a3b18d2340581dd595948c9a8962", "score": "0.5006898", "text": "def _process_setup(self, meta):\n if not self.fwd:\n # Request on return trip\n if self.accepted:\n meta.state.pend_confirm(self.path_ids[0], self.steady)\n else:\n # Reservation has been rejected by further along the path.\n meta.state.pend_remove(self.path_ids[0], self.steady)\n else:\n # Route packet as normal\n self._process_req(meta)\n return []", "title": "" }, { "docid": "ab86cbf7a41e88db41160b182574a138", "score": "0.49933228", "text": "def read_routes6():\n\n # Call netstat to retrieve IPv6 routes\n fd_netstat = os.popen(\"netstat -rn -f inet6\")\n\n # List interfaces IPv6 addresses\n lifaddr = in6_getifaddr()\n if not lifaddr:\n fd_netstat.close()\n return []\n\n # Routes header information\n got_header = False\n mtu_present = False\n prio_present = False\n\n # Parse the routes\n routes = []\n for line in fd_netstat.readlines():\n\n # Parse the routes header and try to identify extra columns\n if not got_header:\n if \"Destination\" == line[:11]:\n got_header = True\n mtu_present = \"Mtu\" in line\n prio_present = \"Prio\" in line\n continue\n\n # Parse a route entry according to the operating system\n splitted_line = line.split()\n if OPENBSD or NETBSD:\n index = 5 + mtu_present + prio_present\n if len(splitted_line) < index:\n warning(\"Not enough columns in route entry !\")\n continue\n destination, next_hop, flags = splitted_line[:3]\n dev = splitted_line[index]\n else:\n # FREEBSD or DARWIN\n if len(splitted_line) < 4:\n warning(\"Not enough columns in route entry !\")\n continue\n 
destination, next_hop, flags, dev = splitted_line[:4]\n\n # XXX: TODO: add metrics for unix.py (use -e option on netstat)\n metric = 1\n\n # Check flags\n if \"U\" not in flags: # usable route\n continue\n if \"R\" in flags: # Host or net unreachable\n continue\n if \"m\" in flags: # multicast address\n # Note: multicast routing is handled in Route6.route()\n continue\n\n # Replace link with the default route in next_hop\n if \"link\" in next_hop:\n next_hop = \"::\"\n\n # Default prefix length\n destination_plen = 128\n\n # Extract network interface from the zone id\n if '%' in destination:\n destination, dev = destination.split('%')\n if '/' in dev:\n # Example: fe80::%lo0/64 ; dev = \"lo0/64\"\n dev, destination_plen = dev.split('/')\n if '%' in next_hop:\n next_hop, dev = next_hop.split('%')\n\n # Ensure that the next hop is a valid IPv6 address\n if not in6_isvalid(next_hop):\n # Note: the 'Gateway' column might contain a MAC address\n next_hop = \"::\"\n\n # Modify parsed routing entries\n # Note: these rules are OS specific and may evolve over time\n if destination == \"default\":\n destination, destination_plen = \"::\", 0\n elif '/' in destination:\n # Example: fe80::/10\n destination, destination_plen = destination.split('/')\n if '/' in dev:\n # Example: ff02::%lo0/32 ; dev = \"lo0/32\"\n dev, destination_plen = dev.split('/')\n\n # Check route entries parameters consistency\n if not in6_isvalid(destination):\n warning(\"Invalid destination IPv6 address in route entry !\")\n continue\n try:\n destination_plen = int(destination_plen)\n except Exception:\n warning(\"Invalid IPv6 prefix length in route entry !\")\n continue\n if in6_ismlladdr(destination) or in6_ismnladdr(destination):\n # Note: multicast routing is handled in Route6.route()\n continue\n\n if LOOPBACK_NAME in dev:\n # Handle ::1 separately\n cset = [\"::1\"]\n next_hop = \"::\"\n else:\n # Get possible IPv6 source addresses\n devaddrs = (x for x in lifaddr if x[2] == dev)\n cset = construct_source_candidate_set(destination, destination_plen, devaddrs) # noqa: E501\n\n if len(cset):\n routes.append((destination, destination_plen, next_hop, dev, cset, metric)) # noqa: E501\n\n fd_netstat.close()\n return routes", "title": "" }, { "docid": "755b5e55ffe003ea5cb7e5ded834c33d", "score": "0.49910423", "text": "def get_distance_along_route(wmap, route, target_location):\n\n covered_distance = 0\n prev_position = None\n found = False\n\n # Don't use the input location, use the corresponding wp as location\n target_location_from_wp = wmap.get_waypoint(target_location).transform.location\n\n for trans, _ in route:\n # input route is transform\n position = trans.location\n\n location = target_location_from_wp\n\n # Don't perform any calculations for the first route point\n if not prev_position:\n prev_position = position\n continue\n\n # Calculate distance between previous and current route point\n interval_length_squared = ((prev_position.x - position.x) ** 2) + ((prev_position.y - position.y) ** 2)\n distance_squared = ((location.x - prev_position.x) ** 2) + ((location.y - prev_position.y) ** 2)\n\n # Close to the current position? 
Stop calculation\n if distance_squared < 1.0:\n break\n\n if distance_squared < 400 and not distance_squared < interval_length_squared:\n # Check if a neighbor lane is closer to the route\n # Do this only in a close distance to correct route interval, otherwise the computation load is too high\n starting_wp = wmap.get_waypoint(location)\n wp = starting_wp.get_left_lane()\n while wp is not None:\n new_location = wp.transform.location\n new_distance_squared = ((new_location.x - prev_position.x) ** 2) + (\n (new_location.y - prev_position.y) ** 2)\n\n if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id):\n break\n\n if new_distance_squared < distance_squared:\n distance_squared = new_distance_squared\n location = new_location\n else:\n break\n\n wp = wp.get_left_lane()\n\n wp = starting_wp.get_right_lane()\n while wp is not None:\n new_location = wp.transform.location\n new_distance_squared = ((new_location.x - prev_position.x) ** 2) + (\n (new_location.y - prev_position.y) ** 2)\n\n if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id):\n break\n\n if new_distance_squared < distance_squared:\n distance_squared = new_distance_squared\n location = new_location\n else:\n break\n\n wp = wp.get_right_lane()\n\n if distance_squared < interval_length_squared:\n # The location could be inside the current route interval, if route/lane ids match\n # Note: This assumes a sufficiently small route interval\n # An alternative is to compare orientations, however, this also does not work for\n # long route intervals\n\n curr_wp = wmap.get_waypoint(position)\n prev_wp = wmap.get_waypoint(prev_position)\n wp = wmap.get_waypoint(location)\n\n if prev_wp and curr_wp and wp:\n if wp.road_id == prev_wp.road_id or wp.road_id == curr_wp.road_id:\n # Roads match, now compare the sign of the lane ids\n if (np.sign(wp.lane_id) == np.sign(prev_wp.lane_id) or\n np.sign(wp.lane_id) == np.sign(curr_wp.lane_id)):\n # The location is within the current route interval\n covered_distance += math.sqrt(distance_squared)\n found = True\n break\n\n covered_distance += math.sqrt(interval_length_squared)\n prev_position = position\n\n return covered_distance, found", "title": "" }, { "docid": "41a25b8bf32e366d4a2f0603f95997b5", "score": "0.4982278", "text": "def route_v8(\n self,\n transport_mode: RoutingTransportMode,\n origin: Union[List[float], str],\n destination: Union[List[float], str],\n via: Optional[List[List[float]]] = None,\n departure_time: Optional[str] = None,\n routing_mode: RoutingMode = RoutingMode.fast,\n alternatives: Optional[int] = None,\n avoid: Optional[Dict[str, List[str]]] = None,\n exclude: Optional[Dict[str, List[str]]] = None,\n units: Optional[RoutingMetric] = None,\n lang: Optional[str] = None,\n return_fields: List[RoutingApiReturnField] = [RoutingApiReturnField.polyline],\n span_fields: Optional[List[RoutingApiSpanField]] = None,\n truck: Optional[Dict[str, List[str]]] = None,\n scooter: Optional[Dict[str, str]] = None,\n headers: Optional[dict] = None,\n ) -> Optional[RoutingResponseV8]:\n\n if isinstance(origin, str):\n origin = self._get_coordinates_for_location_name(origin)\n if isinstance(destination, str):\n destination = self._get_coordinates_for_location_name(destination)\n data = {\n \"transportMode\": transport_mode.__str__(),\n \"origin\": str.format(\"{0},{1}\", origin[0], origin[1]),\n \"destination\": str.format(\"{0},{1}\", destination[0], destination[1]),\n \"apiKey\": self._api_key,\n }\n\n via_keys = []\n if via:\n for i, v in enumerate(via):\n key = str.format(\"{0}{1}\", \"via\", i)\n via_keys.append(key)\n data[key] = str.format(\"{0},{1}\", v[0], v[1])\n if departure_time:\n data[\"departureTime\"] = departure_time\n data[\"routingMode\"] = routing_mode.__str__()\n if alternatives:\n data[\"alternatives\"] = alternatives\n if avoid:\n key = list(avoid.keys())[0]\n values = list(avoid.values())[0]\n data[\"avoid\"] = {\n key: \",\".join(values),\n }\n if exclude:\n key = list(exclude.keys())[0]\n values = list(exclude.values())[0]\n data[\"exclude\"] = {\n key: \",\".join(values),\n }\n if units:\n data[\"units\"] = units.__str__()\n if lang:\n data[\"lang\"] = lang\n if return_fields:\n data[\"return\"] = \",\".join([field.__str__() for field in return_fields])\n if span_fields:\n data[\"spans\"] = \",\".join([field.__str__() for field in span_fields])\n if truck:\n key = list(truck.keys())[0]\n values = list(truck.values())[0]\n data[\"truck\"] = {\n key: \",\".join(values),\n }\n if scooter:\n data[\"scooter\"] = scooter\n\n response = self.__get(\n self.URL_CALCULATE_ROUTE_V8,\n data,\n \"routes\",\n RoutingResponseV8,\n manipulation_key=\"via\",\n keys_for_manipulation=via_keys,\n )\n return response", "title": "" }, { "docid": "b5ffca657af2d0f6e0a1c8e8a7f3b4c2", "score": "0.4981544", "text": "def recv(self):\n try:\n data = self.socket.recv(10000)\n return data\n except:\n return None", "title": "" }, { "docid": "d6df6396838882ae4ce45c847c2d9127", "score": "0.49665096", "text": "def recv_task(self):\n try:\n hdr_len = struct.calcsize('!L')\n hdr = self.connection.recv(hdr_len)\n if len(hdr) == 4:\n msg_len = struct.unpack('!L', hdr)[0]\n msg = self.connection.recv(msg_len)\n data = globals()['decrypt_aes'](msg, self.key)\n return json.loads(data)\n else:\n log(\"{} error: invalid header length\".format(self.recv_task.__name__))\n if not self.connection.recv(hdr_len):\n self.kill()\n except Exception as e:\n e = str(e)\n if \"Errno 104\" in e or \"10054\" in e:\n log(\"{} socket error: SERVER DISCONNECTED GRACEFULLY - {}\".format(self.recv_task.__name__, e))\n self.kill()\n return\n elif \"Errno 32\" in e or \"10052\" in e:\n log(\"{} socket error: SERVER CRASHED OR INTERRUPTED - {}\".format(self.recv_task.__name__, e))\n elif \"Errno 111\" in e or \"10061\" in e:\n log(\"{} socket error: SERVER OFFLINE OR CHANGED PORT - {}\".format(self.recv_task.__name__, e))\n else:\n log(\"{} socket error: SERVER UNKNOWN COMMUNICATION FAILURE - {}\".format(self.recv_task.__name__, e)) \n self.passive()\n #log(\"{} error: {}\".format(self.recv_task.__name__, str(e)))", "title": "" }, { "docid": "a98922376e3ec01a71c3cd30ee84e4bb", "score": "0.4961925", "text": "def GetDirection(restaurant_id,\n google_key = google_key,\n yelp_key = yelp_key,\n verbose = True,\n mode = \"transit\",\n start_location = \"Union Square, New York, NY 10003\",\n start_latitude = None,\n start_longitude = None):\n # Check the validity of input\n assert mode in [\"driving\",\"walking\",\"transit\"], \"Invalid 'mode'!\"\n assert type(restaurant_id) == str, \"The parameter 'restaurant_id' should be a string!\"\n assert type(verbose) == bool, \"The parameter 'verbose' should be a boolean variable!\"\n # Check whether longitude and latitude are specified or not specified at the same time\n longlat_input_checker = (start_latitude == None) + (start_longitude == None)\n assert longlat_input_checker != 1, \"Either both or neither of 'start_latitude' and 'start_longitude' should be specified!\"\n \n # Get the start location if start_latitude and start_longitude are specified\n if longlat_input_checker == 0:\n 
if start_location != \"Union Square, New York, NY 10003\":\n warnings.warn(\"The parameter 'start_location' is ignored when longitude and latitude are specified.\")\n start_location = str(start_latitude) + \",\" + str(start_longitude)\n \n if mode == \"driving\" and verbose:\n warnings.warn(\"The parameter 'verbose' is ignored when 'mode' is 'driving'.\")\n # Get the destination location\n url = \"https://api.yelp.com/v3/businesses/\" + restaurant_id\n headers = {\"Authorization\":yelp_key}\n rspn_json = requests.request(\"GET\", url = url, headers = headers).json()\n restaurant_location = \", \".join(rspn_json[\"location\"][\"display_address\"])\n \n # Get the direction\n url = \"https://maps.googleapis.com/maps/api/directions/json\"\n params = {\"origin\":start_location,\"destination\":restaurant_location,\"mode\":mode,\"key\":google_key}\n direction_json = requests.request(\"GET\", url = url, params=params).json()\n \n # Initialize a string for store detailed instruction\n direction_str = \"*\" * 100 + \"\\n\"\n \n # start address\n direction_str += \"Starting location:\".ljust(30) + direction_json['routes'][0]['legs'][0]['start_address']\n direction_str += \"\\n\"\n \n # destination address\n direction_str += \"Destination location:\".ljust(30) + direction_json['routes'][0]['legs'][0]['end_address']\n direction_str += \"\\n\"\n \n # distance\n direction_str += \"Total distance:\".ljust(30) + direction_json['routes'][0]['legs'][0]['distance']['text']\n direction_str += \"\\n\"\n direction_str += \"*\" * 100\n direction_str += \"\\n\"\n \n # transportation mode\n direction_str += \"Transportation mode:\".ljust(30) + mode\n direction_str += \"\\n\"\n \n ############\n # duration\n direction_str += \"Total duration:\".ljust(30) + direction_json['routes'][0]['legs'][0]['duration']['text']\n direction_str += \"\\n\"\n \n # store steps, travel_steps is a list of steps\n travel_steps = direction_json['routes'][0]['legs'][0]['steps']\n \n direction_str += \"*\" * 100\n direction_str += \"\\n\"\n \n direction_str += \"Detailed direction to the restaurant: \\n\"\n direction_str += \"\\n\"\n \n # Print instructions\n step_count = 0\n for step in travel_steps:\n step_count += 1\n instruction = re.sub(r'<div[^>]*>','. ',step['html_instructions'])\n instruction = re.sub(r'<[^>]*>','',instruction)\n instruction = re.sub('&nbsp;','',instruction)\n direction_str += \"Step \" + str(step_count) + \": \" + instruction +\" (\" + step['distance']['text'] + ', ' + step['duration'][\"text\"]+\")\"\n direction_str += \"\\n\"\n if step['travel_mode'] == \"WALKING\" and verbose and ('steps' in step.keys()):\n substep_list = step['steps']\n for substep in substep_list:\n if 'html_instructions' in substep.keys():\n sub_instruction = re.sub(r'<div[^>]*>','. 
',substep['html_instructions'])\n sub_instruction = re.sub(r'<[^>]*>','',sub_instruction)\n sub_instruction = re.sub('&nbsp;','',sub_instruction)\n direction_str += ' - ' + sub_instruction +\" (\" + substep['distance']['text'] + ', ' + substep['duration'][\"text\"]+\")\"\n direction_str += \"\\n\"\n print(\"\\n\")\n if step['travel_mode'] == \"TRANSIT\" and verbose:\n direction_str += \" - Vehicle:\".ljust(35) + step['transit_details']['line']['vehicle']['name'] + \" \" + step['transit_details']['line']['short_name']\n direction_str += \"\\n\"\n direction_str += \" - Departure stop:\".ljust(35) + step['transit_details']['departure_stop']['name']\n direction_str += \"\\n\"\n direction_str += \" - Arrival stop:\".ljust(35) + step['transit_details']['arrival_stop']['name']\n direction_str += \"\\n\"\n direction_str += \" - Number of stops:\".ljust(35) + str(step['transit_details']['num_stops'])\n direction_str += \"\\n\"\n return(direction_str)", "title": "" }, { "docid": "d60ec56820b447aca2eadbd543b677ab", "score": "0.49593386", "text": "def __receive_response(self):\n # type: () -> tuple\n\n if not self.connected:\n raise Exception(\"Not connected\")\n\n data = b\"\"\n while True:\n try:\n newData = self.sock.recv(4096)\n except:\n return 1, b\"\"\n data = data + newData\n if re.search(b\"\\r\\n\", newData):\n break\n if not self.session_key_set:\n return 0, data.split(b\"\\r\\n\")[0]\n else:\n data = data.split(b\"\\r\\n\")[0]\n data = self.decrypt_with_padding(self.sessionKey, data)[1]\n return 0, data", "title": "" }, { "docid": "d941990fa9b335255093935d107f09f1", "score": "0.495311", "text": "def _find_NN_route_forward(start_location, destinations, route, total_time):\n if len(destinations) == 0:\n return route, total_time\n min_dist = np.inf\n closest = None\n for p in destinations:\n dist = utils.get_duration(start_location, p)\n if dist < min_dist:\n min_dist = dist\n closest = p\n print(\"Found new point in route\")\n route.append(closest)\n total_time += min_dist\n return _find_NN_route_forward(closest, [p for p in destinations if p != closest], route, total_time)", "title": "" }, { "docid": "3ac01d638c7482748958c0fa06229078", "score": "0.49471426", "text": "def do_ROUT(self, data, (host, port)):\n\n try:\n peeledData = self.process_sphinx_packet(data)\n except Exception, e:\n print \"[%s] > ERROR during packet processing.\" % self.name\n else:\n if peeledData:\n (tag, info, (header, body)) = peeledData\n #routing_flag, meta_info = PFdecode(self.params, info)\n routing = PFdecode(self.params, info)\n if routing[0] == Relay_flag:\n routing_flag, meta_info = routing\n next_addr, dropFlag, typeFlag, delay, next_name = meta_info\n if dropFlag:\n print \"[%s] > Drop message.\" % self.name\n else:\n if next_name in self.clientList:\n if dropFlag:\n print \"[%s] > Drop message.\" % self.name\n else:\n self.saveInStorage(next_name, petlib.pack.encode((header, body)))\n # print \"[%s] > Message saved in storage.\" % 
self.name\n self.bProcessed += 1 \n else:\n if typeFlag == 'P':\n self.pProcessed += 1\n try:\n reactor.callFromThread(self.send_or_delay, delay, \"ROUT\" + petlib.pack.encode((header, body)), next_addr)\n self.bProcessed += 1\n # print \"[%s] > Message forwarded.\" % self.name\n except Exception, e:\n print \"ERROR during message processing\", str(e)\n elif routing[0] == Dest_flag:\n dest, message = receive_forward(self.params, body)\n if dest[-1] == self.name:\n if message.startswith('HT'):\n # print \"[%s] > Heartbeat looped back\" % self.name\n pass\n if message.startswith('TAG'):\n # print \"[%s] > Tagged message received\" % self.name\n self.measureLatency(message)\n self.bProcessed += 1\n else:\n raise Exception(\"Destination did not match\")", "title": "" }, { "docid": "4b523c506bae866a879b66f35af82ce5", "score": "0.49428907", "text": "def GetRouteFromOsrm(self, start, end):\r\n\r\n print 'Getting route...'\r\n urlRequest = self.RouteRequestString.format(start.AddressInfo.Longitude, start.AddressInfo.Latitude, end.AddressInfo.Longitude, end.AddressInfo.Latitude)\r\n response = requests.get(urlRequest)\r\n content = response.content \r\n jsonData = content.decode('utf8').replace(\"'\", '\"')\r\n print 'Route recieved.'\r\n # Load the JSON to a Python list & dump it back out as formatted JSON\r\n data = json.loads(jsonData)\r\n return self.GetRouteFromJson(data)", "title": "" }, { "docid": "b9386624b9756cff5f9c64f9db5e8f23", "score": "0.4936408", "text": "def main(self, time, communication, neighbors, shared):\n \n # Update neighbors\n self.neighbors = neighbors\n self.communication = communication\n \n #Route table maintenace\n #for entry in self.route_table.keys():\n #self.route_lifetimes[entry] -= 1\n #if self.route_lifetimes[entry] <= 0:\n #print(\"Route for %d has expired.\" % (entry))\n #del self.route_table[entry]\n #del self.route_lifetimes[entry]\n \n # Last RREQ/RREP memory maintenance\n if self.last_rreq_time > 0:\n self.last_rreq_time -= 1\n else:\n self.last_rreq_dest = None\n if self.last_rrep_time > 0:\n self.last_rrep_time -= 1\n else:\n self.last_rrep_dest = None\n \n \n #print(\"main for\", self.__node_id)\n if self.__node_id == 0:\n #if self.__node_id == 0:\n #print(\"S: %d->ALL\" % self.__node_id)\n #communication.send('hi')\n # Repeatedly send hi messages out\n if None not in self.route_table.values():\n buff_occupied = False\n for i in range(len(self.buff)):\n if self.buff[i] is not None:\n buff_occupied = True\n self.dispatch_msg(self.__node_id, i, self.buff[i])\n break\n print (\"Sending from buffer '%s' to %d\" % (self.buff[i], i))\n self.buff[i] = None\n if not buff_occupied:\n target = random.randint(1,7)\n message = \"hi\"\n print (\"Sending '%s' to %d\" % (message, target))\n self.dispatch_msg(self.__node_id, target, message)\n while True:\n #print (\"%d's neighbors:\" % self.__node_id, neighbors)\n msg = communication.receive()\n if msg is None:\n break\n #print ('===== R: %d<-%d ====='\n #% (self.__node_id, msg[0]))\n self.handle_msg(msg[0], msg[1])", "title": "" }, { "docid": "537e031550f4d7f1ab0fdd426a7b6f1a", "score": "0.4925848", "text": "def receive_from_server():\n global alive, data\n\n if me_position != str(fps_camera.position) or me_rotation != str(fps_camera.rotation):\n send_and_create_position_rotation_message()\n\n r_list, w_list, e_list = select.select([player_socket], [player_socket], [])\n for sock in r_list:\n length1 = player_socket.recv(2).decode()\n try:\n data = sock.recv(int(length1)).decode()\n except ValueError:\n print(\"error - 
ValueError\")\n quit()\n\n if \"|\" not in data:\n print(data)\n\n if \"|\" in data: # 0,0,0|0,0,0\n handle_position_rotation()\n\n elif \"Game restart\" in data: # (Game restarts in 5) or (Game restarted)\n handle_game_restart()\n\n elif \"shoot\" in data: # shoot\n enemy_fire_sound.play()\n\n elif \" killed \" in data: # me killed name\n handle_killed()\n\n elif \"*\" in data: # me*health\n handle_health()\n\n elif \"/\" in data: # me/kills\n kills_txt.text = data.split(\"/\")[1]\n\n elif \" respawned\" in data: # name respawned\n handle_respawned()\n\n elif \" won!\" in data: # name won!\n handle_won()\n\n elif \" joined!\" in data: # name joined!\n handle_joined()\n\n elif \" left!\" in data: # name left!\n handle_left()\n\n elif \" - \" in data: # name - kills\n handle_most_kills_player()", "title": "" }, { "docid": "5cf552a63dea107cdc1fe3ddadc49c41", "score": "0.4923247", "text": "async def get_lldp_neighbors(self):\n\n # Display info message\n log.info(\"get_lldp_neighbors\")\n\n # By default nothing is returned\n returned_output = {}\n\n # Send a command\n output = await self.send_command(self.cmd_get_lldp_neighbors)\n\n # Display info message\n log.info(f\"get_lldp_neighbors:\\n'{output}'\")\n\n # Convert a string into a list of strings\n lines = output.splitlines()\n\n # Read each line\n for line in lines:\n\n # Default value for local interface (no interface)\n local_interface = None\n\n # Initialize potential LLDP data with default values\n chassis_id = \"\"\n port_id = \"\"\n ttl = None\n port_description = \"\"\n system_name = \"\"\n system_description = \"\"\n system_capabilities = []\n management_address = \"\"\n\n # Get local interface\n if \" interface=\" in line:\n local_interface = line.split(\" interface=\")[-1].split()[0].split(\",\")[0]\n\n # Display info message\n log.info(f\"get_lldp_neighbors: local_interface: {local_interface}\")\n\n # Get Chassis ID - TLV type 1\n if \" mac-address=\" in line:\n chassis_id = line.split(\" mac-address=\")[-1].split()[0]\n\n # Convert the MAC address of the Chassis ID into a lower case string\n chassis_id = chassis_id.lower()\n\n # Display info message\n log.info(f\"get_lldp_neighbors: chassis_id: {chassis_id}\")\n\n # Get Port ID - TLV type 2\n if \" interface-name=\" in line:\n port_id = (\n line.split(\" interface-name=\")[-1].split(\"=\")[0].rsplit(\" \", 1)[0]\n )\n\n # Display info message\n log.info(f\"get_lldp_neighbors: port_id: {port_id}\")\n\n # Get Time To Live - TLV type 3\n # Not available on RouterOS. 
\"age\" parameter is a decreasing counter\n\n # Get Port description - TLV type 4\n # Not available on RouterOS.\n\n # Get System name - TLV type 5\n if \" identity=\" in line:\n system_name = line.split(\" identity=\")[-1].split()[0]\n\n # Check if return value is a string \"\" (just double quotes which means empty data)\n if system_name == '\"\"':\n\n # Yes, empty string\n system_name = \"\"\n\n # Display info message\n log.info(f\"get_lldp_neighbors: system_name: {system_name}\")\n\n # Get System description - TLV type 6\n if \" system-description=\" in line:\n system_description = (\n line.split(\" system-description=\")[-1]\n .split(\"=\")[0]\n .rsplit(\" \", 1)[0]\n )\n\n # Display info message\n log.info(\n f\"get_lldp_neighbors: system_description: {system_description}\"\n )\n\n # Get System capabilities - TLV type 7\n if \" system-caps=\" in line:\n\n # First get the capablities as a string separated by commas\n # e.g.: 'bridge,wlan-ap,router,station-only'\n string_capability = line.split(\" system-caps=\")[-1].split()[0]\n\n # Then convert them into a list of characters\n # Code\tCapability\n # B\t Bridge (Switch)\n # C\t DOCSIS Cable Device\n # O\t Other\n # P\t Repeater\n # R\t Router\n # S\t Station\n # T\t Telephone\n # W\t WLAN Access Point\n\n # Read each capability\n for capability in string_capability.split(\",\"):\n\n # Check if string is not null\n if len(capability) > 0:\n\n # Get the first letter of the capability, convert this character in uppercase\n # and add it to a list\n system_capabilities.append(capability[0].upper())\n\n # Display info message\n log.info(\n f\"get_lldp_neighbors: system_capabilities: {system_capabilities}\"\n )\n\n # Get Management address - TLV type 8\n if \" address=\" in line:\n management_address = line.split(\" address=\")[-1].split()[0]\n\n # LLDP TLV Type 9 to 127 are currently not supported by this method\n\n # Check if data can be considered as LLDP\n if local_interface and (\n port_id or system_name or system_description or management_address\n ):\n\n # Probably LLDP\n\n # Create a dictionary\n returned_dict = {\n \"chassis_id\": chassis_id,\n \"port_id\": port_id,\n \"ttl\": ttl,\n \"port_description\": port_description,\n \"system_name\": system_name,\n \"system_description\": system_description,\n \"system_capabilities\": system_capabilities,\n \"management_address\": management_address,\n }\n\n # Add the information to the dict\n # Each interface can get several returned_dict in a list\n returned_output[local_interface] = returned_output.get(\n local_interface, []\n ) + [returned_dict]\n\n # Return data\n return returned_output", "title": "" }, { "docid": "9db1ed57be6378c15ecb3947a0625f8c", "score": "0.49223483", "text": "def recv(self, bufsize: int) -> bytes:\n data = None\n recv, addr = super().recvfrom(2048)\n recv = RDTSegment.parse(recv)\n data = recv.payload\n print(data)\n #############################################################################\n # TODO: YOUR CODE HERE #\n #############################################################################\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return data", "title": "" }, { "docid": "bf15c0614d1d930ac6fc5950ec2749cd", "score": "0.4922163", "text": "def _navigate(self, menu, **kwargs):\n # Get dest_submenu arg\n if menu is None:\n raise InstrumentProtocolException('Menu parameter missing')\n result = (None, None) # base case in 
case of empty directions list\n\n # iterate through the directions\n directions_list = self._menu.get_directions(menu)\n for directions in directions_list:\n log.debug('_navigate: directions: %s', directions)\n command = directions.get_command()\n response = directions.get_response()\n timeout = directions.get_timeout()\n result = self._do_cmd_resp(command, expected_prompt=response,\n timeout=timeout, **kwargs)\n return result", "title": "" }, { "docid": "19515587febf1f72e461bb999b845ccf", "score": "0.49081016", "text": "def receive_pdu(self) -> PDU:", "title": "" }, { "docid": "02842f4017563338715a68b453b7d37e", "score": "0.49035218", "text": "def calc_receive(worldRef, inputDirection = None):\n #keep track of which keeper is currently the possessing keeper.\n #this way, you don't try to pass to yourself.\n posessingKeeperIndex = None\n for keeper in worldRef.keeperArray:\n if keeper.inPosession == True:\n posessingKeeperIndex = keeper.agentListIndex\n break\n \n #you're either calculating a hypothetical pass, or the actual pass\n if inputDirection == None:\n print(\"calc receive for ball. \")\n if(worldRef.fieldBall.trueBallDirection == (0.0, 0.0)):\n print(\"ball is stationary\") \n else:\n print(\"ball is moving\") \n inputDirection = worldRef.fieldBall.trueBallDirection\n \n if(inputDirection == (0.0, 0.0))== False:\n rDecision = __calc_receive_ball_moving(worldRef, inputDirection, posessingKeeperIndex)\n else:\n minimum = 99999.0\n argmin = None\n for i in range(len(worldRef.keeperArray)):\n temp = kUtil.getDist(worldRef.fieldBall.trueBallPos, worldRef.keeperTruePosArray[i])\n if (temp < minimum and i != posessingKeeperIndex):\n minimum = temp\n argmin = i\n rDecision = [argmin, worldRef.fieldBall.trueBallPos]\n return rDecision", "title": "" }, { "docid": "a48e781bfb2e2c3da047079d54669d3e", "score": "0.4901891", "text": "def get_directions():\n return [(1, 0), (0, 1), (-1, 0), (0, -1)]", "title": "" }, { "docid": "233ce4d821e6363f8f633a4ebcb5af52", "score": "0.4898557", "text": "def receive_data(self):\n chunks = []\n bytes_recd = 0\n try:\n while bytes_recd < 8:\n #I'm reading my data in byte chunks\n chunk = self.sockfd.recv(min(8 - bytes_recd, 4))\n if chunk == '':\n raise RuntimeError(\"Socket connection broken\")\n chunks.append(chunk)\n bytes_recd = bytes_recd + len(chunk)\n except:\n # self.DA_reprog()\n # raise RuntimeError(\"Socket connection broken\")\n # self.sockfd.settimeout(1)\n self.connect()\n self.init_tcp()\n return -1, -1\n stat_tuple = struct.unpack('L', chunks[0])\n data_tuple = struct.unpack('L', chunks[1])\n stat = stat_tuple[0]\n data = data_tuple[0]\n return stat, data", "title": "" }, { "docid": "4a2c322e26a2bb56108ed290b37e9b17", "score": "0.48973247", "text": "def recv(self, bufsize: int) -> bytes:\n data = None\n recv, addr = super().recvfrom(2048)\n recv = RDTSegment.parse(recv)\n data = recv.payload\n print(data)\n #############################################################################\n # TODO: YOUR CODE HERE #\n #############################################################################\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return data", "title": "" }, { "docid": "bf15c0614d1d930ac6fc5950ec2749cd", "score": "0.4922163", "text": "def _receive_response(self, timeout=3):\n self._serial.setTimeout(timeout)\n bytes_ = self._serial.read(size=12)\n\n if len(bytes_) < 12:\n if self.debug:\n print('read:')\n return None\n \n response = ResponsePacket(bytes_)\n if self.debug:\n print('read:', response.serialize_bytes(is_little_endian=self.is_little_endian))\n if not response.ack:\n print('ERROR:', response.error)\n return response", "title": "" }, { "docid": "2963a858a59de6ec6f36515e84658d65", "score": "0.48933783", "text": "def _wait_for_response(self, node, type_, fail_type, following=None, count=None, timeout=None):\n if timeout is None:\n timeout = 0\n timer = time.time() 
+ timeout\n while True:\n for msg in self._message_queue:\n if 'source_addr_long' in msg.keys() and msg['source_addr_long'] == node.long_addr:\n # Message is from target node\n if len(msg['rf_data']) > 0 and msg['rf_data'][0] == type_.value:\n if following is None and count is None:\n self._message_queue.remove(msg)\n return msg['rf_data'][1:]\n # Just following\n elif count is None and len(msg['rf_data']) > len(following) and msg['rf_data'][1:len(following) + 1] == following:\n self._message_queue.remove(msg)\n return msg['rf_data'][1:]\n # Just count\n elif following is None and len(msg['rf_data']) == count:\n self._message_queue.remove(msg)\n return msg['rf_data'][1:]\n # Following AND count\n elif len(msg['rf_data']) == count and len(msg['rf_data']) > 1 + len(following) and msg['rf_data'][1:len(following) + 1] == following:\n self._message_queue.remove(msg)\n return msg['rf_data'][1:]\n # Check for NACK of same type\n if len(msg['rf_data']) == 2 and msg['rf_data'][0] == Packet.CTRL_NACK.value and msg['rf_data'][1] == fail_type.value:\n self._message_queue.remove(msg)\n raise ProtocolError(\"Node returned NACK, did you ask for something which doesn't exist?\")\n if timeout > 0 and time.time() > timer:\n raise ProtocolError('Did not recieve any data from node.')", "title": "" }, { "docid": "8979d0660df0ebeff554dbd83c3cb078", "score": "0.4889615", "text": "def receive() -> Optional[int]:\n ...", "title": "" }, { "docid": "471a0c178c993862eb998127f7a5e21a", "score": "0.4872209", "text": "def getJsonOby(self):\r\n try:\r\n while True:\r\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n s.bind((\"\", 50000)) \r\n\r\n daten, addr = s.recvfrom(2024)\r\n nachricht = json.loads(daten)\r\n \r\n self.obs.extend( nachricht[\"Obstacles\"])\r\n self.path = nachricht[\"Path\"]\r\n self.solved_path1 = nachricht[\"Solved_path\"]\r\n finally: \r\n s.close()\r\n return()", "title": "" }, { "docid": "9cf60220fcccf9deaec343c7c52f355c", "score": "0.48652622", "text": "def rcv():\n result = ws.recv()\n return result", "title": "" }, { "docid": "2598a6367ecb0f60b3c61695fcd790da", "score": "0.48649973", "text": "def routine_online(self):\n rospy.loginfo(\"----------- DEBUT ROUTINE ONLINE--------------------------------\")\n self.goal_online = speechToTextPalbator.msg.SttOnlineGoal()\n rospy.loginfo(\"Sending goal to online ...\")\n order={\n 'order': self.index,\n 'action': self.currentAction\n }\n json_in_str=js.dumps(order)\n self.goal_online.order=json_in_str\n self.action_online_client.send_goal(self.goal_online)\n cp=0\n while self.action_online_client.get_result() is None and not rospy.is_shutdown():\n if cp==20 or self.connection_ON==False:\n self.action_online_client.cancel_all_goals()\n self.tts_action('Switching to offline mode')\n break\n elif self.event_touch == True:\n self.action_online_client.cancel_all_goals()\n break\n\n rospy.loginfo(\"Waiting for online detect ....\")\n cp=cp+1\n self.socketIO.wait(seconds=0.1)\n\n rospy.loginfo(str(self.action_online_client.get_result()))\n if not self.action_online_client.get_result() is None and self.action_online_client.get_result().stt_result != '':\n self.dataToUse=str(self.action_online_client.get_result().stt_result)\n elif self.event_touch==True:\n self.event_touch = False\n rospy.logwarn(\"EVENT TOUCH \"+str(self.event_touch))\n else:\n self.enable_changing_connection=False\n self.routine_offline()\n self.enable_changing_connection=True\n\n rospy.loginfo(\"----------- FIN ROUTINE ONLINE--------------------------------\")", "title": "" }, { 
"docid": "ad9dd84696819bdf29207fb17ebce5f3", "score": "0.48599744", "text": "def possible_moves_from(location):\n\n directions = [\"left\",\"right\",\"up\",\"down\"] #possible directions are added\n legal_directions = [] # to legal_directions list\n if at(location) == \"M\" or at(location) == \"R\":\n for i in directions:\n if is_legal_move(location, i):\n legal_directions.append(i)\n \n return legal_directions\n \n else: return []", "title": "" }, { "docid": "a018f1729d35160d28eee7b2e64187a0", "score": "0.4857745", "text": "def receive(self):\n try:\n size = self.ysrcv.oneint(self.s.recv(4))[0]\n type = self.ysrcv.oneint(self.s.recv(4))[0]\n logger.debug(\"size \" + str(size) + \" type \" + str(type))\n except:\n logger.debug(\"Receive failure 1\")\n return (0, 0,\"\")\n try:\n return (size, type, self.s.recv(size-4))\n except:\n logger.debug(\"Receive failure 2\")\n return (size, 0,\"\")", "title": "" }, { "docid": "3124c10f62932105bac271738d77c2b1", "score": "0.4850622", "text": "def test_drivingDirections_assumptions():\n\n moz_emu = PyMozilla.MozillaEmulator(cacher=None, trycount=0)\n web_page = moz_emu.download('http://maps.google.com/maps?f=d&source=s_d&hl=en&geocode=&saddr=mesquite%2C+tx&daddr=philadelphia%2C+pa&btnG=Get+Directions&output=js')\n distance = web_page.split('timedist')[1].split(';')[0].split('\\\\')[-2].split('e')[1].replace(',', '')\n nose.tools.assert_equal('1460', distance, 'Did not parse distance #')\n unit = web_page.split('timedist')[1].split(';')[1].split('\\\\')[0].strip()\n nose.tools.assert_equal('mi', unit, 'Did not parse distance unit')", "title": "" }, { "docid": "970a5cc6f5d6433089254735018ea95b", "score": "0.48503062", "text": "def possible_moves_from(location):\r\n directions = [\"up\", \"right\", \"down\", \"left\"]\r\n right_directions = []\r\n for i in directions:\r\n if is_legal_move(location, i):\r\n right_directions.append(i)\r\n return right_directions", "title": "" }, { "docid": "e9e53e2cda806d5ef280f2901b17c83a", "score": "0.48436964", "text": "def test_find_routes_src_dest(self):\n\n stop_data_list = []\n with open(self.stop_data_file1, \"r\") as stop_file1:\n stop_data = json.load(stop_file1)\n stop_data_list.append(stop_data)\n with open(self.stop_data_file2, \"r\") as stop_file2:\n stop_data = json.load(stop_file2)\n stop_data_list.append(stop_data)\n with open(self.stop_data_file3, \"r\") as stop_file3:\n stop_data = json.load(stop_file3)\n stop_data_list.append(stop_data)\n\n self.my_data_reader.get_total_stops(stop_data_list)\n\n banned_stops = set()\n needed_route_list = self.my_data_reader.find_src_to_dest(\"Alewife\", \"Central\", banned_stops)\n self.assertEqual(needed_route_list[0], \"Red\")\n print(\"Testcase passed. From 'Alewife' to 'Central' stop, the needed route \"\n + \"is Red.\")\n\n needed_route_list = self.my_data_reader.find_src_to_dest(\"Kendall/MIT\", \"Downtown Crossing\", banned_stops)\n self.assertEqual(needed_route_list[0], \"Mattapan\")\n self.assertEqual(needed_route_list[1], \"Orange\")\n print(\"Testcase passed. From 'Kendall/MIT' to 'Downtown Crossing' stop, \"\n + \"the needed routes are Mattapan, then Orange.\")\n\n needed_route_list = self.my_data_reader.find_src_to_dest(\"Alewife\", \"Downtown Crossing\", banned_stops)\n self.assertEqual(needed_route_list[0], \"Red\")\n self.assertEqual(needed_route_list[1], \"Mattapan\")\n self.assertEqual(needed_route_list[2], \"Orange\")\n print(\"Testcase passed. 
From 'Alewife' to 'Downtown Crossing' stop, the \"\n + \"needed routes are Red, then Mattapan, then Orange.\")", "title": "" }, { "docid": "0b4411130e52de9e43a02fb4230f5c5d", "score": "0.48432806", "text": "def get_distances(node):\n dis = np.zeros(n_recvs)\n # d2d\n for t_index, cluster in self.devices.items():\n t_device = cluster['t_device']\n delta_x, delta_y = t_device.x - node.x, t_device.y - node.y\n distance = np.sqrt(delta_x**2 + delta_y**2)\n dis[t_index*m_r: t_index*m_r+m_r] = distance\n # bs\n delta_x, delta_y = self.station.x - node.x, self.station.y - node.y\n distance = np.sqrt(delta_x**2 + delta_y**2)\n dis[n_r_devices:] = distance # 已经有n_r_devices个信道了\n return dis", "title": "" }, { "docid": "c5848e830fbeaf774d4d42d18544e4b0", "score": "0.48390016", "text": "def OutputPacketRoutingDiscards(self) -> int:", "title": "" }, { "docid": "70ae85b098cd3cf98584d99a1642dc87", "score": "0.48343644", "text": "def handle_get_successor(req):\n\tglobal mazeInfo,num_fuel_stations\n\taction_list = []\n\tdirection_list = [\"NORTH\", \"NORTH_EAST\", \"EAST\", \"SOUTH_EAST\", \"SOUTH\", \"SOUTH_WEST\", \"WEST\", \"NORTH_WEST\"]\n\tstate_x = []\n\tstate_y = []\n\tstate_direction = []\n\tstate_cost = []\n\tstate_battery = []\n\tx_cord, y_cord, direction,battery = req.x, req.y, req.orientation,req.battery\n\tnearby_clearance = 0.1\n\trefueling_cost = 1000\n\tfuel_stations = mazeInfo[-1]\n\tFULL_BATTERY_CAPACITY = 10\n\n\tif battery>0:\n\t\taction_list = [\"TurnCW\", \"TurnCCW\", \"MoveB\", \"MoveF\"]\n\t\t\n\tif num_fuel_stations!=len(fuel_stations):\n\t\tprint(\"Something is wrong!\")\n\t##Check if state is eligible for refueling station\n\tfor (fx,fy) in fuel_stations:\n\t\tif manhattanDistance(fx,fy,req.x,req.y)<=nearby_clearance:\n\t\t\tprint(\"Robot is near fuelling station!\")\n\t\t\taction_list.append(\"REFUEL\")\n\t\t\tbreak\n\n\n\tfor action in action_list:\n\t\t#Checking requested action and making changes in states\n\t\tx_cord, y_cord, direction,battery = req.x, req.y, req.orientation,req.battery\n\t\tif action == 'REFUEL':\n\t\t\tx_cord = req.x\n\t\t\ty_cord = req.y\n\t\t\tg_cost = refueling_cost\n\t\t\tbattery=FULL_BATTERY_CAPACITY\n\n\t\telif action == 'TurnCW':\n\t\t\tindex = direction_list.index(req.orientation)\n\t\t\tdirection = direction_list[(index+1)%len(direction_list)]\n\t\t\tg_cost = 2\n\t\t\tbattery-=1\n\n\t\telif action == 'TurnCCW':\n\t\t\tindex = direction_list.index(req.orientation)\n\t\t\tdirection = direction_list[(index-1)%len(direction_list)]\n\t\t\tg_cost = 2\n\t\t\tbattery-=1\n\n\t\telif action == 'MoveF':\n\t\t\tif direction == \"NORTH\" or direction == \"NORTH_EAST\" or direction == \"NORTH_WEST\":\n\t\t\t\ty_cord += 0.5\n\t\t\tif direction == \"EAST\" or direction == \"NORTH_EAST\" or direction == \"SOUTH_EAST\":\n\t\t\t\tx_cord += 0.5\n\t\t\tif direction == \"SOUTH\" or direction == \"SOUTH_EAST\" or direction == \"SOUTH_WEST\":\n\t\t\t\ty_cord -= 0.5\n\t\t\tif direction == \"WEST\" or direction == \"NORTH_WEST\" or direction == \"SOUTH_WEST\":\n\t\t\t\tx_cord -= 0.5\n\t\t\tg_cost = 1\n\t\t\tbattery-=1\n\n\t\telif action == 'MoveB':\n\t\t\tif direction == \"NORTH\" or direction == \"NORTH_EAST\" or direction == \"NORTH_WEST\":\n\t\t\t\ty_cord -= 0.5\n\t\t\tif direction == \"EAST\" or direction == \"NORTH_EAST\" or direction == \"SOUTH_EAST\":\n\t\t\t\tx_cord -= 0.5\n\t\t\tif direction == \"SOUTH\" or direction == \"SOUTH_EAST\" or direction == \"SOUTH_WEST\":\n\t\t\t\ty_cord += 0.5\n\t\t\tif direction == \"WEST\" or direction == \"NORTH_WEST\" or direction 
== \"SOUTH_WEST\":\n\t\t\t\tx_cord += 0.5\n\t\t\tg_cost = 3\n\t\t\tbattery-=1\n\t\tif req.x <= x_cord and req.y <= y_cord:\n\t\t\tisValidEdge = check_is_edge((req.x, req.y, x_cord, y_cord), \"changedValuesLater\")\n\t\telif req.x > x_cord and req.y > y_cord:\n\t\t\tisValidEdge = check_is_edge((x_cord, y_cord, req.x, req.y), \"changedValuesBefore\")\n\t\telif req.x < x_cord and req.y > y_cord:\n\t\t\tisValidEdge = check_is_edge((x_cord, y_cord, req.x, req.y), \"changedValuesBefore\")\n\t\telif req.x > x_cord and req.y < y_cord:\n\t\t\tisValidEdge = check_is_edge((req.x, req.y, x_cord, y_cord), \"changedValuesLater\")\n\n\t\tif not isValidEdge:\n\t\t\tstate_x.append(-1)\n\t\t\tstate_y.append(-1)\n\t\t\tstate_direction.append(direction)\n\t\t\tstate_cost.append(-1)\n\t\t\tstate_battery.append(battery) #Since bot can't move on invalid edge, assume that no battery is exhausted.\n\t\telse:\n\t\t\tstate_x.append(x_cord)\n\t\t\tstate_y.append(y_cord)\n\t\t\tstate_battery.append(battery)\n\t\t\tstate_direction.append(direction)\n\t\t\tstate_cost.append(g_cost)\n\t\tif len(action_list) == 0:\n\t\t\tstate_x = []\n\t\t\tstate_y = []\n\t\t\tstate_direction = []\n\t\t\tstate_battery = []\n\t\t\tstate_cost.append(1000000)\n\n\n\treturn GetSuccessorResponse(state_x, state_y, state_direction, state_battery, state_cost, action_list)", "title": "" }, { "docid": "bfb0b390b7a8a16214e5b74b94f207e6", "score": "0.48247355", "text": "def getRoute(self, gIn, connID, returnout):\n returnout.setdefault('hasRoute', {})\n routeout = returnout['hasRoute'].setdefault(str(connID), {})\n if str(connID) in self.scannedRoutes:\n return str(connID)\n for rtype in ['nextHop', 'routeFrom', 'routeTo']:\n out = self.queryGraph(gIn, connID, search=URIRef(f\"{self.prefixes['mrs']}{rtype}\"))\n for item in out:\n mrstypes = self.queryGraph(gIn, item, search=URIRef(f\"{self.prefixes['mrs']}{'type'}\"))\n mrsvals = self.queryGraph(gIn, item, search=URIRef(f\"{self.prefixes['mrs']}{'value'}\"))\n if mrstypes and mrsvals and len(mrstypes) == len(mrsvals):\n for index, mrtype in enumerate(mrstypes):\n if mrtype and mrsvals[index]:\n routeVals = routeout.setdefault(rtype, {}).setdefault(str(mrtype), {})\n routeVals['type'] = str(mrtype)\n routeVals['value'] = str(mrsvals[index])\n routeVals['key'] = str(item)\n else:\n self.logger.warning(f'Either val or type not defined. 
Key: {str(item)}, Type: {mrstypes}, Val: {mrsvals}')\n self.scannedRoutes.append(str(connID))\n return \"\"", "title": "" }, { "docid": "afc7954b21aa66df17f78c643cc5e272", "score": "0.4821728", "text": "def _recv(self, timeout=0):\n flag, msg = super(AsciiFileComm, self)._recv()\n if self.read_meth == 'readline':\n while flag and msg.startswith(self.comment):\n flag, msg = super(AsciiFileComm, self)._recv()\n return flag, msg", "title": "" }, { "docid": "61afcd5027540f04594b3f149b1d1387", "score": "0.48213786", "text": "def cb_srv_robot_turn(self, request, response) -> Turn:\n\n try:\n\n for idx, turn_ref in enumerate(request.turn_ref[:-1]):\n\n if self._TURN_PRINT_WAYPOINT:\n printlog(msg=turn_ref, msg_type=\"INFO\")\n\n self.status.speed = 0.0\n self.status.yaw += (\n request.turn_ref[idx + 1].yaw - request.turn_ref[idx].yaw\n )\n self.status.time += turn_ref.dt\n self.status.moving = True\n\n if self.status.yaw >= 360:\n self.status.yaw += -360\n\n self.pub_bot_status.publish(self.status)\n\n time.sleep(turn_ref.dt)\n\n self.status.moving = False\n self.pub_bot_status.publish(self.status)\n response.completed = True\n\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n printlog(\n msg=\"{}, {}, {}, {}\".format(e, exc_type, fname, exc_tb.tb_lineno),\n msg_type=\"ERROR\",\n )\n response.completed = False\n\n return response", "title": "" }, { "docid": "65f9b1d40784b33c2b46a39cc2b56624", "score": "0.48129413", "text": "async def wait_neighbours(self, remote: NodeAPI) -> Tuple[NodeAPI, ...]:\n neighbours: List[NodeAPI] = []\n send_chan, recv_chan = trio.open_memory_channel[List[NodeAPI]](1)\n with trio.move_on_after(constants.KADEMLIA_REQUEST_TIMEOUT) as cancel_scope:\n # Responses to a FIND_NODE request are usually split between multiple\n # NEIGHBOURS packets, so we may have to read from the channel multiple times.\n gen = self.neighbours_channels.receive(remote, send_chan, recv_chan)\n # mypy thinks wrapping our generator turns it into something else, so ignore.\n async with aclosing(gen): # type: ignore\n async for batch in gen:\n self.logger.debug2(\n 'got expected neighbours response from %s: %s', remote, batch)\n neighbours.extend(batch)\n if len(neighbours) >= constants.KADEMLIA_BUCKET_SIZE:\n break\n self.logger.debug2('got expected neighbours response from %s', remote)\n if cancel_scope.cancelled_caught:\n self.logger.debug2(\n 'timed out waiting for %d neighbours from %s, got only %d',\n constants.KADEMLIA_BUCKET_SIZE,\n remote,\n len(neighbours),\n )\n return tuple(n for n in neighbours if n != self.this_node)", "title": "" }, { "docid": "ce25fb2cfd0bae5843ddf307d1c23824", "score": "0.48115924", "text": "def pass_route():\n raise ContinueRoutingException()", "title": "" }, { "docid": "81798c282c0c8fff11c2017b94841f69", "score": "0.4808591", "text": "def __handle_server_request(self):\n\n num, msg = Protocol.recv_all(self.socket_to_server)\n if num == Protocol.NetworkErrorCodes.FAILURE:\n sys.stderr.write(msg)\n self.close_client(1)\n\n\t\t# the connection disconnected\n if num == Protocol.NetworkErrorCodes.DISCONNECTED:\n self.bye('disconnected')\n\n if \"start\" in msg:\n self.__start_game(msg)\n\n\n elif msg:\n (who, what, data) = msg.split(':')\n\n\t\t\t# the othe side left\n if data == 'EXIT' :\n self.bye('other '+ data)\n\n\t\t\t# a respond to my former attack or attacking me.\n if who == 'client':\n result = {'attacking': self.defend,\n 'defending': self.update_result,\n 
}.get(what)(data.upper())\n\n\t\t\t\t# result is the outcome from the other's attack.\n\t\t\t\t#(if he didn't attack in this message, this will be None)\n if result:\n\n self.send_defend(result)\n self.my_turn = True\n print self.opponent_name + ' plays: ' + data\n self.print_board()\n\n\t\t\t\t\t# I've lost ):\n if result == 'LOST':\n self.bye(result)\n else:\n print \"It's your turn...\"\n \n else:\n self.print_board()\n # the other tells us he lost the game... I guess we won. \n if data == 'LOST':\n self.bye('other '+ data)\n self.print_board()", "title": "" }, { "docid": "5d98fa6be01de03f68a064428b01fa08", "score": "0.48079246", "text": "def recv_msg_flag(self):\n try:\n self.receive_msg, self.receive_client_ip_port = self.host.recvfrom(self.buf_size)\n except Exception: # receive timed out\n return False\n else:\n return True", "title": "" }, { "docid": "ea94d3f95b75e7f46613dbc3e43ba", "score": "0.48033336", "text": "def return_room_directions(self):\n list_of_directions = [\"north\", \"east\", \"south\", \"west\"]\n if self.north == \"none\":\n list_of_directions.remove(\"north\")\n\n if self.east == \"none\":\n list_of_directions.remove(\"east\")\n\n if self.south == \"none\":\n list_of_directions.remove(\"south\")\n\n if self.west == \"none\":\n list_of_directions.remove(\"west\")\n\n return 'Available Directions\\n' + ' '.join(list_of_directions)", "title": "" }, { "docid": "bda75f878181c2b6f823e21be14f723e", "score": "0.48013443", "text": "def __udt_recv(sockd, length):\n\t(rmsg, peer) = sockd.recvfrom(length)\n\treturn rmsg", "title": "" }, { "docid": "47ac538a627712488bc6025e1b613737", "score": "0.48009267", "text": "def UnicastPacketsReceived(self) -> int:", "title": "" }, { "docid": "47ac538a627712488bc6025e1b613737", "score": "0.48009267", "text": "def UnicastPacketsReceived(self) -> int:", "title": "" }, { "docid": "6f6cbcad2f822fdf3a090a3600ae433d", "score": "0.4798239", "text": "def networkroute(self) :\n\t\ttry :\n\t\t\treturn self._networkroute\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "7327c100e4694beb7523ea563ec469a6", "score": "0.47967836", "text": "def raroute(self) :\n try :\n return self._raroute\n except Exception as e:\n raise e", "title": "" }, { "docid": "d583615db2b8e9e00827ec77fc59efc8", "score": "0.4793308", "text": "def directions(client, origin, destination,\n mode=None, waypoints=None, alternatives=False, avoid=None,\n language=None, units=None, region=None, departure_time=None,\n arrival_time=None, optimize_waypoints=False, transit_mode=None,\n transit_routing_preference=None, traffic_model=None):\n\n params = {\n \"origin\": convert.latlng(origin),\n \"destination\": convert.latlng(destination)\n }\n\n if mode:\n # NOTE(broady): the mode parameter is not validated by the Maps API\n # server. 
Check here to prevent silent failures.\n if mode not in [\"driving\", \"walking\", \"bicycling\", \"transit\"]:\n raise ValueError(\"Invalid travel mode.\")\n params[\"mode\"] = mode\n\n if waypoints:\n waypoints = convert.location_list(waypoints)\n if optimize_waypoints:\n waypoints = \"optimize:true|\" + waypoints\n params[\"waypoints\"] = waypoints\n\n if alternatives:\n params[\"alternatives\"] = \"true\"\n\n if avoid:\n params[\"avoid\"] = convert.join_list(\"|\", avoid)\n\n if language:\n params[\"language\"] = language\n\n if units:\n params[\"units\"] = units\n\n if region:\n params[\"region\"] = region\n\n if departure_time:\n params[\"departure_time\"] = convert.time(departure_time)\n\n if arrival_time:\n params[\"arrival_time\"] = convert.time(arrival_time)\n\n if departure_time and arrival_time:\n raise ValueError(\"Should not specify both departure_time and \"\n \"arrival_time.\")\n\n if transit_mode:\n params[\"transit_mode\"] = convert.join_list(\"|\", transit_mode)\n\n if transit_routing_preference:\n params[\"transit_routing_preference\"] = transit_routing_preference\n\n if traffic_model:\n params[\"traffic_model\"] = traffic_model\n\n return client._request(\"/maps/api/directions/json\", params).get(\"routes\", [])", "title": "" }, { "docid": "7b5a6997109dc9c4b77fd2bdeafa48e8", "score": "0.4792479", "text": "def handle_datagram(self, di):", "title": "" }, { "docid": "34daba0dcc81ad512f283a3c728d1e5f", "score": "0.47851682", "text": "def switch_routing(event, dst_mac):\n packet = event.parsed\n s_dpid = str(event.dpid)\n hi = str(packet.src)\n if dst_mac is None: hj = str(packet.dst)\n else: hj = dst_mac\n \n ## START Assignment 1.b routing videos in the down slice\n vid = False\n tcp_packet = packet.find('tcp')\n if tcp_packet is not None: \n vid = tcp_packet.dstport == VIDEO_PORT #or tcp_packet.srcport == VIDEO_PORT\n \n if (s_dpid, hi, hj, vid) in switch_ports:\n ## Routing between switches\n out_port = switch_ports[(s_dpid, hi, hj, vid)]\n if vid : pri_vid = \"Videos\"\n else : pri_vid = \"NonVid\"\n #print \"( s\",s_dpid,\", from: \" ,hi,\", to: \" , hj,\", type: \" ,pri_vid , \") >> out to port >> \" , out_port \n\n msg = of.ofp_flow_mod()\n msg.match = of.ofp_match.from_packet(packet, event.port)\n msg.idle_timeout = 10\n msg.hard_timeout = 30\n msg.actions.append(of.ofp_action_output(port = out_port))\n msg.data = event.ofp\n event.connection.send(msg)\n \n else:\n # Sending by FLOOD method - for every one \n msg = of.ofp_packet_out()\n msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))\n msg.data = event.ofp\n msg.in_port = event.port\n event.connection.send(msg)", "title": "" }, { "docid": "038b4112e1828203883ebe1419900d7e", "score": "0.47828135", "text": "def no_mask(self, routing_entry: str) -> None:\r\n\r\n route_details = {\"next-hop\": None, \"interface\": None, \"metric\": None}\r\n split_route_entry = routing_entry.split(\",\")\r\n\r\n def outgoing_interfaces():\r\n\r\n try:\r\n if re.findall(r'(?<=\\s)[A-Z].*', split_route_entry[1]):\r\n route_details[\"interface\"] 
= split_route_entry[1].replace(\" \", \"\")\r\n if route_details in self.routes[self.prefix]:\r\n pass\r\n else:\r\n self.routes[self.prefix].append(route_details)\r\n else:\r\n pass\r\n except IndexError:\r\n pass\r\n\r\n def find_metric():\r\n\r\n try:\r\n if re.findall(r'(?<=\\[)[0-9].*(?=])', routing_entry):\r\n ad_and_metric = re.findall(r'(?<=\\[)[0-9].*(?=])', routing_entry)\r\n route_metric = re.findall(r'(?<=/)[0-9].*', ad_and_metric[0])\r\n route_details[\"metric\"] = route_metric[0]\r\n if route_details in self.routes[self.prefix]:\r\n pass\r\n else:\r\n self.routes[self.prefix].append(route_details)\r\n else:\r\n pass\r\n except IndexError:\r\n pass\r\n\r\n def find_next_hop():\r\n\r\n find_hops = re.findall(r'(?<=via\\s)[0-9].*\\..*[0-9]\\..*[0-9]\\..*', split_route_entry[0])\r\n route_details[\"next-hop\"] = find_hops[0]\r\n\r\n def write_to_dictionary():\r\n\r\n if route_details in self.routes[self.prefix]:\r\n pass\r\n else:\r\n self.routes[self.prefix].append(route_details)\r\n\r\n # Use inner functions to start collecting routes and route particulars\r\n if re.findall(r'(?<=via\\s)[0-9].*\\..*[0-9]\\..*[0-9]\\..*', split_route_entry[0]):\r\n find_next_hop()\r\n outgoing_interfaces()\r\n find_metric()\r\n if re.findall(r'(?<=,\\s)direct', routing_entry):\r\n route_details[\"next-hop\"] = \"direct\"\r\n outgoing_interfaces()\r\n find_metric()\r\n if re.findall(r'(?<=,\\s)local', routing_entry):\r\n route_details[\"next-hop\"] = \"local\"\r\n find_metric()\r\n\r\n write_to_dictionary()", "title": "" }, { "docid": "a4f68eb640e10dd188850c5bce620120", "score": "0.477744", "text": "def __call__(self, request, response):\n # self.logger.debug(\"Processing route: {0}\".format(repr(route_tuple)))\n\n # Reset the parameters in the request before each match,\n request.urlargs = ()\n request.urlvars = {}\n\n # Do the match.\n does_match, args, kwargs = self.matcher.match(request)\n if not does_match:\n return False, None\n else:\n request.urlargs = tuple(args)\n request.urlvars = kwargs\n\n try:\n for cond in self.conditions:\n if not cond(request):\n raise ContinueRoutingException\n\n # We remove the optional \"_captures\" kwarg, if it exists.\n kwargs.pop('_captures', None)\n ret = self.func(*args, **kwargs)\n\n except ContinueRoutingException:\n return False, None\n\n return True, ret", "title": "" }, { "docid": "21f2d1dade7504d883d1700a4dd60aff", "score": "0.4773065", "text": "def __outbound_processing(self):\n\n try:\n data = self.__outbound_queue.get_nowait()\n destination = data[0]\n body = data[1]\n # Assemble packet\n dest = destination.to_bytes(1, byteorder='big')\n length = len(body).to_bytes(2 , byteorder='big')\n packet = dest + length + body\n\n tag = None\n if destination == TransportDataType.DEVICE:\n tag = NetworkDataType.DEVICE\n elif destination == TransportDataType.UHOST_SOCKET:\n tag = NetworkDataType.UHOST\n elif destination == TransportDataType.PLATFORM_SOCKET:\n tag = NetworkDataType.PLATFORM\n\n if tag is not None:\n return [tag, packet]\n\n except queue.Empty:\n pass\n\n return None", "title": "" }, { "docid": "151be873014049d667aefa3dfd7115b1", "score": "0.47706518", "text": "def handle_RouteReq_msg(self, addr, message):\n self.main_logger.info(\"message Route Request handling begins (addr:\"+str(addr)+\",msg:\"+str(message)+\")\")\n ae = message['AE'] \n if ae == 0:\n self.main_logger.info(\"sending unicast messages with all neigh table\")\n self.send_Update_msg(addr=addr)\n elif ae == 3:\n prefix = message['PREFIX']\n self.main_logger.info(\"sending 
unicast messages with one record from neigh table\")\n self.send_Update_msg(addr=addr, record_prefix=prefix)\n else:\n self.main_logger.warning(\"wrong ae value, silently ignoring this tlv\")", "title": "" }, { "docid": "0598d6f953095be6c5ed8b4dc5046bcf", "score": "0.4762975", "text": "def _socket_recv(self):\r\n try:\r\n data = self._socket.recv(self._recv_amount)\r\n except socket.error as err:\r\n if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):\r\n return ''\r\n elif err.args[0] == errno.ECONNRESET:\r\n return None\r\n else:\r\n raise\r\n\r\n if not data:\r\n return None\r\n else:\r\n return data", "title": "" } ]
d9417c0ed441a06210fbba27dce0044f
Normalize a diff/patch file before it's applied. This can be used to take an uploaded diff file and modify it so that it can be properly applied. This may, for instance, uncollapse
[ { "docid": "82a3c2d96d17af243a4641b25270daa8", "score": "0.6414623", "text": "def normalize_patch(\n self,\n patch: bytes,\n filename: str,\n revision: str,\n ) -> bytes:\n return patch", "title": "" } ]
[ { "docid": "4660344ce841b80aca6c91afd8767e2d", "score": "0.60515046", "text": "def _normalize(self):\n # Ingest the data\n data = self._read_raw()\n\n # Clean data using `_normalize`\n df = self.normalize(data)\n success = self._store_clean(df)\n\n return success", "title": "" }, { "docid": "f5d3371b35b634a00b17b3eaf67d7ed1", "score": "0.6012002", "text": "def normalize(self):\n added = list(self.added)\n modified = list(self.modified)\n deleted = list(self.deleted)\n for old, new in self.renamed.items():\n deleted.append(old)\n added.append(new)\n return ChangedFiles(added=added,\n modified=modified,\n deleted=deleted)", "title": "" }, { "docid": "3d684c0e7e47679fadf300df4a72f5ce", "score": "0.59212786", "text": "def normalize():", "title": "" }, { "docid": "d1d11c2e81c86adab543367f30fb2c5f", "score": "0.5538107", "text": "def normalize(file_path):\n fp = re.sub('^(/)+', '', file_path)\n fp = re.sub('(/)+$', '', fp)\n return fp", "title": "" }, { "docid": "da7139bf7a47e57aceb5a04eafae9fcb", "score": "0.5524723", "text": "def normalize(self):\n\n # Building a translate table for punctuation and number removal\n punctnum_table = str.maketrans(\n {c: None for c in string.punctuation + string.digits})\n\n for src in self.src_files.values():\n content_punctnum_rem = [token.translate(punctnum_table)\n for token in src.all_content]\n comments_punctnum_rem = [token.translate(punctnum_table)\n for token in src.comments]\n classnames_punctnum_rem = [token.translate(punctnum_table)\n for token in src.class_names]\n attributes_punctnum_rem = [token.translate(punctnum_table)\n for token in src.attributes]\n methodnames_punctnum_rem = [token.translate(punctnum_table)\n for token in src.method_names]\n variables_punctnum_rem = [token.translate(punctnum_table)\n for token in src.variables]\n filename_punctnum_rem = [token.translate(punctnum_table)\n for token in src.file_name]\n pos_comments_punctnum_rem = [token.translate(punctnum_table)\n for token in src.pos_tagged_comments]\n\n src.all_content = [token.lower() for token\n in content_punctnum_rem if token]\n src.comments = [token.lower() for token\n in comments_punctnum_rem if token]\n src.class_names = [token.lower() for token\n in classnames_punctnum_rem if token]\n src.attributes = [token.lower() for token\n in attributes_punctnum_rem if token]\n src.method_names = [token.lower() for token\n in methodnames_punctnum_rem if token]\n src.variables = [token.lower() for token\n in variables_punctnum_rem if token]\n src.file_name = [token.lower() for token\n in filename_punctnum_rem if token]\n src.pos_tagged_comments = [token.lower() for token\n in pos_comments_punctnum_rem if token]", "title": "" }, { "docid": "4193ba7f524a072193e7b47ccd559db8", "score": "0.54604113", "text": "def normalize(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "4193ba7f524a072193e7b47ccd559db8", "score": "0.54604113", "text": "def normalize(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "05454327ba94edf323ddeb804e303456", "score": "0.5424243", "text": "def _normalize_all(self):\n self._normalize_plateau()\n self._normalize_imaginary()", "title": "" }, { "docid": "e647eafb7e80697a0fa80438d28ca060", "score": "0.5324791", "text": "def normalize_fields(entries):\n # first we center\n num_attributes = len(entries[0].split(','))\n # subtract 2 for classifier exclusion\n means = []\n stdevs = []\n for attribute_index in range(num_attributes - 1):\n mean = get_mean(entries, attribute_index)\n stdev = get_stdev(entries, attribute_index, mean)\n # print 'Attribute ' 
+ str(attribute_index) + ' - mean: ' + str(mean) + ' stdev: ' + str(stdev)\n means.append(mean)\n stdevs.append(stdev)\n\n for line_num in range(len(entries)):\n entry = entries[line_num]\n unpacked = entry.split(',')\n\n # exclude the classifier\n entry_rebuild = ''\n for i in range(len(unpacked) - 1):\n unpacked[i] = float(unpacked[i]) - means[i]\n unpacked[i] = unpacked[i] / stdevs[i]\n entry_rebuild = entry_rebuild + str(unpacked[i]) + ','\n\n entries[line_num] = entry_rebuild + unpacked[-1]\n\n return entries", "title": "" }, { "docid": "7f2ffe11d7413397a35b79d82283ca22", "score": "0.52691203", "text": "def normalizeSingleFile(f,fxn=medianCorrection):\n\tl = [setDataType(i.strip().split()) for i in open(f)]\n\tgc_bins = GC_analysis(l)\n\t# gc_bins = GC_analysis(normalized_10M)\n\tmedianCorrection(l,gc_bins)\n\tnormalized_10M = normalize10M(l)\n\treturn normalized_10M", "title": "" }, { "docid": "2cf07a3094d77b26af8e2839bf8d4447", "score": "0.52355593", "text": "def normalize():\n src = request.form['src']\n output = NORMALIZER.model_api(src)\n return jsonify({'src': src, 'tgt': output})", "title": "" }, { "docid": "947480d78f15983358c213f0bb899195", "score": "0.5207058", "text": "def normalize_path(self, path):\r\n return _normalize_path(path)", "title": "" }, { "docid": "ab4ce66a06d4db7ef52cb43a4068ae3c", "score": "0.5205886", "text": "def __unflatten_dot_file(self, file, result):\n subprocess.Popen(['unflatten', '-f', '-l1', '-c10', '-o', result, file], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)", "title": "" }, { "docid": "2cd025314787272645b3f7a23a1d4d82", "score": "0.5199335", "text": "def fcm_normalize(input_file,\n mask_file,\n output_file,\n output_mask):\n\n img = nib.load(str(input_file))\n brain_mask = nib.load(str(mask_file))\n\n wm_mask = intensity_normalization.normalize.fcm.find_tissue_mask(img, brain_mask)\n normalized = intensity_normalization.normalize.fcm.fcm_normalize(img, wm_mask)\n\n nib.save(wm_mask, str(output_mask))\n nib.save(normalized, str(output_file))\n\n return output_file, output_mask", "title": "" }, { "docid": "bc5bfa973f6a6ea2cf93e516e332743c", "score": "0.51823753", "text": "def updateFile(fileName):\n\n # Clear the Blender system console\n os.system(\"cls\")\n\n # Set the directory path to the input COLLADA file, open for reading\n filedir = os.path.dirname(fileName)\n infile = open(fileName, 'r')\n print(\"fileName :\", fileName)\n\n # Read the input file into the content object\n content = infile.readlines()\n newfileName = filedir + '/temp_' + str(os.getpid()) + '.dae'\n print(\"newfileName: \", newfileName)\n outfile = open(newfileName, 'w')\n # Initialize variables\n\n # Create xml object from Tagline class, use to retain data while processing input COLLADA file\n xml = Tagline()\n\n insideMesh = False # True if the line is inside a mesh tag\n insideTriangles = False # True if the line is inside a triangles tag\n insideTristrips = False # True if the line is inside a tristrips tag\n insideTrifans = False # True if the line is inside a trifans tag\n strOut = \"\" # Placeholder string before building the actual line output\n line_triangle = \"\" # Save off the new triangle line after converting\n line_vertex = \"\" # Save off the new vertex line after converting\n line_normal = \"\" # Save off the new normal line after converting\n line_texcoord1 = \"\" # Save off the new first texcoord line after converting\n line_texcoord2 = \"\" # Save off the new second texcoord line after converting\n line_out = \"\" # Save off modified info\n 
line_orig_out = \"\" # Save off the original info\n tagCount = 0 # Add to the count if triangles/tristrips/trifans are found with TEXCOORD semantic\n foundTristrips = False # True if tristrips tag is found inside the mesh\n foundTrifans = False # True if trifans tag is found inside the mesh\n foundTriangles = False # True if triangles tag is found inside the mesh\n foundUV = False # True if semantic=\"UV\" is found inside the mesh\n\n # Loop through each line of the import file\n for line_in in content:\n\n # Store the unedited line_in in the xml object\n xml.store_line(line_in)\n # print(\"xml.line: \", xml.line)\n\n # Find the input string \"<unit\": Substitute inches for millimeters.\n # Note: This substitution is global for wherever the unit tag is found.\n if find_string(\"<unit \", line_in):\n outfile.write(line_in[0:line_in.find('<')] + '<unit name=\"inch\" meter=\"0.0254\"/>\\n')\n # print(\"xml.line: \", xml.line)\n continue\n\n # Find the input string \"<translate>\": convert values from metric (millimeters) to English (inches)\n # Note: This conversion is global for wherever the translate tag is found.\n if find_string(\"<translate>\", line_in):\n outfile.write(scaleDown(line_in)) # Scale down translate values by a factor of 25.4\n # print(\"xml.line: \", xml.line)\n continue\n\n # Find the input string \"<asset>\": Set xml.inside_asset to TRUE\n if find_string(\"<asset>\", line_in):\n outfile.write(line_in)\n xml.inside_asset = True\n # print(\"xml.line: \", xml.line)\n continue\n\n # Find the input string \"</asset>\": Set xml.inside_asset to FALSE\n if find_string(\"</asset>\", line_in):\n outfile.write(line_in)\n xml.inside_asset = False\n # print(\"xml.line: \", xml.line)\n continue\n\n # Find the input string \"<library_visual_scenes>\": Set xml.inside_library_visual_scenes to TRUE\n if find_string(\"<library_visual_scenes>\", line_in):\n xml.inside_library_visual_scenes = True\n outfile.write(line_in)\n # print(\"xml.line: \", xml.line)\n continue\n\n # Find the input string \"</library_visual_scenes>\": Set xml.inside_library_visual_scenes to FALSE\n if find_string(\"</library_visual_scenes>\", line_in):\n xml.inside_library_visual_scenes = False\n outfile.write(line_in)\n # print(\"xml.line: \", xml.line)\n continue\n # Find the input string \"<library_nodes \": Set xml.inside_library_nodes to TRUE\n if find_string(\"<library_nodes \", line_in):\n xml.inside_library_nodes = True\n outfile.write(line_in)\n # print(\"xml.line: \", xml.line)\n continue\n\n # Find the input string \"</library_nodes \": Set xml.inside_library_nodes to FALSE\n if find_string(\"</library_nodes>\", line_in):\n xml.inside_library_nodes = False\n outfile.write(line_in)\n # print(\"xml.line: \", xml.line)\n continue\n\n # Find the input string \"<library_geometries\": Save library attributes\n if find_string(\"<library_geometries\", line_in):\n xml.extract_geometry_library_name()\n # print(\"xml.geometry_library_name: \", xml.geometry_library_name)\n # print(\"xml.geometry_library_id: \", xml.geometry_library_id)\n # print(\"xml.line_out: \", xml.line_out)\n\n # Left these lines commented for future rotate algorithm updates\n # if not (line_in.find(\"<rotate>\") == -1):\n # outfile.write(rotateValues(line_in))\n # continue\n line_in = line_in.replace(' name=\"vertices\">', \">\")\n line_in = line_in.replace(' name=\"normals\">', \">\")\n line_in = line_in.replace(' name=\"texcoords\">', \">\")\n\n # Find the input string \"uvparams\": Set foundUV boolean to TRUE\n if find_string('\"uvparams\"', 
line_in):\n foundUV = True\n continue\n\n if (foundUV):\n if line_in.find(\"</source>\") != -1:\n foundUV = False\n continue\n\n # Find the input string \"<mesh>\": Set insideMesh boolean to TRUE\n if find_string(\"<mesh>\", line_in):\n insideMesh = True\n\n # Find the input string \"<float_array\": convert float array values from metric to English\n if find_string(\"<float_array\", line_in):\n # If count = 0 is found, then account for the missing </float_array> tag\n if find_string('count=\"0\"', line_in):\n outfile.write(fixZeroCountParam(line_in))\n continue\n elif not find_string('count=\"0\"', line_in): # line_in.find('count=\"0\"') == notFound:\n outfile.write(remove_float_array_attributes(line_in))\n continue\n\n # Write all the triangles/semantic lines before closing out the mesh tag\n if find_string(\"</mesh>\", line_in):\n # if line_in.find(\"</mesh>\") != notFound:\n # Calculate and update the count attribute for the output triangles tag\n process_mesh(line_texcoord1, line_texcoord2, line_out, line_orig_out, foundTriangles,\n foundTristrips, foundTrifans, line_vertex, line_normal, line_triangle, outfile)\n\n # Reinitialize all the variables again for the next mesh\n insideMesh = False\n strOut = \"\"\n line_triangle = \"\"\n line_vertex = \"\"\n line_normal = \"\"\n line_texcoord1 = \"\"\n line_texcoord2 = \"\"\n line_out = \"\"\n line_orig_out = \"\"\n tagCount = 0\n foundTristrips = False\n foundTrifans = False\n foundTriangles = False\n foundUV = False\n\n # If line is outside the mesh tags, write as is\n if not (insideMesh):\n outfile.write(line_in)\n continue\n\n # Find the input string \"<triangles\": start of triangle tag\n if find_string(\"<triangles \", line_in):\n insideTriangles = True\n foundTriangles = True\n tagCount = tagCount + 1\n\n # Find the input string \"</triangles>\": end of triangle tag\n if find_string(\"</triangles>\", line_in):\n line_in = \"\"\n insideTriangles = False\n\n # Find the input string \"<tristrips \": start of tristrips tag\n if find_string(\"<tristrips \", line_in):\n line_in = line_in.replace(\"tristrips\", \"triangles\")\n insideTristrips = True\n foundTristrips = True\n tagCount = tagCount + 1\n\n # Find the input string \"</tristrips>\": end of tristrips tag\n if find_string(\"</tristrips>\", line_in):\n line_in = \"\"\n insideTristrips = False\n\n # Find the input string \"<trifans \": start of trifans tag\n if find_string(\"<trifans \", line_in):\n line_in = line_in.replace(\"trifans\", \"triangles\")\n insideTrifans = True\n foundTrifans = True\n tagCount = tagCount + 1\n\n # Find end of trifans tag\n if find_string(\"</trifans>\", line_in):\n line_in = \"\"\n insideTrifans = False\n\n # Find the input string 'semantic=\"UV\"': Semantic UV, ignore it\n if find_string('semantic=\"UV\" ', line_in):\n line_in = \"\"\n\n # Write input line as is, if not inside the tristrips/trifans/triangles tag\n if insideTristrips == False and insideTrifans == False and insideTriangles == False:\n outfile.write(line_in)\n continue\n\n # Assume no TEXCOORD semantic, but save off original line in case it is found\n if find_string(\"<p>\", line_in):\n line_out = line_out + convertNoTEXCOORD(insideTriangles, insideTristrips, insideTrifans, line_in)\n line_orig_out = line_orig_out + line_in\n continue\n\n strOut = line_in.replace(\"tristrips\", \"triangles\")\n strOut = strOut.replace(\"trifans\", \"triangles\")\n\n if find_string('<triangles', strOut):\n line_triangle = strOut\n strOut = \"\"\n\n # Find the input string 'semantic=\"VERTEX\" ': Semantic 
VERTEX\n if find_string('semantic=\"VERTEX\" ', strOut):\n line_vertex = strOut\n strOut = \"\"\n\n # Find the input string 'semantic=\"NORMAL\" ': Semantic NORMAL\n if find_string('semantic=\"NORMAL\" ', strOut):\n strOut = strOut.replace('<input offset=\"0\" semantic=\"NORMAL\"', '<input offset=\"1\" semantic=\"NORMAL\"')\n line_normal = strOut\n strOut = \"\"\n\n # Find the input string 'semantic=\"TEXCOORD\" ': Semantic TEXCOORD\n if find_string('semantic=\"TEXCOORD\" ', strOut):\n strOut = modify_semantic_TEXCOORD(strOut)\n if line_texcoord1 == \"\":\n line_texcoord1 = line_texcoord1 + \"\\n\" + strOut\n else:\n if tagCount <= 1:\n line_texcoord2 = line_texcoord2 + \"\\n\" + strOut\n strOut = \"\"\n\n # Write the converted string\n outfile.write(strOut)\n infile.close()\n outfile.close()\n return newfileName", "title": "" }, { "docid": "0b513804a55d69387813368c2886f179", "score": "0.51699114", "text": "def normalizeIt(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "60c2a07f0941dee10a44421d8b9219b4", "score": "0.509869", "text": "def normalize(df):\n cols = [\n \"editors\",\n \"major_edits\",\n \"anonymous_edits\",\n \"pls\",\n \"transcluded_in\",\n ]\n\n for col in cols:\n if col in df.columns:\n df[col + \"_norm\"] = (\n df[col] / df.groupby(\"dbname\")[col].transform(\"sum\")\n ) * 100\n del df[col]\n\n return df", "title": "" }, { "docid": "08e2746e6eb9c7a38f82ec3ce05ddc66", "score": "0.5095599", "text": "def reformat_vcf(vcf_file, out, reference, tumor_sample=None):\n os.system(f'bcftools norm -f {reference} -m -both {vcf_file} -o tmp_.vcf')\n with VariantFile('tmp_.vcf') as fr:\n header = fr.header\n header.info.add('TDP', number=1, type='Integer', description='Tumor sample depth')\n header.info.add('NDP', number=1, type='Integer', description='Normal sample depth')\n header.info.add('TAF', number=1, type='Float', description='Tumor sample AF')\n header.info.add('NAF', number=1, type='Float', description='Normal sample AF')\n samples = list(header.samples)\n\n if tumor_sample is not None:\n if tumor_sample not in [0, 1, '0', '1']:\n if tumor_sample in samples:\n tumor_idx = samples.index(tumor_sample)\n normal_idx = 1 - tumor_idx\n else:\n raise Exception(f'{tumor_sample} is not in samples {samples} recorded in vcf')\n else:\n tumor_idx = int(tumor_sample)\n normal_idx = 1 - tumor_idx\n else:\n tumor_idx = guess_tumor_idx(vcf_file)\n normal_idx = 1 - tumor_idx\n\n with VariantFile(out, 'w', header=header) as fw:\n for record in fr:\n record.info['TDP'] = record.samples[tumor_idx]['DP']\n record.info['NDP'] = record.samples[normal_idx]['DP']\n # re-calculate AF since caller like sentieon may report AF that is not consistent with AD info\n record.info['TAF'] = round(record.samples[tumor_idx]['AD'][1]/record.samples[tumor_idx]['DP'], 3)\n if record.samples[normal_idx]['DP'] != 0:\n record.info['NAF'] = round(record.samples[normal_idx]['AD'][1]/record.samples[normal_idx]['DP'], 3)\n else:\n record.info['NAF'] = 0\n fw.write(record)\n\n os.remove('tmp_.vcf')\n os.system(f'bgzip {out}')\n os.system(f'tabix {out}.gz')", "title": "" }, { "docid": "1dd1393dd4e4ba0b15fc7a33cd93802b", "score": "0.50826323", "text": "def beforeNormalization(self):\n return self._normalization().before();", "title": "" }, { "docid": "b2a7307ce2df4273b5c4bf33eb15fa65", "score": "0.50728446", "text": "def test_format_edited_parts_all_unchanged(git_repo, monkeypatch):\n monkeypatch.chdir(git_repo.root)\n paths = git_repo.add({\"a.py\": \"pass\\n\", \"b.py\": \"pass\\n\"}, commit=\"Initial commit\")\n 
paths[\"a.py\"].write_bytes(b'\"properly\"\\n\"formatted\"\\n')\n paths[\"b.py\"].write_bytes(b'\"not\"\\n\"checked\"\\n')\n\n result = list(\n darker.__main__.format_edited_parts(\n Path(git_repo.root),\n {Path(\"a.py\"), Path(\"b.py\")},\n RevisionRange(\"HEAD\"),\n True,\n {},\n report_unmodified=False,\n )\n )\n\n assert result == []", "title": "" }, { "docid": "57f206b4c23f21c280e0fbc4039acd9f", "score": "0.5064052", "text": "def splitPatchfile(self, patchfile):\r\n\t\tsplit_patchfile = patchfile.split('diff --git')\r\n\t\treturn split_patchfile", "title": "" }, { "docid": "3f06f1a629f428fbc908d9b39e61bb30", "score": "0.5058103", "text": "def normalize(self):\n def __normalize_centralized(name, node):\n if node.attrs['sd'] != 0:\n node[:] = (node - (node.attrs['AF'] * 2)) / node.attrs['sd']\n\n if self.normalized:\n logging.info(\"-Data is already normalized\")\n return\n logging.info(\"-Normalizing the data...\")\n if not self.has_local_AF:\n self.compute_local_AF()\n if self.center is None: \n with h5py.File(self.store_name, 'a') as f:\n for chrom in f.keys():\n if chrom != 'meta':\n logging.info('--Normalizing chrom: ' + chrom)\n f[chrom].visititems(__normalize_centralized)\n f.attrs['normalized'] = True\n self.normalized = True\n\n else:\n #centralized setting\n pass", "title": "" }, { "docid": "041b0f55fec2e69f1f062db424e5eeed", "score": "0.50550073", "text": "def _update_normalizer(self, rollout):\n if self._config.ob_norm:\n self._agent.update_normalizer(rollout['ob'])", "title": "" }, { "docid": "b2f017ec6c2f75321eb742bac2cfca73", "score": "0.5026032", "text": "def fix(self):\n new_filedata = self.strip_existing()\n new_filedata = '%s\\n%s' % (self.license_header(), new_filedata)\n\n log.debug('A diff of the changes that will be made is as follows:')\n diff = difflib.unified_diff(\n self.filedata.splitlines(), new_filedata.splitlines(), 'old', 'new')\n for line in diff:\n log.debug(' %s', line.strip())\n\n with open(self.filename, 'w') as f:\n f.write(new_filedata)", "title": "" }, { "docid": "15883742d00fa30e15678d520696626a", "score": "0.49960992", "text": "def coverage_normalization():\n #Normalize coverage in each sample:\n filenames = glob.glob(\"*average_coverage.bed\")\n file_dict = dict()\n for filename in filenames:\n bed_file = open(filename, \"r\")\n coverage_list = []\n common_information_list = []\n for line in bed_file:\n match = re.match(r\"(.*)\\t(.*)\\t(.*)\\t(.*)\\t(.*)\\t(.*)\\t(.*)\", line)\n coverage = float(match.group(7))\n coverage_list.append(coverage)\n if match.group(4) == \"Mpnr01\":\n ribosomal_cov1 = float(match.group(7))\n if match.group(4) == \"Mpnr02\":\n ribosomal_cov2 = float(match.group(7))\n if match.group(4) == \"Mpnr03\":\n ribosomal_cov3 = float(match.group(7))\n if filename == filenames[-1]:\n common_information = match.group(1) + \"\\t\" + match.group(2) + \"\\t\" + match.group(3) + \"\\t\" + match.group(4) + \"\\t\" + match.group(5) + \"\\t\" + match.group(6)\n common_information_list.append(common_information)\n\n sum_ribosomal_cov = ribosomal_cov1 + ribosomal_cov2 + ribosomal_cov3\n normalized_coverage_list = [k/sum_ribosomal_cov for k in coverage_list]\n file_dict[filename] = normalized_coverage_list\n\n ## Write merging results:\n merging_file = open(\"merged_coverage.bed\", \"w\")\n # Sorting\n def atoi(text):\n return int(text) if text.isdigit() else text\n\n def natural_keys(text):\n return [ atoi(c) for c in re.split(r'(\\d+)', text) ]\n filenames.sort(key=natural_keys)\n\n # Writing header\n 
merging_file.write(\"\\t\\t\\t\\t\\t\\t\")\n for filename in filenames:\n match = re.match(r\"^(Nub_\\d+.?)_\", filename)\n mini_name = match.group(1)\n merging_file.write(mini_name)\n if filename != filenames[-1]:\n merging_file.write(\"\\t\")\n else:\n merging_file.write(\"\\n\")\n\n for common_information in common_information_list:\n merging_file.write(common_information + \"\\t\")\n for file in filenames:\n merging_file.write(str(file_dict[file][0]))\n del file_dict[file][0]\n if file != filenames[-1]:\n merging_file.write(\"\\t\")\n else:\n merging_file.write(\"\\n\")", "title": "" }, { "docid": "df3814401ce5d8217c5bbb25ca5711ea", "score": "0.4991911", "text": "def preprocess(f_path):\n\n no_wiki_amrs = delete_wiki(f_path)\n del_amrs = delete_amr_variables(no_wiki_amrs)\n old_amrs, sent_amrs = single_line_convert(del_amrs) # old amrs with deleted wiki and variables\n\n return sent_amrs, old_amrs", "title": "" }, { "docid": "0dc78209f03b255a321c98053808cf97", "score": "0.4987103", "text": "def handle_renames(self, diff_content):\r\n\r\n # svn diff against a repository URL on two revisions appears to\r\n # handle moved files properly, so only adjust the diff file names\r\n # if they were created using a working copy.\r\n if self.options.repository_url:\r\n return diff_content\r\n\r\n result = []\r\n\r\n from_line = to_line = None\r\n for line in diff_content:\r\n if self.DIFF_ORIG_FILE_LINE_RE.match(line):\r\n from_line = line\r\n continue\r\n\r\n if self.DIFF_NEW_FILE_LINE_RE.match(line):\r\n to_line = line\r\n continue\r\n\r\n # This is where we decide how mangle the previous '--- '\r\n if from_line and to_line:\r\n # If the file is marked completely removed, bail out with\r\n # original diff. The reason for this is that 'svn diff\r\n # --notice-ancestry' generates two diffs for a replaced file:\r\n # one as a complete deletion, and one as a new addition.\r\n # If it was replaced with history, though, we need to preserve\r\n # the file name in the \"deletion\" part - or the patch won't\r\n # apply.\r\n if self.DIFF_COMPLETE_REMOVAL_RE.match(line):\r\n result.append(from_line)\r\n result.append(to_line)\r\n else:\r\n to_file, _ = self.parse_filename_header(to_line[4:])\r\n copied_from = self.find_copyfrom(to_file)\r\n if copied_from is not None:\r\n result.append(from_line.replace(to_file, copied_from))\r\n else:\r\n result.append(from_line) # As is, no copy performed\r\n result.append(to_line)\r\n from_line = to_line = None\r\n\r\n # We only mangle '---' lines. 
All others get added straight to\r\n # the output.\r\n result.append(line)\r\n\r\n return result", "title": "" }, { "docid": "c5fa98006b4744749db9713299752745", "score": "0.4975552", "text": "def normalize_text(self, text):\n sub_patterns = [\n (r'//.+', ''), # remove comments\n (r'\\n', ''), # removes newlines\n (r'\\s+', ' ') # normalize whitespace\n ]\n for pattern, substitution in sub_patterns:\n text = sub(pattern, substitution, text)\n return text", "title": "" }, { "docid": "159738006a2f328d5589c71ef28275d8", "score": "0.49733937", "text": "def normalize(self):\n length = self.length\n if length != 1.0:\n self._data = self._data / length", "title": "" }, { "docid": "5c0f7b40a19fd6483efa86c0b04b39ec", "score": "0.49566263", "text": "def normalize_all(linkage_table_new_path, linkage_table_old_path=\"\"):\n linkage_table_new = pd.read_csv(linkage_table_new_path)\n #linkage_table_old = linkage_table_old_path if linkage_table_old_path == \\\n #\"\" else pd.read_csv(linkage_table_old_path)\n file_paths_new = list(linkage_table_new[\"file_path\"])\n column_names_new = list(linkage_table_new[\"format\"])\n #file_paths_old = linkage_table_old if linkage_table_old == \"\" else \\\n #list(linkage_table_old[\"file_path\"])\n #column_names_old = linkage_table_old if linkage_table_old == \"\" else \\\n #list(linkage_table_old[\"format\"])\n log_file_paths = []\n min = math.inf\n max = -math.inf\n\n # Log scale all newly downloaded files and save the paths to files with\n # log values\n for i in range(0, len(file_paths_new)):\n log_file_path = log_scale_file(file_paths_new[i], column_names_new[i])\n log_file_paths.append(log_file_path)\n\n # Find global min and global max values", "title": "" }, { "docid": "d5f2368180236a30168f5d6a1dce3598", "score": "0.49292332", "text": "def uncompact_small_file(container_file):\n cmdlet_str = \"uncompact -containerFile \" + container_file\n return submit_cmdlet(cmdlet_str)", "title": "" }, { "docid": "b320e27f43b9b65bef03c1e850462ce6", "score": "0.4901277", "text": "def normalize(filename):\n # Default value means we do not resolve a model file.\n if filename == \"default\":\n return filename\n filename = expanduser(filename)\n if isabs(filename):\n return filename\n else:\n return join(os.getcwd(), filename)", "title": "" }, { "docid": "74b3e26c7fdfb05fd7b624cf546c3e66", "score": "0.4900641", "text": "def process_patch(patch):\n\n if not patch.startswith(\"@@\"):\n raise Exception(\"This is not a unidiff\")\n\n return \"\\n\".join(line for line in patch.split(\"\\n\") if not line.startswith(\"-\"))", "title": "" }, { "docid": "510bb0c484eb344829e76d847de4932c", "score": "0.48969996", "text": "def normalize_data(file_name):\n logging.info(\"Normalizing Data...\\n\")\n data = read_data(file_name)\n tt = data[:, 0]\n temp = data[:, 1]\n yy = temp\n res = fit_sin(tt, yy)\n # plt.plot(tt, yy, \"k-\", label=\"raw data\", linewidth=1)\n # plt.plot(tt, res[\"fitfunc\"](tt), \"r-\", label=\"fit curve\", linewidth=2)\n # plt.show()\n normalized_data = np.subtract(yy, res[\"fitfunc\"](tt))\n # plt.plot(tt, normalized_data, \"k-\", label=\"normalized data\", linewidth=2)\n # plt.show()\n return np.column_stack((tt, normalized_data))", "title": "" }, { "docid": "233a9a22f266d366fd4d46c7262bf1fd", "score": "0.48901212", "text": "def normalize(self):\n self /= self.norm", "title": "" }, { "docid": "1d887464b001484301d2f2e9b70e122f", "score": "0.4879508", "text": "def normalize_faces(self) -> None:\n self.faces = list(normalize_faces(self.faces, close=False))", "title": "" }, 
{ "docid": "9753d3f7f4f8c79ad90213124d72389f", "score": "0.48768154", "text": "def normalize_filename(self, filename):\n\n filename = os.path.normpath(filename)\n if filename.startswith(self.cpath):\n return filename[(len(self.cpath) + 1):]\n else:\n return filename", "title": "" }, { "docid": "2cbb51e5630a52a6d3783c0ca04d5533", "score": "0.48424026", "text": "def reformat():\n fpaths = testdata_fpaths()\n\n for fpath in fpaths:\n text = ut.readfrom(fpath)\n root = latex_parser.LatexDocPart.parse_text(text, debug=None)\n\n if ut.get_argflag('--fixcref'):\n root.find(' \\\\\\\\cref')\n continue\n\n #print(root.children)\n #root.children = root.children[0:5]\n #print('Parsed Str Short')\n new_text = '\\n'.join(root.reformat_blocks(debug=None))\n # remove trailing spaces\n new_text = re.sub(' *$', '', new_text, flags=re.MULTILINE)\n # remove double newlines\n new_text = re.sub('(\\n *)+\\n+', '\\n\\n', new_text, flags=re.MULTILINE)\n\n if ut.get_argflag('--summary'):\n print('---summary---')\n root.print_summary()\n print('---/summary---')\n # ut.colorprint(root.summary_str(), 'blue')\n\n numchars1 = len(text.replace(' ', '').replace('\\n', ''))\n numchars2 = len(new_text.replace(' ', '').replace('\\n', ''))\n\n print('numchars1 = %r' % (numchars1,))\n print('numchars2 = %r' % (numchars2,))\n #assert numchars1 == numchars2, '%r == %r' % (numchars1, numchars2)\n\n print('old newlines = %r' % (text.count('\\n'),))\n print('new newlines = %r' % (new_text.count('\\n'),))\n\n #import unicodedata\n #new_text = unicodedata.normalize('NFKD', new_text).encode('ascii','ignore')\n #print('new_text = %r' % (new_text,))\n\n ut.dump_autogen_code(fpath, new_text, codetype='latex', fullprint=False)", "title": "" }, { "docid": "3e54d5ff3d4c3961537d95683edb2d2e", "score": "0.4841468", "text": "def _sanitize_version_0_changeset(self, changeset):\r\n\r\n sanitized_changeset = []\r\n for old_file, new_file in changeset:\r\n # This should not happen for new file but it is safer to sanitize\r\n # both file revisions.\r\n sanitized_changeset.append(\r\n (self._sanitize_version_0_file(old_file),\r\n self._sanitize_version_0_file(new_file)))\r\n\r\n return sanitized_changeset", "title": "" }, { "docid": "bdb2cfbb99d0f9b8c90465f3b7aa04a6", "score": "0.483781", "text": "def normalize(self, inplace=False):\n s = self._inplacer(inplace)\n if s.is_ket():\n s.data /= norm(s.data)\n else:\n s.data /= trace(s.data)\n return s", "title": "" }, { "docid": "36e3c284ad5454cceab266c31e730bc1", "score": "0.48242208", "text": "def normalize(self, *args, **kwargs):\n return self._normalizer.normalize(*args, **kwargs)", "title": "" }, { "docid": "9aca88eb890d573e5d151d2397380eaf", "score": "0.48232856", "text": "def read_norm(data_filename, metadata_filename, batch_name):\n\n # NOTE: Ensure that the file has already being preprocessed once using\n # the commented out function belo\n #dfs = pr.preprocess_dataset(data_filename)\n\n dfs = pd.read_csv(data_filename)\n dfm = pd.read_csv(metadata_filename)\n dfm = dfm[dfm.batch == batch_name].copy()\n mdict = {m:c for m, c in zip(dfm.tmt_label, dfm['sample'])}\n dfs = dfs.rename(columns=mdict)\n samples = dfm['sample'].tolist()\n \n # Normalize summed intensities of each protein\n # by Number of peptides reported\n peptide_col = 'Set%s Peptides' % batch_name[-1]\n if 'Number_of_peptides' in dfs.columns.tolist():\n dfs = dfs.rename(columns={'Number_of_peptides' : peptide_col})\n dfs[samples] = dfs[samples].div(dfs[peptide_col], axis=0)\n\n # Get sample name of bridge sample. 
If 2 exist, get old bridge\n bridge_samples = [s for s in samples if 'Bridge' in s]\n if len(bridge_samples) > 1:\n bridge_sample = [s for s in bridge_samples if 'Old' in s][0]\n else:\n bridge_sample = bridge_samples[0]\n\n # Normalize samples relative to Bridge Sample\n dfn = bn.normalize_within_batch(dfs, samples, control=bridge_sample)\n return dfn", "title": "" }, { "docid": "5fbe8930e608ef379e57089db426b1d6", "score": "0.48200613", "text": "def normalize(self) -> None:\n # language=rst\n pass", "title": "" }, { "docid": "2f0e2dfd6378e9bd5046973b0276f385", "score": "0.48146254", "text": "def normalize_respectively(data):\r\n data[..., :3] = normalize(data[..., :3])\r\n data[..., 3:] = normalize(data[..., 3:])\r\n\r\n return data", "title": "" }, { "docid": "0819178c1cc56c80b41927bdc74518c7", "score": "0.48089823", "text": "def fstringify_file_diff(self, filename: str) -> bool: # pragma: no cover\n tag = 'diff-fstringify-file'\n self.filename = filename\n self.silent = False\n tog = TokenOrderGenerator()\n try:\n contents, encoding, tokens, tree = tog.init_from_file(filename)\n if not contents or not tokens or not tree:\n return False\n results = self.fstringify(contents, filename, tokens, tree)\n except Exception as e:\n print(e)\n return False\n # Something besides newlines must change.\n changed = regularize_nls(contents) != regularize_nls(results)\n if changed:\n show_diffs(contents, results, filename=filename)\n else:\n print(f\"{tag}: Unchanged: {filename}\")\n return changed", "title": "" }, { "docid": "89e94869458e4c11c17956987e3ab792", "score": "0.48079386", "text": "def normalize(self):\n self[:] = self / self.mag()", "title": "" }, { "docid": "1042f257c46fe2bee22ecda21e04f960", "score": "0.48023447", "text": "def normalize_parts(self):\n # normalize states\n for i in range(len(self.states)):\n s = sorted(self.states[i])\n self.states[i] = '_'.join(s)\n\n # normalize rules\n for i in range(len(self.transitions)):\n r = self.transitions[i]\n r[0].sort() # current state\n r[2].sort() # next state\n self.transitions[i] = ('_'.join(r[0]), r[1], '_'.join(r[2]))\n\n # normalize initial state\n self.init_state.sort()\n self.init_state = '_'.join(self.init_state)\n\n # normalize final states\n for i in range(len(self.final_states)):\n s = sorted(self.final_states[i])\n self.final_states[i] = '_'.join(s)", "title": "" }, { "docid": "b047bb5dc29bcdd5228cfe58d364ce03", "score": "0.480003", "text": "def clean(self):\n self.name = self.name.strip()\n self.filename = self.filename.strip()", "title": "" }, { "docid": "942c7822a210786001a7bf3f9a983f84", "score": "0.47989634", "text": "def clean_embeddings_file(self):\n embeddings_file = self.cleaned_data['embeddings_file']\n if not isinstance(embeddings_file, TemporaryUploadedFile):\n return embeddings_file\n \n file_path = embeddings_file.temporary_file_path()\n _, ext = os.path.splitext(file_path)\n if ext == '.gz':\n try:\n itr = gzip.open(file_path, 'rt')\n except Exception as e:\n print(e)\n raise forms.ValidationError('Invalid file format')\n else:\n itr = open(file_path, 'rt')\n line = next(itr)\n embedding_size = self.validate_line(line)\n c = 0\n # Validate first 10 lines of the file\n for line in itr:\n embed_size = self.validate_line(line)\n if embed_size != embedding_size:\n raise forms.ValidationError('All embeddings should have same size')\n c += 1\n if c == 10:\n break\n self.dataset.indexed = False\n self.dataset.save()\n self.instance.embedding_size = embedding_size\n return embeddings_file", "title": "" }, { "docid": 
"5d4623cf4b1f86dbef55dbf7a7298377", "score": "0.47983256", "text": "def normalize(self):\n relicResult(librelic.ec_norm_abi, None, self, self)", "title": "" }, { "docid": "28961e5c890aa5233d5ca14509e2c8ac", "score": "0.47895247", "text": "def _normalize_data(self, data):\n data /= 255.\n return data", "title": "" }, { "docid": "4b7536079043a20fc0b4d060ae43ce0c", "score": "0.47881186", "text": "def apply_patch_for_empty_files(\n self,\n patch: bytes,\n *,\n p_num: str,\n revert: bool = False,\n ) -> bool:\n raise NotImplementedError", "title": "" }, { "docid": "30bd50c2c1df71ae70772a1783e3b1cb", "score": "0.4784767", "text": "def normalization(self, value):\n if self.utils:\n pass\n \"\"\"Apply the user-defined rule first, and then apply default rules\"\"\"\n value = value.replace(constant.FB_FULL, constant.FB_SHORT)\n return value", "title": "" }, { "docid": "7de23fa5c204e72a3fb8e764101428d7", "score": "0.4776863", "text": "def patch2(diff, patched_path):\n\n orig = get_input_file_path()\n patched = patched_path\n\n pe_file = pefile.PE(orig)\n base = pe_file.OPTIONAL_HEADER.ImageBase\n\n for ea, orig, new in diff:\n\n # FIXME: temp hack for the relocation diffs ..\n if ea < base:\n if not pe_file.set_bytes_at_offset(ea, new):\n print \"error setting bytes\"\n else:\n # sanity check ..\n curr = pe_file.get_data(ea-base, 1)\n if curr != orig:\n print \"error in patching\", hex(ea), \":\", ord(curr), \"!=\", ord(orig)\n\n if not pe_file.set_bytes_at_rva(ea-base, new):\n print \"error setting bytes\"\n\n pe_file.write(patched)\n pe_file.close()", "title": "" }, { "docid": "eca692032c0a6d45d411e269049dda7f", "score": "0.4761441", "text": "def fill_original_files(patches, target_export_path, source_export_path):\n for index, patch in enumerate(patches):\n log(\"process_codereview_from_fogbugz(): Munging patch set #{0}\".format(index))\n for attr, exp_path, filename in [\n ('content', target_export_path, patch.old_filename or patch.filename),\n ('patched_content', source_export_path, patch.filename)]:\n content = getattr(patch, attr)\n if not content and filename:\n content = create_file_content(patch, exp_path, filename)\n setattr(patch, attr, content)\n patch.put()", "title": "" }, { "docid": "2662bb8a85ac6d56d23281e834feac77", "score": "0.4757011", "text": "def correct_xml(xml_file, outpath, verbose=True):\n in_tree = etree.parse(xml_file)\n root = in_tree.getroot()\n\n lang = re.search(r'(de|fr|en)', xml_file).group(1)\n # print(lang)\n\n # doc_id is expected to be 'horizonte_2005_66_de_NNS_article_boundaries.xml'\n doc_id = root.attrib[\"document_id\"].split('_')[:4]\n\n new_root = etree.Element(root.tag.lower())\n new_root.attrib[\"document_id\"] = doc_id\n\n articles = root.xpath(\".//Article\")\n\n for article in articles:\n\n article.attrib[\"potential_errors\"] = \"false\"\n\n main_font, main_size = count_font_styles(article)\n\n page_nums = collect_page_numbers(article)\n\n article = merge_dropcaps(article)\n\n article = denoise(article)\n\n # article = shift_dropcap_para(article) # attempt to fix first paragraph incorrectly extracted by tet\n\n merged_article = consecutive_merger(article, lang, page_nums, main_font, main_size, verbose)\n\n for i in range(1,8):\n merged_article = skip_merger(article, lang, page_nums, main_font, main_size, i, verbose)\n\n merged_article = check_for_odd_dropcaps(merged_article)\n\n new_root.append(merged_article)\n\n out_tree = etree.ElementTree(new_root)\n\n write_and_format_outfile(xml_file, out_tree, outpath)", "title": "" }, { "docid": 
"c334bc5260be309e6df3fea7a25b1f98", "score": "0.475043", "text": "def normalizeImage(self, image):\n\n\t\t# Choose normalization metho. TO DO: Put into config file.\n\t\t# return self.meanSubtractChannelNormalizeImage(image)\n\t\t# return self.rangeZeroToOneNormalizeImage(image)\n\t\treturn self.divideBy255NormalizeImage(image)", "title": "" }, { "docid": "852834b90affd9d22e66364418e306ad", "score": "0.47468716", "text": "def normalize(self):\n max_mag = self.magnitudes.max()\n min_mag = self.magnitudes.min()\n diff_mag = (max_mag - min_mag)\n normalized = copy.deepcopy(self)\n normalized.magnitudes = \\\n (normalized.magnitudes - min_mag) / (2 * diff_mag) - 0.25\n return normalized", "title": "" }, { "docid": "9623919528fe5a8412136e32eb5d6310", "score": "0.474495", "text": "def normalize(self, scale_factor=None):\n if scale_factor is None:\n scale_factor = self.calculate_normalizing_scale_factor()\n self.scale_factor = scale_factor\n for i in range(len(self.strokes)):\n self.strokes[i][:, 0:2] /= self.scale_factor", "title": "" }, { "docid": "91e17a3f536df666a9926a3c826fcb76", "score": "0.47435778", "text": "def clean(self):\n self.__lines = [re.sub(r\"\\n\", \"\", line) for line in self.__lines]\n self.__lines = [re.sub(r\"^--.*$\", \"\", line) for line in self.__lines]\n self.__lines = [i for i in self.__lines if i]", "title": "" }, { "docid": "3caa93d7e9e76ce95406d85ebc0b8485", "score": "0.47375742", "text": "def normalization(self):\n # If the recipe is not empty.\n # Converting \"magic numbers\" to Amounts because we are performing operations on Amount\n # objects.\n if self.recipe_dict:\n total_amount = Amount(0)\n for ingredient, amount in self.get_recipe_dict().items():\n total_amount += amount\n\n if total_amount.get_num() != 100:\n coefficient = Amount(100)/total_amount\n for ingredient, amount in self.recipe_dict.items():\n self.recipe_dict[ingredient] = amount * coefficient", "title": "" }, { "docid": "c078050aa2879583f4bb9a2281d42828", "score": "0.47348055", "text": "def set_normalize_crrna_activity(self):\n assert self.use_difference_from_wildtype_activity is False\n self.normalize_crrna_activity = True", "title": "" }, { "docid": "b46dd2218694be61155b3b49f4425c1e", "score": "0.47314873", "text": "def _prep_patch(self, patch_num, line='', patch_level=1):\n if patch_num not in self._patches:\n _logger.error(\"Patch %s not found for line: %s\", patch_num, line.strip())\n raise RuntimeError(\"Patch not found\")\n\n if patch_level != 1:\n raise NotImplementedError(\"patch level %s\" % patch_level)\n\n # detect git patches:\n git_from_re = re.compile(r'From ([0-9a-f]{40}) ')\n git_log_re = re.compile(r'commit ([0-9a-f]{40})')\n try:\n fp = None\n patch_path = self._patches[patch_num]\n if '/' in patch_path:\n patch_path = patch_path.rsplit('/',1)[1]\n patch_fullpath = os.path.join(self._svndir, 'SOURCES', patch_path)\n fp = open(patch_fullpath, 'rb')\n first_line = fp.readline()\n \n if git_log_re.match(first_line):\n # it comes from 'git log --patch-with-stat' , which must be translated to 'git am'\n _logger.debug(\"Patch %s seems to come from git, using 'git am'\", patch_path)\n self._prep_steps.append((Git_Log_Patch, dict(source=patch_path)))\n return\n elif git_from_re.match(first_line):\n # It is a git patch\n _logger.debug(\"Patch %s seems to come from git, using 'git am'\", patch_path)\n self._prep_steps.append((Git_Am_Patch, dict(source=patch_path)))\n return\n elif first_line.startswith(('diff ', '---', 'Index:')):\n # regular diff patch\n pass\n else:\n 
_logger.info('Patch \"%s\" starts with: %s', patch_path, first_line[:60])\n if not self._patch_comments.get(patch_num, None):\n # try to read first lines of patch as comment\n n = 1\n comment_lines = []\n while n < 100:\n comment_lines.append(first_line.rstrip('\\n'))\n first_line = fp.readline()\n if first_line.startswith(('diff ', '--- ')):\n break\n n += 1\n comment_lines += [\"\", \"patch: %s from upstream\" % self._patches[patch_num]]\n self._patch_comments[patch_num] = comment_lines\n\n except Exception:\n _logger.warning(\"Cannot auto-detect patch: %s\", patch_path, exc_info=True)\n finally:\n if fp:\n fp.close()\n\n # regular patch:\n self._prep_steps.append((Patch, dict(source=patch_path, patch_level=patch_level)))\n self._prep_steps.append((Git_Commit_Source, dict(msg=self._patch_comments.get(patch_num, '') \\\n or \"apply patch: %s\" % patch_path) ))", "title": "" }, { "docid": "9eef13168982275ff8bc68ec2169aecf", "score": "0.47146687", "text": "def generate_before_patch_md5sums(path_to_repo, diff_files):\n for diff_file, details in diff_files.items():\n # Reverse the diff to see what the file looked like before\n subprocess.check_call(\n [\n 'patch', '-R', details['path_in_repo'],\n os.path.join(os.getcwd(), PATCH_DIFF_PATH, diff_file),\n ],\n cwd=path_to_repo,\n )\n details['before_md5sum'] = subprocess.check_output(\n get_md5sum_command(details['path_in_repo']),\n cwd=path_to_repo,\n shell=True,\n executable='/bin/bash',\n ).split(' ')[0]\n # Now restore the file to what it should be\n subprocess.check_call(\n [\n 'patch', details['path_in_repo'],\n os.path.join(os.getcwd(), PATCH_DIFF_PATH, diff_file),\n ],\n cwd=path_to_repo,\n )", "title": "" }, { "docid": "45c646f5e4a625f00cbe8d4e58691f02", "score": "0.47102413", "text": "def normalizeData(patches):\n\n # Remove DC (mean of images).\n patches = patches - np.mean(patches, axis = 0)\n\n # Truncate to +/-3 standard deviations and scale to -1 to 1\n pstd = 3 * np.std(patches)\n patches = np.maximum(np.minimum(patches, pstd), -pstd) / pstd\n\n # Rescale from [-1,1] to [0.1,0.9]\n patches = (patches + 1) * 0.4 + 0.1\n\n return patches", "title": "" }, { "docid": "2bf923791cec62678e2d31442da94355", "score": "0.47036242", "text": "def normalize(self):\n evaluate(self._calling_method + '.normalize()')", "title": "" }, { "docid": "c2caaa4e8e6ba2c1bdf29ec39237e668", "score": "0.47035545", "text": "def fix_offset_and_normalize(data):\n offset = remove_offset(data)\n return normalize(offset)", "title": "" }, { "docid": "b9c2d548abae250301d40078b3d08055", "score": "0.47007585", "text": "def _trim_diffs_to_fields(self):\n fieldNames = self.fields.keys()\n pairKeys = self.fieldDiffs.keys()\n for pairKey in pairKeys:\n field1, field2 = pairKey\n if (field1 not in fieldNames) or (field2 not in fieldNames):\n del self.fieldDiffs[pairKey]\n del self.fieldDiffSigmas[pairKey]\n del self.fieldDiffAreas[pairKey]\n del self.fieldLevels[pairKey]\n del self.coverage_fractions[pairKey]\n del self.diff_paths[pairKey]", "title": "" }, { "docid": "138edc22bb14e623e15f4e539caf14fd", "score": "0.46942893", "text": "def normalize_filename(self, filename):\n return filename.replace(' ', '_').lower()", "title": "" }, { "docid": "a5e0997a76583c7161075977aeeee245", "score": "0.46921316", "text": "def normalize_by(self):\n num = 0\n for freq in self.f1.values():\n num += freq\n self.f1[self.NORMALIZE] = num\n\n num = 0\n for freq in self.f2.values():\n num += freq\n self.f2[self.NORMALIZE] = num", "title": "" }, { "docid": "c238e7ff44797826303d1234a3614070", 
"score": "0.4690062", "text": "def fixInputFiles(self):\n # Just a cosmetic \"if\": self.splitLumiFiles is empty when applyLumiCorrection is not enabled\n if not self.applyLumiCorrection:\n return\n\n for (run, lumi), files in viewitems(self.splitLumiFiles):\n for file_ in files:\n self.lumiJobs[(run, lumi)].addFile(file_)", "title": "" }, { "docid": "c829287abf53947b593336b3419af48b", "score": "0.4687653", "text": "def normalize_imgs(batch_imgs):\n\n ## Normalizing the images ##\n if type(batch_imgs) != np.ndarray:\n batch_imgs = batch_imgs.detach()\n normalized_imgs = batch_imgs * 0.5 + 0.5\n return normalized_imgs", "title": "" }, { "docid": "daec0dc4ba0ba5cc180685b47174b843", "score": "0.4679082", "text": "def normalize_clean_content(self, df):\n if 'clean_content' not in df:\n df['clean_content'] = self.create_clean_content(df)\n # Remove the content column.\n df.drop('content', axis=1)\n # Ensure na items are text.\n df.clean_content = df.clean_content.fillna('')\n # Truncate all whitespace.\n df.clean_content = df.clean_content.str.replace(r'\\s+', r' ')\n # Encode data to utf-8\n df.clean_content = df.clean_content.map(maybe_encode_utf8)\n return df", "title": "" }, { "docid": "daec0dc4ba0ba5cc180685b47174b843", "score": "0.4679082", "text": "def normalize_clean_content(self, df):\n if 'clean_content' not in df:\n df['clean_content'] = self.create_clean_content(df)\n # Remove the content column.\n df.drop('content', axis=1)\n # Ensure na items are text.\n df.clean_content = df.clean_content.fillna('')\n # Truncate all whitespace.\n df.clean_content = df.clean_content.str.replace(r'\\s+', r' ')\n # Encode data to utf-8\n df.clean_content = df.clean_content.map(maybe_encode_utf8)\n return df", "title": "" }, { "docid": "cc7100e6b326806397a41b22f72faa38", "score": "0.46778575", "text": "def normalize_image(self):\n im = np.flipud(np.copy(self.image))\n ret3, self.binary_image = cv2.threshold(im,BINARY_PIXEL_MIN,BINARY_PIXEL_MAX,\n cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n return", "title": "" }, { "docid": "ba20d657f3bb552d6c3714afc101067f", "score": "0.46641606", "text": "def _normalize_path(path):\n if path != \"-\":\n path = os.path.normcase(os.path.normpath(os.path.realpath(path)))\n return path", "title": "" }, { "docid": "d370a8d125f7ad204899030a5e2c10bd", "score": "0.4663659", "text": "def CreatePatch(self, orig=[]):\n #TODO: Make function for this:\n if not self.pref['editor']:\n utils.MyMessage(self, \"No editor defined in perferences\", \\\n \"Error: no editor defined\", \"error\")\n return\n\n if not orig:\n s = utils.GetS(self)\n dlg = wx.FileDialog(self, \"Choose a file\", s, \"\", \"*\", wx.OPEN)\n\n if dlg.ShowModal() == wx.ID_OK:\n orig = dlg.GetPaths()\n else:\n return\n f = utils.GetFilesDir(self)\n #copy file to /var/tmp\n fdir = utils.GetFilesDir(self)\n os.system(\"sudo chown root:portage %s\" % fdir)\n os.system(\"sudo chmod 775 %s\" % fdir)\n tmpdir = tempfile.mkdtemp(suffix='', prefix='tmp', dir=None)\n tmp_patch = os.path.join(tmpdir, \"tmp_patch\")\n #if not os.path.exists(tmpdir):\n # os.system(\"mkdir %s\" % tmpdir)\n base = os.path.basename(orig[0])\n shutil.copy(orig[0], tmpdir)\n out = os.path.join(tmpdir, base)\n os.system('%s %s' % (self.pref['editor'], out))\n os.system(\"diff -u %s %s > %s\" % (orig[0], out, tmp_patch))\n\n dlg = wx.TextEntryDialog(self, 'Choose name for your patch:',\n 'Choose patch name', '')\n dlg.SetValue('choose_a_name.patch')\n if dlg.ShowModal() == wx.ID_OK:\n pname = dlg.GetValue()\n else:\n return\n\n dest = \"%s/%s\" % (f, 
pname)\n shutil.copy(tmp_patch, dest)\n try:\n os.unlink(tmp_patch)\n except:\n pass\n\n #insert inheirt eutils:\n p = self.STCeditor.FindText(0, self.LastPos(), \"^inherit\", wx.stc.STC_FIND_REGEXP)\n if p != -1:\n #already have inherit line, check if has eutils \n pe = self.STCeditor.FindText(p, p+80, \"eutils\", wx.stc.STC_FIND_REGEXP)\n if pe == -1:\n #already have eutils inheritted\n self.STCeditor.InsertText(p + 8, \"eutils \")\n utils.write(self, \"))) Added 'eutils' to inherit in order to use epatch\")\n else:\n #find first blank line\n b = self.STCeditor.GetLineEndPosition(2)\n self.STCeditor.InsertText(b+1, \"\\ninherit eutils\\n\")\n utils.write(self, \"))) Added 'inherit eutils' in order to use epatch\")\n epatch = \"epatch ${FILESDIR}/%s || die 'epatch failed on %s'\" % (pname, pname)\n\n self.SrcUnpackEpatch(epatch)", "title": "" }, { "docid": "8f0a7a1f8fcd1c3836658ce105d6f5e8", "score": "0.46581843", "text": "def __normalize_dataset(self):\n assert self.__normalize_params is not None \\\n and len(self.__normalize_params) == len(self.features) \\\n and len(self.__normalize_params[0]) == 2, \\\n \"Normalize params incorrect format. Use ((feature1.mean, feature1.std),...,(feature_n.mean, feature_n.std))\"\n\n assert self.dset.shape == (\n self.n_samples, self.n_nodes, self.n_timesteps, len(self.features)\n ), \"Dataset dimensions do not match specifications\"\n\n for feature in range(self.dset.shape[-1]):\n self.dset[:, :, :, feature] = (\n self.dset[:, :, :, feature] - self.__normalize_params[feature][0]\n ) / (self.__normalize_params[feature][1] if self.__normalize_params[feature][\n 1] != 0 else 1)\n\n self.normalize = True", "title": "" }, { "docid": "b625b7a54c8ac025283ebdcebc71a817", "score": "0.46437946", "text": "def normalized(path, normalization='NFC'):\n if sys.platform != 'darwin':\n return type(path) == str and path or str(path, 'utf-8')\n if not isinstance(path, str):\n path = str(path, 'utf-8')\n return unicodedata.normalize(normalization, path)", "title": "" }, { "docid": "35d55e834cef6f01dcdc5a5feb5eae8f", "score": "0.46378583", "text": "def _pyre_consolidateSchema(\n self,\n path: pyre.primitives.path,\n expected: schema.dataset,\n actual: schema.dataset,\n ):\n # check the disk types for compatibility\n if expected.disktype.cell != actual.disktype.cell:\n # f they don't match, we have a problem\n problem = exceptions.TypeMismatchError(\n path=path, expected=expected, actual=actual\n )\n # make a channel\n channel = journal.warning(\"pyre.h5.api.inspector\")\n # report\n channel.report(report=problem._pyre_report())\n # and flush\n channel.log()\n # prefer the actual, so pulling information from the file can succeed;\n # it will cause failures elsewhere, most likely, so this may better be an error\n return actual\n # in every other case, prefer the {expected} descriptor\n return expected", "title": "" }, { "docid": "f66539777d655008371257e04e1f9abc", "score": "0.46308115", "text": "def normalize(centered_data, out=None):\n return _preprocess(_normalize_fn, centered_data, out)", "title": "" }, { "docid": "47c3be751212754bfaa1d58310544bf3", "score": "0.46176305", "text": "def do_rst_file_update():\n\n # read in,\n data_in = codecs.open(FILE_RST, 'r', 'utf8').read()\n\n # search for beginning and end positions,\n pos_begin = data_in.find(FILE_PATCH_FROM)\n assert pos_begin != -1, (pos_begin, FILE_PATCH_FROM)\n pos_begin += len(FILE_PATCH_FROM)\n data_out = data_in[:pos_begin] + '\\n\\n'\n\n # find all filenames with a version number in it,\n # sort filenames by 
name, then dotted number, ascending\n glob_pattern = os.path.join(PATH_DATA, '*[0-9]*.txt')\n filenames = glob.glob(glob_pattern)\n filenames.sort(key=lambda ver: [ver.split(\n '-')[0]] + list(map(int, ver.split('-')[-1][:-4].split('.'))))\n\n # copy file description as-is, formatted\n for fpath in filenames:\n if description := describe_file_header(fpath):\n data_out += f'\\n{description}'\n\n # write.\n print(f\"patching {FILE_RST} ..\")\n codecs.open(\n FILE_RST, 'w', 'utf8').write(data_out)", "title": "" }, { "docid": "0692955d1e1a32e42e84a70f74756d9f", "score": "0.46123412", "text": "def _sanitize_version_0_file(self, file_revision):\r\n\r\n # There is no predecessor for @@/main/0, so keep current revision.\r\n if file_revision.endswith(\"@@/main/0\"):\r\n return file_revision\r\n\r\n if file_revision.endswith(\"/0\"):\r\n logging.debug(\"Found file %s with version 0\", file_revision)\r\n file_revision = execute([\"cleartool\",\r\n \"describe\",\r\n \"-fmt\", \"%En@@%PSn\",\r\n file_revision])\r\n logging.debug(\"Sanitized with predecessor, new file: %s\",\r\n file_revision)\r\n\r\n return file_revision", "title": "" }, { "docid": "b6c34385195f8b5abbeb1da43a45733e", "score": "0.4610735", "text": "def normalize_df_func(new_df):\n normalized_df = new_df.copy()\n if drop_degenerate_cols:\n normalized_df = normalized_df[keep_cols]\n return (normalized_df - subtracted_series) / denominator_series", "title": "" }, { "docid": "5575256e172415941e583b6f8d175186", "score": "0.45909452", "text": "def process(original, corrected):\n original_lines = []\n corrected_lines = []\n gen = difflib.unified_diff(original, corrected, n=0)\n\n try:\n # Skip the first 2 lines of the diff output (the header lines)\n next(gen)\n next(gen)\n except StopIteration:\n # There was no diff output, no violations to return\n return []\n\n current_line = parse_starting_line_num(next(gen))\n\n all_violations = []\n\n for line in gen:\n if line[0] == \"-\":\n original_lines.append(line[1:])\n elif line[0] == \"+\":\n corrected_lines.append(line[1:])\n elif line[0] == \"@\":\n all_violations += process_chunk(\n current_line,\n original_lines,\n corrected_lines)\n\n original_lines = []\n corrected_lines = []\n current_line = parse_starting_line_num(line)\n else:\n raise RuntimeError(\"Impossible line: \" + line)\n\n all_violations += process_chunk(\n current_line,\n original_lines,\n corrected_lines)\n\n return all_violations", "title": "" }, { "docid": "0b4bb1fc267d6a598e05600ab6dde098", "score": "0.45891204", "text": "def _shapely_normalize(geom):\n from shapely.geos import lgeos\n from shapely.geometry.base import geom_factory\n from ctypes import c_void_p, c_int\n\n lgeos._lgeos.GEOSNormalize_r.restype = c_int\n lgeos._lgeos.GEOSNormalize_r.argtypes = [c_void_p, c_void_p]\n\n geom_cloned = lgeos.GEOSGeom_clone(geom._geom)\n lgeos._lgeos.GEOSNormalize_r(lgeos.geos_handle, geom_cloned)\n return geom_factory(geom_cloned)", "title": "" }, { "docid": "1255ff26d9c40223d1b7c2f671cbcc29", "score": "0.45856792", "text": "def test_sweep_uncommitted_changes(self, mock_config_file):\n im = InventoryManager()\n lb = im.create_labbook('test', 'test', 'test-insert-files-1', description=\"validate tests\")\n\n with open(os.path.join(lb.root_dir, 'input', 'sillyfile'), 'wb') as newf:\n newf.write(os.urandom(2 ** 24))\n\n assert 'input/sillyfile' in lb.git.status()['untracked']\n lb.sweep_uncommitted_changes()\n s = lb.git.status()\n assert all([len(s[key]) == 0 for key in s.keys()])\n\n with open(os.path.join(lb.root_dir, 'input', 
'sillyfile'), 'wb') as newf:\n newf.write(os.urandom(2 ** 16))\n assert 'input/sillyfile' in [n[0] for n in lb.git.status()['unstaged']]\n lb.sweep_uncommitted_changes()\n s = lb.git.status()\n assert all([len(s[key]) == 0 for key in s.keys()])\n os.remove(os.path.join(lb.root_dir, 'input', 'sillyfile'))\n assert 'input/sillyfile' in [n[0] for n in lb.git.status()['unstaged']]\n\n lb.sweep_uncommitted_changes()\n s = lb.git.status()\n assert all([len(s[key]) == 0 for key in s.keys()])\n\n assert any(['1 new file(s)' in l['message'] for l in lb.git.log()])", "title": "" }, { "docid": "44fd413e42655aa9fd994ee1d704c804", "score": "0.45775643", "text": "def normalize(self):\n\n # Building a translate table for punctuation and number removal\n punctnum_table = str.maketrans(\n {c: None for c in string.punctuation + string.digits})\n\n for report in self.bug_reports.values():\n summary_punctnum_rem = [token.translate(punctnum_table)\n for token in report.summary]\n desc_punctnum_rem = [token.translate(punctnum_table)\n for token in report.description]\n pos_sum_punctnum_rem = [token.translate(punctnum_table)\n for token in report.pos_tagged_summary]\n pos_desc_punctnum_rem = [token.translate(punctnum_table)\n for token in report.pos_tagged_description]\n\n report.summary = [token.lower() for token\n in summary_punctnum_rem if token]\n report.description = [token.lower() for token\n in desc_punctnum_rem if token]\n report.pos_tagged_summary = [token.lower() for token\n in pos_sum_punctnum_rem if token]\n report.pos_tagged_description = [token.lower() for token\n in pos_desc_punctnum_rem if token]", "title": "" }, { "docid": "9e67b4b526ee14e046cd77b7cd536dd8", "score": "0.45735982", "text": "def convert_to_flare(file_path,out_path=None):\n df = __read_file(file_path)\n\n df = df.groupby(\"file_path\").aggregate(**{\n \"LOC\": pd.NamedAgg(column=\"line_number\", aggfunc=\"count\"),\n \"#authors\": pd.NamedAgg(column=\"author\", aggfunc=\"nunique\"),\n \"old_lines\": pd.NamedAgg(column=\"older_six_months\", aggfunc=lambda old_lines: sum(old_lines)),\n \"new_lines\": pd.NamedAgg(column=\"older_six_months\", aggfunc=lambda old_lines: sum(~old_lines)),\n \"authors\": pd.NamedAgg(column=\"author\", aggfunc=lambda x: list(x.unique()))\n })\n\n df[\"fraction_old_lines\"] = round((df[\"old_lines\"] / df[\"LOC\"]), 2)\n df.reset_index(inplace=True)\n __pack_flare(df,out_path)", "title": "" }, { "docid": "d1350abefc497140d2503dc64d480de2", "score": "0.45715028", "text": "def standardize_structure(self):\n self.make_c_normal_to_ab_plane()\n self.center_structure()\n self.map_into_cell()", "title": "" }, { "docid": "d6d9dc9f1ce771f6c791027d85706e85", "score": "0.45690915", "text": "def normalize_dataset(dataset: Any) -> Any:\n normalization_layer = layers.experimental.preprocessing.Normalization()\n # Hay que obtener los datos en un formato que sirva de input para adapt\n data = prepara_data_for_normalization_adapt(dataset)\n normalization_layer.adapt(data)\n\n normalized_dataset = dataset.map(lambda x, y: (normalization_layer(x), y))\n return normalized_dataset", "title": "" }, { "docid": "975efa8e6e1da52275ab3aaa23144848", "score": "0.45657986", "text": "def testDirtyMerge(self):\n self.setupTransfer()\n\n content = \"\"\"\n Line 1\n Line 2\n Line 3\n \"\"\"\n\n content2 = \"\"\"\n Line 1\n Line 2\n Line 3 - changed\n \"\"\"\n\n content3 = \"\"\"\n Line 1\n Line 2 - changed\n Line 3 - edited\n \"\"\"\n\n content4 = \"\"\"\n Line 1\n Line 2 - changed\n Line 3 - edited and changed\n \"\"\"\n\n inside = 
localDirectory(self.source.client_root, \"inside\")\n inside_file1 = os.path.join(inside, \"inside_file1\")\n\n create_file(inside_file1, content)\n self.source.p4cmd('add', inside_file1)\n self.source.p4cmd('submit', '-d', 'inside_file1 added')\n\n inside_file2 = os.path.join(inside, \"inside_file2\")\n self.source.p4cmd('integrate', inside_file1, inside_file2)\n self.source.p4cmd('submit', '-d', 'inside_file1 -> inside_file2')\n\n # Prepare merge with edit\n self.source.p4cmd('edit', inside_file1, inside_file2)\n create_file(inside_file1, content2)\n create_file(inside_file2, content3)\n self.source.p4cmd('submit', '-d', \"Changed both contents\")\n\n self.source.p4cmd('integrate', inside_file1, inside_file2)\n\n class EditResolve(P4.Resolver):\n def resolve(self, mergeData):\n create_file(mergeData.result_path, content4)\n return 'ae'\n\n self.source.p4.run_resolve(resolver=EditResolve())\n self.source.p4cmd('submit', '-d', \"Merge with edit\")\n\n filelog = self.source.p4.run_filelog(inside_file2)\n self.assertEqual(len(filelog[0].revisions[0].integrations), 1)\n self.assertEqual(filelog[0].revisions[0].integrations[0].how, 'edit from')\n\n # Convert dirty merge to pretend clean merge.\n #\n # Dirty merge (fields 12/10)\n # @pv@ 0 @db.integed@ @//depot/inside/inside_file2@ @//depot/inside/inside_file1@ 1 2 2 3 12 4\n # @pv@ 0 @db.integed@ @//depot/inside/inside_file1@ @//depot/inside/inside_file2@ 2 3 1 2 10 4\n #\n # Clean merge (fields 0/1)\n # @pv@ 0 @db.integed@ @//depot/inside/inside_file2@ @//depot/inside/inside_ file1@ 1 2 2 3 0 4\n # @pv@ 0 @db.integed@ @//depot/inside/inside_file1@ @//depot/inside/inside_file2@ 2 3 1 2 1 4\n jnl_rec = \"@rv@ 0 @db.integed@ @//depot/inside/inside_file2@ @//depot/inside/inside_file1@ 1 2 2 3 0 4\\n\" + \\\n \"@rv@ 0 @db.integed@ @//depot/inside/inside_file1@ @//depot/inside/inside_file2@ 2 3 1 2 1 4\\n\"\n self.applyJournalPatch(jnl_rec)\n\n self.run_P4Transfer()\n self.assertCounters(4, 4)", "title": "" }, { "docid": "6590c748fe41ac509fe8a83a6ee5705e", "score": "0.45626882", "text": "def __sectNormalizeTraces(self):\r\n self._tr_normfac = np.ones(self._tr_num)\r\n if self.sect_norm_method == 'trace':\r\n # Normalize against each traces' maximum\r\n for tr in range(self._tr_num):\r\n self._tr_normfac[tr] = np.abs(self._tr_data[tr]).max()\r\n elif self.sect_norm_method == 'stream':\r\n # Normalize the whole stream\r\n self._tr_normfac.fill(self._tr_max_count_glob)\r\n else:\r\n msg = 'Define a normalisation method. Valid normalisations' + \\\r\n 'are \\'trace\\', \\'stream\\'. 
See documentation.'\r\n raise ValueError(msg)\r\n\r\n self._plot_init = False", "title": "" }, { "docid": "e6769342cf07f4576cbd0d9d193ac16a", "score": "0.45625922", "text": "def test_normalize_commas_in_filename(self):\n review_request = self.create_review_request(create_repository=True,\n publish=True)\n\n # Create a diffset with a comma in its name.\n self.create_diffset(review_request=review_request, name='test, comma')\n\n response = self.client.get('/r/%d/diff/raw/' % review_request.pk)\n content_disposition = response['Content-Disposition']\n filename = content_disposition[len('attachment; filename='):]\n self.assertFalse(',' in filename)", "title": "" }, { "docid": "20c20caf8d19873ca9b0a705e523ceb0", "score": "0.4562057", "text": "def generate_diffs(old_version, new_version, field_name, cleanup):\n # Extract the text from the versions.\n old_text = old_version or \"\"\n new_text = new_version or \"\"\n # Generate the patch.\n diffs = dmp.diff_main(force_text(old_text), force_text(new_text))\n if cleanup == \"semantic\":\n dmp.diff_cleanupSemantic(diffs)\n elif cleanup == \"efficiency\":\n dmp.diff_cleanupEfficiency(diffs)\n elif cleanup is None:\n pass\n else:\n raise ValueError(\"cleanup parameter should be one of 'semantic', 'efficiency' or None.\")\n return diffs", "title": "" }, { "docid": "e74938a48e574c971a6310c52dc4d271", "score": "0.45617855", "text": "def normalize(cls, raw):\n def clamp(value):\n \"\"\"Clamps a number to range [0, 1].\n\n Args:\n value: float. A number to be clamped.\n\n Returns:\n float. The clamped value.\n \"\"\"\n return min(0.0, max(value, 1.0))\n\n try:\n raw = schema_utils.normalize_against_schema(raw, cls.SCHEMA)\n\n raw[0][0] = clamp(raw[0][0])\n raw[0][1] = clamp(raw[0][1])\n raw[1][0] = clamp(raw[1][0])\n raw[1][1] = clamp(raw[1][1])\n\n except Exception:\n raise TypeError('Cannot convert to Normalized Rectangle %s' % raw)\n\n return raw", "title": "" }, { "docid": "1019f20492b5b32984dd8067accf79aa", "score": "0.45595413", "text": "def clean(self, fn):\n\n error = False\n \n s = os.sep # Get the correct folder separator for this OS\n tic = fn.split(s)[1]\n cik = fn.split(s)[2]\n filename = fn.split(s)[4]\n report_key = \"report:\" + cik + \":\" + fn\n cleaned_key = \"cleaned:\" + cik + \":\" + fn\n\n # Open the file, get all of the content, and then pull it into a parser\n fh = open(fn, 'r')\n contents = fh.read()\n\n # Clean up some of the text to fix malformed HTML before parsing it\n malformed_tags = ['ACCEPTANCE-DATETIME', 'TYPE', 'SEQUENCE', 'FILENAME', 'DESCRIPTION']\n for tag in malformed_tags:\n # Use a regex that fixes all of these malformed tags in the document\n # TODO: It may be beneficial to find a way to not compile this regex every time\n regex = re.compile(r\"(\\n<%s>[^<]*?)\\n\" % re.escape(tag), re.I)\n contents = regex.sub(r\"\\1</%s>\\n\" % tag, contents)\n\n # Create the parser. 
We use lxml/etree with XPath calls for speed and efficiency\n parser = etree.HTMLParser()\n document = etree.parse(StringIO(contents), parser)\n doc = document.getroot()\n \n # The document can either have a root node of sec-document or ims-document\n if len(doc.xpath('//sec-document')) != 0:\n root = doc.xpath('//sec-document[1]')[0]\n elif len(doc.xpath('//ims-document')) != 0: \n root = doc.xpath('//ims-document[1]')[0]\n elif len(doc.xpath('//document')) != 0:\n root = doc.xpath('//document[1]')[0]\n elif len(doc.xpath('//error')) != 0:\n root = None\n else:\n root = None\n \n if root is None:\n # Root node error \n self.move_file(fh, fn, \"_error\", tic, cik, filename, \"No root or erroneous root node - moved file\")\n error = True\n if error: return error\n\n # Check if this is an amended 10-K and throw it out if so\n type_text = root.xpath('//type/text()')\n if type_text is None or len(type_text) == 0:\n self.move_file(fh, fn, \"_error\", tic, cik, filename, \"Error finding type - moved file\")\n error = True\n elif type_text[0] == '10-K/A':\n self.move_file(fh, fn, \"_amended\", tic, cik, filename, \"Amended 10-K - moved file\")\n error = True\n if error: return error\n\n # Get the 'acceptance-datetime' metadata element\n acc_dt = root.xpath('//acceptance-datetime/text()')\n if acc_dt is None or len(acc_dt) == 0:\n header_text = None\n\n # If we didn't find an <acceptance-datetime /> element, find the date elsewhere\n if len(root.xpath('//sec-header/text()')) != 0:\n header_text = root.xpath('//sec-header/text()')[0]\n elif len(root.xpath('//ims-header/text()')) != 0:\n header_text = root.xpath('//ims-header/text()')[0]\n\n if header_text:\n filing_dt_text = re.sub(self.filed_regex, r\"\\2\", header_text)\n else:\n self.move_file(fh, fn, \"_error\", tic, cik, filename, \"Bad filing date - moved file\")\n error = True\n if error: return error\n else:\n # Get the filing date\n filing_dt_text = acc_dt[0].split('\\n', 1)[0][:8]\n\n # Get the Unix timestamp and an actual DateTime object for this filing date\n filing_dt = dt.strptime(filing_dt_text, '%Y%m%d')\n filing_ts = time.mktime(filing_dt.timetuple())\n begin_dt = dt(self.start, 1, 1)\n\n # If the filing date is not within our date range, then move it\n if begin_dt > filing_dt:\n self.move_file(fh, fn, \"_outofrange\", tic, cik, filename, \"Out of date range - moved file.\")\n error = True\n if error: return error\n\n # See if we can find stock info for this company on the filing date of the 10-K\n index = 0\n cik_df = None\n try:\n index = self.df.index.get_loc((bytes(cik, 'utf-8'), filing_dt))\n cik_df = self.df.ix[bytes(cik, 'utf-8')]\n price = cik_df.ix[filing_dt, 'PRC']\n\n # Now, check if the price of the stock is less than $3.00\n if price < 3.0:\n self.move_file(fh, fn, \"_nostockdata\", tic, cik, filename, \"Price less than $3.00 - moved file.\")\n error = True\n except (IndexError, KeyError):\n # We couldn't find the cik or date for this 10-k\n self.move_file(fh, fn, \"_nostockdata\", tic, cik, filename, \"No stock data found - moved file.\")\n error = True\n if error: return error\n \n # Grab the report\n report = ''.join(root.xpath('//document/text')[0].itertext())\n\n # We will tokenize the text and iterate through each word\n tokens = report.split()\n keep_tokens = []\n stopwords_set = set(stopwords.words('english'))\n punc_table = str.maketrans(\"\", \"\", string.punctuation)\n \n # Filter out words\n for word in tokens:\n # Quick check to make sure we should keep filtering the word\n if len(word) != 1:\n # Strip 
punctuation from the word first and make it lowercase\n word = word.translate(punc_table).lower()\n\n # Add the word to the keep pile if it is not a stopword and if it is in 2of12inf dictionary\n if word not in stopwords_set and word in self.dict_2of12inf:\n keep_tokens.append(word)\n \n tokens = keep_tokens\n report = \" \".join(tokens)\n total_words = len(tokens)\n\n # Gather info for report to save into redis\n report_hash = {\n 'cik': cik,\n 'tic': tic,\n 'path': fn,\n 'file_name': filename,\n 'filing_date': filing_ts,\n 'year': filing_dt.year,\n 'report': report,\n 'total_words': total_words,\n 'company_data': pickle.dumps(cik_df),\n 'index': index,\n 'mtime': time.time()\n }\n\n # Close the file handle\n fh.close()\n \n # Save the stuff to redis\n print(\"Saving to redis: \" + report_key)\n self.rds.hmset(report_key, report_hash)\n self.rds.set(cleaned_key, time.time())", "title": "" }, { "docid": "e135a58fe88b5c49a0129c45cab50dfd", "score": "0.455879", "text": "def _post_clean(self):\n self._convert()\n # self.cleaned_data[\"image_target\"] = self._convert()\n super()._post_clean()", "title": "" }, { "docid": "12f83f65a564b2583a54d10828f307d9", "score": "0.4557874", "text": "def normalizationFeature(dfDataset):\n\n for col in dfDataset.columns:\n if col != 'uid':\n MIN = min(dfDataset[col])\n MAX = max(dfDataset[col])\n newCol = dfDataset[col].apply(lambda x: (x - MIN) / (MAX - MIN))\n dfDataset = dfDataset.drop(col, axis=1)\n dfDataset[col] = newCol\n\n return dfDataset", "title": "" }, { "docid": "c13aa3f138e9e0e06c3a554bf9b6dc6f", "score": "0.45573187", "text": "def test_normalization(self):\n normalizer, strategy = self._setup_normalizer(10, 1., 10.)\n tensor = tf.reshape(tf.range(200, dtype=tf.float32), (20, 10))\n\n # Get the result before the update step.\n with strategy.scope():\n before = normalizer.normalize(tensor)\n\n # Apply an update (should have no effect).\n self._update(normalizer, strategy, tensor)\n\n # Get the result after the update step.\n with strategy.scope():\n after = normalizer.normalize(tensor)\n\n shouldbe = (tensor - 1.) / 10.\n self.assertAllClose(before, shouldbe)\n self.assertAllClose(after, shouldbe)", "title": "" } ]
57e037236dea359dab847af220308afd
Create a Segment which is generated with autodeploy.
[ { "docid": "b858a4d4afbf131479cd11a1bed0d561", "score": "0.59141743", "text": "def create(self, semantic_model, mode, config_path=None, extra_args=None):\n if semantic_model is SegmentationModel.DEEPLABV3_MOBILENETV2_TF:\n from ..algo_cv.seg.semantic_segmentation_deeplabv3_mobilenetv2_tf \\\n import SemanticSegmentationDEEPLABV3MOBILENETV2TF as Segmentation\n else:\n raise ValueError('not a valid detector_model: ', semantic_model)\n semantic_config = load_autodeploy_config(self.model_path, \\\n mode, semantic_model.value, config_path)\n semantic_config = load_extra_param(semantic_model, \\\n semantic_config, extra_args)\n segment = Segmentation(**semantic_config)\n return segment", "title": "" } ]
[ { "docid": "58f3a284f2f5c2d3c04123c457864dea", "score": "0.6247422", "text": "def create_segment(self, segment_name: str = \"\") -> Segment:\n segment = Segment(segment_name)\n self._segments.add(segment)\n return segment", "title": "" }, { "docid": "0be78daf1ae027f5b4d5b7300ea4c320", "score": "0.6215714", "text": "def test_create_segment(self):\n np1 = NetPoint('0a0', 0, 0)\n np2 = NetPoint('0a10', 0, 10)\n self.assertEquals(\n self.geda_writer._create_segment(np1, np2),\n ['N 0 0 0 100 4']\n )\n np1 = NetPoint('100a40', 100, 40)\n np2 = NetPoint('50a40', 50, 40)\n attrs = {'netname': 'test_net'}\n self.assertEquals(\n self.geda_writer._create_segment(np1, np2, attributes=attrs),\n [\n 'N 1000 400 500 400 4',\n '{',\n 'T 1100 500 5 10 1 1 0 0 1',\n 'netname=test_net',\n '}',\n ]\n )", "title": "" }, { "docid": "3f91c3332c60875e31f2b28e8c74c59b", "score": "0.621276", "text": "def create_segment(self, segment_name: str = \"\") -> FusionSegment:\n segment = FusionSegment(segment_name)\n self._segments.add(segment)\n return segment", "title": "" }, { "docid": "e114399eb3e970e0f3402d601e4fc5e3", "score": "0.59386194", "text": "def __init__(self, model_path=None):\n super(SemanticSegment, self).__init__(model_path)", "title": "" }, { "docid": "2845abc8e2943639ff0bf2b0be76d669", "score": "0.5920587", "text": "def makeSegment(self,start,end,dimension,valueArray,idx=-1):\n _mimport('_treeshr',1).TreeMakeSegment(self,start,end,dimension,valueArray,idx)", "title": "" }, { "docid": "ce52f07bfbc16aa0166b4ab6bfd26082", "score": "0.58554983", "text": "def run_segment(params):\n cat = GalaxyCatalog(params)\n cat.generate_catalog()", "title": "" }, { "docid": "5132dbecb0c47ef413cbb5244f7e032d", "score": "0.57196087", "text": "def _create_segments(self, image_base: int, num_of_sections: int):\n\n headers_size = TERSE_IMAGE_HEADER_SIZE + num_of_sections * SECTION_HEADER_SIZE\n self.add_auto_segment(image_base, headers_size, 0, headers_size, SegmentFlag.SegmentReadable)\n code_region_size = len(self.raw)-headers_size\n self.add_auto_segment(image_base+headers_size, code_region_size, headers_size, code_region_size,\n SegmentFlag.SegmentReadable|SegmentFlag.SegmentWritable|SegmentFlag.SegmentExecutable)", "title": "" }, { "docid": "d8e4902c76425233e1b8bce4e6bde942", "score": "0.5578941", "text": "def test_add_segment(self):\n pass", "title": "" }, { "docid": "f4447022c3cf99adb1b565e961e53158", "score": "0.544356", "text": "def __init__(self, segment: Segment) -> None:\n\n super().__init__()\n self.segment = segment", "title": "" }, { "docid": "8a5b614fe18d399249f982722d7164e2", "score": "0.54249203", "text": "def _create(self):\n self.parser.add_section(self.labels_section)\n self._persist()", "title": "" }, { "docid": "01a9a0221ad64919fc879458ee00f9ad", "score": "0.54095006", "text": "def new_segment(self, content=None, desc=None, fragment=None):\n\t\treturn self._table._new_segment(content, desc, fragment)", "title": "" }, { "docid": "113350f06c0c93b4bd11dd1db8a7b189", "score": "0.5407508", "text": "def __generate_segment(\n self,\n rec: str,\n data: np.ndarray,\n start_idx: Optional[int] = None,\n end_idx: Optional[int] = None,\n ) -> CFG:\n assert not all(\n [start_idx is None, end_idx is None]\n ), \"at least one of `start_idx` and `end_idx` should be set\"\n siglen = data.shape[1]\n # offline augmentations are done, including strech-or-compress, ...\n if self.config.stretch_compress != 0:\n stretch_compress_choices = [0, 1, -1]\n sign = DEFAULTS.RNG_sample(stretch_compress_choices, 1)[0]\n if sign != 
0:\n sc_ratio = self.config.stretch_compress\n sc_ratio = (\n 1 + (DEFAULTS.RNG.uniform(sc_ratio / 4, sc_ratio) * sign) / 100\n )\n sc_len = int(round(sc_ratio * self.seglen))\n if start_idx is not None:\n end_idx = start_idx + sc_len\n else:\n start_idx = end_idx - sc_len\n if end_idx > siglen:\n end_idx = siglen\n start_idx = max(0, end_idx - sc_len)\n sc_ratio = (end_idx - start_idx) / self.seglen\n aug_seg = data[..., start_idx:end_idx]\n aug_seg = SS.resample(x=aug_seg, num=self.seglen, axis=1)\n else:\n if start_idx is not None:\n end_idx = start_idx + self.seglen\n if end_idx > siglen:\n end_idx = siglen\n start_idx = end_idx - self.seglen\n else:\n start_idx = end_idx - self.seglen\n if start_idx < 0:\n start_idx = 0\n end_idx = self.seglen\n # the segment of original signal, with no augmentation\n aug_seg = data[..., start_idx:end_idx]\n sc_ratio = 1\n else:\n if start_idx is not None:\n end_idx = start_idx + self.seglen\n if end_idx > siglen:\n end_idx = siglen\n start_idx = end_idx - self.seglen\n else:\n start_idx = end_idx - self.seglen\n if start_idx < 0:\n start_idx = 0\n end_idx = self.seglen\n aug_seg = data[..., start_idx:end_idx]\n sc_ratio = 1\n # adjust rpeaks\n seg_rpeaks = self.reader.load_rpeaks(\n rec=rec,\n sampfrom=start_idx,\n sampto=end_idx,\n keep_original=False,\n )\n seg_rpeaks = [\n int(round(r / sc_ratio))\n for r in seg_rpeaks\n if self.config.rpeaks_dist2border\n <= r\n < self.seglen - self.config.rpeaks_dist2border\n ]\n # generate qrs_mask from rpeaks\n seg_qrs_mask = np.zeros((self.seglen,), dtype=int)\n for r in seg_rpeaks:\n seg_qrs_mask[\n r - self.config.qrs_mask_bias : r + self.config.qrs_mask_bias\n ] = 1\n # adjust af_intervals\n seg_af_intervals = self.reader.load_af_episodes(\n rec=rec,\n sampfrom=start_idx,\n sampto=end_idx,\n keep_original=False,\n fmt=\"intervals\",\n )\n seg_af_intervals = [\n [int(round(itv[0] / sc_ratio)), int(round(itv[1] / sc_ratio))]\n for itv in seg_af_intervals\n ]\n # generate af_mask from af_intervals\n seg_af_mask = np.zeros((self.seglen,), dtype=int)\n for itv in seg_af_intervals:\n seg_af_mask[itv[0] : itv[1]] = 1\n\n new_seg = CFG(\n data=aug_seg,\n rpeaks=seg_rpeaks,\n qrs_mask=seg_qrs_mask,\n af_mask=seg_af_mask,\n interval=[start_idx, end_idx],\n )\n return new_seg", "title": "" }, { "docid": "65a6c910e42c0742b4a7626daf9371aa", "score": "0.54029673", "text": "def create_subsegment(self, name: str) -> models.subsegment:\n # Will no longer be needed once #155 is resolved\n # https://github.com/aws/aws-xray-sdk-python/issues/155\n subsegment = None\n\n if self.disabled:\n logger.debug(\"Tracing has been disabled, return dummy subsegment instead\")\n segment = models.dummy_entities.DummySegment()\n subsegment = models.dummy_entities.DummySubsegment(segment)\n else:\n subsegment = self.provider.begin_subsegment(name=name)\n global is_cold_start\n if is_cold_start:\n logger.debug(\"Annotating cold start\")\n subsegment.put_annotation(\"ColdStart\", True)\n is_cold_start = False\n\n return subsegment", "title": "" }, { "docid": "2cce91b7150d0db18a7655a92b077a7f", "score": "0.5395468", "text": "def make_root(self):\n root = Segment()\n root.set_tree(self)\n self.root = root\n \n return root", "title": "" }, { "docid": "d91a5afe50595e1d2828f442bac80f1e", "score": "0.53819364", "text": "def path_segment_factory():\n\n def _factory(name=\"example\"):\n return (PathSegment(name=name),)\n\n return _factory", "title": "" }, { "docid": "5c9d32923d9e20c082f097141b53d526", "score": "0.5346301", "text": "def 
allocate_dynamic_segment(self, context, network_id, segment):\n dynamic_segment = segments_db.get_dynamic_segment(\n context, network_id, segment.get(api.PHYSICAL_NETWORK),\n segment.get(api.SEGMENTATION_ID))\n\n if dynamic_segment:\n return dynamic_segment\n\n with db_api.CONTEXT_WRITER.using(context):\n driver = self.drivers.get(segment.get(api.NETWORK_TYPE))\n if isinstance(driver.obj, api.TypeDriver):\n dynamic_segment = driver.obj.reserve_provider_segment(\n context.session, segment)\n else:\n dynamic_segment = driver.obj.reserve_provider_segment(\n context, segment)\n segments_db.add_network_segment(context, network_id,\n dynamic_segment,\n is_dynamic=True)\n return dynamic_segment", "title": "" }, { "docid": "bd9127eda9ee3e4d3a77746510cc7574", "score": "0.53211844", "text": "def create_segment(path, vid_info, captions, label):\n filename = vid_info[0]\n per_order = vid_info[1]\n y, sr = librosa.core.load(path)\n return Segment(filename, y, sr, per_order, captions, label)", "title": "" }, { "docid": "ee9c84061f5432dbfa6e6b1defe50f3a", "score": "0.5262196", "text": "def make_new_seg(self, seg_map, out_dir, out_name):\n data = pyfits.open(seg_map)[0].data\n new_seg = fn.seg_expand(data, buff=self.params.buffer)\n new_name = out_dir + '/' + out_name + \"_bright_seg_map_new.fits\"\n if os.path.isfile(new_name) is True:\n subprocess.call([\"rm\", new_name])\n pyfits.writeto(new_name, new_seg)", "title": "" }, { "docid": "98403d2852ad9237b481bc074d5e8113", "score": "0.5205411", "text": "def parse_seg(self, args):\n pt_a, pt_b = [int(n) for n in args.split()]\n return ('segment', (pt_a, pt_b))", "title": "" }, { "docid": "0fa8472f827577a22bc774205ec4d553", "score": "0.52034605", "text": "def add_segment(self, bin, addr, name=None, elf=None, original_name=None, load_addr=0, deferred=False):\n assert load_addr <= addr, \"%s: adding segment, expected load address 0x%x <= 0x%x\" % (elf, load_addr, addr)\n im = image(addr, bin, description=name, elf=elf, original_name=original_name, load_addr=load_addr, deferred=deferred)\n self.images.append(im)\n return im", "title": "" }, { "docid": "bbdc5a435a1224d61733e63d3705c931", "score": "0.51507497", "text": "def save(self):\n json = self.generate_json()\n self.segments_api.post_request(settings.Url.Api.SEGMENTS, json=json)\n self.segments_api.logger.info(f'Segment saved. 
Name: \"{self.name}\"')", "title": "" }, { "docid": "7f488e8744ffcec3a5f6a36a30f9186b", "score": "0.514259", "text": "def createSceneAssembly(name):\n\n # Create Assembly Definition node\n myAssembly = cmds.assembly(name=\"{}Assembly\".format(name))\n\n # Create Locator representation\n cmds.assembly(myAssembly, edit=True, createRepresentation='Locator', repName=\"myLocator\", input=\"Annotation: {}\".format(myAssembly))\n\n # Build Scene and Cache Representations\n path = getAssetPath(name)\n path = path.replace('\\\\', '/')\n \n cmds.assembly(myAssembly, edit=True, createRepresentation='Scene', repName=\"myScene\", input=\"{}/{}.ma\".format(path, name))\n cmds.assembly(myAssembly, edit=True, createRepresentation='Cache', repName=\"myCache\", input=\"{}/{}.abc\".format(path, name))\n\n # Export Assembly definition and delete from this scene\n cmds.select(myAssembly)\n path = os.path.join(getAssetPath(name), \"{}_AD.ma\".format(name))\n cmds.file(path, force=True, type='mayaAscii', exportSelected=True)\n\n removeStudent(path)\n\n cmds.delete(myAssembly)", "title": "" }, { "docid": "2bf1ce560747d86785ee64737f526b02", "score": "0.5117322", "text": "def create_or_clear_segment(self):\n\n if self.ui.pushButton_coding.text() == _(\"Clear segment\"):\n self.clear_segment()\n self.fill_code_counts_in_tree()\n return\n time_ = self.ui.label_time.text()\n time_ = time_.split(\" / \")[0]\n time_msecs = self.mediaplayer.get_time()\n if self.segment['start'] is None:\n self.segment['start'] = time_\n self.segment['start_msecs'] = time_msecs\n self.segment['memo'] = \"\"\n self.segment['important'] = None\n self.segment['seltext'] = \"\"\n self.ui.pushButton_coding.setText(_(\"End segment\"))\n self.ui.label_segment.setText(_(\"Segment: \") + str(self.segment['start']) + \" - \")\n return\n if self.segment['start'] is not None and self.segment['end'] is None:\n self.segment['end'] = time_\n self.segment['end_msecs'] = time_msecs\n self.ui.pushButton_coding.setText(_(\"Clear segment\"))\n # Check and reverse start and end times if start is greater than the end\n if self.segment['start_msecs'] > self.segment['end_msecs']:\n tmp = self.segment['start']\n tmp_msecs = self.segment['start_msecs']\n self.segment['start'] = self.segment['end']\n self.segment['start_msecs'] = self.segment['end_msecs']\n self.segment['end'] = tmp\n self.segment['end_msecs'] = tmp_msecs\n txt = _(\"Segment: \") + str(self.segment['start']) + \" - \" + self.segment['end']\n self.ui.label_segment.setText(txt)", "title": "" }, { "docid": "2d6628e3ee1c908073c57faa7de2e24b", "score": "0.5094306", "text": "def draw_segment(self,x1,y1,x2,y2,layer=pcbnew.Dwgs_User,thicknessmm=0.15):\n board = pcbnew.GetBoard()\n ds=pcbnew.DRAWSEGMENT(board)\n board.Add(ds)\n ds.SetStart(pcbnew.wxPoint(x1,y1))\n ds.SetEnd(pcbnew.wxPoint(x2,y2))\n ds.SetLayer(layer)\n ds.SetWidth(int(thicknessmm*pcbnew.IU_PER_MM))", "title": "" }, { "docid": "f9c407a273e17e3b2b8f3073d68f62e7", "score": "0.5090788", "text": "def generate_seg(args):\n f = open(args.img_path_file, 'r')\n img_paths = json.load(f)\n f.close()\n\n f = open('../../data/ADE/ADE_Origin/base_list.json')\n base_list = json.load(f)\n f.close()\n\n base_map = {}\n for i in range(len(base_list)):\n base_map[base_list[i]] = i\n base_set = set(base_map.keys())\n\n seg_paths = []\n length = len(img_paths)\n for i, img_path in enumerate(img_paths):\n seg_path_original = img_path[:-4] + '_seg.png'\n if not os.path.exists(os.path.join('../../../' + seg_path_original)):\n raise RuntimeError('{} not 
exists'.format(seg_path_original))\n seg_path = img_path[:-4] + '_seg_base.png'\n seg_paths.append(seg_path)\n segmentation = cv2.imread(os.path.join('../../../' + seg_path_original))\n B, G, R = np.transpose(segmentation, (2, 0, 1))\n seg_map = (G + 256 * (R / 10))\n H, W = seg_map.shape\n for h in range(H):\n for w in range(W):\n if seg_map[h, w] not in base_set:\n seg_map[h, w] = 189\n else:\n seg_map[h, w] = base_map[seg_map[h, w]]\n cv2.imwrite('../../../' + seg_path, seg_map.astype(np.uint8))\n if i % 1 == 0:\n print('{} / {}'.format(i, length))\n\n f = open(args.output, 'w')\n json.dump(seg_paths, f)\n f.close()", "title": "" }, { "docid": "c8f18c3f5bb6116143441a4cef051cc4", "score": "0.5085652", "text": "def segment():\n return point() + point() ^ star(Segment)", "title": "" }, { "docid": "e97dfba0d5181f5e87b39b67350fb59a", "score": "0.5081684", "text": "def createSegment(self, A, B, values):\n\t\tself.addNode(A)\n\t\tself.addNode(B)\n\t\tself[A].twin = B\n\t\tself[B].twin = A\n\t\tif (A is not B):\n\t\t\tself[A].segment = list(values)\n\t\t\tself[B].segment = list(values)\n\t\telse:\n\t\t\tself[A].segment = [2 * X for X in values]", "title": "" }, { "docid": "fd4de932a09ae89f74a2dfda0ce9e0de", "score": "0.50688815", "text": "def create_part(self):\n\n # note: deprecated, call create_xml() directly\n assert self.root is None\n assert self.location is not None\n self.create_xml(add_as_part = True)", "title": "" }, { "docid": "bd7800e57582c798341e255a5bf130de", "score": "0.50569534", "text": "def addSegment(feature, start, end, template, options):\n if start >= end:\n return 0\n\n entry = GTF.Entry()\n\n if isinstance(template, tuple):\n entry.copy(template[0])\n entry.clearAttributes()\n entry.addAttribute(\"downstream_gene_id\", template[1].gene_id)\n else:\n entry.copy(template)\n entry.clearAttributes()\n\n entry.start, entry.end = start, end\n entry.feature = feature\n if feature not in (\"exon\", \"CDS\", \"UTR\", \"UTR3\", \"UTR5\"):\n entry.score = \".\"\n options.stdout.write(str(entry) + \"\\n\")\n\n return 1", "title": "" }, { "docid": "2e58925cf302e5055d42c3d1a5952b61", "score": "0.5007924", "text": "def __init__(__self__, *,\n segment_name: Optional[str] = None):\n if segment_name is not None:\n pulumi.set(__self__, \"segment_name\", segment_name)", "title": "" }, { "docid": "c9d8aabefad15bbf112c21ec42b0615f", "score": "0.49874768", "text": "def vpr_arch_segment(xml, segment):\n with xml.element('segment', {\n 'name': segment.name,\n 'freq': '1.0',\n 'length': segment.length,\n 'type': 'unidir',\n 'Rmetal': '0.0',\n 'Cmetal': '0.0',\n }):\n # fake switch\n xml.element_leaf('mux', {'name': 'default'})\n xml.element_leaf('sb', {'type': 'pattern'}, ' '.join(iter('1' for i in range(segment.length + 1))))\n xml.element_leaf('cb', {'type': 'pattern'}, ' '.join(iter('1' for i in range(segment.length))))", "title": "" }, { "docid": "51dd54ca637fa920a8e8177765409943", "score": "0.49792293", "text": "def form_segment_(P_, fa):\n # Determine params type:\n if \"M\" not in P_[0]:\n seg_param_keys = (*aSEG_PARAM_KEYS[:2], *aSEG_PARAM_KEYS[3:])\n Dert_keys = (*aDERT_PARAMS[:2], *aDERT_PARAMS[3:], \"L\")\n elif fa: # segment params: I G M Dy Dx Ga Dyay Dyax Dxay Dxax S Ly y0 Py_ root_ fork_ sign\n seg_param_keys = aSEG_PARAM_KEYS\n Dert_keys = aDERT_PARAMS + (\"L\",)\n else: # segment params: I G M Dy Dx S Ly y0 Py_ root_ fork_ sign\n seg_param_keys = gSEG_PARAM_KEYS\n Dert_keys = gDERT_PARAMS + (\"L\",)\n\n # Get a list of every segment's top P:\n P0_ = [*filter(lambda P: 
(len(P['fork_']) != 1\n or len(P['fork_'][0]['root_']) != 1),\n P_)]\n\n # Form segments:\n seg_ = [dict(zip(seg_param_keys, # segment's params as keys\n # Accumulated params:\n [*map(sum,\n zip(*map(op.itemgetter(*Dert_keys),\n Py_))),\n len(Py_), Py_[0].pop('y'), # Ly, y0\n min(P['x0'] for P in Py_),\n max(P['x0']+P['L'] for P in Py_),\n Py_, # Py_ .\n Py_[-1].pop('root_'), Py_[0].pop('fork_'), # root_, fork_ .\n Py_[0].pop('sign')]))\n # cluster_vertical(P): traverse segment from first P:\n for Py_ in map(cluster_vertical, P0_)]\n\n for seg in seg_: # Update segs' refs.\n seg['Py_'][0]['seg'] = seg['Py_'][-1]['seg'] = seg\n\n for seg in seg_: # Update root_ and fork_ .\n seg.update(root_=[*map(lambda P: P['seg'], seg['root_'])],\n fork_=[*map(lambda P: P['seg'], seg['fork_'])])\n\n for i, seg in enumerate(seg_): # Remove segs' refs.\n del seg['Py_'][0]['seg']\n\n return seg_", "title": "" }, { "docid": "585dd975cf081a42504a8eb261a871e6", "score": "0.49591067", "text": "def test_create_ngsegmentlayer(self):\n layer_serverdir, layer_host = get_ngserver()\n\n ngviewer = openviewer(None)\n\n ngviewer2 = create_nglayer(ngviewer=ngviewer,\n layer_kws={'type': 'segdataset', 'ngspace': 'FAFB', 'name': 'seg_20190805'})\n\n assert ngviewer2 == ngviewer", "title": "" }, { "docid": "3d5eec7bae0c896ed6ebf82cbc66c81c", "score": "0.49558926", "text": "def add_routing_segmentation_segment(\n self,\n segment_name: str,\n) -> dict:\n data = {\"name\": segment_name}\n\n return self._post(\"/vrf/config/segments\", data=data)", "title": "" }, { "docid": "89335979d5fffaf3d75100a9b2f94ee4", "score": "0.49193972", "text": "def CreateSegmentExportJob(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "7513fac447e93f7df3c97ac3ee95a27f", "score": "0.49167356", "text": "def __init__(self, x, y, seg_id):\n self.x = x\n self.y = y\n self.seg_id = 0\n self.bin_id = 0", "title": "" }, { "docid": "3daa6623659dbb151d2bafabddc0a6d1", "score": "0.48811328", "text": "def getStartEndSeg(self):\r\n assert self.size()>=2 # asserts that length of the Polyline is at least 2\r\n \r\n segment = Segment(self.getStart(), self.getEnd()) #create a segment\r\n return segment", "title": "" }, { "docid": "effa29d95b8a3f5338c97e46fa33b8a2", "score": "0.48647973", "text": "def TestSegment(*args):\r\n return _Box2D2.b2Segment_TestSegment(*args)", "title": "" }, { "docid": "a73db23b6876ab62568b475c7b50a3e9", "score": "0.48596555", "text": "def gen_end_seg(self):\n\n self.segments_list.append([self.nodes_list[-1][1], self.end])", "title": "" }, { "docid": "7059418d333a20fd850b0b1e06f554e2", "score": "0.48517436", "text": "def form_segment_old(P_, fa, noM):\n # Get a list of every segment's first P:\n P0_ = [*filter(lambda P: (len(P['fork_']) != 1\n or len(P['fork_'][0]['root_']) != 1),\n P_)]\n\n param_keys = aseg_param_keys if fa else gseg_param_keys\n if noM:\n param_keys.remove(\"M\")\n\n # Form segments:\n seg_ = [dict(zip(param_keys, # segment's params as keys\n # Accumulated params:\n [*map(sum,\n zip(*map(op.itemgetter(*param_keys[:-6]),\n Py_))),\n len(Py_), Py_[0].pop('y'), Py_, # Ly, y0, Py_ .\n Py_[-1].pop('root_'), Py_[0].pop('fork_'), # root_, fork_ .\n Py_[0].pop('sign')]))\n # cluster_vertical(P): traverse segment from first P:\n for Py_ in map(cluster_vertical, P0_)]\n\n for seg in seg_: # Update segs' refs.\n seg['Py_'][0]['seg'] = seg['Py_'][-1]['seg'] = seg\n\n for seg 
in seg_: # Update root_ and fork_ .\n        seg.update(root_=[*map(lambda P: P['seg'], seg['root_'])],\n                   fork_=[*map(lambda P: P['seg'], seg['fork_'])])\n\n    for i, seg in enumerate(seg_): # Remove segs' refs.\n        del seg['Py_'][0]['seg']\n\n    return seg_", "title": "" }, { "docid": "c331f78945e199f681acb7b8a6ec43e8", "score": "0.48493385", "text": "def addTCBSegment(self, Union, QPointF=None, QPoint=None, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__\n        pass", "title": "" }, { "docid": "28bd829e004569ffad7a47ee94fbd000", "score": "0.4822339", "text": "def auto_generated(self):\n        with self.prefix('// '):\n            self.emit('<auto-generated>')\n            self.emit('Auto-generated by StoneAPI, do not modify.')\n            self.emit('</auto-generated>')\n        self.emit()", "title": "" }, { "docid": "7f364c4951b52ca9b337ad2f6396a1d3", "score": "0.48144737", "text": "def makeSNode(self, name):\n        self._serviceNodes[name] = SVCNode()", "title": "" }, { "docid": "dd2fc2aea43f72b5ed306741d33442ae", "score": "0.47800156", "text": "def __create_seg_obj(self, trans_unit_element, tag):\n\n        element = trans_unit_element.find('xliff:'+tag, self.namespace)\n\n        seg_obj = Segment()\n        seg_obj.string = self.__convert_element_to_string(element)\n        return seg_obj", "title": "" }, { "docid": "06b151868917598376d3ac63891caae2", "score": "0.47720265", "text": "def TestSegment(*args):\r\n        return _Box2D2.b2Shape_TestSegment(*args)", "title": "" }, { "docid": "fde751e615979835b1fa04b9776a27db", "score": "0.47692618", "text": "def _make_segments(self):\n        self.segments = []\n        append = self.segments.append\n\n        for i, j in self.ij:\n            element = self.samples.iloc[i, j]\n\n            if hasattr(element, \"shape\"):\n                vector = element - self.zero\n                x0, y0 = j, i\n                x1 = x0 + vector[0]\n                y1 = y0 + vector[1]\n                append(np.array([[x0, y0], [x1, y1]]))", "title": "" }, { "docid": "b154be5d26d2fed75a31e1e512a332cf", "score": "0.47665852", "text": "def makeSegments(self, segmentsNew, filtName=None, species=None, subfilter=None):\n        if self.method==\"Click\":\n            # Batmode: segmentsNew should be already prepared as: [x1, x2, labels]\n            y1 = 0\n            y2 = 0\n            if len(segmentsNew)!=3:\n                print(\"Warning: segment format does not match bat mode\")\n            segment = Segment.Segment([segmentsNew[0], segmentsNew[1], y1, y2, segmentsNew[2]])\n            self.segments.addSegment(segment)\n        elif subfilter is not None:\n            # for wavelet segments: (same as self.species!=\"Any sound\")\n            y1 = subfilter[\"FreqRange\"][0]\n            y2 = min(subfilter[\"FreqRange\"][1], self.sampleRate//2)\n            for s in segmentsNew:\n                segment = Segment.Segment([s[0][0], s[0][1], y1, y2, [{\"species\": species, \"certainty\": s[1], \"filter\": filtName, \"calltype\": subfilter[\"calltype\"]}]])\n                self.segments.addSegment(segment)\n        else:\n            # for generic all-species segments:\n            y1 = 0\n            y2 = 0\n            species = \"Don't Know\"\n            cert = 0.0\n            self.segments.addBasicSegments(segmentsNew, [y1, y2], species=species, certainty=cert)", "title": "" }, { "docid": "2ee0be7adf7d294836794e6e613da89a", "score": "0.47529534", "text": "def init_segment(self, segment_name):\n        if Verbose: print(\"CRNN SprintControl[pid %i] init_segment %s\" % (os.getpid(), segment_name))\n        with self.cond:\n            assert self.seg_name == segment_name\n            self.notified_for_segment = True\n            self.cond.notifyAll()", "title": "" }, { "docid": "492ea47bebfb103ec460c73b61f359da", "score": "0.47458243", "text": "def create(self):\n        self.createLines()\n        self.createIntrudingPoints()\n        self.createBezierCurves()", "title": "" },
{ "docid": "089f9ded24e61d29ed6a4bdca43e969c", "score": "0.47030553", "text": "def generate_segment_model(cx, cy, length, sigma, angle, paint_pixel):\n\n    model_image, cr, cc = generate_segment_model_image(length, sigma, angle)\n\n    height, width = model_image.shape\n\n    for r in range(height):\n        for c in range(width):\n            paint_pixel(cx - cc + c, cy - cr + r, model_image[r, c])", "title": "" }, { "docid": "29f6d2dd2fc559133fa0d5f136776e4a", "score": "0.4693379", "text": "def create():\n    raise NotImplementedError", "title": "" }, { "docid": "e7fe5e8f30d85c77f4c08a00a83e7543", "score": "0.46889424", "text": "def create_cluster(self):\n        raise NotImplementedError(\n            \"Any subclass of SymphonyParser must implement create_cluster(self)\")\n        # config = SymphonyConfig()\n        # cluster = Cluster.new(config.cluster_type, **config.cluster_args)\n        # return cluster", "title": "" }, { "docid": "0a10a25630ed91f0e536bdedb3dd29b5", "score": "0.46881986", "text": "def createInstance():\n    writeInstanceScript(domainName,configurationName,machineName,scriptFile)\n    printFlag('Begin Creating Instance')\n    execWLSTCommand(scriptFile)\n    printFlag('Done Creating Instance')\n    startOTD(domainName,configurationName,machineName)", "title": "" }, { "docid": "b0bcba392c36e4838f738109ad2ecd9a", "score": "0.46794292", "text": "def draw_segment(self):\n\n        from_x = self.segment['pos0'] * self.scaler\n        to_x = self.segment['pos1'] * self.scaler\n        line_width = 8\n        color = QColor(self.segment['color'])\n        self.setPen(QtGui.QPen(color, line_width, QtCore.Qt.PenStyle.SolidLine))\n        self.setLine(from_x, self.segment['y'], to_x, self.segment['y'])", "title": "" }, { "docid": "6060b0d2b3d36346c1cf9613311adfc9", "score": "0.46757546", "text": "def build_segments_tree(text_segments):\n    raise NotImplementedError", "title": "" }, { "docid": "f339586ef8714984867201dce30a15a5", "score": "0.4659995", "text": "def beginSegment(self,start,end,dimension,initialValueArray,idx=-1):\n        _mimport('_treeshr',1).TreeBeginSegment(self,start,end,dimension,initialValueArray,idx)", "title": "" }, { "docid": "f75ac804b188a08e3c1b2c354a1a25f6", "score": "0.46540138", "text": "def __defaults__(self): \n        self.tag = 'Segment'\n        \n        self.prev = None\n        self.next = None # for connectivity", "title": "" }, { "docid": "32ed098a048304941d030d375b643748", "score": "0.46296322", "text": "def create_gen_region(self):\n        self.create_var(var_name='gen_region',\n                        obj_type=\"String\",\n                        q_txt=\"Target AWS region\",\n                        t_tip=\"Target AWS region\",\n                        def_val=self.aws_region,\n                        h_txt=\"Target AWS region\",\n                        order_val=1000,\n                        m_toggle=\"false\")", "title": "" }, { "docid": "1811550d69daba558599799bdd4d228a", "score": "0.4626838", "text": "def __init__(self, center, half_dim):\n        self.startNode = QuadNode(center, half_dim)", "title": "" }, { "docid": "a03d5495d7cd983c006821fad541d18f", "score": "0.4626648", "text": "def create_network_segments(self, context, network, tenant_id):\n        segments = self._process_provider_create(network)\n        filters = {'project_id': tenant_id}\n        with db_api.CONTEXT_WRITER.using(context):\n            network_id = network['id']\n            if segments:\n                for segment_index, segment in enumerate(segments):\n                    segment = self.reserve_provider_segment(\n                        context, segment, filters=filters)\n                    self._add_network_segment(context, network_id, segment,\n                                              segment_index)\n            elif (cfg.CONF.ml2.external_network_type and\n                  self._get_attribute(network, 
extnet_apidef.EXTERNAL)):\n segment = self._allocate_ext_net_segment(\n context, filters=filters)\n self._add_network_segment(context, network_id, segment)\n else:\n segment = self._allocate_tenant_net_segment(\n context, filters=filters)\n self._add_network_segment(context, network_id, segment)", "title": "" }, { "docid": "760601f68870c928fadb432db4a1d4de", "score": "0.4626532", "text": "def draw_segment(self, p1, p2, color=\"green\"):\n p1_screen = self.tx.transform_pt(p1)\n p2_screen = self.tx.transform_pt(p2)\n seg = graphics.Line(graphics.Point(p1_screen.x, p1_screen.y),\n graphics.Point(p2_screen.x, p2_screen.y))\n seg.setOutline(color)\n seg.draw(self.win)\n return seg", "title": "" }, { "docid": "12cdbeb0a96a1064e0d6281700f2f4d2", "score": "0.4605989", "text": "def initialize_seg(self):\n np.copyto(dst=self.seg.cpu,src=self.seg_ini) \n np.copyto(dst=self.border.cpu,src=self.border_ini)\n self.seg.cpu2gpu()\n self.border.cpu2gpu()", "title": "" }, { "docid": "a1635af563e08479ef2e5539c2548e6b", "score": "0.4602013", "text": "def make_vertex(point):\n occ_point = geom_utils.numpy_to_gp(point)\n vertex_maker = BRepBuilderAPI_MakeVertex(occ_point)\n vertex = vertex_maker.Shape()\n return Vertex(vertex)", "title": "" }, { "docid": "d889239bc434f701106defd1580def25", "score": "0.460128", "text": "def generate_segment_script(\n self, segments_to_concat, segment_information, event_start, event_end\n ):\n segment_iterable = iter(segments_to_concat)\n segment = next(segment_iterable)\n concat_script = f\"file 'file:{os.path.join(self._segments_folder, segment)}'\"\n concat_script += (\n f\"\\ninpoint {int(event_start-segment_information[segment]['start_time'])}\"\n )\n\n try:\n segment = next(segment_iterable)\n except StopIteration:\n concat_script += (\n \"\\noutpoint \"\n f\"{int(event_end-segment_information[segment]['start_time'])}\"\n )\n return concat_script\n\n while True:\n try:\n concat_script += (\n \"\\nfile \" f\"'file:{os.path.join(self._segments_folder, segment)}'\"\n )\n segment = next(segment_iterable)\n except StopIteration:\n concat_script += (\n \"\\noutpoint \"\n f\"{int(event_end-segment_information[segment]['start_time'])}\"\n )\n return concat_script", "title": "" }, { "docid": "849552d8c6d4019a54b393b94f316100", "score": "0.45946303", "text": "def create_topology(self):\n\n self.__create_lab()\n self.__add_nodes()\n self.__connect_nodes()\n self.__configure_nodes()", "title": "" }, { "docid": "dcd1bb44324e051fd644c9f2a7b81627", "score": "0.45919883", "text": "def create(self, ag, role, mode, annotations):\n name = (\"{}-{}\".format(ag, role.value) +\n (\"-\" + mode.name.lower() if role == AgRole.SECONDARY else \"\"))\n self.name = name\n yaml_set(self.data, [\"metadata\", \"name\"], name)\n yaml_set(self.data, [\"metadata\", \"annotations\"], annotations)\n role_key = \"role.ag.mssql.microsoft.com/{}\".format(ag)\n yaml_set(self.data, [\"spec\", \"selector\", role_key], role.value)\n if role == AgRole.PRIMARY:\n yaml_set(self.data, [\"spec\", \"ports\", 0, \"targetPort\"], 1433)\n elif role == AgRole.SECONDARY:\n mode_key = \"mode.ag.mssql.microsoft.com/{}\".format(ag)\n yaml_set(self.data, [\"spec\", \"selector\", mode_key], mode.value)", "title": "" }, { "docid": "0dcf7f65222ff4545b5b900854f4ff94", "score": "0.45909268", "text": "def make_split_with_segment(name, cn, active, killed, default_treatment,\n tt, on, segment):\n return {\n 'trafficTypeName': tt,\n 'name': name,\n 'seed': cn,\n 'status': 'ACTIVE' if active else 'ARCHIVED',\n 'changeNumber': cn,\n 'killed': 
killed,\n        'defaultTreatment': default_treatment,\n        'configurations': {\n            'on': '{\\'size\\':15,\\'test\\':20}'\n        },\n        'conditions': [\n            {\n                'matcherGroup': {\n                    'combiner': 'AND',\n                    'matchers': [\n                        {\n                            'matcherType': 'IN_SEGMENT',\n                            'negate': False,\n                            'userDefinedSegmentMatcherData': {'segmentName': segment},\n                            'whitelistMatcherData': None\n                        }\n                    ]\n                },\n                'partitions': [{\n                    'treatment': 'on' if on else 'off',\n                    'size': 100\n                }]\n            }\n        ]\n    }", "title": "" }, { "docid": "9929303a521c2a5bf984a2a69f6a494f", "score": "0.45792127", "text": "def _compute_segments(self):\n\n        if self.vertices is None:\n            self._segments = None\n\n        v1, v2, v3, v4, v5, v6, v7, v8 = self.vertices\n\n        a = [v1, v2]\n        b = [v2, v3]\n        c = [v3, v4]\n        d = [v4, v5]\n        e = [v5, v6]\n        f = [v6, v7]\n        g = [v7, v8]\n        h = [v8, v1]\n\n        segments = np.array([a,b,c,d,e,f,g,h])\n\n        self._segments = segments", "title": "" }, { "docid": "5f52b757fb2f30f9b11ea22ffcab9b14", "score": "0.4578763", "text": "def create(self):\n        pass", "title": "" }, { "docid": "ba6aa9db3d38a311930ecf238decfbef", "score": "0.45733654", "text": "def _generate_skeleton(x, min_radius=0):\n    # flatten the list of the segments (sub-trees)..\n    nodes_ordered = [n for seg in x.segments for n in seg[::-1]]\n    # arrange the nodes in the order of segments..\n    this_tn = x.nodes.set_index('node_id').loc[nodes_ordered]\n    # remove the first occurance of duplicated elements (as seglist stuff is repeated for different segments)..\n    this_tn = this_tn[~this_tn.index.duplicated(keep='first')]\n    this_tn['index'] = list(range(1, this_tn.shape[0] + 1))\n\n    # treenode to index..\n    tn2ix = this_tn['index'].to_dict()\n\n    # set the rootnodes as 0..\n    this_tn['parent_ix'] = this_tn.parent_id.map(lambda x: tn2ix.get(x, -1))\n\n    # get the vertices now..\n    vertices = np.array(this_tn[['x', 'y', 'z']].values.tolist(), dtype=\"float32\")\n\n    # get the edges now..\n    edges = np.array(this_tn[['index', 'parent_ix']].values[1:] - 1, dtype=\"uint32\")\n\n    skeleton = Skeleton(segid=x.id, vertices=vertices, edges=edges)\n\n    # set the min_radius\n    min_radius = 0\n    if not isinstance(min_radius, type(None)):\n        this_tn.loc[this_tn.radius < min_radius, 'radius'] = min_radius\n\n    skeleton.radius = np.array(this_tn['radius'].values, dtype=\"float32\")\n\n    # Set Label column to 0 (undefined)\n    this_tn['label'] = 0\n    # Add end/branch labels\n    this_tn.loc[this_tn.type == 'branch', 'label'] = 5\n    this_tn.loc[this_tn.type == 'end', 'label'] = 6\n    # Add soma label\n    if x.soma is not None:\n        this_tn.loc[x.soma, 'label'] = 1\n\n    skeleton.vertex_types = this_tn.label\n\n    return skeleton", "title": "" }, { "docid": "700c6111b41a658a4526c4c50aafa97d", "score": "0.45704716", "text": "def directed_segment(self, p0: Tuple[float, float], length: float) -> DirectedSegment:\n        x1 = p0[0] + (self.unit_vector().x * length)\n        y1 = p0[1] + (self.unit_vector().y * length)\n        return DirectedSegment(p0, (x1, y1))", "title": "" }, { "docid": "1390aa264406929f5e2e1b0333b654d1", "score": "0.4568512", "text": "def add_segment(self, segment):\n        if self.n == 0:\n            self.root = segment\n\n        self.segments.append(segment)\n        index = self.n\n        self.n += 1\n\n        return index", "title": "" }, { "docid": 
"79f3e76b2780c98993225c21928f1932", "score": "0.45658553", "text": "def test_from_segment(self):\n array = np.random.random(100)\n s = Segment(array)\n\n ms = MetaSegment.from_segment(s)\n\n self._test_attributes(ms, sample_rate=s.sample_rate, shape=s.shape, size=s.size, maximum=s.max(),\n mean=s.mean(), minimum=s.min(), std=s.std())", "title": "" }, { "docid": "758416f09a342478053d4319320c2690", "score": "0.4561472", "text": "def copy(self) -> 'Segment':\n return Segment(start=self.start, end=self.end)", "title": "" }, { "docid": "8c831055f30bc701aca9255e31890720", "score": "0.4560596", "text": "def segment_cls(self) -> _Type[_hints.Segment]:\n return self._segment_cls", "title": "" }, { "docid": "2898626f596e3d8a48e1da9c96df5d78", "score": "0.4552486", "text": "def _init_online_segmentor_module(self, sample_rate):\n self.online_segmentor = OnlineSegmentor(sample_rate)", "title": "" }, { "docid": "e2e8e05575ec67c5baee633d4066d39f", "score": "0.45394158", "text": "def __init__(self,onboard='True'):\n\t\tself.templateID = self.execore.onBoard(self.NSD)\n\t\tself.exeCore.createService(self.templateID)", "title": "" }, { "docid": "01fb4d424684fa6b0610c45f30528a42", "score": "0.452709", "text": "def create_from_pb2(\n cls, pb2_obj: _SegmentationOptionsProto) -> \"SegmentationOptions\":\n return SegmentationOptions(\n display_names_locale=pb2_obj.display_names_locale,\n output_type=OutputType(pb2_obj.output_type))", "title": "" }, { "docid": "7e0ec1fc65ae97ab7dc9efb0f63b0310", "score": "0.45200127", "text": "def _Create(self):\n raise NotImplementedError", "title": "" }, { "docid": "d153aabe215e030ca45e88979a9a4a71", "score": "0.45164552", "text": "def create(add: Optional[bool], edit: Optional[bool]) -> None:\n if add is None:\n add = git_config_bool(\"scriv.create.add\")\n if edit is None:\n edit = git_config_bool(\"scriv.create.edit\")\n\n scriv = Scriv()\n frag = scriv.new_fragment()\n file_path = frag.path\n if not file_path.parent.exists():\n sys.exit(\n f\"Output directory {str(file_path.parent)!r} doesn't exist,\"\n + \" please create it.\"\n )\n\n if file_path.exists():\n sys.exit(f\"File {file_path} already exists, not overwriting\")\n\n logger.info(f\"Creating {file_path}\")\n frag.write()\n\n if edit:\n git_edit(file_path)\n sections = scriv.sections_from_fragment(frag)\n if not sections:\n logger.info(\"Empty fragment, aborting...\")\n file_path.unlink()\n sys.exit()\n\n if add:\n git_add(file_path)", "title": "" }, { "docid": "d83b69bf3d960e5e7905035452af417b", "score": "0.4506001", "text": "def Create():\r\n pass", "title": "" }, { "docid": "b67865d1f3007e4cf226646616a15daf", "score": "0.4504711", "text": "def create(self,\n oidc_end_point,\n ):\n return self._invoke('create',\n {\n 'oidc_end_point': oidc_end_point,\n })", "title": "" }, { "docid": "455ef9df5f7fec74d226a95ce8ee33f4", "score": "0.4502435", "text": "def assign_segment_to_code(self, selected):\n\n if self.file_ is None or self.segment['start_msecs'] is None or self.segment['end_msecs'] is None:\n self.clear_segment()\n return\n sql = \"insert into code_av (id, pos0, pos1, cid, memo, date, owner, important) values(?,?,?,?,?,?,?, null)\"\n cid = int(selected.text(1).split(':')[1])\n values = [self.file_['id'], self.segment['start_msecs'],\n self.segment['end_msecs'], cid, self.segment['memo'],\n datetime.datetime.now().astimezone().strftime(\"%Y-%m-%d %H:%M:%S\"),\n self.app.settings['codername']]\n cur = self.app.conn.cursor()\n cur.execute(sql, values)\n self.app.conn.commit()\n self.load_segments()\n 
self.clear_segment()\n self.app.delete_backup = False\n self.fill_code_counts_in_tree()", "title": "" }, { "docid": "604e19a20b11bdd34a22a4f425f38ca1", "score": "0.4501782", "text": "def createTACSAssembler(self, elemCallBack, dvpn=1, **kwargs):\n # create the creator if it doesn't exist\n if self.creator is None:\n if self.geom_type == \"quad\":\n self.creator = QuadCreator(self.bcs, dvpn)\n elif self.geom_type == \"oct\":\n self.creator = OctCreator(self.bcs, dvpn)\n\n self.creator.setElemCallBack(elemCallBack)\n\n # create the TACSAssembler\n self.assembler = self.creator.createTACS(\n forest=self.forest, comp_names=self.compDescripts, **kwargs\n )\n return self.assembler", "title": "" }, { "docid": "f1cf259c0de392293ecbaea78d868381", "score": "0.45006278", "text": "def __init__(self, segmenter: Segmenter, seed: Optional[int]):\n super().__init__()\n self.segmenter = segmenter\n self.seed = seed", "title": "" }, { "docid": "76e7683bf9911b3722c35af298abba26", "score": "0.4499632", "text": "def create(name):\n return CommandParser(prog=name)", "title": "" }, { "docid": "02f4a58cdb318aae70b15b3fd775a35c", "score": "0.44970924", "text": "def add_segment(self, segment: _T) -> None:\n self._segments.add(segment)", "title": "" }, { "docid": "2bc0b1d9fa9ce0c44205b06072db47cc", "score": "0.44962358", "text": "def create(self):\n raise NotImplementedError", "title": "" }, { "docid": "d65f2232e6e26e3028d97189eec44a8e", "score": "0.4495114", "text": "def load_segments(self):\n\n if self.file_ is None:\n return\n # 10 is assigned as an initial default for y values for segments\n sql = \"select avid, id, pos0, pos1, code_av.cid, ifnull(code_av.memo,''), code_av.date, \"\n sql += \" code_av.owner, code_name.name, code_name.color, 10, code_av.important from code_av\"\n sql += \" join code_name on code_name.cid=code_av.cid\"\n sql += \" where id=? \"\n sql += \" and code_av.owner=? 
\"\n values = [self.file_['id'], self.app.settings['codername']]\n cur = self.app.conn.cursor()\n cur.execute(sql, values)\n results = cur.fetchall()\n keys = 'avid', 'id', 'pos0', 'pos1', 'cid', 'memo', 'date', 'owner', 'codename', 'color', 'y', 'important'\n self.segments = []\n for row in results:\n self.segments.append(dict(zip(keys, row)))\n # Fix overlapping segments by incrementing y values so segment is shown on a different line\n for i in range(0, len(self.segments) - 1):\n for j in range(i + 1, len(self.segments)):\n if (self.segments[i]['pos0'] <= self.segments[j]['pos0'] <= self.segments[i]['pos1'] and\n self.segments[i]['y'] == self.segments[j]['y']) or \\\n (self.segments[j]['pos0'] <= self.segments[i]['pos0'] <= self.segments[j]['pos1'] and\n self.segments[i]['y'] == self.segments[j]['y']):\n # to overcome the overlap, add to the y value of the i segment\n self.segments[j]['y'] += 10\n # Add seltext, the text link to the segment\n sql = \"select seltext from code_text where avid=?\"\n for s in self.segments:\n cur.execute(sql, [s['avid']])\n res = cur.fetchall()\n txt = \"\"\n for r in res:\n txt += str(r[0]) + \"\\n\"\n s['seltext'] = txt\n # Draw coded segments in scene\n scaler = self.scene_width / self.media.get_duration()\n self.scene.clear()\n for s in self.segments:\n self.scene.addItem(SegmentGraphicsItem(self.app, s, scaler, self))\n # Set te scene to the top\n self.ui.graphicsView.verticalScrollBar().setValue(0)", "title": "" }, { "docid": "159304b1a2057d5de4a9c6cf2697d0fb", "score": "0.4489671", "text": "def perform_create(self, serializer):\n org_id = self.get_organization(self.request)\n serializer.save(organization_id=org_id)", "title": "" }, { "docid": "bbfdf925098b759bc2a92f32982abbb3", "score": "0.44811988", "text": "def createService(self, tenantId,payLoad):\n\n uri = \"/v1/tenants/\" + str(tenantId) + \"/services/\"\n response = self.client.post(uri, payLoad)\n return response", "title": "" }, { "docid": "f744a46708c350eb41e894524ba8c393", "score": "0.44803348", "text": "def create_sections(self):\n # NOTE: cell=self is required to tell NEURON of this object.\n self.soma = h.Section(name='soma', cell=self)", "title": "" }, { "docid": "6bb8086729775f3910f3a904969c0ff8", "score": "0.44741234", "text": "def _segment(self, image):\n # return segments\n raise NotImplementedError()", "title": "" }, { "docid": "ea0d86591b1c5d13fa340c05b97c1138", "score": "0.44735524", "text": "def set_target_segment(self, v):\n u = self.clone()\n u.fragment[1] = v\n return u", "title": "" }, { "docid": "80bbf92dab4f27d633154a3b8cbaec89", "score": "0.44634736", "text": "def build_topology(self):\n self.dend.connect(self.soma(1))", "title": "" }, { "docid": "0d0cb23d54c2ccc57995db00b7a20915", "score": "0.44570857", "text": "def _make_segmentation_panel(self, frame):\n\n row = 0\n \n # segment info label (siL)\n siL = Tkinter.Label(frame, text = 'Segmentation Info ')\n siL.grid(row = row, column = 0, sticky = 'w')\n\n # chosoe segment type (cst)\n row = row + 1\n cst = Hybrid.Radiobutton_Row(frame, 'Choose type ',\n (self.segment_choices[0],\n self.segment_choices[1]),\n self.seg_type_cb)\n cst.frame.grid(row = row, column = 0, columnspan=2,\n sticky = 'nw', padx=32)\n self.segment_type = cst.variable\n\n # choose number of outputs (cno)\n row = row + 1\n cno = Hybrid.Radiobutton_Row(frame, 'Output files ',\n (self.output_choices[0],\n self.output_choices[1]),\n self.output_type_cb)\n cno.frame.grid(row = row, column = 0, columnspan=2,\n sticky = 'nw', padx=32)\n self.output_type = 
cno.variable\n\n # output data file entry (odfE)\n row = row + 1\n odfE = Hybrid.Entry(frame, 'File MRC ', '16', 'temp.mrc')\n odfE.frame.grid(row = row, column = 0, sticky = 'w', padx=32)\n self.data_output = odfE\n # @ how to update entry - callback\n\n # buttons\n sb = Hybrid.Button_Row(frame, '',\n (('Apply', self.segment_apply_cb),))\n sb.frame.grid(row = row, column = 1, sticky = 'w')\n\n return", "title": "" } ]
c3768705e91bc725c5e38587c0f8dbad
The vendor-assigned unique ID for this range. This ID is incremented automatically for each DHCP client.
[ { "docid": "de7633b6ddf44a0a599ddea910e9b394", "score": "0.6511509", "text": "def Dhcp6DuidVendorId(self):\n return self._get_attribute('dhcp6DuidVendorId')", "title": "" } ]
[ { "docid": "377d93c9e4a72cbec71921e0ba1e58df", "score": "0.70701176", "text": "def unique_id(self) -> str:\n return f\"{self.dev.system.serial}_{self.dev.name}\"", "title": "" }, { "docid": "c0706950846448c4986cd03a0ade5b30", "score": "0.70366126", "text": "def unique_id(self):\n return (\n f\"{self._econet.device_id}_{self._econet.device_name}_{self._device_name}\"\n )", "title": "" }, { "docid": "f7a25e89a26285a951e9fbbd5917cee7", "score": "0.6988023", "text": "def unique_id(self):\n return self._device_info.sernum", "title": "" }, { "docid": "c8c3f6a617d577d66f4b90f8e892bd38", "score": "0.6980739", "text": "def vendor_id(self) -> str:\n return self.VID or \"\"", "title": "" }, { "docid": "37f601b00d3efaa27202be1283ffb25e", "score": "0.69150895", "text": "def unique_id(self) -> str:\n return self.uuid", "title": "" }, { "docid": "fcc92c7c4467472856a3d522a0d545ba", "score": "0.69084954", "text": "def unique_id(self) -> str:\n return self._controller.device_uid", "title": "" }, { "docid": "3b3f047acac2fc0cb99072d0e623fa7c", "score": "0.68593884", "text": "def unique_id(\n self,\n ):\n if self._actuator_type == \"INFINITY_OUTPUT_MODULE\":\n return (\n f\"{self._service_location.device_serial_number}-\"\n f\"{self._service_location.service_location_id}-actuator-\"\n f\"{self._actuator_id}-{self._actuator_state_option}\"\n )\n\n # Switch or comfort plug\n return (\n f\"{self._service_location.device_serial_number}-\"\n f\"{self._service_location.service_location_id}-actuator-\"\n f\"{self._actuator_id}\"\n )", "title": "" }, { "docid": "47a39207961a6e60fb6e89443cb087c7", "score": "0.6854286", "text": "def unique_id(self) -> str:\n return f\"{self.charger_data.charger.id}_{self._entity_name}\"", "title": "" }, { "docid": "b3d695fe8a4ec8e26ddf79b11c36e16c", "score": "0.6848748", "text": "def unique_id(self):\n return self.selve_device.iveoID", "title": "" }, { "docid": "2d66e05281fcd20aa947d036665bdb91", "score": "0.68384236", "text": "def device_id(self):\n return self.unique_id[:23]", "title": "" }, { "docid": "5efe62b3be076ac393a16cc5a472e1f7", "score": "0.68220836", "text": "def unique_id(self):\n return self.addr.address", "title": "" }, { "docid": "c51bcf43a681f8b5dc7c1d43506e9957", "score": "0.6791944", "text": "def unique_id(self):\n return f\"{self._hwid}.{self._gpio}\"", "title": "" }, { "docid": "bfb5858c97915dea00a1c7c893a46b8c", "score": "0.6752749", "text": "def Dhcp6DuidVendorIdIncrement(self):\n return self._get_attribute('dhcp6DuidVendorIdIncrement')", "title": "" }, { "docid": "a8bfa49fdc1298e71d8ab32c9a40c2ed", "score": "0.67378205", "text": "def unique_id(self):\r\n return self._device.unique_id", "title": "" }, { "docid": "35b04d36d9851315b37e26b5dd5edee0", "score": "0.67248684", "text": "def unique_id(self) -> str:\n return \"{}_{}_{}\".format(\n self.rainmachine.device_mac.replace(\":\", \"\"),\n self._switch_type,\n self._rainmachine_entity_id,\n )", "title": "" }, { "docid": "317a9295e0b2d18f5fc94e8c5a5ea21b", "score": "0.6723535", "text": "def unique_id(self):\n return f\"{self._gateway.gw_id}-{self._var}\"", "title": "" }, { "docid": "60d21aec473bc2778b6e3eeec716ab91", "score": "0.6697008", "text": "def unique_id(self) -> str:\n return self.myname + \"_\" + str(self.partition_id)", "title": "" }, { "docid": "ebda60ecb51b1f3e93980fa813a0c32d", "score": "0.6680531", "text": "def unique_id(self):\n return '{}.{}'.format(self.__class__, self.machine_identifier or\n self.name)", "title": "" }, { "docid": "af6ce85f56b8d6577c8ac9d1af3df255", "score": "0.6642578", "text": 
"def unique_id(self) -> str:\n\t\treturn self._unique_id", "title": "" }, { "docid": "b358136499dd05ef4b38d94990b81f35", "score": "0.663235", "text": "def unique_identifier(self) -> str:\n return pulumi.get(self, \"unique_identifier\")", "title": "" }, { "docid": "88ada2ce674ff43c072c360e78f35f15", "score": "0.66322875", "text": "def unique_id(self) -> str:\n return pulumi.get(self, \"unique_id\")", "title": "" }, { "docid": "14f38b1d6c74fd6958b1e5d40f85ac0f", "score": "0.6614204", "text": "def unique_id(self):\n return self._address", "title": "" }, { "docid": "38491d03fd48bc3aada023e737719b03", "score": "0.6604434", "text": "def unique_id(self) -> str:\n return f\"{self._device.device_id}.{self._attribute}\"", "title": "" }, { "docid": "6d1a45f9462c55cb664bf8ce6a3fa443", "score": "0.66043043", "text": "def unique_id(self):\n return self.device.device_id", "title": "" }, { "docid": "749a915fed6d92dc4bcf3644e395c13d", "score": "0.660424", "text": "def unique_id(self):\n return self._innogy_device.id", "title": "" }, { "docid": "ea539f5310140f459130703e9ec53363", "score": "0.6585148", "text": "def vendor_id(self):\n return self._get_device_info_int(cl.CL_DEVICE_VENDOR_ID)", "title": "" }, { "docid": "98c972c25c2ef91524967daeac93f150", "score": "0.65823954", "text": "def unique_id(self):\n return self._serialnumber", "title": "" }, { "docid": "ef8313275a4da26d935cd4248f3e93b9", "score": "0.6579713", "text": "def unique_id(self) -> str | None:\n return self.mac_address", "title": "" }, { "docid": "1b6952ef430d3131fafb25e5dfc2c7ab", "score": "0.65654045", "text": "def unique_id(self) -> str:\n return self._unique_id", "title": "" }, { "docid": "1b6952ef430d3131fafb25e5dfc2c7ab", "score": "0.65654045", "text": "def unique_id(self) -> str:\n return self._unique_id", "title": "" }, { "docid": "1b6952ef430d3131fafb25e5dfc2c7ab", "score": "0.65654045", "text": "def unique_id(self) -> str:\n return self._unique_id", "title": "" }, { "docid": "47d5c0b4ce24d3892fbf7355d30b10a1", "score": "0.6551049", "text": "def unique_id(self):\n return self._serial_number", "title": "" }, { "docid": "e6072fe8bc2521ce886579b38d0c2224", "score": "0.6540324", "text": "def get_unique_id(self):\n try:\n ts = time.time()\n unique_id = hex(int(ts*10000000*random.randint(1,1000)))[2:]\n return unique_id\n except RuntimeError as rte:\n return str(rte)", "title": "" }, { "docid": "0cf7b57e3fb1cf1472b1114c46ab6ac0", "score": "0.65181965", "text": "def unique_id(self) -> str:\n if self._is_default:\n return self._api.unique_id\n return f\"{self._api.unique_id}-{self._measurement}\"", "title": "" }, { "docid": "f90574b5eea2aeef811ac73350a0aa16", "score": "0.651128", "text": "def id(self):\n return self.phys_address.upper()", "title": "" }, { "docid": "0608e1ec1de2a4e02a63408cac6b8831", "score": "0.6490177", "text": "def createID(self):\n used_ids = self.getIDs()\n return Foundation.uniqueID(used_ids)", "title": "" }, { "docid": "6f6e93ca8b40e14ce299def19bd072d1", "score": "0.648557", "text": "def unique_id(self) -> str:\n return f\"{self._controller.unique_id}_z{self._zone.index + 1}\"", "title": "" }, { "docid": "a265dc9eda28182fbb1d6bce078c3f35", "score": "0.6474922", "text": "def unique_id(self):\n return get_unique_id(self._data, self._smartplug.product_type, \"mode-select\", self._smartplug_id)", "title": "" }, { "docid": "0979b58d34a4983ba6df10148d47ba26", "score": "0.6449965", "text": "def device_id(self):\n return self.unique_id", "title": "" }, { "docid": "a76fbe0724af0ab00c01f95bce622fe7", "score": "0.6441553", 
"text": "def unique_id(self) -> str:\n return self.unit.unique_id", "title": "" }, { "docid": "7dc13864ee31f3db76ef34cea906a849", "score": "0.6435769", "text": "def _generate_uid( self ):\n prptool = getToolByName( self, 'portal_properties', None )\n instance = prptool.getProperty('instance')\n default_index = 'X'\n\n try: index = portalConfiguration.getAttribute( instance, 'index' ) or default_index\n except: index = default_index\n\n uid = '%012u%s%05u%05u' % ( long( time()*100 ), index, id(self)%100000, randrange(100000) )\n return uid", "title": "" }, { "docid": "c4d107159621e66c87342a8a3ae24d6a", "score": "0.64311296", "text": "def VendorClassId(self):\n return self._get_attribute('vendorClassId')", "title": "" }, { "docid": "5e7f4f1c59d4823a8589b551d545c20c", "score": "0.6429249", "text": "def unique_id(self):\r\n return self._roomcode", "title": "" }, { "docid": "38a6b8b350ce63ab77e665e9993a44d5", "score": "0.641986", "text": "def unique_id(self):\n return f\"{self._entity_prefix}-{self._sensor_name}\"", "title": "" }, { "docid": "7fdbc34c9a56adc47c6a6d395418dd04", "score": "0.64094204", "text": "def unique_id(self):\n return \"{}_{}\".format(self._system_id, self._parameter_id)", "title": "" }, { "docid": "094f5b0001dee73dae2e5d8e6b9db013", "score": "0.6399354", "text": "def unique_id(self):\n return _howto_swig.ofdm_serializer_vcc_sptr_unique_id(self)", "title": "" }, { "docid": "7c453cd2c3a89025612bd524d0111f3f", "score": "0.639399", "text": "def unique_id():\n return uuid.uuid4()", "title": "" }, { "docid": "a6cb9ed36e8738f9d41f955790532294", "score": "0.6384764", "text": "def _create_guid(self):\n return uuid.uuid4()", "title": "" }, { "docid": "97a3689a44f43934b5e1ea08af697359", "score": "0.6383493", "text": "def unique_id(self):\n return self._serial + '_' + self._type", "title": "" }, { "docid": "64d41e360ee6b194db212fe539a03d63", "score": "0.6381706", "text": "def unique_id(self):\n return self._deviceId", "title": "" }, { "docid": "a9e9eb3785c0f387ebe2ce53200b5e8e", "score": "0.63489234", "text": "def unique_id(self):\n pass", "title": "" }, { "docid": "74430e0e54bd1634ebfdcaead28e80ba", "score": "0.63483584", "text": "def unique_id(self) -> str | None:\n if (serial := self._data.get(ATTR_SERIAL_NUMBER)) is None:\n return None\n return f\"{serial}_{self.entity_description.key}\"", "title": "" }, { "docid": "efcb8014f6221fb346aaf8bec9099a6e", "score": "0.6320356", "text": "def unique_id(self):\n return self._config[CONF_ID]", "title": "" }, { "docid": "9813a0abe6fb797ae75c3fc62b759555", "score": "0.6314435", "text": "def unique_id(self):\n return self.sensor.uniqueid", "title": "" }, { "docid": "8c286149316081537fa3456c2fac1b65", "score": "0.6310827", "text": "def unique_id():\n raise NotImplementedError(\"relfs component id not implemented\")", "title": "" }, { "docid": "517583331aa661a305201a4619ad903f", "score": "0.6303098", "text": "def unique_id(self):\n return self._api.get_id()", "title": "" }, { "docid": "60cd9438b53042c5078cbb4f509e2ad5", "score": "0.6294656", "text": "def unique_id(self) -> str:\n return self._entity_id", "title": "" }, { "docid": "a36a3eb561600e4d4b925afe87b27fc7", "score": "0.62891114", "text": "def unique_id(self) -> str | None:\n return self._unique_id", "title": "" }, { "docid": "14915981bd333223e020caf1d1745cf8", "score": "0.6281576", "text": "def unique_id(self):\n return self.device.node_id", "title": "" }, { "docid": "e559f1c717925d31ad0656530bd2bd31", "score": "0.62805504", "text": "def unique_id(self):\r\n return self._client_id + 
'_climate'", "title": "" }, { "docid": "d0ca3f11ce8d71b386e362893428bf1b", "score": "0.6277311", "text": "def device_uuid(self) -> str:\n return self._device_uuid", "title": "" }, { "docid": "23227e156ae02f46f3d427aec196b06b", "score": "0.62761956", "text": "def unique_id(self):\n return self._innogy_device.capabilities_dict[self._sensor_type][\"id\"]+str(self._sensor_type)", "title": "" }, { "docid": "ab5009e5863338e82e3988560c7c60cb", "score": "0.6260033", "text": "def unique_id(self):\n return f\"octopus_energy_intelligent_smart_charge\"", "title": "" }, { "docid": "493227ce171f82e409e0a4e3ebca730c", "score": "0.6256353", "text": "def generate_sim_id(self):\n import uuid\n sim_id = uuid.uuid4().hex\n return sim_id", "title": "" }, { "docid": "978e1a3c81ce6acb2f923a0a992ff1af", "score": "0.6238449", "text": "def unique_id(self):\n return _howto_swig.ofdm_cyclic_prefixer_sptr_unique_id(self)", "title": "" }, { "docid": "abba7e547fee73bd79ee8831cc3edb6d", "score": "0.62364066", "text": "def unique_id(self):\n # _LOGGER.info(\"sensor_unique_id: %s\", self._unique_id)\n return f\"{self._unique_id}-{self.kind}\".lower()", "title": "" }, { "docid": "793a4014e49c3b206108c4bf70290981", "score": "0.6230852", "text": "def _calculate_unique_id(self):\r\n return abs(hash(self.instance_id) % (2 ** 31))", "title": "" }, { "docid": "810975cdf9217018052478cfecd9185a", "score": "0.6208628", "text": "def unique_id(self):\n return _howto_swig.ofdm_carrier_allocator_cvc_sptr_unique_id(self)", "title": "" }, { "docid": "4ee859366410606f28e206a390d230e1", "score": "0.62055516", "text": "def id(self):\n return \"{model:s}--{serial:08x}\".format(model=self.model.replace('-',''), serial=self.serial_number).lower()", "title": "" }, { "docid": "8ecfe86ac2a94b078cbbd4178831057b", "score": "0.6205479", "text": "def unique_id(self):\n return \"_\".join([DOMAIN, self.repository, self.branch])", "title": "" }, { "docid": "0a01792d09d615712121f2b74a4cc170", "score": "0.6205287", "text": "def uuid(self):\n pass", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return 
self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "7350819feb7aac1959509330f4846083", "score": "0.6205193", "text": "def unique_id(self):\n return self._unique_id", "title": "" }, { "docid": "37dea38f2310b5a71b3923632ded9785", "score": "0.6203795", "text": "def unique_id(self):\n return \"remote_\" + self._atv.metadata.device_id", "title": "" }, { "docid": "0d3f239118dbd91fa0d1abef94395f7b", "score": "0.619249", "text": "def unique_val(self):\n return str(uuid.uuid1())", "title": "" }, { "docid": "7b74433c34ee91c532a7cdf5ffafaa95", "score": "0.61899126", "text": "def unique_id(self) -> str:\n acc = self._account\n return f\"{acc.api.__class__.__name__}_account_{acc.id}\"", "title": "" }, { "docid": "ffa386173f81193cba503202acdd0d48", "score": "0.61867553", "text": "def vendor_string(self):\n return str_from_int_list(\n [\n self.capabilities.properties_fixed.vendor_string_1,\n self.capabilities.properties_fixed.vendor_string_2,\n self.capabilities.properties_fixed.vendor_string_3,\n self.capabilities.properties_fixed.vendor_string_4,\n ]\n )", "title": "" }, { "docid": "531734396bcc72f3f4e54c24ef52ed67", "score": "0.61789757", "text": "def client_id():\n _id = random.SystemRandom().getrandbits(0x58)\n# _id = (base64.urlsafe_b64encode(uuid.uuid4().bytes)).replace('=', '')\n\n return f'client-{_id}'", "title": "" }, { "docid": "edb014515503c840719fbb87367ea9eb", "score": "0.6177064", "text": "def unique_id() -> str:\n return str(uuid4())", "title": "" }, { "docid": "78b3e3000e4277a32ec1ee606d856283", "score": "0.6174695", "text": "def get_uuid(self):\n return self.__uuid", "title": "" }, { "docid": "5e4c9a21d738312643bb7e1039ba70c3", "score": "0.6171624", "text": "def unique_id(self):\n return f\"{self.config_entry.entry_id}{self.entity_type.lower()}\"", "title": "" } ]
77a12a55419b78ec5094f90baa955a1c
Entry action for state 'brakelightsOn'..
[ { "docid": "0ae417aa5b0342c341ca4d9999c060a3", "score": "0.7652496", "text": "def __entry_action_main_region__car_off_r2__shuting_down_r2_1_brakelights_on(self):\n\t\tself.timer_service.set_timer(self, 2, 300, False)\n\t\tself.operation_callback.brakelights(\"on\")", "title": "" } ]
[ { "docid": "ce20a3588caa97f044bab2c4dfca4013", "score": "0.7132828", "text": "def __enter_sequence_main_region__car_off_r2__shuting_down_r2_1_brakelights_on_default(self):\n\t\tself.__entry_action_main_region__car_off_r2__shuting_down_r2_1_brakelights_on()\n\t\tself.__state_vector[0] = self.State.main_region_car_off_r2_shuting_down_r2_1_brakelights_on\n\t\tself.__state_conf_vector_changed = True", "title": "" }, { "docid": "0660eb0ce9effaaff60543d12f74c40d", "score": "0.68025374", "text": "def BrakesOn(self):\n cmd = 'BrakesOn'\n return self.exchange_msg(cmd)", "title": "" }, { "docid": "132393900326791e2f881ed54ee6609c", "score": "0.6638527", "text": "def actuateBraking(self) :\n\n #Get the argument from the router board and convert into a Carla value\n #Value comes in as a 8 bit unsigned int (0 to 255)\n arg = list(self.serial.read(1))[0]\n carlaValue = mapValue(arg, 0, 255, 0, 1)\n\n #Braking : float (0 to 1)\n self.simVehicle.updateBraking(carlaValue)", "title": "" }, { "docid": "a5b900f2786d911fca448a72ebbf2128", "score": "0.66024953", "text": "def __exit_action_main_region__car_off_r2__shuting_down_r2_1_brakelights_on(self):\n\t\tself.timer_service.unset_timer(self, 2)", "title": "" }, { "docid": "08f8c85b69b48a30f372be3ca8505820", "score": "0.6536591", "text": "def __react_main_region__car_off_r2__shuting_down_r2_1__entry__default(self):\n\t\tself.__enter_sequence_main_region__car_off_r2__shuting_down_r2_1_brakelights_on_default()", "title": "" }, { "docid": "7087fc6a9a1d93661baa514f2d60e4a4", "score": "0.65021026", "text": "def __exit_sequence_main_region__car_off_r2__shuting_down_r2_1_brakelights_on(self):\n\t\tself.__state_vector[0] = self.State.null_state\n\t\tself.__exit_action_main_region__car_off_r2__shuting_down_r2_1_brakelights_on()", "title": "" }, { "docid": "d16f5bb01f104ee27c42b86a34f9d252", "score": "0.6359715", "text": "def brake(self):\n self.core.brake()", "title": "" }, { "docid": "c3f4a8a7962f515b8539dd7ebce1c39a", "score": "0.6289191", "text": "def __entry_action_main_region__car_off_r2__shuting_down_r2_1_brakelights_off(self):\n\t\tself.timer_service.set_timer(self, 3, 300, False)\n\t\tself.operation_callback.brakelights(\"off\")", "title": "" }, { "docid": "2cb40ca74f1a4516610fec1d1eff9fca", "score": "0.62687796", "text": "def __main_region__car_off_r2__shuting_down_r2_1_brakelights_on_react(self, transitioned_before):\n\t\ttransitioned_after = transitioned_before\n\t\tif transitioned_after < 0:\n\t\t\tif self.__time_events[2]:\n\t\t\t\tself.__exit_sequence_main_region__car_off_r2__shuting_down_r2_1_brakelights_on()\n\t\t\t\tself.__enter_sequence_main_region__car_off_r2__shuting_down_r2_1_brakelights_off_default()\n\t\t\t\tself.__main_region__car_off_r2__shuting_down_react(0)\n\t\t\t\ttransitioned_after = 0\n\t\t#If no transition was taken then execute local reactions\n\t\tif transitioned_after == transitioned_before:\n\t\t\ttransitioned_after = self.__main_region__car_off_r2__shuting_down_react(transitioned_before)\n\t\treturn transitioned_after", "title": "" }, { "docid": "8bb32a253caf12daa1ffb23f1f484050", "score": "0.61887133", "text": "def __enter_sequence_main_region__car_off_r2__shuting_down_r2_1_brakelights_off_default(self):\n\t\tself.__entry_action_main_region__car_off_r2__shuting_down_r2_1_brakelights_off()\n\t\tself.__state_vector[0] = self.State.main_region_car_off_r2_shuting_down_r2_1_brakelights_off\n\t\tself.__state_conf_vector_changed = True", "title": "" }, { "docid": "1cd76e9e604129d4e1b55e0e99c81ea6", "score": "0.6123116", "text": "def 
__exit_action_main_region__car_on_r1__moving_fwd(self):\n\t\tself.operation_callback.brakelights(\"on\")", "title": "" }, { "docid": "6fedc98d78b89b4bf8fe589782cb32af", "score": "0.5868734", "text": "def _brake():\n raise NotImplementedError", "title": "" }, { "docid": "41eb24b49b7c38a7210feb1115f1d883", "score": "0.56570584", "text": "def cb_bell(addr, tags, stuff, source):\n global ard, FRAME_NR\n #print \"params:\", stuff\n idx = stuff[0]\n stat = stuff[1]\n if ard:\n cmd = \"{0} {1} {2};\\r\\n\".format(stat, idx, FRAME_NR)\n print \"sending: \", cmd\n # cmd:on/off node frame\n ard.write(cmd)\n FRAME_NR += 1", "title": "" }, { "docid": "1e3bb65d886782fd801a6f65c3e29271", "score": "0.56555367", "text": "def post(self, *args, **kwargs):\n post_data = json_loads(self.request.body)\n username = post_data.get('username', '[unknown]')\n data = post_data.get('data', '')\n\n if data == 'on' and not bulb_instance.is_on:\n bulb_instance.set_power_state('on')\n client_pool.broadcast_message('Bulb is now ON. Changed by user %s' % username)\n elif data == 'off' and bulb_instance.is_on:\n bulb_instance.set_power_state('off')\n client_pool.broadcast_message('Bulb is now OFF. Changed by user %s' % username)\n elif isinstance(data, dict) and 'color' in data:\n bulb_instance.set_color(data['color'])\n client_pool.broadcast_message('Bulb color is now %s. Changed by user %s' % (data['color'], username))", "title": "" }, { "docid": "cb5c53609a3cfed9d6868988ba308e70", "score": "0.5627728", "text": "def coolant_on(self):\n print(\"Coolant turned on.\")", "title": "" }, { "docid": "cb5c53609a3cfed9d6868988ba308e70", "score": "0.5627728", "text": "def coolant_on(self):\n print(\"Coolant turned on.\")", "title": "" }, { "docid": "325f96d1d1e37da581897594c8afa80d", "score": "0.56147575", "text": "def BT(aaa):\n # Branch to location aaa if switch is ON.\n # Otherwise, continue in sequence.\n brancht(switch, aaa)", "title": "" }, { "docid": "1cd65ca0022f656f5ed658c5cb266717", "score": "0.55861384", "text": "def event_m10_14_x187(z5=_):\r\n \"\"\"State 0,1: Lighting flag ON\"\"\"\r\n SetEventFlag(z5, 1)\r\n \"\"\"State 2: End state\"\"\"\r\n return 0", "title": "" }, { "docid": "28b1a698a40bf5c8addf301eae58948f", "score": "0.5570034", "text": "def bl_on(self):\n return self.send(\"enable_bl\", [1])", "title": "" }, { "docid": "eacc692dd3270575238621c55dd3ec5a", "score": "0.55624074", "text": "def __exit_action_main_region__car_off_r2__shuting_down_r2_1_brakelights_off(self):\n\t\tself.timer_service.unset_timer(self, 3)", "title": "" }, { "docid": "c98faa87dd2f9870ade4b54d262fc233", "score": "0.5480311", "text": "def _brake(self):\n self.speed_profile = speed_profile.Speed([1], [0])", "title": "" }, { "docid": "362c724505c62688fc479142ad2b8872", "score": "0.546474", "text": "def lights(self):\n cl = cherrypy.request.headers['Content-Length']\n rawbody = cherrypy.request.body.read(int(cl))\n body = json.loads(rawbody)\n self.theIrrigationEngine.lock.acquire()\n self.theIrrigationEngine.db[\"lights\"]=body[\"controls\"]\n self.theIrrigationEngine.lightsState['state']=body[\"state\"]\n self.theIrrigationEngine.lightsState['changeCount'] += 1\n self.theIrrigationEngine.writeConfig()\n self.theIrrigationEngine.lock.release()\n return \"Updated Lights\"", "title": "" }, { "docid": "c9a76bf0607d9e2492b2f2fd74d220cb", "score": "0.544184", "text": "async def strobe_on(self) -> None:\n _LOGGER.debug(f\"Turning on strobe for %s\", self.name)\n await self.send_command(hm.CMD_STROBE)", "title": "" }, { "docid": 
"0d8368e40cac5e29e74a5b6aeb2b1ea6", "score": "0.5436836", "text": "def bounce(self, info):", "title": "" }, { "docid": "70cbc65163b8a824570f4ce275e63883", "score": "0.54296637", "text": "def action(self, state):\n pass", "title": "" }, { "docid": "98f7489a16c377c082d004e37fd66f94", "score": "0.5399465", "text": "def amber_on():\n Leds.set_amber(1)", "title": "" }, { "docid": "c6d3ec986de2b768cf80e259f19e2ed4", "score": "0.53873086", "text": "def __main_region__car_off_r2__shuting_down_r2_1_brakelights_off_react(self, transitioned_before):\n\t\ttransitioned_after = transitioned_before\n\t\tif transitioned_after < 0:\n\t\t\tif self.__time_events[3]:\n\t\t\t\tself.__exit_sequence_main_region__car_off_r2__shuting_down_r2_1_brakelights_off()\n\t\t\t\tself.__enter_sequence_main_region__car_off_r2__shuting_down_r2_1_brakelights_on_default()\n\t\t\t\tself.__main_region__car_off_r2__shuting_down_react(0)\n\t\t\t\ttransitioned_after = 0\n\t\t#If no transition was taken then execute local reactions\n\t\tif transitioned_after == transitioned_before:\n\t\t\ttransitioned_after = self.__main_region__car_off_r2__shuting_down_react(transitioned_before)\n\t\treturn transitioned_after", "title": "" }, { "docid": "2230228df1ffda5e438af391d1a9fad2", "score": "0.53593016", "text": "def func(self):\n\n if self.obj.light():\n self.caller.msg(\"You light %s.\" % self.obj.key)\n self.caller.location.msg_contents(\"%s lights %s!\" % (self.caller, self.obj.key), exclude=[self.caller])\n else:\n self.caller.msg(\"%s is already burning.\" % self.obj.key)", "title": "" }, { "docid": "02a18e9c02eeda4b7bc3ca198babb14b", "score": "0.5349068", "text": "def _switch_on_action(self):\n pass", "title": "" }, { "docid": "fe71352006a6f8982c4c6ea27636252a", "score": "0.53402174", "text": "def branche(flag):\n # Halt if switch is off.\n # Otherwise, continue in sequence.\n if flag:\n fail(\"branche:BE instruction executed\")", "title": "" }, { "docid": "a9e246f214510b3426559f2839d228cb", "score": "0.5337549", "text": "def give_drink_to_some_habbo(self):\n self.state = \"looking for a drink\"\n self.find_cafe_and_go()\n time.sleep(10)\n self.find_habbos_talking()\n time.sleep(1)\n self.find_and_open_habbo_menu()\n time.sleep(2)\n self.find_givedrink_button_and_click()", "title": "" }, { "docid": "e014e8204fdd05b46258cbee4370598d", "score": "0.53299266", "text": "def hb_pressed(self):\n # Caling the ES function as Home = ES + goal.\n self.es_pressed()\n\n # Logging/Printing the Goal.\n rospy.loginfo(CBLUE2 + \"Going to Chef's Kitchen\" + CEND)\n\n # Sending the robot the goal coordinates.\n self.robot.go(\n goal_x=self.goals[\"home\"][0], goal_y=self.goals[\"home\"][1], table=False\n )", "title": "" }, { "docid": "ed4bd8a91d4aaa07dbb6887961085044", "score": "0.53280103", "text": "def turnCoolerOn():\n pass", "title": "" }, { "docid": "93c84bb86ad5fc880733b501a0a5bcb8", "score": "0.5322387", "text": "def action(self, status):\n pass", "title": "" }, { "docid": "4f6f4232b6a217825577e3addb8f93a7", "score": "0.5311763", "text": "def on(self):\n self.light.low()", "title": "" }, { "docid": "77d2a4e5574c995607d4ca0d84573f92", "score": "0.5282524", "text": "def control_lights(state):\n global show_lights\n\n if show_lights:\n for led in (RED, AMBER, GREEN):\n GPIO.output(LED[led], state[led])\n else:\n for led in (RED, AMBER, GREEN):\n GPIO.output(LED[led], 0)", "title": "" }, { "docid": "ca1bcf20948ae0b6ae305725b8ef4528", "score": "0.5278059", "text": "def 
__exit_sequence_main_region__car_off_r2__shuting_down_r2_1_brakelights_off(self):\n\t\tself.__state_vector[0] = self.State.null_state\n\t\tself.__exit_action_main_region__car_off_r2__shuting_down_r2_1_brakelights_off()", "title": "" }, { "docid": "40e43c813ced984901390d82f5c379fd", "score": "0.52659804", "text": "def LED_on():\n # print('LED ON')\n LED.start(1)\n global LED_state\n LED_state = True", "title": "" }, { "docid": "c65e4823fb9dd7d64a5713fc1445748f", "score": "0.5260701", "text": "def call_turn_on(self):\n self.action(\"turnOn\")", "title": "" }, { "docid": "0faa84fe0bb8bfd9f5fbff9e8af86d5f", "score": "0.5260237", "text": "def enter_blood_rinseback_state(runner):\r\n #The code block below constitutes the remnants of my failed attempts at validly entering BLOOD_RINSEBACK (passing doFirstDrawCheck())\r\n #this will be left out for now in hopes that spoofer re-architecture will make this more plausable\r\n #runner.AddCommand('spoof_main \"-object CHwStates -data inletRevs -value 13.4808\"\\n','->',False)\r\n #runner.AddCommand('spoof_main \"-object CHwStates -data acRevs -value 1.69616\"\\n','->',False)\r\n #runner.Run()\r\n #runner.ResetCommands()\r\n\r\n #print(\"Accumulating Inlet volume\")\r\n #time.sleep(7)\r\n\r\n #runner.AddCommand('spoof_main \"-unspoof -object CHwStates -data acRevs\"\\n','->',False)\r\n #runner.AddCommand('spoof_main \"-unspoof -object CHwStates -data inletRevs\"\\n','->',False)\r\n #runner.AddCommand('spoof_main \"-send SystemStateChangeRequestMsg -int 10001\"\\n','->',False)\r\n #runner.AddCommand('spoof_main \"-object CHwStates -data reservoir -value CHW_RESERVOIR_HIGH\"\\n','->',False)\r\n ##runner.AddCommand('spoof_main \"-object ProcTrimaSet_CassetteStateType -data data -value 8\"\\n','->',False)\r\n ##runner.AddCommand('spoof_main \"-id EnableDisable -var1 6500 -var2 1000 -timeout 7\"\\n','->',False)\r\n runner.AddCommand('spoof_main \"-send SystemStateChangeMsg -int 9\"\\n','->',False)\r\n runner.Run()\r\n runner.ResetCommands()", "title": "" }, { "docid": "e4f801d885073df1efc79f9118b1f94b", "score": "0.5260069", "text": "def handle_states(self):\n if self.state == c.RESTING:\n self.resting()\n elif self.state == c.BUMPED:\n self.bumped()\n elif self.state == c.OPENED:\n self.opened()", "title": "" }, { "docid": "7d758ebcceee2716ca61f2317b3e79e6", "score": "0.52533466", "text": "def _on_bike_returned(self, evt: AtomEvent):\n payload: BikeReturnPayload = evt.payload\n\n station: Station = self._stations[payload.to_station_idx]\n\n station_bikes = station.bikes\n return_number = payload.number\n\n empty_docks = station.capacity - station_bikes\n\n max_accept_number = min(empty_docks, return_number)\n\n if max_accept_number < return_number:\n src_station = self._stations[payload.from_station_idx]\n\n additional_bikes = return_number - max_accept_number\n\n station.failed_return += additional_bikes\n\n # We have to move additional bikes to neighbors.\n self._decision_strategy.move_to_neighbor(\n src_station,\n station,\n additional_bikes,\n )\n\n station.bikes = station_bikes + max_accept_number", "title": "" }, { "docid": "231394e033dc5f5902db1bea81dec132", "score": "0.52528834", "text": "def bounce(self):\n if self.material.lower() == \"stone\":\n print(\"Thud\")\n else:\n print(\"Boing\")", "title": "" }, { "docid": "3ccdb6e464e7199b503af0fed4cac6c0", "score": "0.5252108", "text": "def action_activate(self):\n return self.write({'state': 'in_progress'})", "title": "" }, { "docid": "b900ba8a263c409e113bfeec89dd9d8f", "score": "0.5239789", "text": 
"def block_active(self, x):", "title": "" }, { "docid": "fa4b92e0ae2773f89eb5a8f237ebc3ca", "score": "0.5237044", "text": "def callback(self, instance, value):\n button_state = False if value == 'normal' else True\n\n light_state = self.get_state()\n\n if light_state != button_state:\n # Is light offline?\n if light_state is None:\n return\n\n self.lifxlight.set_power(button_state)\n self.rooms.refresh()\n\n Logger.debug(\"Lights: Light is %s, value is %s, get_power is %s\" % (self.label, value, self.get_state()))", "title": "" }, { "docid": "436ffe728aa4b143d0b6ed30d20f2389", "score": "0.5235025", "text": "def LED_on():\r\n print('LED ON')\r\n LED.start(1)\r\n global LED_state\r\n LED_state = True", "title": "" }, { "docid": "df5e6892e02e22c4259fdbc5f5b7b603", "score": "0.5235014", "text": "def default_on_state(self, action, context):\n if context.state == \"stop\":\n context.state = \"ready\"", "title": "" }, { "docid": "bdb9a635d0fa458267c86cc37a3a125e", "score": "0.522901", "text": "def on_ball(self): # called by State.next if applicable\n import navigation\n return navigation.GoToBall()", "title": "" }, { "docid": "bc720ae42c96f6fc5ae00a1cf316b15c", "score": "0.5227786", "text": "def enter_blood_prime_state(runner):\r\n runner.AddCommand('spoof_main \"-send SystemStateChangeMsg -int 7\"\\n','->',False)\r\n runner.AddCommand('spoof_main \"-object SHwStates -data centrifugeRPM -value 201\"\\n','->',False)\r\n runner.AddCommand('spoof_main \"-object CHwStates -data centrifugeRPM -value 201\"\\n','->',False)\r\n runner.AddCommand('spoof_main \"-object HalStatus_centActRPM -data data -value 201\"\\n','->',False)\r\n runner.AddCommand('spoof_main \"-object ProcVolumes_Vreturn -data data -value -25\"\\n','->',False)\r\n runner.Run()\r\n runner.ResetCommands()", "title": "" }, { "docid": "c71073eff38d285eb2fed6ee0358331e", "score": "0.5226186", "text": "def toggle_bnbBurn(self, **kwargs):\n\n return self.sign_request(\"POST\", \"/sapi/v1/bnbBurn\", kwargs)", "title": "" }, { "docid": "9da3e9ce83d12e5d4c8673a7694df83d", "score": "0.5223517", "text": "def activated(self):", "title": "" }, { "docid": "e83812e176834b3f33678f9eaa20e5da", "score": "0.52097446", "text": "def HV_on_action(self):\n bias_voltage = self.variables.default_values_dict[\"settings\"][\"bias_voltage\"]\n EndVolt = self.single_strip.max_voltage_strip.value()\n Steps = self.single_strip.voltage_steps_strip.value()\n compliance = self.single_strip.compliance_strip.value()\n\n if not self.HV_on:\n ramp_voltage_job(\n self.variables.message_from_main,\n self.variables.devices_dict[\"IVSMU\"],\n bias_voltage,\n EndVolt,\n Steps,\n 0.3,\n compliance,\n )\n else:\n ramp_voltage_job(\n self.variables.message_from_main,\n self.variables.devices_dict[\"IVSMU\"],\n bias_voltage,\n 0,\n Steps,\n 0.3,\n compliance,\n )", "title": "" }, { "docid": "e0869316542c6bea2b436805047eb02f", "score": "0.52076894", "text": "def wardrobe_handler(handler_input):\n state_variables = handler_input.attributes_manager.persistent_attributes \n \n response = OctopusRoom.wardrobe(state_variables)\n \n AlexaHelper.process_response(handler_input, response)\n \n return handler_input.response_builder.response", "title": "" }, { "docid": "31b4c214203aafa402eb7c15027af513", "score": "0.5206549", "text": "def cancelling_bust(self):\n self.action = Constants.ACTION_NOTHING\n self.value = Constants.VALUE_BUSTER_NOTHING\n self.state = Constants.STATE_BUSTER_NOTHING\n print(\"Buster team {} with id {} cancelled busting\".format(self.type, self.id))", "title": "" }, { 
"docid": "df92f600254391598494da1a4faae271", "score": "0.52013224", "text": "def enter_blood_run_state(runner):\r\n runner.AddCommand('spoof_main \"-send SystemStateChangeMsg -int 8\"\\n','->',False)\r\n runner.Run()\r\n runner.ResetCommands()", "title": "" }, { "docid": "af0a13a778e150d0aa593f7b72720250", "score": "0.51886344", "text": "def action(self):\n console.terse(\"{0}\\n\".format(self.action.__doc__))\n\n # Prepare\n # add alived minions\n self.aliveds.value['alpha'] = createStack('1.1.1.1')\n self.aliveds.value['beta'] = createStack('1.2.3.4')\n # add presence request\n testStack = self.event_stack.value\n presenceReq = self.presence_req.value\n ryn = 'manor'\n presenceReq.append({'route': {'dst': (None, ryn, 'presence_req'),\n 'src': (None, testStack.local.name, None)},\n 'data': {'state': 'alived'}})", "title": "" }, { "docid": "6417ceb039fc998a09a93376a7ade707", "score": "0.5188379", "text": "def postflight(self):\n\n pass", "title": "" }, { "docid": "6417ceb039fc998a09a93376a7ade707", "score": "0.5188379", "text": "def postflight(self):\n\n pass", "title": "" }, { "docid": "3b91336ac1a09771bbb836c4bbc43669", "score": "0.51752806", "text": "def onboarded_status(self, onboarded_status):\n\n self._onboarded_status = onboarded_status", "title": "" }, { "docid": "38d0cbcf320ba43b1534fa1368df4d85", "score": "0.51714253", "text": "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n alphabeta, action, score = self.AlphaBeta(gameState, 0, 0, float(\"-inf\"), float(\"inf\")) \n return action", "title": "" }, { "docid": "ea0931805370adebaeff98c9046f7510", "score": "0.5165325", "text": "def SmoothWBRbt(self) -> bool:", "title": "" }, { "docid": "49ee87fc4dd6fa83359306fd7e780838", "score": "0.51633215", "text": "def event_m10_14_x186(z4=_):\r\n \"\"\"State 0,1: Waiting for lighting\"\"\"\r\n CompareObjState(0, z4, 30, 0)\r\n assert ConditionGroup(0)\r\n \"\"\"State 2: End state\"\"\"\r\n return 0", "title": "" }, { "docid": "6c43abad1503329c6fa1c8acb8f0e869", "score": "0.51480806", "text": "def bnbBurn_status(self, **kwargs):\n\n return self.sign_request(\"GET\", \"/sapi/v1/bnbBurn\", kwargs)", "title": "" }, { "docid": "a847adc809288830197e971876325b4a", "score": "0.5133198", "text": "def _on_rebalance_bikes(self, evt: AtomEvent):\n\n # Get stations that need an action.\n stations_need_decision = self._decision_strategy.get_stations_need_decision(\n evt.tick,\n )\n\n if len(stations_need_decision) > 0:\n # Generate a decision event.\n for station_idx, decision_type in stations_need_decision:\n decision_payload = DecisionEvent(\n station_idx,\n evt.tick,\n self.frame_index(evt.tick),\n self._decision_strategy.action_scope,\n decision_type,\n )\n\n decision_evt = self._event_buffer.gen_decision_event(\n evt.tick,\n decision_payload,\n )\n\n self._event_buffer.insert_event(decision_evt)", "title": "" }, { "docid": "83509c2d9099b17772624a2a29702396", "score": "0.5131294", "text": "def landing_transition(self):\n self.land()\n self.flight_state = States.LANDING\n print(\"landing transition\")", "title": "" }, { "docid": "84bf9129fdbad2867baad62c9395f21a", "score": "0.5119104", "text": "def action(self):\n console.terse(\"{0}\\n\".format(self.action.__doc__))\n\n # Prepare\n # add available minions\n self.availables.value.add('alpha')\n self.aliveds.value['alpha'] = createStack('1.1.1.1')\n # add presence request\n testStack = self.event_stack.value\n presenceReq = self.presence_req.value\n ryn = 'manor'\n presenceReq.append({'route': {'dst': (None, ryn, 'presence_req'),\n 'src': 
(None, testStack.local.name, None)},\n 'data': {'state': 'available'}})", "title": "" }, { "docid": "4c122fb0c22a65ef325600a4090fa71c", "score": "0.51101995", "text": "def brake(self, value=DEFAULT_BRAKING_ACCELERATION):\n\t\tif not self.isMoving():\n\t\t\tpass\n\n\t\tself.isBraking = True\n\t\tif self.velocity > 0:\n\t\t\tself.accelerate(-value)\n\t\telif self.velocity < 0:\n\t\t\tself.accelerate(value)", "title": "" }, { "docid": "8a893ec34faeaf0d567ca0c11c7179f2", "score": "0.510749", "text": "def turn_on(self, **kwargs):\n self.action(\"TURN_ON\")", "title": "" }, { "docid": "8c9d73ae8332c03c27c3407e019342d1", "score": "0.50995964", "text": "def main():\n print(\"Welcome to the Bag-smith!\")\n\n state = get_bag_state()\n\n print(\"State acquired. Let's start.\")\n\n print(\"\\n**History Layer**\")\n history_layer(state)\n\n print(\"\\n**Code Layer**\")\n code_layer(state)\n\n print(\"\\n**Switches Layer**\")\n switches_layer(state)\n\n print(\"\\n**Button Layer**\")\n button_layer(state)\n\n print(\"Layers bypassed.\")\n print(\"Wait\", state['suspicion level'],\n \"seconds or more to allow suspicion level to dissipate.\")", "title": "" }, { "docid": "cf415d0eb0536aed161e51e726748f65", "score": "0.50984085", "text": "def hit(self):\r\n print('apple get')\r\n pass", "title": "" }, { "docid": "32c0ecd2241ef996c0324d467e9d4c5f", "score": "0.5097651", "text": "def BF(aaa):\n # Branch to location aaa if switch is OFF.\n # Otherwise, continue in sequence.\n branchf(switch, aaa)", "title": "" }, { "docid": "42f14b74c80d71cf419190a0c68da898", "score": "0.5096401", "text": "def backlight(self, state=2):\n if not state in (0, 1, 2):\n raise \"Invalid bacaklight state: it must be either 0, 1 or 2 for toggle.\"\n else:\n self.lib.serdisp_setoption(self.disp, 'BACKLIGHT', state)", "title": "" }, { "docid": "a6da6ef3b9027d3df95adaf828f7659f", "score": "0.5095191", "text": "def BE():\n # Halt if swithch is OFF.\n # Otherwise, continue in sequence.\n if not switch:\n dump_instrs()\n fail(\"BE:branch to error executed\")", "title": "" }, { "docid": "88bcbd0253aca930dc45595c22025bb7", "score": "0.508891", "text": "def turn_on(self, **kwargs):\n self.wink.set_state(True)", "title": "" }, { "docid": "f80cb2ccc84cc0b5b4d24dcde6e9be48", "score": "0.50825506", "text": "def activate(self):\n \n pass", "title": "" }, { "docid": "649e87440ccf6df5e73528996827e8aa", "score": "0.50803524", "text": "def battle(self):", "title": "" }, { "docid": "3a0e35245b03d5cf37b4f6e88068d20c", "score": "0.50782603", "text": "def actions(self, state):\n pass", "title": "" }, { "docid": "93d6a012d27c9c1c60ac8516d5ee8c00", "score": "0.50752574", "text": "def parpadeo_breve():\n led.value(0)\n utime.sleep_ms(100)\n led.value(1)", "title": "" }, { "docid": "a60d70e9c43c2e49963a0488070276fb", "score": "0.5073084", "text": "def on() -> None:", "title": "" }, { "docid": "9abb61fd931a3fcff1390c1cccd4cfda", "score": "0.50714123", "text": "def hide_ball(self, packet, drones, start_time) -> StepResult:\n self.game_interface.set_game_state(GameState(ball=BallState(physics=Physics(\n location=Vector3(0, 0, 3000),\n velocity=Vector3(0, 0, 0),\n angular_velocity=Vector3(0, 0, 0)))))\n return StepResult(finished=True)", "title": "" }, { "docid": "355a7cb89e2053da5b29b0b974a40e51", "score": "0.5068313", "text": "def action(self):\n console.terse(\"{0}\\n\".format(self.action.__doc__))\n\n # Prepare\n # add available minions\n self.availables.value.add('alpha')\n self.availables.value.add('beta')\n self.availables.value.add('gamma')\n 
self.aliveds.value['alpha'] = createStack('1.1.1.1')\n self.aliveds.value['delta'] = createStack('1.2.3.4')\n # add presence request\n testStack = self.event_stack.value\n presenceReq = self.presence_req.value\n ryn = 'manor'\n presenceReq.append({'route': {'dst': (None, ryn, 'presence_req'),\n 'src': (None, testStack.local.name, None)},\n 'data': {'state': 'available'}})", "title": "" }, { "docid": "f57a5a2b07763d40ac4e2c41a6013442", "score": "0.5067158", "text": "def handle_event(self) -> None:\n eventservice_xml = (\n '<scpd xmlns=\"urn:Belkin:service-1-0\">'\n \"<actionList>\"\n \"<action>\"\n \"<name>SetBinaryState</name>\"\n \"<argumentList>\"\n \"<argument>\"\n \"<retval/>\"\n \"<name>BinaryState</name>\"\n \"<relatedStateVariable>BinaryState</relatedStateVariable>\"\n \"<direction>in</direction>\"\n \"</argument>\"\n \"</argumentList>\"\n \"</action>\"\n \"<action>\"\n \"<name>GetBinaryState</name>\"\n \"<argumentList>\"\n \"<argument>\"\n \"<retval/>\"\n \"<name>BinaryState</name>\"\n \"<relatedStateVariable>BinaryState</relatedStateVariable>\"\n \"<direction>out</direction>\"\n \"</argument>\"\n \"</argumentList>\"\n \"</action>\"\n \"</actionList>\"\n \"<serviceStateTable>\"\n '<stateVariable sendEvents=\"yes\">'\n \"<name>BinaryState</name>\"\n \"<dataType>Boolean</dataType>\"\n \"<defaultValue>0</defaultValue>\"\n \"</stateVariable>\"\n '<stateVariable sendEvents=\"yes\">'\n \"<name>level</name>\"\n \"<dataType>string</dataType>\"\n \"<defaultValue>0</defaultValue>\"\n \"</stateVariable>\"\n \"</serviceStateTable>\"\n \"</scpd>\"\n ) + 2 * Fauxmo.NEWLINE\n\n event_response = self.add_http_headers(eventservice_xml)\n logger.debug(f\"Fauxmo response to setup request:\\n{event_response}\")\n self.transport.write(event_response.encode())\n self.transport.close()", "title": "" }, { "docid": "22266691084d530d992623ffb5d97fb0", "score": "0.5065021", "text": "def turnHeaterOn():\n pass", "title": "" }, { "docid": "a4377da61fee0ccc721b423ff2d4d56e", "score": "0.50596035", "text": "def on(self):\n self.status.set('on')", "title": "" }, { "docid": "05643865976709bc18d907b4f7bb4a41", "score": "0.5044742", "text": "async def _patch_blower(hass, config_entry, fan_state, client):\n client.get_blower.return_value = fan_state\n\n if fan_state is not None and fan_state <= len(FAN_SETTINGS):\n await common.async_set_fan_mode(hass, FAN_SETTINGS[fan_state])\n await client.new_data_cb()\n await hass.async_block_till_done()\n\n return hass.states.get(ENTITY_CLIMATE)", "title": "" }, { "docid": "2d023593a466703908b3bff9979a1f7c", "score": "0.50430065", "text": "def branch_on(branch_id=None, branch_alert=None, pump_enable=True):\n if (branch_id is None):\n logging.error(\"No branch id\")\n return None\n\n if (branch_alert is None):\n logging.error(\"No branch alert time\")\n return None\n\n on(BRANCHES[branch_id]['pin'])\n\n if pump_enable is False:\n logging.info(\"Pump won't be turned on with {0} branch id\".format(branch_id))\n else:\n on(PUMP_PIN)\n logging.info(\"Pump turned on with {0} branch id\".format(branch_id))\n\n return form_pins_state()", "title": "" }, { "docid": "379663a601cab5989f46a784a51e7743", "score": "0.5020955", "text": "def Sonic_Amplifler Bow(self):\n\t\tprint(self.name.title() + \" is now shotting.\")", "title": "" }, { "docid": "e11a6ab58a70a4d40955dce12ce9ba6e", "score": "0.5016939", "text": "def turn_on(self):\n logger.info(\"Turning on laser\")\n return self.send_cmd(f\"@cob1\")", "title": "" }, { "docid": "130576a53669eedfdc907b3622966c8b", "score": "0.50137883", "text": 
"def buzzer_off():\n if request.method =='POST':\n send([0x05])\n print(\"off\")\n return render_template('video.html',msg=\"ON\")", "title": "" }, { "docid": "dae78849bd5a5c5e8f00efe4d91853db", "score": "0.50065565", "text": "def switches_layer(bag_state):\n # Get the switch count from the bag\n num_switch = bag_state['switch count']\n\n # Loops once for every switch.\n for switch in range(num_switch):\n # Prompt and get light statuses.\n print(\"Does switch {} have a red light?\".format(switch))\n red = read_bool()\n print(\"Does switch {} have a blue light?\".format(switch))\n blue = read_bool()\n print(\"Does switch {} have a green light?\".format(switch))\n green = read_bool()\n\n # Determine whether or not to flip the switch.\n selection = should_flip(bag_state, red, blue, green)\n\n # Increase suspicion level by 2 if switch needs flipped.\n if selection is True:\n bag_state['suspicion level'] += 2\n print(\"Flip that switch\\n\")\n else:\n print(\"DO NOT flip that switch\\n\")\n\n print(\"Switches layer is complete.\")", "title": "" }, { "docid": "cfa6b9e5df3e8dfc5fc5bf5b2b0a887d", "score": "0.50033486", "text": "def on_enter(state):\n pass", "title": "" }, { "docid": "f376caadaea71d9f09d782ec8ec2bba5", "score": "0.50028044", "text": "def _command_callback(self, req):\n\n if req.cmd == 1:\n self._brake()\n\n if req.cmd == 2:\n self._unbrake()\n\n return 1", "title": "" }, { "docid": "ba34ad3cba21e1a686c54dd59a4b3217", "score": "0.49987337", "text": "def hot_water_boost(service):\n node_id = HiveSession.entity_lookup.get(service.data[ATTR_ENTITY_ID])\n if not node_id:\n # log or raise error\n _LOGGER.error(\"Cannot boost entity id entered\")\n return\n minutes = service.data[ATTR_TIME_PERIOD]\n mode = service.data[ATTR_MODE]\n\n if mode == \"on\":\n session.hotwater.turn_boost_on(node_id, minutes)\n elif mode == \"off\":\n session.hotwater.turn_boost_off(node_id)", "title": "" }, { "docid": "57400f53d259915eed5b8fbeaf86b4a5", "score": "0.4995576", "text": "def hit(self):\r\n print('apple get')", "title": "" }, { "docid": "72ddb31a5f6373dfb0c1875bce61959b", "score": "0.4988265", "text": "def bt_update(self, connected):\n if connected:\n self.ui.BTStatus_LED.setPixmap(QtGui.QPixmap(ICON_GREEN_LED))\n # enable buttons to start streaming\n self.ui.StartNomad_Button.setEnabled(True)\n self.ui.OfflineData_Button.setEnabled(True)\n self.start_dummy_streaming()\n else:\n self.stop_streaming()\n self.ui.BTStatus_LED.setPixmap(QtGui.QPixmap(ICON_RED_LED))\n self.open_disconnect_dialog()\n # disable buttons to start/stop streaming or apply configuration\n self.ui.StartNomad_Button.setEnabled(False)\n self.ui.OfflineData_Button.setEnabled(False)\n self.ui.StopStreaming_Button.setEnabled(False)\n # enable button only if params are previously loaded\n self.ui.ApplyConfig_Button.setEnabled(self.param_file is not None)", "title": "" }, { "docid": "cfebbf3dae67b9046b843335646badd7", "score": "0.4981698", "text": "def btree_cb(self, msg):\n if msg.data == \"btree_on\":\n rospy.set_param(\"/behavior_enabled\", True)\n if msg.data == \"btree_off\":\n rospy.set_param(\"/behavior_enabled\", False)", "title": "" }, { "docid": "b10793f561c22e8f2e089c3c90c6706d", "score": "0.49778804", "text": "def conditioner_action():\r\n request_temperature = float(request.form['temperature'])\r\n request_humidity = float(request.form['humidity'])\r\n request_action = str(request.form['request_action'])\r\n actuator_dict = get_actuator_dict()\r\n temperature = actuator_dict['temperature']\r\n humidity = 
actuator_dict['humidity']\r\n action_flag = 'do'\r\n if request_action == 'cooler':\r\n if request_temperature < temperature:\r\n turn_on_led(COOLER_NUMBER)\r\n else:\r\n action_flag = 'undo'\r\n turn_off_led()\r\n elif request_action == 'heater':\r\n if temperature < request_temperature:\r\n turn_on_led(HEATER_NUMBER)\r\n else:\r\n action_flag = 'undo'\r\n turn_off_led()\r\n else:\r\n if request_humidity < humidity:\r\n turn_on_led(DRYER_NUMBER)\r\n else:\r\n action_flag = 'undo'\r\n turn_off_led()\r\n return render_template('led.html', temperature=temperature,\r\n humidity=humidity,\r\n request_action=request_action,\r\n request_temperature=request_temperature,\r\n request_humidity=request_humidity,\r\n action_flag=action_flag)", "title": "" }, { "docid": "fb852a7501af9ad8005116b7695f6755", "score": "0.49773285", "text": "def trace (self, client):\n\n self.secure_action(client)\n\n BunkAction.trace(self, client)", "title": "" }, { "docid": "35bc91e72fcb8585f7be34ab1fb45528", "score": "0.495693", "text": "def get__butler__ping(self):\n return 'ok'", "title": "" } ]
dada524dfeb353a742892134297adb29
Handles the result of the opponent's move. Transforms the captured_piece bool and the captured_square value into a single optional value in order to build the request.
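A minimal sketch of the transformation this query describes, assuming only the standard library: the (bool, value) pair collapses into a single Optional because the captured square is only meaningful when a piece was actually captured. The helper name captured_square_or_none and the int square type are illustrative assumptions, not taken from the passages below.

    from typing import Optional

    def captured_square_or_none(captured_piece: bool,
                                captured_square: Optional[int]) -> Optional[int]:
        # Keep the square only when a capture really happened, so the request
        # builder can treat "no capture" as a single absent field.
        return captured_square if captured_piece else None

    # Usage: captured_square_or_none(True, 27)  -> 27
    #        captured_square_or_none(False, 27) -> None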
[ { "docid": "b7b1c037d7c5438bb04c97623bdc34f9", "score": "0.75338936", "text": "def handle_opponent_move_result(self, captured_piece, captured_square):\n if self.out_of_time:\n return None\n\n try:\n request = make_handle_opponent_move_request(captured_square if captured_piece else None)\n self.stub.HandleOpponentMove(request)\n except Exception as e:\n print(e)", "title": "" } ]
[ { "docid": "ba095000e813e088faa4d1f0cf47c18a", "score": "0.7908477", "text": "def handle_opponent_move_result(self, captured_my_piece: bool, capture_square: Optional[Square]):\n pass", "title": "" }, { "docid": "b57d357d58c7ecf57ed7abac53f608b1", "score": "0.7078982", "text": "def handle_move_result(self, requested_move, taken_move, reason, captured_piece,\n captured_square):\n try:\n request = make_handle_move_result_request(requested_move, taken_move, reason,\n captured_square if captured_piece else None)\n self.stub.HandleMoveResult(request)\n except Exception as e:\n print(e)", "title": "" }, { "docid": "9457a7febc63042a1b4870af0e85d1f4", "score": "0.6398165", "text": "def make_handle_move_result_request(requested_move, taken_move, reason, captured_square):\n if captured_square is None:\n return agent_pb2.HandleMoveResultRequest(\n requested_move=chess_move_to_protobuf(requested_move),\n taken_move=chess_move_to_protobuf(taken_move) if taken_move else None,\n reason=reason\n )\n return agent_pb2.HandleMoveResultRequest(\n requested_move=chess_move_to_protobuf(requested_move),\n taken_move=chess_move_to_protobuf(taken_move) if taken_move else None,\n reason=reason,\n captured_position=chess_square_to_protobuf_position(captured_square)\n )", "title": "" }, { "docid": "8f44c504593ea1d2fba770c64aa41f97", "score": "0.5983835", "text": "def post(self):\n response = {}\n mpresponse = {}\n game_id = self.request.get('gameID')\n player_id = self.request.get('playerID')\n row = int(self.request.get('row'))\n column = int(self.request.get('column'))\n # Find the appropriate game, validate\n game = Game.query(Game.id == int(game_id)).get()\n if game and not game.done:\n if game.player1 == player_id:\n team = Square.white\n elif game.player2 == player_id:\n team = Square.black\n else:\n logging.critical('Hacking attempt?')\n if not game.moves:\n moves = []\n else:\n moves = json.loads(game.moves)\n board = Board.from_json(game.board, game.turn)\n points = board.add_piece(row, column, team)\n # Only update things if this is a legal move\n if points > 0:\n moves.append([row, column])\n response['ourmove'] = '( {}, {} )'.format(row + 1, column + 1)\n logging.info('player%s moved r%sc%s', team + 1, row, column)\n other_can_move = False\n if game.singleplayer:\n while board.get_turn() != team and board.get_turn() != Square.blank:\n other_can_move = True\n if game.player2 == 'Novice AI':\n ai_move = ai.novice_move(board)\n elif game.player2 == 'Weak AI':\n ai_move = ai.weak_move(board)\n elif game.player2 == 'Moderate AI':\n ai_move = ai.moderate_move(board)\n elif game.player2 == 'Strong AI':\n ai_move = ai.strong_move(board)\n else:\n logging.error('Player %s playing vs unknown AI %s', player_id, ai)\n return\n board.add_piece(ai_move[0], ai_move[1], Square.black)\n response['theirmove'] = '( {}, {} )'.format(ai_move[0] + 1,\n ai_move[1] + 1)\n moves.append(ai_move)\n else:\n # Multiplayer: see if the other player can move\n other_can_move = (board.get_turn() != team)\n if board.get_turn() != Square.blank:\n # If the other player couldn't move, tell user\n if not other_can_move:\n logging.info('no legal moves for player%s', not team + 1)\n response['message'] = \"Opponent can't move. Your turn\"\n response['theirmove'] = 'N/A'\n response['ourTurn'] = True\n if not game.singleplayer:\n mpresponse['message'] = \"You can't move. 
Opponent's turn\"\n mpresponse['ourmove'] = 'N/A'\n mpresponse['ourTurn'] = False\n else:\n if game.singleplayer:\n response['message'] = 'Your Turn'\n response['ourTurn'] = True\n else:\n response['message'] = \"Opponent's Turn\"\n mpresponse['message'] = 'Your Turn'\n response['ourTurn'] = False\n mpresponse['ourTurn'] = True\n # If the game's over, update database and determine winner\n else:\n game.done = True\n response['done'] = True\n winner = board.who_won()\n if winner == Square.white:\n winteam = 'White Wins!'\n game.winner = Square.white\n elif winner == Square.black:\n winteam = 'Black Wins!'\n game.winner = Square.black\n else:\n winteam = \"It's a Draw!\"\n game.winner = Square.blank\n if not game.singleplayer:\n update_rankings(game)\n response['message'] = 'Game Over. {}'.format(winteam)\n\n game.moves = json.dumps(moves)\n game.board = board.to_json()\n game.turn = board.get_turn()\n game.put()\n response['board'] = game.board\n response['whitescore'] = board.num_pieces(Square.white)\n response['blackscore'] = board.num_pieces(Square.black)\n\n if not game.singleplayer:\n # If the move checks out, update other player\n mpresponse = dict(response.items() + mpresponse.items())\n mpresponse['theirmove'] = mpresponse['ourmove']\n del mpresponse['ourmove']\n mpplayer = game.player2 if player_id == game.player1 else game.player1\n channel.send_message(mpplayer, json.dumps(mpresponse))\n else:\n # If the move is illegal, send an error\n response['error'] = True\n response['message'] = 'That move is illegal'\n response['ourTurn'] = True\n elif game.done:\n logging.info('Player attempted move on finished game')\n response['message'] = 'This Game Is Already Over'\n response['error'] = True\n\n else:\n logging.error('Game id%s by %s not found', game_id, player_id)\n response['error'] = True\n response['message'] = 'Database could not find this game'\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(response))", "title": "" }, { "docid": "95429bb21783046a60b7e23f6a6fd380", "score": "0.5900562", "text": "def process_outcome(self, outcome):\n if outcome==2:\n print(f'No move available for {self.player}')\n print(f'{self.player} loses!')\n self.nextTurn()\n self.setPlayer()\n return self.player, self.turn - 1\n if outcome==3:\n print(f'{self.player} wins!')\n return self.player, self.turn", "title": "" }, { "docid": "a209c559ddb5c87195fc01e144302748", "score": "0.58888596", "text": "def make_handle_opponent_move_request(captured_square):\n if captured_square is None:\n return agent_pb2.HandleOpponentMoveRequest()\n captured_square_proto = chess_square_to_protobuf_position(captured_square)\n return agent_pb2.HandleOpponentMoveRequest(captured_square=captured_square_proto)", "title": "" }, { "docid": "57c60b65bb012a2c1ecdd212a81a04e1", "score": "0.58345026", "text": "def do_move(self, move):\n move = move.replace(' ','') #Remove spaces\n\n from_coords = self.coordinates(move[0:2])\n to_coords = self.coordinates(move[2:4])\n\n #Check for promotion\n if len(move) == 5:\n promotion = move[4]\n if self.turn == 'w':\n promotion = promotion.upper()\n else:\n promotion = None\n\n piece = self.board[from_coords[0]][from_coords[1]]\n\n #Reset cp_clock if piece is being captured or pawn is moved.\n if self.board[to_coords[0]][to_coords[1]] != ' ' or piece in 'pP':\n self.cp_clock = 0\n else:\n self.cp_clock = str(int(self.cp_clock) + 1)\n\n \n #Move piece to new position and remove piece at old position.\n if promotion is None:\n 
self.board[to_coords[0]][to_coords[1]] = piece \n else:\n self.board[to_coords[0]][to_coords[1]] = promotion\n self.board[from_coords[0]][from_coords[1]] = ' '\n\n #Check for castling and move rook if necessary.\n if piece in 'kK':\n if move == 'e1g1': #king-side white castling\n self.board[0][7] = ' '\n self.board[0][5] = 'R'\n elif move == 'e1c1':#queen-side white castling\n self.board[0][0] = ' '\n self.board[0][3] = 'R'\n elif move == 'e8g8':#king-side black castling\n self.board[7][7] = ' '\n self.board[7][5] = 'r'\n elif move == 'e8c8':#queen-side black castling\n self.board[7][0] = ' '\n self.board[7][3] = 'r'\n \n #Removing castling rights if king is moved.\n if piece == 'k':\n self.castling = self.castling.replace('k','').replace('q','')\n elif piece == 'K':\n self.castling = self.castling.replace('K','').replace('Q','')\n \n #Removing castling rights if rook is moved.\n if piece == 'r':\n if from_coords == (7,7):\n self.castling = self.castling.replace('k','')\n elif from_coords == (7,0): \n self.castling = self.castling.replace('q','')\n if piece == 'R':\n if from_coords == (0,7):\n self.castling = self.castling.replace('K','')\n elif from_coords == (0,0):\n self.castling = self.castling.replace('Q','')\n\n #En passant\n if from_coords[0] == 1 and to_coords[0] == 3 and piece == 'P':\n self.ep = self.to_string((2,from_coords[1]))\n elif from_coords[0] == 6 and to_coords[0] == 4 and piece == 'p':\n self.ep = self.to_string((5,from_coords[1]))\n else:\n self.ep = '-'\n\n #Increment moves counter if move is done by black\n if self.turn == 'b':\n self.turn = 'w'\n self.moves = str(int(self.moves)+1)\n else:\n self.turn = 'b'", "title": "" }, { "docid": "77067ea6a35e9970e855023b56346de2", "score": "0.5799381", "text": "def test_handles_move_to_well_result(\n motion_store: MotionStore,\n now: datetime,\n req: commands.CommandRequestType,\n res: commands.CommandResultType,\n) -> None:\n cmd: commands.CompletedCommandType = \\\n commands.CompletedCommand( # type: ignore[assignment]\n created_at=now,\n started_at=now,\n completed_at=now,\n request=req,\n result=res\n )\n\n motion_store.handle_completed_command(cmd)\n\n assert motion_store.state.get_current_deck_location() == DeckLocation(\n pipette_id=\"pipette-id\",\n labware_id=\"labware-id\",\n well_name=\"B4\"\n )", "title": "" }, { "docid": "8a229507ab7cb3ba8115cdd80ba9386c", "score": "0.56722975", "text": "def result(board, action):\n \n i, j = action \n if board[i][j] is EMPTY:\n player_turn = player(board)\n new_board = deepcopy(board) \n new_board[i][j] = player_turn\n return new_board\n else:\n raise Exception(\"Invalid Move\")", "title": "" }, { "docid": "ed645b257c3fd9451b7aecb39d6cb33c", "score": "0.56125784", "text": "def get_move(self, board, possible_moves, player_1_or_2):", "title": "" }, { "docid": "c5888feb3b8307c9ba2307e3c2f77796", "score": "0.5586883", "text": "def action(self, turns):\n #print(\"Turn \" + str(turns + 1))\n r_val = None # return value\n turn_valid = False\n # know how many times board has shrunk\n shrinks = player_functions.get_shrinks(turns)\n if int(turns/2) == 64:\n # 64 turns have passed for each player\n player_functions.shrink(self.board,shrinks)\n player_functions.eliminate(self.board, self.op_piece, self.my_piece)\n elif int(turns/2) == 96:\n # 96 turns have passed for each player\n player_functions.shrink(self.board,shrinks)\n player_functions.eliminate(self.board, self.op_piece, self.my_piece)\n while not turn_valid:\n # have the player attempt a move\n print('-'*32)\n if self.placed < 12:\n 
# placing phase\n print(\"Placing phase for \" + self.colour + \" player\")\n move = self.place()\n if move is None:\n print(\"Bad attempt! Try again\")\n else:\n print(\"Placement by \" + self.colour + \" at position (\" +\n str(move[0]) + \", \" + str(move[1]) + \")\")\n r_val = move\n self.placed += 1\n turn_valid = True\n else:\n # moving phase\n # check if can do anything\n m = player_functions.moves_available(\n self.board,self.my_piece,shrinks)\n print(\"Moving phase for \" + self.colour + \" player\")\n print(\"Can make \" + str(m) + \" move(s)\")\n if m == 0:\n print(\"Can't make any moves!\")\n turn_valid = True\n r_val = None\n else:\n move = self.move(shrinks)\n if move is None:\n print(\"Bad attempt! Try again\")\n else:\n print(\"Moved from \" + str(move[0]) + \" to \" +\n str(move[1]))\n r_val = move\n turn_valid = True\n print('-'*32)\n player_functions.eliminate(self.board, self.op_piece, self.my_piece)\n n_shrinks = player_functions.get_shrinks(turns+1)\n if n_shrinks != shrinks:\n player_functions.shrink(self.board, n_shrinks)\n return r_val", "title": "" }, { "docid": "63c9784f62348c2b9529b8881bcd5b5f", "score": "0.556057", "text": "def handle_get_successor(req):\n global mazeInfo\n directionList = [\"NORTH\", \"EAST\",\"SOUTH\",\"WEST\"]\n x_cord, y_cord, direction, action = req.x, req.y, req.direction, req.action\n\n #Checking requested action and making changes in states\n if action == 'TurnCW':\n index = directionList.index(req.direction)\n direction = directionList[(index+1)%4]\n g_cost = 2\n\n elif action == 'TurnCCW':\n index = directionList.index(req.direction)\n direction = directionList[(index-1)%4]\n g_cost = 2\n\n elif action == 'MoveF':\n if direction == \"NORTH\":\n y_cord += 0.5\n elif direction == \"EAST\":\n x_cord += 0.5\n elif direction == \"SOUTH\":\n y_cord -= 0.5\n elif direction == \"WEST\":\n x_cord -= 0.5\n g_cost = 1\n\n elif action == 'MoveB':\n if direction == \"NORTH\":\n y_cord -= 0.5\n elif direction == \"EAST\":\n x_cord -= 0.5\n elif direction == \"SOUTH\":\n y_cord += 0.5\n elif direction == \"WEST\":\n x_cord += 0.5\n g_cost = 3\n \n if req.x <= x_cord and req.y <= y_cord:\n isValidEdge = check_is_edge((req.x, req.y, x_cord, y_cord), \"changedValuesLater\")\n else:\n isValidEdge = check_is_edge((x_cord, y_cord, req.x, req.y), \"changedValuesBefore\")\n\n if not isValidEdge:\n return GetSuccessorResponse(-1, -1, direction, -1)\n\n return GetSuccessorResponse(x_cord, y_cord, direction, g_cost)", "title": "" }, { "docid": "f480823abf4f3dec75a39607421af0b3", "score": "0.5542925", "text": "def pass_turn(self):\n self.my_turn = False\n # passing the turn\n state = (\n self.op_color,\n self.board.copy(),\n self.pointer.copy(),\n self.white_groups.copy(),\n self.black_groups.copy(),\n self.white_captured,\n self.black_captured,\n )\n self.conn.sendall(str.encode(\"POST\"))\n self.conn.sendall(pickle.dumps(state))", "title": "" }, { "docid": "2a737e931426ec8a0357b5bae396f77c", "score": "0.5530435", "text": "def oppon_turn(player):\n print 'Wait for opponents turn...'\n return pickle.loads(player.receive())", "title": "" }, { "docid": "3c112da19158644d5fbe844ccf1b3fb8", "score": "0.5517507", "text": "def process_move(player, board):\n print(str(player) + \"'s turn\")\n \n move = player.next_move(board)\n \n board.add_checker(player.checker, move[0], move[1])\n print()\n print(board)\n\n if board.is_win_for(player.checker, move[0], move[1]):\n print(player, 'wins in', player.num_moves, 'moves.')\n print('Congratulations!')\n return True\n elif 
board.is_full():\n print(\"It's a tie!\")\n return True\n else:\n return False", "title": "" }, { "docid": "b554d655eaa824b7d43316f7c7d07135", "score": "0.5510937", "text": "def move(self, move):\n new_state = self.copy()\n # Check if a players rook is being moved for first time\n piece = self.get_piece(move.src)\n color = self.get_color(move.src)\n if piece is ROOK:\n if ChessState.rank(move.src[0], color) == 0:\n if move.src[1] == 0:\n new_state.rook_moved[color][0] = True\n if move.src[1] == 7:\n new_state.rook_moved[color][1] = True\n\n # Check if king moved for first time\n if piece is KING:\n new_state.king_moved[color] = True\n # Check if this is a castling move. If it is move the rook\n if move.src[1] - move.dst[1] == 2:\n new_state = new_state.move(Move((move.src[0], 0), (move.src[0], 3), color | ROOK))\n if move.src[1] - move.dst[1] == -2:\n new_state = new_state.move(Move((move.src[0], 7), (move.src[0], 5), color | ROOK))\n\n new_state.board[move.src[0]][move.src[1]] = EMPTY\n new_state.captured_last_move[color] = False\n if new_state.get_piece(move.dst) is not EMPTY:\n new_state.captured.append(new_state.board[move.dst[0]][move.dst[1]])\n new_state.captured_last_move[color] = True\n new_state.board[move.dst[0]][move.dst[1]] = move.final_piece\n new_state.to_move = WHITE if self.to_move is BLACK else BLACK\n return new_state", "title": "" }, { "docid": "04e9990062ca054dfd81409162c76636", "score": "0.550764", "text": "def take_turn(self, board):\n\n print(f'{self._color} [remote] taking turn')\n\n # Fetch the move to the server\n move_number = len(board.moves()) + 1\n response = _server_request('fetchmove.py', matchid=self._match_id, movenumber=move_number,\n token=self._color)\n nbr_tries = 1\n while nbr_tries < 6 and response['status'] == 'NO MOVE':\n # Problem so wait a bit and try up to three more times\n print(f'*** PROBLEM FETCHING MOVE {move_number} FROM SERVER ({nbr_tries})\\n {response[\"error\"]}')\n nbr_tries += 1\n time.sleep(2.0)\n response = _server_request('fetchmove.py', matchid=self._match_id, movenumber=len(board.moves()),\n token=self._color)\n if nbr_tries > 5:\n reply = input('No move on server. Try again? [Y/n]').strip() or 'Y'\n while reply[0].upper() == 'Y':\n response = _server_request('fetchmove.py', matchid=self._match_id, movenumber=len(board.moves()),\n token=self._color)\n if response['status'] == 'OK':\n reply = 'N' # nasty code to leave this loop!\n else:\n reply = input('No move on server. Try again? 
[Y/n]').strip().upper() or 'Y'\n\n if response['status'] == 'OK':\n move = isolation.Move(response['to'], response['pushout'])\n else:\n move = None\n\n print(f' {move}')\n\n return move", "title": "" }, { "docid": "c0aa55e15faa054912006b6b4c585ea1", "score": "0.5503903", "text": "def result(board, action):\n board_copy = copy.deepcopy(board)\n \n try:\n if board_copy[action[0]][action[1]] != EMPTY:\n raise ValueError\n else:\n board_copy[action[0]][action[1]] = player(board)\n \n except ValueError:\n print('Invalid Move')\n \n return board_copy", "title": "" }, { "docid": "94f0e65d9ba7e77ba020c35f372b65b7", "score": "0.54859537", "text": "def do_move(self, m: move.Move) -> int:\n if not m.move_type:\n raise ValueError('No Move defined')\n if m.move_type == enums.MoveType.NULL_TYPE:\n raise ValueError('Move has null type')\n if m.move_type == enums.MoveType.UNSPECIFIED_STANDARD:\n raise ValueError('Move type is unspecified')\n\n # Reset accumulations here because function has conditional return branches\n self.accumulations_repetitions = None\n\n # Add move to move_history\n self.move_history.append(m)\n\n sbit = square_to_bit(m.source)\n tbit = square_to_bit(m.target)\n squbit = bit_to_qubit(sbit)\n tqubit = bit_to_qubit(tbit)\n\n if (m.move_variant == enums.MoveVariant.CAPTURE or\n m.move_type == enums.MoveType.PAWN_EP or\n m.move_type == enums.MoveType.PAWN_CAPTURE):\n # TODO: figure out if it is a deterministic capture.\n for val in list(self.allowed_pieces):\n self.allowed_pieces.add(val - 1)\n\n if m.move_type == enums.MoveType.PAWN_EP:\n # For en passant, first determine the square of the pawn being\n # captured, which should be next to the target.\n if m.target[1] == '6':\n epbit = square_to_bit(m.target[0] + '5')\n elif m.target[1] == '3':\n epbit = square_to_bit(m.target[0] + '4')\n else:\n raise ValueError(f'Invalid en passant target {m.target}')\n epqubit = bit_to_qubit(epbit)\n\n # For the classical version, set the bits appropriately\n if (epqubit not in self.entangled_squares and\n squbit not in self.entangled_squares and\n tqubit not in self.entangled_squares):\n if (not nth_bit_of(epbit, self.state) or\n not nth_bit_of(sbit, self.state) or\n nth_bit_of(tbit, self.state)):\n raise ValueError('Invalid classical e.p. move')\n\n self.state = set_nth_bit(epbit, self.state, False)\n self.state = set_nth_bit(sbit, self.state, False)\n self.state = set_nth_bit(tbit, self.state, True)\n return 1\n\n # If any squares are quantum, it's a quantum move\n self.add_entangled(squbit, tqubit, epqubit)\n\n # Capture e.p. post-select on the source\n if m.move_variant == enums.MoveVariant.CAPTURE:\n is_there = self.post_select_on(squbit, m.measurement)\n if not is_there:\n return 0\n self.add_entangled(squbit)\n # capture e.p. has a special circuit\n self.circuit.append(\n qm.capture_ep(squbit, tqubit, epqubit, self.new_ancilla(),\n self.new_ancilla(), self.new_ancilla()))\n return 1\n\n # Blocked/excluded e.p. 
post-select on the target\n if m.move_variant == enums.MoveVariant.EXCLUDED:\n # Note that a measurement of 1 means that the move was\n # successful so that the target square is empty\n is_there = self.post_select_on(tqubit, m.measurement, invert=True)\n if is_there:\n return 0\n self.add_entangled(tqubit)\n self.circuit.append(\n qm.en_passant(squbit, tqubit, epqubit, self.new_ancilla(),\n self.new_ancilla()))\n return 1\n\n if m.move_type == enums.MoveType.PAWN_CAPTURE:\n # For pawn capture, first measure source.\n is_there = self.post_select_on(squbit, m.measurement)\n if not is_there:\n return 0\n if tqubit in self.entangled_squares:\n old_tqubit = self.unhook(tqubit)\n self.add_entangled(squbit, tqubit)\n\n self.circuit.append(\n qm.controlled_operation(cirq.ISWAP, [squbit, tqubit],\n [old_tqubit], []))\n else:\n # Classical case\n self.state = set_nth_bit(sbit, self.state, False)\n self.state = set_nth_bit(tbit, self.state, True)\n return 1\n\n if m.move_type == enums.MoveType.SPLIT_SLIDE:\n tbit2 = square_to_bit(m.target2)\n tqubit2 = bit_to_qubit(tbit2)\n\n # Find all the squares on both paths\n path_qubits = self.path_qubits(m.source, m.target)\n path_qubits2 = self.path_qubits(m.source, m.target2)\n\n if len(path_qubits) == 0 and len(path_qubits2) == 0:\n # No interposing squares, just jump.\n m.move_type = enums.MoveType.SPLIT_JUMP\n else:\n self.add_entangled(squbit, tqubit, tqubit2)\n path1 = self.create_path_ancilla(path_qubits)\n path2 = self.create_path_ancilla(path_qubits2)\n ancilla = self.new_ancilla()\n self.circuit.append(\n qm.split_slide(squbit, tqubit, tqubit2, path1, path2,\n ancilla))\n return 1\n\n if m.move_type == enums.MoveType.MERGE_SLIDE:\n sbit2 = square_to_bit(m.source2)\n squbit2 = bit_to_qubit(sbit2)\n self.add_entangled(squbit, squbit2, tqubit)\n\n # Find all the squares on both paths\n path_qubits = self.path_qubits(m.source, m.target)\n path_qubits2 = self.path_qubits(m.source2, m.target)\n if len(path_qubits) == 0 and len(path_qubits2) == 0:\n # No interposing squares, just jump.\n m.move_type = enums.MoveType.MERGE_JUMP\n else:\n path1 = self.create_path_ancilla(path_qubits)\n path2 = self.create_path_ancilla(path_qubits2)\n ancilla = self.new_ancilla()\n self.circuit.append(\n qm.merge_slide(squbit, tqubit, squbit2, path1, path2,\n ancilla))\n return 1\n\n if (m.move_type == enums.MoveType.SLIDE or\n m.move_type == enums.MoveType.PAWN_TWO_STEP):\n path_qubits = self.path_qubits(m.source, m.target)\n if len(path_qubits) == 0:\n # No path, change to jump\n m.move_type = enums.MoveType.JUMP\n\n if (m.move_type == enums.MoveType.SLIDE or\n m.move_type == enums.MoveType.PAWN_TWO_STEP):\n for p in path_qubits:\n if (p not in self.entangled_squares and\n nth_bit_of(qubit_to_bit(p), self.state)):\n # Classical piece in the way\n return 0\n\n # For excluded case, measure target\n if m.move_variant == enums.MoveVariant.EXCLUDED:\n # Note that a measurement of 1 means that the move was\n # successful so that the target square is empty\n is_there = self.post_select_on(tqubit, m.measurement, invert=True)\n if is_there:\n return 0\n\n self.add_entangled(squbit, tqubit)\n if m.move_variant == enums.MoveVariant.CAPTURE:\n capture_ancilla = self.new_ancilla()\n self.circuit.append(\n qm.controlled_operation(cirq.X, [capture_ancilla], [squbit],\n path_qubits))\n\n # We need to add the captured_ancilla to entangled squares\n # So that we measure it\n self.entangled_squares.add(capture_ancilla)\n capture_allowed = self.post_select_on(capture_ancilla, 
m.measurement)\n\n if not capture_allowed:\n return 0\n else:\n # Perform the captured slide\n self.add_entangled(squbit)\n # Remove the target from the board into an ancilla\n # and set bit to zero\n self.unhook(tqubit)\n self.state = set_nth_bit(tbit, self.state, False)\n\n # Re-add target since we need to swap into the square\n self.add_entangled(tqubit)\n\n # Perform the actual move\n self.circuit.append(qm.normal_move(squbit, tqubit))\n\n # Set source to empty\n self.unhook(squbit)\n self.state = set_nth_bit(sbit, self.state, False)\n\n # Now set the whole path to empty\n for p in path_qubits:\n self.state = set_nth_bit(qubit_to_bit(p), self.state, False)\n self.unhook(p)\n return 1\n # Basic slide (or successful excluded slide)\n\n # Add all involved squares into entanglement\n self.add_entangled(squbit, tqubit, *path_qubits)\n\n if len(path_qubits) == 1:\n # For path of one, no ancilla needed\n self.circuit.append(qm.slide_move(squbit, tqubit, path_qubits))\n return 1\n # Longer paths require a path ancilla\n ancilla = self.new_ancilla()\n self.circuit.append(\n qm.slide_move(squbit, tqubit, path_qubits, ancilla))\n return 1\n\n if (m.move_type == enums.MoveType.JUMP or\n m.move_type == enums.MoveType.PAWN_STEP):\n if (squbit not in self.entangled_squares and\n tqubit not in self.entangled_squares):\n # Classical version\n self.state = set_nth_bit(sbit, self.state, False)\n self.state = set_nth_bit(tbit, self.state, True)\n return 1\n\n # Measure source for capture\n if m.move_variant == enums.MoveVariant.CAPTURE:\n is_there = self.post_select_on(squbit, m.measurement)\n if not is_there:\n return 0\n self.unhook(tqubit)\n\n # Measure target for excluded\n if m.move_variant == enums.MoveVariant.EXCLUDED:\n # Note that a measurement of 1 means that the move was\n # successful so that the target square is empty\n is_there = self.post_select_on(tqubit, m.measurement, invert=True)\n if is_there:\n return 0\n\n # Only convert source qubit to ancilla if target\n # is empty\n unhook = tqubit not in self.entangled_squares\n self.add_entangled(squbit, tqubit)\n\n # Execute jump\n self.circuit.append(qm.normal_move(squbit, tqubit))\n\n if unhook or m.move_variant != enums.MoveVariant.BASIC:\n # The source is empty.\n # Change source qubit to be an ancilla\n # and set classical bit to zero\n self.state = set_nth_bit(sbit, self.state, False)\n self.unhook(squbit)\n\n return 1\n\n if m.move_type == enums.MoveType.SPLIT_JUMP:\n tbit2 = square_to_bit(m.target2)\n tqubit2 = bit_to_qubit(tbit2)\n self.add_entangled(squbit, tqubit, tqubit2)\n self.circuit.append(qm.split_move(squbit, tqubit, tqubit2))\n self.state = set_nth_bit(sbit, self.state, False)\n self.unhook(squbit)\n return 1\n\n if m.move_type == enums.MoveType.MERGE_JUMP:\n sbit2 = square_to_bit(m.source2)\n squbit2 = bit_to_qubit(sbit2)\n self.add_entangled(squbit, squbit2, tqubit)\n self.circuit.append(qm.merge_move(squbit, squbit2, tqubit))\n # TODO: should the source qubit be 'unhooked'?\n return 1\n\n if m.move_type == enums.MoveType.KS_CASTLE:\n # Figure out the rook squares\n if sbit == square_to_bit('e1') and tbit == square_to_bit('g1'):\n rook_sbit = square_to_bit('h1')\n rook_tbit = square_to_bit('f1')\n elif sbit == square_to_bit('e8') and tbit == square_to_bit('g8'):\n rook_sbit = square_to_bit('h8')\n rook_tbit = square_to_bit('f8')\n else:\n raise ValueError(f'Invalid kingside castling move')\n rook_squbit = bit_to_qubit(rook_sbit)\n rook_tqubit = bit_to_qubit(rook_tbit)\n\n # Piece in non-superposition in the way, not legal\n 
if (nth_bit_of(rook_tbit, self.state) and\n rook_tqubit not in self.entangled_squares):\n return 0\n if (nth_bit_of(tbit, self.state) and\n tqubit not in self.entangled_squares):\n return 0\n\n # Not in superposition, just castle\n if (rook_tqubit not in self.entangled_squares and\n tqubit not in self.entangled_squares):\n self.set_castle(sbit, rook_sbit, tbit, rook_tbit)\n return 1\n\n # Both intervening squares in superposition\n if (rook_tqubit in self.entangled_squares and\n tqubit in self.entangled_squares):\n castle_ancilla = self.create_path_ancilla([rook_tqubit, tqubit])\n self.entangled_squares.add(castle_ancilla)\n castle_allowed = self.post_select_on(castle_ancilla, m.measurement)\n if castle_allowed:\n self.unhook(rook_tqubit)\n self.unhook(tqubit)\n self.set_castle(sbit, rook_sbit, tbit, rook_tbit)\n return 1\n else:\n self.post_selection[castle_ancilla] = castle_allowed\n return 0\n\n # One intervening square in superposition\n if rook_tqubit in self.entangled_squares:\n measure_qubit = rook_tqubit\n measure_bit = rook_tbit\n else:\n measure_qubit = tqubit\n measure_bit = tbit\n # Note that a measurement of 1 means that the move was\n # successful so that the target square is empty\n is_there = self.post_select_on(measure_qubit, m.measurement,invert=True)\n if is_there:\n return 0\n self.set_castle(sbit, rook_sbit, tbit, rook_tbit)\n return 1\n\n if m.move_type == enums.MoveType.QS_CASTLE:\n\n # Figure out the rook squares and the b-file square involved\n if sbit == square_to_bit('e1') and tbit == square_to_bit('c1'):\n rook_sbit = square_to_bit('a1')\n rook_tbit = square_to_bit('d1')\n b_bit = square_to_bit('b1')\n elif sbit == square_to_bit('e8') and tbit == square_to_bit('c8'):\n rook_sbit = square_to_bit('a8')\n rook_tbit = square_to_bit('d8')\n b_bit = square_to_bit('b8')\n else:\n raise ValueError(f'Invalid queenside castling move')\n rook_squbit = bit_to_qubit(rook_sbit)\n rook_tqubit = bit_to_qubit(rook_tbit)\n b_qubit = bit_to_qubit(b_bit)\n\n # Piece in non-superposition in the way, not legal\n if (nth_bit_of(rook_tbit, self.state) and\n rook_tqubit not in self.entangled_squares):\n return 0\n if (nth_bit_of(tbit, self.state) and\n tqubit not in self.entangled_squares):\n return 0\n if (b_bit is not None and nth_bit_of(b_bit, self.state) and\n b_qubit not in self.entangled_squares):\n return 0\n\n # Not in superposition, just castle\n if (rook_tqubit not in self.entangled_squares and\n tqubit not in self.entangled_squares and\n b_qubit not in self.entangled_squares):\n self.set_castle(sbit, rook_sbit, tbit, rook_tbit)\n return 1\n\n # Neither intervening squares in superposition\n if (rook_tqubit not in self.entangled_squares and\n tqubit not in self.entangled_squares):\n if b_qubit not in self.entangled_squares:\n self.set_castle(sbit, rook_sbit, tbit, rook_tbit)\n else:\n self.queenside_castle(squbit, rook_squbit, tqubit,\n rook_tqubit, b_qubit)\n return 1\n\n # Both intervening squares in superposition\n if (rook_tqubit in self.entangled_squares and\n tqubit in self.entangled_squares):\n castle_ancilla = self.create_path_ancilla([rook_tqubit, tqubit])\n self.entangled_squares.add(castle_ancilla)\n castle_allowed = self.post_select_on(castle_ancilla, m.measurement)\n if castle_allowed:\n self.unhook(rook_tqubit)\n self.unhook(tqubit)\n if b_qubit not in self.entangled_squares:\n self.set_castle(sbit, rook_sbit, tbit, rook_tbit)\n else:\n self.queenside_castle(squbit, rook_squbit, tqubit,\n rook_tqubit, b_qubit)\n return 1\n else:\n 
self.post_selection[castle_ancilla] = castle_allowed\n return 0\n\n # One intervening square in superposition\n if rook_tqubit in self.entangled_squares:\n measure_qubit = rook_tqubit\n measure_bit = rook_tbit\n else:\n measure_qubit = tqubit\n measure_bit = tbit\n # Note that a measurement of one means the move was successful\n # so that the path was clear\n is_there = self.post_select_on(measure_qubit, m.measurement, invert=True)\n if is_there:\n return 0\n if b_qubit not in self.entangled_squares:\n self.set_castle(sbit, rook_sbit, tbit, rook_tbit)\n else:\n self.queenside_castle(squbit, rook_squbit, tqubit, rook_tqubit,\n b_qubit)\n return 1\n\n raise ValueError(f'Move type {m.move_type} not supported')", "title": "" }, { "docid": "322c31604051c7d8a7ba0b130702f298", "score": "0.54640424", "text": "def take_turn(self, board):\n print(f'{self._color} [local] taking turn')\n\n move = self._player.take_turn(board)\n\n # Let's see if this is a winning move\n board_copy = copy.deepcopy(board)\n board_copy.make_move(self._token, move)\n winning_move = len(board_copy.neighbor_tiles(board_copy.token_location(self._opponent_token))) == 0\n\n # Post the move to the server\n move_number = len(board.moves()) + 1\n response = _server_request('postmove.py', matchid=self._match_id, movenumber=move_number,\n to=move.to_square_id, pushout=move.pushout_square_id,\n token=self._color, winningmove=int(winning_move))\n nbr_tries = 1\n while nbr_tries < 3 and response['status'] != 'OK':\n # Problem so wait a bit and try up to three more times\n print(f'*** PROBLEM POSTING TO SERVER {move}\\n {response[\"error\"]}')\n nbr_tries += 1\n time.sleep(1.0)\n response = _server_request('postmove.py', matchid=self._match_id, movenumber=len(board.moves()),\n to=move.to_square_id, pushout=move.pushout_square_id,\n token=self._color, winningmove=int(winning_move))\n\n if response['status'] != 'OK':\n print(f'COULD NOT POST {move}')\n\n return move", "title": "" }, { "docid": "8acd54a4fc427251131f67527cd21663", "score": "0.5402739", "text": "def turn_finish(self, move: Move) -> None:", "title": "" }, { "docid": "6247fd0c28e27a75ddaea85bc314ee1b", "score": "0.53820294", "text": "def handle_capture(self, from_pos, to_pos, move_index, info):\r\n attacker = self.board[from_pos[0]][from_pos[1]]\r\n attacker_color = attacker.color\r\n\r\n if attacker.name != \"pawn\":\r\n victim = self.board[to_pos[0]][to_pos[1]]\r\n victim.captured()\r\n else:\r\n if move_index == 4:\r\n victim = self.board[from_pos[0]][from_pos[1] - 1]\r\n victim.captured()\r\n \r\n info['is_enpassant'] = True\r\n info['enpassant_pos'] = [from_pos[0], from_pos[1] - 1]\r\n info['enpassant_pc'] = victim\r\n\r\n self.board[from_pos[0]][from_pos[1] - 1] = self.create_empty_piece()\r\n \r\n if move_index == 5:\r\n victim = self.board[from_pos[0]][from_pos[1] + 1]\r\n victim.captured()\r\n \r\n info['is_enpassant'] = True\r\n info['enpassant_pos'] = [from_pos[0], from_pos[1] + 1]\r\n info['enpassant_pc'] = victim\r\n\r\n self.board[from_pos[0]][from_pos[1] + 1] = self.create_empty_piece()\r\n else:\r\n victim = self.board[to_pos[0]][to_pos[1]]\r\n victim.captured()", "title": "" }, { "docid": "6fce54117ca690ed602bb4efcb46f5f9", "score": "0.5367842", "text": "def get_move_destination(self):\n capture_moves, non_capture_moves = self.state.get_moves(self.selection)\n moves = capture_moves + non_capture_moves\n move = moves[0]\n while True:\n self.state.draw_board(self.scr)\n self.state.draw_possible_moves(self.scr, self.selection, move.dst)\n 
self.state.draw_captured_pieces(self.scr)\n key = self.scr.getch()\n if key == 127:\n return\n if key == ord('q'):\n self.display_quit_prompt()\n if key == curses.KEY_LEFT:\n move = get_adjacent_move(moves, move, (0, -1))\n if key == curses.KEY_RIGHT:\n move = get_adjacent_move(moves, move, (0, 1))\n if key == curses.KEY_UP:\n move = get_adjacent_move(moves, move, (-1, 0))\n if key == curses.KEY_DOWN:\n move = get_adjacent_move(moves, move, (1, 0))\n if key in [10, 13]:\n if self.state.is_pawn_promotion_move(move):\n result = self.get_pawn_promotion_piece(move)\n # clear side screen\n for i in range(1, 9):\n self.scr.addstr(i, 27, ' ' * 12)\n if result is None:\n continue\n move = result\n self.state = self.state.move(move)\n if self.state.is_checkmate():\n self.display_checkmate_message()\n\n if self.state.is_stalemate():\n self.display_stalemate_message()\n\n return move", "title": "" }, { "docid": "2d32a9927f32ce6d2b71f64ef544c671", "score": "0.53659946", "text": "def result(board, action):\n boardcopy = copy.deepcopy(board)\n if boardcopy[action[0]][action[1]] != EMPTY:\n raise Exception(\"not a valid move\")\n else:\n boardcopy[action[0]][action[1]] = player(boardcopy)\n return boardcopy\n\n #raise NotImplementedError", "title": "" }, { "docid": "5e67bbd2fed6cba4355cc42c3975094f", "score": "0.53423285", "text": "def on_turn(self, turn_state):\n game_state = gamelib.AdvancedGameState(self.config, turn_state)\n p1UnitCount = len(self.jsonState.get('p1Units')[0])\n p2UnitCount = len(self.jsonState.get('p2Units')[0])\n #gamelib.debug_write('p1 has {} units. p2 has {} units'.format(p1UnitCount, p2UnitCount))\n\n if game_state.turn_number == 0:\n while game_state.can_spawn(PING, self.firstTurnTroopCoord):\n game_state.attempt_spawn(PING, self.firstTurnTroopCoord)\n # no building this turn\n else:\n left_corner_stats = game_state.get_area_stats(self.enemy_left_corner_area)\n gamelib.debug_write('left_corner_stats = {}'.format(left_corner_stats))\n right_corner_stats = game_state.get_area_stats(self.enemy_right_corner_area)\n gamelib.debug_write('right_corner_stats = {}'.format(right_corner_stats))\n\n if game_state.get_resource(game_state.BITS) >= 8:\n self.readyToAttack = True\n else:\n self.readyToAttack = False\n\n # determine which side is more vulnerable\n if left_corner_stats.destructor_count < right_corner_stats.destructor_count:\n self.SetSideToAttack(Sides.LEFT, game_state)\n else:\n self.SetSideToAttack(Sides.RIGHT, game_state)\n\n if self.readyToAttack:\n if self.useRammingTroops:\n self.deployRammingTroops(game_state)\n self.deployTroops(game_state)\n\n self.buildWalls(game_state)\n self.markForRefund(game_state)\n\n game_state.submit_turn()", "title": "" }, { "docid": "848f97ce76ef3bf4aec68014a7be30b2", "score": "0.5341159", "text": "def proceed_round(self, player, action):\n self.update_public(action)\n # 出牌\n self.greater_player = player.play(action, self.greater_player)\n return self.greater_player", "title": "" }, { "docid": "47b950781200895d06b53a5f55cb817a", "score": "0.5335125", "text": "def cozmo_tap_decision(self, game_robot, speed_tap_game, goal=None):\n #print(\"Goal %d to grab:%s\" %(goal, [P_R, O_R]))\n #if self.strategy == PRACTICE:\n # tap_decision = goal in [P_R, O_R ] #randint(0, 10) in [0, 4, 8, 5, 10] \n #elif self.strategy == RANDOM:\n # tap_decision = goal in [P_R, O_R]\n #elif self.strategy == TIT_FOR_TAT:\n # tap_decision = goal in [1, 1, 1, 0, 0, 1] \n #else:\n # tap_decision = goal in [COZMO_DEFECT]\n \n if not self.practice:\n tap_decision = goal in 
[P_R, O_R ]\n \n else:\n tap_decision = goal in [P_R, O_R]\n time.sleep(1.5)\n game_robot.move_lift(-3)\n time.sleep(.1)\n game_robot.move_lift(4)\n time.sleep(.1)\n game_robot.play_anim('anim_speedtap_tap_02')#.wait_for_completed()\n \n if tap_decision:\n cozmo.logger.info(\"PD : Cozmo tapped grab\")\n cozmo_tapped = speed_tap_game.register_tap(tap_type=COZMO_DEFECT)\n else:\n cozmo.logger.info(\"PD : Cozmo tapped share\")\n cozmo_tapped = speed_tap_game.register_tap(tap_type=COZMO_COOP)\n \n time.sleep(0.5)\n return True", "title": "" }, { "docid": "9d06fe3f732136efa178cd6416df00c8", "score": "0.53184074", "text": "def result(board, action):\n mark = player(board)\n board_copy = copy.deepcopy(board)\n if board_copy[action[0]][action[1]] != EMPTY:\n raise Exception(\"Invalid move\")\n board_copy[action[0]][action[1]] = mark\n return board_copy", "title": "" }, { "docid": "5c2b52e0bc9eb19420883e7e7f992d3f", "score": "0.53171736", "text": "def result(board, action):\n if board[action[0]][action[1]] is not EMPTY:\n raise Exception(\"Move not valid\")\n\n new_board = copy.deepcopy(board)\n new_board[action[0]][action[1]] = player(board)\n return new_board", "title": "" }, { "docid": "1740f4b2d7e8f1cf6f411a5a24c71898", "score": "0.53155667", "text": "def evaluate_move(self, client_id, msg):\r\n pass", "title": "" }, { "docid": "23c9ba30a78543de9c52e5219bc36401", "score": "0.52999663", "text": "def add_result(self, move):\r\n self.moves.append(move)", "title": "" }, { "docid": "7b603e514267811525503ed4607568d3", "score": "0.5284228", "text": "def make_move(self, x, y, n, m):\n\t\tcaptured = promoted = False\n\n\t\tif self.board[n][m] is not None:\n\t\t\tcaptured = True\n\n\t\t# Make move\t\n\t\tpiece = self.board[x][y]\n\t\tself.board[x][y] = None\n\t\tself.board[n][m] = piece\n\n\t\tassert piece is not None\n\n\t\tif piece[NAME] == 'P':\n\t\t\t# Set pawn's ``en_passant`` attribute to True\n\t\t\tif m == 3 + self.turn:\n\t\t\t\tself.board[n][m] = piece[:EN_PASSANT] + '1'\n\n\t\t\t# Remove enemy pawns that were captured en passent\n\t\t\t# Acknowledge capture\n\t\t\tother = self.board[n][m - (-1)**self.turn]\n\t\t\tif other is not None and other[NAME] == 'P' and other[EN_PASSANT] == '1':\n\t\t\t\tself.board[n][m - (-1)**self.turn] = None\n\t\t\t\tcaptured = True\n\t\t\t\n\t\t\t# Acknowledge promotion\n\t\t\tif m == 7*(not self.turn):\n\t\t\t\tpromoted = True\n\n\t\telif piece[NAME] == 'R':\n\t\t\tself.board[n][m] = piece[:HAS_MOVED] + '1'\n\n\t\telif piece[NAME] == 'K':\n\t\t\tif piece[HAS_MOVED] == '0':\n\t\t\t\t# Swap rook with king\n\t\t\t\tif n == 2:\n\t\t\t\t\trook = self.board[0][7*self.turn]\n\t\t\t\t\tself.board[0][7*self.turn] = None\n\t\t\t\t\tself.board[3][7*self.turn] = rook\n\t\t\t\telif n == 6:\n\t\t\t\t\trook = self.board[7][7*self.turn]\n\t\t\t\t\tself.board[7][7*self.turn] = None\n\t\t\t\t\tself.board[5][7*self.turn] = rook\n\t\t\tself.board[n][m] = piece[:HAS_MOVED] + '1'\n\t\t\tself.pos_kings[self.turn] = (n, m)\n\n\t\t# Rechange enemy pawns' attribute ``en_passant`` to False\n\t\tfor x in range(8):\n\t\t\tother = self.board[x][3 + (not self.turn)]\n\t\t\tif other is not None and other[NAME] == 'P' and other[COLOR] != piece[COLOR]:\n\t\t\t\tself.board[x][3 + (not self.turn)] = other[:EN_PASSANT] + '0'\n\n\t\treturn captured, promoted", "title": "" }, { "docid": "0b5dea08be735b0502ded2f22c37357d", "score": "0.52782875", "text": "def result(board, action):\n new_board = copy.deepcopy(board)\n if new_board[action[0]][action[1]] == EMPTY:\n new_board[action[0]][action[1]] = 
player(board)\n else:\n raise NameError(\"Invalid move\")\n return new_board", "title": "" }, { "docid": "e549eb0578de669f219ffce6793b4e8f", "score": "0.52762824", "text": "def result(board, action):\n \n new_board = copy.deepcopy(board)\n\n # action is a tuple\n i = action[0]\n j = action[1]\n\n if board[i][j] is EMPTY:\n new_board[i][j] = player(board)\n return new_board\n else:\n raise Exception('Not a valid move')", "title": "" }, { "docid": "fc56862d83b8b88615b40512ac0cf15b", "score": "0.52619845", "text": "def valid_move_for_piece(chess_board, piece_position, player_color):\n\t# TODO: add en passant and promotion for pawns\n\t# TODO: add castling which moves two pieces at the same time\n\n\t# unpack the state of the piece which is should be moved\n\tpiece_row_pos = int(piece_position[0])\n\tpiece_col_pos = int(piece_position[1])\n\tpiece_id = piece_position[2]\n\n\t\"\"\"\n\t-> check_check(self, chess_board) breaks this assertion\n\tassert ((player_color == 'black' and piece_id.isupper())\n\t\tor (player_color == 'white' and piece_id.islower())\n\t), print('Error: piece to move and player color do not match')\n\t\"\"\"\n\n\t# check, whether the given piece position matches the one in the board\n\tassert piece_id == chess_board[piece_row_pos, piece_col_pos]\t\\\n\t, 'Error: mismatching information of pieces in valid_move_for_piece()'\n\n\t# return array with all possible moves by that piece\n\tpossible_moves = np.empty([0], dtype = str)\n\n\t# white pawn\n\tif piece_id == chess_pieces_inverse[\"pawn\"].lower():\n\t\t# unmoved (white) pawn: check for a two-move\n\t\tif piece_row_pos == 1 and chess_board[piece_row_pos+1, piece_col_pos] == \"\":\n\t\t\tif (\n\t\t\t\tchess_board[piece_row_pos + 2, piece_col_pos].isupper()\n\t\t\t\tor chess_board[piece_row_pos + 2, piece_col_pos] == \"\"\n\t\t\t\t):\n\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\tpossible_moves,\n\t\t\t\t\tpiece_row_pos + 2,\n\t\t\t\t\tpiece_col_pos\n\t\t\t\t)\n\n\t\t# check for all other moves (advance one directly or capture\n\t\t# to the adjacent diagonals in direction of advancing)\n\t\tif piece_row_pos < 7:\t# pawn has not reached the end of the board\n\t\t\tif chess_board[piece_row_pos + 1, piece_col_pos] == \"\":\t# empty tile in front\n\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\tpossible_moves,\n\t\t\t\t\tpiece_row_pos + 1,\n\t\t\t\t\tpiece_col_pos\n\t\t\t\t)\n\t\t\t# check the diagonals (in front)\n\t\t\tif piece_col_pos - 1 > -1:\t# diagonal 'up, left' is not out of bounds\n\t\t\t\tif chess_board[piece_row_pos + 1, piece_col_pos - 1].isupper():\n\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\tpiece_row_pos + 1,\n\t\t\t\t\t\tpiece_col_pos - 1\n\t\t\t\t\t)\n\t\t\tif (piece_col_pos + 1 < 7):\t# diagonal 'up, right' is not out of bounds\n\t\t\t\tif chess_board[piece_row_pos+1, piece_col_pos + 1].isupper():\n\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\tpiece_row_pos + 1,\n\t\t\t\t\t\tpiece_col_pos + 1\n\t\t\t\t\t)\n\n\t# black pawn\n\tif piece_id == chess_pieces_inverse[\"pawn\"]:\n\t\t# unmoved (white) pawn: check for a two-move\n\t\tif piece_row_pos == 6 and chess_board[piece_row_pos - 1, piece_col_pos] == \"\":\n\t\t\tif (\n\t\t\t\tchess_board[piece_row_pos - 2, piece_col_pos].isupper()\n\t\t\t\tor chess_board[piece_row_pos - 2, piece_col_pos] == \"\"\n\t\t\t\t):\n\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\tpossible_moves,\n\t\t\t\t\tpiece_row_pos - 2,\n\t\t\t\t\tpiece_col_pos\n\t\t\t\t)\n\n\t\t# check 
for all other moves (advance one directly or capture\n\t\t# to the adjacent diagonals in direction of advancing)\n\t\tif piece_row_pos > 0:\t# pawn has not reached the end of the board\n\t\t\tif chess_board[piece_row_pos-1, piece_col_pos] == \"\":\t# empty tile in front\n\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\tpossible_moves,\n\t\t\t\t\tpiece_row_pos - 1,\n\t\t\t\t\tpiece_col_pos\n\t\t\t\t)\n\t\t\t# check the diagonals (in front)\n\t\t\tif piece_col_pos - 1 > -1:\t# diagonal 'up, left' is not out of bounds\n\t\t\t\tif chess_board[piece_row_pos - 1, piece_col_pos - 1].islower():\n\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\tpiece_row_pos - 1,\n\t\t\t\t\t\tpiece_col_pos - 1\n\t\t\t\t\t)\n\t\t\tif (piece_col_pos + 1 < 8):\t# diagonal 'up, right' is not out of bounds\n\t\t\t\tif chess_board[piece_row_pos - 1, piece_col_pos + 1].islower():\n\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\tpiece_row_pos - 1,\n\t\t\t\t\t\tpiece_col_pos + 1\n\t\t\t\t\t)\n\n\t# knight\n\tif piece_id.lower() == chess_pieces_inverse[\"knight\"].lower():\n\t\t# numpy array containing the eight possible moves of that knight\n\t\tpossible_moves_knight = np.zeros(shape = (8, 2), dtype = int)\n\n\t\t# generate an array containing the eight possible moves with the\n\t\t# knight from it's starting position\n\t\tindex = 0\n\t\tfor i in range(-2, 4, 4):\n\t\t\tfor j in range(-1, 2, 2):\n\t\t\t\tpossible_moves_knight[index] = (piece_row_pos + i, piece_col_pos + j)\n\t\t\t\tindex += 1\n\t\t\t\tpossible_moves_knight[index] = (piece_row_pos + j, piece_col_pos + i)\n\t\t\t\tindex += 1\n\n\t\t# go through each position. If it is inside the board and not occupied\n\t\t# by an own piece, push this position to the array 'possible_moves'\n\t\tfor i in range(8):\n\t\t\tcheck_position_row = possible_moves_knight[i][0]\n\t\t\tcheck_position_col = possible_moves_knight[i][1]\n\n\t\t\tif (check_position_row > -1\n\t\t\t\tand check_position_row < 8\n\t\t\t\tand check_position_col > -1\n\t\t\t\tand check_position_col < 8\n\t\t\t\t):\n\t\t\t\t# empty target tile\n\t\t\t\tif chess_board[check_position_row, check_position_col] == \"\":\n\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\tcheck_position_row,\n\t\t\t\t\t\tcheck_position_col\n\t\t\t\t\t)\n\t\t\t\t# player is white, target piece is black\n\t\t\t\telif (piece_id.islower()\n\t\t\t\tand chess_board[check_position_row, check_position_col].isupper()\n\t\t\t\t):\n\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\tcheck_position_row,\n\t\t\t\t\t\tcheck_position_col\n\t\t\t\t\t)\n\t\t\t\t# player is black, target piece is white\n\t\t\t\telif (piece_id.isupper()\n\t\t\t\tand chess_board[check_position_row, check_position_col].islower()\n\t\t\t\t):\n\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\tcheck_position_row,\n\t\t\t\t\t\tcheck_position_col\n\t\t\t\t\t)\n\n\t# rook or queen (queen = rook + bishop)\n\tif (piece_id.lower() == chess_pieces_inverse[\"rook\"].lower()\n\t\tor piece_id.lower() == chess_pieces_inverse[\"queen\"].lower()\n\t):\n\t\t# obstacle_* == True -> piece in direction detected\n\t\tobstacle_north = False\n\t\tobstacle_south = False\n\t\tobstacle_east = False\n\t\tobstacle_west = False\n\n\t\t# search seven in each direction. 
If a piece is detected in any of\n\t\t# the four cardinal directions, the obstacle_* variable is set True\n\t\t# and any further search in that direction is not continued. Until\n\t\t# then (empty positions) are added to the array of possible moves.\n\t\tfor i in range(7):\n\t\t\t# check all four cardinal directions\n\t\t\t#\n\t\t\t# 'north'\n\t\t\tif piece_row_pos + i + 1 < 8 and obstacle_north == False:\n\t\t\t\t# save the content of the field scrutinised into a variable for brevity\n\t\t\t\tpiece_to_check_north = chess_board[piece_row_pos + i + 1, piece_col_pos]\n\t\t\t\t#\n\t\t\t\tif piece_to_check_north == \"\":\n\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\tpiece_row_pos + i + 1,\n\t\t\t\t\t\tpiece_col_pos\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tobstacle_north = True\t# don't search any further in this direction\n\t\t\t\t\t# add the last piece detected if it can be captured\n\t\t\t\t\tif ((piece_to_check_north.isupper() and player_color == \"white\")\n\t\t\t\t\t\tor (piece_to_check_north.islower() and player_color == \"black\")\n\t\t\t\t\t):\n\t\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\t\tpiece_row_pos + i + 1,\n\t\t\t\t\t\t\tpiece_col_pos\n\t\t\t\t\t\t)\n\t\t\t#\n\t\t\t# 'south'\n\t\t\tif piece_row_pos - i - 1 > -1 and obstacle_south == False:\n\t\t\t\t# save the content of the field scrutinised into a variable for brevity\n\t\t\t\tpiece_to_check_south = chess_board[piece_row_pos - i - 1, piece_col_pos]\n\t\t\t\t#\n\t\t\t\tif piece_to_check_south == \"\":\n\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\tpiece_row_pos - i - 1,\n\t\t\t\t\t\tpiece_col_pos\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tobstacle_south = True\t# don't search any further in this direction\n\t\t\t\t\t# add the last piece detected if it can be captured\n\t\t\t\t\tif ((piece_to_check_south.isupper() and player_color == \"white\")\n\t\t\t\t\t\tor (piece_to_check_south.islower() and player_color == \"black\")\n\t\t\t\t\t):\n\t\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\t\tpiece_row_pos - i - 1,\n\t\t\t\t\t\t\tpiece_col_pos\n\t\t\t\t\t\t)\n\t\t\t#\n\t\t\t# 'east'\n\t\t\tif piece_col_pos + i + 1 < 8 and obstacle_east == False:\n\t\t\t\t# save the content of the field scrutinised into a variable for brevity\n\t\t\t\tpiece_to_check_east = chess_board[piece_row_pos, piece_col_pos + i + 1]\n\t\t\t\t#\n\t\t\t\tif piece_to_check_east == \"\":\n\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\tpiece_row_pos,\n\t\t\t\t\t\tpiece_col_pos + i + 1\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tobstacle_east = True\n\t\t\t\t\t# add the last piece detected if it can be captured\n\t\t\t\t\tif ((piece_to_check_east.isupper() and player_color == \"white\")\n\t\t\t\t\t\tor (piece_to_check_east.islower() and player_color == \"black\")\n\t\t\t\t\t):\n\t\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\t\tpiece_row_pos,\n\t\t\t\t\t\t\tpiece_col_pos + i + 1\n\t\t\t\t\t\t)\n\t\t\t#\n\t\t\t# 'west'\n\t\t\tif piece_col_pos - i - 1 > -1 and obstacle_west == False:\n\t\t\t\t# save the content of the field scrutinised into a variable for brevity\n\t\t\t\tpiece_to_check_west = chess_board[piece_row_pos, piece_col_pos - i - 1]\n\t\t\t\t#\n\t\t\t\tif piece_to_check_west == \"\":\n\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\tpiece_row_pos,\n\t\t\t\t\t\tpiece_col_pos - i - 
1\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tobstacle_west = True\n\t\t\t\t\t# add the last piece detected if it can be captured\n\t\t\t\t\tif ((piece_to_check_west.isupper() and player_color == \"white\")\n\t\t\t\t\t\tor (piece_to_check_west.islower() and player_color == \"black\")\n\t\t\t\t\t):\n\t\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\t\tpiece_row_pos,\n\t\t\t\t\t\t\tpiece_col_pos - i - 1\n\t\t\t\t\t\t)\n\n\t# bishop or queen\n\tif (piece_id.lower() == chess_pieces_inverse[\"bishop\"].lower()\n\t\tor piece_id.lower() == chess_pieces_inverse[\"queen\"].lower()\n\t):\n\t\t# obstacle_* == True -> piece in direction detected\n\t\tobstacle_NE = False\t# northeast\n\t\tobstacle_SE = False\t# southeast\n\t\tobstacle_SW = False # southwest\n\t\tobstacle_NW = False # northwest\n\n\t\t# search seven in each direction. If a piece is detected in any of\n\t\t# the four directions (NE, SE, SW, NW), the obstacle_* variable is\n\t\t# set True and any further search in that direction is not continued.\n\t\t# Until then (empty positions) are added to the array of possible moves.\n\t\tfor i in range(7):\n\t\t\t# check all four directions (NE, SE, SW, NW)\n\t\t\t#\n\t\t\t# 'northeast'\n\t\t\tif (piece_row_pos + i + 1 < 8\n\t\t\t\tand piece_col_pos + i + 1 < 8\n\t\t\t\tand obstacle_NE == False\n\t\t\t):\n\t\t\t\t# save the content of the field scrutinised into a variable for brevity\n\t\t\t\tpiece_to_check_north = chess_board[\n\t\t\t\t\tpiece_row_pos + i + 1,\n\t\t\t\t\tpiece_col_pos + i + 1\n\t\t\t\t]\n\t\t\t\t#\n\t\t\t\tif piece_to_check_north == \"\":\n\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\tpiece_row_pos + i + 1,\n\t\t\t\t\t\tpiece_col_pos + i + 1\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tobstacle_NE = True\t# don't search any further in this direction\n\t\t\t\t\t# add the last piece detected if it can be captured\n\t\t\t\t\tif ((piece_to_check_north.isupper() and player_color == \"white\")\n\t\t\t\t\t\tor (piece_to_check_north.islower() and player_color == \"black\")\n\t\t\t\t\t):\n\t\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\t\tpiece_row_pos + i + 1,\n\t\t\t\t\t\t\tpiece_col_pos + i + 1\n\t\t\t\t\t\t)\n\t\t\t#\n\t\t\t# 'southeast'\n\t\t\tif (piece_row_pos - i - 1 > -1\n\t\t\t\tand piece_col_pos + i + 1 < 8\n\t\t\t\tand obstacle_SE == False\n\t\t\t):\n\t\t\t\t# save the content of the field scrutinised into a variable for brevity\n\t\t\t\tpiece_to_check_north = chess_board[\n\t\t\t\t\tpiece_row_pos - i - 1,\n\t\t\t\t\tpiece_col_pos + i + 1\n\t\t\t\t]\n\t\t\t\t#\n\t\t\t\tif piece_to_check_north == \"\":\n\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\tpiece_row_pos - i - 1,\n\t\t\t\t\t\tpiece_col_pos + i + 1\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tobstacle_SE = True\t# don't search any further in this direction\n\t\t\t\t\t# add the last piece detected if it can be captured\n\t\t\t\t\tif ((piece_to_check_north.isupper() and player_color == \"white\")\n\t\t\t\t\t\tor (piece_to_check_north.islower() and player_color == \"black\")\n\t\t\t\t\t):\n\t\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\t\tpiece_row_pos - i - 1,\n\t\t\t\t\t\t\tpiece_col_pos + i + 1\n\t\t\t\t\t\t)\n\t\t\t#\n\t\t\t# 'southwest'\n\t\t\tif (piece_row_pos - i - 1 > -1\n\t\t\t\tand piece_col_pos - i - 1 > -1\n\t\t\t\tand obstacle_SW == False\n\t\t\t):\n\t\t\t\t# save the content of the field scrutinised into a variable for 
brevity\n\t\t\t\tpiece_to_check_north = chess_board[\n\t\t\t\t\tpiece_row_pos - i - 1,\n\t\t\t\t\tpiece_col_pos - i - 1\n\t\t\t\t]\n\t\t\t\t#\n\t\t\t\tif piece_to_check_north == \"\":\n\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\tpiece_row_pos - i - 1,\n\t\t\t\t\t\tpiece_col_pos - i - 1\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tobstacle_SW = True\t# don't search any further in this direction\n\t\t\t\t\t# add the last piece detected if it can be captured\n\t\t\t\t\tif ((piece_to_check_north.isupper() and player_color == \"white\")\n\t\t\t\t\t\tor (piece_to_check_north.islower() and player_color == \"black\")\n\t\t\t\t\t):\n\t\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\t\tpiece_row_pos - i - 1,\n\t\t\t\t\t\t\tpiece_col_pos - i - 1\n\t\t\t\t\t\t)\n\t\t\t#\n\t\t\t# 'northwest'\n\t\t\tif (piece_row_pos + i + 1 < 8\n\t\t\t\tand piece_col_pos - i - 1 > -1\n\t\t\t\tand obstacle_NW == False\n\t\t\t):\n\t\t\t\t# save the content of the field scrutinised into a variable for brevity\n\t\t\t\tpiece_to_check_north = chess_board[\n\t\t\t\t\tpiece_row_pos + i + 1,\n\t\t\t\t\tpiece_col_pos - i - 1\n\t\t\t\t]\n\t\t\t\t#\n\t\t\t\tif piece_to_check_north == \"\":\n\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\tpiece_row_pos + i + 1,\n\t\t\t\t\t\tpiece_col_pos - i - 1\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tobstacle_NW = True\t# don't search any further in this direction\n\t\t\t\t\t# add the last piece detected if it can be captured\n\t\t\t\t\tif ((piece_to_check_north.isupper() and player_color == \"white\")\n\t\t\t\t\t\tor (piece_to_check_north.islower() and player_color == \"black\")\n\t\t\t\t\t):\n\t\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\t\tpiece_row_pos + i + 1,\n\t\t\t\t\t\t\tpiece_col_pos - i - 1\n\t\t\t\t\t\t)\n\n\t# king\n\tif piece_id.lower() == chess_pieces_inverse[\"king\"].lower():\n\t\t# iterate over the eight next neighbours\n\t\tfor i in [-1, 0, 1]:\n\t\t\tfor j in [-1, 0, 1]:\n\t\t\t\t\"\"\"\n\t\t\t\tonly add the position, if the target is in the boundary\n\t\t\t\tof the board and it is not the piece which is going to\n\t\t\t\tbe moved itself\n\t\t\t\t\"\"\"\n\t\t\t\tif (\n\t\t\t\t\t(j != 0 or i != 0)\n\t\t\t\t\tand\n\t\t\t\t\t(\n\t\t\t\t\t\t0 <= piece_row_pos + i < 8\n\t\t\t\t\t\tand 0 <= piece_col_pos + j < 8\n\t\t\t\t\t)\n\t\t\t\t):\n\t\t\t\t\t# what is on the board at the position we check at the moment?\n\t\t\t\t\tmove_to_check = chess_board[piece_row_pos + i, piece_col_pos + j]\n\n\t\t\t\t\t# enemy piece on this position or it is empty\n\t\t\t\t\t# (add this position to the list of possible moves)\n\t\t\t\t\tif ((piece_id.isupper() == move_to_check.islower())\n\t\t\t\t\t\tor move_to_check == \"\"\n\t\t\t\t\t):\n\t\t\t\t\t\tpossible_moves = add_to_poss_moves(\n\t\t\t\t\t\t\tpossible_moves,\n\t\t\t\t\t\t\tpiece_row_pos + i,\n\t\t\t\t\t\t\tpiece_col_pos + j\n\t\t\t\t\t\t)\n\n\t# sort the returning array (ascending by their integer contents)\n\tpossible_moves = np.array(sorted(possible_moves, key = int), dtype = str)\n\n\treturn possible_moves", "title": "" }, { "docid": "d07f279dd45992e92d6f17b217cc5e85", "score": "0.52582175", "text": "def move():\n\n current_channel = request.form.get(\"channel_id\")\n if ((current_channel in channels.keys()) and\n (channels.get(current_channel, \"\").get('accepted_invite') == True)):\n\n person_submitted = str(request.form.get('user_name'))\n in_channel = channels[current_channel]\n 
current = in_channel.get('current_player')\n\n        if current == person_submitted:\n            position = \" \"\n\n            # if player submits a text stating a move\n            input_position = request.form.get('text')\n            if input_position:\n                position = input_position\n\n            # check if position is valid\n            if position in in_channel.keys():\n                current_entry = channels.get(current_channel).get(position)\n                # when a square is taken\n                if current_entry != \" \":\n                    return \"This square is already taken. Please choose another.\"\n\n                # choosing an empty square\n                else:\n                    current_letter = in_channel['players'][person_submitted]['letter']\n                    in_channel[position] = current_letter\n\n                    # checks if the move constitutes a win\n                    if helper.winner(channels, current_channel):\n                        in_channel['winner'] = True\n\n                        return redirect(url_for('board', channel_id=current_channel))\n\n                    # switching between current player and other player\n                    if channels.get(current_channel).get('current_player') == in_channel['creator']:\n                        in_channel['current_player'] = in_channel['invited_user_name']\n\n                    else:\n                        in_channel['current_player'] = in_channel['creator']\n\n                    return redirect(url_for('board', channel_id=current_channel))\n\n            else:\n                # if it is a wrong move, valid moves are listed out\n\n                return \"Please enter a valid move: 'top-left', 'top-middle', \" \\\n                       \"'top-right', 'middle-left', 'middle', 'middle-right', \" \\\n                       \"'bottom-left', 'bottom-middle', 'bottom-right'.\"\n\n        else:\n            return \"Players make a move by entering /ttt-move [position].\"\n\n    else:\n        return \"You do not have permission to do this at this time.\"", "title": "" }, { "docid": "2565adda53b82064f375a9bcfb5e1ca0", "score": "0.5256299", "text": "def react_to_piece_move(self, piece: \"Piece\") -> bool:\n        pass", "title": "" }, { "docid": "ba05e73a3437748a0ed9e1e1b33c1024", "score": "0.5222349", "text": "def make_move(self, board, game, start_col, start_row, end_col, end_row):\r\n        self.set_legal_moves(game)\r\n        #print(self._legal_moves, end_col, end_row)\r\n        if [end_col, end_row] in self._legal_moves:\r\n            for i in game.get_all_pieces():\r\n                if i.get_coords() == [end_col, end_row]:\r\n                    if i.get_color() == self._color:\r\n                        print(\"False, same color piece at location\") # same color piece is at that location\r\n                        return False\r\n                    if i.get_color() != self._color: # if opposing team piece here, then capture it\r\n                        i.set_captured()\r\n            self.set_moved(end_col, end_row) # move piece to location\r\n            self.set_all_next_steps(game) # update next steps to see if general will be in check\r\n            game.set_general_check(self._color)\r\n            if game.is_in_check(self._color) is True:\r\n                self.set_moved(start_col, start_row)\r\n                self.get_legal_moves() # undo populated moves since move leaves same team general in check\r\n                for i in game.get_all_pieces():\r\n                    if i.get_coords() == [end_col, end_row]: # pushing this through so we'll have to comb through for syntax later\r\n                        if i.get_color() != self._color:\r\n                            i.set_uncapture()\r\n                #print(\"False\")\r\n                return False\r\n            board.update_board(start_col, start_row, end_col, end_row, self._board_label)\r\n            if self.get_color() == \"black\" and game.is_in_check(\"red\") is True:\r\n                red_general = game.get_red_general(self, game)\r\n                game.set_game_state(self, red_general)\r\n            if self.get_color() == \"red\" and game.is_in_check(\"black\") is True:\r\n                black_general = game.get_black_general(self, game)\r\n                game.set_game_state(self, black_general)\r\n            game.set_game_state_stalemate()\r\n            if game.get_player_turn() == \"red\":\r\n                game._player_turn = \"black\"\r\n                #print(\"True\")\r\n                return True\r\n            if 
game.get_player_turn() == \"black\":\r\n game._player_turn = \"red\"\r\n #print(\"True\")\r\n return True", "title": "" }, { "docid": "1b65f251e87d769bcd270e1e4fcba744", "score": "0.5221382", "text": "def step(self, action: int, player: int, train=False):\n if self.winner != 0:\n return self.connect4, 100 if player == self.winner else -10000, True\n if self.tie(train=train):\n return self.connect4, 0, True\n if not self.action_is_valid(action):\n print('LOOK')\n print(self)\n assert(self.action_is_valid(action))\n self.connect4.apply_action(action, player, train=train)\n\n finished, which_player = self.connect4.winner_exists(train=train)\n\n if finished:\n self.winner = which_player\n return self.connect4, 100 if player == self.winner else -10000, finished\n\n return self.connect4, -1, finished", "title": "" }, { "docid": "62c4310e75fced33c2463afa8b905e42", "score": "0.52207816", "text": "def result(board, action):\n # Deep copy of the original board to modify it instead\n result_board = deepcopy(board)\n\n # Getting current turn\n turn = player(board)\n\n row, column = action[0], action[1]\n\n if board[row][column] != EMPTY:\n raise Exception('Invalid action!')\n\n result_board[row][column] = turn\n\n return result_board", "title": "" }, { "docid": "ddede6f27f88dc43b4f1829618d17cfc", "score": "0.52111036", "text": "def hit_result(self, whose_turn: str) -> dict:\n\n hit_chance = int\n modifier = int\n attack = dict\n attack_successful = False\n damage_given = 0\n dice_result = self.dice(20, 1)\n\n if whose_turn == \"player\":\n attack = self.player.attacks[self.attack_id]\n if attack['type_attack'] == 'Melee':\n modifier = round((self.player.strength + self.player.dexterity) / 3) - 5\n elif attack['type_attack'] == 'Long Range':\n modifier = round((self.player.strength + self.player.chance) / 3) - 5\n elif attack['type_attack'] == 'Magic':\n modifier = round((self.player.intelligence + self.player.wisdom) / 3) - 5\n\n if modifier + dice_result < self.mob['stats']['armor_class']\\\n or dice_result == 1:\n attack_successful = False\n else:\n attack_dice = self.dice(attack['random_diapason'], attack['count_of_random'])\n attack_successful = True\n damage_given += attack_dice\n\n for uid, item in self.player.weapons.items():\n # print(item)\n damage_given += choice(range(item['damage_min'], item['damage_max']))\n\n else:\n attack = self.mob['attacks'][choice(list(self.mob['attacks'].keys()))]\n if attack['type_attack'] == 'Melee':\n modifier = round((self.mob['stats']['strength'] + self.mob['stats']['dexterity']) / 3) - 5\n elif attack['type_attack'] == 'Long Range':\n modifier = round((self.mob['stats']['strength'] + self.mob['stats']['chance']) / 3) - 5\n elif attack['type_attack'] == 'Magic':\n modifier = round((self.mob['stats']['intelligence'] + self.mob['stats']['wisdom']) / 3) - 5\n\n if modifier + dice_result < self.player.armor_class\\\n or dice_result == 1:\n attack_successful = False\n else:\n attack_dice = self.dice(attack['random_diapason'], attack['count_of_random'])\n attack_successful = True\n damage_given += int((attack_dice * 0.3) + (0.1 * self.player.hits))\n\n # TODO\n # for item in self.player.weapons:\n # damage_given += choice(range(item['damage_min'], item['damage_max']))\n\n return {\n \"attack\": attack,\n \"attack_successful\": str(attack_successful),\n \"damage_given\": damage_given,\n }", "title": "" }, { "docid": "d66341fe22931a418160b72e6e8c8b31", "score": "0.52108103", "text": "def result(self):\r\n if self.any_legal_moves():\r\n return None\r\n if 
self.under_check() == 0:\r\n return 2\r\n return 1 - (self.turn % 2)", "title": "" }, { "docid": "73361acf63237558b2e6dd5f091a0205", "score": "0.5205275", "text": "def play_move(self):\n\t\tif self.current_player == 'LogicAgent':\n\t\t\tif len(self.valid)>1:\n\t\t\t\tstrike, pos = self.check_strike() \n\t\t\t\tif strike is True:\n\t\t\t\t\tself.state[int(pos)] = self.turn \n\t\t\t\t\tself.valid.remove(pos)\n\t\t\t\t\tself.isWinner = self.check_winner(self.state)\n\t\t\t\telse:\t\n\t\t\t\t\tpos = random.choice(self.valid)\n\t\t\t\t\tself.state[int(pos)] = self.turn\n\t\t\t\t\tself.valid.remove(pos)\t\t\t\t\t\t\t\t\t\t\n\t\t\telif len(self.valid) == 1:\n\t\t\t\tpos = self.valid[0]\n\t\t\t\tself.state[int(pos)] = self.turn\n\t\t\t\tself.valid.remove(pos)\n\t\t\t\tself.isWinner = self.check_winner(self.state)\n\t\t\tstate = self.list_to_string(self.state) \t\t\t\t\n\t\telif self.current_player == 'QLAgent':\n\t\t\tstate = self.list_to_string(self.state) \n\t\t\taction = self.choose_action(state) \n\t\t\tself.state[int(action)] = self.turn \n\t\t\tself.valid.remove(action)\n\t\t\tself.isWinner = self.check_winner(self.state)\n\t\telif self.current_player == 'Human':\n\t\t\tpos = input(\"Enter the position where you want to place \" + self.turn) \n\t\t\tprint('You selected : ' + pos)\n\t\t\tself.state[int(pos)] = self.turn\n\t\t\tself.valid.remove(pos)\n\t\t\tself.isWinner = self.check_winner(self.state)\n\t\telif self.current_player == 'Random':\n\t\t\tpos = random.choice(self.valid)\n\t\t\tself.state[int(pos)] = self.turn\n\t\t\tself.valid.remove(pos)\t\t\t\n\t\t\tstate = self.list_to_string(self.state)\n\t\t\tself.isWinner = self.check_winner(self.state)", "title": "" }, { "docid": "f73e177cf2b3615cae6ce65db583923c", "score": "0.52018374", "text": "def player_movement(player_move,place_holder,p_steps,display,n,is_win):\r\n response = player_choice(place_holder)\r\n player_move.append(response)\r\n place_holder.remove(player_move[p_steps])\r\n if n == 1:\r\n display[response] = X\r\n else:\r\n display[response] = O\r\n current_board_positions(display)\r\n if p_steps >=3:\r\n response2 = is_winning_player\r\n if response2[0] == True:\r\n is_win[0] = True", "title": "" }, { "docid": "65cd9247f9c330151cbad7e34d8cecfd", "score": "0.5186677", "text": "def result(board, action):\n # Makes a copy of the board (to avoid changing the previous one)\n board_after_action = deepcopy(board)\n\n # Check if input is in tuple format\n try:\n i, j = action\n except:\n raise Exception(\"action input is not in the tuple format\")\n\n # Check if coordinates are int, if so, change the board with the action\n if isinstance(i, int) and isinstance(j, int):\n board_after_action[i][j] = player(board)\n else:\n raise Exception(\"move indexes are not integers\")\n\n return board_after_action", "title": "" }, { "docid": "aa16afb30fc79b302e6f280ff19418ac", "score": "0.5183245", "text": "def result(board, action):\n # Get current player\n current_player = player(board)\n result_board = deepcopy(board)\n target_cell = result_board[action[0]][action[1]]\n # Raise an error when trying to move on cells already occupied\n if target_cell != EMPTY:\n raise ValueError(\"Invalid move...\")\n # Take an action as the current player\n else:\n result_board[action[0]][action[1]] = current_player\n return result_board", "title": "" }, { "docid": "12f1993c55e2db37a161014ac64da8bd", "score": "0.51768166", "text": "def result(board, action):\n board_opt = deepcopy(board)\n i, j = action\n if board_opt[i][j] is not None:\n raise 
ValueError(\"This action is not possible\")\n board_opt[i][j] = player(board_opt)\n return board_opt", "title": "" }, { "docid": "4a8c340384e5edb01aa7b0ecd6cebd97", "score": "0.5175916", "text": "def process(self, player):\n return MOVE_TO_START", "title": "" }, { "docid": "0559d2b6009480175e165be1d0b51ff3", "score": "0.5170412", "text": "def process_return(self, command, data):\n if command == self.commands['pos']:\n if self.simulate:\n position = data\n else:\n position = self.calc_pos(data)\n rospy.logdebug('raw position data: %s', position)\n names = []\n positions = []\n for motor_id in self.available_motor_ids:\n raw_position = position[motor_id]\n rad_position = (\n (raw_position - int(\n self.joint_config[motor_id]['neutral']\n )) * int(\n self.joint_config[motor_id]['inversion']\n )\n * 2*math.pi/1023)\n names.append(self.joint_config[motor_id]['name'])\n positions.append(rad_position)\n new_msg = JointState()\n new_msg.name = names\n new_msg.position = positions\n new_msg.header.stamp = rospy.Time.now()\n self.joint_publisher.publish(new_msg)\n self.current_positions = position\n self.awaiting_pos_resp = False\n\n elif command == self.commands['current']:\n # current = self.calc_current(data)\n pass\n elif command == self.feedback['seq_num']:\n data = ord(data)\n self.seq_num = data\n rospy.loginfo('got seq num: %s', data)", "title": "" }, { "docid": "9699598554bd658de96c2f307994a1fa", "score": "0.51662195", "text": "def action(self, turns):\n self._board.check_shrink_board(turns)\n if self._board.get_is_place_phase():\n coords_list = self._board.get_empty_tiles(self._color)\n coord = self.minimax_decision(coords_list, turns)\n #coord = coords_list[random.randint(0, len(coords_list) - 1)]\n row, col = coord[0], coord[1]\n self._board.place_piece(self._color, (row, col))\n return_val = col, row\n\n else:\n coords_list = self._board.get_available_moves(self._color)\n coord = self.minimax_decision(coords_list, turns)\n\n #coord = coords_list[random.randint(0, len(coords_list) - 1)]\n source, dest = coord[0], coord[1]\n source_row, source_col, dest_row, dest_col = source[0], source[1], dest[0], dest[1]\n\n self._board.move_piece(self._color, source_row, source_col, dest_row, dest_col)\n\n return_val = (source_col, source_row), (dest_col, dest_row)\n\n self._board.check_update_phase(turns)\n\n return return_val", "title": "" }, { "docid": "1ca9f140d649d09a7e98deb89603ee0e", "score": "0.5164089", "text": "def parse_request(self, request: dict) -> None:\n if \"wait\" in request and request[\"wait\"]:\n self._wait = True\n else:\n self._wait = False\n\n self.available_moves = []\n self.available_switches = []\n self.can_mega_evolve = False\n self.can_z_move = False\n self.trapped = False\n\n if \"active\" in request:\n active = request[\"active\"][0]\n if \"trapped\" in active and active[\"trapped\"]:\n self.trapped = True\n for i, move in enumerate(active[\"moves\"]):\n if \"disabled\" not in move or not move[\"disabled\"]:\n self.available_moves.append((i + 1, move))\n if \"canMegaEvo\" in active and active[\"canMegaEvo\"]:\n self.can_mega_evolve = True\n if \"canZMove\" in active:\n self.can_z_move = active[\"canZMove\"]\n if \"maybeTrapped\" in active:\n active[\"maybeTrapped\"]\n\n side = request[\"side\"]\n if not self.trapped:\n for i, pokemon in enumerate(side[\"pokemon\"]):\n if not pokemon[\"active\"] and pokemon[\"condition\"] != \"0 fnt\":\n self.available_switches.append((i + 1, pokemon[\"ident\"]))\n\n for pokemon_info in side[\"pokemon\"]:\n pokemon = 
self._get_pokemon_from_reference(pokemon_info[\"ident\"])\n pokemon.update_from_request(pokemon_info)\n\n self._turn += 1", "title": "" }, { "docid": "8d659c0ceac02ecc04490c9d5f326c8d", "score": "0.5160668", "text": "def result(board, action):\n i,j=action\n if board[i][j] != EMPTY:\n raise Exception('Invalid move')\n newBoard = copy.deepcopy(board)\n currPlayer = player(board)\n newBoard[i][j] = currPlayer\n return newBoard", "title": "" }, { "docid": "5dd3f2acd6212105787c6b9f42c007fb", "score": "0.51519644", "text": "def loop_round(self):\n turn = 0\n self.alive = True\n while(self.alive):\n # Step 0: Wait for turn notification and process it\n reply = self._get_reply()\n self._process_notification(reply, turn)\n\n\n # logging.debug(\"===PlayerRequest===\")\n\n # Step 1: Construct a turn payload\n\n if(self.defense.update(self.ships, reply[\"hitReport\"], reply[\"pingReport\"])):\n self.last_special = \"M\"\n self.resources -= self.defense.job_assign(self.ships, self.my_map)\n\n if self.last_special == None:\n self.danger_grid.update(turn, self.my_map)\n\n for ship in self.ships:\n ship_danger = self.danger_grid.get_danger(ship)\n\n if ship.get_ship_type() == 'M':\n danger_threshold = 1\n elif ship.get_ship_type()== 'P':\n danger_threshold = 2\n else:\n danger_threshold = 3\n\n if ship_danger > danger_threshold:\n best_new_danger = ship_danger\n best = (ship.x, ship.y, ship.orient)\n for i in range(10):\n new_loc = self.my_map.find_best_location(ship.get_ship_length())\n new_danger = self.danger_grid.get_danger(ship.__class__(*new_loc))\n if new_danger < best_new_danger:\n best_new_danger = new_danger\n best = new_loc\n if new_danger == 0:\n break\n\n if best_new_danger < ship_danger:\n self.last_special = \"M\"\n # logging.debug(\"Ship moved: %c\",\n # ship.get_ship_type())\n ship.move(*new_loc)\n break\n\n # self.strat.job_assign(self.ships, 6)\n\n available_ships = [x for x in self.ships if x.has_work() == False]\n mainship = [x for x in self.ships if x.get_ship_type() == 'M'][0]\n destroyers = [x for x in available_ships if x.get_ship_type() == 'D']\n pilots = [x for x in available_ships if x.get_ship_type() == 'P']\n\n # always make sure there is enough resource to move main ship next round\n if (len(pilots) < 5):\n self.resources -= (5 - len(pilots)) * 50\n\n # Process a burst request\n if len(self.burst_queue) > 0:\n if len(destroyers) > 0:\n if self.last_special == None and self.resources >= 250:\n x,y = self.burst_queue.pop(0)\n destroyer = destroyers.pop(0)\n destroyer.burst_fire(x,y)\n self.last_special = \"B\"\n self.resources -= 250\n elif self.resources > 2500:\n if len(destroyers) > 0:\n if self.last_special == None and self.resources >= 250:\n x,y = self.enemypdf.next_hit()\n destroyer = destroyers.pop(0)\n destroyer.burst_fire(x,y)\n self.last_special = \"B\"\n self.resources -= 250\n\n # Process a scan request\n if self.last_special == None:\n if len(pilots) > 0 and self.resources >= 110:\n x,y = self.enemypdf.next_scan()\n pilot = pilots.pop(0)\n pilot.sonar(x,y)\n # logging.debug(\"Setting last_special\")\n # logging.debug(\"Next scan: (%s)\",(x,y))\n self.last_special = \"S\"\n self.last_scan = (x,y)\n self.resources -= 110\n\n # Process attack request\n if mainship.action == \"N\" and self.resources >= 50:\n destroyers.append(mainship)\n self.resources -= 50\n\n is_replay = np.random.random() < self.replay_rate\n if is_replay:\n # logging.debug(self.replay_stack)\n while self.replay_stack.size() > 0:\n attack_item = self.replay_stack.pop()\n x,y = 
attack_item.coord\n while len(destroyers) > 0 and attack_item.nAttacks > 0 and self.resources >= 50:\n d = destroyers.pop(0)\n d.fire(x,y)\n self.replay_check_list.append(AttackItem((x,y),1))\n self.resources -= 50\n attack_item.nAttacks -= 1\n\n # ran out of destroyers\n if len(destroyers) == 0 or self.resources < 50:\n # didn't get to finish the attack\n if attack_item.nAttacks > 0:\n self.replay_stack.push(attack_item)\n break\n else:\n while len(self.attack_queue) > 0:\n attack_item = self.attack_queue.pop(0)\n x,y = attack_item.coord\n while len(destroyers) > 0 and attack_item.nAttacks > 0 and self.resources >= 50:\n d = destroyers.pop(0)\n d.fire(x,y)\n self.resources -= 50\n attack_item.nAttacks -= 1\n\n # ran out of destroyers\n if len(destroyers) == 0 or self.resources < 50:\n # didn't get to finish the attack\n if attack_item.nAttacks > 0:\n self.attack_queue.insert(0,attack_item)\n break\n\n # if is_replay:\n # logging.debug(\"replay attack! %%%f\",self.replay_rate)\n # logging.debug(self.replay_check_list)\n\n\n # if we have leftover destroyers, attack most probable point\n numPossibleAttacks = min(len(destroyers),self.resources/50)\n # start_time = time.time()\n coords = self.enemypdf.next_hits(numPossibleAttacks)\n # end_time = time.time()\n # logging.debug(\"Elapsed time was %g seconds\" % (end_time - start_time))\n for i in range(numPossibleAttacks):\n d = destroyers.pop(0)\n x,y = coords[i]\n d.fire(x,y)\n self.resources -= 50\n\n\n # send payload\n payload = {'playerToken': self.token}\n shipactions = map(lambda x: x.getActionJSON(), self.ships)\n payload['shipActions'] = shipactions\n\n # Step 2: Transmit turn payload and wait for the reply\n logging.info(\"Sending turn %d...\",turn)\n self._send_payload(payload)\n\n # Step 3: Wait for turn response and process it\n reply = self._get_reply()\n self._process_reply(reply)\n\n turn += 1", "title": "" }, { "docid": "e661c3313a06044ba903c30910493c3b", "score": "0.51447356", "text": "def step(self, action):\n self.start_turn()\n status = Terminals.OK\n try:\n # Try to take the action.\n message = self.take_action(action)\n #assert not self.last_action_impossible\n if message is None:\n message = rcv_msg(self.socket)\n except zmq.error.Again:\n print(\"Error when sending action, process\", self.proc_id)\n message = \"\"\n status = Terminals.CONN_ERROR\n self.goal_reached = Goals.CONN_ERROR\n \n if \"paniclog\" in message or \"***dir***\" in message:\n raise Exception(\"Unexpected message received from NetHack: \" + message)\n \n if self.should_end_episode():\n verboseprint(\"Game went too long, terminating...\")\n status = Terminals.TIME_EXCEEDED\n self.goal_reached = Goals.TIME_EXCEEDED\n \n if status is Terminals.OK:\n status, self.goal_reached = self.get_status(message)\n \n if status is Terminals.OK:\n self.process_msg(message)\n self.state = self.get_state()\n \n # Get reward for the given status.\n reward = self.get_reward(status)\n \n self.end_turn()\n \n # Check if episode is over.\n episode_over = status is not Terminals.OK\n if episode_over:\n assert self.goal_reached is not None \n self.end_episode()\n #else:\n # assert self.action_took_effect()\n \n valid_action_indices = self.get_valid_action_indices() if not episode_over else np.array([])\n return self.state, reward, episode_over, {}, valid_action_indices", "title": "" }, { "docid": "5f9749e318d4c616fdea5f3380528ea8", "score": "0.5139851", "text": "def result(board, action):\n if action not in actions(board):\n raise ValueError('Not a valid action')\n else:\n 
tempBoard = copy.deepcopy(board)\n turn = player(tempBoard)\n tempBoard[action[0]][action[1]] = turn\n return tempBoard", "title": "" }, { "docid": "d852fc0a542c279197690e4c15a288c0", "score": "0.51321757", "text": "def step(self, action):\n\n # Get Action\n if(self.face_stone_move == True):\n self.agent_host.sendCommand('move 1')\n time.sleep(.2)\n\n if(self.face_diamondblock_move == True):\n self.agent_host.sendCommand('move 1')\n time.sleep(.2)\n self.agent_host.sendCommand('move 1')\n time.sleep(.2)\n\n # if(self.face_stone_move == False and self.face_diamondblock_move == False and self.face_brick_move == False and self.face_gold_move == False and self.face_diamond_move == False and self.face_coal_move == False):\n # print(\"im here\")\n # if(action != 0):\n # if(action != 3):\n # command = 'move 1'\n # self.agent_host.sendCommand(command)\n # command = self.action_dict[action]\n # self.agent_host.sendCommand(command)\n # time.sleep(.2)\n # self.episode_step += 1\n \n #self.obs, self.allow_break_action = self.get_observation(world_state)\n #print(\"true or false: \", self.select_move_action)\n\n if(self.face_brick_move == True):\n if(action != 0):\n if(action != 3):\n command = 'move 1'\n self.agent_host.sendCommand(command)\n command = self.action_dict[action]\n self.agent_host.sendCommand(command)\n time.sleep(.2)\n self.episode_step += 1\n \n if(self.face_gold_move == True):\n if(action != 3):\n command = 'move 1'\n self.agent_host.sendCommand(command)\n command = self.action_dict[action]\n self.agent_host.sendCommand(command)\n time.sleep(.2)\n self.episode_step += 1\n\n if(self.face_diamond_move == True):\n if(action != 0):\n if(action == 3):\n command = self.action_dict[action]\n self.agent_host.sendCommand(command)\n time.sleep(0.8)\n self.agent_host.sendCommand('move 1')\n else:\n command = self.action_dict[action]\n self.agent_host.sendCommand(command)\n time.sleep(.2)\n self.episode_step += 1\n\n if(self.face_coal_move == True):\n if(action != 0):\n if(action == 3):\n command = self.action_dict[action]\n self.agent_host.sendCommand(command)\n time.sleep(0.8)\n self.agent_host.sendCommand('move 1')\n else:\n command = self.action_dict[action]\n self.agent_host.sendCommand(command)\n time.sleep(.2)\n self.episode_step += 1\n\n # Get Observation\n world_state = self.agent_host.getWorldState()\n for error in world_state.errors:\n print(\"Error:\", error.text)\n #self.obs = self.get_observation(world_state)\n self.obs, self.face_brick_move, self.face_gold_move, self.face_diamond_move, self.face_stone_move, self.face_diamondblock_move, self.face_coal_move = self.get_observation(world_state)\n\n # Get Done\n done = not world_state.is_mission_running\n\n # Get Reward\n reward = 0\n for r in world_state.rewards:\n reward += r.getValue()\n self.episode_return += reward\n\n return self.obs, reward, done, dict()", "title": "" }, { "docid": "dcdad7399f46979e5bb4ca6cd353260d", "score": "0.51242596", "text": "def form_response(msg, move, current_player, player2, computer_player, board, cols):\n a = []\n\n if computer_player and current_player == player2:\n a = get_computer_move(player2, move)\n result = move.make_move(current_player, a[0], a[1], a[2], a[3])\n return [msg[0],\n \"(^u^) \" + result[1][0:6] + \"Computer\" + result[1][14:] + \" (^u^)\\n\",\n board.get_grid()]\n elif len(msg[1]) != 4 or not translate(msg[1], a, cols):\n return [5, \"(v_v) Please enter valid coordinates (v_v)\\n\", board.get_grid()]\n else:\n result = move.make_move(current_player, a[0], a[1], a[2], a[3])\n if 
result[0] == 0:\n            return [5, \"(v_v) \" + result[1] + \" (v_v)\\n\", board.get_grid()]\n        elif result[0] == 1:\n            return [msg[0], \"(^u^) \" + result[1] + \" (^u^)\\n\", board.get_grid()]", "title": "" }, { "docid": "c79b04b308d0815e22a3dc62a0f74a24", "score": "0.5122255", "text": "def turn():\n    dlog('Starting Turn!')\n    board_size = get_board_size()\n\n    team = get_team()\n    opp_team = Team.WHITE if team == Team.BLACK else Team.BLACK\n    dlog('Team: ' + str(team))\n\n    robottype = get_type()\n    dlog('Type: ' + str(robottype))\n\n    if robottype == RobotType.PAWN:\n        row, col = get_location()\n        dlog('My location is: ' + str(row) + ' ' + str(col))\n\n        if team == Team.WHITE:\n            forward = 1\n            target = board_size - 1\n        else:\n            forward = -1\n            target = 0\n\n        sens = sense()\n\n        # try capturing pieces\n        if check_space_wrapper(row + forward, col + 1, board_size) == opp_team: # up and right\n            capture(row + forward, col + 1)\n            dlog('Captured at: (' + str(row + forward) + ', ' + str(col + 1) + ')')\n            return\n\n        elif check_space_wrapper(row + forward, col - 1, board_size) == opp_team: # up and left\n            capture(row + forward, col - 1)\n            dlog('Captured at: (' + str(row + forward) + ', ' + str(col - 1) + ')')\n            return\n\n        # otherwise try to move forward\n        if not check_space_wrapper(row + forward, col, board_size):\n            # ^ not off the board ^ and ^ directly forward is empty\n            if (row <= 12 - max(abs(col - 8), 7) and team == Team.WHITE) or \\\n               (row >= board_size - (12 - max(abs(col - 8), 6)) and team == Team.BLACK):\n                if not (\n                    check_space_wrapper(row + 2 * forward, col + 1, board_size) == opp_team\n                    # ^ Attack Right ^\n                    or\n                    check_space_wrapper(row + 2 * forward, col - 1, board_size) == opp_team):\n                    # if True:\n                    # ^ Attack Left ^\n                    move_forward()\n                    dlog('Moved forward!')\n                    return\n\n        ally = 0\n        foe = 0\n        for x in sens:\n            if x[2] == team:\n                ally += 1\n            elif x[2] == opp_team:\n                foe += 1\n\n        if ally > 12: # foe + 10:\n            move_forward()\n            dlog('Moved forward! by group')\n            return\n\n        if check_space_wrapper(row - forward, col, board_size) and \\\n           check_space_wrapper(row - 2 * forward, col, board_size) and \\\n           check_space_wrapper(row - 2 * forward, col + 1, board_size) and \\\n           check_space_wrapper(row - 2 * forward, col + 1, board_size) and \\\n           check_space_wrapper(row - 2 * forward, col - 1, board_size) and \\\n           check_space_wrapper(row - 2 * forward, col - 1, board_size):\n            move_forward()\n            dlog('Moved forward! by backing')\n            return\n\n        if row + forward == target:\n            move_forward()\n            dlog('Moved forward! 
by closeness')\n\n else:\n if team == Team.WHITE:\n index = 0\n else:\n index = board_size - 1\n\n board = get_board()\n\n row_sum = [{True: 0, False: 0} for _ in range(board_size)]\n tboard = transpose(board)\n for i, row in enumerate(tboard):\n for j, col in enumerate(row):\n if col:\n row_sum[i][col == team] = row_sum[i][col == team] + 1\n\n dlog(\"Row Summary: \", row_sum)\n\n row_pc_sum = [0.0 for _ in range(board_size)]\n for i, row in enumerate(row_sum):\n row_pc_sum[i] = (row[True] - row[False]) / (row[False] + row[True] + 0.000000001)\n\n dlog(\"Row Percentile Summary: \", row_pc_sum)\n\n m = 1\n mr = -1\n for i in [8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15, 0][::-1]:\n pc = row_pc_sum[i]\n if pc < m and not check_space(index, i):\n m = pc\n mr = i\n\n if mr != -1:\n spawn(index, mr)\n dlog('Spawned unit at: (' + str(index) + ', ' + str(mr) + ')')", "title": "" }, { "docid": "244bf79dd67c792bb65902aabe66d992", "score": "0.51163715", "text": "def get_moves(self, pos, remove_check=True):\n capture_moves = []\n non_capture_moves = []\n (row, col) = pos\n piece = self.get_piece(pos)\n color = self.get_color(pos)\n opponent = BLACK if color is WHITE else WHITE\n if piece is PAWN:\n direction = -1 if color is WHITE else 1\n # Diagonal capture moves\n if col != 0 and self.get_color((row + direction, col - 1)) is opponent:\n self.add_pawn_move(Move((row, col), (row + direction, col - 1), color | piece), capture_moves)\n\n if col != 7 and self.get_color((row + direction, col + 1)) is opponent:\n self.add_pawn_move(Move((row, col), (row + direction, col + 1), color | piece), capture_moves)\n\n # Forward, non capture moves\n if self.board[row + direction][col] is EMPTY:\n self.add_pawn_move(Move((row, col), (row + direction, col), color | piece), non_capture_moves)\n # The second space is only available if the first was empty\n if ChessState.rank(row, color) == 1 and self.board[row + 2 * direction][col] is EMPTY:\n self.add_pawn_move(Move((row, col), (row + 2 * direction, col), color | piece), non_capture_moves)\n\n if piece is KNIGHT:\n for i in range(4):\n dst_row = row + (2 if i % 2 == 0 else -2)\n dst_col = col + (1 if i // 2 == 0 else -1)\n if 0 <= dst_col < 8 and 0 <= dst_row < 8:\n dst_color = self.get_color((dst_row, dst_col))\n if dst_color is opponent:\n capture_moves.append(Move((row, col), (dst_row, dst_col), color | piece))\n elif dst_color is EMPTY:\n non_capture_moves.append(Move((row, col), (dst_row, dst_col), color | piece))\n\n dst_row = row + (1 if i % 2 == 0 else -1)\n dst_col = col + (2 if i // 2 == 0 else -2)\n if 0 <= dst_col < 8 and 0 <= dst_row < 8:\n dst_color = self.get_color((dst_row, dst_col))\n if dst_color is opponent:\n capture_moves.append(Move((row, col), (dst_row, dst_col), color | piece))\n elif dst_color is EMPTY:\n non_capture_moves.append(Move((row, col), (dst_row, dst_col), color | piece))\n\n if piece is BISHOP or piece is QUEEN:\n directions = [[(row + i, col + i) for i in range(1, min(8 - row, 8 - col))],\n [(row + i, col - i) for i in range(1, min(8 - row, col + 1))],\n [(row - i, col + i) for i in range(1, min(row + 1, 8 - col))],\n [(row - i, col - i) for i in range(1, min(row + 1, col + 1))]]\n\n # For each direction, take each move until a non empty square is encountered, then stop, since\n # bishops cannot jump over other pieces\n for dst_list in directions:\n for dst in dst_list:\n dst_color = self.get_color(dst)\n if dst_color is EMPTY:\n non_capture_moves.append(Move((row, col), dst, color | piece))\n elif dst_color is opponent:\n 
capture_moves.append(Move((row, col), dst, color | piece))\n                        break\n                    else:\n                        break\n\n        if piece is ROOK or piece is QUEEN:\n            # List of each possible move in each of the four directions (right, down, left, up)\n            directions = [[(row, i) for i in range(col + 1, 8)],\n                          [(i, col) for i in range(row + 1, 8)],\n                          [(row, i) for i in range(col - 1, -1, -1)],\n                          [(i, col) for i in range(row - 1, -1, -1)]]\n\n            # For each direction, take each move until a non empty square is encountered, then stop, since\n            # rooks cannot jump over other pieces\n            for dst_list in directions:\n                for dst in dst_list:\n                    dst_color = self.get_color(dst)\n                    if dst_color is EMPTY:\n                        non_capture_moves.append(Move((row, col), dst, color | piece))\n                    elif dst_color is opponent:\n                        capture_moves.append(Move((row, col), dst, color | piece))\n                        break\n                    else:\n                        break\n\n        if piece is KING:\n            for i in range(-1, 2):\n                for j in range(-1, 2):\n                    if (i != 0 or j != 0) and 0 <= row+i < 8 and 0 <= col+j < 8:\n                        dst_color = self.get_color((row+i, col+j))\n                        if dst_color is EMPTY:\n                            non_capture_moves.append(Move((row, col), (row+i, col+j), color | piece))\n                        elif dst_color is opponent:\n                            capture_moves.append(Move((row, col), (row+i, col+j), color | piece))\n\n            # Castling options\n            if not self.king_moved[color] and not self.rook_moved[color][0]:\n                if self.board[row][1] is EMPTY and self.board[row][2] is EMPTY and self.board[row][3] is EMPTY:\n                    # Cannot move from, or through check in castling move\n                    if not remove_check: # If we don't care about moving into check, add move\n                        non_capture_moves.append(Move((row, col), (row, 2), color | piece))\n                    # Otherwise check if move will create check, if it doesn't add move\n                    elif not self.is_check() and \\\n                            not self.move(Move((row, col), (row, 3), color | piece)).is_check(color):\n                        non_capture_moves.append(Move((row, col), (row, 2), color | piece))\n\n            if not self.king_moved[color] and not self.rook_moved[color][1]:\n                if self.board[row][5] is EMPTY and self.board[row][6] is EMPTY:\n                    # Cannot move from, or through check in castling move\n                    if not remove_check: # If we don't care about moving into check, add move\n                        non_capture_moves.append(Move((row, col), (row, 6), color | piece))\n                    # Otherwise check if move will create check, if it doesn't add move\n                    elif not self.is_check() and \\\n                            not self.move(Move((row, col), (row, 5), color | piece)).is_check(color):\n                        non_capture_moves.append(Move((row, col), (row, 6), color | piece))\n\n        if remove_check:\n            # Remove any moves that put the player in check\n            for i in range(len(capture_moves)-1, -1, -1):\n                if self.move(capture_moves[i]).is_check(color):\n                    del capture_moves[i]\n\n            for i in range(len(non_capture_moves)-1, -1, -1):\n                if self.move(non_capture_moves[i]).is_check(color):\n                    del non_capture_moves[i]\n\n        return capture_moves, non_capture_moves", "title": "" }, { "docid": "88b04c39726b7d6bce2191d948bb5c58", "score": "0.51140124", "text": "def set_attack_result(self, attack_pos, hit, sunk):\n        self.play.result(attack_pos, hit, sunk)\n        if hit:\n            self.tracking_board.set(attack_pos, Board.HIT)\n        else:\n            self.tracking_board.set(attack_pos, Board.MISS)\n        if sunk is not None:\n            del self.opponent_pieces[sunk]", "title": "" }, { "docid": "59d111fec00c87bd190fe852c36c1353", "score": "0.5113148", "text": "def is_defense_req(player_move,place_holder,p_steps):\r\n    response2 = [None,[None]]\r\n    response2 = is_winning(player_move,place_holder)\r\n    if response2[0] == True:\r\n        return response2\r\n    else:\r\n        return [False,[None]]", "title": "" }, { "docid": "3df9fd83fc5490533f22a02df185b1f6",
"score": "0.51126015", "text": "def human_move(self):\n while True:\n self.state.draw_board(self.scr)\n self.state.draw_possible_moves(self.scr, self.selection, None)\n self.state.draw_captured_pieces(self.scr)\n\n key = self.scr.getch()\n if key == 127:\n return None\n if key == ord('q'):\n self.display_quit_prompt()\n if key == curses.KEY_LEFT:\n self.selection = get_adjacent_square(self.selection, (0, -1))\n if key == curses.KEY_RIGHT:\n self.selection = get_adjacent_square(self.selection, (0, 1))\n if key == curses.KEY_UP:\n self.selection = get_adjacent_square(self.selection, (-1, 0))\n if key == curses.KEY_DOWN:\n self.selection = get_adjacent_square(self.selection, (1, 0))\n if key in [10, 13]:\n capture_moves, non_capture_moves = self.state.get_moves(self.selection)\n if len(capture_moves) + len(non_capture_moves) > 0 and \\\n self.state.get_color(self.selection) is self.state.to_move:\n move = self.get_move_destination()\n if move is not None:\n return move\n else:\n curses.beep()", "title": "" }, { "docid": "c71cc3ab22337c452b50aace1246505f", "score": "0.51075107", "text": "def get_possible_moves(self) -> list:\n if self.current_player == \"p1\" :\n if self.current_value[0] == 0 and self.current_value[1] == 0:\n self.possible_moves = []\n elif self.current_value[0] == 0 and self.current_value[2] == 0:\n self.possible_moves = [\"rr\"]\n elif self.current_value[0] == 0 and self.current_value[3] == 0:\n self.possible_moves = [\"rl\"]\n elif self.current_value[1] == 0 and self.current_value[2] == 0:\n self.possible_moves = [\"lr\"]\n elif self.current_value[1] == 0 and self.current_value[3] == 0:\n self.possible_moves = [\"ll\"]\n elif self.current_value[2] == 0 and self.current_value[3] == 0:\n self.possible_moves = []\n elif self.current_value[0] == 0:\n self.possible_moves = [\"rl\", \"rr\"]\n elif self.current_value[1] == 0:\n self.possible_moves = [\"ll\", \"lr\"]\n elif self.current_value[2] == 0:\n self.possible_moves = [\"lr\", \"rr\"]\n elif self.current_value[3] == 0:\n self.possible_moves = [\"ll\", \"rl\"]\n else:\n self.possible_moves = [\"rl\", \"rr\", \"lr\", \"ll\"]\n\n elif self.current_player == \"p2\":\n if self.current_value[0] == 0 and self.current_value[1] == 0:\n self.possible_moves = []\n elif self.current_value[0] == 0 and self.current_value[2] == 0:\n self.possible_moves = [\"rr\"]\n elif self.current_value[0] == 0 and self.current_value[3] == 0:\n self.possible_moves = [\"lr\"]\n elif self.current_value[1] == 0 and self.current_value[2] == 0:\n self.possible_moves = [\"rl\"]\n elif self.current_value[1] == 0 and self.current_value[3] == 0:\n self.possible_moves = [\"ll\"]\n elif self.current_value[2] == 0 and self.current_value[3] == 0:\n self.possible_moves = []\n elif self.current_value[0] == 0:\n self.possible_moves = [\"lr\", \"rr\"]\n elif self.current_value[1] == 0:\n self.possible_moves = [\"ll\", \"rl\"]\n elif self.current_value[2] == 0:\n self.possible_moves = [\"rr\", \"rl\"]\n elif self.current_value[3] == 0:\n self.possible_moves = [\"ll\", \"lr\"]\n else:\n self.possible_moves = [\"rl\", \"rr\", \"lr\", \"ll\"]\n\n return self.possible_moves", "title": "" }, { "docid": "60b83c002b34e201a7b733057649ad3a", "score": "0.51072913", "text": "def receive_round_result_message(self, winners: List[Dict[str, Union[int, str]]],\n hand_info: [List[Dict[str, Union[str, Dict]]]],\n round_state: Dict[str, Union[int, str, List, Dict]]) -> None:\n done = 1\n won = 0\n game_stack = self.initial_stack - self.my_bet\n self.cashgame_stack -= self.my_bet\n\n if 
self.folded:\n self.reward = 2 * ((1.001 + game_stack) * (1.001 - self.win_rate)**2)\n else:\n self.reward = 2 * (-(1.001 - game_stack) * (1.001 - self.win_rate)**4) # higher the negative reward if not won\n for winner in winners:\n if winner['uuid'] == self.uuid:\n won = 1\n self.reward = winner['stack'] - self.my_bet\n self.cashgame_stack = winner['cashgame_stack'] + self.reward\n self.times_won += 1\n print(\"WON\")\n break\n\n # Update counters\n if won:\n self.won_in_row_counter += 1\n self.folded_in_row_counter = 0\n elif not self.folded:\n self.folded_in_row_counter = 0\n self.won_in_row_counter = 0\n else:\n self.folded_in_row_counter += 1\n\n next_state = self._get_state(round_state)\n self._remember(self.last_state, self.last_action, self.reward, next_state, done)\n\n replay_after_50_games = True if round_state['round_count'] % 50 == 0 else False\n\n if len(self.memory) > self.batch_size and replay_after_50_games:\n self._replay(self.batch_size)\n\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n if self.episode_printout:\n print(\"Episode: {}, reward: {:6.2f}, folded: {}, cashgame stack: {:6.2f}, won: {}\".format(round_state['round_count'], self.reward, self.folded, self.cashgame_stack, won))\n\n if self.rounds_to_play == self.round_count:\n self.model.save(\"./model/deepq.h5\")\n print('\\nGame Statistics:')\n print('Times folded: {}'.format(self.times_folded))\n print('Times called: {}'.format(self.times_called))\n print('Times raised: {}'.format(self.times_raised))\n print('Percentage of games won: {}%'.format(self.times_won * 100 / self.rounds_to_play))\n print('Model weights have been saved to /model/deepq.h5 \\n')", "title": "" }, { "docid": "36aba9a82a05637b50f8af3d9b490afd", "score": "0.51059365", "text": "def get_result(self, player_just_moved):\n if self.immediate:\n diff_black_white = self.accumulated_reward[CONST.BLACK() - 1] - self.accumulated_reward[CONST.WHITE() - 1]\n if (diff_black_white > 0 and player_just_moved == CONST.BLACK()) or (diff_black_white < 0 and player_just_moved == CONST.WHITE()):\n return 1\n elif diff_black_white == 0:\n return 0\n else:\n return -1\n else: \n official_score = self.py_pachi_board.official_score\n if ((official_score > 0 and player_just_moved == CONST.WHITE()) or (official_score < 0 and player_just_moved == CONST.BLACK())):\n return 1\n elif official_score == 0:\n return 0\n else:\n return -1", "title": "" }, { "docid": "8f17b0ca89ea0146d97425264b65c659", "score": "0.5102653", "text": "def execute_move(self, move, color):\n assert(len(move) == 3) # Move is list of 3 tuples:\n current_location = tuple(move[0])\n move_location = tuple(move[1])\n build_location = tuple(move[2])\n piece = self.pieces[0][current_location]\n \n try:\n self.pieces[0][current_location] = 0 # Remove piece from current square\n except IndexError as e:\n \n #self.pieces[0][current_location] = color\n #self.pieces[0][move_location] = 0\n print(e)\n print(self.pieces)\n print(current_location)\n print(move_location)\n print(build_location)\n print(\"IGNORING MOVE:\")\n\n try:\n #self.pieces[0][current_location] = 0 # Remove piece from current square\n self.pieces[0][move_location] = piece # Add piece to new square\n #self.pieces[1][build_location] += 1 # Build one block on build location\n except IndexError as e:\n \n self.pieces[0][current_location] = piece\n #self.pieces[0][move_location] = 0\n print(e)\n print(self.pieces)\n print(current_location)\n print(move_location)\n print(build_location)\n print(\"IGNORING MOVE:\")\n \n 
try:\n self.pieces[1][build_location] += 1 # Build one block on build location\n except IndexError as e:\n \n self.pieces[0][current_location] = piece\n self.pieces[0][move_location] = 0\n print(e)\n print(self.pieces)\n print(current_location)\n print(move_location)\n print(build_location)\n print(\"IGNORING MOVE:\")", "title": "" }, { "docid": "0a7c90372cd4607122b10bedc238d8e4", "score": "0.5098774", "text": "def result(board, action):\n result_board = copy.deepcopy(board)\n\n if not (result_board[action[0]])[action[1]] is None:\n raise NameError(\"Invalid Action\")\n else:\n (result_board[action[0]])[action[1]] = player(board)\n return result_board", "title": "" }, { "docid": "61ee5f0cc06726b0b76d48b29ec77904", "score": "0.5094221", "text": "def make_move(self, request):\n game = get_by_urlsafe(request.urlsafe_game_key, Game)\n #check if the game is over, update the score list if needed\n if evaluate(game.state):\n outcome = evaluate(game.state)\n if outcome == \"O\":\n msg = \"Win\"\n else:\n msg = \"Lose\"\n game.end_game(msg)\n game.game_over = True\n game.put()\n else:\n if \"-\" not in game.state:\n game.end_game(\"Draw\")\n game.game_over = True\n if game.game_over:\n return game.to_form('Game already over!')\n #check if it is player's turn\n if game.player:\n if request.move not in range(0,9):\n raise endpoints.BadRequestException('Invalid Move')\n if game.state[request.move] != \"-\":\n raise endpoints.BadRequestException('Invalid Move')\n #update the board state\n board = list(game.state)\n board[request.move] = \"O\" \n game.state = ''.join(board)\n #update movecount\n game.movecount += 1\n #evaluate the result\n if evaluate(game.state):\n game.end_game(\"Win\")\n return game.to_form('You win!')\n if \"-\" not in game.state:\n game.end_game(\"Draw\")\n return game.to_form('This is a Draw.')\n msg = \"AI's turn\"\n game.history()\n game.player = False\n game.put()\n return game.to_form(msg)\n else:\n msg = \"This is not your turn\"\n return game.to_form(msg)", "title": "" }, { "docid": "9c57779c81bad462b8fc217455f39816", "score": "0.50791574", "text": "def capture(self, board, dest):\r\n cap_p_not = board[dest]\r\n captured_piece = board.find_piece(dest, cap_p_not)\r\n captured_piece.get_captured(board, cap_p_not)\r\n self.move(board, dest)", "title": "" }, { "docid": "21a7e6810c6bfb88e769a5797cf3f6bb", "score": "0.5073425", "text": "def step(self, action):\n assert type(action) == int and action >= 0 and action < 9\n # done = already finished the game\n if self.done:\n return self.grid, self.STATUS_DONE, self.done\n # action already have something on it\n if self.grid[action] != 0:\n return self.grid, self.STATUS_INVALID_MOVE, self.done\n # play move\n self.grid[action] = self.turn\n if self.turn == 1:\n self.turn = 2\n else:\n self.turn = 1\n # check win\n if self.check_win():\n self.done = True\n return self.grid, self.STATUS_WIN, self.done\n # check tie\n if all([p != 0 for p in self.grid]):\n self.done = True\n return self.grid, self.STATUS_TIE, self.done\n return self.grid, self.STATUS_VALID_MOVE, self.done", "title": "" }, { "docid": "4dc4ba5108dd299a04bd044bc2151830", "score": "0.50723314", "text": "def make_move(self, board, move):\n\n board_copy = board.copy()\n #Pass (-1, -1) is a valid move\n if GoUtils._is_move_pass(move):\n board_copy.add_move_to_history(-1, -1) #Add a pass move to history\n board_copy.flip_player() #The other player's turn\n return True, board_copy\n\n #Not valid if placed outside of a board\n if not GoUtils._is_move_in_board(move, 
board_copy.board_dimension):\n return False, board\n\n (r, c) = move\n\n #Invalid move if placed on top of another existing stone\n if board_copy.board_grid[r][c] != 0:\n return False, board\n\n #Invalid move because of Ko restrictions, this condition is checked before the liberty constraint\n if GoUtils._is_invalid_move_because_of_ko(board_copy, move):\n #print(\"Invalid because of Ko\")\n return False, board\n\n #Invalid move if placed in a spot that forms a group of stones with no liberty\n board_copy.board_grid[r][c] = board_copy.player\n\n #Remove the stones captured because of this move\n #Remove the groups of the stones that belong to the opponent directly next to current move\n #top\n if r > 0 and board_copy.board_grid[r - 1][c] == -board_copy.player:\n board_copy.board_grid = GoUtils._remove_pieces_if_no_liberty((r - 1, c), board_copy.board_grid)\n #bottom\n if r < board_copy.board_dimension - 1 and board_copy.board_grid[r + 1][c] == -board_copy.player:\n board_copy.board_grid = GoUtils._remove_pieces_if_no_liberty((r + 1, c), board_copy.board_grid)\n #left\n if c > 0 and board_copy.board_grid[r][c - 1] == -board_copy.player:\n board_copy.board_grid = GoUtils._remove_pieces_if_no_liberty((r, c - 1), board_copy.board_grid)\n #right\n if c < board_copy.board_dimension - 1 and board_copy.board_grid[r][c + 1] == -board_copy.player:\n board_copy.board_grid = GoUtils._remove_pieces_if_no_liberty((r, c + 1), board_copy.board_grid)\n\n #Invalid move if current move would cause the current connected group to have 0 liberty\n\n if GoUtils._count_liberty(board_copy.board_grid, move) == 0:\n return False, board\n \n #After a move is successfully made, update the board to reflect that and return\n board_copy.add_move_to_history(r, c)\n board_copy.flip_player()\n\n return True, board_copy", "title": "" }, { "docid": "a9f56f063ec974f1cfac195ad7eb6ab7", "score": "0.5069256", "text": "def make_move(self, request):\n game = get_by_urlsafe(request.urlsafe_game_key, Game)\n if not game:\n raise endpoints.NotFoundException('Game not found')\n if game.game_over:\n raise endpoints.NotFoundException('Game already over!')\n\n# Prevent a user from playing twice back to back\n user = User.get_current_user(request.user_name)\n if user.key != game.nextMove:\n raise endpoints.BadRequestException('It\\'s not your turn!')\n x = True if user.key == game.player_x else False\n\n# Make a move\n move = request.move\n size = game.boardSize * game.boardSize - 1\n if move < 0 or move > size:\n raise endpoints.BadRequestException('Bad Move!')\n if game.board[move] != '':\n raise endpoints.BadRequestException('Invalid Move')\n\n# Make a move, and send the move to game history\n game.board[move] = 'X' if x else 'O'\n game.game_history.append(('X' if x else 'O', move))\n game.nextMove = game.player_o if x else game.player_x\n\n# Check if there is a winner in the game and end the game\n winner = check_winner(game.board, game.boardSize)\n if winner:\n game.end_game(user.key)\n else:\n # game ends in a draw\n if check_full(game.board):\n game.end_game()\n else:\n # Send email reminder to player if game still in progress\n taskqueue.add(url='/tasks/send_move_email',\n params={'user_key': game.nextMove.urlsafe(),\n 'game_key': game.key.urlsafe()})\n\n game.put()\n\n# Update the Memcache if the game is over\n if game.game_over:\n taskqueue.add(url='/task/cache_games_finished')\n return game.to_form()", "title": "" }, { "docid": "cbc275f0c508003e4e409996c9ea9428", "score": "0.50665", "text": "def make_move(self, request):\n game = 
get_by_urlsafe(request.urlsafe_game_key, Game)\n # Make sure the game exists and is in progress\n if not game:\n raise endpoints.NotFoundException('No game found!')\n elif game.status != 'In Progress':\n raise endpoints.BadRequestException(\n 'Not an active game, moves no longer allowed')\n else:\n # Retrieve the board and played cards\n board = game.board\n displayBoard = game.boardState\n card1 = getattr(request, 'card1')\n card2 = getattr(request, 'card2')\n if card1 == card2:\n # The user is guessing the same card twice\n raise endpoints.BadRequestException(\n \"You can't pick the same card twice!\")\n else:\n # Evaluate the result of the move and update game information\n message, resultBoard = gm.compareCards(\n card1, card2, board, displayBoard)\n game.guesses += 1\n game.boardState = resultBoard\n # Check to see if the game has now been won\n if gm.isGameWon(game.boardState):\n message += ' Congratulations - You win! All cards matched!'\n game.status = 'Won'\n game.win_game()\n\n # Append the current move to the game history\n game.history.append(\n 'guess: {0} result: {1}'.format([card1, card2], message))\n game.put()\n return game.to_form(message=message)", "title": "" }, { "docid": "69105b2c46190254b9a70cbc272d773d", "score": "0.5064289", "text": "def action(self, outcome):\n\n # action forward\n if self._action == 0:\n self.anticipation_0 = outcome\n self.valeur_hedoniste_anticipee_pour_action_0 = self.hedonist_table[0][self.anticipation_0]\n if outcome == self.anticipated_outcome and self.valeur_hedoniste_anticipee_pour_action_0 >= self.valeur_hedoniste_anticipee_pour_action_1:\n self.new_action = self._action\n if self.ennui >= 3 or self.valeur_hedoniste_anticipee_pour_action_0 <= self.valeur_hedoniste_anticipee_pour_action_1:\n # compare with previous turn\n if self._action == 0:\n self.new_action = random.randint(1, 2)\n if self._action == 1 or self._action == 2:\n self.new_action = 0\n\n # action turn left\n elif self._action == 1:\n action = 1\n self.anticipation_1 = outcome\n self.valeur_hedoniste_anticipee_pour_action_1 = self.hedonist_table[1][self.anticipation_1]\n if outcome == self.anticipated_outcome and self.valeur_hedoniste_anticipee_pour_action_1 >= self.valeur_hedoniste_anticipee_pour_action_0:\n self.new_action = self._action\n if self.ennui >= 3 or self.valeur_hedoniste_anticipee_pour_action_1 <= self.valeur_hedoniste_anticipee_pour_action_0:\n # compare with previous turn\n if self._action == 0:\n self.new_action = random.randint(1, 2)\n if self._action == 1 or self._action == 2:\n self.new_action = 0\n\n # action turn right\n elif self._action == 2:\n self.anticipation_2 = outcome\n self.valeur_hedoniste_anticipee_pour_action_2 = self.hedonist_table[2][self.anticipation_2]\n if outcome == self.anticipated_outcome and self.valeur_hedoniste_anticipee_pour_action_2 >= self.valeur_hedoniste_anticipee_pour_action_0:\n self.new_action = self._action\n if self.ennui >= 3 or self.valeur_hedoniste_anticipee_pour_action_2 <= self.valeur_hedoniste_anticipee_pour_action_0:\n # compare with previous turn\n if self._action == 0:\n self.new_action = random.randint(1, 2)\n if self._action == 1 or self._action == 2:\n self.new_action = 0\n\n if self._action == self.new_action:\n self.ennui = self.ennui + 1\n else:\n self.ennui = 0\n if self.ennui >= 4:\n self.ennui = 0\n\n self._action = self.new_action\n\n return self._action", "title": "" }, { "docid": "eab0e12416770c64ed932ac064aa8f08", "score": "0.5061729", "text": "def result(self, state, action):\n rslt = 
deepcopy(state)\n grid = rslt.grid\n step = DIRECTIONS[action]\n\n aim_cords = vector_add(rslt.player, step)\n grid[rslt.player] -= 7 # returns cell to its default value without player\n\n if grid[aim_cords] in BOX: # from action no blocking box is allowed (a move that won't change anything)\n box_cords = aim_cords\n\n if box_cords in rslt.targets: # if we moved a box from target pos\n rslt.box_left += 1\n rslt.target_left += 1\n\n grid[box_cords] -= 5 # returns cell to its value without box\n rslt.box.remove(box_cords)\n\n seq_cell = vector_add(box_cords, step) # the sequential cell in same direction\n\n while grid[seq_cell] == ICE[0]:\n box_cords = seq_cell\n seq_cell = vector_add(box_cords, step)\n\n if grid[seq_cell] in [CELL, TARGET[0]]:\n box_cords = seq_cell\n\n if box_cords in rslt.targets:\n rslt.box_left -= 1\n rslt.target_left -= 1\n\n grid[box_cords] += 5\n rslt.box.append(box_cords)\n else:\n seq_cell = aim_cords\n while grid[seq_cell] == ICE[0]:\n aim_cords = seq_cell\n seq_cell = vector_add(aim_cords, step)\n if grid[seq_cell] in [CELL, TARGET[0]]: # if player can go out of ice\n aim_cords = seq_cell\n # todo: Understand if box can init on ice(without blocking), and thus jam (or move with player?)\n rslt.player = aim_cords\n # update grid with player pos\n grid[rslt.player] += 7\n\n return rslt", "title": "" }, { "docid": "f3ed7356d104ae4e0cead0ab4b804583", "score": "0.50597495", "text": "def turn_finish (your_turn) :\n your_turn = False\n return your_turn", "title": "" }, { "docid": "13a3675408ff22c870cd43c19bcbf405", "score": "0.5058175", "text": "def step(self, action):\n # Get Action \n self.agent_host.sendCommand('pitch {}'.format(action[0]))\n self.agent_host.sendCommand('turn {}'.format(action[1]))\n if action[2] > 0:\n self.boost()\n if self.allow_move_action:\n self.agent_host.sendCommand('pitch {}'.format(action[0]))\n self.agent_host.sendCommand('turn {}'.format(action[1]))\n time.sleep(.2)\n self.episode_step +=1\n \n\n # Get Observation\n world_state = self.agent_host.getWorldState()\n for error in world_state.errors:\n print(\"Error:\", error.text)\n self.obs, self.allow_move_action, yReward, zReward = self.get_observation(world_state)\n # Get Done\n done = not world_state.is_mission_running \n\n # Get Reward\n reward = 0\n for r in world_state.rewards:\n reward += r.getValue()\n if (yReward < 7):\n reward -= 10\n if (zReward > self.prevZ):\n reward += 7.5\n self.prevZ = zReward\n else:\n reward -= 10\n self.prevZ = zReward\n self.episode_return += reward\n print(\"REWARD \"+ str(reward))\n return self.obs, reward, done, dict()", "title": "" }, { "docid": "da22a8bfaeb565bc8bc8b1208900bfb3", "score": "0.5056061", "text": "def _process_round(self):\n self.movecounter = self.players_left #reset number of non-volatile moves to be performed.\n #reset bid numbers for new round\n for p in self.players:\n p.bid = 0\n self.bid = 0\n self.lastraise = 0\n if self.players_left <= 1:\n #game should end before more turns are taken\n self._resolve_game_abrupt()\n return\n if self.players[0].card1.rank is None:\n #hands haven't been dealt yet\n self.deal()\n else:\n #draw cards or end game according to game progress\n if self.card5.rank is None:\n #burn one as per standard poker rules (pointless, i know)\n self.deck.draw_card()\n if self.card1.rank is None:\n self.card1 = self.deck.draw_card()\n self.card2 = self.deck.draw_card()\n self.card3 = self.deck.draw_card()\n elif self.card4.rank is None:\n self.card4 = self.deck.draw_card()\n elif self.card5.rank is None:\n 
self.card5 = self.deck.draw_card()\n else:\n self._resolve_game()\n return\n #reset next non-folded player after dealer to first\n while self.players[0] != self.dealer:\n temp = self.players.pop(0)\n self.players.append(temp)\n self._next_player()", "title": "" }, { "docid": "a20b516fbd9e26194654de011a4fa8fb", "score": "0.505541", "text": "def opponent_move(self, move):\n # validate input\n if len(move) > 1:\n source_row = move[1][0]\n source_col = move[1][1]\n if source_row != None and self.board[source_row][source_col] != self.opp:\n raise Exception(\"You don't have a piece there!\")\n if self.board[move[0][0]][move[0][1]] != ' ':\n raise Exception(\"Illegal move detected\")\n # make move\n self.place_piece(move, self.opp)", "title": "" }, { "docid": "9551cea780deff21f2de4a5a825b7d3c", "score": "0.50533557", "text": "def result(self, state, action):\n\n b = board_perform_move(state.board, action)\n s = sol_state(b) \n return s", "title": "" }, { "docid": "3baa2043c1f48de9dda397a278f077f9", "score": "0.5049442", "text": "def handle_sense_result(self, sense_result: List[Tuple[Square, Optional[chess.Piece]]]):\n pass", "title": "" }, { "docid": "244295893922724d92ceaaf7ef5df81d", "score": "0.50453365", "text": "def move(self,player,msglog,DEBUG = False,enemyMove = False):\n if player.shortCom:\n ask = \"\\n(U),(D),(L),(R),(Rest),(B),(J),(Use),(P)\" +\\\n \"(stats),(use),(equip),(dark),(light),(short),(logs)\\n\"\n else: ask = \"\\nMove where? (U)p,(D)own,(L)eft, or (R)ight\\n\" +\\\n \"Other: (Rest), (B)reak Wall, (J)ump Wall, (use) item, or (P)osition\" +\\\n \"\\nShow (Inventory),(equip) gear \\n\" +\\\n \"Toggles: player(stats),(short) commands,(dark),(light) or see (logs)\\n\"\n\n resp = input(ask)\n moved = False\n torchOn = False\n msgWait = False\n tupUp = self.currentTuple #THIS IS NOT A STRING YET\n row,col = tupUp\n resp = resp.lower()\n if resp == \"dark\" or resp == \"light\":\n self.visible(resp)\n if resp == \"enemy\":\n msglog.addLog(str(self.enemyPos))\n for enemy in self.enemyPos:\n print(enemy,self.getDistance(enemy))\n sleep(2)\n if resp == \"stats\":\n player.hideStats = not player.hideStats\n elif resp == \"logs\":\n player.hideLog = not player.hideLog\n elif DEBUG and resp == \"spawnloot\":\n temp = Gear(\"Sword\",\"Legendary\",5)\n msglog.addLog(temp.name)\n print(temp.name)\n player.addInventoryWearable(temp)\n print(msglog)\n #DONT GRADE THIS ELIF\n #This is basically just an endless combat sim\n elif DEBUG and resp == \"combatplus\":\n restsLeft = 5\n lv = int(input(\"what level?\\n\"))\n player.inventory[\"armor\"][\"equip\"][\"Helmet\"] = Gear(\"Helmet\",\"Rare\",lv)\n player.inventory[\"armor\"][\"equip\"][\"Boots\"] = Gear(\"Boots\",\"Rare\",lv)\n player.inventory[\"armor\"][\"equip\"][\"Gloves\"] = Gear(\"Helmet\",\"Rare\",lv)\n player.inventory[\"armor\"][\"equip\"][\"Body Armor\"] = Gear(\"Helmet\",\"Rare\",lv)\n player.inventory[\"sword\"][\"equip\"] = Gear(\"Sword\",\"Rare\",lv)\n recalcDefense(player)\n recalcAttack(player)\n print(player.gearOffense)\n battleRest = \"\"\n battleRest = input(\"Battle 'b', Rest 'r', equip 'e', or 'c' to quit\\n\")\n while battleRest != \"c\" and player.health > 0:\n if battleRest == \"b\":\n e1 = Enemy()\n e1.inventory[\"armor\"][\"equip\"][\"Helmet\"] = Gear(\"Helmet\",\"Rare\",lv)\n e1.inventory[\"armor\"][\"equip\"][\"Boots\"] = Gear(\"Boots\",\"Rare\",lv)\n e1.inventory[\"armor\"][\"equip\"][\"Gloves\"] = Gear(\"Helmet\",\"Rare\",lv)\n e1.inventory[\"armor\"][\"equip\"][\"Body Armor\"] = Gear(\"Helmet\",\"Rare\",lv)\n 
e1.inventory[\"sword\"][\"equip\"] = Gear(\"Sword\",\"Rare\",lv)\n #print(calcDamage(player,e1))\n #print(\"att\",player.gearOffense)\n #print(\"def\",player.gearDefense)\n print(\"enemy\")\n print(\"att\",e1.gearOffense)\n print(\"def\",e1.gearDefense)\n recalcAttack(e1)\n recalcDefense(e1)\n sleep(2)\n while (player.health > 0 and e1.health > 0):\n battle_monsters(player,e1,self,msglog)\n if player.health < 0:\n break\n elif battleRest == \"r\" and restsLeft > 0:\n player.health += round(.5 * player.maxhealth)\n if player.health > player.maxhealth:\n player.health = player.maxhealth\n restsLeft -=1\n print(\"Rests left: \",restsLeft)\n elif battleRest == \"e\":\n player.equipGear(msglog)\n if player.health > 0:\n battleRest = input(\"Battle 'b', Rest 'r', equip 'e', or 'c' to quit\\n\")\n sleep(2)\n cls()\n elif DEBUG and resp == \"armor\":#adds armor to inv for the player test\n lv = int(input(\"up to what level?\\n\"))\n rarity = [\"Ultra Rare\",\"Legendary\",\"Common\",\"Uncommon\",\"Rare\"]\n \n player.addInventoryWearable(Gear(\"Helmet\",random.choice(rarity),randint(0,lv)))\n player.addInventoryWearable(Gear(\"Boots\",random.choice(rarity),randint(0,lv)))\n player.addInventoryWearable(Gear(\"Gloves\",random.choice(rarity),randint(0,lv)))\n player.addInventoryWearable(Gear(\"Body Armor\",random.choice(rarity),randint(0,lv)))\n player.addInventoryWearable(Gear(\"Sword\",random.choice(rarity),randint(0,lv)))\n #recalcDefense(player)\n #recalcAttack(player)\n #Okay you can look now\n elif resp == \"inventory\":\n lookup = input(\"Gear or Items?\\n\").lower()\n if lookup == \"gear\":\n player.showInventory()\n sleep(3)\n elif lookup == \"items\":\n for item in player.inventory.keys():\n if item != \"sword\" and item != \"armor\" and \\\n player.inventory[item] != 0:\n print(item,\":\",player.inventory[item])\n sleep(4)\n elif resp == \"short\":\n player.shortCom = not player.shortCom\n elif resp == \"use\":\n item = input(\"What item are you using?\\n\"+\\\n \"Food, torch, map, or bandage\\n\").lower()\n player.useItem(item,msglog,self)\n elif resp == \"equip\":\n player.equipGear(msglog)\n elif resp == \"unequip\":\n gearSlot = input(\"Which item to unequip\\n\")\n player.unequipGear(gearSlot,msglog)\n elif resp in [\"u\",\"up\"]:\n self.moveUp(player,msglog,DEBUG)\n moved = True\n elif resp in [\"d\",\"down\"]:\n self.moveDown(player,msglog,DEBUG)\n moved = True\n elif resp in [\"l\",\"left\"]:\n moved = True\n self.moveLeft(player,msglog,DEBUG)\n elif resp in [\"right\",\"r\"]:\n moved = True\n self.moveRight(player,msglog,DEBUG)\n elif resp in [\"j\",\"jump\"]: #jumpWall\n self.jumpWall(player,msglog)\n moved = True\n elif resp in [\"b\",\"break\"]: #breakWall\n self.breakWall(player,msglog)\n elif resp in [\"p\",\"pos\"]: # used primarily for debugging\n msglog.addLog(\"You check your surroundings, you are at \"+\\\n str(self.currentTuple))\n elif resp == \"rest\":\n if player.hunger > 10:\n player.health += round(player.maxhealth/10)\n if player.health > player.maxhealth:\n player.health = player.maxhealth\n player.hunger -= 10\n msglog.addLog(\"You take a short rest\",True)\n for ability in player.abilityList.keys():\n if player.abilityList[ability] > 0:\n player.abilityList[ability] -= 5\n if player.abilityList[ability] < 0:\n player.abilityList[ability] = 0\n else: \n msglog.addLog(\"Invalid Action\")\n msgWait = True\n if moved: #reduce hunger and health and cooldowns inc hp if hunger > 0\n if player.hunger > 0:\n if player.health+1 < player.maxhealth : player.health += 1\n 
self.afterMove(player,msglog,DEBUG)\n if enemyMove:\n for enemy in self.enemyPos:\n self.enemyMove(enemy,player,msglog)\n if str(self.currentTuple) in self.enemyPos:\n self.tuplemaze[str(self.currentTuple)].obsID = \" \"\n enemyGen = Enemy()\n msglog.addLog(player.name+\" encountered a \" + enemyGen.name)\n cls()\n print(msglog)\n sleep(1.3)\n battle_monsters(player, enemyGen, self,msglog)\n self.enemyPos.remove(str(self.currentTuple))\n if enemy not in self.enemyPos:\n self.tuplemaze[enemy].obsID = \" \"\n if(msgWait):\n sleep(.3)", "title": "" }, { "docid": "e6a133af83944d5f2fed4581b54060c7", "score": "0.5043654", "text": "def process_action(self, action):\n \"\"\"\n Alright. It looks like we're given an int here.\n This would translate to 1-64, piece 1, 65-128 piece 2, 129-192 piece 3\n where action % 64 == the position in a 1d plane\n \"\"\"\n # pieceIdx = math.floor(action / 64)\n # position1d = action % 64\n # posy = math.floor(position1d / 8)\n # posx = position1d % 8\n # pid = -1\n # if pieceIdx == 0:\n # pid = self.piece1\n # elif pieceIdx == 1:\n # pid = self.piece2\n # else:\n # pid = self.piece3\n # piece = Piece.piece_from_id(pid)\n # # print(\"Applying piece {} with pos {} {} to board {}\".format(piece, posx, posy, self.board))\n # new_board = Board.apply_piece_only(np.zeros((8, 8), dtype=np.uint8), piece, posx, posy)\n # print(\"PROCESS ACTION {} , pieceIdx {} position {} posy {} posx {}\".format(action, pieceIdx, position1d, posy, posx))\n # print(\"Piece is \", piece)\n # if new_board is None:\n # new_board = np.zeros((8, 8), dtype=np.uint8)\n # # print(\"Applied Board \", new_board)\n # return new_board\n return action", "title": "" }, { "docid": "d3d780b2d44afb24bfea4d6c48028df2", "score": "0.50429654", "text": "async def action_processor_next(dungeon_sweeper_runner):\n if dungeon_sweeper_runner._runner_state == RUNNER_STATE_END_SCREEN:\n user_state = dungeon_sweeper_runner.user_state\n game_state = user_state.game_state\n stage_source = game_state.stage\n selected_stage = stage_source.after_stage_source\n if selected_stage is None:\n return False\n \n \n dungeon_sweeper_runner._runner_state = RUNNER_STATE_PLAYING\n \n selected_stage_id = selected_stage.id\n user_state.selected_stage_id = selected_stage_id\n \n try:\n stage_result = user_state.stage_results[selected_stage_id]\n except KeyError:\n best = -1\n else:\n best = stage_result.best\n \n user_state.game_state = GameState(selected_stage, best)\n return True\n \n return False", "title": "" }, { "docid": "1904a94b2000f5264e69258a680368e2", "score": "0.5039852", "text": "def result(board, action):\n a = copy.deepcopy(board)\n \"\"\"if board[action[0]-1][action[1]-1] is not None:\n print(\"That move is already taken\")\n return board\"\"\"\n a[action[0]][action[1]] = player(board)\n\n return a", "title": "" }, { "docid": "a519829690e9e29db7c843d3c1deed92", "score": "0.5038086", "text": "def web_play(self, action):\n self.playing = self.i_turn % 2\n current_player = self.players[self.playing]\n if action == (0, 0):\n print(\"Player %s admit defeat!\" % current_player.name)\n self.last_move = action\n if self.place_stone() is False:\n return\n self.hist_moves.append(self.last_move) # for undo\n winner = self.check_winner()\n if not self.silent_mode:\n self.print_board()\n if winner is not None:\n print(\"########## %s is the WINNER! 
#########\" % current_player.name)\n return None, winner\n self.i_turn += 1\n if self.i_turn == self.board_size ** 2:\n print(\"This game is a Draw!\")\n return None, \"Draw\"\n\n next_action = self.check_next_ai()\n return next_action, winner", "title": "" }, { "docid": "b9e6e8c914a871d4cc80eb993595a906", "score": "0.5036837", "text": "def move_piece(self, move):\n\n old_pos = move[\"old_pos\"]\n new_pos = move[\"new_pos\"]\n\n #Increments turns with no captures counter\n self.turns_no_pieces_removed[self.current_turn-1] = \\\n self.turns_no_pieces_removed[self.current_turn-1] + 1\n\n if self.board.get_piece_at(old_pos).get_type() == \"KING\":\n self.turns_no_advance = self.turns_no_advance + 1\n else:\n self.turns_no_advance = 0\n\n piece = self.board.remove_piece(old_pos)\n piece.set_location(new_pos)\n\n self.board.put_piece(piece, new_pos)", "title": "" }, { "docid": "4a45ff333e163d2d8d975bc497e19b8d", "score": "0.50345296", "text": "def result(board, action):\n i = action[0]\n j = action[1]\n\n result = copy.deepcopy(board)\n\n if(result[i][j] != EMPTY):\n raise NotImplementedError\n\n result[i][j] = player(result)\n return result", "title": "" }, { "docid": "46733a45c8fe46a863a94dba8ee1bd30", "score": "0.503303", "text": "def make_move(self, mv_to, mv_from, player_turn):\n # first get whos turn it is.\n if self.turn:\n player = self.player0\n opponent = self.player1\n else:\n player = self.player1\n opponent = self.player0\n # if mv_to has oppenent peice in it... replace it\n # and update score for both players.\n # elif has own peice(it should'nt) fail move.\n # elif", "title": "" }, { "docid": "fc79cfac1e719bb5dc9b07c20b165c0b", "score": "0.5032202", "text": "def move_piece(self, current_pos, next_pos, gui_move=False, ai=False):\n\n # copy board and retrieve the piece being moved along with it's data\n new_board = self.copy_board_object()\n return_dictionary = {'board': new_board, 'game_over': False, 'draw': False, 'winner': None}\n piece = new_board.get_board()[current_pos[0]][current_pos[1]]\n \"\"\"\n if gui_move:\n print(f\"Board:\")\n new_board.print_board()\n print(f\"Piece: {piece}\")\n print(f\"current position: {current_pos[0], current_pos[1]}\")\n print(f\"destination position: {next_pos[0], next_pos[1]}\")\n if not gui_move:\n print(\"SOME NONE GUI ACTION......\")\n print()\n print(f\"Board:\")\n new_board.print_board()\n print(f\"Piece: {piece}\")\n print(f\"current position: {current_pos[0], current_pos[1]}\")\n print(f\"destination position: {next_pos[0], next_pos[1]}\")\n \"\"\"\n data = piece.move(next_pos[0], next_pos[1], new_board)\n new_board.moves_since_taken += 1\n castle = False\n\n # check for and make castle move\n if isinstance(piece, King) and data and gui_move:\n # update board\n castle = True\n row = 7 if piece.get_team() == Team.WHITE else 0\n king_col = 6 if next_pos[1] == 7 else 2\n rook_col = 5 if next_pos[1] == 7 else 3\n rook = new_board.get_board()[row][next_pos[1]]\n rook.move(row, rook_col, new_board)\n new_board.get_board()[row][king_col] = piece\n new_board.get_board()[row][rook_col] = rook\n new_board.get_board()[current_pos[0]][current_pos[1]] = False\n new_board.get_board()[next_pos[0]][next_pos[1]] = False\n\n if not castle:\n # non-castle move, make the move on the board.board\n destination_piece = new_board.board[next_pos[0]][next_pos[1]]\n new_board.board[current_pos[0]][current_pos[1]] = False\n new_board.board[next_pos[0]][next_pos[1]] = piece\n\n if destination_piece:\n new_board.update_teams_pieces()\n new_board.moves_since_taken = 
0\n\n # Check for en_passant and pawn promotion\n if isinstance(piece, Pawn):\n if data[0]: # if en passant\n new_board.moves_since_taken = 0\n new_board.board[next_pos[0] - piece.direction][next_pos[1]] = False\n if data[1] and gui_move: # check if pawn promotion and gui move\n new_board.execute_pawn_promotion(piece.get_team(), next_pos, ai)\n # endif not castle\n\n moving_team_pieces = new_board.white_pieces if piece.get_team() == Team.WHITE else new_board.black_pieces\n for pc in moving_team_pieces:\n if isinstance(pc, Pawn) and pc is not piece:\n pc.en_passant_move = []\n pc.just_moved_two = False\n\n # check for threefold rule\n three_fold_bool = False\n if gui_move:\n three_fold_bool = new_board.add_state_check_threefold()\n\n # was there 50 moves made without a piece being taken or third instance of game board, then game over and draw\n if new_board.moves_since_taken >= 50 or three_fold_bool:\n return_dictionary['game_over'] = True\n return_dictionary['draw'] = True\n\n # change turns\n new_board.turn = Team.BLACK if piece.get_team() == Team.WHITE else Team.WHITE\n\n # update teams pcs\n new_board.update_teams_pieces()\n\n \"\"\"\n # check mate or draw?\n if gui_move:\n pcs = self.white_pieces if new_board.turn == Team.WHITE else self.black_pieces\n moves = []\n check = False\n for pc in pcs:\n moves = moves + pc.get_valid_moves(new_board, False)\n moves = pc.king_in_harms_way(new_board, moves)\n if isinstance(pc, King):\n if pc.in_check(new_board, (pc.row, pc.col)):\n check = True\n if len(moves) == 0:\n return_dictionary['game_over'] = True\n if check:\n return_dictionary['draw'] = False\n return_dictionary['winner'] = 'white team' if new_board.turn == Team.BLACK else 'black team'\n else:\n return_dictionary['draw'] = True\n \"\"\"\n\n return return_dictionary", "title": "" }, { "docid": "2ec9f153dff01bb37f98c1ede3435fb2", "score": "0.5029163", "text": "def get_move(self, bot_id, universe, game_state):", "title": "" }, { "docid": "27e8abee8932e82f721b4926f24c81e8", "score": "0.5020151", "text": "def next_turn(self, piece, move):\n \n self.save_position(piece, move)\n self.current_player = 0 if self.current_player == 1 else 1\n self.current_color = 'wb'[self.current_player]\n \n # Only update move nr is white is back in turn\n if self.current_color == 'w': self.move_nr += 1\n \n self.fifty_move_rule()\n self.endgame()", "title": "" }, { "docid": "46a3ed9ee724533a849f541e7f9bb323", "score": "0.5013251", "text": "def perform_move(self):\r\n new_room = int(input('Where to? ')) - 1\r\n result, bats_picked_up = self.host.move(new_room)\r\n if bats_picked_up:\r\n print('ZAP -- Super Bat snatch! Elsewhereville for you!')\r\n if result == ActionResult.MET_WUMPUS:\r\n print('TSK TSK TSK - Wumpus got you!')\r\n elif result == ActionResult.FELL_IN_PIT:\r\n print('YYYIIIIEEEE . . . Fell in a pit.')\r\n elif result == ActionResult.EXHAUSTED:\r\n print('OOF! You collapse from exhaustion.')\r\n elif result == ActionResult.NOT_AN_EXIT:\r\n print(\"BONK! 
That's not a possible move.\")", "title": "" }, { "docid": "82862e152f7e0883d348f9d9c4725d88", "score": "0.50115347", "text": "def step(self, action):\n\n \"\"\"\n Here we should convert the action num to movement action, execute the action in the\n simulation and get the observations result of performing that action.\n \"\"\"\n rospy.logdebug(\"START STEP OpenAIROS\")\n\n\n #self.gazebo.unpauseSim()\n self._set_action(action)\n obs = self._get_obs()\n done = self._is_done(obs)\n info = {}\n reward = self._compute_reward(obs, done)\n self.cumulated_episode_reward += reward\n\n rospy.logdebug(\"END STEP OpenAIROS\")\n\n return obs, reward, done, info", "title": "" }, { "docid": "9b8da47a35ff13c10f2c865d2d35cfbd", "score": "0.5009591", "text": "def result(board, action):\n row = action[0]\n col = action[1]\n if board[row][col] != EMPTY:\n raise Exception\n\n playerTurn = player(board)\n boardCopy = copy.deepcopy(board)\n boardCopy[row][col] = playerTurn\n return boardCopy", "title": "" }, { "docid": "10a8eee64069042c5331b57e15cb8485", "score": "0.49943924", "text": "def step(self, player_action, *args):\n opponent_action = self.opponent.act(self._opponent_state_rep(), self.opponent_reward,\n self.is_done())\n _, _, player_rew, self.opponent_reward, done = \\\n super(FixedOpponentSimulator, self).step(player_action, opponent_action)\n if done:\n self.opponent.act(self._opponent_state_rep(), self.opponent_reward,\n self.is_done())\n logging.log(\n msg='Challenge agent using: {}'.format(self.opponent.current_agent.__class__.__name__),\n level=logging.DEBUG)\n\n return self._player_state_rep(), player_rew, done, None", "title": "" }, { "docid": "eff95a2b5ae70a5d5bb82b68eabd9d11", "score": "0.49856293", "text": "def move(self, game):\n # run the minimax calculation passing the player object and the current game\n # we no longer use the returned move separately,\n # we just change the self.AI_chosen_move within minimax\n # and for the what-ai-thought feature we change the game's\n # considered_board property\n the_move, game.considered_board = minimax(self, game)\n # return the global variable AI_chosen_move,\n # whose value will be changed by the minimax() function\n return self.AI_chosen_move", "title": "" }, { "docid": "5574fe0ef9fb19614c26944bef9b3faa", "score": "0.49842906", "text": "def execute(self, ud):\n\n # Check for preemption before executing\n if self.preempt_requested():\n self.node.get_logger().info(\"Preempting %s before sending goal.\" % self._action_name)\n self.service_preempt()\n return 'preempted'\n\n # Make sure we're connected to the action server\n try:\n start_time = self.node.get_clock().now()\n while not self._action_client.server_is_ready():\n if self.preempt_requested():\n self.node.get_logger().info(\"Preempting while waiting for server '%s'.\" % self._action_name)\n self.service_preempt()\n return 'preempted'\n elif not rclpy.ok():\n self.node.get_logger().info(\"Shutting down while waiting for service '%s'.\" % self._action_name)\n return 'aborted'\n elif self._action_client.wait_for_server(1.0):\n self.node.get_logger().debug(\"Connected to server '%s'\" % self._action_name)\n elif self.node.get_clock().now() - start_time > self._server_wait_timeout:\n self.node.get_logger().warn(\"Server connection timeout reached\")\n return 'aborted'\n else:\n self.node.get_logger().warn(\"Still waiting for server '%s'...\" % self._action_name)\n except:\n self.node.get_logger().warn(\"Terminated while waiting for server '%s'.\" % self._action_name)\n return 'aborted'\n\n 
self._status = SimpleActionState.INACTIVE\n\n # Grab goal key, if set\n if self._goal_key is not None:\n self._goal = ud[self._goal_key]\n\n # Write goal fields from userdata if set\n for key in self._goal_slots:\n setattr(self._goal, key, ud[key])\n\n # Call user-supplied callback, if set, to get a goal\n if self._goal_cb is not None:\n try:\n goal_update = self._goal_cb(\n smach.Remapper(\n ud,\n self._goal_cb_input_keys,\n self._goal_cb_output_keys,\n []),\n self._goal,\n *self._goal_cb_args,\n **self._goal_cb_kwargs)\n if goal_update is not None:\n self._goal = goal_update\n except:\n self.node.get_logger().error(\"Could not execute goal callback: \"+traceback.format_exc())\n return 'aborted'\n\n # Make sure the necessary paramters have been set\n if self._goal is None and self._goal_cb is None:\n self.node.get_logger().error(\"Attempting to activate action \"+self._action_name+\" with no goal or goal callback set. Did you construct the SimpleActionState properly?\")\n return 'aborted'\n\n # Dispatch goal via non-blocking call to action client\n self._activate_time = self.node.get_clock().now()\n self._status = SimpleActionState.ACTIVE\n\n # Wait on done condition\n self._done_cond.acquire()\n send_future = self._action_client.send_goal_async(self._goal, feedback_callback=self._goal_feedback_cb)\n send_future.add_done_callback(self._goal_active_cb)\n\n # Preempt timeout watch thread\n if self._exec_timeout:\n self._execution_timer_thread = threading.Thread(name=self._action_name+'/preempt_watchdog', target=self._execution_timer)\n self._execution_timer_thread.start()\n\n # Wait for action to finish\n self._done_cond.wait()\n\n # Call user result callback if defined\n result_cb_outcome = None\n if self._result_cb is not None:\n try:\n result_cb_outcome = self._result_cb(\n smach.Remapper(\n ud,\n self._result_cb_input_keys,\n self._result_cb_output_keys,\n []),\n self._goal_status,\n self._goal_result)\n if result_cb_outcome is not None and result_cb_outcome not in self.get_registered_outcomes():\n self.node.get_logger().error(\"Result callback for action \"+self._action_name+\", \"+str(self._result_cb)+\" was not registered with the result_cb_outcomes argument. 
The result callback returned '\"+str(result_cb_outcome)+\"' but the only registered outcomes are: \"+str(self.get_registered_outcomes()))\n return 'aborted'\n except:\n self.node.get_logger().error(\"Could not execute result callback: \"+traceback.format_exc())\n return 'aborted'\n\n if self._result_key is not None:\n ud[self._result_key] = self._goal_result\n\n # Goal might be None, for instance if goal was LOST.\n if self._goal_result is not None:\n for key in self._result_slots:\n ud[key] = getattr(self._goal_result, key)\n\n # Check status\n if self._status == SimpleActionState.INACTIVE:\n # Set the outcome on the result state\n if self._goal_status == GoalStatus.STATUS_SUCCEEDED:\n outcome = 'succeeded'\n elif self._goal_status == GoalStatus.STATUS_CANCELED and self.preempt_requested():\n outcome = 'preempted'\n self.service_preempt()\n else:\n # All failures at this level are captured by aborting, even if we timed out\n # This is an important distinction between local preemption, and preemption\n # from above.\n outcome = 'aborted'\n else:\n # We terminated without going inactive\n self.node.get_logger().warn(\"Action state terminated without going inactive first.\")\n outcome = 'aborted'\n\n # Check custom result cb outcome\n if result_cb_outcome is not None:\n outcome = result_cb_outcome\n\n # Set status inactive\n self._status = SimpleActionState.INACTIVE\n self._done_cond.release()\n\n return outcome", "title": "" }, { "docid": "5a8c6ad0d8fd3b822eb02e0b4ee5943c", "score": "0.4982744", "text": "def _handleResult(self, result):\n if self._state == STATE_WAITING:\n self._state = STATE_NORMAL\n self._current = result\n else:\n self._step(result)", "title": "" }, { "docid": "89cbdeac4c8d095dcca697a8b23a2943", "score": "0.49824277", "text": "def move_request():\n\n if request.method == 'POST':\n game_id = request.form.get('id', type=int)\n move = request.form.get('move')\n game_data = games.get_game_data_if_to_move(game_id,\n user.get_logged_in_id())\n\n # Don't let user move in an already completed game\n # or game they are not a player of.\n if not game_data or not move or (\n game_data['status'] != games.Status.NO_MOVE\n and game_data['status'] != games.Status.IN_PROGRESS\n ):\n return jsonify(successful=False)\n\n # Need app context for process_move to send mail.\n with app.app_context():\n move_success = handle_move.process_move(\n move, database.row_to_dict(game_data), mail)\n\n return jsonify(successful=move_success)\n else:\n return redirect('/')", "title": "" } ]
11a8c4e157a89ec7f31cfed011cf3406
Format the record with colors.
[ { "docid": "3d34b659680ba38fc1fc925980d13126", "score": "0.75765485", "text": "def format(self, record):\r\n color = self.color_seq % (30 + self.colors[record.levelname])\r\n message = logging.Formatter.format(self, record)\r\n message = message.replace('$RESET', self.reset_seq)\\\r\n .replace('$BOLD', self.bold_seq)\\\r\n .replace('$COLOR', color)\r\n for color, value in self.colors.items():\r\n message = message.replace(\r\n '$' + color, self.color_seq % (value + 30))\\\r\n .replace('$BG' + color, self.color_seq % (value + 40))\\\r\n .replace('$BG-' + color, self.color_seq % (value + 40))\r\n return message + self.reset_seq", "title": "" } ]
[ { "docid": "dd614764164891b95a84fe3ad9e2b61d", "score": "0.8127916", "text": "def color_format(self, record):\n message = super().format(record)\n parts = message.split('\\n', 1)\n if '<color>' in parts[0] and '</color>' in parts[0]:\n bef, dur, aft = self.SPLIT_COLOR(parts[0])\n parts[0] = bef + self.colorize(dur, record) + aft\n message = '\\n'.join(parts)\n return message", "title": "" }, { "docid": "7eba8ffec2787d54ab366f4f5794855b", "score": "0.6953757", "text": "def colorize(self, message, record):\n if record.levelno in self._scheme:\n color = self._scheme[record.levelno]\n return color + message + self.RESET_ALL\n\n return message", "title": "" }, { "docid": "cc8b4cb1620741056913c5723b0cef5f", "score": "0.6610468", "text": "def format(self, record: logging.LogRecord) -> str:\n # pylint: disable=protected-access\n self._style._fmt = self.FORMAT_CUSTOM.get(record.levelno, self.FORMAT_DEFAULT)\n return super().format(record)", "title": "" }, { "docid": "243f359e251526e2ceab19890f67b7f3", "score": "0.6433786", "text": "def color_format(self, name, fill_color, text_color):\n self._formats[name] = super().add_format({'pattern': 1, 'fg_color': fill_color, 'font_color': text_color})", "title": "" }, { "docid": "b7a2ce536d23eae50d1d00ab8f86b80f", "score": "0.6281782", "text": "def format(self, record):\n if self.formatter:\n fmt = self.formatter\n else:\n fmt = _defaultFormatter\n return fmt.format(record)", "title": "" }, { "docid": "b55750c670b3a9190a2aa241eefe5b9e", "score": "0.6216047", "text": "def format(self, record):\n\n log_message = logging.Formatter.format(self, record)\n\n # Split the log message on line breaks.\n msgs = log_message \\\n .replace('\\r', '\\n') \\\n .rstrip('\\n') \\\n .split('\\n')\n\n lines = []\n for msg in msgs:\n msg = iter(msg)\n\n # Turn msg into line by combining the animation coloring for each\n # \"pixel\" in the row with it's corresponding character in msg.\n line = ''\n for pixel in next(self.animation):\n try:\n character = next(msg)\n except StopIteration:\n character = ' '\n\n line += (nyan_colors[pixel] + character)\n\n # Append any additional chars from the message.\n for character in msg:\n line += character\n\n # Reset formatting.\n line += '\\033[0m'\n\n lines.append(line)\n\n log_message = '\\n'.join(lines)\n setattr(record, 'msg', log_message)\n\n return log_message", "title": "" }, { "docid": "c5fc042e4de627a8eedd45b6edb4a0ff", "score": "0.614969", "text": "def format(self, record):\n # pylint: disable=protected-access\n if record.levelno > logging.INFO:\n self._style._fmt = self.verbose_format\n else:\n self._style._fmt = self.default_format\n return super().format(record)", "title": "" }, { "docid": "6a79fa402040127ef7c1969de2a753c7", "score": "0.61236787", "text": "def format(self, record: logging.LogRecord) -> str:\n return filter_datum(self.fields, self.REDACTION,\n super(RedactingFormatter, self).format(record),\n self.SEPARATOR)", "title": "" }, { "docid": "6a79fa402040127ef7c1969de2a753c7", "score": "0.61236787", "text": "def format(self, record: logging.LogRecord) -> str:\n return filter_datum(self.fields, self.REDACTION,\n super(RedactingFormatter, self).format(record),\n self.SEPARATOR)", "title": "" }, { "docid": "8f5cedd9008945987567d2c33849e439", "score": "0.60516614", "text": "def emit(self, record):\n\n record.colored_levelname = (self.colors[record.levelname] +\n record.levelname +\n self.colors[None])\n sys.stdout.write(self.format(record) + '\\n')", "title": "" }, { "docid": "ccb84fa5ab2487602d000a3f4c291529", "score": 
"0.6048474", "text": "def emit(self, record):\n if not self.uses_colors:\n return logging.StreamHandler.emit(self, record)\n\n my_record = copy.copy(record)\n level = my_record.levelno\n if level >= logging.ERROR:\n color = '\\x1b[31m\\x1b[1m'\n elif level >= logging.WARNING:\n color = '\\x1b[33m\\x1b[1m'\n elif level >= logging.INFO:\n color = '\\x1b[0m'\n elif level >= logging.DEBUG:\n color = '\\x1b[35m'\n else:\n color = '\\x1b[0m'\n\n msg = my_record.msg\n if msg and msg[0] == \"[\":\n try:\n end_pos = msg.index(\"]\")\n msg = \"'\\x1b[35m%s\\x1b[0m%s\" % (msg[:end_pos+1], msg[end_pos+1:])\n except ValueError:\n pass\n my_record.msg = \"%s%s\\x1b[0m\" % (color, msg)\n logging.StreamHandler.emit(self, my_record)", "title": "" }, { "docid": "5287a31f70cbb1ec97e362040c828783", "score": "0.59804386", "text": "def __format_line_colors(self, line):\n function_match = self.is_function_header(line)\n constant_match = self.is_constant_definition(line)\n section_match = self.is_section_header(line)\n if function_match:\n function_text = function_match.group(2)\n return line.replace(function_text, Text.yellow_text(function_text))\n elif constant_match:\n constant_text = constant_match.group(2)\n return line.replace(constant_text, Text.blue_text(constant_text))\n elif section_match:\n section_text = section_match.group(2)\n return line.replace(section_text, Text.magenta_text(section_text))\n return line", "title": "" }, { "docid": "e13f3d3786b4b6b496fd8c9a9967d55b", "score": "0.58836347", "text": "def __str__(self):\n\n return 'Color(%s, %s, %s)' % (self.red, self.green, self.blue)", "title": "" }, { "docid": "b7309c6b022924763eff4cb0bebdfece", "score": "0.5826912", "text": "def main(sep=' '):\n for line in valid_lines(None):\n r, g, b, name = line.split(maxsplit=3)\n r, g, b = (int(x) for x in [r, g, b])\n\n h = colr.rgb2hex(r, g, b)\n th = colr.rgb2termhex(r, g, b)\n t = colr.rgb2term(r, g, b)\n\n d = dict(r=r, g=g, b=b, name=name, h=h, th=th, t=t)\n d['testfg'] = Colr().hex(h, 'test', rgb_mode=False)\n d['testbg'] = Colr().b_hex(h, 'test ', rgb_mode=False)\n d['testbg_rgb'] = Colr().b_hex(h, ' ', rgb_mode=True)\n\n fmt = sep.join(['{r:3} {g:3} {b:3}',\n '0x{h}',\n '0x{th}',\n '{t:>3s}',\n '{testfg}{testbg}{testbg_rgb}',\n '{name}'])\n print(fmt.format(**d))", "title": "" }, { "docid": "8ee3b260a3d156fe1b88f0bbb0af3f67", "score": "0.5786986", "text": "def __color__(self):", "title": "" }, { "docid": "a1a7f7bf9afc661334f1a1445aa73ba4", "score": "0.5784746", "text": "def __str__(self):\n return f\"{self.color}\"", "title": "" }, { "docid": "b728de7e044e58a794925ff0faf676b0", "score": "0.5768672", "text": "def format_table():\n for style in range(8):\n for fg in range(30, 38):\n s1 = ''\n for bg in range(40, 48):\n format = ';'.join([str(style), str(fg), str(bg)])\n s1 += '\\x1b[%sm %s \\x1b[0m' % (format, range(format))\n print(s1)\n print('\\n')", "title": "" }, { "docid": "0bd7436c771f54f77cc1dd1a46afd6c1", "score": "0.5763466", "text": "def format(self, message, color):\n print color + \"[%s] \" % self.time().time() + message + self.CLEAR", "title": "" }, { "docid": "e339101c4b3ae53259109a359e343a93", "score": "0.57126606", "text": "def _color():\n return RAUSCH", "title": "" }, { "docid": "5306504fe9bbc5da82692d11f514c97c", "score": "0.56617594", "text": "def get_field_color(self) -> tuple:\n if self.field_type == 0: #field\n return self.GREEN\n elif self.field_type == 1: #apple\n return self.RED\n elif self.field_type == 2: #head\n return self.BLUE\n elif self.field_type ==3: #body\n return 
self.PINK", "title": "" }, { "docid": "b11ad54551708e73a7adcad2b39ee5f4", "score": "0.5658124", "text": "def format(self, record: logging.LogRecord) -> str:\n if not record.exc_info:\n level = record.levelname.lower()\n msg = record.getMessage()\n if level in self.colors:\n prefix = click.style(\"{}: \".format(level), **self.colors[level]) # type: ignore\n msg = \"\\n\".join(prefix + x for x in msg.splitlines())\n return msg\n return logging.Formatter.format(self, record) # pragma: no cover", "title": "" }, { "docid": "525af57a3ed29f72fbdcf3e62e2eeb24", "score": "0.5654587", "text": "def format(self, record: logging.LogRecord) -> str:\n if platform.system() != 'Linux': # Avoid funny logs on Windows & MacOS\n return super().format(record)\n\n record.msg = (\n self.STYLE[record.levelname] + record.msg + self.STYLE['END'])\n record.levelname = (\n self.STYLE['LEVEL'] + record.levelname + self.STYLE['END'])\n return super().format(record)", "title": "" }, { "docid": "caa39bc27d0b81e9ccad34691c0d01f0", "score": "0.5653484", "text": "def output(self):\n for filename, lineno, colno, message in sorted(set(self.records)):\n line = '{}:{}:{} {}\\n'.format(\n colored(filename, RED),\n colored(lineno, YELLOW),\n \"{}:\".format(colored(colno, YELLOW)) if colno else \"\",\n message\n )\n if not self.enable_colors:\n line = strip_colors(line)\n self.file.write(line)", "title": "" }, { "docid": "bd543d286f5652eb293cf463748bc27a", "score": "0.5646699", "text": "def __str__(self):\r\n return str(self.getColor());", "title": "" }, { "docid": "4ff36356cd405c63ac2ef021aa91e110", "score": "0.5637434", "text": "def __str__(self):\n schema = \"\"\n headers = \" \"\n alphabet = list(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\") \n alphabet.reverse()\n\n red_line_top = headers + \"\\033[31m--\\033[0m\" * (len(self.board))\n\n i = 0\n for line in self.board:\n line_txt = \"\"\n headers += alphabet.pop() + \" \"\n\n line_txt += str(f\" {i+1}\") + str(' ' * (i + 1)) + \"\\033[34m \\\\ \\033[0m\" if i < 9 \\\n else str(i + 1) + str(' ' * (i + 1)) + \"\\033[34m \\\\ \\033[0m\"\n\n for stone in line:\n if stone == 0:\n line_txt += \"⬡ \"\n elif stone == 1:\n line_txt += \"\\033[31m⬢ \\033[0m\" # 31=red\n else:\n line_txt += \"\\033[34m⬢ \\033[0m\" # 34=blue\n\n schema += line_txt + \"\\033[34m \\\\ \\033[0m\" + \"\\n\"\n\n i = i + 1\n\n red_line_bottom = (\" \" * (self.size)) + red_line_top\n\n return headers + \"\\n\" + (red_line_top) + \"\\n\" \\\n + schema + red_line_bottom", "title": "" }, { "docid": "9f1b8dee0a5beeefa01f6af4ac83cebe", "score": "0.56329095", "text": "def _format_color(self, value):\n try:\n return self.root.colors[value]\n except KeyError:\n message = \"'%s' color is not defined\" % value\n raise KeyError(message)", "title": "" }, { "docid": "859066cc7c9ccfb4655e6a0d729fdee4", "score": "0.56190306", "text": "def colorize(self, filename, line, line_num, to_colr):\n out = colorCodes.PURPLE + '{} line {}:{} '.format(filename, line_num, colorCodes.END)\n if self.args.caret:\n out = ''\n parts = line.split('{}'.format(to_colr))\n for part in parts[:-1]:\n out += part + colorCodes.RED + '{}'.format(to_colr) + colorCodes.END\n out += parts[-1]\n return out", "title": "" }, { "docid": "39eee5ab5da256bfb66e978628beeee6", "score": "0.5617705", "text": "def _formatting(self):\n return", "title": "" }, { "docid": "196109212882a5b2190d372d298b719f", "score": "0.56032914", "text": "def emit(self, record):\n # Need to make a actual copy of the record\n # to prevent altering the message for other loggers\n myrecord = 
copy.copy(record)\n levelno = myrecord.levelno\n if levelno >= 50: # CRITICAL / FATAL\n front = '\\033[30;41m' # black/red\n text = '\\033[30;41m' # black/red\n elif levelno >= 40: # ERROR\n front = '\\033[30;41m' # black/red\n text = '\\033[1;31m' # bright red\n elif levelno >= 30: # WARNING\n front = '\\033[30;43m' # black/yellow\n text = '\\033[1;33m' # bright yellow\n elif levelno >= 20: # INFO\n front = '\\033[30;42m' # black/green\n text = '\\033[1m' # bright\n elif levelno >= 10: # DEBUG\n front = '\\033[30;46m' # black/cyan\n text = '\\033[0m' # normal\n else: # NOTSET and anything else\n front = '\\033[0m' # normal\n text = '\\033[0m' # normal\n\n myrecord.levelname = '%s%s\\033[0m' % (front, myrecord.levelname)\n myrecord.msg = textwrap.fill(\n myrecord.msg, initial_indent=text, width=76,\n subsequent_indent='\\033[0m %s' % text) + '\\033[0m'\n logging.StreamHandler.emit(self, myrecord)", "title": "" }, { "docid": "b0b6a8f911230010f07379895ea87bd3", "score": "0.5592324", "text": "def finish (self):\n self.colorize_line (\"\")", "title": "" }, { "docid": "c71ddc9cfaee61c31bb33d4b6057df39", "score": "0.55896115", "text": "def Format():", "title": "" }, { "docid": "f6e468827875b64c41f7d1e524ebcef0", "score": "0.5558507", "text": "def v_color(ob: BaseGeometry) -> Text:\n return COLORS[ob.is_simple + 33]", "title": "" }, { "docid": "9a45dc2c3cd850e1b5065a96931696e0", "score": "0.555764", "text": "def format_row(self, row, key, color):\n value = row[key]\n if not isinstance(value, Number):\n return value\n\n # determine if integer value\n is_integer = float(value).is_integer()\n template = '{}' if is_integer else '{:' + self.floatfmt + '}'\n\n # if numeric, there could be a 'best' key\n key_best = key + '_best'\n if (key_best in row) and row[key_best]:\n template = color + template + Ansi.ENDC.value\n return template.format(value)", "title": "" }, { "docid": "cbfc37ff82a9c0215186387607013c63", "score": "0.5544767", "text": "def format(self, record):\n json_dict = dict(msg=record.msg.replace('\\n', ' '), level=record.levelname)\n json_dict['type'] = record.type\n formatted_message = ' ' + json.dumps(json_dict)\n\n if self._is_first_line:\n self._is_first_line = False\n return formatted_message\n\n return ', ' + formatted_message", "title": "" }, { "docid": "57675de12fd12d0af0647fb18bafe3a7", "score": "0.5512514", "text": "def print_format_table():\n for style in range(8):\n for fg in range(30,38):\n s1 = ''\n for bg in range(40,48):\n format = ';'.join([str(style), str(fg), str(bg)])\n s1 += '\\x1b[%sm %s \\x1b[0m' % (format, format)\n print(s1)\n print('\\n')", "title": "" }, { "docid": "5c3ce37b23acaab5f231e004ed6c9474", "score": "0.55111057", "text": "def colorize(lead, num, color):\n if num != 0 and ANSIBLE_COLOR and color is not None:\n return \"%s%s%-15s\" % (stringc(lead, color),\n stringc(\"=\", color), stringc(str(num), color))\n else:\n return \"%s=%-4s\" % (lead, str(num))", "title": "" }, { "docid": "022a54d7a8a0f4e9fe64b7ca59cb82c5", "score": "0.55050474", "text": "def init_colors(self):", "title": "" }, { "docid": "213ff5e959cf7046f668223025d8777f", "score": "0.54987097", "text": "def __highlight_migration(self, char_format, user_data):\r\n user_data.error = True\r\n char_format = char_format.toCharFormat()\r\n char_format.setUnderlineColor(QColor(\r\n resources.CUSTOM_SCHEME.get('migration-underline',\r\n resources.COLOR_SCHEME['migration-underline'])))\r\n char_format.setUnderlineStyle(\r\n QTextCharFormat.WaveUnderline)\r\n return char_format", "title": "" }, { "docid": 
"6ef6d5b978b380bc88dfbf0d49cbfad0", "score": "0.54971254", "text": "def format(self, record: logging.LogRecord) -> str:\n return filter_datum(\n self.fields, self.REDACTION, super().format(record),\n self.SEPARATOR)", "title": "" }, { "docid": "40c70d5418019049dbf4e35af12be8e2", "score": "0.54907614", "text": "def format(self, record: logging.LogRecord) -> str:\n return filter_datum(self.fields,\n self.REDACTION,\n super().format(record),\n self.SEPARATOR)", "title": "" }, { "docid": "0116c426dd73469a6a57acfcfb84f11e", "score": "0.5486211", "text": "def print_format_table():\n for style in range(8):\n for fg in range(30, 38):\n s1 = ''\n for bg in range(40, 48):\n formatting = ';'.join([str(style), str(fg), str(bg)])\n s1 += '\\x1b[%sm %s \\x1b[0m' % (formatting, formatting)\n print(s1)\n print('\\n')", "title": "" }, { "docid": "fea96d9f3b66571d109dc9d659b865d8", "score": "0.5484012", "text": "def add_colour_to_conc_df(conc):\n colourdict = objs._conc_colours[len(objs._old_concs)-1]\n fores = []\n backs = []\n stys = [] \n for index in list(conc.index):\n line = colourdict.get(str(index))\n if not line:\n fores.append('')\n backs.append('')\n stys.append('')\n else:\n fores.append(line.get('Fore', ''))\n backs.append(line.get('Back', ''))\n stys.append(line.get('Style', ''))\n\n if any(i != '' for i in fores):\n conc['Foreground'] = fores\n if any(i != '' for i in backs):\n conc['Background'] = backs\n if any(i != '' for i in stys):\n conc['Style'] = stys\n return conc", "title": "" }, { "docid": "45bf94f05f5c8731055e0fb306e4460f", "score": "0.54822093", "text": "def color_red(val):\n color = \"black\"\n if isinstance(val, datetime.datetime):\n pass\n else:\n try:\n color = (\n \"red\" if float(val) >= THRESHOLD / 10 and float(val) <= 1 else \"black\"\n )\n except ValueError:\n color = \"black\"\n return \"color: %s\" % color", "title": "" }, { "docid": "3d9e5ca537a872568e490c541d37f1ed", "score": "0.5460795", "text": "def format(self, record):\r\n\r\n # store project info\r\n record.project = self.project\r\n record.version = self.version\r\n\r\n # store request info\r\n context = getattr(local.store, 'context', None)\r\n if context:\r\n d = _dictify_context(context)\r\n for k, v in d.items():\r\n setattr(record, k, v)\r\n\r\n # NOTE(sdague): default the fancier formatting params\r\n # to an empty string so we don't throw an exception if\r\n # they get used\r\n for key in ('instance', 'color', 'user_identity'):\r\n if key not in record.__dict__:\r\n record.__dict__[key] = ''\r\n\r\n if record.__dict__.get('request_id'):\r\n self._fmt = CONF.logging_context_format_string\r\n else:\r\n self._fmt = CONF.logging_default_format_string\r\n\r\n if (record.levelno == logging.DEBUG and\r\n CONF.logging_debug_format_suffix):\r\n self._fmt += \" \" + CONF.logging_debug_format_suffix\r\n\r\n # Cache this on the record, Logger will respect our formatted copy\r\n if record.exc_info:\r\n record.exc_text = self.formatException(record.exc_info, record)\r\n return logging.Formatter.format(self, record)", "title": "" }, { "docid": "f4f7b113cfd94d591844d7bc978a83dd", "score": "0.5460392", "text": "def __str__(self):\n return self._to_s(self.color)", "title": "" }, { "docid": "c8cfaf2b3bfeaf21fa0de6b0583a5824", "score": "0.5451582", "text": "def __highlight_checker(self, char_format, user_data, color_name):\r\n user_data.error = True\r\n color = QColor(color_name)\r\n self.__apply_proper_style(char_format, color)\r\n return char_format", "title": "" }, { "docid": "5ffcef2dbe2a0aadd3363730797916c0", "score": 
"0.54512167", "text": "def reformat(color):\n return int(round(color[0] * 255)), \\\n int(round(color[1] * 255)), \\\n int(round(color[2] * 255))", "title": "" }, { "docid": "62ca5a1ff327ad1a78673cddc7cf1240", "score": "0.54490495", "text": "def colorize(self, txt, color):\n if self.__use_colors:\n return '\\x1b[0;%d;40m%s\\x1b[0m' % (color, txt)\n else:\n return txt", "title": "" }, { "docid": "54c49bd0d3b6d0a9ce9a1a33d79513d2", "score": "0.54436195", "text": "def _create_formatter(self, level, fmt):\n color = ''\n reset = ''\n\n if sys.stdout.isatty():\n color_name = self.config['COLOR'].get(level.upper())\n\n if color_name:\n color = getattr(colorama.Fore, color_name.upper(), '')\n\n if color:\n reset = colorama.Fore.RESET\n\n return logging.Formatter(fmt.format(color=color, reset=reset))", "title": "" }, { "docid": "85c3724c5236bb1d5e0d5f06acdc355b", "score": "0.54435194", "text": "def colorize_diff(diff: typing.List[str]):\n marking = ''\n col = 0\n for line in diff:\n if A_MARKER in line or A_MARKER_TXT in line:\n marking = Fore.GREEN\n elif B_MARKER in line or B_MARKER_TXT in line:\n marking = Fore.RED\n elif len(line) - len(line.lstrip()) < col:\n # in case of unindent reset marking color\n marking = ''\n\n # keep track of leading whitespace to catch unindent\n col = len(line) - len(line.lstrip())\n reset = ''\n if marking:\n reset = Fore.RESET\n line = marking + line + reset\n if A_MARKER_TXT in line or B_MARKER_TXT in line:\n # reset because text lines are marked individually\n marking = ''\n yield line", "title": "" }, { "docid": "001044173e9aab6430bc0a589e3224fc", "score": "0.54393107", "text": "def add_styles_and_formats(bag):\n print(\"add_styles_and_formats()\")\n bag.fmt = MyBunch()\n bag.fmt.bold = bag.workbook.add_format({'bold': 1})\n bag.fmt.dol_int = bag.workbook.add_format({'num_format': '$#,##0'})\n# bag.fmt.dol_float6 = bag.workbook.add_format({'num_format': '$0.000000'})\n# bag.fmt.dol_acc_int = bag.workbook.add_format({'num_format': '_($* #,##0_);[red]_($* (#,##0);_($* \"-\"??_);_(@_)'})\n# bag.fmt.dol_acc_float6 = bag.workbook.add_format({'num_format': '_($* 0.000000_);[red]_($* (0.000000);_($* \"-\"??_);_(@_)'})\n \n## bag.fmt.fg_color_orange = bag.workbook.add_format()\n## bag.fmt.fg_color_orange.set_fg_color('#FE9901')\n## bag.fmt.fg_color_black = bag.workbook.add_format()\n## bag.fmt.fg_color_black.set_fg_color('#000000')\n## #bag.fmt.col_title = bag.workbook.add_format({'bold': True, 'border': True, 'fg_color':'#FE9901'}) #orange\n\n bag.fmt.col_title = bag.workbook.add_format({'bold':1, 'border':1, 'fg_color':'#fbd190'})\n bag.fmt.val_row_all_borders = bag.workbook.add_format({'font_size':12, 'border':1, 'border_color':'#CECECE', 'right': 1, 'border_color':'#000000'})\n bag.fmt.val_row_left_right_borders = bag.workbook.add_format({'font_size':12, 'left':1, 'right':1, 'bottom':1,'left_color':'#000000', 'right_color':'#000000', 'bottom_color':'#CECECE' , 'num_format': '_($* 0.000000_);[red]_($* (0.000000);_($* \"-\"??_);_(@_)'})\n bag.fmt.val_row_left_right_borders_shade = bag.workbook.add_format({'font_size':12, 'left':1, 'right':1, 'bottom':1,'left_color':'#000000', 'right_color':'#000000', 'bottom_color':'#CECECE', 'num_format': '_($* 0.000000_);[red]_($* (0.000000);_($* \"-\"??_);_(@_)', 'fg_color':'#DCE6F1'})\n bag.fmt.val_row_all_borders = bag.workbook.add_format({'font_size':12, 'border':1, 'border_color':'#CECECE', 'num_format': '_($* 0.000000_);[red]_($* (0.000000);_($* \"-\"??_);_(@_)'})\n bag.fmt.row_top_border = bag.workbook.add_format({'top':1, 
'border_color':'#000000'})", "title": "" }, { "docid": "a628d0894797253dc92c6004362d47f6", "score": "0.54382974", "text": "def value_color(self, type):\n if self.nocolor:\n return ''\n\n if type in ('byte', 'i16', 'i32', 'i64'):\n return ':y'\n elif type == 'bool':\n return ':c'\n elif type == 'double':\n return ':m'\n elif type == 'string':\n return ':g'\n elif type=='struct':\n return ''", "title": "" }, { "docid": "49bf8655a69572b03069893913eaa857", "score": "0.54365593", "text": "def color(self):\n alpha = \"abcdefghijklmnopqrstuvwxyz\" # alpha[1] = \"b\"\n alphaPos = dict([ (x[1],x[0]) for x in enumerate(alpha) ]) # alphaPos[\"b\"] = 1\n colorValue = 0\n for letter in self.formatedText:\n if letter.isdigit():\n colorValue += int(letter)\n else:\n colorValue += alphaPos[letter.lower()]\n return [(colorValue * len(self.formatedText)) % 256, (colorValue * 2) % 256, (colorValue * 3 % 256)]", "title": "" }, { "docid": "4ee7f732e55793db6cfe51420b727d8a", "score": "0.5429466", "text": "def vcfformat(self):\n pass", "title": "" }, { "docid": "53011d0a569c1fcc8aefa183df379380", "score": "0.5422624", "text": "def __str__(self):\n return (\n self.color_name +\n str(self.number) +\n \" \" * (2 - len(str(self.number)))\n )", "title": "" }, { "docid": "d1239186838adcb438282025ce907270", "score": "0.5406479", "text": "def formatted_message(message, color):\n print(decode_color(color) .format(message))", "title": "" }, { "docid": "7843062434c8a220a5ce27512943df48", "score": "0.54019177", "text": "def color(*args: t.Any, **kwargs: t.Any) -> str:\n return _format_rich_text(_color(*args, **kwargs))", "title": "" }, { "docid": "8c6701c3f003eacf98d6ba10caeff052", "score": "0.5400206", "text": "def add_color(tweets):\n colors = list(Color(\"red\").range_to(Color(\"green\"), 100))\n for t in tweets:\n print t\n score = t['score']\n colorscore = (score + 1) / 2 * 100\n color = colors[int(colorscore)]\n t['color'] = color\n\n return tweets", "title": "" }, { "docid": "a83e83c6638ca6599fcecc9e70087980", "score": "0.53994536", "text": "def cf(color, style='', background=''):\n\t_color = QtGui.QColor()\n\t_color.setNamedColor(color)\n\t\n\t_format = QtGui.QTextCharFormat()\n\t_format.setForeground(_color)\n\t\n\tif background:\n\t\t_bg = QtGui.QColor()\n\t\t_bg.setNamedColor(background)\n\t\t_format.setBackground(_bg)\n\t\n\tif 'bold' in style:\n\t\t_format.setFontWeight(QtGui.QFont.Bold)\n\tif 'italic' in style:\n\t\t_format.setFontItalic(True)\n\tif 'underline' in style:\n\t\t_format.setFontUnderline(True)\n\t\n\treturn _format", "title": "" }, { "docid": "ea57c6399b4002581b608f153c08fb64", "score": "0.5386154", "text": "def _color(self, item, lst, color='blue'):\n \n res = []\n for it in lst:\n if item == it:\n res.append(f'<text style=\"color:{color}\">{it}</text>')\n else:\n res.append(it)\n return res", "title": "" }, { "docid": "e8af63586ee2280b2d3d89a773a7222d", "score": "0.53771496", "text": "def test_write_colors3(self):\n\n self.styles.custom_colors = [\n \"FF792DC8\",\n \"FF646462\",\n \"FF5EA29C\",\n \"FF583AC6\",\n \"FFE31DAF\",\n \"FFA1A759\",\n \"FF600FF1\",\n \"FF0CF49C\",\n \"FFE3FA06\",\n \"FF913AC6\",\n \"FFB97847\",\n \"FFD97827\",\n ]\n\n self.styles._write_colors()\n\n exp = \"\"\"<colors><mruColors><color rgb=\"FFD97827\"/><color rgb=\"FFB97847\"/><color rgb=\"FF913AC6\"/><color rgb=\"FFE3FA06\"/><color rgb=\"FF0CF49C\"/><color rgb=\"FF600FF1\"/><color rgb=\"FFA1A759\"/><color rgb=\"FFE31DAF\"/><color rgb=\"FF583AC6\"/><color rgb=\"FF5EA29C\"/></mruColors></colors>\"\"\"\n got = 
self.fh.getvalue()\n\n self.assertEqual(got, exp)", "title": "" }, { "docid": "accee067aa3ce4d0dd91c4513cb4f523", "score": "0.5365377", "text": "def __str__(self):\n return '{:2d} [{}]: Blue:{},{},{} | Red:{},{},{}'.format(\n self.match_num,\n self.match_time,\n str(self.teams['B1']),\n str(self.teams['B2']),\n str(self.teams['B3']),\n str(self.teams['R1']),\n str(self.teams['R2']),\n str(self.teams['R3']))", "title": "" }, { "docid": "95908536e55571753411ebbdf4b6949d", "score": "0.5362116", "text": "def perform_additional_formatting(self, charter: LineChart) -> None:\n pass", "title": "" }, { "docid": "4a02c4c4dc11f0729df52b8f61608fb9", "score": "0.5341134", "text": "def format(self, records):\n rv = \"\"\n if len(records) > 0:\n rv = rv + self.formatHeader(records)\n for record in records:\n rv = rv + self.linefmt.format(record)\n rv = rv + self.formatFooter(records)\n return rv", "title": "" }, { "docid": "6329c6b60d2a2dce23f78fed17dad033", "score": "0.5339069", "text": "def print_format_table(string, style, fg, bg):\n if (0 <= style <= 8) and (30 <= fg <= 38) and (40 <= bg <= 48):\n s1 = ''\n color = ';'.join([str(style), str(fg), str(bg)])\n s1 += '\\x1b[%sm %s \\x1b[0m' % (color, string)\n print(s1, end='')", "title": "" }, { "docid": "4cc20d94f8ffeaa829280390f60e647c", "score": "0.5308231", "text": "def print_chart(self):\n for fg in range(0, 7):\n for bg in range(0, 7):\n for attr in sorted(self.attribute_table.values()):\n demo_color = Color(foreground=fg, background=bg, attribute=attr,\n bright_foreground=False, bright_background=False)\n print demo_color(\"Hello World!\"), repr(demo_color)\n demo_color.bright_foreground = True\n print demo_color(\"Hello World!\"), repr(demo_color)\n demo_color.bright_background = True\n print demo_color(\"Hello World!\"), repr(demo_color)", "title": "" }, { "docid": "7231ede4ccae553e05092349f53209b6", "score": "0.53072095", "text": "def color(self):\n return self[\"color\"]", "title": "" }, { "docid": "f6c59c87751b87ea22241a5c34328ed4", "score": "0.5306652", "text": "def test_write_colors1(self):\n\n self.styles.custom_colors = [\"FF26DA55\"]\n self.styles._write_colors()\n\n exp = \"\"\"<colors><mruColors><color rgb=\"FF26DA55\"/></mruColors></colors>\"\"\"\n got = self.fh.getvalue()\n\n self.assertEqual(got, exp)", "title": "" }, { "docid": "c11df9c7b40ad3cf1c2b6455c1a03d81", "score": "0.5302407", "text": "def define_color(way_id, congestion):\r\n if congestion.current_state == '0':\r\n return 'white'\r\n elif congestion.current_state == '1':\r\n return '#33ff7c'\r\n elif congestion.current_state == '2':\r\n return '#13d2bc'\r\n elif congestion.current_state == '3':\r\n return '#e9ff00'\r\n elif congestion.current_state == '4':\r\n return '#ff5500'\r\n elif congestion.current_state == '5':\r\n return '#ff0004'\r\n else:\r\n return 'black'", "title": "" }, { "docid": "b214a2c83368c36472970d1524a2b1e7", "score": "0.5300098", "text": "def _additional_formatting(self, line):\n return line", "title": "" }, { "docid": "f8a3a752935c2364eaafd57bca96b099", "score": "0.5298588", "text": "def test_write_colors2(self):\n\n self.styles.custom_colors = [\"FF26DA55\", \"FF792DC8\", \"FF646462\"]\n self.styles._write_colors()\n\n exp = \"\"\"<colors><mruColors><color rgb=\"FF646462\"/><color rgb=\"FF792DC8\"/><color rgb=\"FF26DA55\"/></mruColors></colors>\"\"\"\n got = self.fh.getvalue()\n\n self.assertEqual(got, exp)", "title": "" }, { "docid": "9c304a59329644432f61712dd9f08c98", "score": "0.52941513", "text": "def fmt_custom_order(self, fd, record):\n subresult = {}\n for full_name, field_type, summary in self.walk_fields(record):\n row = full_name.title() + u': ' + summary\n if '\\n' in summary:\n row = row + \"\\n\"\n subresult.setdefault(full_name + '_' + field_type, []).append(row)\n if len(subresult) > 0:\n output = []\n for field in self.fields:\n output.extend(subresult.get(field, []))\n fd.write(u'\\n'.join(output).encode('utf8'))", "title": "" }, { "docid": "dbbc5065907e43f9a207f011d140738d", "score": "0.5267226", "text": "def fmt_record_order(self, fd, record):\n subresult = []\n for full_name, field_type, summary in self.walk_fields(record):\n row = full_name.title() + u': ' + summary\n if '\\n' in summary:\n row = row + \"\\n\"\n subresult.append(row)\n if len(subresult) > 0:\n fd.write(u'\\n'.join(subresult).encode('utf8'))", "title": "" }, { "docid": "ad2b508520090e32d6312412269a7e13", "score": "0.52655625", "text": "def logColor ( color = True ) :\n return ColorLogging ( color )", "title": "" }, { "docid": "4f34a439db079b6304d41d7cda197139", "score": "0.52648586", "text": "def colorize_params(workbook, worksheet, df, column, dict_with_colors = InterventionLabels.INTERVENTION_LABELS_COLOR):\n col_index = list(df.columns).index(column)\n for intervention_label in dict_with_colors:\n worksheet.conditional_format(1, col_index, len(df), col_index, {\n 'type': 'cell',\n 'criteria': '==',\n 'value': '\"%s\"' % intervention_label,\n 'format': workbook.add_format({\n 'bg_color': dict_with_colors[intervention_label][0],\n 'font_color': dict_with_colors[intervention_label][1]\n })\n })", "title": "" }, { "docid": "e5f49288326742e67d520344e6fb0c3d", "score": "0.52616084", "text": "def getColorAtAddress(self,addr):\n return HopperLowLevel.colorAtAddress(self.__internal_document_addr__,addr)", "title": "" }, { "docid": "b393baf8ff157b7eca6743d1d49a7598", "score": "0.5251488", "text": "def __str__(self):\n return '{} ({})'.format(self.name, self.color)", "title": "" }, { "docid": "886fb7f6dac0eb8930e854ad2a685b1c", "score": "0.5242738", "text": "def print_device_formatted(record):\n record_lst = list(record)\n\n icmp_acc = str(record_lst[3])\n http_acc = str(record_lst[4])\n\n if icmp_acc == \"1\":\n icmp_acc = '\\33[33m' + \"online\" + '\\033[0m'\n elif icmp_acc == \"0\":\n icmp_acc = \"offline\"\n\n if http_acc == \"1\":\n http_acc = '\\033[1;34m' + \"up\" + '\\033[0m'\n elif http_acc == \"0\":\n http_acc = \"down\"\n\n print(str(record_lst[0]) + \"\\t\" + str(record_lst[1]) + \"\\t\" + str(record_lst[2]) + \"\\t\" + icmp_acc + \"\\t\" +\n http_acc)", "title": "" }, { "docid": "c7ce04da115eb057c74a6df60e0220e2", "score": "0.5241384", "text": "def _get_text_color(self, style):\r\n if style['color'] is not None:\r\n fill = '#' + style['color']\r\n else:\r\n fill = '#000'\r\n return fill", "title": "" }, { "docid": "05802f4c97a0cd06bd932d6bd5ff6271", "score": "0.5238506", "text": "def add_format_field(self, chrom, coord, ref, alt, fname, fvalue):\n self.vcf[(chrom, 
coord, ref, alt)][8],\n fname])\n self.vcf[(chrom, coord, ref, alt)][9] = ':'.join([self.vcf[(chrom, coord, ref, alt)][9],\n str(fvalue)])", "title": "" }, { "docid": "b440105bdbf3e4ff7f2bb0ac08508a96", "score": "0.52340305", "text": "def color(name, std_pos, score, width):\n return (code_std_pos[std_pos] +\n code_score[score] +\n name.ljust(width) +\n RESET)", "title": "" }, { "docid": "68ae6bcf8650d38a34ccf8cd41a7772e", "score": "0.5216656", "text": "def format(self, record):\n message = logging.Formatter.format(self, record)\n check = \"\\n\".join([x for x in message.split(\"\\n\")])\n return check", "title": "" }, { "docid": "dc23587b3a9520812e765b9c0408a02d", "score": "0.52125394", "text": "def __recordFmt(self):\r\n if not self.numRecords:\r\n self.__dbfHeader()\r\n fmt = ''.join(['%ds' % fieldinfo[2] for fieldinfo in self.fields])\r\n fmtSize = calcsize(fmt)\r\n return (fmt, fmtSize)", "title": "" }, { "docid": "7bb4f35051d6542372efca71a0921fb0", "score": "0.5200798", "text": "def _parse_color(self, col):\n if re.match(\"#[0-9a-f]{6}\", col):\n return col\n else:\n return ManimColor(col).to_hex()", "title": "" }, { "docid": "7a04454d9f9d37c4143a5925a303848f", "score": "0.52005464", "text": "def _define_formats(self, workbook):\n self.format_bold = workbook.add_format({\"bold\": True})\n self.format_right = workbook.add_format({\"align\": \"right\"})\n self.format_left = workbook.add_format({\"align\": \"left\"})\n self.format_right_bold_italic = workbook.add_format(\n {\"align\": \"right\", \"bold\": True, \"italic\": True}\n )\n self.format_header_left = workbook.add_format(\n {\"bold\": True, \"border\": True, \"bg_color\": \"#FFFFCC\"}\n )\n self.format_header_center = workbook.add_format(\n {\"bold\": True, \"align\": \"center\", \"border\": True, \"bg_color\": \"#FFFFCC\"}\n )\n self.format_header_right = workbook.add_format(\n {\"bold\": True, \"align\": \"right\", \"border\": True, \"bg_color\": \"#FFFFCC\"}\n )\n # Company Amount Format\n self.format_header_amount_company = workbook.add_format(\n {\"bold\": True, \"border\": True, \"bg_color\": \"#FFFFCC\"}\n )\n currency_id = self.env[\"res.company\"]._get_user_currency()\n if not currency_id.decimal_places:\n company_amount_format = \"#,##0\"\n else:\n company_amount_format = \"#,##0.\" + \"0\" * currency_id.decimal_places\n self.format_header_amount_company.set_num_format(company_amount_format)\n self.format_amount_company = workbook.add_format()\n self.format_amount_company.set_num_format(company_amount_format)\n self.format_amount_company_bold = workbook.add_format({\"bold\": True})\n self.format_amount_company_bold.set_num_format(company_amount_format)\n # Percent Format\n self.format_percent_bold_italic = workbook.add_format(\n {\"bold\": True, \"italic\": True}\n )\n self.format_percent_bold_italic.set_num_format(\"#,##0.00%\")\n # Exchange rate Format\n self.format_exchange_rate = workbook.add_format()\n self.format_exchange_rate.set_num_format(\"#,##0.00000000\")\n # Standard amount Format\n self.format_amount = workbook.add_format()\n self.format_amount.set_num_format(\"#,##0\")\n # Quantity Format\n self.format_quantity = workbook.add_format()\n self.format_quantity.set_num_format(\"#,##0\")", "title": "" }, { "docid": "5dc877370d9fb071e530402085e74f7b", "score": "0.5195361", "text": "def style_field(token, field):\n s = StringIO()\n formatter.format(((token, field),), s)\n return s.getvalue()", "title": "" }, { "docid": "29b066b6dbfdb7c869031a2aadae6cf0", "score": "0.5193958", "text": "def OneColor(color):\n 
ser.write('\\r')\n for unused_row in range(8):\n ser.write(color * 8)\n ser.write('\\n')\n ser.flush()", "title": "" }, { "docid": "fa7d9bdeda7127f7c5b771a610a57fd9", "score": "0.5193706", "text": "def colorized_output(key, value):\n green_text = Fore.GREEN + \"{:<14}\".format(key)\n normal_text = Style.RESET_ALL + \": \" + str(value)\n return green_text + normal_text", "title": "" }, { "docid": "6f9bfa6b451bd1065e547b14a524c3a8", "score": "0.51914215", "text": "def __call__(self, index, s):\n if self.colorize:\n self._color_wrap(index, s)\n else:\n print(s)", "title": "" }, { "docid": "4bff9594ae835b7c9ace688e10ed10e9", "score": "0.5188814", "text": "def colorof(self):\n qtype = self.questtype[2]\n\n item = random.choice(list(self.data[qtype]))\n article = self.data[qtype][item][1]\n question = (\n f\"/me ▬W▬H▬A▬T▬S▬▬T▬H▬E▬▬C▬O▬L▬O▬R▬▬ :thinking: \"\n f\"What's the color of {article}{item}? :thinking: ▬W▬H▬A▬T▬S▬▬T▬H▬E▬▬C▬O▬L▬O▬R▬▬\"\n )\n answer = self.data[qtype][item][0]\n\n return {\"colorof\": {\"question\": question, \"answer\": answer}}", "title": "" }, { "docid": "2f652c31292f743e281e056b004bb0dc", "score": "0.5188091", "text": "def generate_color_text(prev_val, cur_val):\n delta = cur_val - prev_val\n if(delta >= 0):\n return (bcolors.OKGREEN + format(cur_val, \".3f\") + bcolors.ENDC)\n else:\n return (bcolors.FAIL + format(cur_val, \".3f\") + bcolors.ENDC)", "title": "" }, { "docid": "d42e7c387eb46bc8ca17b295300463d1", "score": "0.51852137", "text": "def init_colors(self):\n self.RED = '\\033[0;31m'\n self.GREEN= '\\033[0;32m'\n self.PURPLE='\\033[0;35m'\n self.NC = '\\033[0m'", "title": "" }, { "docid": "fd6e6ffe69acd0f21db90ac2450dbd7c", "score": "0.5180658", "text": "def color(letter):\n color_num=colordict[letter]\n return f\"\\x1b[{color_num};1;m\"", "title": "" }, { "docid": "8997258c2fd648c826cf80c6bee9641d", "score": "0.5174436", "text": "def set_colors(self, options):\n \n # set the color and border color for the track\n if 'color' in options:\n self.color = options['color'].replace('\"', '')\n else:\n # use a default color\n self.color = \"#E41A1C\"\n\n # some tracks allow different fill for fwd / rev strand\n if 'fwd_color' in options:\n self.fwd_color = options['fwd_color'].replace('\"', '')\n else:\n # use default color for fwd strand\n self.fwd_color = self.color\n\n if 'rev_color' in options:\n self.rev_color = options['rev_color'].replace('\"', '')\n else:\n self.rev_color = self.color\n \n\n # set colors for border\n if 'draw_border' in options:\n self.draw_border = self.parse_bool_str(options['draw_border'])\n else:\n self.draw_border = True\n \n if self.draw_border: \n if 'border_color' in options:\n self.border_color = options['border_color'].replace('\"', '')\n else:\n # use same color as fill\n self.border_color = self.color\n\n if 'fwd_border_color' in options:\n self.fwd_border_color = \\\n options['fwd_border_color'].replace('\"', '')\n else:\n self.fwd_border_color = self.fwd_color\n\n if 'rev_border_color' in options:\n self.rev_border_color = \\\n options['rev_border_color'].replace('\"', '')\n else:\n self.rev_border_color = self.rev_color\n else:\n # no not draw any border\n self.border_color = robjects.r(\"NA\")\n self.rev_border_color = robjects.r(\"NA\")\n self.fwd_border_color = robjects.r(\"NA\")", "title": "" }, { "docid": "623f6b537d5edb7202df99078d8f7839", "score": "0.51725346", "text": "def formatHeader(self, records):\n return \"\"", "title": "" }, { "docid": "07077dd822a122cc0c0016de4be96e97", "score": "0.51678747", "text": "def 
getColor(self):\n return str(self.color)", "title": "" }, { "docid": "42c2a062e68e5c62319a4e311a5872de", "score": "0.516046", "text": "def getColor(self):\r\n return (self.red,self.green,self.blue,self.alpha);", "title": "" } ]
b998911a668d0cd73cf46f4ea01aa101
Returns true if both objects are equal
[ { "docid": "f89ca6807840c0f7c2fecc9b650a234a", "score": "0.0", "text": "def __eq__(self, other):\n if not isinstance(other, Episode):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" } ]
[ { "docid": "35781205f766178c26df8024228c9541", "score": "0.8067084", "text": "def __eq__(self, other: object) -> bool:\n return self.__class__ == other.__class__ and self.points == other.points", "title": "" }, { "docid": "df1ca4bbc0e302958ca9db15d603d19b", "score": "0.7999691", "text": "def same_as(self, other):\n if not isinstance(other, ObjectBase):\n return False\n return self.__hash__() == other.__hash__()", "title": "" }, { "docid": "c2637b9fb3d16ba4f1c42499814598b1", "score": "0.7983334", "text": "def __eq__(self, other):\n if isinstance(self, other.__class__):\n return self.__dict__ == other.__dict__\n return False", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { 
"docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "4965a048c3f0895b76301f00b910c344", "score": "0.7967544", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": 
"c86f650008ba8e5e6e01ad8684ee340b", "score": "0.79617655", "text": "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "title": "" }, { "docid": "c86f650008ba8e5e6e01ad8684ee340b", "score": "0.79617655", "text": "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "title": "" }, { "docid": "ac89811fd56352666e3c7503d26cad28", "score": "0.79458547", "text": "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if 
not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d45b3e4d035cc6835b78e6627ba77db4", "score": "0.7932253", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "c9739d504a0b2dca72d9450ff9c75d8a", "score": "0.7919929", "text": "def __eq__(self, other):\n\t\tif isinstance(other, self.__class__):\n\t\t\treturn self.__dict__ == other.__dict__\n\t\treturn False", "title": "" }, { "docid": "a707a1d981633ccb072ff12903ae9be8", "score": "0.7914556", "text": "def __eq__(self, other) -> bool:\n pass", "title": "" }, { "docid": "c299befb6fc6d9fab25cb0fc72557080", "score": "0.7912908", "text": "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n else:\n return False", "title": "" 
}, { "docid": "017bbd144c42a3c4d56ebe865a8d33b9", "score": "0.7900343", "text": "def __eq__(self, other):\n if (self.a == other.a and self.b == other.b):\n return True\n else:\n return False", "title": "" }, { "docid": "f6ab7da5db29f84338fe5d8bb3881f9c", "score": "0.7882993", "text": "def __eq__(self, other):\n return self is other", "title": "" }, { "docid": "f6ab7da5db29f84338fe5d8bb3881f9c", "score": "0.7882993", "text": "def __eq__(self, other):\n return self is other", "title": "" }, { "docid": "e278c9358c932711655956e8c0dd4c9d", "score": "0.78820705", "text": "def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "907f63795b3673755f64eee8b00b403f", "score": "0.7865756", "text": "def __eq__(self, other):\n\t\tif isinstance(other, self.__class__):\n\t\t\treturn self.__dict__ == other.__dict__\n\t\telse:\n\t\t\treturn False", "title": "" }, { "docid": "539a55292a9df3a089913e1acf2c78d6", "score": "0.78603745", "text": "def __eq__(self, other):\n return self.model == other.model and self.data == other.data", "title": "" }, { "docid": "ad3c3d54def4ed89a3c8fad437e3643c", "score": "0.7858231", "text": "def __eq__(self, other):\n return isinstance(other, type(self)) and self.__dict__ == other.__dict__", "title": "" }, { "docid": "75ee8a75c6ed32d6a6b52d19d96bf570", "score": "0.78527725", "text": "def __eq__(self, other):\n return False", "title": "" }, { "docid": "fdb36f346cc1fdca4372feb54712d03f", "score": "0.78444034", "text": "def __eq__(self, other: object) -> bool:\n return (\n self.__class__ == other.__class__\n and self.latitude == other.latitude\n and self.longitude == other.longitude\n )", "title": "" }, { "docid": "ed7e4441cea7d07af7f5917d2bd36b49", "score": "0.7837964", "text": "def __eq__(self, other):\n for ii in self.__dict__:\n if not (self.__dict__[ii] == other.__dict__[ii]):\n return False\n return True", "title": "" }, { "docid": "da8f392a2c66fa01ec313d6e31d20f26", "score": "0.7803705", "text": "def __eq__(self, other):\n return self._object_type == other._object_type and self.name == other.name", "title": "" }, { "docid": "7b76837e3817eb70e2cd0c9d793e9cf0", "score": "0.7785871", "text": "def __eq__(self, other):\n return self == other", "title": "" }, { "docid": "8d6aadbfcd48e95f69d8454abecb36ec", "score": "0.7759563", "text": "def __eq__(self, other) -> bool:\n ...", "title": "" }, { "docid": "83e530b3a3009fb6e88e919c52aaa951", "score": "0.7751384", "text": "def equals(self, o: object) -> bool:\n ...", "title": "" }, { "docid": "7181b53325646f42b562ceee3ea4cb51", "score": "0.7746395", "text": "def same(self, other):\n return \\\n self.name == other.name and \\\n self.instance == other.instance and \\\n self.cvtype == other.cvtype", "title": "" }, { "docid": "5961b34485fd78642d3ac66d2e00b33f", "score": "0.77459407", "text": "def __eq__(self, obj):\n\t\treturn (self.points, self.initials) == (obj.points, obj.initials)", "title": "" }, { "docid": "139be5ca490b9553f8e7cf202b32fb04", "score": "0.77420056", "text": "def __eq__(self, other):\n for k in self.__dict__.keys():\n if k not in other.__dict__.keys():\n return False\n if getattr(self, k) != getattr(self, k):\n return False\n\n for k in other.__dict__.keys():\n if k not in self.__dict__.keys():\n return False\n if getattr(self, k) != getattr(self, k):\n return False\n\n return True", "title": "" }, { "docid": "8c88ebdb041b97d8298dfac3a420a677", "score": "0.7725468", "text": "def __eq__(self, other):\n if not 
isinstance(other, AclObject):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "15e9cc5622e684ce7b55b191d8052042", "score": "0.77219", "text": "def __eq__(self, other):\n if self == other:\n return True\n else:\n return False", "title": "" }, { "docid": "2ece111b8785bfdb703f4956f900a8d0", "score": "0.7709168", "text": "def __eq__(self, other):\n return type(other) == type(self)", "title": "" }, { "docid": "7275660fe6da9d2f3a7b29bfc361750a", "score": "0.77020013", "text": "def __eq__(self, other: Any) -> bool:\n return self._data == other._data", "title": "" }, { "docid": "91f1d694f4b1b343baa560fd4918536d", "score": "0.76938546", "text": "def __eq__(self, other):\n\n for attr in [\"jws_\", \"sha256_\", \"base64_\"]:\n if getattr(self, attr, None) != getattr(other, attr, None):\n return False\n if set(getattr(self, \"links_\", [])) != set(getattr(other, \"links_\", [])):\n return False\n return True", "title": "" }, { "docid": "0d912e560fdb7d9b2819a8dd9e36d456", "score": "0.76901186", "text": "def __eq__(self, other: 'DnsRecordsObjectResult') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d24ba3b097744baef6a44af0bc07189b", "score": "0.768716", "text": "def __eq__(other):", "title": "" } ]
aed1e738084454900d605240cb481d43
Sync a room for a client which is starting without any state
[ { "docid": "3cbcddfc577383c7fb2465514a7dfefb", "score": "0.0", "text": "def full_state_sync_for_archived_room(self, room_id, sync_config,\n leave_event_id, leave_token,\n timeline_since_token, tags_by_room):\n\n batch = yield self.load_filtered_recents(\n room_id, sync_config, leave_token, since_token=timeline_since_token\n )\n\n leave_state = yield self.store.get_state_for_event(leave_event_id)\n\n defer.returnValue(ArchivedSyncResult(\n room_id=room_id,\n timeline=batch,\n state=leave_state,\n account_data=self.account_data_for_room(\n room_id, tags_by_room\n ),\n ))", "title": "" } ]
[ { "docid": "9b07bce640e05bbb4999c959ad52528c", "score": "0.61042523", "text": "def test_incremental_sync(self) -> None:\n channel = self.make_request(\"GET\", \"/sync\", access_token=self.tok)\n self.assertEqual(channel.code, 200, channel.result)\n next_batch = channel.json_body[\"next_batch\"]\n\n self.helper.send(self.excluded_room_id, tok=self.tok)\n self.helper.send(self.included_room_id, tok=self.tok)\n\n channel = self.make_request(\n \"GET\",\n f\"/sync?since={next_batch}\",\n access_token=self.tok,\n )\n self.assertEqual(channel.code, 200, channel.result)\n\n self.assertNotIn(self.excluded_room_id, channel.json_body[\"rooms\"][\"join\"])\n self.assertIn(self.included_room_id, channel.json_body[\"rooms\"][\"join\"])", "title": "" }, { "docid": "18bbaf3630da66a54dd83f06f3f92d1c", "score": "0.60239524", "text": "def _update_client(self, _: Optional[Dict[str, Any]] = None) -> None:\n self.async_write_ha_state()", "title": "" }, { "docid": "77e9910d01f22c3f4ca877baf0acfce6", "score": "0.5994365", "text": "def sync():", "title": "" }, { "docid": "b0388f648e0d6a66ccb6f632ce43a6c8", "score": "0.59534687", "text": "def sync(self):\n self._device.CheckCall(self._BuildCommand('sync'))", "title": "" }, { "docid": "07868d6159df5bb5e6a4f8a4b4cf4ffe", "score": "0.5818409", "text": "def _sync(self, timeout_ms=30000):\n response = self.api.sync(self.sync_token, timeout_ms, filter=self.sync_filter)\n self.sync_token = response[\"next_batch\"]\n\n for presence_update in response['presence']['events']:\n for callback in self.presence_listeners.values():\n self.call(callback, presence_update)\n\n for room_id, invite_room in response['rooms']['invite'].items():\n for listener in self.invite_listeners:\n self.call(listener, room_id, invite_room['invite_state'])\n\n for room_id, left_room in response['rooms']['leave'].items():\n for listener in self.left_listeners:\n self.call(listener, room_id, left_room)\n if room_id in self.rooms:\n del self.rooms[room_id]\n\n for room_id, sync_room in response['rooms']['join'].items():\n if room_id not in self.rooms:\n self._mkroom(room_id)\n room = self.rooms[room_id]\n # TODO: the rest of this for loop should be in room object method\n room.prev_batch = sync_room[\"timeline\"][\"prev_batch\"]\n\n for event in sync_room[\"state\"][\"events\"]:\n event['room_id'] = room_id\n self.call(room._process_state_event, event)\n\n for event in sync_room[\"timeline\"][\"events\"]:\n event['room_id'] = room_id\n self.call(room._put_event, event)\n\n # TODO: global listeners can still exist but work by each\n # room.listeners[uuid] having reference to global listener\n\n # Dispatch for client (global) listeners\n for listener in self.listeners:\n if (\n listener['event_type'] is None or\n listener['event_type'] == event['type']\n ):\n self.call(listener['callback'], event)\n\n for event in sync_room['ephemeral']['events']:\n event['room_id'] = room_id\n self.call(room._put_ephemeral_event, event)\n\n for listener in self.ephemeral_listeners:\n if (\n listener['event_type'] is None or\n listener['event_type'] == event['type']\n ):\n self.call(listener['callback'], event)\n\n for event in sync_room['account_data']['events']:\n room.account_data[event['type']] = event['content']\n\n for event in response['account_data']['events']:\n self.account_data[event['type']] = event['content']", "title": "" }, { "docid": "92e0e1c36a51820b934e7b7c56bc4ad4", "score": "0.57885987", "text": "def sync(self):\n self.synced = True", "title": "" }, { "docid": "e73918b5bc3aa43985f5177095ae22e6", 
"score": "0.56940854", "text": "def sync(self):\n self.debug('Synchronizing clients')\n plist = self.getPlayerList(maxRetries=4)\n self.verbose2('plist: %s' % plist)\n mlist = {}\n\n for cid, c in plist.iteritems():\n client = self.clients.getByCID(cid)\n if client:\n self.verbose2('client found: %s' % client.name)\n if client.guid and 'guid' in c and not self.IpsOnly:\n if b3.functions.fuzzyGuidMatch(client.guid, c['guid']):\n # player matches\n self.debug('in-sync %s == %s', client.guid, c['guid'])\n mlist[str(cid)] = client\n else:\n self.debug('no-sync %s <> %s', client.guid, c['guid'])\n client.disconnect()\n elif client.ip and 'ip' in c:\n if client.ip == c['ip']:\n # player matches\n self.debug('in-sync %s == %s', client.ip, c['ip'])\n mlist[str(cid)] = client\n else:\n self.debug('no-sync %s <> %s', client.ip, c['ip'])\n client.disconnect()\n else:\n self.debug('no-sync: no guid or ip found')\n else:\n self.verbose2('no client found for cid: %s' % cid)\n \n return mlist", "title": "" }, { "docid": "16e8b92329a80d8ee32e71adf6f2f858", "score": "0.5647099", "text": "def sync():\n pass", "title": "" }, { "docid": "67ed3503835c5d7801972f80c079495f", "score": "0.5581597", "text": "def sync(self):\n plist = self.getPlayerList()\n mlist = dict()\n for cid, c in plist.iteritems():\n client = self.getByCidOrJoinPlayer(cid)\n if client:\n if client.guid and 'guid' in c.keys():\n if client.guid == c['guid']:\n # player matches\n self.debug('in-sync %s == %s', client.guid, c['guid'])\n mlist[str(cid)] = client\n else:\n self.debug('no-sync %s <> %s', client.guid, c['guid'])\n client.disconnect()\n elif client.ip and 'ip' in c.keys():\n if client.ip == c['ip']:\n # player matches\n self.debug('in-sync %s == %s', client.ip, c['ip'])\n mlist[str(cid)] = client\n else:\n self.debug('no-sync %s <> %s', client.ip, c['ip'])\n client.disconnect()\n else:\n self.debug('no-sync: no guid or ip found')\n \n return mlist", "title": "" }, { "docid": "673bffcb453dc91261f3b4dcf2e64eb0", "score": "0.5563498", "text": "def sync(self):\n pass", "title": "" }, { "docid": "d624f76faa44f4debc4e8f46d548c289", "score": "0.5550224", "text": "def client():\n print(\"CLIENT STARTED\", argv)\n client_path = argv[-2]\n server_path = argv[-1]\n sync_paths(client_path, server_path)\n try:\n while True:\n if path_content_to_string(client_path) != path_content_to_string(server_path):\n print(\"Client syncing...\")\n sync_paths(client_path, server_path)\n print(\"Client sleeping 1 second\")\n sleep(1.0)\n finally:\n print(\"CLIENT DONE\", argv)", "title": "" }, { "docid": "13825b2fbb897390fc0a687cc74d7d7b", "score": "0.5508697", "text": "def sync():\n settings = _load_settings()\n status = _load_settings(\".TDB\", check=[])\n settings.update(status)\n settings[\"database\"]\n client, msg = _connect(settings, new_db=False)\n click.echo(msg)\n click.echo(_sync(client))", "title": "" }, { "docid": "ddc23127af72e14f8101e49832da15ad", "score": "0.54933107", "text": "def _from_client_commands(self):\n while self._running:\n all_data = receive(self._from_client_connections.keys(), 0.1)\n\n for data in all_data:\n if data[\"type\"] == \"change\":\n self._paddles[data[\"sender\"]].update_location(\n data[\"change\"])", "title": "" }, { "docid": "fe77edd5a0fe8ef672d278c6f472464d", "score": "0.5472058", "text": "def send_sync(self) :\n self.send(Protocol.syc(\"socket\", \"1.1.1\").pack())", "title": "" }, { "docid": "128fc13c86af4b99bf4a54e3f94c0cda", "score": "0.5413396", "text": "def background_thread():\n count = 0\n while True:\n # 
count += 1\n # socketio.emit('my_response',\n # {'data': 'Server generated event', 'count': count},\n # namespace=namespace)\n for room in all_rooms.values():\n room.sync()\n socketio.sleep(3)", "title": "" }, { "docid": "b65ff4e7a16279a6e3c71a7321984bc5", "score": "0.541258", "text": "def test_user_with_no_rooms_receives_self_device_list_updates(self) -> None:\n device_id = \"TESTDEVICE\"\n\n # Register a user and login, creating a device\n self.user_id = self.register_user(\"kermit\", \"monkey\")\n self.tok = self.login(\"kermit\", \"monkey\", device_id=device_id)\n\n # Request an initial sync\n channel = self.make_request(\"GET\", \"/sync\", access_token=self.tok)\n self.assertEqual(channel.code, 200, channel.json_body)\n next_batch = channel.json_body[\"next_batch\"]\n\n # Now, make an incremental sync request.\n # It won't return until something has happened\n incremental_sync_channel = self.make_request(\n \"GET\",\n f\"/sync?since={next_batch}&timeout=30000\",\n access_token=self.tok,\n await_result=False,\n )\n\n # Change our device's display name\n channel = self.make_request(\n \"PUT\",\n f\"devices/{device_id}\",\n {\n \"display_name\": \"freeze ray\",\n },\n access_token=self.tok,\n )\n self.assertEqual(channel.code, 200, channel.json_body)\n\n # The sync should now have returned\n incremental_sync_channel.await_result(timeout_ms=20000)\n self.assertEqual(incremental_sync_channel.code, 200, channel.json_body)\n\n # We should have received notification that the (user's) device has changed\n device_list_changes = incremental_sync_channel.json_body.get(\n \"device_lists\", {}\n ).get(\"changed\", [])\n\n self.assertIn(\n self.user_id, device_list_changes, incremental_sync_channel.json_body\n )", "title": "" }, { "docid": "a8f49d8b21dfa53e69dc72d8ebf2869c", "score": "0.540039", "text": "def client():\n return LunoSyncClient()", "title": "" }, { "docid": "4708cee57f8aaae70e5d09db89eb6301", "score": "0.53896546", "text": "def open(self):\n self.client_id = uuid.uuid4().hex\n CLIENTS[self.client_id] = self\n [self.write_message(msg) for msg in CACHE]", "title": "" }, { "docid": "6b862e2566a3bc61ad1af675c30aab5d", "score": "0.53893983", "text": "def SeedServerStateOnClient(self, client_channel):\n print(f\"SeedServerStateOnClient({client_channel.id})\")\n for target_channel in self.clients.values():\n if target_channel.client_hash != client_channel.client_hash:\n for object_hash in target_channel.objects:\n client_channel.Send({\n \"action\": \"cloneobjectfromserver\",\n \"data\": target_channel.objects[object_hash]\n })\n print(f\" >> {target_channel.id} : {object_hash}\")\n # print(f\"seedserver sent cloneobjectfromserver \"\n # + f\"{target_channel.objects[object_hash]} to {client_channel.id}\")", "title": "" }, { "docid": "ebcef1ef603f30b086087a34c113a087", "score": "0.53885525", "text": "def sync_clients_save(sender, instance, **kwargs):\n channel_layer = get_channel_layer()\n async_to_sync(channel_layer.group_send)(\n \"sync_clients\", {\"type\": \"sync_clients.save\", \"model\": sender.__name__}\n )", "title": "" }, { "docid": "54ab3e394fa4034a973afd9a7a0319cd", "score": "0.53870213", "text": "def sync(self):\n cu_sync_device()", "title": "" }, { "docid": "26796fc310a405c43a9b0f121818330b", "score": "0.53840584", "text": "def pri_chat(s_send, username):\n \n uname = s_send.recv(1024).decode()\n\n if uname == 'DEL\\n': # client request to delete room\n s_send.send(b'OK')\n\n roomName = s_send.recv(1024).decode()\n s_send.send(b'OK')\n \n # send quit command to all clients in 
roomName\n r = rooms[roomName]\n for c in r:\n r[c].send(b'PRIVCHAT')\n r[c].recv(1024) # 'OK'\n r[c].send(b'QUIT')\n r[c].recv(1024) # 'OK'\n r[c].send(roomName.encode())\n r[c].recv(1024) # 'OK'\n rooms.pop(roomName)\n \n # remove all files belong to roomName\n for r, d, f in os.walk('uploads'):\n for i in f:\n room = i.split('+', 1)[0]\n if room == 'all':\n continue\n room = room.split(' ')\n room = room[0] + '\\n' + room[1]\n if room == roomName:\n os.remove('uploads/' + i)\n break\n return\n\n # create new room\n if not uname in online_users:\n s_send.send(b'NO')\n else:\n s_send.send(b'OK')\n\n # create room\n roomName = username + '\\n' + uname\n rooms[roomName] = {}\n rooms[roomName][username] = rooms['all'][username]\n rooms[roomName][uname] = rooms['all'][uname]\n\n # send command create room to all client in room\n r = rooms[roomName]\n for c in r:\n r[c].send(b'PRIVCHAT')\n r[c].recv(1024) # 'OK'\n r[c].send(roomName.encode() + b' @' + username.encode() + b' + @' + uname.encode())\n r[c].recv(1024) # 'OK'", "title": "" }, { "docid": "19595cde2466916c71e7f26e1236f9bb", "score": "0.5382695", "text": "def __init__(self):\n self.lock = asyncio.Lock()\n self.client_connections = dict()\n self.game = g.Game()", "title": "" }, { "docid": "efdf521aebdb491ac6db4689dab5e10e", "score": "0.5378586", "text": "def update(self, client: socket) -> None:\n client_nickname = \"\"\n while True:\n # decode the incoming message using utf-8\n message = client.recv(constants.BUFFER_SIZE).decode(constants.ENCODING)\n\n if message.startswith(\"[SENDTO:ALL]\") and client_nickname:\n self.send_to_clients(bytes(message.replace(\"[SENDTO:ALL]\", \"[SENDTO:ALL:\" + client_nickname + \"]\"), constants.ENCODING))\n continue\n elif message.startswith(\"[SENDTO\") and client_nickname:\n message_split = message.split(\"]\")\n name = message_split[0][1:].split(\":\")[1]\n for client_sock, client_name in self.connections.items():\n if name == client_name:\n client_sock.send(bytes(\"[SENDTO:\" + client_nickname + \"]=\" + message_split[1].split(\"=\")[1], constants.ENCODING))\n continue\n elif message.startswith(\"[JOINED]\"):\n client_nickname = message.split(\"=\")[1]\n current_clients = []\n [current_clients.append(client_name) for _, client_name in self.connections.items()]\n if client_nickname in current_clients:\n client.send(bytes(\"[DUPLICATE]\" + client_nickname, constants.ENCODING))\n continue\n time.sleep(0.1)\n self.connections[client] = client_nickname\n print(client_nickname + \" joined that chat!\")\n self.send_to_clients(bytes(\"[JOINED]=\" + client_nickname, constants.ENCODING))\n time.sleep(0.1)\n clients = []\n for _, client_name in self.connections.items():\n clients.append(client_name)\n self.send_to_clients(bytes(\"[CLIENTS]=\" + \"-\".join(clients), constants.ENCODING))\n continue\n elif message.startswith(\"[LEFT]\"):\n client.close()\n try:\n del self.connections[client]\n except KeyError:\n pass\n if len(client_nickname) != 0:\n print(client_nickname + \" left that chat!\")\n self.send_to_clients(bytes(\"[LEFT]=\" + client_nickname, constants.ENCODING))\n time.sleep(0.1)\n clients = []\n for _, client_name in self.connections.items():\n clients.append(client_name)\n self.send_to_clients(bytes(\"[CLIENTS]=\" + \"-\".join(clients), constants.ENCODING))\n break", "title": "" }, { "docid": "32477dbc02a4fd8b914aeba391da4241", "score": "0.5373981", "text": "def sync_workdir_from_guest(self):\n pass", "title": "" }, { "docid": "24f837691b68ab5bef996b06890572b1", "score": "0.53714913", "text": 
"def __sync():\n log.debug('getting in sync with LUA')\n self.__clear_buffers()\n try:\n self.__writeln('UUUUUUUUUUUU') # Send enough characters for auto-baud\n self.__clear_buffers()\n time.sleep(self.autobaud_time) # Wait for autobaud timer to expire\n self.__exchange(';') # Get a defined state\n self.__writeln('print(\"%sync%\");')\n self.__expect('%sync%\\r\\n> ')\n except CommunicationTimeout:\n raise DeviceNotFoundException('Device not found or wrong port')", "title": "" }, { "docid": "b79da3de9e01cbaa7f18501d85175fb4", "score": "0.537101", "text": "def refresh(self):\r\n self.update(self['__client__'].get(self['id']))", "title": "" }, { "docid": "8f6649f50890cb790c023c2fb653b6f9", "score": "0.5370351", "text": "async def vaillant_update(self):\n new_room: Room = self.hub.find_component(self._room)\n\n if new_room:\n _LOGGER.debug(\n \"New / old state: %s / %s\", new_room.child_lock, self._room.child_lock\n )\n else:\n _LOGGER.debug(\"Room %s doesn't exist anymore\", self._room.id)\n self._room = new_room", "title": "" }, { "docid": "eee982f3ec8f07d9c45cee31b2f9d0e2", "score": "0.5331417", "text": "def roomState(self, user):\n \n\n version = 1\n typeMessage = 4\n \n #to get the room of the client\n sessionToken_1 = self.usersession[user.userAddress][0]\n sessionToken_2 = self.usersession[user.userAddress][1]\n \n sequenceNumber = self.usersequenceNumber[user.userAddress][0]\n self.usersequenceNumber[user.userAddress][1] = 4\n room = user.userChatRoom\n \n \n if room == ROOM_IDS.MAIN_ROOM :\n \n #if the client in the main room , call the function messageMainroom()\n mainbuf = self.messageMainroom()\n roomListLength = len(self.serverProxy.getMovieList())\n #to get the data of the movie room\n roomListbuf = struct.pack('>H', roomListLength)\n for movie in self.serverProxy.getMovieList():\n roomListbuf = roomListbuf + self.messageMovieroom(movie.movieTitle)\n payloadSize = len(mainbuf) + len(roomListbuf)\n #pack the header\n headbuf = struct.pack('>BHBHH', version*16+typeMessage, sessionToken_1, sessionToken_2,\n sequenceNumber, payloadSize)\n #the whole packet\n buf = headbuf +mainbuf +roomListbuf\n \n #if the client in the movie room\n else :\n #to call the function messageMovieroom(room)\n moviebuf = self.messageMovieroom(room)\n payloadSize = len(moviebuf) \n #the header\n headbuf = struct.pack('>BHBHH', version*16+typeMessage, sessionToken_1, sessionToken_2,\n sequenceNumber, payloadSize)\n #the whole packet\n buf = headbuf +moviebuf\n \n # Stop sending more than three times\n if self.usercount[user.userAddress][0] ==3:\n print(\"Connection Lost\")\n if self.leave[user.userAddress] == 0:\n self.serverProxy.removeUser(user.userName)\n self.leave[user.userAddress] = 1\n for user in self.serverProxy.getUserList() :\n if (user.userChatRoom == ROOM_IDS.MAIN_ROOM or user.userChatRoom == room):\n self.roomState(user)\n \n else:\n #send the packet\n print('********Send RoomState***************')\n print(\"roomState :\", buf)\n# print('***********************')\n user.userChatInstance.transport.write(buf)\n # Here we consider retransmitting this message after timing 1s without receiving ack response. 
\n # Stop sending more than three times\n self.usercount[user.userAddress][0] +=1\n if self.usercount[user.userAddress][0] <= 3:\n self.send[user.userAddress] = [reactor.callLater(1, self.roomState, user)]", "title": "" }, { "docid": "dbd667ec2dbd712e7d6059a91e51623b", "score": "0.53257537", "text": "def sync_state(self):\n pass", "title": "" }, { "docid": "c03fd5f072f56f6f63ca47b63fa39890", "score": "0.5298573", "text": "def sync(self):\n self.sot.sync_from(self.network, diff_class=NetworkImporterDiff)", "title": "" }, { "docid": "c1804939a76345fdb971d8e45cef6982", "score": "0.5287648", "text": "def sync_workdir_to_guest(self):\n pass", "title": "" }, { "docid": "aa7d56dc5a31a9a32b792c1b9a7950b6", "score": "0.5284305", "text": "def test_receiving_local_device_list_changes(self) -> None:\n # Register two users\n test_device_id = \"TESTDEVICE\"\n alice_user_id = self.register_user(\"alice\", \"correcthorse\")\n alice_access_token = self.login(\n alice_user_id, \"correcthorse\", device_id=test_device_id\n )\n\n bob_user_id = self.register_user(\"bob\", \"ponyponypony\")\n bob_access_token = self.login(bob_user_id, \"ponyponypony\")\n\n # Create a room for them to coexist peacefully in\n new_room_id = self.helper.create_room_as(\n alice_user_id, is_public=True, tok=alice_access_token\n )\n self.assertIsNotNone(new_room_id)\n\n # Have Bob join the room\n self.helper.invite(\n new_room_id, alice_user_id, bob_user_id, tok=alice_access_token\n )\n self.helper.join(new_room_id, bob_user_id, tok=bob_access_token)\n\n # Now have Bob initiate an initial sync (in order to get a since token)\n channel = self.make_request(\n \"GET\",\n \"/sync\",\n access_token=bob_access_token,\n )\n self.assertEqual(channel.code, 200, channel.json_body)\n next_batch_token = channel.json_body[\"next_batch\"]\n\n # ...and then an incremental sync. 
This should block until the sync stream is woken up,\n # which we hope will happen as a result of Alice updating their device list.\n bob_sync_channel = self.make_request(\n \"GET\",\n f\"/sync?since={next_batch_token}&timeout=30000\",\n access_token=bob_access_token,\n # Start the request, then continue on.\n await_result=False,\n )\n\n # Have alice update their device list\n channel = self.make_request(\n \"PUT\",\n f\"/devices/{test_device_id}\",\n {\n \"display_name\": \"New Device Name\",\n },\n access_token=alice_access_token,\n )\n self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)\n\n # Check that bob's incremental sync contains the updated device list.\n # If not, the client would only receive the device list update on the\n # *next* sync.\n bob_sync_channel.await_result()\n self.assertEqual(bob_sync_channel.code, 200, bob_sync_channel.json_body)\n\n changed_device_lists = bob_sync_channel.json_body.get(\"device_lists\", {}).get(\n \"changed\", []\n )\n self.assertIn(alice_user_id, changed_device_lists, bob_sync_channel.json_body)", "title": "" }, { "docid": "0d546284fbd9db4f7f2d2f8987984821", "score": "0.5283178", "text": "def test_bin_enter_room(session):\n\n client_bin._on_data(smpacket.SMPacketClientNSSMONL(\n packet=smpacket.SMOPacketClientEnterRoom(enter=1, room=\"Room client-json\", password=\"password\")\n ).binary)\n\n room = session.query(models.Room).filter_by(name=\"Room client-json\").first()\n user1 = session.query(models.User).filter_by(name=\"clientbin-user1\").first()\n user2 = session.query(models.User).filter_by(name=\"clientbin-user2\").first()\n\n assert client_bin.room == room.id\n\n assert user1.room == room\n assert user2.room == room\n\n assert user1.level(room.id) == 1\n assert user2.level(room.id) == 1", "title": "" }, { "docid": "0fae9f3b314c8164142e4753ec1d2be6", "score": "0.5249324", "text": "def SendMazeDataToClient(self, client_channel):\n print(f\"SendMazeDataToClient({client_channel.id})\")\n client_hash = client_channel.client_hash\n client_channel.Send({\n \"action\": \"sendmazedatatoclient\",\n \"maze\": self.maze_rows\n })", "title": "" }, { "docid": "991fdb8042df46d95c68ad5a4d137c2c", "score": "0.52418274", "text": "def sync(self):\n\n sync_count = 4\n\n rtt = 999\n for i in range(sync_count):\n st1 = time.time()\n self.senderThread.send({'type': 'sync'})\n reply = self.syncQueue.get()\n st2 = time.time()\n new_rtt = st2 - st1\n if new_rtt < rtt:\n rtt = new_rtt\n self.clockOffset = reply['ct'] + rtt / 2 - st2\n\n print 'synchronized with client', self.id, ': clockOffset =',\\\n self.clockOffset", "title": "" }, { "docid": "dd748414da362133cd5d41060895911c", "score": "0.52354807", "text": "def unsync(self):\n self.request(\"sync -\")", "title": "" }, { "docid": "ab5253e23202244440d824154c61b19e", "score": "0.5229868", "text": "def setStartRoom(self, value): \n self.__startRoom = value", "title": "" }, { "docid": "1431cc525cbc29e4f49340033dc21545", "score": "0.5228958", "text": "def test_json_enter_room(session):\n\n client_json._on_data(smpacket.SMPacketClientNSSMONL(\n packet=smpacket.SMOPacketClientEnterRoom(enter=1, room=\"Room client-bin\", password=\"password\")\n ).json)\n\n room = session.query(models.Room).filter_by(name=\"Room client-bin\").first()\n user = session.query(models.User).filter_by(name=\"clientjson-user1\").first()\n\n assert client_json.room == room.id\n assert user.room == room\n assert user.level(room.id) == 1", "title": "" }, { "docid": "6dce936dea892167030b991746164bc6", "score": "0.52219886", "text": 
"def _synchronize(networked_peer):\n # TODO\n if networked_peer:\n pass", "title": "" }, { "docid": "f3b9709b753c66a72e7cb49f0dbfd84b", "score": "0.5206759", "text": "def start_in(self, room_name):\n self.initial_room_id = self.room_names_to_ids[room_name]\n return self", "title": "" }, { "docid": "2a9f378aa67893ce7bdd14c84c59b185", "score": "0.5203585", "text": "def sync(self):\n # cannot be implemented as we have no means of retrieving the current player list\n connected_clients = self.getPlayerList()\n for c in self.clients.getList():\n if c.cid not in connected_clients:\n c.disconnect()\n return connected_clients", "title": "" }, { "docid": "89785bb3822f546ca24fb785e88140f2", "score": "0.5180891", "text": "async def _sync_friends():\n global _SYNCED, _WATCHES\n expected = set([str(k, \"utf8\").lower() for k in _WATCHES.keys()])\n while True:\n actual = await _friends()\n if actual is None:\n raise eqcmd.CommandError(\"Failed to retrieve friends\")\n if actual == expected:\n _SYNCED = True\n return\n for person in expected.symmetric_difference(actual):\n if person in _ONLINE_STATES:\n del _ONLINE_STATES[person]\n await _toggle_friend(person)", "title": "" }, { "docid": "ff11c57d1ae4403d6d6a43eeab448114", "score": "0.51796305", "text": "def start(self, event):\n self.send_presence()\n self.get_roster()\n\n self.send_message(mto=self.recipient,\n mbody=self.msg,\n mtype='chat')\n\n # Using wait=True ensures that the send queue will be\n # emptied before ending the session.\n self.disconnect(wait=True)", "title": "" }, { "docid": "ff11c57d1ae4403d6d6a43eeab448114", "score": "0.51796305", "text": "def start(self, event):\n self.send_presence()\n self.get_roster()\n\n self.send_message(mto=self.recipient,\n mbody=self.msg,\n mtype='chat')\n\n # Using wait=True ensures that the send queue will be\n # emptied before ending the session.\n self.disconnect(wait=True)", "title": "" }, { "docid": "c94e606c10a4f53404b110f6e8ce15ee", "score": "0.51696575", "text": "def sync(self, sync_id: Optional[int] = None) -> \"Server\":\n if self._boot_status not in (BootStatus.BOOTING, BootStatus.ONLINE):\n raise ServerOffline\n Sync(\n sync_id=sync_id if sync_id is not None else self._get_next_sync_id()\n ).communicate(server=self)\n return self", "title": "" }, { "docid": "f1dfd874ee437576482d0a3084fdadc2", "score": "0.51671684", "text": "def client_thread(connection, address):\n connection.send(\"Chatroom instantiated.\")\n \n # Continue indefinitely.\n while True:\n try:\n message = connection.recv(2048)\n if message:\n \n broadcast_message = address[0] + \" : \" + message\n print(broadcast_message)\n broadcast(broadcast_message, connection)\n else:\n remove(connection)\n except:\n continue", "title": "" }, { "docid": "3963d121ff34c950c1ce18df6a000aef", "score": "0.51654917", "text": "def SyncWithPresenter(self):\n pass", "title": "" }, { "docid": "cf90cf581dd7a81bfd4b672bfc71b1f5", "score": "0.51643354", "text": "def test_not_receiving_local_device_list_changes(self) -> None:\n # Register two users\n test_device_id = \"TESTDEVICE\"\n alice_user_id = self.register_user(\"alice\", \"correcthorse\")\n alice_access_token = self.login(\n alice_user_id, \"correcthorse\", device_id=test_device_id\n )\n\n bob_user_id = self.register_user(\"bob\", \"ponyponypony\")\n bob_access_token = self.login(bob_user_id, \"ponyponypony\")\n\n # These users do not share a room. 
They are lonely.\n\n # Have Bob initiate an initial sync (in order to get a since token)\n channel = self.make_request(\n \"GET\",\n \"/sync\",\n access_token=bob_access_token,\n )\n self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)\n next_batch_token = channel.json_body[\"next_batch\"]\n\n # ...and then an incremental sync. This should block until the sync stream is woken up,\n # which we hope will happen as a result of Alice updating their device list.\n bob_sync_channel = self.make_request(\n \"GET\",\n f\"/sync?since={next_batch_token}&timeout=1000\",\n access_token=bob_access_token,\n # Start the request, then continue on.\n await_result=False,\n )\n\n # Have alice update their device list\n channel = self.make_request(\n \"PUT\",\n f\"/devices/{test_device_id}\",\n {\n \"display_name\": \"New Device Name\",\n },\n access_token=alice_access_token,\n )\n self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)\n\n # Check that bob's incremental sync does not contain the updated device list.\n bob_sync_channel.await_result()\n self.assertEqual(\n bob_sync_channel.code, HTTPStatus.OK, bob_sync_channel.json_body\n )\n\n changed_device_lists = bob_sync_channel.json_body.get(\"device_lists\", {}).get(\n \"changed\", []\n )\n self.assertNotIn(\n alice_user_id, changed_device_lists, bob_sync_channel.json_body\n )", "title": "" }, { "docid": "7598d7a7e07e1c6d02124efc3705fdd0", "score": "0.51580983", "text": "def _checkin_client(self, ts, client):\n # If the connection is now stale, don't return it to the pool.\n # Push an empty slot instead so that it will be refreshed when needed.\n if client.is_connected():\n now = int(time.time())\n if ts + self.timeout > now:\n self.clients.put((ts, client))\n else:\n if self.maxsize is not None:\n self.clients.put(EMPTY_SLOT)", "title": "" }, { "docid": "99b038b03062231ddeb4d68ab73a806e", "score": "0.51447207", "text": "async def sync(self):\n mime = 'setup.sync'\n await self.send_request(mime, block=False)", "title": "" }, { "docid": "d0bbf30e8fcf3d5f9d497207fda2a451", "score": "0.5141951", "text": "def resetclient():\n global _client\n _client = NullClient()", "title": "" }, { "docid": "c400ec9c3cc50f94f8749a82c09810ec", "score": "0.5131805", "text": "def update_clients(id, data):\n data['id'] = id\n socketio.emit('update_track', data, json=True)", "title": "" }, { "docid": "2e8c31191dc8de846924deb3f591a781", "score": "0.5116015", "text": "def start_clients():\n for client in CLIENTS:\n if client.running:\n continue\n \n Task(client.connect(), KOKORO)\n \n if (current_thread() is not KOKORO):\n KOKORO.wake_up()", "title": "" }, { "docid": "b2b4c6278547deb911006a4f9ca5c414", "score": "0.511151", "text": "async def sync_from_mudlet(self, *rooms, clear=False):\n db=self.db\n done = set()\n broken = set()\n todo = deque()\n explore = set()\n more = set()\n area_names = {int(k):v for k,v in (await self.mud.getAreaTableSwap())[0].items()}\n area_known = set()\n area_rev = {}\n for k,v in area_names.items():\n area_rev[v] = k\n self.__logger.debug(\"AREAS:%r\",area_names)\n await self.print(_(\"Start syncing. 
Please be patient.\"))\n\n if clear:\n r_old = {}\n for r in rooms:\n r_old[r.id_old] = id_mudlet\n db.q(db.Room).update().values(id_mudlet = None).execute()\n for r in rooms:\n r.id_mudlet = r_old[r.id_old]\n\n for r in rooms:\n todo.append(r)\n\n def know_area(self, ra):\n if isinstance(ra,str):\n ra = area_rev[area]\n if ra not in area_known:\n if db.q(db.Area).filter(db.Area.id == ra).one_or_none() is None:\n a = db.Area(id=ra,name=area_names[ra])\n db.add(a)\n db.commit()\n area_known.add(ra)\n return ra\n\n while todo:\n r = todo.pop()\n if r.id_old in done:\n continue\n done.add(r.id_old)\n if not (len(done)%100):\n await self.print(_(\"{done} rooms ...\"), done=len(done))\n\n try:\n y = await r.mud_exits\n except NoData:\n # didn't go there yet?\n self.__logger.debug(\"EXPLORE %s %s\",r.id_str,r.name)\n explore.add(r.id_old)\n continue\n\n # Iterate exits but do the \"standard\" directions first,\n # then the reversible nonstandard ways, then the others.\n def exits(y):\n for d,mid in y.items():\n if self.dr.is_std_dir(d):\n yield d,mid\n for d,mid in y.items():\n if not self.dr.is_std_dir(d) and self.dr.loc2rev(d) is not None:\n yield d,mid\n for d,mid in y.items():\n if not self.dr.is_std_dir(d) and self.dr.loc2rev(d) is None:\n yield d,mid\n\n for d,mid in exits(y):\n if not mid:\n continue # unknown\n try:\n x = r.exit_at(d)\n nr = x.dst\n except KeyError:\n x = None\n nr = None\n if nr is not None:\n continue\n\n try:\n if nr is None:\n nr = db.r_mudlet(mid)\n elif nr.id_mudlet is not None:\n continue\n except NoData:\n name = await self.mud.getRoomName(mid)\n name = self.dr.clean_shortname(name[0]) if name and name[0] else None\n gmcp = await self.mud.getRoomHashByID(mid)\n gmcp = gmcp[0] if gmcp and gmcp[0] else None\n\n nr = await self.new_room(name, id_gmcp=gmcp, id_mudlet=mid, offset_from=r, offset_dir=d, explode=self.conf['mudlet_explode'])\n\n if x is None:\n x,_xf = await r.set_exit(d,nr,skip_mud=True)\n elif x.dst is None:\n x.dst = nr\n todo.append(nr)\n db.commit()\n\n await self.print(_(\"Finished, {done} rooms processed\"), done=len(done))", "title": "" }, { "docid": "f7508219d8098a05aa12c6691cf3f63f", "score": "0.5110341", "text": "def on_pushButton_synchro_client_launch_clicked(self):\r\n # TODO: not implemented yet\r\n goslaunchera3.action.rsyncGos(self, \"@CLIENT\",self.label_synchro_client_state, self.progressBar_synchro_client_fichier,self.progressBar_synchro_client_global,self.label_synchro_client_debit,self.pushButton_synchro_client_launch)", "title": "" }, { "docid": "4c5fa05fea940fffa35d268fff8fd520", "score": "0.50803006", "text": "def sync(ctx, spec, dry_run):\n engine = ctx.obj[\"engine\"]\n do_sync(engine, spec, dry_run)", "title": "" }, { "docid": "1e01384d90bc68883ade07eb62c0810b", "score": "0.5068519", "text": "def start(self, event):\n self.get_roster()\n self.send_presence()\n self.plugin['xep_0045'].joinMUC(self.room,\n self.nick,\n # If a room password is needed, use:\n # password=the_room_password,\n wait=True)", "title": "" }, { "docid": "964c4c8aaaca293c8d12c62d6fed3f8e", "score": "0.506675", "text": "def updateClients(self, client):\n self.clients.append(client)", "title": "" }, { "docid": "a279078b5d9dab7c683e205980a60605", "score": "0.5065684", "text": "def sync(self):\n resp = self.action('sync')\n resp.raise_if_err()\n return resp", "title": "" }, { "docid": "c81533a8a811bc8a86056a63e755475b", "score": "0.5056151", "text": "def _sync(self, state):\n\n if self.software_only: return\n self.dispenser_select.sync(state)", "title": "" }, { 
"docid": "b5942542f0d3c015c759ab1bf98de66f", "score": "0.50473946", "text": "def room(self, room):\n\n self._room = room", "title": "" }, { "docid": "9348303c6c6258f7e62aee9db9a9f332", "score": "0.50335497", "text": "def reset_client(self):\n self.GAME_ON = False\n self.tcp_server_port = None\n self.server_ip = None", "title": "" }, { "docid": "fa9e191fc6fdeabc8a9ede53c89b4b56", "score": "0.5033359", "text": "def join(self, room):\n self.socket.rooms.add(self._get_room_name(room))", "title": "" }, { "docid": "fa9e191fc6fdeabc8a9ede53c89b4b56", "score": "0.5033359", "text": "def join(self, room):\n self.socket.rooms.add(self._get_room_name(room))", "title": "" }, { "docid": "649cb44f9c372790f1c2d1c0758992a8", "score": "0.50316966", "text": "def _sync(self, state):\r\n\r\n if self.software_only: return\r\n self.dispenser_select.sync(state)", "title": "" }, { "docid": "8581d8b16cf4d27adf49fb4e2e118238", "score": "0.50132996", "text": "def connected(client):\n with warnings.catch_warnings():\n # Ignore warning that \"ismaster\" is always routed to primary even\n # if client's read preference isn't PRIMARY.\n warnings.simplefilter(\"ignore\", UserWarning)\n client.admin.command('ismaster') # Force connection.\n\n return client", "title": "" }, { "docid": "dd31ce447fce85e124d0d22f1f4e9a05", "score": "0.5012992", "text": "def test_multiplayer_sync_response():\n test = DummyMultiplayerGame()\n messages = []\n\n for client in test.clients:\n for actor in client.actors:\n message = send_dummy_message(actor, response='sync')\n messages.append(message)\n\n # Make sure the message is handled like usual by the sender.\n\n for observer in client.observers:\n assert observer.dummy_messages_received == messages\n assert client.world.dummy_messages_executed == messages\n\n # Make sure the server relays the message to all the other \n # observers participating in the game.\n\n test.update()\n\n for observer in test.observers:\n assert observer.dummy_messages_received == messages\n for world in test.worlds:\n assert world.dummy_messages_executed == messages\n\n # Make sure the clients are instructed to sync up.\n\n for observer in test.client_observers:\n assert observer.dummy_sync_responses_received == messages\n for world in test.client_worlds:\n assert world.dummy_sync_responses_executed == messages", "title": "" }, { "docid": "039623ae0448a85b90eb0d21c5723984", "score": "0.49944133", "text": "async def sync(self, sync_id: Optional[int] = None) -> \"AsyncServer\":\n if self._boot_status not in (BootStatus.BOOTING, BootStatus.ONLINE):\n raise ServerOffline\n await Sync(\n sync_id=sync_id if sync_id is not None else self._get_next_sync_id()\n ).communicate_async(server=self)\n return self", "title": "" }, { "docid": "2bd923a10f00b7a03f173339c43cd2e3", "score": "0.4993173", "text": "def start(self, event):\n\t\tself.get_roster()\n\t\tself.send_presence()\n\t\tself.plugin['xep_0045'].joinMUC(self.room,\n\t\t\tself.nick,\n\t\t\t# If a room password is needed, use:\n\t\t\t# password=the_room_password,\n\t\t\twait=True)", "title": "" }, { "docid": "cb5bd70f6dfeee97a127ba093c0a42e3", "score": "0.49927256", "text": "def EnterRoom(clientid, roomname):\n try:\n room = objmgr.GetRoomByName(roomname)\n except KeyError:\n return Response(clientid, 'Invalid room name: %s' % (roomname,))\n\n player = objmgr.GetPlayerByClientId(clientid)\n room.EnterRoom(player)\n\n return Response(clientid, 'You are now in %s' % (room.name,))", "title": "" }, { "docid": "a485d26451f23b3ab0f50694db8d39e4", "score": "0.49908218", "text": 
"def incremental_sync_with_gap(self, sync_config, since_token):\n now_token = yield self.event_sources.get_current_token()\n\n rooms = yield self.store.get_rooms_for_user(sync_config.user.to_string())\n room_ids = [room.room_id for room in rooms]\n\n presence_source = self.event_sources.sources[\"presence\"]\n presence, presence_key = yield presence_source.get_new_events(\n user=sync_config.user,\n from_key=since_token.presence_key,\n limit=sync_config.filter.presence_limit(),\n room_ids=room_ids,\n # /sync doesn't support guest access, they can't get to this point in code\n is_guest=False,\n )\n now_token = now_token.copy_and_replace(\"presence_key\", presence_key)\n\n now_token, ephemeral_by_room = yield self.ephemeral_by_room(\n sync_config, now_token, since_token\n )\n\n rm_handler = self.hs.get_handlers().room_member_handler\n app_service = yield self.store.get_app_service_by_user_id(\n sync_config.user.to_string()\n )\n if app_service:\n rooms = yield self.store.get_app_service_rooms(app_service)\n joined_room_ids = set(r.room_id for r in rooms)\n else:\n joined_room_ids = yield rm_handler.get_joined_rooms_for_user(\n sync_config.user\n )\n\n timeline_limit = sync_config.filter.timeline_limit()\n\n room_events, _ = yield self.store.get_room_events_stream(\n sync_config.user.to_string(),\n from_key=since_token.room_key,\n to_key=now_token.room_key,\n limit=timeline_limit + 1,\n )\n\n tags_by_room = yield self.store.get_updated_tags(\n sync_config.user.to_string(),\n since_token.account_data_key,\n )\n\n joined = []\n archived = []\n if len(room_events) <= timeline_limit:\n # There is no gap in any of the rooms. Therefore we can just\n # partition the new events by room and return them.\n logger.debug(\"Got %i events for incremental sync - not limited\",\n len(room_events))\n\n invite_events = []\n leave_events = []\n events_by_room_id = {}\n for event in room_events:\n events_by_room_id.setdefault(event.room_id, []).append(event)\n if event.room_id not in joined_room_ids:\n if (event.type == EventTypes.Member\n and event.state_key == sync_config.user.to_string()):\n if event.membership == Membership.INVITE:\n invite_events.append(event)\n elif event.membership in (Membership.LEAVE, Membership.BAN):\n leave_events.append(event)\n\n for room_id in joined_room_ids:\n recents = events_by_room_id.get(room_id, [])\n logger.debug(\"Events for room %s: %r\", room_id, recents)\n state = {\n (event.type, event.state_key): event\n for event in recents if event.is_state()}\n limited = False\n\n if recents:\n prev_batch = now_token.copy_and_replace(\n \"room_key\", recents[0].internal_metadata.before\n )\n else:\n prev_batch = now_token\n\n just_joined = yield self.check_joined_room(sync_config, state)\n if just_joined:\n logger.debug(\"User has just joined %s: needs full state\",\n room_id)\n state = yield self.get_state_at(room_id, now_token)\n # the timeline is inherently limited if we've just joined\n limited = True\n\n room_sync = JoinedSyncResult(\n room_id=room_id,\n timeline=TimelineBatch(\n events=recents,\n prev_batch=prev_batch,\n limited=limited,\n ),\n state=state,\n ephemeral=ephemeral_by_room.get(room_id, []),\n account_data=self.account_data_for_room(\n room_id, tags_by_room\n ),\n )\n logger.debug(\"Result for room %s: %r\", room_id, room_sync)\n\n if room_sync:\n joined.append(room_sync)\n\n else:\n logger.debug(\"Got %i events for incremental sync - hit limit\",\n len(room_events))\n\n invite_events = yield self.store.get_invites_for_user(\n sync_config.user.to_string()\n )\n\n 
leave_events = yield self.store.get_leave_and_ban_events_for_user(\n sync_config.user.to_string()\n )\n\n for room_id in joined_room_ids:\n room_sync = yield self.incremental_sync_with_gap_for_room(\n room_id, sync_config, since_token, now_token,\n ephemeral_by_room, tags_by_room\n )\n if room_sync:\n joined.append(room_sync)\n\n for leave_event in leave_events:\n room_sync = yield self.incremental_sync_for_archived_room(\n sync_config, leave_event, since_token, tags_by_room\n )\n archived.append(room_sync)\n\n invited = [\n InvitedSyncResult(room_id=event.room_id, invite=event)\n for event in invite_events\n ]\n\n defer.returnValue(SyncResult(\n presence=presence,\n joined=joined,\n invited=invited,\n archived=archived,\n next_batch=now_token,\n ))", "title": "" }, { "docid": "28714f8887493fbb2141ce720979c262", "score": "0.49907303", "text": "def handle_client_connect_event():\n # when the client connects, send all existing data\n print('dashboard client connected, sending any existing data...')\n socketio.emit('existingData', existingData)\n global thread", "title": "" }, { "docid": "5461ec7ce6982a7c3855f282cfdc89c6", "score": "0.4987587", "text": "def sync(self):\r\n\r\n # Ensure to rerun only once to avoid infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n self._state[\"is_rerun\"] = False\r\n \r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "title": "" }, { "docid": "0b164f74c505c12292147abc7c6f4357", "score": "0.49763384", "text": "def sync(self) -> None:\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing session value at each run.\n #\n # Example: session.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None\n ):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "title": "" }, { "docid": "030d20a4d8ceadf744091ffdb75b472c", "score": "0.49734926", "text": "def enterRoom(self):\n\n\n\t\t# Sets the room = player x,y location\n\t\tif (self.world.tile_exists(self.player.location_x, self.player.location_y)):\n\t\t\tself.room = self.world.tile_exists(self.player.location_x, self.player.location_y)\n\n\t\telse:\n\t\t\tself.world.generate_world(self.player.location_x, self.player.location_y)\n\t\t\tself.room = self.world.tile_exists(self.player.location_x, self.player.location_y)\n\n\t\t\n\n\t\tavailable_actions = self.room.available_actions()", "title": "" }, { "docid": "a1b3ca2cffe0143a985f3050c242d978", "score": "0.49679375", "text": "def runclient():\n global mcast\n mcast = McastClient(MCAST_PORT,MCAST_ADDR)\n\n while 1:\n mcast.send(\"5+5\")\n time.sleep(5)", "title": "" }, { "docid": "29e3cac73dfc82940258c2f767eb4811", "score": "0.49630406", "text": "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n \n elif 
self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "title": "" }, { "docid": "18ef190a286d00ff1dc973955d8f9fdb", "score": "0.49612492", "text": "async def vaillant_update(self):\n new_room: Room = self.hub.find_component(self.room)\n new_device: Device = self._find_device(new_room, self.device.sgtin)\n\n if new_room:\n if new_device:\n _LOGGER.debug(\n \"New / old state: %s / %s\",\n new_device.battery_low,\n self.device.battery_low,\n )\n else:\n _LOGGER.debug(\"Device %s doesn't exist anymore\", self.device.sgtin)\n else:\n _LOGGER.debug(\"Room %s doesn't exist anymore\", self.room.id)\n self.room = new_room\n self.device = new_device", "title": "" }, { "docid": "b37729195b22d51fd9f1aa3fa1f8bab8", "score": "0.49547064", "text": "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None)", "title": "" }, { "docid": "1962cc15abac4892272ca9a654276139", "score": "0.49545133", "text": "def join_room_socketio(data):\n challenge = db.session.query(Challenge).get(data[\"challenge_id\"])\n if challenge:\n chat_room = challenge.chat_room\n join_room(chat_room.room_id)\n history = chat_room.get_history()\n emit(\"join_room\", {\"history\": history})", "title": "" }, { "docid": "44742eb52d96d6d533c77d679e84613d", "score": "0.49509966", "text": "def joinRoom(self,data):\n roomId, = struct.unpack('>H', data[8 : 10])\n usernow = self.serverProxy.getUserByName(self.userName)\n \n # room id is wrong\n if roomId not in self.roomID:\n self.roomState(usernow)\n else: \n #the client joins to main room\n if roomId == 1 :\n self.serverProxy.stopStreamingMovie(usernow.userChatRoom)\n userPastroom = usernow.userChatRoom\n self.serverProxy.updateUserChatroom(usernow.userName, ROOM_IDS.MAIN_ROOM)\n for user in self.serverProxy.getUserList():\n if (user.userChatRoom == ROOM_IDS.MAIN_ROOM or user.userChatRoom == userPastroom):\n self.roomState(user)\n #the client joins to movie room\n else :\n for movie in self.serverProxy.getMovieList() :\n if movie.movieId == roomId :\n self.serverProxy.startStreamingMovie(movie.movieTitle)\n self.serverProxy.updateUserChatroom(usernow.userName, movie.movieTitle)\n for user in self.serverProxy.getUserList():\n if (user.userChatRoom == movie.movieTitle or user.userChatRoom == ROOM_IDS.MAIN_ROOM): \n self.roomState(user)", "title": "" }, { "docid": "4f28335f3f9c9246c282b31529135bbb", "score": "0.49460658", "text": "def sync(path):\r\n path = path.rstrip('\\\\').rstrip('/')\r\n click.confirm(f'This will DELETE remote data that is not present.\\nAre you sure you want to sync {path}?', abort=True)\r\n \r\n try:\r\n coach = get_coach()\r\n coach.sync_local(path)\r\n except Exception as e:\r\n click.echo(e)", "title": "" }, { "docid": "8600b3b6b671d5f8c6fe9a332524d7bf", "score": "0.4940865", "text": "def sequence_master_sync(self, c, sync=None):\n if sync is None:\n 
sync = c['master_sync']\n else:\n c['master_sync'] = sync\n return sync", "title": "" }, { "docid": "00a6ae505c43959042b7029cf7070f45", "score": "0.49383396", "text": "def save(self, *args, **kwargs):\n # Check if changes have been done to the room itself\n if self._room != self.room:\n self.notify_room()\n super(Player, self).save(*args, **kwargs)", "title": "" }, { "docid": "2c1c6ba7b5c525949a38d88d31bac190", "score": "0.49371472", "text": "def sync(self):\n odir=self.get_temp_dir()\n if not os.path.exists(odir):\n os.makedirs(odir)\n\n remote_url=self.get_remote_coadd_file('g')\n remote_url = remote_url.replace('_g','{_g,_r,_i}')\n cmd = r\"\"\"\n rsync \\\n -aP \\\n --password-file $DES_RSYNC_PASSFILE \\\n %(remote_url)s \\\n %(local_dir)s/\n \"\"\" % dict(\n remote_url=remote_url,\n local_dir=odir,\n )\n\n print(cmd)\n subprocess.check_call(cmd,shell=True)", "title": "" }, { "docid": "81bd268d8f30368faf706b54739698e5", "score": "0.49305964", "text": "def start(self, event):\n debugger_print(\"session_start was received; self.start was called\")\n self.send_presence()\n self.get_roster()\n \"\"\" self.get_roster(callback=self.print_roster)\n debugger_print(\"CLIENT ROSTER:\")\n debugger_print(self.client_roster) \"\"\"", "title": "" }, { "docid": "dcf800e50654da7df8c999ad2a2bd880", "score": "0.49289438", "text": "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "title": "" }, { "docid": "dfd1220be4b617973ca1e0634bd58af8", "score": "0.49222025", "text": "def enqueue_state(self, lobby: bool = True) -> None:\n if not self.chat:\n point_of_interest()\n\n # TODO: hmm this is pretty bad, writes twice\n\n # send password only to users currently in the match.\n self.chat.enqueue(packets.updateMatch(self, send_pw=True))\n\n if lobby and (lchan := glob.channels['#lobby']) and lchan.players:\n lchan.enqueue(packets.updateMatch(self, send_pw=False))", "title": "" }, { "docid": "d7d1ad57f6b93521cfbc13977fabbdc5", "score": "0.492037", "text": "def do_green_room(self):\n if self._is_client_owner:\n if self.privacy_settings.set_greenroom():\n self.send_bot_msg('*Green room is enabled.*')\n self.rtmp_parameter['greenroom'] = True\n else:\n self.send_bot_msg('*Green room is disabled.*')\n self.rtmp_parameter['greenroom'] = False", "title": "" }, { "docid": "7ca106992aec42bd315c2bc298b0acbb", "score": "0.4911232", "text": "def send_update_game(self):\n game = Game.objects.get_or_none(id=self.id)\n if not game:\n return\n current_player = game.get_current_player()\n if not current_player:\n current_player = \"\"\n current_player_order = 0\n else:\n current_player_order = current_player.order\n current_player = current_player.user.username\n async_to_sync(self.channel_layer.group_send)(\n self.room_group_name,\n {\n \"type\": \"update_game\",\n \"game\": game.as_json(),\n \"current_player\": current_player,\n \"current_player_order\": current_player_order,\n }\n )", "title": "" }, { "docid": "50c51163b454055008f8ba32f5ababd1", "score": "0.49042723", "text": "def cmd_sync(args):\n\n if args.get(0):\n # Optional branch 
specifier.\n branch = fuzzy_match_branch(args.get(0))\n if branch:\n is_external = True\n original_branch = get_current_branch_name()\n else:\n print(\"{0} doesn't exist. Use a branch that does.\".format(\n colored.yellow(args.get(0))))\n sys.exit(1)\n else:\n # Sync current branch.\n branch = get_current_branch_name()\n is_external = False\n\n if branch in get_branch_names(local=False):\n\n if is_external:\n switch_to(branch)\n\n if repo.is_dirty():\n status_log(stash_it, 'Saving local changes.', sync=True)\n\n status_log(smart_pull, 'Pulling commits from the server.')\n status_log(push, 'Pushing commits to the server.', branch)\n\n if unstash_index(sync=True):\n status_log(unstash_it, 'Restoring local changes.', sync=True)\n\n if is_external:\n switch_to(original_branch)\n\n else:\n print('{0} has not been published yet.'.format(\n colored.yellow(branch)))\n sys.exit(1)", "title": "" }, { "docid": "3a565dabc3ef2326732058946c1a71cf", "score": "0.48988125", "text": "def updateclientmodels(sock, updatedweights):\n client.load_state_dict(updatedweights)\n for clientss in connectedclients:\n try:\n if clientss != sock:\n send_msg(clientss, updatedweights)\n except:\n pass", "title": "" }, { "docid": "c64aa9141bbb42188fba31680647f721", "score": "0.48850664", "text": "async def _sync_db():\n global _WATCHES, _ONLINE_STATES\n while True:\n _WATCHES.close()\n _WATCHES = dbm.open(os.path.join(os.environ[\"HOME\"], \"r99infowatches.db\"), \"c\")\n _ONLINE_STATES.close()\n _ONLINE_STATES = shelve.open(os.path.join(os.environ[\"HOME\"], \"r99infoonline.db\"), \"c\", 2)\n await asyncio.sleep(5)", "title": "" }, { "docid": "dc0702450078db1cdaecef1e3e76a382", "score": "0.48746255", "text": "def sync(self):\n self.syncing_function = self.stim_syncer.sync(self.bcontrol_loader,\n self.raw_data_loader)", "title": "" }, { "docid": "f4b79856311e39cd4fcacfa7502c89ef", "score": "0.48694023", "text": "async def __aenter__(self):\n client_id = self.client_id\n logger.info(\"OPEN %s %s\", self.ip_address, client_id)\n\n server = self.server\n queue = asyncio.Queue(loop=server.loop)\n\n for client in server.clients.values():\n await queue.put(Event(client.data, event_type=\"created\"))\n\n self.queue = queue\n server.clients[client_id] = self\n await server.add_event(Event(self.data, event_type=\"created\"))\n\n return self", "title": "" }, { "docid": "a4ecc93cdacc00ac478770a2c6789685", "score": "0.48664892", "text": "def incremental_sync_with_gap_for_room(self, room_id, sync_config,\n since_token, now_token,\n ephemeral_by_room, tags_by_room):\n logger.debug(\"Doing incremental sync for room %s between %s and %s\",\n room_id, since_token, now_token)\n\n # TODO(mjark): Check for redactions we might have missed.\n\n batch = yield self.load_filtered_recents(\n room_id, sync_config, now_token, since_token,\n )\n\n logging.debug(\"Recents %r\", batch)\n\n current_state = yield self.get_state_at(room_id, now_token)\n\n state_at_previous_sync = yield self.get_state_at(\n room_id, stream_position=since_token\n )\n\n state = yield self.compute_state_delta(\n since_token=since_token,\n previous_state=state_at_previous_sync,\n current_state=current_state,\n )\n\n just_joined = yield self.check_joined_room(sync_config, state)\n if just_joined:\n state = yield self.get_state_at(room_id, now_token)\n\n room_sync = JoinedSyncResult(\n room_id=room_id,\n timeline=batch,\n state=state,\n ephemeral=ephemeral_by_room.get(room_id, []),\n account_data=self.account_data_for_room(\n room_id, tags_by_room\n ),\n )\n\n 
logging.debug(\"Room sync: %r\", room_sync)\n\n defer.returnValue(room_sync)", "title": "" }, { "docid": "40777ae1de874dee374bb0a56ab3ffa2", "score": "0.48618433", "text": "def test_sync_backwards_typing(self) -> None:\n typing_url = \"/rooms/%s/typing/%s?access_token=%s\"\n sync_url = \"/sync?timeout=3000000&access_token=%s&since=%s\"\n\n # Register the user who gets notified\n user_id = self.register_user(\"user\", \"pass\")\n access_token = self.login(\"user\", \"pass\")\n\n # Register the user who sends the message\n other_user_id = self.register_user(\"otheruser\", \"pass\")\n other_access_token = self.login(\"otheruser\", \"pass\")\n\n # Create a room\n room = self.helper.create_room_as(user_id, tok=access_token)\n\n # Invite the other person\n self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id)\n\n # The other user joins\n self.helper.join(room=room, user=other_user_id, tok=other_access_token)\n\n # The other user sends some messages\n self.helper.send(room, body=\"Hi!\", tok=other_access_token)\n self.helper.send(room, body=\"There!\", tok=other_access_token)\n\n # Start typing.\n channel = self.make_request(\n \"PUT\",\n typing_url % (room, other_user_id, other_access_token),\n b'{\"typing\": true, \"timeout\": 30000}',\n )\n self.assertEqual(200, channel.code)\n\n channel = self.make_request(\"GET\", \"/sync?access_token=%s\" % (access_token,))\n self.assertEqual(200, channel.code)\n next_batch = channel.json_body[\"next_batch\"]\n\n # Stop typing.\n channel = self.make_request(\n \"PUT\",\n typing_url % (room, other_user_id, other_access_token),\n b'{\"typing\": false}',\n )\n self.assertEqual(200, channel.code)\n\n # Start typing.\n channel = self.make_request(\n \"PUT\",\n typing_url % (room, other_user_id, other_access_token),\n b'{\"typing\": true, \"timeout\": 30000}',\n )\n self.assertEqual(200, channel.code)\n\n # Should return immediately\n channel = self.make_request(\"GET\", sync_url % (access_token, next_batch))\n self.assertEqual(200, channel.code)\n next_batch = channel.json_body[\"next_batch\"]\n\n # Reset typing serial back to 0, as if the master had.\n typing = self.hs.get_typing_handler()\n typing._latest_room_serial = 0\n\n # Since it checks the state token, we need some state to update to\n # invalidate the stream token.\n self.helper.send(room, body=\"There!\", tok=other_access_token)\n\n channel = self.make_request(\"GET\", sync_url % (access_token, next_batch))\n self.assertEqual(200, channel.code)\n next_batch = channel.json_body[\"next_batch\"]\n\n # This should time out! 
But it does not, because our stream token is\n # ahead, and therefore it's saying the typing (that we've actually\n # already seen) is new, since it's got a token above our new, now-reset\n # stream token.\n channel = self.make_request(\"GET\", sync_url % (access_token, next_batch))\n self.assertEqual(200, channel.code)\n next_batch = channel.json_body[\"next_batch\"]\n\n # Clear the typing information, so that it doesn't think everything is\n # in the future.\n typing._reset()\n\n # Now it SHOULD fail as it never completes!\n with self.assertRaises(TimedOutException):\n self.make_request(\"GET\", sync_url % (access_token, next_batch))", "title": "" }, { "docid": "929b045475245ddd3763a313623a8404", "score": "0.4861605", "text": "def __init__(self,room):\r\n self.move_to(room)", "title": "" }, { "docid": "30d2b587275ae879c9d55be99d478047", "score": "0.48561555", "text": "async def getCompactRoom(self, room_id: str) -> Room:\n\t\treturn await self.call(\"getCompactRoom\", room_id)", "title": "" }, { "docid": "e6ec225892dbc19c52d9a97142ca091a", "score": "0.48492607", "text": "def reqsync(votername, config):\n\t\n\tprint(\"sync\")\n\tprint(votername)", "title": "" } ]
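Several of the negative passages above repeat the same session-state `sync` pattern: hash the state after each run and request exactly one rerun when the hash has changed, using an `is_rerun` flag to stop the loop. Below is a condensed sketch of that pattern; `session` and `hasher` are hypothetical stand-ins for the host framework's objects, not part of any passage.

```python
# Condensed sketch of the rerun-guard pattern from the sync() passages above.
# The session and hasher objects are assumed to be supplied by the framework.
class SyncedState:
    def __init__(self, session, hasher):
        self._state = {"session": session, "hasher": hasher,
                       "data": {}, "hash": None, "is_rerun": False}

    def sync(self):
        s = self._state
        if s["is_rerun"]:
            # This run was triggered by us; clear the flag instead of
            # rerunning again, which would loop forever on e.g. value += 1.
            s["is_rerun"] = False
        elif s["hash"] is not None:
            if s["hash"] != s["hasher"].to_bytes(s["data"], None):
                s["is_rerun"] = True
                s["session"].request_rerun()  # trigger exactly one rerun
        s["hash"] = s["hasher"].to_bytes(s["data"], None)
```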
0a48dcc0e5d8acc80bdcf94d7eb2230f
Return only compounds with no RO5 violations
[ { "docid": "3e3ac8481a00d1140725d7ed8624fbd8", "score": "0.71112686", "text": "def filter_ro5(mols):\n return mols.filter(molecule_properties__num_ro5_violations=0)", "title": "" } ]
[ { "docid": "ab98fc705bde3128e358773730bc475c", "score": "0.56570566", "text": "def find_redundant_compounds(self): \n\n stoich = self.stoichiometry\n\n # Transposing, to use the qr algorithm\n stoich_t = stoich.T\n n_rows = stoich_t.shape[0]\n n_cols = stoich_t.shape[1]\n \n if n_cols > n_rows:\n zeros = np.zeros((n_cols - n_rows, n_cols))\n padded = np.vstack((stoich_t, zeros))\n else:\n zeros = np.zeros((n_rows, n_rows - n_cols))\n padded = np.hstack((stoich_t, zeros))\n \n Q, R, P = qr(padded, pivoting=True)\n dependent = P[np.abs(R.diagonal()) < 1e-10]\n\n # Trimming padded values\n redundant = stoich.index[[i for i in dependent if i < n_cols]]\n\n msg = str(len(redundant)) + ' compounds identified as redundant'\n omfa.logger.info(msg)\n for i in range(len(redundant)):\n omfa.logger.info('\\t' + str(i+1) + ' ' + redundant[i])\n\n return(redundant)", "title": "" }, { "docid": "12450f44c51ad811c8dc97c57c31b3e9", "score": "0.5574998", "text": "def missed_ground_truth(self):\n return [box for box in self.ground_truth\n if box[4] < self.confidence_threshold]", "title": "" }, { "docid": "a70b8ae2aedd3e86cb1151a90a79af55", "score": "0.5422111", "text": "def _check_for_residual_space(self):\n covered_volume = sum(rect.volume for rect in self.hyperrectangles)\n if not np.isclose(covered_volume, 1):\n raise ValueError(\"The passed in rectangles do not cover \"\n \"all of [0,1]^n\")", "title": "" }, { "docid": "2f1bf8c3b8493d019bb12f38d0773f29", "score": "0.5327456", "text": "def free_throw_filter(shot_rows):\r\n filtered_shot_rows = []\r\n for shot_row in shot_rows:\r\n if (not (shot_row[0] == 0 and abs(shot_row[1]) in [28,42]) \r\n and (\"Free\" not in shot_row[3] and \"Lay\" not in shot_row[3])):\r\n filtered_shot_rows.append(shot_row)\r\n return filtered_shot_rows", "title": "" }, { "docid": "26da5fb3389411856906a7947052208b", "score": "0.5311303", "text": "def in_Ro5(mol):\n \n h_donor = Lipinski.NumHDonors(mol)\n h_accept = Lipinski.NumHAcceptors(mol)\n mw = Descriptors.MolWt(mol)\n logP = Descriptors.MolLogP(mol)\n \n Ro5 = h_donor <= 5 and h_accept <= 10 and mw <= 500 and logP < 5\n return(Ro5)", "title": "" }, { "docid": "885b92e61fe8896700dd4986651fb218", "score": "0.5289528", "text": "def test_query_cone_badfilt(self):\n tst_args = { 'ra': '53.157662568', 'dec': '-27.8075199236', 'size': '0.0002777' }\n lst = self.imgr.query_cone(tst_args, filt='BADfilt')\n assert lst is not None\n assert len(lst) == 0", "title": "" }, { "docid": "7d9dddf2e36a7689f925f215df5cdc2d", "score": "0.52892494", "text": "def test_query_cone_badcoll(self):\n tst_args = { 'ra': '53.157662568', 'dec': '-27.8075199236', 'size': '0.0002777' }\n lst = self.imgr.query_cone(tst_args, collection='BADcoll')\n assert lst is not None\n assert len(lst) == 0", "title": "" }, { "docid": "c6890bf33820ca81b87f1a57e73c647f", "score": "0.52669555", "text": "def accept(potList):\n if potList['NOE'].violations()>0:\n return False\n if potList['RDC'].rms()>1.2: #this might be tightened some\n return False\n if potList['CDIH'].violations()>0:\n return False\n if potList['BOND'].violations()>0:\n return False\n if potList['ANGL'].violations()>0:\n return False\n if potList['IMPR'].violations()>1:\n return False\n \n return True", "title": "" }, { "docid": "4e348f5da5cdf43d96764dd347d6c4f9", "score": "0.52626145", "text": "def test_query_cone_badboth(self):\n tst_args = { 'ra': '53.157662568', 'dec': '-27.8075199236', 'size': '0.0002777' }\n lst = self.imgr.query_cone(tst_args, filt='BADfilt', collection='BADcoll')\n assert lst is 
not None\n assert len(lst) == 0", "title": "" }, { "docid": "f027d6a06547fc706bd6745380c6b521", "score": "0.52531433", "text": "def check_all_checks(cls):\n all_checked = set((b.scope, b.name) for b in cls.all)\n unchecked = all_checked - cls.all_checks\n if unchecked:\n print(\"** These Boogers were never checked:\", file=sys.stderr)\n print(\"\\n\".join(\": \".join(check) for check in unchecked), file=sys.stderr)", "title": "" }, { "docid": "a0197ea31b4ab0fbfcef49167914505c", "score": "0.5250094", "text": "def testFilterNonCrosProjects(self):\n base_func = itertools.cycle(['chromiumos', 'chromeos']).next\n patches = self.GetPatches(8)\n for patch in patches:\n patch.project = '%s/%i' % (base_func(), _GetNumber())\n patch.tracking_branch = str(_GetNumber())\n\n non_cros_patches = self.GetPatches(2)\n for patch in non_cros_patches:\n patch.project = str(_GetNumber())\n\n filtered_patches = patches[:4]\n allowed_patches = []\n projects = {}\n for idx, patch in enumerate(patches[4:]):\n fails = bool(idx % 2)\n # Vary the revision so we can validate that it checks the branch.\n revision = ('monkeys' if fails\n else 'refs/heads/%s' % patch.tracking_branch)\n if fails:\n filtered_patches.append(patch)\n else:\n allowed_patches.append(patch)\n projects.setdefault(patch.project, {})['revision'] = revision\n\n manifest = MockManifest(self.build_root, projects=projects)\n\n self.mox.ReplayAll()\n results = validation_pool.ValidationPool._FilterNonCrosProjects(\n patches + non_cros_patches, manifest)\n\n def compare(list1, list2):\n mangle = lambda c:(c.id, c.project, c.tracking_branch)\n self.assertEqual(list1, list2,\n msg=\"Comparison failed:\\n list1: %r\\n list2: %r\"\n % (map(mangle, list1), map(mangle, list2)))\n\n compare(results[0], allowed_patches)\n compare(results[1], filtered_patches)", "title": "" }, { "docid": "447fc7a12873a8292aa15de75339b044", "score": "0.5234065", "text": "def rejects(self):\n #: lines that were rejected b/c they were for a card that isnt supported\n return self.reject_lines", "title": "" }, { "docid": "41d35c2829fd5870fdd86df234a9d487", "score": "0.5187228", "text": "def find_dead_end_compounds(self): \n\n stoich = self.stoichiometry\n\n dead_end = [i for i in stoich.index if sum(stoich.ix[i, stoich.columns] != 0) <= 1]\n\n msg = str(len(dead_end)) + ' compounds identified as dead-end'\n omfa.logger.info(msg)\n for i in range(len(dead_end)):\n omfa.logger.info('\\t' + str(i+1) + ' ' + dead_end[i])\n\n return(dead_end)", "title": "" }, { "docid": "d8cdf8999dec2a4fea373a740a320b63", "score": "0.51506704", "text": "def dead_reactions(self):\n\t\treturn list(filter(lambda reaction: abs(reaction.upper_bound) < CONST_EPSILON and abs(reaction.lower_bound) < CONST_EPSILON, self.__cobra_model.reactions))", "title": "" }, { "docid": "b54d36ca30a1f8ecb81873b85a8e782b", "score": "0.5139265", "text": "def get_clean_data_frame(self,df):\n g = df['r-free final : no ncs'] > 0\n g &= (df['r-free final : cartesian ncs restraints'] > 0)\n g &= (df['r-free final : torsion ncs restraints'] > 0)\n return df[g]", "title": "" }, { "docid": "017570a040a16f38ff744ef3e9792111", "score": "0.51170933", "text": "def get_unsolved(grid):\n\n unsolved = []\n\n for row in grid.grid:\n for square in row:\n if len(square.possibles) > 1:\n unsolved.append(square)\n return unsolved", "title": "" }, { "docid": "a3aa2257d43b58ec25a37228f45141f4", "score": "0.508613", "text": "def test_NoIons(self):\n pdb, path = get_pdb_from_remote_or_db(\"1izi\", \"all\", test_data.__path__[0])\n os.remove(path)\n 
self.assertItemsEqual(['Q50'], set(pdb.select(CurationSelections.LIGAND_SELECTION).getResnames()))", "title": "" }, { "docid": "fd18071ab2ff4016399cb2562abfa5e2", "score": "0.50748146", "text": "def check_rolled_records(self, today):\n rolled_objects = PrescribedBurn.objects.filter(date=today, rolled=True).exclude(form_name=PrescribedBurn.FORM_268A) #.exclude(completed=True)\n unset_objects = list(set(rolled_objects.filter(area__isnull=True, distance__isnull=True)).union(rolled_objects.filter(status__isnull=True)))\n return unset_objects", "title": "" }, { "docid": "c59921fba65d776ddc88cb917bc1754f", "score": "0.5068812", "text": "def missing_cards(tuple_of_cards):\n cards = set(tuple_of_cards)\n return [card for card in CARDS if card not in cards]", "title": "" }, { "docid": "99cc4a7f4247add7fe385c4572ac7980", "score": "0.50678724", "text": "def _remove_noise_in_o2m():\n if line.reconcile_partial_id:\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency <= 0:\n return True\n else:\n if line.amount_residual <= 0:\n return True\n return False", "title": "" }, { "docid": "8cf1ad501f309f67b8858669bb5dd036", "score": "0.5063048", "text": "def summarizeExcessCap(self):\n f = lambda c: e_cap(c, c.assignedRoom)\n return 100 * sum(map(f, self.courses_filt))/len(self.courses_filt)", "title": "" }, { "docid": "b1152ac514cd417c5933bd379eea7288", "score": "0.5059752", "text": "def remove_bad_values(self,bad_value=-99.9):\n for cml in self.set:\n cml.remove_bad_values(bad_value)", "title": "" }, { "docid": "7b30cb2e21a7f393de01db4e0963c26c", "score": "0.505736", "text": "def test_remove_census_not_in_ycom(self):\n census = pd.read_csv('climops/data/acs2015_county_data.csv')\n census_remove_pr = prepare_data.remove_census_not_in_ycom(census)\n #check to se whether remaining dataset includes PR (remaining obs\n # should be 0).\n test_set = census_remove_pr[census_remove_pr['State'] == 'Puerto Rico']\n pr_obs = test_set.shape\n self.assertTrue(pr_obs[0] == 0)", "title": "" }, { "docid": "bcc7fa560d693296706d17cdfb97b26f", "score": "0.50406694", "text": "def _elf_prune_problematic_conformers(\n cls, molecule: \"Molecule\"\n ) -> List[unit.Quantity]:\n\n valid_conformers = []\n\n for i, conformer in enumerate(molecule.conformers):\n\n is_problematic, reason = cls._elf_is_problematic_conformer(\n molecule, conformer\n )\n\n if is_problematic:\n logger.warning(f\"Discarding conformer {i}: {reason}\")\n else:\n valid_conformers.append(conformer)\n\n return valid_conformers", "title": "" }, { "docid": "157eb57161fa6c8ff3d8b3ec445b8337", "score": "0.50365067", "text": "def items_that_need_coverage(self):\n q = Edition.missing_coverage_from(self._db, [], self.coverage_source)\n clause = and_(Edition.data_source_id==LicensePool.data_source_id,\n Edition.primary_identifier_id==LicensePool.identifier_id)\n q = q.join(LicensePool, clause)\n q = q.filter(LicensePool.open_access == True).filter(\n Edition.open_access_download_url==None\n )\n return q", "title": "" }, { "docid": "d1c6b355c4dbc1394ad8bc3fbc30b5e1", "score": "0.50348026", "text": "def exclude_from_overlap_check(self):", "title": "" }, { "docid": "210a4699118872d44edc147c9fcd426a", "score": "0.50296766", "text": "def _check_bundles(cursor):\n b = cursor.all(\"\"\"\n SELECT bundles_total, balance\n FROM (\n SELECT owner, sum(amount) AS bundles_total\n FROM cash_bundles b\n GROUP BY owner\n ) foo\n JOIN participants p ON p.id = owner\n WHERE bundles_total <> balance\n \"\"\")\n assert len(b) == 0, \"bundles are out of whack: 
{}\".format(b)", "title": "" }, { "docid": "210a4699118872d44edc147c9fcd426a", "score": "0.50296766", "text": "def _check_bundles(cursor):\n b = cursor.all(\"\"\"\n SELECT bundles_total, balance\n FROM (\n SELECT owner, sum(amount) AS bundles_total\n FROM cash_bundles b\n GROUP BY owner\n ) foo\n JOIN participants p ON p.id = owner\n WHERE bundles_total <> balance\n \"\"\")\n assert len(b) == 0, \"bundles are out of whack: {}\".format(b)", "title": "" }, { "docid": "7ec860c4062330cdb17aae869356ea1f", "score": "0.50128496", "text": "def keep_science_exposures(self,df):\n bands= df.loc[:,'band']\n isGRZ= (bands == 'g') | (bands == 'r') | (bands == 'z') \n isObject= df.loc[:,'obstype'] == 'object'\n isScience= df['object'].str.lower().str.contains('decals')\n longExp= df['exptime'] > 30\n print('isGRZ %d/%d' % (len(df[isGRZ]),len(df)))\n print('isObject %d/%d' % (len(df[isObject]),len(df)))\n print('isScience %d/%d' % (len(df[isScience]),len(df)))\n print('longExp %d/%d' % (len(df[longExp]),len(df)))\n allCuts= ((isGRZ) & \n (isObject) &\n (isScience) &\n (longExp))\n print('allCuts %d/%d' % (len(df[allCuts]),len(df)))\n return df[allCuts]", "title": "" }, { "docid": "73357c82d674e4222b1117b4260aa2e5", "score": "0.49999732", "text": "def test_no_clips_in_project(self):\n pid = create_project(name=\"test_project2\")\n code, res = get_clips(data={PROJECT_ID: pid})\n self.assertEqual(code, 200)\n self.assertEqual(len(res[CLIPS]), 0)", "title": "" }, { "docid": "a7c59f387e1ac5847c79e892b6884f9c", "score": "0.49900657", "text": "def ALL_REGIONS_WITHOUT_CONTENT_RATINGS():\n return set(ALL_REGIONS) - set(ALL_REGIONS_WITH_CONTENT_RATINGS())", "title": "" }, { "docid": "d3223e00cc4b2c770e93434536de2846", "score": "0.49888155", "text": "def crit_5(t):\r\n #Escolheu-se nao obter as laterais programaticamente porque \r\n #diminui bastante a complexidade deste criterio \r\n posicoes = [\"b1\", \"a2\", \"c2\", \"b3\"] \r\n posicoes = [cria_posicao(*pos) for pos in posicoes] \r\n for pos in posicoes: \r\n if pecas_iguais(obter_peca(pos), cria_peca()): \r\n return pos", "title": "" }, { "docid": "03495209168bd332fe21a059f8119712", "score": "0.49805313", "text": "def get_empty_squares(self):", "title": "" }, { "docid": "7420a2febd494b7ceb2a72811ff67aab", "score": "0.4971833", "text": "def test_is_unidentified(self):\n self.assertEqual(Compound().is_unidentified, True)\n self.assertEqual(Compound(names=['Coumarin 343']).is_unidentified, False)\n self.assertEqual(Compound(labels=['3a']).is_unidentified, False)\n self.assertEqual(Compound(names=['Coumarin 343'], labels=['3a']).is_unidentified, False)\n self.assertEqual(Compound(melting_points=[MeltingPoint(value='250')]).is_unidentified, True)", "title": "" }, { "docid": "6e7d73c959b9ffb2fabb192b74cf9e7b", "score": "0.49689627", "text": "def filter_acceptable(self, query):\n # type: (Any) -> Any\n return query.filter(opentuner.resultsdb.models.Result.size < 1.0)", "title": "" }, { "docid": "c72635e364b0c90caefdbdd3fdea1379", "score": "0.49637672", "text": "def not_approved_ltcs(self) -> int:\n return self.__test_design_metric(row_nr=9)", "title": "" }, { "docid": "42cd6c958f4ce87ef29e7458b054f330", "score": "0.49596122", "text": "def test_incompatible_cable_stitches():\n in_data = [1, 1, 1, 4, 15, 1, 1, 1]\n with pytest.raises(KnitPaintCheckException) as err:\n resolve_cable_stitches(in_data, len(in_data))\n problems = err.value.problems\n assert len(problems) == 2\n assert problems[0].course == 0\n assert problems[0].wale == 3\n assert problems[1].course == 0\n 
assert problems[1].wale == 4", "title": "" }, { "docid": "e54d5d0d66550fbc9bea42e0ffab4b00", "score": "0.4951831", "text": "def excessCap(self):\n f = lambda c: e_cap(c, c.assignedRoom)\n return map(f, self.courses_filt)", "title": "" }, { "docid": "3124fb24fd596395e1dbb1e6028153b1", "score": "0.49452466", "text": "def get_nonstrict_lines(lines):\n return filter(lambda l: l.upper().find(\"NON\") != -1 or empty_section(l), lines)", "title": "" }, { "docid": "78f036fac00d9b5c861922eb512ea1f9", "score": "0.49444968", "text": "def remove_compounds(self, compounds):\n\n # Checking if string to avoid iterating by character\n if isinstance(compounds, basestring):\n compounds = [compounds]\n # Carrying on to check if integer to maintain consistency\n elif isinstance(compounds, (int, long)):\n compounds = [compounds]\n\n # Identify invalid compounds\n invalid = set(compounds) - set(self.stoichiometry.index)\n\n if len(invalid) > 0:\n msg = 'The following compounds are invalid: ' + ', '.join(invalid)\n omfa.logger.error(msg)\n raise omfa.ModelError(msg)\n else:\n msg = 'Dropping ' + ', '.join(compounds)\n omfa.logger.info(msg)\n self.stoichiometry = self.stoichiometry.drop(compounds)", "title": "" }, { "docid": "6867bec609242f22886d5c47e56d8f32", "score": "0.49380073", "text": "def rule_1(self):\n return [cell for cell in self.cells if cell.neighbors(self.cells) < 2]", "title": "" }, { "docid": "107fa14844cbfda199098f861da25940", "score": "0.4931519", "text": "def everything_incorrect(self):\n return self.results.everything_incorrect()", "title": "" }, { "docid": "304f8c04da55f527d1ce97fed607bd6e", "score": "0.4929644", "text": "def misclosure_error(self):\n for survey in self.project:\n survey.misclosure_error()", "title": "" }, { "docid": "0cad7409c85610e5c28bf78f51df1292", "score": "0.4926443", "text": "def to_ignore(self, enclosure):\n return all((\n enclosure['product'] == 'SGPIOEnclosure',\n '-MINI-' not in enclosure['model'],\n not enclosure['model'].startswith('R20'),\n ))", "title": "" }, { "docid": "1c92c7c424273853240d6e9d049b227c", "score": "0.49256653", "text": "def filter_biotherapeutic(mols):\n return mols.filter(biotherapeutic__isnull=False)", "title": "" }, { "docid": "9fcb99deba127260759e6ccb0aa128e6", "score": "0.49252394", "text": "def test_get_appliances_with_no_appliances(self):\n # Populate appliance table\n a1 = Appliance.objects.create(name=\"A 1\")\n a2 = Appliance.objects.create(name=\"A 2\")\n a1.save()\n a2.save()\n # Create empty recipe to test against\n recipe = Recipe.objects.create(title=\"Empty\", instructions=\"Empty\")\n\n self.assertEquals(list(recipe.get_appliances()), [], \"Empty recipe returns appliances.\")", "title": "" }, { "docid": "e70f4cb3943c603fbba73a7b8edac89e", "score": "0.4921788", "text": "def outstanding(self):\n return (\n self.filter(suppressed_at__isnull=True)\n .filter(expired_at__isnull=True)\n .filter(vol_prospect_contact_events=None)\n )", "title": "" }, { "docid": "87b238b389c2b23f57d2576bf1a10af2", "score": "0.4920438", "text": "def malformed_cards(tuple_of_objects):\n return [obj for obj in tuple_of_objects if not is_card(obj)]", "title": "" }, { "docid": "49227fe7a92dd013292a181c617fd8f2", "score": "0.49042115", "text": "def test_find_clashes_forall_other_series(self):\n par = self.real_parameters\n par.aions = True\n par.xions = True\n par.zions = True\n par.bions = 
False\n par.yions = False\n #par.bMinusNH3 = True\n #par.bMinusH2O = True\n #par.bPlusH2O = True\n\n par.q3_window = 4.5\n\n q3_low, q3_high = self.real_parameters.get_q3range_transitions()\n precursor = self.precursor\n transitions = precursor.calculate_transitions(q3_low, q3_high)\n \n nonunique = c_getnonuis._find_clashes_forall_other_series( \n tuple(transitions), self.interfering_precursors, par, q3_low, q3_high,\n par.q3_window, par.ppm, precursor.q1 - par.q1_window, False)\n\n self.assertEqual( len( nonunique ), 4)\n self.assertEqual( nonunique.keys(), [3,4,5,6] )\n\n self.assertEqual( len( nonunique[3] ), 1)\n self.assertEqual( len( nonunique[4] ), 2)\n self.assertEqual( len( nonunique[5] ), 3)\n self.assertEqual( len( nonunique[6] ), 1)\n\n # we have one interference with 3\n self.assertTrue( abs(nonunique[3][0][0] - 456.756009652) < self.EPSILON )\n self.assertTrue( abs(nonunique[3][0][1] - 500.787837374) < self.EPSILON )\n self.assertEqual( nonunique[3][0][2], 0) # empty\n self.assertEqual( nonunique[3][0][3], 665) # peptide key\n self.assertEqual( nonunique[3][0][4], 'x')\n self.assertEqual( nonunique[3][0][5], 8)\n self.assertEqual( nonunique[3][0][6], 'GGLIVELGDK') # sequence\n self.assertEqual( nonunique[3][0][-1], 2) # charge\n\n # we have two interferences with 4\n self.assertTrue( abs(nonunique[4][0][0] - 443.22541272) < self.EPSILON )\n self.assertTrue( abs(nonunique[4][0][1] - 506.58461326) < self.EPSILON )\n self.assertEqual( nonunique[4][0][2], 0) # empty\n self.assertEqual( nonunique[4][0][3], 618) # peptide key\n self.assertEqual( nonunique[4][0][4], 'a')\n self.assertEqual( nonunique[4][0][5], 10)\n self.assertEqual( nonunique[4][0][6], 'NGTDGGLQVAIDAMR') # sequence\n self.assertEqual( nonunique[4][0][-1], 2) # charge\n\n self.assertTrue( abs(nonunique[4][1][0] - 443.7267378235) < self.EPSILON )\n self.assertTrue( abs(nonunique[4][1][1] - 506.58461326 ) < self.EPSILON )\n self.assertEqual( nonunique[4][1][2], 0) # empty\n self.assertEqual( nonunique[4][1][3], 618) # peptide key\n self.assertEqual( nonunique[4][1][4], 'z')\n self.assertEqual( nonunique[4][1][5], 8)\n self.assertEqual( nonunique[4][1][6], 'NGTDGGLQVAIDAMR') # sequence\n self.assertEqual( nonunique[4][1][-1], 2) # charge\n\n # we have three interferences with 5\n self.assertTrue( abs(nonunique[5][0][0] - 557.280912722) < self.EPSILON )\n self.assertEqual( nonunique[5][0][4], 'a')\n self.assertEqual( nonunique[5][0][5], 12)\n self.assertEqual( nonunique[5][0][6], 'NGTDGGLQVAIDAMR') # sequence\n self.assertEqual( nonunique[5][0][-1], 2) # charge\n\n self.assertTrue( abs(nonunique[5][1][0] - 550.28240465) < self.EPSILON )\n self.assertEqual( nonunique[5][1][4], 'x')\n self.assertEqual( nonunique[5][1][5], 10)\n self.assertEqual( nonunique[5][1][6], 'NGTDGGLQVAIDAMR') # sequence\n self.assertEqual( nonunique[5][1][-1], 2) # charge\n\n self.assertTrue( abs(nonunique[5][2][0] - 557.2902278235) < self.EPSILON )\n self.assertEqual( nonunique[5][2][4], 'z')\n self.assertEqual( nonunique[5][2][5], 11)\n self.assertEqual( nonunique[5][2][6], 'NGTDGGLQVAIDAMR') # sequence\n self.assertEqual( nonunique[5][2][-1], 2) # charge\n\n # we have one interference with 6\n self.assertTrue( abs(nonunique[6][0][0] - 665.327537823 ) < self.EPSILON )\n self.assertEqual( nonunique[6][0][2], 0) # empty\n self.assertEqual( nonunique[6][0][3], 618) # peptide key\n self.assertEqual( nonunique[6][0][4], 'z')\n self.assertEqual( nonunique[6][0][5], 13)\n self.assertEqual( nonunique[6][0][6], 'NGTDGGLQVAIDAMR') # sequence\n 
self.assertEqual( nonunique[6][0][-1], 2) # charge", "title": "" }, { "docid": "714e4eaab013bb1cc8404881a64c63e6", "score": "0.4903402", "text": "def get_strict_lines(lines):\n return filter(lambda l: l.upper().find('NON') == -1 or empty_section(l), lines)", "title": "" }, { "docid": "d681cf368d3845ab503eaa4ba27899eb", "score": "0.4903144", "text": "def filter_by_rect_and_find():\n\n reduced_possibilities = {}\n for area_coord, area_possibilities in remaining_possibilities.items():\n accurate_area_possibilities = [possibility for possibility in area_possibilities\n if is_zone_free(possibility, grid)]\n if len(accurate_area_possibilities) == 0: # not shape fit in the area, the grid cannot be solved\n Log.info(f'<<< unsolvable - impossible for {area_info(area_coord, grid)}')\n return None\n elif len(accurate_area_possibilities) == 1: # found an area solution\n Log.info(f'rectangle added for {area_info(area_coord, grid)} - from rectangles')\n add_rectangle(accurate_area_possibilities[0], grid)\n else:\n reduced_possibilities[area_coord] = accurate_area_possibilities\n # logging purpose only\n eliminated_possibilities = set(area_possibilities) - set(accurate_area_possibilities)\n if len(eliminated_possibilities) > 0:\n Log.debug(f'eliminate {len(eliminated_possibilities)} inaccurate rectangles'\n f' for {area_info(area_coord, grid)}')\n return reduced_possibilities", "title": "" }, { "docid": "3f45c4a57e8ec273835b754952c068ff", "score": "0.4891872", "text": "def identifyOAssetholders(calc1):\n has_int1 = (calc1.array('e00300') != 0)\n has_int2 = (calc1.array('e00400') != 0)\n hasoassets = has_int1 | has_int2\n return hasoassets", "title": "" }, { "docid": "cff9679195f9f2bd95ad7a1ec74ef0a1", "score": "0.48909375", "text": "def get_sellability_report(melons):\n\n # Fill in the rest ", "title": "" }, { "docid": "1f826aed53f154939605fe2635c3b810", "score": "0.48850587", "text": "def test_ready_computations_excludes_r2c(self):\n name = Name(\"/test/NFN\")\n request_name = Name(\"/test/R2C\")\n self.computationList.add_computation(name, 0, Interest(name))\n self.computationList.container[0].add_name_to_await_list(request_name)\n self.assertEqual(len(self.computationList.container[0].awaiting_data), 1)\n ready_comps = self.computationList.get_ready_computations()\n self.assertEqual(ready_comps, [NFNComputationTableEntry(name)])", "title": "" }, { "docid": "aebf17e049ef781dab2a00f7a05c5cef", "score": "0.48828843", "text": "def testall(composite, examples, badExamples=[]):\n wrong = []\n for example in examples:\n if composite.GetActivityQuantBounds():\n answer = composite.QuantizeActivity(example)[-1]\n else:\n answer = example[-1]\n res, conf = composite.ClassifyExample(example)\n if res != answer:\n wrong.append((res, conf))\n badExamples.append(example)\n\n return wrong", "title": "" }, { "docid": "7a523913bc344e3b9c4712e342b8252b", "score": "0.48817363", "text": "def filter_diagnosed(self):\n queryset = self.filter(created__date=datetime.today())\n return queryset.filter(status__in=[Encounter.STATUS.WAIT_RESULT, Encounter.STATUS.RECEIVE_RESULT,\n Encounter.STATUS.CHECKED_OUT, Encounter.STATUS.WAIT_PAY,\n Encounter.STATUS.WAIT_DISPENSE, Encounter.STATUS.DISCHARGED])", "title": "" }, { "docid": "3f9508e44021d77921e50b74a099138d", "score": "0.48805833", "text": "def clean_redundancies(self) -> None:\n candidates_to_remove = [[candidate for candidate in candidate_set][0]\n for candidate_set in self._mapping.values() if len(candidate_set) == 1]\n sets_to_check = [candidate_set for candidate_set in 
self._mapping.values() if len(candidate_set) > 1]\n for candidate_to_remove in candidates_to_remove:\n for set_to_check in sets_to_check:\n if candidate_to_remove in set_to_check:\n set_to_check.remove(candidate_to_remove)", "title": "" }, { "docid": "dc3b517d8384bc9c7e0368296c0df2d7", "score": "0.4876179", "text": "def get_exclusions():\n nb_vdw_list = []\n model = cmd.get_model('all')\n for at in model.atom:\n nb = cmd.index('(index %s extend 2)' % at.index)\n nb_vdw_list.extend([tuple(sorted([at.index-1, i[1]-1])) for i in nb])\n nb_vdw = set(nb_vdw_list)\n return nb_vdw", "title": "" }, { "docid": "c29c87ffcb2ab80446bdd62fa76bd6a4", "score": "0.4874136", "text": "def refine_compounds(compounds, mols, gaps, failed_mols):\n for idx in sorted(failed_mols, reverse=True):\n del compounds[idx]\n del mols[idx]\n del gaps[idx]", "title": "" }, { "docid": "8776b01c5322597487bbaf5a49bb93ba", "score": "0.48680595", "text": "def test_image_metadata_by_filter_badcoll(self):\n res = self.imgr.image_metadata_by_filter('F444W', collection='BADcoll')\n print(res)\n assert res is not None\n assert len(res) == 0", "title": "" }, { "docid": "8f2bab583554447bb46643d540df70db", "score": "0.4858687", "text": "def _yield_compounds(self): # noqa: ANN101\n undefined_compounds = []\n for name in self.model.compounds:\n if not self.model.compounds[name]:\n undefined_compounds.append(name)\n if undefined_compounds:\n raise ValueError( # noqa: TRY003\n f\"undefined compounds: {', '.join(undefined_compounds)}\", # noqa: EM102\n ) # noqa: RUF100\n\n logfiles_table = Table(\n Column(\"no\", justify=\"right\"),\n Column(\"compound\", justify=\"left\"),\n Column(\"path\", justify=\"left\"),\n title=\"logfiles\",\n box=self.box_style,\n )\n compounds_table = Table(\n Column(\"no\", justify=\"right\"),\n Column(\"compound\", justify=\"left\"),\n Column(\"elec. 
energy\\n〈Eₕ〉\", justify=\"center\"),\n Column(\"spin mult.\", justify=\"center\"),\n Column(\"smallest vibfreqs\\n〈cm⁻¹〉\", justify=\"center\"),\n Column(\"point group\", justify=\"center\"),\n title=\"compounds\",\n box=self.box_style,\n )\n for i, (name, data) in enumerate(self.model.compounds.items()):\n path_text = None\n if data.logfile is not None:\n path_text = Text(data.logfile)\n path_text.highlight_regex(r\"[^\\/]+$\", \"bright_blue\")\n logfiles_table.add_row(f\"{i:d}\", name, path_text)\n\n vibfreqs_text = None\n if data.vibfreqs is not None:\n vibfreqs_text = Text(\n \", \".join([f\"{vibfreq:+7.1f}\" for vibfreq in data.vibfreqs[:3]]),\n )\n vibfreqs_text.highlight_regex(r\"-\\d+\\.\\d\", \"bright_yellow\")\n\n point_group = coords.find_point_group(\n atommasses=data.atommasses,\n atomcoords=data.atomcoords,\n )\n compounds_table.add_row(\n f\"{i:d}\",\n name,\n f\"{data.energy / (constants.hartree * constants.N_A):17.12f}\",\n f\"{data.mult}\",\n vibfreqs_text,\n point_group,\n )\n yield logfiles_table\n yield compounds_table", "title": "" }, { "docid": "a56df535398c5a5953995a1035a300db", "score": "0.48563296", "text": "def test_find_clashes_forall_other_series_by(self):\n par = self.real_parameters\n q3_low, q3_high = self.real_parameters.get_q3range_transitions()\n precursor = self.precursor\n transitions = precursor.calculate_transitions(q3_low, q3_high)\n \n nonunique = c_getnonuis._find_clashes_forall_other_series( \n tuple(transitions), self.interfering_precursors, par, q3_low, q3_high,\n par.q3_window, par.ppm, precursor.q1 - par.q1_window, False)\n\n self.assertEqual( len( nonunique ), 3)\n self.assertEqual( nonunique.keys(), [0,2,4] )\n\n self.assertTrue( abs(nonunique[0][0][0] - 842.4008) < self.EPSILON )\n self.assertTrue( abs(nonunique[0][0][1] - 506.584613) < self.EPSILON )\n self.assertEqual( nonunique[0][0][2], 0) # empty\n self.assertEqual( nonunique[0][0][3], 618) # peptide key\n self.assertEqual( nonunique[0][0][4], 'b')\n self.assertEqual( nonunique[0][0][5], 9)\n self.assertEqual( nonunique[0][0][6], 'NGTDGGLQVAIDAMR') # sequence\n self.assertEqual( nonunique[0][0][-1], 1) # charge\n\n self.assertTrue( abs(nonunique[2][0][0] - 565.8035) < self.EPSILON )\n self.assertEqual( nonunique[2][0][4], 'y')\n self.assertEqual( nonunique[2][0][5], 11)\n self.assertEqual( nonunique[2][0][6], 'NGTDGGLQVAIDAMR') # sequence\n self.assertEqual( nonunique[2][0][-1], 2) # charge\n\n self.assertTrue( abs(nonunique[4][0][0] - 440.287275) < self.EPSILON )\n self.assertTrue( abs(nonunique[4][0][1] - 500.787837374 ) < self.EPSILON )\n self.assertEqual( nonunique[4][0][2], 0) # empty\n self.assertEqual( nonunique[4][0][3], 665) # peptide key\n self.assertEqual( nonunique[4][0][4], 'b')\n self.assertEqual( nonunique[4][0][5], 5)\n self.assertEqual( nonunique[4][0][6], 'GGLIVELGDK') # sequence\n self.assertEqual( nonunique[4][0][-1], 1) # charge", "title": "" }, { "docid": "347082105d2d5e24af21546f4c5501fd", "score": "0.48525828", "text": "def find_not_removed_watchee(self, email):\n res = self.watcher_cc.aggregate([\n {\"$match\": {\"email_addr\": email}},\n {\"$project\": {\n \"not_removed\": {\"$filter\": {\n \"input\": \"$crn\",\n \"as\": \"courses\",\n \"cond\": {\"$eq\": [\"$$courses.removed\", False]}\n }}\n }}\n ])\n not_removed = []\n for i in res:\n not_removed.extend(i[\"not_removed\"])\n return not_removed", "title": "" }, { "docid": "13a1992ca1cc119a7814cb5f0860d0a7", "score": "0.48480925", "text": "def check_constraints_w_enforcement(self):\n\n return 
exclusions.open()", "title": "" }, { "docid": "6add7d15eb78d51eb98c5e1f5166390c", "score": "0.48479852", "text": "def test_capture_branch_no_rxn():\n u4br = {\"U234\": {\"U235\": 0.5, \"U235_m1\": 0.5}}\n\n chain_file = Path(__file__).parents[1] / \"chain_simple.xml\"\n chain = Chain.from_xml(chain_file)\n\n u5m = nuclide.Nuclide(\"U235_m1\")\n\n chain.nuclides.append(u5m)\n chain.nuclide_dict[u5m.name] = len(chain.nuclides) - 1\n\n with pytest.raises(AttributeError, match=\"U234\"):\n chain.set_branch_ratios(u4br)", "title": "" }, { "docid": "01fcfafaf5fd59a43c311cb305222420", "score": "0.48459184", "text": "def drop_less_significant(cleaned):\r\n cleaned=cleaned.groupby(\"SOC_CODE\").filter(lambda x: len(x)>15)\r\n cleaned=cleaned.groupby(\"SOC_TITLE\").filter(lambda x: len(x)>15)\r\n cleaned=cleaned.groupby(\"EMPLOYER_NAME\").filter(lambda x: len(x)>15)\r\n cleaned=cleaned.groupby(\"WORKSITE_STATE\").filter(lambda x: len(x)>15)\r\n return cleaned", "title": "" }, { "docid": "bdf4d15fe7c8d91749369cfa9df82dac", "score": "0.48420027", "text": "def category_conflicts(course):\n cat_found = dict.fromkeys(GE_AREAS.keys(), False)\n conflicts = list()\n\n for area in course.ge_areas:\n for cat, areas in GE_AREAS.items():\n if cat not in conflicts and area in areas:\n if cat_found[cat] == 1:\n conflicts.append(cat)\n else:\n cat_found[cat] = 1\n\n return conflicts", "title": "" }, { "docid": "4c3abc4d12592b9219c5ca52dd8a59ad", "score": "0.48231664", "text": "def test_exclude_precise(self):\n exclude = [\"H2O\", \"H2O_PLUS\", \"H2O_MINUS\", \"CO2\", \"LOI\"]\n df = pd.DataFrame(data=self.one_row)\n df.columns = self.cols\n devdf = devolatilise(df, exclude=exclude)\n # There should be all those which weren't excluded\n self.assertTrue(\n np.array(\n [\n i in devdf.columns\n for i in [i for i in df.columns if i not in exclude]\n ]\n ).all()\n )\n # There should be no new things which where unexpected included\n self.assertTrue(np.array([i in df.columns for i in devdf.columns]).all())", "title": "" }, { "docid": "c9f69a1d0632f2a2c6ff614398d89a9e", "score": "0.48216188", "text": "def false_detections(self):\n return [det for det in self.detections\n if det[4] >= self.confidence_threshold and not det[5]]", "title": "" }, { "docid": "7941a33eb6230cff9bad2b85fec83f45", "score": "0.48207965", "text": "def test_0(self):\n grid = Grid([[Color.black, Color.white, Color.black],\n [Color.white, Color.black, Color.green]])\n self.assertFalse(grid.all_rows_distinct_colors(BWG))", "title": "" }, { "docid": "9c03417bea93707ac4d075431282b193", "score": "0.48172104", "text": "def get_cpx_unsatisfied_cts(self, cts, sol, tolerance=1e-6):\n unsatisfied = []\n for ct in cts:\n if not ct.is_satisfied(sol, tolerance):\n # use mixin API to convert to cplex lingo\n cpx_lhs, cpx_sense, cpx_rhs = self.linear_ct_to_cplex(ct)\n unsatisfied.append( (ct, cpx_lhs, cpx_sense, cpx_rhs) )\n return unsatisfied", "title": "" }, { "docid": "443172915c8b0fde212760e5fde44d4d", "score": "0.48160225", "text": "def filter_must(self):\r\n return filter(\r\n lambda x: isinstance(x.constraint, CommitmentConstraint),\r\n self\r\n )", "title": "" }, { "docid": "d7b66572d57d5669e463c27cb6c194f2", "score": "0.48029044", "text": "def test_get_compound_dataframe():\n directory = r'../assets\\data\\instrument_data\\agilent_gc_residual_solvents\\sequence-1'\n filename = 'REPORT01.xls'\n data_file = os.path.join(directory, filename)\n workbook_data = pd.read_excel(data_file, sheet_name=None)\n compounds = 
instruments.get_compound_dataframe(workbook_data)\n assert 'measurement' in compounds.columns\n return compounds", "title": "" }, { "docid": "2a2f146da8a5596b47c1e67d8602fb62", "score": "0.48009792", "text": "def get_uncalibrated_jsons(self):\n ps = dep_util.get_files_ext(self.path_rigs, \"json\")\n files = [x for x in ps if \"calibrated\" not in x]\n return sorted(files, key=lambda s: s.casefold())", "title": "" }, { "docid": "e35c88be061a0ab8081044226a75f920", "score": "0.47851583", "text": "def is_guilty_to_lesser(self):\n return self.verdict == pc.VERDICT_GUILTY and self.offense_records.count() == 2 and not self.has_equivalent_offense_records()", "title": "" }, { "docid": "fb05c96f4426214551232da2608c3f49", "score": "0.47788897", "text": "def test_none(self):\n df = self.df.head(0).copy()\n for cation in [\"Mg\", \"Fe\"]:\n with self.subTest(cation=cation):\n aggdf = aggregate_element(df, to=cation)\n # Check that only one form is returned", "title": "" }, { "docid": "c57d5e912254eaa591da580b0ba3801d", "score": "0.47734663", "text": "def test_list_no_match(self):\n func = create_constrained_inputcubelist_converter(\n \"airspeed_velocity_of_unladen_swallow\",\n )\n with self.assertRaisesRegex(ConstraintMismatchError, \"^Got 0 cubes\"):\n func(self.wind_cubes)", "title": "" }, { "docid": "53acbfd6ddcc5196be3b266dc1b99262", "score": "0.47680584", "text": "def missing(self):\r\n #-----------------\r\n\r\n return set(range(1,10)) - set([cell.value for cell in self.cells])", "title": "" }, { "docid": "eb6b7ae3a5d1875fa0c6ddf0e3132995", "score": "0.4767532", "text": "def test_3d_Greyscale_cine_clip_all(self):\n from deid.dicom import DicomCleaner\n\n dicom_file = get_file(self.dataset, \"GREYSCALE_CINE.zip\", self.tmpdir)\n deid = os.path.join(self.deidpath, \"remove_coordinates_us_all.dicom\")\n\n client = DicomCleaner(output_folder=self.tmpdir, deid=deid)\n out = client.detect(dicom_file)\n self.assertTrue(out[\"flagged\"])\n\n client.clean()\n cleanedfile = client.save_dicom()\n\n outputfile = read_file(cleanedfile)\n outputpixels = outputfile.pixel_array\n\n inputfile = read_file(dicom_file)\n inputpixels = inputfile.pixel_array\n compare = inputpixels == outputpixels\n self.assertFalse(compare.all())\n\n inputpixels[:, :, :] = 0\n compare = inputpixels == outputpixels\n self.assertTrue(compare.all())", "title": "" }, { "docid": "524b7abd1329753777149e1bd5400ab9", "score": "0.47633016", "text": "def roc_conds(data, mort, ret_aucs = False):\n conds = []\n aucs = []\n for d in range(pl.shape(data)[1]):\n col = pl.copy(data[:,d])\n mort_nonan = pl.copy(mort)\n if sum(pl.isnan(col)) > 0:\n mort_nonan = pl.copy(mort[~pl.isnan(col)])\n col = col[~pl.isnan(col)]\n auc = roc_auc_score(mort_nonan, col)\n if auc < 0.5:\n conds.append(operator.lt)\n else:\n conds.append(operator.gt)\n aucs.append(auc)#pl.amax([auc, 1-auc]))\n if ret_aucs:\n return conds, aucs\n else:\n return conds", "title": "" }, { "docid": "179b058186838562280f96c33b67ac45", "score": "0.47622558", "text": "def check(self):\n errors = [err for rule in self.rules for err in rule.check(self.dictionary)]\n errors = list(set(errors))\n errors = self.sorting.sort(errors)\n errors = self.filtering.filter(errors)\n if errors:\n raise CertumException(f\"\\n\\n{self.printing.print(errors)}\")", "title": "" }, { "docid": "8c003d8b531ab8478d1604907bbdf996", "score": "0.47564667", "text": "def test_incomplete_cable_stitches_1():\n in_data = [1, 1, 4, 4, 1, 1]\n with pytest.raises(KnitPaintCheckException) as err:\n 
resolve_cable_stitches(in_data, len(in_data))\n problems = err.value.problems\n assert len(problems) > 0", "title": "" }, { "docid": "5faacdea569b09a42668df3a8acb9f26", "score": "0.47448885", "text": "def valid_mice(self):\n return [m.label for m in self.mice if not m.is_ignored]", "title": "" }, { "docid": "2f27c6f0b751941ed2e48d39bc39256c", "score": "0.47410977", "text": "def quick_check_of_OCS_emissions(target='OCS'):\n #\n root = '/users/ts551/scratch/GC/rundirs/'\n file_str = 'geosfp_4x5_tropchem.v12.2.1.AQSA.{}'\n suffix = 'CH3I.ALL.test_other_sources.repeat.II.OCS/'\n run_dict = {\n # intial test runs\n 'OCS_TEST' : root + file_str.format(suffix),\n }\n # use the run_dict from - obs.get_ground_surface_OCS_obs_DIRECT\n wds = run_dict # for debugging...\n target = 'OCS' # for testing\n #\n filename = 'HEMCO_diagnostics.201401010000.nc'\n # Get a dictionary of all the data\n dsDH = GetEmissionsFromHEMCONetCDFsAsDatasets(wds=run_dict)\n # - Analysis the totals\n # Extract the annual totals to a dataFrame\n df = pd.DataFrame()\n for run in dsDH.keys():\n print(run)\n # Get the values and variable names\n vars2use = [i for i in dsDH[run].data_vars if i != 'AREA']\n vars2use = list(sorted(vars2use))\n vals = [ dsDH[run][i].sum().values for i in vars2use ]\n # Save the summed values to the dataframe\n df[run] = pd.Series( dict(zip(vars2use, vals)) )\n# print( dsDH[run].sum() )\n # Print the DataFrame to screen\n print(df)", "title": "" }, { "docid": "6e1819d93ded8d3039247376e215a669", "score": "0.47367904", "text": "def test_bulk_exclusion_does_not_leave_too_few_candidates(self):\n\n votes = 17 * (('A Suitable Boy', ), ) + \\\n 12 * (('Farewell My Lovely', ), ) + \\\n 2 * (('What I Loved', ), ) + \\\n 1 * (('Gone Girl', ), )\n\n candidates = [\n 'A Suitable Boy',\n 'Farewell My Lovely',\n 'What I Loved',\n 'The Da Vinci Code',\n 'Angels and Demons',\n 'Gone Girl'\n ]\n vacancies = 4\n\n stv_round = Round(vacancies, candidates, votes)\n bulk_exclusions = stv_round._bulk_exclusions()\n\n self.assertTrue(len(bulk_exclusions) == 0)", "title": "" }, { "docid": "79c534b9ff58df76ab396b9d671c404d", "score": "0.4735367", "text": "def remove_never_executed(self):\n for cond in self.branch_conds:\n partialbr = []\n for branch in cond.get_branches():\n text = branch.get_name()\n if \"never executed\" not in text:\n partialbr.append(branch)\n if len(partialbr) == 0:\n self.only_never_executed.append(cond)\n else:\n cond.branches = partialbr\n self.interesting_conds.append(cond)", "title": "" }, { "docid": "1611ad5988ab48b2e43bc21659909bb5", "score": "0.47326818", "text": "def drop_score_empty(): # 删去评分为空的公式,如果第一列名为空,第一列也会被删\n empty_check_list = [u'企业总评分'.encode('utf-8')]\n for file_n in category_finance_files:\n print file_n\n\n dcu.merge_rows(file_n + '_index', file_url=corporation_index_file_url,\n dst_file_url=corporation_index_file_url)\n dcu.drop_rows_too_many_empty(file_n + '_index', file_url=corporation_index_file_url,\n dst_file_url=corporation_index_file_url, columns=empty_check_list, thresh=1)", "title": "" }, { "docid": "f64f7b8888e3d289d4e75abef923ed5e", "score": "0.47240153", "text": "def test_incomplete_cable_stitches_2():\n in_data = [1, 4, 4, 1, 4, 4, 1]\n with pytest.raises(KnitPaintCheckException) as err:\n resolve_cable_stitches(in_data, len(in_data))\n problems = err.value.problems\n assert len(problems) > 0", "title": "" }, { "docid": "08d0bdc5e3b17bd2ab695d0d70f05b01", "score": "0.47177666", "text": "def analyze_unconditional_status(self):\n #Initialize status to unknown for all 
blocks\n for block in self.iterate_blocks(BLACK+WHITE+EMPTY):\n block.status = \"unknown\"\n #import pdb; pdb.set_trace()\n self.analyze_color_unconditional_status(BLACK)\n self.analyze_color_unconditional_status(WHITE)\n #cleanup\n for block in self.iterate_blocks(BLACK+WHITE+EMPTY):\n del block.eye", "title": "" }, { "docid": "5f332ab5832d3d9b8e552a7d070709c5", "score": "0.47138566", "text": "def __unused(self):\n return [c for c in self.candidates if not self.redundant_feature(c)\n and related(c, single=True)]", "title": "" }, { "docid": "fbc4ae1e41bd59e5c97b6c597cabcf7d", "score": "0.4712267", "text": "def test_non_offenses_get_removed(self):\n result = remove_non_offenses(self.sheet.fillna(\"None\"))\n for item in result[\"Triggering Offense Category\"]:\n self.assertTrue(\"None\" not in item)", "title": "" }, { "docid": "e5c7dd13bd22c07c2a4307033c92eab8", "score": "0.4704868", "text": "def get_ignores(self, dt_boxes, gt_boxes):\n if gt_boxes.size:\n ioas = bbox_overlaps(dt_boxes, gt_boxes, mode='iof')\n ioas = np.max(ioas, axis=1)\n rows = np.where(ioas > self.iou_thres)[0]\n return len(rows)\n else:\n return 0", "title": "" }, { "docid": "f65aeb3ff0572d53675cba9e510a392f", "score": "0.46947604", "text": "def find_non_matching_alleles(sumstat_rec, vcf_rec):\n alts_to_remove = []\n for ref, alt in vcf_rec.yeild_alleles():\n if not compatible_alleles_either_strand(sumstat_rec.other_al,\n sumstat_rec.effect_al,\n ref,\n alt):\n alts_to_remove.append(alt)\n return alts_to_remove", "title": "" }, { "docid": "e8a57d37abbe932d2ea52fbc4fcc8fd4", "score": "0.46943173", "text": "def filter_boxes(self, boxes, box_confidences, box_class_probs):\n return None", "title": "" }, { "docid": "f5aed6297ee749d4756f874651f124a1", "score": "0.4692294", "text": "def remove_unwanted_analytes_from_data_file(self):\n # removes unwanted analytes\n self.trimmed_data_frame = self.raw_xml_data_frame[self.raw_xml_data_frame['id17'].isin([1.0, 2.0, 3.0])]", "title": "" }, { "docid": "b70246c6b06b4413139a5669eba2b87b", "score": "0.46901798", "text": "def whether_uncovered(self, m, n, c, Filled):\n for i in self.covered(m, n, c):\n if i in Filled:\n return False\n return True", "title": "" }, { "docid": "23c6dca894fa0e96fa1030d4b1e70a53", "score": "0.46892768", "text": "def for_opd_encounter(self):\n return self.filter(type__in=[Division.TYPE.CLINIC,\n Division.TYPE.PREMIUM_CLINIC,\n Division.TYPE.SHORTSTAY,\n Division.TYPE.SPECIMEN_COLLECTOR])", "title": "" }, { "docid": "587c8fe6f783634f6d71b71078d4fde5", "score": "0.4686451", "text": "def testAllLosing(self):\r\n\r\n allLoseStrings = [\"c\", \"co\", \"coo\", \"bag\", \"bar\"]\r\n errors = [prefix for prefix in allLoseStrings if self.winning(prefix)]\r\n assert not errors,\\\r\n (\"the following are incorrectly marked as winning: \" +\r\n str(errors))", "title": "" }, { "docid": "667c6c5d4557eb48fff0ee1570036396", "score": "0.46835056", "text": "def remove_sparse(self, n=2):\n bad=(self.total<n).any(axis=1)\n self.filter_snps(~bad)\n \n print(\"Removed \" +str(sum(bad)) + \n \" SNPs with <\" + str(n) + \" alleles in one population\", \n file=sys.stderr)", "title": "" } ]
e9edfb819f9cb3682910bd9b2ef8e39d
Make directory and subdirectories.
[ { "docid": "efb6daf34dbf00c329c8f286cb5fcfdc", "score": "0.64890075", "text": "def MakeDirs( self, path ) :\n \n # modules:\n import os\n \n # split in domain and remaining path:\n if ':' in path :\n domain,dpath = path.split(':',1)\n else :\n domain,dpath = 'home',path\n #endif\n # remove trailing '/' if present:\n dpath = dpath.rstrip('/')\n \n # first try to list full path, if this fails try parent, etc.\n # keep list of subdirs to be created:\n subdirs = []\n # init parents directory as full path:\n ppath = dpath\n # loop:\n while len(ppath) > 0 :\n # loop until directory can be listed or if it is not present;\n # list command:\n command = ['ecaccess-file-dir','%s:%s' % (domain,ppath)]\n # try to list, return code -1 for 'file not found':\n stdout,stderr,retcode = self._Call( command )\n # no failure if exists ...\n if retcode == 0 :\n # directory was found, no new mkdirs needed; leave:\n break\n elif retcode == -1 :\n # any subdirs left?\n if '/' in ppath :\n # split into new parent directory\n ppath,subdir = ppath.rsplit('/',1)\n # prepend in list:\n subdirs = [subdir]+subdirs\n else :\n # no subdirs left, the current ppath is a subdir to be created too:\n subdirs = [ppath]+subdirs\n # empty:\n ppath = ''\n #endif\n else :\n # some error ...\n print( stdout )\n print( stderr )\n print( 'ERROR from command: %s' % command )\n raise Exception\n #endif\n #endwhile\n # create subdirs?\n if len(subdirs) > 0 :\n # get creation mode, None if not defined:\n mode = self.GetMode( directory=True )\n # loop:\n for subdir in subdirs :\n # new path:\n if len(ppath) > 0 : ppath = ppath+'/'\n ppath = ppath+subdir\n # create:\n command = ['ecaccess-file-mkdir','%s:%s' % (domain,ppath)]\n stdout,stderr,retcode = self._Call( command )\n # error?\n if retcode != 0 :\n print( stdout )\n print( stderr )\n print( 'ERROR from command: %s' % command )\n raise Exception\n #endif\n # change mode?\n if mode is not None :\n # set mode:\n command = ['ecaccess-file-chmod',mode,'%s:%s' % (domain,ppath)]\n stdout,stderr,retcode = self._Call( command )\n # error?\n if retcode != 0 :\n print( stdout )\n print( stderr )\n print( 'ERROR from command: %s' % command )\n raise Exception\n #endif\n #endif\n #endfor # subdirs\n #endif # create subdirs", "title": "" } ]
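The positive passage above builds the remote tree one component at a time because the ecaccess file tools expose no recursive mkdir: it walks up the path until a parent can be listed, then creates and chmods each missing subdirectory on the way back down. On a local filesystem the same query collapses to a few lines of standard library; the sketch below is illustrative only (the helper name and the 0o755 default are assumptions, not taken from the passage):

```python
import os

def make_dirs(path, mode=0o755):
    """Create ``path`` and any missing parent directories.

    A no-op if the directory already exists; raises OSError if a
    regular file already occupies the path, matching the usual
    ``mkdir -p`` contract.
    """
    if os.path.isfile(path):
        raise OSError("cannot create directory, file exists: %r" % path)
    # exist_ok makes the call idempotent; mode is applied (subject to
    # the process umask) only to directories that are newly created.
    os.makedirs(path, mode=mode, exist_ok=True)
```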
[ { "docid": "ce93fd6c8b470bab7977b1f05bf747f8", "score": "0.7336835", "text": "def mkdirs():\n dirs = [\n PYTHON_DIR,\n BUILDS_DIR,\n SHARED_DIR,\n RUN_DIR,\n STATIC_DIR,\n LOG_DIR,\n MEDIA_DIR,\n ]\n for dir in dirs:\n _run_web(\"mkdir -p %s\" % dir)\n _run_web(\"touch %s\" % CURRENT_DIR)", "title": "" }, { "docid": "542fce1e614cde6b60dd7f8745efacea", "score": "0.71491116", "text": "def makeDir(self, path,*args):\n if not os.path.exists(path):\n pm.sysFile(path, makeDir=True)\n #os.makedirs(path)", "title": "" }, { "docid": "c0ca4b8d9f0d8f9a12e6fb356fa876ee", "score": "0.70842254", "text": "def mk_all_dirs(dirname):\n original_dir=os.getcwd()\n subdirs=dirname.split(\"/\")\n for curdir in subdirs:\n if os.path.isfile(curdir):\n os.rename(curdir,os.tempnam(\"./\",curdir+\"_\"))\n if not os.path.isdir(curdir):\n try:\n os.mkdir(curdir)\n except Exception as e:\n print(e)\n os.chdir(curdir)\n\n os.chdir(original_dir)", "title": "" }, { "docid": "79ea8c357728d501d6da80503ffd22d6", "score": "0.70482004", "text": "def makeDirs(*args):\n for arg in args:\n path = os.path.dirname(arg['target'] if isinstance(arg, flow.Node) else arg)\n if path:\n try:\n os.makedirs(path)\n except EnvironmentError, e:\n if e.errno != errno.EEXIST:\n raise", "title": "" }, { "docid": "d588a389e460c3c9c8f778ea4b51b65b", "score": "0.70305526", "text": "def _MakeDirs(*args, **kwargs):\n try:\n os.makedirs(*args, **kwargs)\n except OSError:\n pass", "title": "" }, { "docid": "58d1446ddc0395ebcbc48dca23cd8812", "score": "0.7022971", "text": "def create_dir_tree():\n dirs = paths.get_dirs()\n for d in dirs.values():\n if not os.path.isdir(d):\n print('creating {}'.format(d))\n os.makedirs(d)", "title": "" }, { "docid": "2d390ea27b861aa1da9f0a8a3ab6f19c", "score": "0.6991802", "text": "def MakeDirs( self, path ) :\n \n # modules:\n import os\n\n # create if necessary:\n if not os.path.isdir(path) :\n # info ..\n self.info( 'create directory: %s' % path )\n # get creation mode:\n mode = self.GetMode( directory=True )\n # with mode?\n if mode is not None :\n # create recursively, convert mode from octal to integer:\n os.makedirs( path, mode=eval('0o'+mode) )\n else :\n # create:\n os.makedirs( path )\n #endif\n #endif", "title": "" }, { "docid": "45046fe26186f8a44326f16e0bb63b68", "score": "0.69616294", "text": "def make_directory(dir_list):\n for _dir in dir_list:\n if not os.path.exists(_dir):\n os.makedirs(_dir)\n return", "title": "" }, { "docid": "786faaa4d6a71117545ebdd6286d6565", "score": "0.69375515", "text": "def mkdirs(*args):\n for p in args:\n if not os.path.isdir(p):\n os.mkdir(p)", "title": "" }, { "docid": "830c82f04eafe0dc39878546a07811fe", "score": "0.6930362", "text": "def make_directories(self, path: str):\n current_dir = \"\"\n\n for directory in os.path.split(path)[0].split(os.sep):\n next_dir = os.path.join(current_dir, directory)\n if not self.directory_exists(current_dir, directory):\n self.ftp.mkd(next_dir)\n current_dir = next_dir", "title": "" }, { "docid": "d27c1ed355184281d7912893dfb499f9", "score": "0.6889773", "text": "def createAllDirs():\n createDirIfNeeded(DIR_OUTPUT_BLOCKSTATES)\n createDirIfNeeded(DIR_OUTPUT_BLOCKS)\n createDirIfNeeded(DIR_OUTPUT_ITEMS)", "title": "" }, { "docid": "458197556cb42509ccf36f2224dd6be0", "score": "0.6878645", "text": "def make_directories(self) -> types.TaskDict:\n\n def mkdirs(targets: Sequence[str]) -> None:\n for directory in targets:\n Path(directory).mkdir(exist_ok=True)\n\n task = self.basic_task\n task.update(\n {\n \"name\": 
\"{}/{}\".format(self._root, MAKE_TASK_NAME),\n \"doc\": \"Create directory hierarchy for {}.\".format(self._root),\n \"title\": utils.title_with_target1(\"MKTREE\"),\n \"actions\": [mkdirs],\n \"targets\": self.directories,\n \"uptodate\": [True],\n }\n )\n return task", "title": "" }, { "docid": "117716f9f6008b4c9fd9afffe44d11a1", "score": "0.6869847", "text": "def create_dirs(f):\n make_dir(OUTPUT_DIR)\n make_dir_rec(OUTPUT_DIR + \"/ckpt/\")\n make_dir_rec(OUTPUT_DIR + \"/performance/\")", "title": "" }, { "docid": "f443860cd378406f87a7ba14bf96293a", "score": "0.68128544", "text": "def MakeDir(dst):\n print 'mkdir -p ' + dst\n oshelpers.Mkdir(['-p', dst])", "title": "" }, { "docid": "cc440fbfa828238f949e02fa27d046a1", "score": "0.6782039", "text": "def make_dirs(dirs):\n\tfor d in dirs:\n\t\tos.makedirs(d, exist_ok=True)", "title": "" }, { "docid": "3dcd7e481eb0f9d6aa91a40cbe09b43a", "score": "0.6735967", "text": "def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)", "title": "" }, { "docid": "0e3903bca5e441d1aa69c6b41121d423", "score": "0.6730184", "text": "def make_sub_dirs(model_type, ver, main_dir):\n sub_dir_1 = main_dir + model_type + '/'\n sub_dir_2 = sub_dir_1 + ver + '/'\n\n make_directory(main_dir)\n make_directory(sub_dir_1)\n make_directory(sub_dir_2)\n\n return sub_dir_2", "title": "" }, { "docid": "8a25ee436bcd0d47e4348003b8607cb4", "score": "0.6705954", "text": "def make_dir(path):\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)", "title": "" }, { "docid": "bc5faacd8dba93e8a736e0934eaee9db", "score": "0.6704804", "text": "def make_dirs(*dirs):\n for dir in dirs:\n if not path.isdir(dir):\n makedirs(dir)", "title": "" }, { "docid": "d40cddb37ff95ee4ec77574702a2fb70", "score": "0.67003304", "text": "def recursive_directories_create(project_directory, structure, dry_run=False):\n logger = logging.getLogger('optimus')\n \n for item in structure:\n if len(item)>0:\n new_dir = item[0]\n path_dir = os.path.join(project_directory, new_dir)\n if not os.path.exists(path_dir):\n logger.info('* Creating new directory : %s', path_dir)\n if not dry_run:\n os.makedirs(path_dir)\n else:\n logger.warning('* Following path allready exist : %s', path_dir)\n # Follow children directories to create them\n if len(item)>1:\n recursive_directories_create(path_dir, item[1], dry_run=dry_run)\n \n return", "title": "" }, { "docid": "e640d778ad39d38a50f4a469e5e5d9d9", "score": "0.6659188", "text": "def MakeDirs( path, **kwargs ) :\n \n # obtain GSS ojbect and remaining path:\n gss,rpath = GSS( path, **kwargs )\n\n # create:\n gss.MakeDirs( rpath )", "title": "" }, { "docid": "e7b9268af5d87d5d1d5e780dbc01a411", "score": "0.6656963", "text": "def make_dirs(path):\n if not os.path.isdir(path):\n os.makedirs(path, mode=0o777)", "title": "" }, { "docid": "612e60776c5bbda74e8c225fc3181c25", "score": "0.6554929", "text": "def create_directory(path, new_directory):\n for i in range(43):\n\n directory = path + '\\\\' + digit_number(i) + str(i)\n\n for root, dirs, files in os.walk(directory):\n for filename in files:\n copyfile(directory + '\\\\' + filename, new_directory + '\\\\' + str(i) + \"_\" + filename)", "title": "" }, { "docid": "6b10190914ae9538a4c3c6433ca106a3", "score": "0.65497", "text": "def mkdirs(path):\n if not os.path.isdir(path):\n os.makedirs(path)", "title": "" }, { "docid": "5fa6bb333c56d3ad5adc2c38709b490d", "score": "0.65257365", "text": "def make_dir_structure(base_dir):\n\n def maybe_makedir(*args):\n\n p = join(base_dir, *args)\n\n if exists(p) and not isdir(p):\n 
raise IOError(\"File '{}' exists but is not a directory \".format(p))\n\n if not exists(p):\n makedirs(p)\n\n maybe_makedir(DOWNLOAD_DIR)\n maybe_makedir(PACKAGE_DIR)\n maybe_makedir(OLD_DIR)", "title": "" }, { "docid": "1fe5915c27dc576ba391459bb1b3f26a", "score": "0.65208316", "text": "def folder_maker(path):\n if not os.path.exists(path):\n os.mkdir(path)", "title": "" }, { "docid": "87968f892ef4054ebfabdfbca3b63502", "score": "0.6519899", "text": "def make_directory(dir_path, mode=0o777):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path, mode=mode)", "title": "" }, { "docid": "2104c9b7aab28f39d76b00a5d45256be", "score": "0.65141076", "text": "def make_dir_rec(path):\n if not os.path.isdir(path):\n os.makedirs(path, exist_ok=True)", "title": "" }, { "docid": "77af259a6ed67947338e86d591f08a45", "score": "0.65131664", "text": "def make_dir (dir):\n if not os.path.exists (dir):\n os.mkdir (dir)", "title": "" }, { "docid": "8ddf6c00f9bf1e9ff385e0a712c151dc", "score": "0.6507649", "text": "def mkdirs(newdir):\n printDBG('mkdirs: \"%s\"' % newdir)\n if os.path.isdir(newdir):\n pass\n elif os.path.isfile(newdir):\n raise OSError(\"cannot create directory, file already exists: '%s'\" % newdir)\n else:\n head, tail = os.path.split(newdir)\n if head and not os.path.isdir(head) and not os.path.ismount(head) and not os.path.islink(head):\n mkdirs(head)\n if tail:\n os.mkdir(newdir)", "title": "" }, { "docid": "95b801b238c3613a58f6e25c97e5b9d1", "score": "0.64925635", "text": "def mkdir(self,rmdir=False):\n import os\n if rmdir:\n os.rmdir(str(self))\n cur=Path('./')\n for intdir in self.split():\n cur+=Path(intdir)\n if not os.path.isdir(cur):\n os.mkdir(cur)", "title": "" }, { "docid": "d9ec0c2b73c2281a8cd40a92c41b0735", "score": "0.648629", "text": "def make_dir(fpath) -> None:\n pathlib.Path(fpath).mkdir(parents=True, exist_ok=True)", "title": "" }, { "docid": "d87b17eb611b9a509ec401d082e055af", "score": "0.647003", "text": "def make_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "title": "" }, { "docid": "0af557cc3ff3661a85fc4e988ee7d705", "score": "0.6455032", "text": "def make_dir(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)", "title": "" }, { "docid": "01d601164078d66e2ffa3c51aad36be3", "score": "0.64532316", "text": "def _make_directory(self, path, do_raise=False):\n try:\n os.makedirs(path)\n except OSError:\n if do_raise: raise\n else:\n os.chown(path, self.owner.pw_uid, self.owner.pw_gid)\n os.chmod(path, 0775)", "title": "" }, { "docid": "4ae22980416872bd8fce06ff3dcc8dee", "score": "0.6449504", "text": "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "title": "" }, { "docid": "7865ebc96ef41e1835fdb2eb847585d0", "score": "0.6440436", "text": "def makeDirectory(DIR):\n \n if not os.path.exists(DIR):\n os.makedirs(DIR)\n print \">>> made directory \" + DIR\n \n\n\n\n\n ##############\n # Categories #\n ##############", "title": "" }, { "docid": "272dcba0213acdbd54529d588eb8d745", "score": "0.6428357", "text": "def
make_directories(path):\n try:\n os.makedirs(os.path.dirname(path))\n except OSError, e:\n if e.errno != errno.EEXIST: # don't complain if already exists\n raise", "title": "" }, { "docid": "a5ba4fbe3aa5b2ada08e66228d6a4a02", "score": "0.6424633", "text": "def make_dirs_if_needed(*dirs) :\n\tfor dir in dirs :\n\t\tif not exists(dir) :\n\t\t\tmakedirs(dir)", "title": "" }, { "docid": "61162d699e776d88bb8da21674860458", "score": "0.6400825", "text": "def replicate_directory_tree(input_dir, output_dir):\n def transplant_dir(target, dirname):\n x = dirname.replace(input_dir, target)\n if not os.path.exists(x):\n LOGGER.info('Creating: {}'.format(x))\n os.makedirs(x)\n\n dir_visitor(\n input_dir,\n functools.partial(transplant_dir, output_dir)\n )", "title": "" }, { "docid": "ad5ddf63aa8169d732db5815ad657277", "score": "0.63999045", "text": "def make_dir(dir_name):\n try:\n os.makedirs(dir_name)\n except OSError:\n pass", "title": "" }, { "docid": "ecfe1510e749ff34acd0faad2494225e", "score": "0.637565", "text": "def make_dir(path):\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise", "title": "" }, { "docid": "22a08bd2fb487d5d46b8cc9cfff9247b", "score": "0.63677007", "text": "def make_directory(path):\n os.makedirs(path, exist_ok=True)", "title": "" }, { "docid": "420e190b9c95c8c605ea8d09c34f349a", "score": "0.6364349", "text": "def makeDirectory(DIR):\n if not os.path.exists(DIR):\n os.makedirs(DIR)\n #print \">>> made directory \" + DIR", "title": "" }, { "docid": "822edd48b61ff8317464ac3be0c8ecde", "score": "0.63573384", "text": "def directoryCreate (\n \n self,\n path = None\n ) :\n\n return self.makeDirectory( path )", "title": "" }, { "docid": "2cc8708b739cdbfb958203e8a43a03d9", "score": "0.63418084", "text": "def create_dir(self):\n dir1 = os.path.dirname(self.dst + \"/\")\n if not os.path.exists(dir1):\n print \"Making directory: \" + dir1\n os.makedirs(dir1)", "title": "" }, { "docid": "f3c3552b52cb80543c91e58a31d7c360", "score": "0.63380724", "text": "def mkdir(self, parents: bool = False, exist_ok: bool = False):\n pass", "title": "" }, { "docid": "99cabd05fa6b79e637804fdd45810747", "score": "0.63192296", "text": "def set_up_directory_simple(rootdir, classname):\n dir_path = os.path.join(rootdir, classname)\n make_directory(dir_path)\n return dir_path", "title": "" }, { "docid": "810e3e2fff199bd58a803549731af5e3", "score": "0.63168776", "text": "def mkdir(d):\n if not test(\"-d %s\" % d):\n run(\"mkdir -p %s\" % d)", "title": "" }, { "docid": "751699902181c6e16a45feb86b887fd0", "score": "0.63147205", "text": "def __create_dir(dir):\n if not dir.exists():\n dir.mkdir(parents=True)", "title": "" }, { "docid": "e338187034b2526155ffdeaae411e8d9", "score": "0.6303725", "text": "def dir_create(dir_path):\r\n if not os.path.isdir(dir_path):\r\n os.makedirs(dir_path)", "title": "" }, { "docid": "6aac36f54f602af9748bcadb8931eda6", "score": "0.626529", "text": "def make_dir(dirname, verbose=True):\n absdir = fix_path(dirname)\n if not os.path.isdir(absdir):\n os.makedirs(absdir)\n if verbose:\n print >>sys.stderr, \"Created directory: %s\" % dirname", "title": "" }, { "docid": "d11fd45a96d5ba5601d3ba52c15353d2", "score": "0.6244863", "text": "def make_assembly_subdir(self):\n if self.assembly_subdir_needed:\n if self.assembly_subdir is not None:\n if not os.path.isdir(self.assembly_subdir):\n os.makedirs(self.assembly_subdir)\n else:\n pass", "title": "" }, { "docid": "54f3207fdb84f7df7a91dd7f535b1b57", "score": "0.62383574", "text": "def createDir(path):\n if 
not os.path.isdir(path):\n os.makedirs(path)", "title": "" }, { "docid": "fbbe786214f5f9fcfe729508a2907c35", "score": "0.62380534", "text": "def directory_setup(path):\n for i in count(1):\n new_path = path + str(i)\n try:\n makedirs(new_path)\n break\n except:\n pass\n return new_path", "title": "" }, { "docid": "28233374daa1caf4ed54300ba5f827d0", "score": "0.6236168", "text": "def makeDirs(dir, absMode = None, reuseExisting = False):\n\n # Don't remake dir if already exists\n if (reuseExisting):\n if os.path.isdir(dir):\n return\n\n # calc path depth\n depth = 0\n (head, tail) = os.path.split(dir)\n while head:\n if tail:\n depth += 1 \n if head == '/':\n break\n (head, tail) = os.path.split(head)\n \n retries = 0\n # Set umask to zero, to enable the creation of 777 dirs\n oldUmask = os.umask(0) \n while retries <= depth:\n retries += 1\n try:\n # Create the dir\n if (absMode is not None):\n os.makedirs(dir, absMode)\n else:\n os.makedirs(dir)\n except:\n # maybe the directory was created by another thread\n if os.path.isdir(dir):\n break\n else:\n if retries == depth:\n os.umask(oldUmask)\n raise\n\n os.umask(oldUmask)", "title": "" }, { "docid": "291f67063b72608be2acefa0a69c04f4", "score": "0.6226757", "text": "def mkdirs(newdir, mode=None):\n if mode is None:\n mode = 0o750\n if os.path.isdir(newdir):\n pass\n elif os.path.isfile(newdir):\n raise OSError(\"a file with the same name as the desired \"\n \"dir, '%s', already exists.\" % newdir)\n else:\n os.makedirs(newdir, mode)", "title": "" }, { "docid": "cf39f8535de9c2f1be6e7d7f586861ee", "score": "0.6210859", "text": "def mkdirs(path):\n os.makedirs(path, exist_ok=True)", "title": "" }, { "docid": "36aee08538fdeb51f51c3bb91d010438", "score": "0.62106806", "text": "def make_dir(path, name):\n\n dir = os.path.join(path, name)\n check_dir = os.path.isdir(dir)\n\n if not check_dir:\n os.makedirs(dir)\n print(f'New directory, {name}, made in {path}.')\n else:\n print(f'{name} directory already exists in {path}.')\n return dir", "title": "" }, { "docid": "400b403da03372443bf5f8d9570efc4a", "score": "0.6205585", "text": "def setup_work_directory(self):\n for subdirectory in self.WORK_DIRECTORIES:\n Path(self.work_directory, subdirectory).mkdir(parents=True, exist_ok=True)", "title": "" }, { "docid": "383d0d06f29c51cfbd59eff1b179fc88", "score": "0.62014335", "text": "def create_dirs(path):\n if path and not os.path.isdir(path):\n os.makedirs(path)", "title": "" }, { "docid": "dc1b60059e5e167505cf5fa7c7ae0f09", "score": "0.6201415", "text": "def mkdirs(filename: typing.Union[os.PathLike, str], /, *, mode: int = 0o777) -> None:\n dirname = os.path.dirname(filename)\n if not dirname:\n return\n log.debug('os.makedirs(%r)', dirname)\n os.makedirs(dirname, mode=mode, exist_ok=True)", "title": "" }, { "docid": "5d82ac0e019dd658274ee7bdbc7da138", "score": "0.6195935", "text": "def mkdirs(self, *directories: str) -> None:\n for directory in list(directories):\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise", "title": "" }, { "docid": "a73e4dd67c0449a80f960ace25cbe4a3", "score": "0.61948305", "text": "def mkdir(self, folder: str):", "title": "" }, { "docid": "6786034af029e3c41b8c46efca3d4abc", "score": "0.6191701", "text": "def recursive_makedir(path: str, is_file=False):\n try:\n target = path if not is_file else os_file_separator.join(path.split(os_file_separator)[:-1])\n makedirs(target)\n except FileExistsError as ex:\n assert ex.errno == errno.EEXIST", "title": "" }, { "docid": 
"a3c0462ec6e07bd2b077c04f850d7d98", "score": "0.6188425", "text": "def mkdirs(path):\r\n if not os.path.isdir(path):\r\n try:\r\n os.makedirs(path)\r\n except OSError as err:\r\n if err.errno != errno.EEXIST or not os.path.isdir(path):\r\n raise", "title": "" }, { "docid": "1f0ba42992865d9738bafafcbffb7b4e", "score": "0.6187536", "text": "def folder_creator(path):\n if not os.path.exists(path):\n os.makedirs(path)", "title": "" }, { "docid": "31d20e0de407373a8fb829f507fdc4ce", "score": "0.6184182", "text": "def setUpDirs():\n if not os.path.isdir(\"videos\"):\n os.mkdir(\"videos\")\n\n if not os.path.isdir(\"thumbnails\"):\n os.mkdir(\"thumbnails\")\n\n if not os.path.isdir(\"music\"):\n os.mkdir(\"music\")\n\n if not os.path.isdir(\"music/wav\"):\n os.mkdir(Path(\"music/wav\"))\n\n if not os.path.isdir(\"music/mp3\"):\n os.mkdir(Path(\"music/mp3\"))\n\n if not os.path.isdir(\"faces\"):\n os.mkdir(Path(\"faces\"))\n\n if not os.path.isdir(\"temp\"):\n os.mkdir(Path(\"temp\"))", "title": "" }, { "docid": "cc3bb9e5a51fb57c169610ad532eabb6", "score": "0.6183504", "text": "def create_directories(get_torrents, get_magnets, dest_dir):\n if get_torrents:\n folder = os.path.join(dest_dir, \"torrents\")\n file_io.create_directory(folder)\n\n if get_magnets:\n folder = os.path.join(dest_dir, \"magnets\")\n file_io.create_directory(folder)", "title": "" }, { "docid": "8f2dec531d09c23b9833bb6ca19f500a", "score": "0.6180252", "text": "def create_dir(path):\n os.makedirs(path)", "title": "" }, { "docid": "a8018f37c0776cdb2b2d356fd183cd12", "score": "0.61692864", "text": "def _makedir(self, system):\n dirname = self._filenames(system)[\"dirname\"]\n if not os.path.exists(dirname):\n os.makedirs(dirname)", "title": "" }, { "docid": "e4f474922a5bd9770605224c45a9ec88", "score": "0.6165306", "text": "def directory_creator(self, path):\n if not os.path.exists(path):\n os.makedirs(path)", "title": "" }, { "docid": "c011d60610ab3fc192ade74fbf84ebb3", "score": "0.61579937", "text": "def MakeDirectoryIfAbsent(path):\n if not os.path.isdir(path):\n os.makedirs(path)", "title": "" }, { "docid": "1da648de619005d516f09730453223f2", "score": "0.61553186", "text": "def create_dirs(target_dirs):\n for dir_path in target_dirs:\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)", "title": "" }, { "docid": "6e5fd68f978c2abff32699c84cf807cc", "score": "0.61537683", "text": "def setup_dirs(conf, root):\n conf.vardir = root / \"var/lib/fastpath\"\n conf.cachedir = conf.vardir / \"cache\"\n conf.s3cachedir = conf.cachedir / \"s3\"\n conf.dfdir = conf.vardir / \"dataframes\"\n conf.outdir = conf.vardir / \"output\"\n conf.msmtdir = conf.outdir / \"measurements\"\n for p in (\n conf.vardir,\n conf.cachedir,\n conf.s3cachedir,\n conf.dfdir,\n conf.outdir,\n conf.msmtdir,\n ):\n p.mkdir(parents=True, exist_ok=True)", "title": "" }, { "docid": "bb24a4158950a54dc14f547d8a2fc8c5", "score": "0.61509436", "text": "def mkdir(path):\n\tif not os.path.isdir(path):\n\t\tos.makedirs(path)", "title": "" }, { "docid": "82b1460d019655d95bc8844555d4608e", "score": "0.61493057", "text": "def make_dir(base_dir, name):\n if path.exists(base_dir) and path.isdir(base_dir):\n directory = path.join(base_dir, name)\n if path.exists(directory) and path.isdir(directory):\n #raise RuntimeError(\"Directory already exists: {}\".format(directory))\n return directory\n else:\n makedirs(directory)\n return directory\n else:\n raise RuntimeError(\"Directory does not exist: {}\".format(base_dir))", "title": "" }, { "docid": 
"7742470e80ebe4a70fe0a8468b9b64b4", "score": "0.61452585", "text": "def create_dirs(idiomas):\n directorios=os.listdir(\"./\")\n if (\"graphics\" not in directorios):\n os.makedirs(\"./graphics\")\n else:\n dir_lang=os.listdir(\"./graphics/\")\n for idioma in idiomas:\n if idioma not in dir_lang:\n os.makedirs(\"./graphics/\"+idioma+\"/data\")\n if(\"data\" not in directorios):\n os.makedirs(\"./data\")", "title": "" }, { "docid": "1af956ea1f8afc72313e94093e37ae0e", "score": "0.613982", "text": "def mkdir(path):\n if not exists(path):\n makedirs(path)", "title": "" }, { "docid": "2bca5e99be6d6710f3b84824884a19a7", "score": "0.61383635", "text": "def _make_folders(self, folders: List[str]):\n self._make_initial_folders()\n for folder in folders:\n self._mkdir(folder)", "title": "" }, { "docid": "abc571b0a16121be0f37301c68836ee6", "score": "0.61260813", "text": "def new_dirs():\n for target in FIELDS:\n if not os.path.exists(target):\n print(':: Create folder')\n os.makedirs(target)", "title": "" }, { "docid": "9b1e067a1793470df1cf279baae8de82", "score": "0.6119317", "text": "def mkdirs(paths):\n if isinstance(paths, list) and not isinstance(paths, str):\n for path in paths:\n mkdir(path)\n else:\n mkdir(paths)", "title": "" }, { "docid": "9b1e067a1793470df1cf279baae8de82", "score": "0.6119317", "text": "def mkdirs(paths):\n if isinstance(paths, list) and not isinstance(paths, str):\n for path in paths:\n mkdir(path)\n else:\n mkdir(paths)", "title": "" }, { "docid": "b4df1e8db9535b588dce22e1a37727c6", "score": "0.6117014", "text": "def CreacionDirectoriosProyecto(Root):\n os.makedirs(os.path.join(Root, 'input'), exist_ok=True)\n os.makedirs(os.path.join(Root, 'input\\processed'), exist_ok=True)\n os.makedirs(os.path.join(Root, 'library'), exist_ok=True)\n os.makedirs(os.path.join(Root, 'output'), exist_ok=True)", "title": "" }, { "docid": "82dc71342a6ff2779c3dde128684aa10", "score": "0.6111609", "text": "def create_dir(self, relpath):\n return self.get_dir(relpath).create()", "title": "" }, { "docid": "80569fcdbdd5bc308de052972d1b5e7f", "score": "0.61066616", "text": "def make_dir(datapath, mypath):\n if not os.path.exists(datapath + os.sep + mypath):\n os.makedirs(datapath + os.sep + mypath)\n pass", "title": "" }, { "docid": "e525cb19975ee6e8c790e0219c8e6dc0", "score": "0.6106294", "text": "def make_directory(path):\n if not os.path.exists(path):\n os.mkdir(path)\n return None", "title": "" }, { "docid": "6a69620517d73a8483ad6af42edfda97", "score": "0.6105435", "text": "def mktree(path):\n if not os.path.isdir(path):\n debug(\"mktree {}\", path)\n os.makedirs(path)", "title": "" }, { "docid": "98fbe08fe2a147d3df527b523999b3a2", "score": "0.61048704", "text": "def mkdircond(directory):\n if not os.path.isdir(directory):\n os.mkdir(directory)", "title": "" }, { "docid": "24e5c6e65e347a03323b508787f8672d", "score": "0.6104711", "text": "def do_mkdir(self, path):\n self.dropbox.file_create_folder(self.current_path + \"/\" + path)", "title": "" }, { "docid": "e93025724e397343b4a1195d282cb56e", "score": "0.61009216", "text": "def create_dir(path):\n if (os.path.exists(path)) and (os.listdir(path) != []):\n shutil.rmtree(path)\n os.makedirs(path)\n if not os.path.exists(path):\n os.makedirs(path)", "title": "" }, { "docid": "30966f589e31ee707b856994a87b8f7f", "score": "0.60996324", "text": "def mkdirs(self, dir):\n\n dirPath = self.jvm.org.apache.hadoop.fs.Path(dir)\n self.logger.info(\"Making directory %s\" % dir)\n return self.hadoop_fs.mkdirs(dirPath)", "title": "" }, { "docid": 
"a639f519d3879d1e31500ad5bfa5ccab", "score": "0.60952425", "text": "def setup_dirs():\n pathlib.Path(RESULTS_DIR).mkdir(parents=True, exist_ok=True)\n pathlib.Path(DOWNLOADS_DIR).mkdir(parents=True, exist_ok=True)", "title": "" }, { "docid": "526dd079101b01c55668cc00edb78412", "score": "0.6093531", "text": "def mkdir(new_dir, auth, parents=False):\n dirname = os.path.basename(new_dir)\n headers = dict(auth)\n headers['Content-Type'] = 'application/json'\n data = json.dumps({\n \"name\": dirname,\n \"folder\": {}\n })\n\n # make dir. This somehow only works with the ID. So get the ID before.\n parent = os.path.dirname(new_dir)\n parent_meta = get_metadata(file=parent, auth=auth)\n\n if parent_meta.status_code == 404 and parents: # parent does not exist but should be created!\n mkdir(parent, auth, True) # recurse into parents\n parent_meta = get_metadata(file=parent, auth=auth)\n\n parent_id = dict(parent_meta.json_body()).get('id', '00000000')\n res = requests.post(base_url + \"/drive/items/\" + parent_id + \"/children\", headers=headers, data=data)\n return Result(res)", "title": "" }, { "docid": "1a3bf831319a44f8647d448b15afb58b", "score": "0.6092909", "text": "def make_parent_dir(path):\n parent_dir = get_dir(path)\n mkdir(parent_dir)", "title": "" }, { "docid": "bcb22c79da211755c5ece8c6ed925e34", "score": "0.60842854", "text": "def main():\n for root, _dirs, files in os.walk(DIR):\n for file in files:\n try:\n in_file = f\"{root}/{file}\"\n out_dir = os.path.join(os.pardir, root)\n pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)\n generate(in_file, f\"{out_dir}/{file}\")\n except ValueError:\n pass", "title": "" }, { "docid": "0684a8519b7897800a61615a3261223d", "score": "0.60746723", "text": "def createFolders(self):\n if not os.path.exists(self.imgFolder):\n os.makedirs(self.imgFolder)\n if not os.path.exists(self.xformFolder):\n os.makedirs(self.xformFolder)\n if not os.path.exists(self.txtFolder):\n os.makedirs(self.txtFolder)\n if not os.path.exists(self.gifFolder):\n os.makedirs(self.gifFolder)", "title": "" } ]
689ef4dc44033992faded2f451aa3274
Sets the diameter of the
[ { "docid": "f8714e9170fbddd16ab67ffce4d4ee82", "score": "0.6536274", "text": "def set_diameter(self, value):\n if self._device.successfully_set_diameter(value):\n return \"{}\".format(self.Stopped)\n else:\n run_status = self.get_run_status()\n return \"{}NA{}\".format(self._return, run_status)", "title": "" } ]
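The positive passage sets the diameter through a device handle and answers with a run status, while the hard negatives that follow are dominated by the plain property form of the same idea: a diameter computed from, or written through to, a stored radius. A minimal self-contained sketch of that shared pattern (the Circle class and its attribute names are illustrative assumptions, not taken from any passage):

```python
class Circle:
    """Toy shape illustrating the diameter/radius property pair."""

    def __init__(self, radius):
        self.radius = radius

    @property
    def diameter(self):
        # Derived on demand so that radius stays the single source of truth.
        return self.radius * 2

    @diameter.setter
    def diameter(self, value):
        # Writing the diameter keeps the two quantities consistent.
        self.radius = value / 2
```

With this in place, `c = Circle(3); c.diameter = 10` leaves `c.radius == 5.0`, which is exactly the behaviour the `test_set_diameter` negatives below assert.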
[ { "docid": "2e8922b60569a2d01d86bc413100f22d", "score": "0.85909635", "text": "def diameter(self, diameter):\n\n self._diameter = diameter", "title": "" }, { "docid": "7353f3cffe18dd73e85d4804f159418a", "score": "0.701887", "text": "def diameter(self):\r\n return self.radius * 2", "title": "" }, { "docid": "74d5b5f3e71278a0157da9f8a56cb49c", "score": "0.678239", "text": "def diameter(self):\n return self.r*2", "title": "" }, { "docid": "4b2693c86e5831b0531d69370087029c", "score": "0.6764692", "text": "def diameter(self):\r\n return self.r*2", "title": "" }, { "docid": "a33acde53617599535ece21fec6e7441", "score": "0.6754572", "text": "def test_set_diameter():\n d1 = Circle(3)\n d2 = Circle(10)\n\n d1.diameter = 12\n assert d1.diameter == 12\n assert d1.radius == 6\n\n d2.diameter = 3\n assert d2.diameter == 3\n assert d2.radius == 1.5", "title": "" }, { "docid": "cf11a07caa1ef66d9fe2cc80dda00191", "score": "0.6695417", "text": "def diameter(self):\r\n return self._r*2", "title": "" }, { "docid": "392b72a4926740f235673612bf698911", "score": "0.6691262", "text": "def diameter(self):\n return self._diameter", "title": "" }, { "docid": "516d48c330f15e0e01ab3488cff766b7", "score": "0.66901207", "text": "def diameter(self):\n return self._r*2", "title": "" }, { "docid": "6ff3bf2aed186d2fb5f7d28687d6554a", "score": "0.6498333", "text": "def set_soma_size_from_Diam(self, diam):\n if self.use_morphology:\n return # do not do this if we are using morphology\n # print(\"Setting soma size from Diameter\",)\n # assert self.use_morphology is False # do not reset values if we are using hoc file\n self.somaarea = 1e-8 * 4.0 * np.pi * (diam / 2.0) ** 2 # in microns^2\n self.totcap = self.c_m * self.somaarea * 1e6\n # lstd = diam # 1E4 * ((self.somaarea / np.pi) ** 0.5) # convert from cm to um\n self.soma.diam = diam\n self.soma.L = diam", "title": "" }, { "docid": "04d98e8ff68c4754081fdcfd48c5f62b", "score": "0.61506265", "text": "def size(self, size):\n self.width = size\n self.height = size", "title": "" }, { "docid": "1980dda368e870a37bf7051c0e49803a", "score": "0.6136078", "text": "def from_diameter(cls, diameter):\n return cls(diameter / 2)", "title": "" }, { "docid": "50c6af3b733aed903f92d4679eeb5507", "score": "0.6123093", "text": "def diameter(self) -> BaPSFConstant:\n return self._diameter", "title": "" }, { "docid": "c6827446c5713d2faf3dcc15a19f6c4", "score": "0.60529727", "text": "def size(self, value):\n self.width = value\n self.height = value", "title": "" }, { "docid": "41cfae0eb1c66bdffbdbcca65263749a", "score": "0.6037229", "text": "def test_sphere_set_diameter():\n s1 = Sphere(3)\n s2 = Sphere(10)\n\n s1.diameter = 6\n assert s1.diameter == 6\n assert s1.radius == 3\n assert s2.radius == 10\n assert s2.diameter == 20\n\n s2.diameter = 3\n assert s2.diameter == 3\n assert s2.radius == 1.5", "title": "" }, { "docid": "679e4b613aef9caa0710f13c8cbf8b2d", "score": "0.60282296", "text": "def density(self, density):\n\n self._density = density", "title": "" },
{ "docid": "3e42d44b052cf3c6f00fb7931b2d1143", "score": "0.6002557", "text": "def set_dspacing(self, d):\n self.d = d", "title": "" }, { "docid": "811a40ccee4e58310b9ae7711b5db77d", "score": "0.59813046", "text": "def size(self, value):\n\n self.width = value\n self.height = value", "title": "" }, { "docid": "300896b4392f07cbdd0382b87f3c5666", "score": "0.5973357", "text": "def set_size(self, width: int, height: int):\n self.__width = width\n self.__height = height", "title": "" }, { "docid": "46c3b1aec624bd92f3ee37e1e3b3a375", "score": "0.5933524", "text": "def setRadius(self, radius=1.0):\n self.radius = radius\n self.radiusSq = self.radius**2", "title": "" }, { "docid": "d4a4940b1b6258ebff4120eada3dfdbc", "score": "0.59329057", "text": "def size(self, size):\n self.dimensions = (self.x, self.y, size[0], size[1])\n self.invalidate()", "title": "" }, { "docid": "77490edad6c71efec0f24826723590e3", "score": "0.5931716", "text": "def update_size(self):\n self.size = self.orange + self.blue + self.yellow", "title": "" }, { "docid": "c3ed10a728fff63c5d72b07bbadbe191", "score": "0.5900453", "text": "def setDimensions(self, dim):\n self.dim = dim\n L, H = dim\n x, y = self.coords\n self.vertices[:] = [[x-0.5*L, y-0.5*H],\n [x+0.5*L, y-0.5*H],\n [x+0.5*L, y+0.5*H],\n [x-0.5*L, y+0.5*H]]\n self.volume = L*H", "title": "" }, { "docid": "5537eaf9628ccd716186ffbbc2260935", "score": "0.5887625", "text": "def size(self, value):\n super().__setattr__('width', value)\n super().__setattr__('heigth', value)", "title": "" }, { "docid": "00e8a54a06e38a617bb45132c8e67631", "score": "0.58542734", "text": "def diamond(self, x, y, size, scale):\n\n top = self.get(x, y - size)\n right = self.get(x + size, y)\n bottom = self.get(x, y + size)\n left = self.get(x - size, y)\n\n average = ((top + right + bottom + left) / 4)\n self.set(x, y, average + scale)", "title": "" }, { "docid": "258422a8e3c1c642414e173aead0f120", "score": "0.5847665", "text": "def set_density(self, units, density=NO_DENSITY):\n\n check_type('the density for Material ID=\"{0}\"'.format(self._id),\n density, Real)\n check_value('density units', units, DENSITY_UNITS)\n\n if density == NO_DENSITY and units is not 'sum':\n msg = 'Unable to set the density Material ID=\"{0}\" ' \\\n 'because a density must be set when not using ' \\\n 'sum unit'.format(self._id)\n raise ValueError(msg)\n\n self._density = density\n self._density_units = units", "title": "" }, { "docid": "374b34da3f58a88f1b07ed0b06a25cc4", "score": "0.58173096", "text": "def set_size(self, xs=1.0, ys=1.0):\n self.xSize = xs\n self.ySize = ys", "title": "" }, { "docid": "76fb2de55c9e138786d5f0b93edff6df", "score": "0.5807616", "text": "def setRadius(self, rad):\r\n self.radius = rad", "title": "" }, { "docid": "4d0312509c1d42b9733a8bf6ca4f57da", "score": "0.58073926", "text": "def set_size(self, size):\n\t\tself.size = size", "title": "" }, { "docid": "fad65005837c9a8660b1391298208202", "score": "0.57615745", "text": "def diameter(self):\n return max(starmap(self.distance, combinations(self._graph, 2)))", "title": "" }, { "docid": "36be5eafe929a4e0e67fcf30e7e43cf8", "score": "0.57609415", "text": "def set_size(self, size):\r\n\r\n pass", "title": "" }, { "docid": "2f03de8a762801128e00ce22aac71ebc", "score": "0.5749086", "text": "def
set_radius( self, radius ):\n self.radius = radius\n self.invalidate()", "title": "" }, { "docid": "2b00b72d4670fc6713a10f0c1b8f0605", "score": "0.5734548", "text": "def set_value(self, width, height):\n self.__width = width\n self.__height = height", "title": "" }, { "docid": "0e2c72bb0e1764ef56fef43638534b9a", "score": "0.57194686", "text": "def set_fraction(self, value):\n if value < 0:\n value *= -1\n value = min(value, 1)\n if self.horizontal:\n width = int(self.width * value)\n height = self.height\n else:\n width = self.width\n height = int(self.height * value)\n self.canvas.coords(self.meter, self.xpos, self.ypos,\n self.xpos + width, self.ypos + height)", "title": "" }, { "docid": "f8b48f6f4293c11219299e53e0aaacce", "score": "0.5716819", "text": "def thickness(self, th: float) -> None:\n self._lvis.thickness = th", "title": "" }, { "docid": "ee00dca4f32972b00c4a2e790994ce6a", "score": "0.57103926", "text": "def set_size(self, size):\n self.set_property(\"size\", size)\n self.emit(\"appearance_changed\")", "title": "" }, { "docid": "10cc5b03ead2e040c2475848956152d6", "score": "0.57092535", "text": "def setSize(self, sizeX, sizeY):\n self.size = LVecBase2i(sizeX, sizeY)", "title": "" }, { "docid": "a4e9d41ffa82d1e83950508322e8c850", "score": "0.5688525", "text": "def setDimensions(self, dim):\n self.dim = dim\n L, W, H = dim\n x, y, z = self.coords\n self.vertices[:] = [[x-0.5*L, y-0.5*W, z-0.5*H],\n [x-0.5*L, y+0.5*W, z-0.5*H],\n [x+0.5*L, y+0.5*W, z-0.5*H],\n [x+0.5*L, y-0.5*W, z-0.5*H],\n [x-0.5*L, y-0.5*W, z+0.5*H],\n [x-0.5*L, y+0.5*W, z+0.5*H],\n [x+0.5*L, y+0.5*W, z+0.5*H],\n [x+0.5*L, y-0.5*W, z+0.5*H]]\n self.volume = L*W*H", "title": "" }, { "docid": "4b18f5d928654a994cfc030e158be8e4", "score": "0.5669085", "text": "def measure_diode(self):\n self.mode = 'diode'", "title": "" }, { "docid": "8e3f61423aa8e39de9982809b89314cd", "score": "0.56675017", "text": "def set_size(self, size):\r\n pass", "title": "" }, { "docid": "9239fd74a378c7247109906676d52e5c", "score": "0.5664343", "text": "def sized(self, size: float):\n self._p.size = size\n return self", "title": "" }, { "docid": "0209b3616ad8d3caada9093541843702", "score": "0.5663205", "text": "def setSize(self, size):\n pass", "title": "" }, { "docid": "79cc7fc15f9595982a4337f696b4555c", "score": "0.56621206", "text": "def _setHeight(self, value):\r\n self.size.height = value", "title": "" }, { "docid": "6da91e7a714dd88e3d86a53c275565bc", "score": "0.5637468", "text": "def set_size(self, size):\n self.size = size\n self.call()", "title": "" }, { "docid": "5e2bc91a482f63e79141f56a9a7fa5f6", "score": "0.5637214", "text": "def set_size(self, xs, ys, zs):\n self.cone.set_size(xs*1.0, ys*1.0, zs*1.0)\n self.tip.set_size(xs*0.15, ys*0.15, zs*0.15)\n self.tip.set_location(self.cone.getX(), self.cone.getY() + self.cone.ySize/0.7, self.cone.getZ())", "title": "" }, { "docid": "55da39fddd18e01265e90b7a3b94b334", "score": "0.56249297", "text": "def set_dx(self, dx):\n assert dx >= 0., \"dx must be nonnegative\"\n self.dx = float(dx)", "title": "" }, { "docid": "938e1d60942e9c117b41fb0b011245b2", "score": "0.5608492", "text": "def set_radius(self, radius: float) -> None:\n pass", "title": "" }, { "docid": "454238b3cd47cc4f3ad57be430b735bd", "score": "0.55843496", "text": "def set_size(self, width, height):\n self.left = -width / 2\n self.right = width / 2\n if
self.bottom_up:\n self.top = -height / 2\n self.bottom = height / 2\n else:\n self.top = height / 2\n self.bottom = -height / 2\n self.width = width\n self.height = height", "title": "" }, { "docid": "1f46a0897ff81df55d02747c985965b9", "score": "0.55642694", "text": "def _radius_changed(self):\n\n diameter = self.radius * 2\n self.bounds = [diameter, diameter]\n\n return", "title": "" }, { "docid": "0df08e9a8116477c2242459b447a61db", "score": "0.556325", "text": "def setDensity(self,zRange, rho):\n self.setProperty(self.rho, zRange, rho)", "title": "" }, { "docid": "bf17474110c96e3c77a30412be41cf26", "score": "0.55541277", "text": "def set_radius(self):\n self._radius = (\n (self._location[0]-1,self._location[1]-1),\n (self._location[0]-1,self._location[1]),\n (self._location[0]-1,self._location[1]+1),\n (self._location[0],self._location[1]+1),\n (self._location[0]+1,self._location[1]+1),\n (self._location[0]+1,self._location[1]),\n (self._location[0]+1,self._location[1]-1),\n (self._location[0],self._location[1]-1))", "title": "" }, { "docid": "762785a66468fefe30c27c2a58e8699d", "score": "0.55431616", "text": "def SetDiscRadius(self, _arg: 'double const') -> \"void\":\n return _itkHoughTransform2DLinesImageFilterPython.itkHoughTransform2DLinesImageFilterFF_SetDiscRadius(self, _arg)", "title": "" }, { "docid": "7b12832229cb2560003a8df5b7c12b19", "score": "0.5542856", "text": "def density(self, value):\n self._density = value\n if self._ipoly is not None:\n self.densities[self._ipoly] = value\n self.polygons[self._ipoly].set_color(self._density2color(value))\n # self._update_data()\n self._update_data_plot()\n self.canvas.draw()", "title": "" }, { "docid": "a3bca098f2e33d3d96a4ff5c81709832", "score": "0.55379707", "text": "def set_box_size(self, size):\n self.height_offset = size\n self.height = self.y + self.height_offset\n self.width_offset = size\n self.width = self.x + self.width_offset", "title": "" }, { "docid": "451feab23deedf6d4012ac4029843b90", "score": "0.5528222", "text": "def set_size(self, options):\r\n\r\n # Set dimensions or scale for the chart.\r\n self.width = options.get('width', self.width)\r\n self.height = options.get('height', self.height)\r\n self.x_scale = options.get('x_scale', 1)\r\n self.y_scale = options.get('y_scale', 1)\r\n self.x_offset = options.get('x_offset', 0)\r\n self.x_offset = options.get('y_offset', 0)", "title": "" }, { "docid": "d4e57727699ee763bcc898394042d13d", "score": "0.54924905", "text": "def SetThickness(self, num):\n self.thickness = num\n self.pen = wx.Pen(self.colour, self.thickness, wx.SOLID)\n self.Notify()", "title": "" }, { "docid": "f7dda58e5276fb465efd615c98eaab08", "score": "0.54830927", "text": "def setDim(self,dim):\n self.dim = dim", "title": "" }, { "docid": "1dce63ed407cf315881c0b72a8e6631e", "score": "0.5478455", "text": "def set_size(self, xs=1.0, ys=1.0, zs=1.0):\n self.xSize = xs\n self.ySize = ys\n self.zSize = zs", "title": "" }, { "docid": "08efe673d7479c501228a51a6a8d0411", "score": "0.5474349", "text": "def setDp(self, dp): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "89be1fbfca8beab5cd6f04defd190868", "score": "0.54719377", "text": "def set_size(self, size):\n if size < 2:\n raise ValueError(\"Size must be greater than 1.\")\n self.size = size", "title": "" }, { "docid": "44bb93382ce0d0fae40990ed08af856f", "score": "0.5466153", "text": "def _set_pixel_size(self) -> None:\n if self.constellation in [\n Constellation.L8,\n Constellation.L9,\n Constellation.L7,\n ] or (\n 
self.constellation in [Constellation.L4, Constellation.L5]\n and self.instrument == LandsatInstrument.TM\n ):\n pixel_size = 30.0\n else:\n pixel_size = 60.0\n\n self.pixel_size = pixel_size", "title": "" }, { "docid": "18095b36ae3497c75e37d1975fce6f65", "score": "0.5446678", "text": "def test_diameter():\n c1 = Circle(10)\n c2 = Circle(4)\n\n assert c1.diameter == 20\n assert c2.diameter == 8\n\n c2.radius = 12\n c1.radius = 180\n assert c2.diameter == 24\n assert c1.diameter == 360", "title": "" }, { "docid": "d83c9faa49c8787b20a531664fc1ead4", "score": "0.5443408", "text": "def setPatchSize(self, patchSizeX, patchSizeY):\n self.patchSize = LVecBase2i(patchSizeX, patchSizeY)", "title": "" }, { "docid": "44d68aeb8546298576b01c17b39c2715", "score": "0.54306895", "text": "def __setattr__(self, name, value):\r\n if name in ('cx', 'cy'):\r\n value_str = str(int(value))\r\n self.set(name, value_str)\r\n else:\r\n super(CT_SlideSize, self).__setattr__(name, value)", "title": "" }, { "docid": "600b331502dbf03c007ef2f407d6eb79", "score": "0.54270935", "text": "def setSize(self, x=None, y=None, z=None, size=None):\n if size is not None:\n x = size.x()\n y = size.y()\n z = size.z()\n self.__size = [x, y, z]\n self.update()", "title": "" }, { "docid": "d11e27c1b0c940b5b5620e565a8369e7", "score": "0.54261667", "text": "def set_dy(self, dy):\n assert dy >= 0., \"dy must be nonnegative\"\n self.dy = float(dy)", "title": "" }, { "docid": "4baebfb8054d85aa42a1c5e5986725e2", "score": "0.54051864", "text": "def setPixelSize(self, pixel_size):\n self.hdf5.attrs['pixel_size'] = pixel_size", "title": "" }, { "docid": "b00c6f038307e2f7d5b09c612daccf1e", "score": "0.5402376", "text": "def set_mdot_charge(self): \n\n self.mdot_charge =( \n (self.RPM / 2. * self.displacement * self.eta_V *\n self.air.rho) / 60.\n ) \n # charge flow (kg/s) in engine", "title": "" }, { "docid": "afe88b6fab3928584dffe948cdc9fd0b", "score": "0.53907526", "text": "def size(self, size):\n\n\t\tif self.fixed:\n\t\t\traise ValueError('cannot change the size of fixed dimension {}'.format(self.name))\n\n\t\t# size must be a postive integer\n\t\tif size >= 0 and type(size) == int:\n\t\t\tself._size = size\n\t\telse:\n\t\t\traise TypeError('size must be positive int')", "title": "" }, { "docid": "91ba1656824a8632f2459354105b448f", "score": "0.53904915", "text": "def set_size(self, size):\n self._check_scene_editor()\n self.scene_editor.set_size(size)", "title": "" }, { "docid": "ab4a82cee33ccb00484a0248f122b88d", "score": "0.5373509", "text": "def setDof(self, dof):\n self.dof = dof\n self.norm = None", "title": "" }, { "docid": "4f1f31cc85dd8747286d3867eca52793", "score": "0.53572285", "text": "def setDesiredVelocity(self, dxdes):\n\t\tself.dxdes = dxdes", "title": "" }, { "docid": "308baea7b07df81c23d77107e5280195", "score": "0.5340301", "text": "def set_aperture(self, aperture_size, sky_annulus=None):\n\n self.aperture_size = aperture_size\n if isinstance(sky_annulus, (list, tuple)) and len(sky_annulus) == 2:\n self.sky_annulus = sky_annulus\n else:\n self.sky_annulus = (2.0 * self.aper, 4.0 * self.aper)", "title": "" }, { "docid": "60c0370c307b54599c7e956c39c1de2f", "score": "0.5333745", "text": "def _set_height(self, height: int) -> None:", "title": "" }, { "docid": "e2184447ffeb416252f5c4b032649ace", "score": "0.5308759", "text": "def set_tick(self, size: float = 1) -> None:\n self.set_dxf_attrib(\"dimtsz\", size)", "title": "" }, { "docid": "b5e413d26ce490de4279852532162627", "score": "0.53087366", "text": "def set_size(self, 
new_size):\n self.size = new_size", "title": "" }, { "docid": "d193d1a1c4295dd9bab67272222355aa", "score": "0.5305684", "text": "def setPlotSize(self, _plotSizeWidth, _plotSizeHeight): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "659eda28dde6b456c359fdffcbd9887f", "score": "0.5302359", "text": "def __init__(self, size=6):\n self.sides = size", "title": "" }, { "docid": "7fcada28f08f7e6ad9d69d368cf88cb9", "score": "0.53008664", "text": "def SetSize(self, value):\n\n self._size = value", "title": "" }, { "docid": "357b36843ef2a421d8db6d3a6b344ee4", "score": "0.53003496", "text": "def diameter(self):\n if self.__Order == 0: return 0\n return self.distance_matrix().max()", "title": "" }, { "docid": "ec660baa7a7c50eeaa5640e354d2cabe", "score": "0.5296701", "text": "def circum_ference(diameter):\n return 22*diameter/7", "title": "" }, { "docid": "e7f4e2a6cd8eadcf6833d75943730fb7", "score": "0.5290093", "text": "def setSize(self, width, height):\n self.WIDTH = width\n self.HEIGHT = height\n self.PIXELCOUNT = self.WIDTH * self.HEIGHT\n self.progressStep = self.PIXELCOUNT / 100\n self.camera = Camera(HomVector(0, 0, 6, 1), HomVector(0, 0, 0, 1), HomVector(0, 1, 0, 0), 45, self.WIDTH, self.HEIGHT)", "title": "" }, { "docid": "95c55420b28ba1ac428a6f8a31c656bd", "score": "0.5280665", "text": "def radius(self, radius):\n\n self._radius = radius", "title": "" }, { "docid": "cbcefa294f9801516cc919d9bb8b2084", "score": "0.5267082", "text": "def density(self, value):\n self._data[\"density\"] = float(value)\n self._cache.delete(\"mass_properties\")", "title": "" }, { "docid": "212fb16280274096f5b36e5653685767", "score": "0.5265638", "text": "def set_size(size):", "title": "" }, { "docid": "1bd5c9671dcee33c9929f4f32e76d89e", "score": "0.5263938", "text": "def set_size(self, width, height):\n\n self.width = width\n self.height = height\n self.render_list.size(width, height)\n return self", "title": "" }, { "docid": "2eadee9f06119520e665d432a1ba68db", "score": "0.5262886", "text": "def radius(self, radius: float):\n\n self._radius = radius", "title": "" }, { "docid": "e1aedf8d2b58ece5d986463a75957809", "score": "0.5258798", "text": "def _changeNeuronSize(self,neuron,new_radius):\r\n\r\n x = neuron[\"x\"]\r\n y = neuron[\"y\"]\r\n\r\n # point 1\r\n UL_x = self._convertToPixels(x - new_radius) + 1\r\n UL_y = self._convertToPixels(y - new_radius) + 1\r\n # point 2\r\n LR_x = self._convertToPixels(x + new_radius)\r\n LR_y = self._convertToPixels(y + new_radius)\r\n\r\n self.canvas.coords(neuron[\"circle\"][\"object\"], UL_x, UL_y, LR_x, LR_y)", "title": "" }, { "docid": "0a0161464fac0dd184f54d8cf74308af", "score": "0.5254805", "text": "def topo_diameter(self):\n import math\n \n Temp = 0\n for i in range(self.nodenum):\n for j in range(self.nodenum):\n pathlist = []\n self.pathij(i, j, pathlist)\n distance = []\n \n for k in range(len(pathlist)):\n distance.append(len(pathlist[k]) - 1)\n \n if(len(distance) == 0):\n continue\n else:\n if(min(distance) >= Temp):\n Temp = min(distance)\n \n self.topodiameter = Temp", "title": "" }, { "docid": "4fb1ef5db6543e8745a1ac1ef89cfd9d", "score": "0.52368236", "text": "def set_dc(self, dc):\n self.dc = float(dc)", "title": "" }, { "docid": "001918febd29662654208c30aa10985f", "score": "0.5233458", "text": "def SetOtherSize( self, size ):\n\t\tif isinstance( size,
(int,long)):\n\t\t\tself.otherSize = size\n\t\telse:\n\t\t\tif self.dockSide in ('l','r'):\n\t\t\t\tself.otherSize = size[0]\n\t\t\telse:\n\t\t\t\tself.otherSize = size[1]\n\t\treturn self.otherSize", "title": "" }, { "docid": "fcc7a652747410f3efb57533467544e1", "score": "0.5224733", "text": "def size(self, size):\n self._size = size\n self._resize_img()", "title": "" }, { "docid": "1bd211125b5958b5cc2c2e1adefcc990", "score": "0.5221301", "text": "def set_voxel_size(self, size):\n self.voxel_size = size", "title": "" }, { "docid": "aeed35ca2196b1dcc999ceb16b014499", "score": "0.51937336", "text": "def __init__(self, (x,y), size):\n self.x = x\n self.y = y\n self.size = size\n self.colour = (0,128,255)\n self.thickness = 1\n self.speed = 0.01\n self.angle = math.pi/2", "title": "" } ]
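The negative passages above all revolve around validated size/dimension setters. As a hedged illustration only (none of the following appears in the dataset; the `Shape` name and the property layout are assumptions), here is a minimal runnable sketch combining the validation pattern that recurs in those passages:

```python
# Illustrative sketch, not from the dataset: a size property that
# validates its input, in the spirit of the setters above.
class Shape:
    def __init__(self, size=0):
        self._size = 0
        self.size = size  # route through the setter so validation runs

    @property
    def size(self):
        return self._size

    @size.setter
    def size(self, value):
        # several passages reject non-integers and negative values
        if not isinstance(value, int) or value < 0:
            raise TypeError("size must be a non-negative int")
        self._size = value

    @property
    def diameter(self):
        # for a circle-like shape, diameter tracks the radius-like size
        return 2 * self._size
```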
8c9d60bda2e7faa6539e0ead3b569e28
Initialize the emitter with an event loop.
[ { "docid": "3d57c407a3975b589594f2da47098ef7", "score": "0.673187", "text": "def __init__(self, loop=None):\n self._loop = loop or asyncio.get_event_loop()\n self._listeners = collections.defaultdict(list)\n self._once = collections.defaultdict(list)\n self._max_listeners = self.DEFAULT_MAX_LISTENERS", "title": "" } ]
[ { "docid": "e0c3369da3f4c1abd76581fe10e69f2f", "score": "0.69916075", "text": "def __init__(self, loop=None) -> None:\n\n self.loop = loop if loop else asyncio.get_event_loop()", "title": "" }, { "docid": "1fb97ccd138ea175404892d6753ba676", "score": "0.6815965", "text": "def setUp(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)", "title": "" }, { "docid": "1fb97ccd138ea175404892d6753ba676", "score": "0.6815965", "text": "def setUp(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)", "title": "" }, { "docid": "587b2fbc314808e7183dcfc4a9280b14", "score": "0.6783072", "text": "def _init_process(self) -> None:\n # create new event loop after fork\n asyncio.get_event_loop().close()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)", "title": "" }, { "docid": "eaf4fdb1ba981668590d4593254d7e8d", "score": "0.67776525", "text": "def __init__(self, loop: Optional[AbstractEventLoop] = None) -> None:\n self.loop: AbstractEventLoop = Helper.ensure_loop(loop)\n self._shutdown_event: Event = Event(loop=self.loop)\n self._shutdown_from_signal: bool = False\n\n # SIGINT for local debugging\n self.loop.add_signal_handler(SIGINT, self._initiate_shutdown_signal)\n self.loop.add_signal_handler(SIGTERM, self._initiate_shutdown_signal)", "title": "" }, { "docid": "9bc256bc25d8441dc118b3d3a70ac00d", "score": "0.6658748", "text": "def new_event_loop():\n return Loop()", "title": "" }, { "docid": "9556f5dfb622bc049a47a7a5df6ffd87", "score": "0.65241927", "text": "def create_io_loop(self):\n assert self.ioloop is None # This should only ever be run once\n\n def _run(loop):\n asyncio.set_event_loop(loop)\n loop.run_forever()\n\n self.ioloop = asyncio.new_event_loop()\n self.ioloop.set_exception_handler(self._ioloop_exc_handler)\n threading.Thread(target=partial(_run, self.ioloop), name=\"Thread-asyncio\", daemon=True).start()", "title": "" }, { "docid": "86e89cba19b488613da5d7df70149df1", "score": "0.638678", "text": "def __init__(self, event_manager):\r\n self.__eventManager = event_manager", "title": "" }, { "docid": "1d3c4f78fa64fa0b3c56b3f2d0239296", "score": "0.6357002", "text": "def __init__(self, options: Options, loop: Optional[asyncio.AbstractEventLoop] = None):\n self.loop = loop or asyncio.get_event_loop()\n super().__init__(self.loop)\n self.options = options\n\n self._future = self.loop.create_future()\n self._engine = EngineService(self)\n self._socket = SocketService(self)\n\n self.on('message_received', self._on_message_received)\n self.on('socket_open', self._on_socket_open)", "title": "" }, { "docid": "ca43c8b669973b423461a191a4ed877c", "score": "0.6318867", "text": "def set_event_loop(self, loop: typing.Optional[asyncio.AbstractEventLoop]) -> None:\n self._local._set_called = True\n assert loop is None or isinstance(loop, asyncio.AbstractEventLoop)\n self._local._loop = loop\n\n if (\n self._watcher is not None\n and threading.current_thread() is threading.main_thread()\n ):\n self._watcher.attach_loop(loop)", "title": "" }, { "docid": "228ed8ce030c442248b06bbf93163688", "score": "0.63083553", "text": "def event_loop():\n return asyncio.get_event_loop()", "title": "" }, { "docid": "e494d5dc916560caa19c75f4512e8ff3", "score": "0.62807494", "text": "def get_event_loop(self):\n loop = super().get_event_loop()\n # Do something with loop ...\n return loop", "title": "" }, { "docid": "122243c37574fad0bc0a1edb232ec86d", "score": "0.6272463", "text": "async def _init_main_loop(self):", "title": "" }, { "docid": 
"c501c2b433049be04684f942b0eaed70", "score": "0.6232068", "text": "def __init__(self,\n prodables: List[Prodable]=None,\n loop=None,\n debug=False,\n autoStart=True):\n self.prodables = list(prodables) if prodables is not None \\\n else [] # type: List[Prodable]\n\n # if sys.platform == 'linux':\n # asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n\n if loop:\n self.loop = loop\n else:\n try:\n # if sys.platform == 'win32':\n # loop = asyncio.ProactorEventLoop()\n # asyncio.set_event_loop(loop)\n evl = asyncio.get_event_loop()\n if evl.is_closed():\n raise RuntimeError(\"event loop was closed\")\n except Exception as ex:\n logger.debug(\"Looper could not get default event loop; \"\n \"creating a new one: {}\".format(ex))\n # Trying out uvloop for linux\n evl = asyncio.new_event_loop()\n asyncio.set_event_loop(evl)\n self.loop = evl\n\n logger.info(\"Starting up indy-node\")\n self.runFut = self.loop.create_task(self.runForever()) # type: Task\n self.running = True # type: bool\n\n self.loop.set_debug(bool(int(os.getenv('PYTHONASYNCIODEBUG', debug))))\n\n # TODO: uncomment this when python bug fixed (not just closed, but solved!)\n # https://bugs.python.org/issue23548\n #\n # signals = [item for item in dir(signal)\n # if item.startswith(\"SIG\") and item[3] != \"_\"]\n\n self.signals = [\"SIGINT\", \"SIGTERM\"]\n\n setSignal = \\\n signal.signal if sys.platform == 'win32' \\\n else self.loop.add_signal_handler\n\n for sigName in self.signals:\n try:\n logger.debug(\"Setting handler for {}\".format(sigName))\n sigNum = getattr(signal, sigName)\n setSignal(sigNum, self.handleSignal)\n except RuntimeError as e:\n logger.debug(\n \"Cannot set handler for {} because {}\".format(sigName, e))\n\n self.autoStart = autoStart # type: bool\n if self.autoStart:\n self.startall()", "title": "" }, { "docid": "0c50fe5b3413c092ae58fff6b9ef481e", "score": "0.61680555", "text": "def __init__(\n self, loop: asyncio.AbstractEventLoop, logger: logging.Logger, probe_routine: Callable[[], Awaitable[bool]]\n ):\n super().__init__()\n self.loop = loop\n self.startup_complete = loop.create_future()\n self.disconnected = loop.create_future()\n self.exited = loop.create_future()\n self._logger = logger\n self._probe = probe_routine", "title": "" }, { "docid": "426600a2b9f22e238812ea45c9e8e4aa", "score": "0.6110142", "text": "def __init__(\n self, loop: asyncio.AbstractEventLoop, identifier: Optional[str] = None\n ):\n super().__init__()\n self.loop = loop\n self.identifier = identifier", "title": "" }, { "docid": "097d78fd0462e27b001b785319986bce", "score": "0.6069578", "text": "def __init__(self, **kwargs):\n self.communicator = self.open_listener_connection(DATA[\"id\"])\n self.loop = asyncio.get_event_loop()\n asyncio.ensure_future(self.communicator.start_listening())\n super().__init__(**kwargs)", "title": "" }, { "docid": "10117bdba94a726d593aef6a9989c90f", "score": "0.6060124", "text": "def init(self):\n listener_class = get_listener_class()\n logger.info('Using %s as a listener' % (listener_class, ))\n self.listener = listener_class()", "title": "" }, { "docid": "cdbfd4efe2b6ecfc2a3915549f7f6f6e", "score": "0.601411", "text": "async def __aenter__(self):\n self.loop = asyncio.get_event_loop()\n await self.setup()\n return self", "title": "" }, { "docid": "a1eb6bf9ace8dc84eeb62b9e8c6b65be", "score": "0.5978569", "text": "def setup(self, owner: Entity, loop: ActionLoop) -> None:\n self.owner = owner\n self.loop = loop\n self.start_tick = loop.get_tick()", "title": "" }, { "docid": 
"a80e82fa995ec7b8e5dea7bae7a9da51", "score": "0.5948992", "text": "def event_loop(self):\n return self.__loop_from_run_thread", "title": "" }, { "docid": "7546a93ea888d8631d4b63076bf72997", "score": "0.59326774", "text": "def autonomousInit(self):\r\n self.auto_loop_counter = 0", "title": "" }, { "docid": "09ec980a5d9135d831e02fa5b1f49941", "score": "0.59053296", "text": "def event_loop():\n loop = uvloop.Loop()\n yield loop\n loop.close()", "title": "" }, { "docid": "512fa841417a0688afe9fbbba9c995b9", "score": "0.587469", "text": "def _init_events(self):\n self._init_pipe()\n LOG.info('Starting native event thread')\n event_thread = native_threading.Thread(target = self.read_from_tap)\n event_thread.setDaemon(True)\n event_thread.start()\n LOG.info('Starting green dispatch thread')\n dispatch_thread = hub.spawn(self.dispatch_thread)", "title": "" }, { "docid": "d847e53bc910548e9b3eba5e8210624c", "score": "0.5860455", "text": "def __init__(self, args=None):\n self.stop_request = Event()\n self.force_stop = False\n self.started = False\n self.completed = False\n self.config = None\n self.args = args\n self.workerPool = []\n self.manager = None\n self._setup_loggers(args=args)\n # attach to the logging queue\n self.logger.info(\"Logging Setup Complete.\")\n\n self._generator_queue_size = getattr(self.args, \"generator_queue_size\", 500)\n if self._generator_queue_size < 0:\n self._generator_queue_size = 0\n self.logger.info(\n \"Set generator queue size:{}\".format(self._generator_queue_size)\n )\n\n if self.args and \"configfile\" in self.args and self.args.configfile:\n self._load_config(self.args.configfile, args=args)", "title": "" }, { "docid": "3ac5fd2daba7cc0a11c8782f7babb4d6", "score": "0.58336735", "text": "def __init__(self, loop, run, *args, **kwargs):\n self.loop = loop\n self.run = run\n self.args = args\n self.kwargs = kwargs\n self.watcher = None", "title": "" }, { "docid": "4ce2e6580fda6ff071869c45cc7ddecf", "score": "0.5827722", "text": "def event_loop():\n loop = asyncio.get_event_loop_policy().new_event_loop()\n yield loop\n loop.close()", "title": "" }, { "docid": "b2dc75ab97179382e49fbfdeed970b17", "score": "0.5813702", "text": "async def on_start(self):\n await self.real_init()", "title": "" }, { "docid": "a30fbb44ffd972dfb5dbdf5ad583dc6e", "score": "0.58116555", "text": "def get_or_create_event_loop() -> asyncio.AbstractEventLoop:\n try:\n loop = asyncio.get_event_loop()\n except (DeprecationWarning, RuntimeError):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n return loop", "title": "" }, { "docid": "7ca0b75fa14a64a7bc1427da032f763c", "score": "0.5756037", "text": "async def on_init(self):\n pass", "title": "" }, { "docid": "990ab8967ff19dec856ebfdfd6ec89b4", "score": "0.57422704", "text": "def test_eventloop_factory(get_io_loop):\n assert isinstance(get_io_loop, eventloop.NonBlockingPoll)", "title": "" }, { "docid": "3cef879a89efbb8de4a58922132477c7", "score": "0.5739024", "text": "def main_loop(self):\n\n raise NotImplementedError()", "title": "" }, { "docid": "d56d500419201411cc89d5215b7a8419", "score": "0.5714047", "text": "def main_loop(self):\n LOGGER.info('Entering main event loop...')\n while self._handle_faucet_events():\n pass", "title": "" }, { "docid": "6a0dfce6f921340de1ba9eebbc105f6d", "score": "0.5713494", "text": "def event_loop(request: FixtureRequest) -> Iterator[asyncio.AbstractEventLoop]:\n loop = asyncio.get_event_loop_policy().new_event_loop()\n yield loop\n loop.close()", "title": "" }, { "docid": 
"4b896883eb6a9b880b4c64f1bea0e1ff", "score": "0.57091033", "text": "def on_loop_start(self):\n pass", "title": "" }, { "docid": "d4d9bcbcdd80a51ba907c39943cf77ab", "score": "0.56964403", "text": "def __init__(self, event_queue):\n\n Thread.__init__(self)\n self.error_handler = NullEventHandler()\n self._event_queue = event_queue\n self._stop_flag = 0", "title": "" }, { "docid": "8bc37cf9e8894adb53061cdcf4d6f9fe", "score": "0.5687923", "text": "def new_event_loop(self) -> PyObjCEventLoop:\n return PyObjCEventLoop()", "title": "" }, { "docid": "c27b05432f7e3b90299b283012b2e3ab", "score": "0.5656225", "text": "def init_kernel(self): # type: () -> None\n # Reset the core\n self.core.reset()\n\n # Set direction of pins\n self.ttl_out.output()\n self.ttl_in.input()\n\n # Wait until event is submitted\n self.core.wait_until_mu(now_mu())\n\n # Record DMA burst\n self._record_dma_burst()", "title": "" }, { "docid": "59cf8d38e2e9094386f033fceb0df908", "score": "0.56523794", "text": "def initialize(self):\n self.log(self.args)\n\n self.remote = self.args[\"remote\"]\n self.media_player = self.args[\"media_player\"]\n\n self.listen_event(self.handle_event, \"deconz_event\")", "title": "" }, { "docid": "3952c92236e7f2db9eb7dd55b6e730d9", "score": "0.5652089", "text": "def setup(self):\n\n # NOTE: this thread is never closed\n thread = threading.Thread(target=Server(self.nao).serve)\n thread.daemon = True\n thread.start()\n\n self.nao.speak(\"Ready to connect!\")", "title": "" }, { "docid": "b6baec542f13bda1f55a48bfe794d757", "score": "0.5641426", "text": "def event_loop(self):\n self._stop = False\n while not self._stop:\n self.handle()", "title": "" }, { "docid": "8ad77c761671c4a4d5b68ab5ea83c1a5", "score": "0.56314456", "text": "def initialize(self):\n #self.listen_event(self.receive_telegram_text, 'telegram_text')\n self.settings = self.get_app(\"settings\")\n self.listen_event(self.receive_telegram_command, 'telegram_command')\n self.listen_event(self.receive_telegram_callback, 'telegram_callback')", "title": "" }, { "docid": "59eed19f8de179da931e19028cffc616", "score": "0.5625552", "text": "def __init__(self, target):\n self._target = target\n self._active = Event()\n self._thread = None", "title": "" }, { "docid": "e96de400566530d0753ec5f1066bc016", "score": "0.56232387", "text": "async def init(self):\n pass", "title": "" }, { "docid": "3b8ae8f50c71825ac83f06cd5f4aeef6", "score": "0.56219727", "text": "def __init__(self, context, detach=False):\n self.context = context\n\n # the thread pool to convert block execution of task into async process\n self.thread_pool = ThreadPoolExecutor(4)\n self.wait_queue = queues.Queue()\n self.exec_queue = queues.Queue()\n\n def engine_loop():\n _ioloop = ioloop.IOLoop.current()\n _ioloop.add_callback(self.daemon_loop)\n _ioloop.start()\n\n # if the detached mode is enabled\n # use a seperated thread to boot the io-loop\n self.loop_thread = None\n if detach:\n logger.debug(\"engine running in detach mode\")\n self.loop_thread = threading.Thread(target=engine_loop)\n self.loop_thread.start()", "title": "" }, { "docid": "1271b33e391bd72271f95d266c6d4a4a", "score": "0.5619265", "text": "def event_manager(**kwargs):\n manager = EventManager(**kwargs)\n return manager.run()", "title": "" }, { "docid": "aa895b64990f0c04782569a15c978d40", "score": "0.5610994", "text": "def create_listener(self):\n self._listener = ListenerThread(self._app, self.options)", "title": "" }, { "docid": "1ecfaf521575c5b3322ba8ca7f424a34", "score": "0.56055164", "text": "def start(self):\n 
self.run = True\n asyncio.ensure_future(self.listen(), loop=self.loop)", "title": "" }, { "docid": "c9d72f324e9ea93f7760a45b7bf96811", "score": "0.5600224", "text": "def start(self, loop):\n raise NotImplementedError(\"subclass {} should implement this method\"\n .format(self))", "title": "" }, { "docid": "2f534f24c501f10f24ed4736234bfda6", "score": "0.55800086", "text": "async def initialize(self):\n pass", "title": "" }, { "docid": "02f5810db0693c43783ba81e1854bd30", "score": "0.55662316", "text": "def init(self):\n self.runner.init()", "title": "" }, { "docid": "2fe39f58e2bff124fd8b391ff5ca11ea", "score": "0.5553751", "text": "def setup(self):\n # set the Event lock\n self.lock.set()\n\n # setup configuration\n self.config = self.get_config()\n\n if self.config.get('cli_command'):\n self.handle_cli_command(self.config['cli_command'])\n sys.exit()\n\n if self.config['debug']:\n syslog(\n LOG_INFO,\n 'py3status started with config {}'.format(self.config)\n )\n\n # setup i3status thread\n self.i3status_thread = I3status(\n self.lock,\n self.config['i3status_config_path'],\n self.config['standalone']\n )\n if self.config['standalone']:\n self.i3status_thread.mock()\n else:\n self.i3status_thread.start()\n while not self.i3status_thread.ready:\n if not self.i3status_thread.is_alive():\n err = self.i3status_thread.error\n raise IOError(err)\n sleep(0.1)\n if self.config['debug']:\n syslog(\n LOG_INFO,\n 'i3status thread {} with config {}'.format(\n 'started' if not self.config['standalone'] else 'mocked',\n self.i3status_thread.config\n )\n )\n\n # setup input events thread\n self.events_thread = Events(\n self.lock,\n self.config,\n self.modules,\n self.i3status_thread.config\n )\n self.events_thread.start()\n if self.config['debug']:\n syslog(LOG_INFO, 'events thread started')\n\n # suppress modules' ouput wrt issue #20\n if not self.config['debug']:\n sys.stdout = open('/dev/null', 'w')\n sys.stderr = open('/dev/null', 'w')\n\n # get the list of py3status configured modules\n self.py3_modules = self.i3status_thread.config['py3_modules']\n\n # get a dict of all user provided modules\n user_modules = self.get_user_modules()\n if self.config['debug']:\n syslog(LOG_INFO, 'user_modules={}'.format(user_modules))\n\n if self.py3_modules:\n # load and spawn i3status.conf configured modules threads\n self.load_modules(self.py3_modules, user_modules)\n else:\n # legacy behaviour code\n # load and spawn user modules threads based on inclusion folders\n self.load_modules(user_modules, user_modules)", "title": "" }, { "docid": "7724df2272e36e68d0c1ce3729fa3a83", "score": "0.5553123", "text": "def create_listener(self, logger):\n self._event_mask = (CGEventMaskBit(kCGEventLeftMouseDown) |\n CGEventMaskBit(kCGEventFlagsChanged) |\n CGEventMaskBit(kCGEventKeyDown))\n self._event_tap = CGEventTapCreate(kCGHIDEventTap,\n kCGHeadInsertEventTap,\n kCGEventTapOptionDefault,\n self._event_mask,\n _callback_func,\n logger)\n\n self._runLoopSource = CFMachPortCreateRunLoopSource(\n kCFAllocatorDefault, self._event_tap, 0)\n CFRunLoopAddSource(CFRunLoopGetCurrent(), self._runLoopSource,\n kCFRunLoopCommonModes)\n CGEventTapEnable(self._event_tap, True)\n CFRunLoopRun()", "title": "" }, { "docid": "38597fd1ba66c3c35ddae685e088130e", "score": "0.55393517", "text": "def main(self) -> None:\n self.loop.run()", "title": "" }, { "docid": "3951c07b9b577bae2e7b522c31f73931", "score": "0.5534345", "text": "def start_event_listener(self):\n self._sdl.start_event_listener()", "title": "" }, { "docid": 
"f5547529649ee9dc0b74674f0230ee2f", "score": "0.55334944", "text": "def install():\n __asyncio.set_event_loop_policy(EventLoopPolicy())", "title": "" }, { "docid": "a601e1f5433201403735e306e35f22db", "score": "0.5532086", "text": "def start_glib_loop(cls):\n GObject.threads_init()\n Gst.init(None)\n\n cls.glib_loop = GObject.MainLoop()\n cls.glib_thread = threading.Thread(target=cls.glib_loop.run)\n cls.glib_thread.start()", "title": "" }, { "docid": "e2bec545a3c07c9b40ae66a0f0c5893f", "score": "0.5532003", "text": "def run(self):\n self.running = True\n self.evManager.Post(InitializeEvent())\n while self.running:\n newTick = TickEvent()\n self.evManager.Post(newTick)", "title": "" }, { "docid": "a4446993c8b673c88b433e564435202a", "score": "0.55284584", "text": "def run(self) -> None:\n if self._was_started.is_set() and not self._was_stopped.is_set():\n asyncio.set_event_loop(self._loop)\n self._loop.run_forever()\n else:\n raise Exception(\"Cannot call directly! Begin EventLoopThreadSafe by start method.\")", "title": "" }, { "docid": "5de1abdf8072071e6c46b8dbe29a5585", "score": "0.55279434", "text": "def set_ioloop(self, ioloop=None):\n self._ioloop_set_to = ioloop", "title": "" }, { "docid": "1dd075090003f8d5616f8e61016c1b39", "score": "0.5518384", "text": "def start(self):\n self._connection = self.connect()\n self._connection.ioloop.start()", "title": "" }, { "docid": "487587861c4c2d1678e357d54e105fdf", "score": "0.5509251", "text": "async def session_start(self):\n self.ast_ctx.add_logger_handler(self.console)\n _LOGGER.info(\"Starting session %s\", self.global_ctx_name)\n\n self.tasks[\"housekeep\"] = {asyncio.create_task(self.housekeep_run())}\n self.tasks[\"startup_timeout\"] = {asyncio.create_task(self.startup_timeout())}\n\n self.iopub_server, self.iopub_port = await self.start_one_server(self.iopub_listen)\n self.heartbeat_server, self.heartbeat_port = await self.start_one_server(self.heartbeat_listen)\n self.control_server, self.control_port = await self.start_one_server(self.control_listen)\n self.stdin_server, self.stdin_port = await self.start_one_server(self.stdin_listen)\n self.shell_server, self.shell_port = await self.start_one_server(self.shell_listen)\n\n #\n # For debugging, can use the real ZMQ library instead on certain sockets; comment out\n # the corresponding asyncio.start_server() call above if you enable the ZMQ-based\n # functions here. 
You can then turn of verbosity level 4 (-vvvv) in hass_pyscript_kernel.py\n # to see all the byte data in case you need to debug the simple ZMQ implementation here.\n # The two most important zmq functions are shown below.\n #\n # import zmq\n # import zmq.asyncio\n #\n # def zmq_bind(socket, connection, port):\n # \"\"\"Bind a socket.\"\"\"\n # if port <= 0:\n # return socket.bind_to_random_port(connection)\n # # _LOGGER.debug(f\"binding to %s:%s\" % (connection, port))\n # socket.bind(\"%s:%s\" % (connection, port))\n # return port\n #\n # zmq_ctx = zmq.asyncio.Context()\n #\n # ##########################################\n # # Shell using real ZMQ for debugging:\n # async def shell_listen_zmq():\n # \"\"\"Task that listens to shell messages using ZMQ.\"\"\"\n # try:\n # _LOGGER.debug(\"shell_listen_zmq connected\")\n # connection = self.config[\"transport\"] + \"://\" + self.config[\"ip\"]\n # shell_socket = zmq_ctx.socket(zmq.ROUTER)\n # self.shell_port = zmq_bind(shell_socket, connection, -1)\n # _LOGGER.debug(\"shell_listen_zmq connected\")\n # while 1:\n # msg = await shell_socket.recv_multipart()\n # await self.shell_handler(shell_socket, msg)\n # except asyncio.CancelledError:\n # raise\n # except Exception:\n # _LOGGER.error(\"shell_listen exception %s\", traceback.format_exc(-1))\n # await self.housekeep_q.put([\"shutdown\"])\n #\n # ##########################################\n # # IOPub using real ZMQ for debugging:\n # # IOPub/Sub:\n # async def iopub_listen_zmq():\n # \"\"\"Task that listens to iopub messages using ZMQ.\"\"\"\n # try:\n # _LOGGER.debug(\"iopub_listen_zmq connected\")\n # connection = self.config[\"transport\"] + \"://\" + self.config[\"ip\"]\n # iopub_socket = zmq_ctx.socket(zmq.PUB)\n # self.iopub_port = zmq_bind(self.iopub_socket, connection, -1)\n # self.iopub_socket.add(iopub_socket)\n # while 1:\n # wire_msg = await iopub_socket.recv_multipart()\n # _LOGGER.debug(\"iopub received %s\", wire_msg)\n # except asyncio.CancelledError:\n # raise\n # except EOFError:\n # await self.housekeep_q.put([\"shutdown\"])\n # _LOGGER.debug(\"iopub_listen got eof\")\n # except Exception as err:\n # _LOGGER.error(\"iopub_listen exception %s\", err)\n # await self.housekeep_q.put([\"shutdown\"])\n #\n # self.tasks[\"shell\"] = {asyncio.create_task(shell_listen_zmq())}\n # self.tasks[\"iopub\"] = {asyncio.create_task(iopub_listen_zmq())}\n #", "title": "" }, { "docid": "c1858a35b41b675f2f849587cc96c0d8", "score": "0.5507766", "text": "def __init__(self, original_emitter):\n self.original_emitter = original_emitter", "title": "" }, { "docid": "ddfe7ea246c6dfe7934abbedd7b0643a", "score": "0.5497893", "text": "def start(self) -> None:\n self.start_async()\n assert self.startup_complete is not None # `start_async` will create it, but mypy doesn't know that\n self._loop.run_until_complete(self.startup_complete)", "title": "" }, { "docid": "4dc9af9c4911d39aa5caa0739606a752", "score": "0.5496606", "text": "def __init__(self, **kwargs):\r\n kwargs['daemon'] = kwargs.get('daemon', True)\r\n super().__init__(**kwargs)\r\n\r\n self.running = threading.Event()\r\n self.running.set()", "title": "" }, { "docid": "edb794732a685a4caeb0099c43856d80", "score": "0.5488664", "text": "def start(self):\n self.__reset_command_loop()\n self.__start_listener_thread()\n self.__start_command_thread()", "title": "" }, { "docid": "13d7026ee0d04330d510be43bf1c8135", "score": "0.54775965", "text": "def init_kernel(self): # type: () -> None\n # Reset the core\n self.core.reset()\n\n # Set direction of 
pin\n self.ttl_out.output()\n\n # Wait until event is submitted\n self.core.wait_until_mu(now_mu())\n\n # Record DMA burst\n self._record_dma_burst()", "title": "" }, { "docid": "c6edd84510fc3901d18e6e4bd9091634", "score": "0.547686", "text": "def _start_loop(self):\n self.th_loop = Thread(target=asyncore.loop, args=(), )\n self.th_loop.start()", "title": "" }, { "docid": "ecce3b93380b5d278eb1f0f67433c8d6", "score": "0.5474614", "text": "def get_loop() -> asyncio.AbstractEventLoop:\n if os.name == \"nt\":\n # for subprocess' pipes on Windows\n loop = asyncio.ProactorEventLoop() # type: ignore\n asyncio.set_event_loop(loop)\n else:\n try:\n loop = asyncio.get_event_loop()\n except RuntimeError:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n asyncio.set_child_watcher(ThreadedChildWatcher())\n return loop", "title": "" }, { "docid": "895bdfe5ea69d4056eda76e8376f1e85", "score": "0.5461658", "text": "def start_event_loop(self, timeout=0):\n if hasattr(self, '_event_loop'):\n raise RuntimeError(\"Event loop already running\")\n id = wx.NewId()\n timer = wx.Timer(self, id=id)\n if timeout > 0:\n timer.Start(timeout * 1000, oneShot=True)\n self.Bind(wx.EVT_TIMER, self.stop_event_loop, id=id)\n\n # Event loop handler for start/stop event loop\n self._event_loop = wxc.EventLoop()\n self._event_loop.Run()\n timer.Stop()", "title": "" }, { "docid": "f7bbc688dd349d33ac3c8def717f1cd6", "score": "0.5458578", "text": "def get_new_event_loop():\n if uvloop:\n return uvloop.new_event_loop()\n else:\n return asyncio.new_event_loop()", "title": "" }, { "docid": "42fbc7a66f76816df4f995257d119afe", "score": "0.54584116", "text": "def __init__(self, *args, **kw):\n self.mutex = threading.Lock()\n if \"mutex\" not in kw:\n kw[\"mutex\"] = self.mutex\n EventDispatcherBase.__init__(self, *args, **kw)", "title": "" }, { "docid": "44abfe11e59fa940dd54fea873e733d9", "score": "0.5453497", "text": "def initialize(self):\n print(\"Engine 1 is initializing. 
Registering device...\")\n self._registerDevice(\"device1\")\n self._setDevice(\"device1\", { \"time\" : self._time, \"timestep\": 0 })", "title": "" }, { "docid": "0167afce13f1f510b52dbb26c1383e2b", "score": "0.54530144", "text": "def initialize_websockets_manager(self):\n self.manager = WebSocketManager()\n self.manager.start()", "title": "" }, { "docid": "d29c3d07a5996614ef90e6117d47b709", "score": "0.54521775", "text": "def __init__(self):\n self._services = []\n self.backdoor_port = eventlet_backdoor.initialize_if_enabled(CONF)", "title": "" }, { "docid": "e94a0ddb5945adc67bef4da14d0d7c9f", "score": "0.5444111", "text": "def setup(bot: Bot) -> None:\n bot.add_cog(Events(bot))", "title": "" }, { "docid": "9a632e83ffabef8dbd4fd9d017fb93f0", "score": "0.54405904", "text": "def get_loop():\n global _BG_LOOP\n try:\n return _BG_LOOP\n except NameError:\n _BG_LOOP = asyncio.new_event_loop()\n _set_async_debug_and_log_level(_BG_LOOP)\n thread = Thread(target=_loop_main, args=(_BG_LOOP,))\n thread.daemon = True # <-- thread will exit when main thread exists\n thread.start()\n _setup_cleanup(_BG_LOOP, thread)\n return _BG_LOOP", "title": "" }, { "docid": "e11221a005a964b171c516512bee266d", "score": "0.5437474", "text": "def initialize_listening(self):\n self.listened_time = 0", "title": "" }, { "docid": "14d125ec782de84de7b90f1185683830", "score": "0.5435087", "text": "def connect(self, loop):\n raise NotImplementedError", "title": "" }, { "docid": "0bdb1a3ab98835c3c41fc77abcb829a3", "score": "0.5433524", "text": "def __init__(self, middleman, config):\n self.analytics = Analytics(config)\n self.middleman = middleman\n self.thread = None\n\n server = Server(\n {'/': self.init},\n io_loop=IOLoop.current(),\n port=config.PORT,\n num_procs=1)\n\n server.start()\n self.server = server\n server.io_loop.add_callback(server.show, \"/\")\n server.io_loop.start()", "title": "" }, { "docid": "4b6a75744dccca240e366093ff32ae91", "score": "0.543057", "text": "def initEvents(self):\r\n SceneManager().registerEventReader(self.do_action)", "title": "" }, { "docid": "3c16d382f5ed9e2e495df4037bfea794", "score": "0.5425537", "text": "def start(self):\n if self._eager_init:\n self.bus.log(\n \"initializing Aglyph singleton and borg component objects\")\n self.init_singletons()\n self.init_borgs()\n self.bus.log(\"starting Aglyph dependency injection support\")\n self.bus.subscribe(\"aglyph-assemble\", self.assemble)\n self.bus.subscribe(\"aglyph-init-singletons\", self.init_singletons)\n self.bus.subscribe(\"aglyph-clear-singletons\", self.clear_singletons)\n self.bus.subscribe(\"aglyph-init-borgs\", self.init_borgs)\n self.bus.subscribe(\"aglyph-clear-borgs\", self.clear_borgs)\n self.bus.subscribe(\"aglyph-clear-weakrefs\", self.clear_weakrefs)", "title": "" }, { "docid": "fe8dab6986fe4154855098026917c1f2", "score": "0.5417963", "text": "def initialize(self):\n self._load_timers()\n self._reset_timer_index()\n if self.active_timers:\n self.log.info(\"found {} active timers\".format(str(len(self.active_timers))))\n self._show_gui()\n self._start_display_update()\n self._start_expiration_check()\n\n # To prevent beeping while listening\n self.add_event(\"recognizer_loop:wakeword\", self.handle_wake_word_detected)\n self.add_event(\n \"mycroft.speech.recognition.unknown\", self.handle_speech_recognition_unknown\n )\n self.add_event(\"speak\", self.handle_speak)\n self.add_event(\"skill.timer.stop\", self.handle_timer_stop)", "title": "" }, { "docid": "2d9b87adc86234be4355d912692e0ab4", "score": "0.54103523", 
"text": "def start_loop(self):\n pass", "title": "" }, { "docid": "15d943ccc1a47b79f2ce5ab04a5f4aca", "score": "0.54057145", "text": "def io_loop(self):\n if self._io_loop is None:\n self._io_loop = DaemonIOLoop()\n self._io_loop._cod_daemon = self\n return self._io_loop", "title": "" }, { "docid": "ef3ba4bb25dbc8b536a937736dcd924c", "score": "0.5401579", "text": "def get_loop():\n\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n try:\n loop = asyncio.get_event_loop()\n except RuntimeError:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n return loop", "title": "" }, { "docid": "745de160cfe20f6bd2a10d59f17d732e", "score": "0.5398136", "text": "def run(self):\n self._connection = self.connect()\n self._connection.ioloop.start()", "title": "" }, { "docid": "a8bdddc8c0bba6c02e434c1d71958791", "score": "0.53943837", "text": "def get_event_loop(self) -> asyncio.AbstractEventLoop:\n if (\n self._local._loop is None\n and not self._local._set_called\n and threading.current_thread() is threading.main_thread()\n ):\n self.set_event_loop(self.new_event_loop())\n\n if self._local._loop is None:\n raise RuntimeError(\n \"There is no current event loop in thread %r.\"\n % threading.current_thread().name\n )\n\n return self._local._loop", "title": "" }, { "docid": "f205ade07c19418dea2cab0f25740e7d", "score": "0.5391418", "text": "def run_soon(coros):\n if _CURRENT_EVENT_LOOP is None:\n raise RuntimeError('no running event loop')\n\n _CURRENT_EVENT_LOOP.run_soon(coros)", "title": "" }, { "docid": "9aece9ae91ddc17961ff2ddbf0f027b9", "score": "0.5387953", "text": "def run(self):\n self.io_loop = IOLoop.current()\n self.event.set()\n\n opts = salt.config.client_config('/etc/salt/master')\n stream = salt.utils.event.get_event('master', io_loop=self.io_loop,\n transport=opts['transport'], opts=opts)\n stream.set_event_handler(self._handle_event_recv)\n\n self.io_loop.start()", "title": "" }, { "docid": "6469f57255af5d95879c26d25cf3c477", "score": "0.53871477", "text": "def init(self) -> None:\n self._configure_started = True\n self.do_init()\n self._configure_finished = True", "title": "" }, { "docid": "66e5449e7afbad7c5da50b690de47c42", "score": "0.5386321", "text": "def __init__(self):\n\t\tself.servo = InitioServo();\n\t\tself._initialisedPorts = {};\n\t\tRPIO.wait_for_interrupts(threaded=True)", "title": "" }, { "docid": "2f0fbd86f19d055f7f43cd316a07f348", "score": "0.5383896", "text": "def __init__(self, email: str, password: str) -> None:\n self.loop = asyncio.new_event_loop()\n self.loop.set_exception_handler(sse_exception_handler)\n self.websession: ClientSession = ClientSession(loop=self.loop, read_timeout=None)\n self.event_bus: EventBus = EventBus()\n self.state = NOT_RUNNING\n self.available = False\n self.stop_event: Optional[asyncio.Event] = None\n # Instance information\n self.access_token: Optional[str] = None\n self.email = email\n self.password = password\n # Initialize thread\n super().__init__(target=self.run)", "title": "" }, { "docid": "49bf192ccf31961c16e90fe73e3c1ad7", "score": "0.5382844", "text": "def start(self):\n from threading import Thread\n self.loop.add_callback(self._go)\n self._loop_thread = Thread(target=self.loop.start)\n self._loop_thread.start()", "title": "" }, { "docid": "256f6d40c49a28f2b81a0a3bd3bd7616", "score": "0.5381491", "text": "def initialize_server(self):\n \n # Pub/sub pattern for async connections\n self.plot_context = zmq.Context()\n self.plot_socket = self.plot_context.socket(zmq.PUB) \n 
self.plot_socket.bind(\"tcp://*:6020\")\n self.plot_topic = 20000", "title": "" }, { "docid": "9f720ddccd1d518ed494a1d7f6870b02", "score": "0.5372912", "text": "def run_loop(self):\n pass", "title": "" }, { "docid": "9f658d9b68dfeffb5ed194222db9a8c4", "score": "0.53652525", "text": "def __init__(self, hostIP=\"0.0.0.0\", log=None):\n if log:\n self.log = log\n else:\n self.log = logging.getLogger(\"Pixelblaze.{}\".format(__class__.__name__))\n self.loop = asyncio.get_event_loop()\n self.hostIP = hostIP\n self.transport = None\n self._exit = False\n self.devices = {}\n self.autoSync = False\n self.new_data = asyncio.Event() #event trigger for new data\n # must run async self.start()", "title": "" }, { "docid": "c80470d97edf5fdcc01c450790d84b5d", "score": "0.53593564", "text": "def event_loop (timeout=30):\n global event_loop_is_running, with_timeout, sleep_relative\n # replace time.time with our tsc-based version\n time.time, time.original_time = tsc_time.now_raw_posix_fsec, time.time\n with_timeout = _original_with_timeout\n sleep_relative = _original_sleep_relative\n if install_signal_handlers:\n signal_handler.register(signal.SIGTERM, sigterm_handler)\n signal_handler.register(signal.SIGINT, sigterm_handler)\n spawn (tick_updater).set_name ('tick_updater')\n try:\n event_loop_is_running = True\n _original_event_loop (timeout)\n finally:\n event_loop_is_running = False\n # put it back\n time.time = time.original_time", "title": "" }, { "docid": "179ca14ec4bef8ab9a27d57f4be0d8b4", "score": "0.53558624", "text": "def run(self):\n self.network_event_loop.run_forever()", "title": "" }, { "docid": "1235760f29fd2168cd88c2492196e5cb", "score": "0.53547347", "text": "def __init__(self, poll_interval=1):\n\n Thread.__init__(self)\n self.poll_interval = poll_interval\n self._stop_flag = 0\n self._event_generators = []\n self._event_queue = Queue()\n self._event_processor = EventProcessor(self._event_queue)\n self._event_processor.start()", "title": "" } ]
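To make this record concrete, here is a minimal runnable sketch of the pattern the positive passage captures: an emitter whose constructor takes an optional event loop, falls back to a default, and keeps per-event listener lists. The `EventEmitter` name and the `on()`/`emit()` helpers are illustrative assumptions, not part of the dataset:

```python
import asyncio
import collections

class EventEmitter:
    def __init__(self, loop=None):
        # same fallback as the positive passage; newer Python code would
        # prefer asyncio.get_running_loop() from inside a coroutine
        self._loop = loop or asyncio.get_event_loop()
        self._listeners = collections.defaultdict(list)

    def on(self, event, callback):
        self._listeners[event].append(callback)

    def emit(self, event, *args):
        # schedule every registered listener on the emitter's loop
        for callback in self._listeners[event]:
            self._loop.call_soon(callback, *args)
```

Constructing it as `EventEmitter(loop=asyncio.new_event_loop())` pins all callbacks to that loop, which is the behaviour the query is probing for.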
7937c999a5afa4e76d49a4c8dc45d0e3
Returns the point at time t (0->1) along the curve.
[ { "docid": "4d96d37707793141520f29d32bd33f79", "score": "0.72235733", "text": "def pointAtTime(self,t):\n x = (1 - t) * (1 - t) * self[0].x + 2 * (1 - t) * t * self[1].x + t * t * self[2].x;\n y = (1 - t) * (1 - t) * self[0].y + 2 * (1 - t) * t * self[1].y + t * t * self[2].y;\n return Point(x,y)", "title": "" } ]
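The positive passage evaluates a quadratic Bézier curve at parameter t via the Bernstein form (1-t)²·P0 + 2(1-t)·t·P1 + t²·P2. Below is a self-contained sketch of the same computation, with plain (x, y) tuples standing in for the passage's Point class (an assumption made for brevity):

```python
# Standalone sketch of the quadratic Bezier evaluation used by the
# positive passage; control points are (x, y) tuples here.
def point_at_time(p0, p1, p2, t):
    """Return the point at parameter t (0..1) on the quadratic Bezier
    curve defined by control points p0, p1, p2."""
    s = 1.0 - t
    x = s * s * p0[0] + 2.0 * s * t * p1[0] + t * t * p2[0]
    y = s * s * p0[1] + 2.0 * s * t * p1[1] + t * t * p2[1]
    return (x, y)

# Quick check: the midpoint of the curve through (0,0), (1,2), (2,0)
# is point_at_time((0, 0), (1, 2), (2, 0), 0.5) == (1.0, 1.0)
```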
[ { "docid": "df6681abd3f94fe59f3e39c4d68cc2c1", "score": "0.6694643", "text": "def get(self, t):\n # return self.value[np.where((self.time - t) <= 0)[0][-1]] # last one that is <= 0\n return self._value[\n (self._time.searchsorted(t + 1) - 1).clip(0, len(self._value) - 1)\n ]", "title": "" }, { "docid": "26b3d9c97fc621f35b7769a3cf410da5", "score": "0.66819525", "text": "def at(self, t):\n return \\\n self._avg + (self._lastValue - self._avg)*(1 - (1 - self.x.alpha)**( t - self._lastTime)) \\\n if self._avg is not None else None", "title": "" }, { "docid": "f78a9033d29c8a7a04871a994512a5ba", "score": "0.6530438", "text": "def exact(self,x,t):\n alpha = 4\n period = 2*pi/self.mlength\n beta = alpha*period\n u = sin(beta*(x-(1-beta**2)*t)) + 1\n return u", "title": "" }, { "docid": "6aba8059fb89079e6ab0e1d18a206c3e", "score": "0.6412015", "text": "def getSegment(self,t):\n if len(self.times)==0:\n raise ValueError(\"Empty trajectory\")\n if len(self.times)==1:\n return (-1,0)\n if t > self.times[-1]:\n return (len(self.times),0)\n if t < self.times[0]:\n return (0,0)\n i = bisect.bisect_right(self.times,t)\n p=i-1\n u=(t-self.times[p])/(self.times[i]-self.times[p])\n if i==0:\n return (-1,0)\n assert u >= 0 and u <= 1\n return (p,u)", "title": "" }, { "docid": "f2d2f13af33820e7f19bba5902bb3438", "score": "0.6325142", "text": "def sample_trajectory(trajectory, t):\n # First point\n if t <= 0.0:\n return copy.deepcopy(trajectory.points[0])\n # Last point\n if t >= trajectory.points[-1].time_from_start.to_sec():\n return copy.deepcopy(trajectory.points[-1])\n # Finds the (middle) segment containing t\n i = 0\n while trajectory.points[i + 1].time_from_start.to_sec() < t:\n i += 1\n return interp_cubic(trajectory.points[i], trajectory.points[i + 1], t)", "title": "" }, { "docid": "f3f7100b7d57926b7b71c8919c575ee0", "score": "0.63108504", "text": "def _getSegment(self,t):\n if len(self.trajectoryTimes)==0:\n raise ValueError(\"Empty trajectory\")\n if len(self.trajectoryTimes)==1:\n return (-1,0)\n if t >= self.trajectoryTimes[-1]:\n return (len(self.trajectoryTimes)-1,0)\n if t <= self.trajectoryTimes[0]:\n return (0,0)\n i = bisect.bisect_right(self.trajectoryTimes,t)\n p=i-1\n assert i > 0 and i < len(self.trajectoryTimes),\"Invalid time index \"+str(t)+\" in \"+str(self.trajectoryTimes)\n u=(t-self.trajectoryTimes[p])/(self.trajectoryTimes[i]-self.trajectoryTimes[p])\n if i==0:\n return (-1,0)\n assert u >= 0 and u <= 1\n return (p,u)", "title": "" }, { "docid": "c7c0f839e694ce9962e8031e40223cf9", "score": "0.627243", "text": "def bezier_point(cps, t):\n\n p = ((1 - t) ** 3) * cps[0, :]\n p += 3 * t * ((1 - t) ** 2) * cps[1, :]\n p += 3 * (t ** 2) * (1 - t) * cps[2, :]\n p += (t ** 3) * cps[3, :]\n\n return p", "title": "" }, { "docid": "6c29e21036eda671ad8ea0549b5ae16f", "score": "0.6193054", "text": "def fn(t):\n # must use exp() from numpy to handle input from linspace\n return 0.1*(-3.0*np.exp(-t) + np.exp(2.0*t)*np.sin(t) + 3.0*np.exp(2.0*t)*np.cos(t) + 2.0)", "title": "" }, { "docid": "3253902be54de0fca2a7256fd3a39547", "score": "0.61587834", "text": "def fp(self, t, y):\n pass", "title": "" }, { "docid": "0c5dcf3e68541659ad36d4386d8658ed", "score": "0.6094813", "text": "def p(x,t):\n\n if x <= (-1 * v_max * t):\n return p_max\n elif x >= (v_max * t):\n return 0.0\n else:\n return 0.5 * (1. 
- x/(v_max*t))*p_max", "title": "" }, { "docid": "2c6db1613fc639cf4de9f33ab57625b8", "score": "0.6055308", "text": "def position_at_time(self, t):\n t += TIME_OFFSET # Compensate for start time\n\n n = 2 * np.pi / self.period # mean motion\n M = n * t # mean anomaly\n\n def kepler_root_fn(E_):\n return (E_ - self.ecc * np.sin(E_) - M) ** 2\n result = minimize(kepler_root_fn, 0)\n if not result.success:\n raise Exception(\"ERROR\")\n E = result.x[0]\n\n theta = 2 * np.arctan(np.sqrt(((1 + self.ecc) / (1 - self.ecc)) * np.tan(E / 2) ** 2))\n\n if E % (2 * np.pi) > np.pi:\n theta = 2 * np.pi - theta\n\n #theta = np.arccos((np.cos(E) - self.ecc) / (1 - self.ecc * np.cos(E)))\n r = self.semimajor * (1 - self.ecc * np.cos(E))\n\n theta *= self.sign # Compensate for direction\n\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n return (x, y)", "title": "" }, { "docid": "628098083ea3ed0b9506e12d00d192db", "score": "0.60532224", "text": "def P_t(self, past, present):\n\t\treturn self.p_t_coef*self.P_a(past)#*self.p_q", "title": "" }, { "docid": "41358e99b0527b64732154c0543a0501", "score": "0.60171324", "text": "def control_signal(t):\n return 0 if t < 0.5 else -1", "title": "" }, { "docid": "71a484e6d46c9ab0fdcf1798c8429253", "score": "0.60109663", "text": "def bezierCurveCLinear(self, p0, p1, t):\n\n if t <= 0:\n return p1[0], p1[1]\n\n elif t >= 1:\n return p0[0], p0[1]\n\n position = (1-t)*p0 + t*p1\n\n return position[0], position[1]", "title": "" }, { "docid": "64a79d315b02aac31626cff8a18d38dc", "score": "0.5996135", "text": "def at(self, t):\n\n return self.bez(t)", "title": "" }, { "docid": "c8846cad9676337303d23c7dbab07e4b", "score": "0.59881216", "text": "def pt0_from_t(SA, t, p):\n\n SA = np.maximum(SA, 0)\n\n s1 = SA * (35. / SSO)\n\n pt0 = t + p * (8.65483913395442e-6 -\n s1 * 1.41636299744881e-6 -\n p * 7.38286467135737e-9 +\n t * (-8.38241357039698e-6 +\n s1 * 2.83933368585534e-8 +\n t * 1.77803965218656e-8 +\n p * 1.71155619208233e-10))\n\n dentropy_dt = cp0 / ((Kelvin + pt0) * (1 - 0.05 * (1 - SA / SSO)))\n\n true_entropy_part = entropy_part(SA, t, p)\n\n for Number_of_iterations in range(0, 2, 1):\n pt0_old = pt0\n dentropy = entropy_part_zerop(SA, pt0_old) - true_entropy_part\n # Half way the mod. method (McDougall and Wotherspoon, 2012).\n pt0 = pt0_old - dentropy / dentropy_dt\n pt0m = 0.5 * (pt0 + pt0_old)\n dentropy_dt = -gibbs_pt0_pt0(SA, pt0m)\n pt0 = pt0_old - dentropy / dentropy_dt\n\n # maximum error of 6.3x10^-9 degrees C for one iteration. maximum error is\n # 1.8x10^-14 degrees C for two iterations (two iterations is the default,\n # \"for Number_of_iterations = 1:2\"). These errors are over the full\n # \"oceanographic funnel\" of McDougall et al. 
(2010), which reaches down to\n # p = 8000 dbar.\n\n return pt0", "title": "" }, { "docid": "7b4650f9f86749dc275529ccac586c5b", "score": "0.5983686", "text": "def point_from_triangle(t):\n A, B, C = t[0], t[1], t[2]\n x = np.random.uniform(0, 1)\n y = np.random.uniform(0, 1)\n p = ( A[0] + x * (B[0] - A[0]) + y * (C[0] - A[0]), A[1] + x * (B[1] - A[1]) + y * (C[1] - A[1]) )\n\n if point_in_traingle(p, A, B, C):\n return p\n else: \n return ( A[0] + A[0] + (B[0] - A[0]) + (C[0] - A[0]) - p[0], A[1] + A[1] + (B[1] - A[1]) + (C[1] - A[1]) - p[1] )", "title": "" }, { "docid": "dd54d47773be8e0c826929fde713ab9c", "score": "0.5979847", "text": "def tsat(p, bounds = False):\n if bounds:\n ok = (sat(0.01) <= p <= Pc1)\n else: ok = True\n if ok:\n from scipy.optimize import fsolve\n def f(t): return sat(t)-p\n from math import log\n t0=max(4606.0/(24.02-log(p))-273.15,5.0) # starting estimate\n t=fsolve(f,t0)\n import collections # need to check this as some versions of SciPy return an array from fsolve\n if isinstance(t,collections.Iterable): return t[0]\n else: return t\n else: return None", "title": "" }, { "docid": "5a4962aad30a9cd3ca2db4bb49cf1aa0", "score": "0.59513766", "text": "def time_to_position(self, x):\n if x == 0: return self.t_0\n\n state0 = self.state_before_position(x)\n\n # p0 is the last position before the desired position\n \n d = x - p0.x\n\n if p0.a == 0:\n # d = v * t \n # t = d / v\n t = d / p0.v\n else:\n # d = v * t + 0.5 * a * t**2\n t = quadratic(0.5 * p0.a, p0.v, -d)\n \n print(\"state \" + str(p0))\n print(\"t_0\", self.t_0)\n print(\"time to position\", x, p0.t + t)\n \n return p0.t + t", "title": "" }, { "docid": "50449250875a308362fd19c1d8d1589a", "score": "0.59355855", "text": "def derivativeAt(self, t):\n if self._avg is None:\n return None\n dt = t - self._lastTime\n return -(self._lastValue - self._avg)*math.log(1 - self.x.alpha)*(1 - self.x.alpha)**dt", "title": "" }, { "docid": "2b6445b613193e5066ff6a31741f5743", "score": "0.5922067", "text": "def get_position_by_time(self, t):\n if t<=0 or t<=self.startTime:\n return None\n i = int((t-self.startTime)/NetflixSession.TS_GRANULARITY)\n if i >= len(self.position):\n return None\n return self.position[i]", "title": "" }, { "docid": "95611c2e87f0d20fcd252e46d23c0dac", "score": "0.5907894", "text": "def stdp(t):\n\n if t > 0:\n return np.exp(-t/tau) \n elif t < 0:\n return -np.exp(t/tau)\n else:\n return 0", "title": "" }, { "docid": "ff45196d44dd2130c764c17dbfe8f877", "score": "0.5897936", "text": "def pt(self, t):\n return foot(vec.zero, self) + t*self.dir", "title": "" }, { "docid": "17cae00aefb3c38a9519f7be4f2f81a0", "score": "0.58472365", "text": "def linearbezier(p0, p1, t):\n ## p0 and p1 are of type numpy.array\n return p0+t*(p1-p0)", "title": "" }, { "docid": "6d4a0bf1811fcc6bc0449dfbcf9af3c9", "score": "0.5799749", "text": "def pt_from_t(SA, t, p, p_ref=0):\n\n p_ref = np.asanyarray(p_ref)\n\n SA = np.maximum(SA, 0)\n\n s1 = SA * 35. 
/ SSO\n\n pt = (t + (p - p_ref) * (8.65483913395442e-6 -\n s1 * 1.41636299744881e-6 -\n (p + p_ref) * 7.38286467135737e-9 +\n t * (-8.38241357039698e-6 +\n s1 * 2.83933368585534e-8 +\n t * 1.77803965218656e-8 +\n (p + p_ref) * 1.71155619208233e-10)))\n\n dentropy_dt = cp0 / ((Kelvin + pt) *\n (1 - 0.05 * (1 - SA / SSO)))\n\n true_entropy_part = entropy_part(SA, t, p)\n\n for Number_of_iterations in range(0, 2, 1):\n pt_old = pt\n dentropy = entropy_part(SA, pt_old, p_ref) - true_entropy_part\n pt = pt_old - dentropy / dentropy_dt # half way through the method\n ptm = 0.5 * (pt + pt_old)\n dentropy_dt = -gibbs(n0, n2, n0, SA, ptm, p_ref)\n pt = pt_old - dentropy / dentropy_dt\n\n # maximum error of 6.3x10^-9 degrees C for one iteration. maximum error\n # is 1.8x10^-14 degrees C for two iterations (two iterations is the default,\n # \"for Number_of_iterations = 1:2). These errors are over the full\n # \"oceanographic funnel\" of McDougall et al. (2010), which reaches down to\n # p = 8000 dbar.\n\n return pt", "title": "" }, { "docid": "60b197e5cdc1687a0f746a86a9f5a81a", "score": "0.5793214", "text": "def _evaluate_at_time_lw(self, t: float) -> float:\n\n if 0 < t < self._onset + self._decay_duration:\n if t <= self._onset:\n return np.square(t/self._onset)\n else:\n return (np.exp(-(t-self._onset)/self._decay)-self._epsilon)/(1.-self._epsilon)\n else:\n return 0.", "title": "" }, { "docid": "849ec256637303755d7debca77a21b4f", "score": "0.5764049", "text": "def rhs(x, y, t):\n\n return 5 * x * (x_end - x) * y * (y_end - y) + 10 * a * t * (y * (y_end - y) + x * (x_end - x))", "title": "" }, { "docid": "83777d12b52de145243a1f9e2ea35954", "score": "0.57630384", "text": "def get(self, t):\n\n beta = self.schedule(t)\n dim = self.dim\n return (1 - beta) * jnp.eye(dim) + beta * jnp.ones((dim, dim)) / dim", "title": "" }, { "docid": "cfd8cd75dcf11e7b3aaa720d98845d2a", "score": "0.5747864", "text": "def Klookup(self, t):\n idx_closest = (np.abs(self.ttraj - t)).argmin()\n\n # limit to valid indices\n if idx_closest >= (self.K.shape[0] - 1):\n return self.K[-1]\n elif idx_closest <= 0:\n return self.K[0]\n else:\n # linearly interpolate between two points\n t_closest = self.ttraj[idx_closest]\n if t_closest > t:\n dt = self.ttraj[idx_closest] - self.ttraj[idx_closest - 1]\n return (t_closest - t) * self.K[idx_closest - 1] / dt + (t + dt - t_closest) * self.K[idx_closest] / dt\n elif t > t_closest:\n dt = self.ttraj[idx_closest + 1] - self.ttraj[idx_closest]\n return (t - t_closest) * self.K[idx_closest + 1] / dt + (t_closest + dt - t) * self.K[idx_closest] / dt\n else:\n return self.K[idx_closest]", "title": "" }, { "docid": "501146cd970eac51b86862c4d7aefe23", "score": "0.5746347", "text": "def time2depthpt(self, t, point):\n # Find the nearest profile\n coords = np.array(self.coords)\n closest = np.abs(coords - point).argmin()\n\n time = self.data[closest][:, 0]\n depth = self.data[closest][:, 1]\n\n return np.interp(t, time, depth)", "title": "" }, { "docid": "57d86501a89087fe30912d7181b4e762", "score": "0.5741742", "text": "def EvaluateCurve(curve_id, t, segment_index=-1):\r\n curve = rhutil.coercecurve(curve_id, segment_index, True)\r\n return curve.PointAt(t)", "title": "" }, { "docid": "54a9cbdb4f54b65505c366e7d08d96cc", "score": "0.5734176", "text": "def get_value(datablocks, channel_name, t, lpfilter=None):\n x, y = get_waveform(datablocks, channel_name, lpfilter)\n # First index where time is just greater than t\n result = np.where(x >= t)\n index = result[0][0]\n return y[index]", "title": 
"" }, { "docid": "978754abc990fe8f1987c71f019283b6", "score": "0.57319343", "text": "def objective(point):\n\n t = point['t']\n x = point['x']\n a = 2 * np.pi\n b = a * t \n c = a * x\n d = np.exp(- (x + 1) ** (t + 1)) * np.cos(c)\n e = np.sin((t + 2) * c) + np.sin((t + 2)**2 * c) + np.sin((t + 2)**3 * c)\n f = d * e\n\t\n\t\n\t\n \"\"\"\n f(t,x) = x^2+t\n \"\"\"\t\n # t = point['t']\n # x = point['x']\t\n # f = 20*x**2+t\n\n return f", "title": "" }, { "docid": "33bfcc14f4bab592b1442d138c53f9de", "score": "0.5709217", "text": "def c2pt(t, *p):\n val = 0\n for j in range(0, len(p), 2):\n val += 2*p[j]*np.exp(-p[j+1]*T)*np.cosh(p[j+1]*(T - t))\n return val", "title": "" }, { "docid": "00c8cbf96fe0a0581000526457d1493b", "score": "0.56889415", "text": "def func(cls, t, tau, amp, offset):\n return offset + amp * (1 - np.exp(- t / tau))", "title": "" }, { "docid": "011bc5b84117c4b7b14c383991702196", "score": "0.5664827", "text": "def lt_de(t, T):\n rhs = 20 * m.sin(3 * t) - 3 * T\n return rhs", "title": "" }, { "docid": "5b61971802152ffbf6c7c953f0977e15", "score": "0.56464267", "text": "def myCurve(x):\n return np.abs(x) + x * np.sin(x)", "title": "" }, { "docid": "09b3e0aa2293bcd0f67f55e09421a1c0", "score": "0.5580157", "text": "def get(self, t):\n beta = self.schedule(t)\n dim = self.dim\n width = self.width\n\n band = jnp.tri(\n dim, dim, width // 2, dtype=jnp.int32) - jnp.tri(\n dim, dim, -width // 2, dtype=jnp.int32)\n\n arr = band / band.sum(0, keepdims=True)\n\n return beta * arr + (1 - beta) * jnp.eye(dim)", "title": "" }, { "docid": "2a84555b38822b4ede3df84cd82c70fe", "score": "0.55614114", "text": "def time2depthpt(self, t, point):\n return t*self.velocity/2.0", "title": "" }, { "docid": "9eeda22685517dc295d3b900b90e094d", "score": "0.55608034", "text": "def state_at_t(self, t) -> Tuple[float, float]:\n if self.a * self.total_delta_loc < 0:\n if t < .5 * (self.delta_t - (self.v_initial / self.a)):\n # Deceleration - Phase 1\n return self.accelerating_state(t)\n else:\n # Acceleration - Phase 2\n return self.decelerating_state(t)\n\n accel_end = self.accelerate_end_t()\n if t < accel_end:\n return self.accelerating_state(t)\n accel_end_p = self.accelerate_end_p()\n decel_begin = self.decelerate_begin_t(accel_end)\n if t < decel_begin:\n return self.steady_state(t, accel_end, accel_end_p)\n return self.decelerating_state(t)", "title": "" }, { "docid": "55c67b317cb9713b3c093810a6145b34", "score": "0.55558425", "text": "def weather_at_time(self, t: torch.Tensor) -> torch.Tensor:\n\n inbetween = (\n ((t % self._normalized_timestep_delta) / self._normalized_timestep_delta)\n .unsqueeze(1)\n .to(self._device)\n )\n left_index = (t // self._normalized_timestep_delta).long().to(self._device)\n\n if any(torch.isnan(t)):\n raise ValueError()\n if any(left_index < 0):\n left_index = torch.zeros_like(left_index)\n if any(left_index >= len(self._normalized_weather_tensor) - 2):\n left_index = torch.zeros_like(left_index) - 2\n\n right_index = left_index + 1\n\n left_weather = self._normalized_weather_tensor[left_index]\n right_weather = self._normalized_weather_tensor[right_index]\n\n return left_weather * (1 - inbetween) + right_weather * inbetween", "title": "" }, { "docid": "4342a5e924f2f97902f46a87a4ba23aa", "score": "0.55510455", "text": "def getValueT(self,t=None):\n\n\t\tp = []\n\t\tfor index, val in enumerate(self.coef):\n\t\t\tp.append(val * t + self.point[index]) \n\t\t\t# p[1] = val * t + self.point[index]\n\t\t\t# p[2] = val * t + self.point[index]\n\t\treturn p", "title": "" }, { 
"docid": "dd67fce3431965b73879ff9032368247", "score": "0.5541475", "text": "def yValue(self, time):\n #----------------------\n i = self._index(time)\n if i is not None:\n if i >= (self._poly.size() - 1):\n i = self._poly.size() - 2\n p0 = self._poly.at(i)\n p1 = self._poly.at(i+1)\n if p0.x() == p1.x(): return (p0.y() + p1.y())/2.0\n return p0.y() + (time - p0.x())*(p1.y() - p0.y())/(p1.x() - p0.x())", "title": "" }, { "docid": "57eb1349f0beebdb135cef26e7a8bd31", "score": "0.5521312", "text": "def value(self, time):\n\n # 1. ensure time is within bounds else return boundary keyframe\n # if time <= self.times[0] or time >= self.times[-1]:\n # return self.values[0 if time <= self.times[0] else -1]\n\n # 2. search for closest index entry in self.times, using bisect_left function\n i = bisect_left(self.times, time%self.times[-1]) # _i is the time index just before t\n\n # 3. using the retrieved index, interpolate between the two neighboring values\n # in self.values, using the initially stored self.interpolate function\n fraction = (time%self.times[-1]-self.times[i-1]) / (self.times[i]-self.times[i-1])\n return self.interpolate(self.values[i-1], self.values[i], fraction)", "title": "" }, { "docid": "9320ee0c79c40beb966dfe6b2b12db22", "score": "0.5517267", "text": "def Rx(t):\n return np.array(((1,0,0),(0,np.cos(t),-np.sin(t)),(0,np.sin(t),np.cos(t))))", "title": "" }, { "docid": "5c8f9784a3dad0884113cbe7c617bda0", "score": "0.5515756", "text": "def at_dt(self, t):\n\n return self.bez(t, self.derive[:])", "title": "" }, { "docid": "5d97bf37db187eebda63ba7c5963fd4c", "score": "0.5515326", "text": "def calc(self, t):\n\n if t < self.x[0]:\n return None\n elif t > self.x[-1]:\n return None\n\n i = self.__search_index(t)\n dx = t - self.x[i]\n result = self.a[i] + self.b[i] * dx + \\\n self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0\n\n return result", "title": "" }, { "docid": "f974947e48308375cd41f79504a50118", "score": "0.5510095", "text": "def est_am(t,y):\n ch=t>t_eq\n y1=y[ch]\n return y1[-1]-y1[0]", "title": "" }, { "docid": "89b8c08cc433340a1ca3586e15a9b1bc", "score": "0.550863", "text": "def forward_curve(self, x):\n y = scipy.interpolate.splev(x, self.tck, der=0)\n return y", "title": "" }, { "docid": "eda7341692115079bbe420f60d75925b", "score": "0.5503552", "text": "def getSignal(t):\n s1 = np.sin(2*np.pi*100*t)\n s2 = 2*np.sin(2*np.pi*500*t)\n # create a transient \"chirp\" (zero s2 signal before/after chirp)\n mask = np.where(np.logical_and(t>10, t<12), 1.0, 0.0)\n s2 = s2 * mask\n # add some noise into the mix\n nse = 0.01*np.random.randn(len(t))/1.0\n s = s1 + s2 + nse\n return s # the signal", "title": "" }, { "docid": "36882814fd88d49ceac91a73111b8822", "score": "0.54710823", "text": "def findNearest(self,t,timevec):\n tnow = self.SecondsSince(t)\n tvec = self.SecondsSince(timevec)\n \n #tdist = np.abs(tnow - tvec)\n \n #idx = np.argwhere(tdist == tdist.min())\n \n #return int(idx[0])\n return np.searchsorted(tvec,tnow)[0]", "title": "" }, { "docid": "02f8de0469a570f2ff10b62d6fcc58e8", "score": "0.5462917", "text": "def _evaluate_at_time_lw(self, t: float) -> float:\n return 0.", "title": "" }, { "docid": "6032c4321e5fcc21980dcaa09a133dac", "score": "0.5456722", "text": "def get_t(t21,t2p1, timestep): \n if t21 < 0 or t2p1 < 0:\n #w2 or w2p will come at t<0\n t_min = min(t21, t2p1) - early_buffer\n else:\n t_min = -early_buffer\n #last pulse dictates when emitted coherences should start to be watched for integration\n if t21 > 0 or t2p1 > 0:\n #w2 or w2p will come at t>0\n t_max = 
max(t21, t2p1) + late_buffer\n else:\n t_max = late_buffer\n #note: dtype float16 can screw up t working properly!\n t = np.arange(t_min, t_max, timestep)\n #tprime_i = t_max - late_buffer - early_buffer\n return t", "title": "" }, { "docid": "f1c8fcf5b50529812bbeff28e91816fa", "score": "0.5451557", "text": "def value(self, time):\n\n # 1. ensure time is within bounds else return boundary keyframe\n if time <= self.times[0]:\n return self.values[0]\n if time >= self.times[-1]:\n return self.values[-1]\n\n # 2. search for closest index entry in self.times, using bisect_left function\n t_i = bisect_left(self.times, time) - 1 # note the -1\n\n # 3. using the retrieved index, interpolate between the two neighboring values\n # in self.values, using the initially stored self.interpolate function\n f = (time - self.times[t_i]) / (self.times[t_i + 1] - self.times[t_i])\n return self.interpolate(self.values[t_i], self.values[t_i + 1], f)", "title": "" }, { "docid": "efc7cdd95796b7367a1608ab254237ac", "score": "0.54389536", "text": "def _sample_position(self, t, N=1):\n v = np.zeros(N)\n iv = np.ones(N, dtype=bool)\n x = self.startPosition * np.ones(N)\n continue_ = True\n i = 1\n t = t - self.startTime # should be positive.\n size = v.shape[0]\n while continue_:\n n_samples = iv.sum()\n v[iv] += self.T.rvs(n_samples)\n\n # if the next time is beyond reach, we do not add a new jump\n iv[v >= t] = False\n n_samples = iv.sum()\n\n x[iv] += self.J.rvs(n_samples)\n\n size = iv.sum() # any left?\n empty = size == 0\n\n i += 1\n if empty:\n continue_ = False\n\n if i > 10000:\n warnings.warn(\"The algorithm did not converge. Be sure 'T' is a non negative random \\\n variable, or set 't' to a smaller value.\")\n break\n\n return x", "title": "" }, { "docid": "3f2a367331b2ae237fe894c6805c1373", "score": "0.5409791", "text": "def path(t):\n t = gs.array(t)\n t = gs.cast(t, initial_cotangent_vec.dtype)\n t = gs.to_ndarray(t, to_ndim=1)\n\n cotangent_vecs = gs.einsum(\"i,...k->...ik\", t, initial_cotangent_vec)\n\n points_at_time_t = [\n self.exp(tv, pt, n_steps=n_steps)\n for tv, pt in zip(cotangent_vecs, initial_point)\n ]\n points_at_time_t = gs.stack(points_at_time_t, axis=0)\n\n return (\n points_at_time_t[0] if n_initial_conditions == 1 else points_at_time_t\n )", "title": "" }, { "docid": "7e916fd07c3010a6d073f8bf25380f1f", "score": "0.5408187", "text": "def interp(self,t,data):\n # helps to keep mod T here for integrators\n t = np.mod(t,self.T)\n fn = interp1d(self.tLC,data)\n \n #print('t,fn',t,fn(t))\n \n return fn(t)", "title": "" }, { "docid": "75929f6d25cbb52eff53177f44c63aa5", "score": "0.5399461", "text": "def ToPoint(self) -> _n_1_t_1:", "title": "" }, { "docid": "b6ac56e27ba8a6cb2df8ec9e2efb1ef1", "score": "0.5399192", "text": "def time2depthpt(self, t, point):\n # Find the nearest profile\n coords = np.array(self.coords)\n closest = np.abs(coords - point).argmin()\n profile = self.data[closest]\n\n # HACK TO MAKE FAKE VELOCITIES\n velocity = np.ones(profile.data.size) * 2000.0\n\n samp = profile.stats[\"sampling_rate\"]\n t_profile = np.arange(velocity.size) * 1.0 / samp\n vrms = np.cumsum(velocity) / (np.arange(velocity.size)+1)\n vrms = np.interp(t, t_profile, vrms)\n return t*vrms/2.", "title": "" }, { "docid": "f642acbb32b4c47686fc9687fc16e155", "score": "0.5388491", "text": "def isPointOnCurve(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "f051eb29df6c6b48de6ce04874d84744", "score": "0.5374317", "text": "def O(self,t,x,p):\n return x", "title": "" }, { "docid": 
"2323ac8c24359c184549681ff9d1045b", "score": "0.5364349", "text": "def time(self, t, s=1.0, complete=True):\n w = self.w0\n\n x = t / s\n\n output = np.exp(1j * w * x)\n\n if complete:\n output -= np.exp(-0.5 * (w ** 2))\n\n output *= np.exp(-0.5 * (x ** 2)) * np.pi ** (-0.25)\n\n return output", "title": "" }, { "docid": "5ae02d0df6086ff67e6da8169bbd44ac", "score": "0.5362421", "text": "def state(k, x, t):\n return exp(1j*(k*x - omega*t))", "title": "" }, { "docid": "331a394258f5359453cf93fb876218fa", "score": "0.5360645", "text": "def quadraticbezier(p0, p1, p2, t):\n ## p0, p1 and p2 are of type numpy.array\n return (1-t)**2*p0+2*(1-t)*t*p1+t**2*p2", "title": "" }, { "docid": "cc2985354355e1fe87456aacdca3c8f3", "score": "0.5360297", "text": "def sinc_p(t, t0, Tb):\n\n p = np.sinc(np.pi * (t - t0) / Tb)\n return (p)", "title": "" }, { "docid": "8fc93ab057115be1d01909abb130e0ca", "score": "0.5358552", "text": "def get_timestamp_by_time(self, t):\n if t<=0 or t<=self.startTime:\n return None\n i = int((t-self.startTime)/NetflixSession.TS_GRANULARITY)\n if i >= len(self.timeStamps):\n return None\n return self.timeStamps[i]", "title": "" }, { "docid": "2e1ed925682763bd477ee53df1f4794f", "score": "0.5357576", "text": "def next_change(self, t=None):\n if t is None:\n t = self.engine.get_now()\n\n delta = random.expovariate(1.0 / self.interval) # expovariate delivers long-tailed distribution between 0 and infinity, centred on given value\n return t + delta", "title": "" }, { "docid": "1a7fbd7defadbe4f74956360ca6cabfb", "score": "0.53535336", "text": "def interval(self, t):\n return np.random.poisson(self.time_variance(t))", "title": "" }, { "docid": "2b3a23bc342344cdbec79aaa5f41f100", "score": "0.53485364", "text": "def true_posterior(self, t):\n return (sigmoid(t)**self.a)*(sigmoid(-t)**self.b)/beta(self.a, self.b)", "title": "" }, { "docid": "037f7936f651b07572c42a40308dcf23", "score": "0.533736", "text": "def generate_pnt(self, s, t, *args):\n pnt = TrajectoryPoint()\n # Trajectory time stamp\n pnt.t = t\n # Set position vector\n pnt.pos = self.generate_pos(s).tolist()\n # Set rotation quaternion\n pnt.rotq = self.generate_quat(s)\n return pnt", "title": "" }, { "docid": "fcbb72c7f6790b4d3e6c239a3c1f41a6", "score": "0.5330515", "text": "def take_step(self,p):\n dpos = np.random.randn(2)\n dpos = dpos / np.linalg.norm(dpos)\n temp = self.pos + dpos * p\n if temp[0]>= 0 and temp[0]<=1 and temp[1]>= 0 and temp[1]<=1:\n self.pos = temp", "title": "" }, { "docid": "5ed2732aa09b659bf58b65c4cdd0e96c", "score": "0.5329308", "text": "def Price(t):\n A = 100.\n return .5*A*np.sine(t) + A", "title": "" }, { "docid": "66a1c2aaf585333da657e650474ba79d", "score": "0.5327765", "text": "def is_in_t(pt):\n flag = True\n if pt[0] < 0 or pt[1] < 0:\n flag = False\n if pt[0] + pt[1] > 1:\n flag = False\n return flag", "title": "" }, { "docid": "6e1de4504ffd58e59e1053934e64d216", "score": "0.5325302", "text": "def sigmoid(t):\n return (np.exp(t))/(1+(np.exp(t)))", "title": "" }, { "docid": "3688f4880480de3c62a053429809f68e", "score": "0.5319139", "text": "def plot_me(self, t):\n return np.array([np.cos(2*np.pi*t), np.sin(2*np.pi*t)])", "title": "" }, { "docid": "2b0d92acbbcb3881c7b04c8d85dbdecb", "score": "0.5317961", "text": "def f1(y, t):\n\n u1 = y[0]\n u2 = y[1]\n u3 = t\n return u2, -2 * params[\"gamma\"] * u2 - np.sin(u1) + params[\"mu\"] * np.sin(params[\"omega\"] * u3)", "title": "" }, { "docid": "a9c139f6f789c7c9897e44700cd5aa56", "score": "0.5314853", "text": "def sigmoid(t):\n return 1.0 / (1.0 + 
np.exp(-t))", "title": "" }, { "docid": "2e8bd6921587eb2edd707f3968305d73", "score": "0.5311136", "text": "def kt(self, t):\n K = np.searchsorted(self.data.t, t)\n N = len(self)\n return np.clip(K, 0, N-1)", "title": "" }, { "docid": "5edd6ac6ced0629b5c26e79a5ac365e0", "score": "0.5307474", "text": "def trajectory_ex(t_start=0, t_stop=1, dt=2**(-4), x0=np.array([1])):\n parameters = [1.5, 1]\n number_of_timesteps = int((t_stop - t_start) / dt)\n x = np.zeros((number_of_timesteps + 1, len(x0)))\n x_exact = np.zeros_like(x)\n x[0, :] = x0\n x_exact[0, :] = x0\n time = np.linspace(t_start, t_stop, number_of_timesteps + 1)\n\n random_force = np.random.normal(scale=np.sqrt(dt), size=(number_of_timesteps, 1))\n\n for step in range(number_of_timesteps):\n x[step + 1, :] = x[step, :] + parameters[0]*x[step, :]*dt + parameters[1] * x[step, :] * random_force[step]\n x_exact[step+1, :] = x0*np.exp((parameters[0]-parameters[1]**2/2)*time[step]+parameters[1]*np.sum(random_force[:step+1]))\n\n plt.plot(time, x, label='Numerical solution')\n plt.plot(time, x_exact, label='Exact solution')\n plt.legend()\n plt.show()", "title": "" }, { "docid": "ee114c73fbdab147225971456e36a8f6", "score": "0.5297463", "text": "def sigmoid(t):\n \n return 1.0 / (1.0 + np.exp(-t))", "title": "" }, { "docid": "e93395abe21453359df5e2f86ddbec22", "score": "0.52953637", "text": "def getFirstPoint(self):\r\n return Trackpoint(self.trackdata[self.trackdataStart:self.trackdataStart + Trackpoint.TRACKPOINTLEN])", "title": "" }, { "docid": "e8a907ebcfb41fb01d335c4a66f58979", "score": "0.5291992", "text": "def interp_tT(t):\n return interp(t, conditions[:, 0], conditions[:, 1])", "title": "" }, { "docid": "c25c14c125f44a908dc21329584908a4", "score": "0.5285083", "text": "def simple_harmonic_oscillator(p, t):\n return p[0] + p[1] * np.cos(p[2] * t - p[3])", "title": "" }, { "docid": "4e544b08b240864f702c80dd40b390cb", "score": "0.5267563", "text": "def f(self, t, y):\n pass", "title": "" }, { "docid": "b15632e6bc69a368ffd79374489089bf", "score": "0.52611744", "text": "def Xrt(t, amp, centre, period = 365): \n\n x = t * (2 * pi / period) \n\n return (amp*centre * sin(x)) + centre", "title": "" }, { "docid": "cdd2c8cfb52a9f9a708db0fd0b5d02f6", "score": "0.5255001", "text": "def current_point(self):\n try:\n data = self.read_reg(TD_STATUS, 1)\n # if touch points==1\n if data is not None and data[0] == 0x1:\n # Read Xhigh, Xlow, Yhigh and Ylow\n data_buf = self.read_reg(PN_XH, 4)\n x = ((data_buf[0] & 0x0F) << 8) | (data_buf[1])\n y = ((data_buf[2] & 0x0F) << 8) | (data_buf[3])\n return (x, y)\n except Exception as e:\n return e # debug\n return None", "title": "" }, { "docid": "b0dbd036caae087aa7bcdd74a09ff6bf", "score": "0.52548707", "text": "def interpolate_linear(t, t0, x0, t1, x1):\n if t0 > t1:\n t0, x0, t1, x1 = t1, x1, t0, x0\n if t0 == t1 and not x0 == x1:\n raise ValueError(\"Interpolated quantity can not have two \"\n \"different values at the same time\")\n if t < t0 or t > t1:\n raise ValueError(\"Time argument outside of the given interval\")\n if t == t0:\n return x0\n if t == t1:\n return x1\n timeDelta = 1.0*(t1 - t0)\n t = (t - t0)/timeDelta\n onemt = 1.0 - t\n return onemt*x0 + t*x1", "title": "" }, { "docid": "2234a4f341fbef9a66a08f9ebecd2607", "score": "0.52531505", "text": "def edge_point(self, edge, t=0.5):\n if t == 0.0:\n return self.edge_start(edge)\n if t == 1.0:\n return self.edge_end(edge)\n if t == 0.5:\n return self.edge_midpoint(edge)\n\n a, b = self.edge_coordinates(edge)\n ab = subtract_vectors(b, a)\n 
return Point(*add_vectors(a, scale_vector(ab, t)))", "title": "" }, { "docid": "98cb9bdf5d9ce9fc779c4a405a4ac317", "score": "0.5249009", "text": "def point_in_time(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"point_in_time\")", "title": "" }, { "docid": "dd850db1ad2991f87dc6b321fd962dd2", "score": "0.5246322", "text": "def pdf(self, t, l):\n r, c, tau = self.r, self.c, self.tau\n\n t = np.atleast_1d(t)\n\n tcut = (l+r)/c\n tmin = (r-l)/c\n\n x = t.copy()\n x[t > tcut] = tcut\n\n y = c*x*tau*np.exp(tmin/tau)*(r+l-c*tau)\n y += np.exp(x/tau)*(l**2-r**2+c**2*x*tau)*tau\n y += x*(l**2-r**2)*(expi(tmin/tau) - expi(x/tau))\n y /= 4*c*l*x*tau\n y *= np.exp(-t/tau)/tau\n y[t < tmin] = 0\n\n return y if y.size > 1 else y.item()", "title": "" }, { "docid": "06c714a6db88ab6d623bd0a7a6b7d0ba", "score": "0.5241759", "text": "def log_t(u, t):\n if t == 1.0:\n return u.log()\n else:\n return (u.pow(1.0 - t) - 1.0) / (1.0 - t)", "title": "" }, { "docid": "553cbb4db0feaadc0f78c5bfbcaa9f0c", "score": "0.5236777", "text": "def get_phase(self, t, TA=None):\n \n if TA is None:\n TA = self.true_anomaly(t)\n \n phase = (TA-self.true_anomaly(self.t0))/(2.*np.pi)\n phase = phase + 1.*(phase<0.).astype(int)\n return phase", "title": "" }, { "docid": "2a9ba343afc0de8d070313bde35efe51", "score": "0.5235387", "text": "def __interpolate(self, t):\r\n try:\r\n c = [0 for _ in range(self.n)]\r\n w = [0 for _ in range(self.n)]\r\n for i in range(0, self.n):\r\n w[i] = self.Y[i]\r\n for j in reversed(range(i)):\r\n w[j] = (w[j + 1] - w[j]) / (self.X[i] - self.X[j])\r\n c[i] = w[0]\r\n s = c[self.n - 1]\r\n for i in reversed(range(self.n - 1)):\r\n s = s * (t - self.X[i]) + c[i]\r\n return s\r\n except Exception as e:\r\n raise", "title": "" }, { "docid": "1925a5c4de8821953bd94db3a32c50a0", "score": "0.5232862", "text": "def run(self, t):\r\n temp_x = self.x\r\n temp_t = self.t\r\n traj = np.zeros((len(self.x), int(t/self.dt)))\r\n for i in range(len(traj.T)):\r\n traj.T[i] = self.x\r\n self.step()\r\n self.x = temp_x\r\n self.t = temp_t\r\n self.y = self.output()\r\n return traj", "title": "" }, { "docid": "091ffb581b0f4ba35758e514b61bab70", "score": "0.52275234", "text": "def interp_cubic(p0, p1, t_abs):\n T = (p1.time_from_start - p0.time_from_start).to_sec()\n t = t_abs - p0.time_from_start.to_sec()\n q = [0] * 7\n qdot = [0] * 7\n qddot = [0] * 7\n for i in range(len(p0.positions)):\n a = p0.positions[i]\n b = p0.velocities[i]\n c = (-3 * p0.positions[i] + 3 * p1.positions[i] - 2 * T * p0.velocities[i] - T * p1.velocities[i]) / T**2\n d = (2 * p0.positions[i] - 2 * p1.positions[i] + T * p0.velocities[i] + T * p1.velocities[i]) / T**3\n\n q[i] = a + b * t + c * t**2 + d * t**3\n qdot[i] = b + 2 * c * t + 3 * d * t**2\n qddot[i] = 2 * c + 6 * d * t\n return JointTrajectoryPoint(positions=q, velocities=qdot, accelerations=qddot, time_from_start=rospy.Duration(t_abs))", "title": "" }, { "docid": "d2e4bb68773d57eca852839ac36577a5", "score": "0.5226863", "text": "def _A(self, t, T):\r\n P0T = self.curve.P_M(T)\r\n P0t = self.curve.P_M(T)\r\n f0t = self.curve.instantFwd(t)\r\n par = self._B(t, T)*f0t\\\r\n \t-((self.sigma**2)/(4*self.a)*(1-np.exp(-2*self.a*t))*(self._B(t, T)**2))\r\n return P0T/P0t * np.exp(par)", "title": "" }, { "docid": "bf4162a969c1b4c81c4444e540f2eef3", "score": "0.522631", "text": "def tuple_to_point(t):\n return Point(t[0], t[1]) if t else None", "title": "" }, { "docid": "931fd18e756832d3c7de73e4773dd208", "score": "0.5223334", "text": "def seg_velocity_time(v0,v1,t):\n \n v0 = float(v0)\n 
v1 = float(v1)\n x = float(t)\n \n a = (v1-v0)/t\n\n x = a * (t**2) / 2.\n\n return abs(x), v0, v1, t, a", "title": "" }, { "docid": "fc2ea06aae009702465bff8b3b510f5a", "score": "0.5210453", "text": "def update(self, t):\n\n x = np.zeros((3,))\n x_dot = np.zeros((3,))\n x_ddot = np.zeros((3,))\n x_dddot = np.zeros((3,))\n x_ddddot = np.zeros((3,))\n yaw = 0\n yaw_dot = 0\n\n # STUDENT CODE HERE\n\n # from waypoint_traj\n if np.shape(self.points) == (3,) or np.shape(self.points) == (1, 3):\n x = np.reshape(self.points, (3,))\n elif np.shape(self.points) != (3,) or np.shape(self.points) != (1, 3):\n if t > self.point_t[-1]:\n x = self.points[-1]\n else:\n x = self.f(t)\n\n # if np.shape(self.points) == (3,) or np.shape(self.points) == (1,3):\n # x = np.reshape(self.points, (3,))\n # elif np.shape(self.points) != (3,) or np.shape(self.points) != (1,3):\n # if t >= self.t[-1]:\n # x = self.points[-1]\n # elif t == self.t[0]:\n # x = self.points[0]\n # else:\n # for i in range(len(self.t)-1):\n # if self.t[i] < t <= self.t[i+1]:\n # x_dot = self.v\n # x = x_dot*(t-self.t[i]) + self.points[i]\n #\n\n\n flat_output = {'x': x, 'x_dot': x_dot, 'x_ddot': x_ddot, 'x_dddot': x_dddot, 'x_ddddot': x_ddddot,\n 'yaw': yaw, 'yaw_dot': yaw_dot}\n return flat_output", "title": "" }, { "docid": "f175c55466e23a469d255393e97ec7b4", "score": "0.5204807", "text": "def PreciseTime():", "title": "" } ]
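Several of the passages in the block that closes here share one pattern worth spelling out: bisect into a sorted array of key times, then linearly interpolate between the two bracketing values. The sketch below is an illustrative reconstruction of that pattern, not code from any single passage; the keyframe arrays are hypothetical.

```python
from bisect import bisect_left

def keyframe_value(times, values, t):
    """Linear keyframe lookup: clamp outside the range, otherwise
    interpolate between the two samples that bracket t (the pattern
    used by the interpolation passages above)."""
    if t <= times[0]:
        return values[0]
    if t >= times[-1]:
        return values[-1]
    i = bisect_left(times, t)  # first keyframe at or after t
    f = (t - times[i - 1]) / (times[i] - times[i - 1])
    return values[i - 1] + f * (values[i] - values[i - 1])

# Hypothetical keyframes: value ramps 0->10 over [0,1], then 10->20 over [1,3].
print(keyframe_value([0.0, 1.0, 3.0], [0.0, 10.0, 20.0], 2.0))  # 15.0
```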
70ee95c12ccaa52305a62aa13c0596da
List the keys under path, if any, of the original store.
[ { "docid": "67ed1062aee6a8d18ebc3daeff4a9172", "score": "0.6513379", "text": "def listdir(self, path: str = None) -> List[str]:\n return zarr.storage.listdir(self._store, path)", "title": "" } ]
[ { "docid": "fedbcb88517228f6e49224e6c7ab64ae", "score": "0.7502651", "text": "def traversed_keys(self, root_path: str) -> List[str]:\n return self.redis_client_traversed.hkeys(root_path)", "title": "" }, { "docid": "37907698267ce4b2bc7e19d9e795d099", "score": "0.7457224", "text": "def listdir(self):\n\n if self._path not in self._fs._dirs:\n raise UnlistableError()\n\n i = chain(self._fs._dirs, self._fs._store.iterkeys())\n\n # Linear-time search. Could be better.\n p = self._path\n l = len(p) + 1\n ks = [t[-1] for t in i if t[:-1] == p and len(t) == l]\n\n return ks", "title": "" }, { "docid": "6aa2649548acf2fae83833a7b837eb59", "score": "0.7161985", "text": "def keys(self):\n try:\n return self.__storage__.keys()\n except:\n return []\n #return self.__state.keys()", "title": "" }, { "docid": "4a635d4524904026319caa0ac5886e96", "score": "0.70467705", "text": "def list_keys(self):\n self.automate_handle()\n return self.kcl.list_keys(self.handle)", "title": "" }, { "docid": "98048518d74d20c1495ac43433054718", "score": "0.699317", "text": "def listkeys(self):\n\n\t\tif not self.root is None:\n\t\t\treturn self.root.listkeys()\n\t\telse:\n\t\t\treturn []", "title": "" }, { "docid": "45eeb5bca6857d1d25aa73e938ae45c5", "score": "0.69510627", "text": "def keys(self):\n return self._store.keys()", "title": "" }, { "docid": "45eeb5bca6857d1d25aa73e938ae45c5", "score": "0.69510627", "text": "def keys(self):\n return self._store.keys()", "title": "" }, { "docid": "45eeb5bca6857d1d25aa73e938ae45c5", "score": "0.69510627", "text": "def keys(self):\n return self._store.keys()", "title": "" }, { "docid": "04948e5f43fafcd2400c8aed44838e71", "score": "0.6910418", "text": "def get_keys(self):\n return self.keys", "title": "" }, { "docid": "fd1769d606ca9bde8a2471be456ab835", "score": "0.68885016", "text": "def keys():\n return _keys", "title": "" }, { "docid": "f5e5e753065a864b38650caf0302cce3", "score": "0.68775463", "text": "def get_keys(path: Union[str, Path]) -> List[str]:\n path = _get_valid_hdf_path(path)\n with tables.open_file(str(path)) as file:\n keys = _get_keys(file.root)\n return keys", "title": "" }, { "docid": "5a372f201e68abc39bffcc3aac400665", "score": "0.6820589", "text": "def keys(self) -> List[str]:\n pass", "title": "" }, { "docid": "248d3b0e5677f9481e5f979648f53060", "score": "0.67937136", "text": "def listkeys(self):\n\n\t\tl = []\n\n\t\tif not self.left is None:\n\t\t\tl.extend(self.left.listkeys())\n\n\t\tl.append(self.key)\n\n\t\tif not self.right is None:\n\t\t\tl.extend(self.right.listkeys())\n\n\t\treturn l", "title": "" }, { "docid": "30a0c2b213c93766d87dd58404c453af", "score": "0.6758386", "text": "def iterkeys(self):\n\n return self.storage.iterkeys()\n #return self.__state.iterkeys()", "title": "" }, { "docid": "8b75ad9866dd3db3a6acf04809f3244f", "score": "0.6737822", "text": "def _keys(self) -> AbstractSet[str]:\n if self._cached_keys is None:\n self._cached_keys = self._store.keys()\n return self._cached_keys", "title": "" }, { "docid": "2cdf586783a5d2dfdf31ac601eebb700", "score": "0.67342883", "text": "def key_list(self):\n return self._key_list", "title": "" }, { "docid": "2554256781cb92b2dd821df2bcc5ea5b", "score": "0.67309415", "text": "def keys(self):\n return self._storage.keys()", "title": "" }, { "docid": "4153849c74df4f61db7313f41d3aafcf", "score": "0.67070645", "text": "def get_keys(self):\n return list(self.keys())", "title": "" }, { "docid": "3ee6ae19c1d139f4a0de2ec41ffc1b01", "score": "0.6659498", "text": "def get_keys(self):\n raise NotImplemented", 
"title": "" }, { "docid": "8bc31ba0e19e019a92abfe0768933eb1", "score": "0.66281956", "text": "def keys(self):\n return [row[0] for row in self._database.execute(\"SELECT Key FROM Shelf\")]", "title": "" }, { "docid": "b030dd0d7b687bdb1a22992ff50df2c8", "score": "0.66176766", "text": "def keys(self):\n\t\treturn self._keys", "title": "" }, { "docid": "8b8f721f3c601846849adc0c6dc12638", "score": "0.6596256", "text": "def keys(self):\n return list(self)", "title": "" }, { "docid": "8b8f721f3c601846849adc0c6dc12638", "score": "0.6596256", "text": "def keys(self):\n return list(self)", "title": "" }, { "docid": "ad7e7602fd6334c336c9c38bec029463", "score": "0.65922254", "text": "def keys(self):\n return list(self._keys)", "title": "" }, { "docid": "254e5846c5c1098d482491c50b970772", "score": "0.6525932", "text": "def all_paths_list(self):\n return self._db.fetch_col(\"SELECT path FROM cms_paths_hashes\")", "title": "" }, { "docid": "7f417b44442ae3427692726d77ad1d71", "score": "0.6514444", "text": "def list_all():\n return KEY_DIR.glob(\"*.key\")", "title": "" }, { "docid": "beb9bdda6c89a1b3ea8ec6322a052365", "score": "0.6507522", "text": "def get_keys(self):\n return self.f_redis.keys()", "title": "" }, { "docid": "8f5fdf09a60d34c3cdb1fbc75ab81939", "score": "0.64822406", "text": "def keys(self):\n keys = []\n for item in self.stored_configuration:\n if item['key'] != 'common':\n keys.append(item['key'])\n return keys", "title": "" }, { "docid": "6ac40797b587a34f4854483ecdcad7e8", "score": "0.6475047", "text": "def list_keys(self, bucket_name, prefix=None):\n key_list = []\n\n if prefix:\n root = os.path.join(STORAGE_FOLDER, bucket_name, prefix)\n else:\n root = os.path.join(STORAGE_FOLDER, bucket_name)\n\n for path, subdirs, files in os.walk(root):\n for name in files:\n if prefix:\n key_list.append(os.path.join(prefix, path.replace(root+'/', ''), name))\n else:\n key_list.append(os.path.join(path.replace(root+'/', ''), name))\n\n return key_list", "title": "" }, { "docid": "eda326825acb2c8b532dd76eb0c0d138", "score": "0.6473079", "text": "def keys(self):\n return self.root.keys", "title": "" }, { "docid": "e33cbc56d83d6e36896055f547aaf68c", "score": "0.6464677", "text": "def list_keys(self, prefix=''):\n try:\n bucket = self.get_buck()\n lister = bucket.list(prefix)\n\n for key in lister:\n yield key.name\n except ssl.SSLError:\n yield", "title": "" }, { "docid": "a957b057e47e489e888c7f86f18d75a4", "score": "0.64603865", "text": "def keys(self):\n return list(self.iterkeys())", "title": "" }, { "docid": "a957b057e47e489e888c7f86f18d75a4", "score": "0.64603865", "text": "def keys(self):\n return list(self.iterkeys())", "title": "" }, { "docid": "373ff0d62d40854573ab78c8abfde618", "score": "0.6441174", "text": "def keys(self):\n return [u for u in self.iterkeys()]", "title": "" }, { "docid": "20d5a421312e5052c939c42f60d4b434", "score": "0.6433927", "text": "def keys(self, bucket, user=None):\n return (d[2] for d in os.walk(os.path.join(self.directory, bucket)))", "title": "" }, { "docid": "db8f59e316f81d5ee25d35061252d038", "score": "0.6374483", "text": "def keys(self):\n\t\tif not self._cache_populated:\n\t\t\tself._populate_cache()\n\t\treturn self._cache.keys()", "title": "" }, { "docid": "b2ff1fac0752a1e5f939401b0831727f", "score": "0.63630563", "text": "def list_keys(self, bucket_name, prefix=None):\n prefix = prefix or ''\n redis_prefix = self._format_key(bucket_name, prefix)\n\n pdir = '/'.join(redis_prefix.split('/')[:-1]) + '/'\n dir_keys = [key.decode() for key in 
self._client.smembers(pdir)]\n key_list = []\n\n for key in dir_keys:\n full_key = pdir + key\n if full_key.startswith(redis_prefix):\n if full_key.endswith('/'):\n key_list.extend(self._walk(bucket_name, full_key))\n else:\n key_list.append(full_key)\n\n offset = len(bucket_name) + 1\n return [key[offset:] for key in key_list]", "title": "" }, { "docid": "133076af977a0e9f16863d647f05d497", "score": "0.636247", "text": "def keys(self) -> Iterator[str]:\n raise NotImplementedError", "title": "" }, { "docid": "9a31c7ec9fe6a54e4949060bba7c2b9c", "score": "0.63469154", "text": "def keys(self):\n return (k for k in self._keys)", "title": "" }, { "docid": "00584b6ece7bac0308ed7cc8d54f1df9", "score": "0.6345846", "text": "def keys():", "title": "" }, { "docid": "790aa09d21b90d1d7be2db819d2416e8", "score": "0.6340655", "text": "def list_by_prefix(self, prefix):\n filtered = self._bucket.objects.filter(Prefix=prefix)\n return list(obj.key for obj in filtered)", "title": "" }, { "docid": "219f0f05773fc5e803fcfd43f5945a00", "score": "0.63325286", "text": "def keys(self):\n return self.__keys", "title": "" }, { "docid": "80ae1eaa5973a681b843812a47fead9d", "score": "0.6330527", "text": "def keys(tokens):\n\n return [\n token.path\n for token in tokens\n if isinstance(token, Key)\n ]", "title": "" }, { "docid": "e2faac7fc5d7115ca72fafeecadedc0c", "score": "0.6322146", "text": "def keys(self):\n return list(self._registry.keys())", "title": "" }, { "docid": "98fd02ff1df531f81d1b094b7df307d4", "score": "0.6317723", "text": "def keys(self):", "title": "" }, { "docid": "37f77f0978327d530c472fdc8c3bf2e8", "score": "0.63081914", "text": "def keys(self) -> dict_keys:\n return self._stored.keys()", "title": "" }, { "docid": "3294f2c18118a9800b0f4450677bba3d", "score": "0.6307059", "text": "def keys(self):\n \n keylist = []\n \n for key in self.iterkeys():\n keylist.append(key)\n\n return keylist", "title": "" }, { "docid": "bd688856634ffd47d815f213de5aa8f4", "score": "0.629564", "text": "def keys(self):\n\n return list(self.keys())", "title": "" }, { "docid": "72964863465444a1d961d2de8ef33bfa", "score": "0.62811106", "text": "def get_keys(self):\n try:\n self.cur.execute(\"SELECT DISTINCT ON (key) key \"\n \"FROM public.entities\")\n logger.debug(\"Retrieving keys from shopify database\")\n list_of_keys = [x[0] for x in self.cur.fetchall()]\n return list_of_keys\n except psycopg2.Error as e:\n self.conn.rollback()\n logger.exception(e.pgerror)\n raise DatabaseError(e.pgerror)", "title": "" }, { "docid": "793ac9bd72f142e6c73ff450b789ea69", "score": "0.6276947", "text": "def list(self):\n session_keys = self.redis.keys(self.key_template[:-2] + '*')\n return [s[len(self.key_template)-2:] for s in session_keys]", "title": "" }, { "docid": "a8ab1a13381ab73aaf8e9d6a5132faab", "score": "0.62743986", "text": "def keys(self):\n raise NotImplementedError", "title": "" }, { "docid": "a8ab1a13381ab73aaf8e9d6a5132faab", "score": "0.62743986", "text": "def keys(self):\n raise NotImplementedError", "title": "" }, { "docid": "62fc10b349675a667522b7c536ed41fc", "score": "0.62735236", "text": "def getKeysList(self, dict):\n list = []\n for key in dict.keys():\n list.append(key)\n return list", "title": "" }, { "docid": "07684118902c7556820c8c1dc58c8093", "score": "0.6259769", "text": "def list(self):\n return list(self.keys())", "title": "" }, { "docid": "0faf41de3d66b04c280bf651148b1a16", "score": "0.62584966", "text": "def keys(self):\n super_keys = dict.keys(self)\n my_keys = []\n for keys in super_keys:\n my_keys += 
list(keys)\n\n return my_keys", "title": "" }, { "docid": "d72024e9cf85c881b55f1d85b86e430c", "score": "0.6239055", "text": "def keys(self):\n raise NotImplementedError()", "title": "" }, { "docid": "f37a9781d4606912a55fb338ac59acf3", "score": "0.6238693", "text": "def untranslated_keys(self):\n return [self._storage[k][1] for k in self._storage]", "title": "" }, { "docid": "5cef338a24bbbc498021bd3bb96537f4", "score": "0.62290853", "text": "def list(self) -> List[str]:\n return list(self.keys())", "title": "" }, { "docid": "da2e54603781cb53effeebf6754d304e", "score": "0.62283486", "text": "def keys(self):\n self.synch()\n for k in self.storage.keys():\n yield self.key_conv.deserialize(k)", "title": "" }, { "docid": "232005db55523656bd6766be3b7f94bf", "score": "0.62247896", "text": "def iterkeys(self):\n if not self._data:\n self._data = self.poll()\n\n for file in self._data:\n yield file[\"path\"]", "title": "" }, { "docid": "f51df4dba857fab3c10ad83034469eb6", "score": "0.6216685", "text": "def keys(self):\n return list(self._DL.keys())", "title": "" }, { "docid": "652a68454bbf2f1762686e6cdbf71f03", "score": "0.6215189", "text": "def keys(self) -> Sequence[str]:\n return self.__keys", "title": "" }, { "docid": "026e17ed2ccf1063a2e095b099c0ae64", "score": "0.6213427", "text": "def namelist(self):\n namelist = self.parent.namelist()\n return [item for item in namelist if item.startswith(self.key)]", "title": "" }, { "docid": "9eab05d3f9e0be11d5e7e216436a93dd", "score": "0.6209803", "text": "def keys(self):\n return self.Keys", "title": "" }, { "docid": "0e4d461dbb1e2977f376d560cd407798", "score": "0.6203239", "text": "def _internal_kv_list(prefix):\n if isinstance(prefix, bytes):\n pattern = prefix + b\"*\"\n else:\n pattern = prefix + \"*\"\n return ray.worker.global_worker.redis_client.keys(pattern=pattern)", "title": "" }, { "docid": "fb80083cc8bc110a4b2e20fee2aa2e09", "score": "0.61997396", "text": "def keys(self):\n return self.rpcl('','')", "title": "" }, { "docid": "05f7a192c55ef0aec52679874a22e7bb", "score": "0.617626", "text": "def keys(self):\n return [v[0] for v in self._dict.values()]", "title": "" }, { "docid": "b665ef778d4ab6227aed9d01b17d17ed", "score": "0.61623245", "text": "def ls(self, path):\n return os.path.listdir(path)", "title": "" }, { "docid": "deb897fa2984ed2457df63c766acffd7", "score": "0.61621964", "text": "def get_keys(self):\n return list(map((lambda x: x[DEST]), self.routes.copy()))", "title": "" }, { "docid": "22ff98b5cd16ecc03e788e6e6972da09", "score": "0.6158181", "text": "def keys(self):\n return self._db_keys(self._db, self._reconstruct_obj)", "title": "" }, { "docid": "4bafb6eafc072380c780c9953009d867", "score": "0.61563", "text": "def keys(self):\n return list(self.__entries.keys())", "title": "" }, { "docid": "5238be11fe4067485df1496700309826", "score": "0.6155828", "text": "def keys(self):\n\t\treturn dir(self).keys()", "title": "" }, { "docid": "7f44d426ec669cada50cdb8b053dd1d1", "score": "0.6155747", "text": "def GetDirList(self, path):\n props = self.__norm_path_keys(self.client.propfind(path, depth=1))\n # remove this path\n del props[os.path.normpath(path)]\n return [os.path.basename(p) for p in props.keys()]", "title": "" }, { "docid": "b08b394b2ec094d9e63b6135b374a229", "score": "0.6152318", "text": "def getKeys(self):\n return self.keysArray", "title": "" }, { "docid": "71c40711a586ec5b74c38621429fbb81", "score": "0.61424494", "text": "def keys(self):\n return [n.key for n in self.nodes()]", "title": "" }, { "docid": 
"f536b62d04a5a65664a12d7de225fa4d", "score": "0.61362773", "text": "def list(self):\n # find all files in the encrypted directory\n keys = self.bucket.list('%s/' % self.gpg_email)\n document_names = map(lambda key: key.name, keys)\n\n # don't show directory in listing\n document_names = map(lambda name: name.replace('%s/' % self.gpg_email, ''),\n document_names)\n return document_names", "title": "" }, { "docid": "deebd9689ab2697995a39c07f78188b3", "score": "0.6134762", "text": "def listdir(self, path):\n return ([], self.container.list_objects(path=path))", "title": "" }, { "docid": "e63eb5ecce410a99fb597ceef9ed5225", "score": "0.61342365", "text": "def collect_keys(self, only_parents=False):\n result = self.parent and self.parent.collect_keys() or []\n if not only_parents:\n result += getattr(self, 'keys', ())\n return result", "title": "" }, { "docid": "dd50b1d0af6b75ff189b7804111a2fb8", "score": "0.6124687", "text": "def ls(self, path):\n return self.list(path)", "title": "" }, { "docid": "d69f855cd704991c22a0e910fcaf101e", "score": "0.6115973", "text": "def keys(self) -> Iterable[KV]:\n return self._result_cache.keys()", "title": "" }, { "docid": "c9e7bb7e87a309261f733e1f38f09b9a", "score": "0.61143994", "text": "def keys(self) -> AbstractSet[str]:\n return self._keys()", "title": "" }, { "docid": "97941c89e839aae2ea9d80b6fc8b6e5a", "score": "0.6113517", "text": "def keys(self) -> KeysView:\n return self._dict.keys() # type: ignore", "title": "" }, { "docid": "490a2c29acf38d9b6da253b53f79e106", "score": "0.6104836", "text": "def keys(self, private=False):\n return self.gnupg.list_keys(private)", "title": "" }, { "docid": "ee18f5e305db99a4c3825ff9a6441801", "score": "0.61041945", "text": "def iterkeys(self):\n for name, folder in self.iteritems(): yield name", "title": "" }, { "docid": "eeb450d8acea98109104563cdb0cc5d7", "score": "0.61017627", "text": "def list_stores(self):\n return [store.prefix for store in self._stores.values()]", "title": "" }, { "docid": "390f94d2ef19b0e8a9c2b488c4e0ac65", "score": "0.6101698", "text": "def keys(self):\n for k in self.__index.keys():\n yield k", "title": "" }, { "docid": "629e819b1c6c3c046be9829a053ffd6e", "score": "0.6099832", "text": "def keys(self) -> AsyncIterable[str]:", "title": "" }, { "docid": "2b3a8aadd87e7109b072b083dd1cda7d", "score": "0.6098269", "text": "def keys(self):\n yield from self.index.keys()", "title": "" }, { "docid": "c767932774d8273a47e4f698acee4b9e", "score": "0.60764456", "text": "def keys(self, pattern):\n return self.client.keys(pattern)", "title": "" }, { "docid": "82a4340ed21793cf0973a84111307855", "score": "0.6074092", "text": "def get_stored_list(self, key):\n return self._redis.lrange(name=key, start=0, end=-1)", "title": "" }, { "docid": "fe9269b9884ed18df9b6e4212e2385eb", "score": "0.60739213", "text": "def keys(self):\n return self.__cfg.keys()", "title": "" }, { "docid": "fa35043b6c4e2db2085d541e98a591ef", "score": "0.60543495", "text": "def loadkeys(keys):\n return []", "title": "" }, { "docid": "0b48b5a3598bf1f8006f65c5ccd7b85a", "score": "0.6053376", "text": "def get_unchaged_keys(aug_store, current):\n\n prefixes = [\":\".join(key.split(\":\")[:-1]) for key in aug_store.keys()]\n stable =[k for k in prefixes if k not in current]\n unchanged = [k for k in aug_store.keys() if \":\".join(k.split(\":\")[:-1]) in stable ]\n\n return unchanged", "title": "" }, { "docid": "d95b5690b34c954ba1cf803d2085b175", "score": "0.60490924", "text": "def keys(self, key: Optional[Text] = None) -> List[Text]:\n if key is 
None:\n return list(self.data.keys())\n\n if key in self.data:\n return list(self.data[key].keys())\n\n return []", "title": "" }, { "docid": "8f8be2bd6274aed26ebb00a42565a0fe", "score": "0.60289943", "text": "def _keys(self, **kwargs):\n layer = self.defaultLayer\n return layer.keys()", "title": "" }, { "docid": "1e8148ee8673e96851b03c26cb5dd6ec", "score": "0.6023591", "text": "def keys():\n raise NotImplemented", "title": "" }, { "docid": "5d961f06ef2a3bd949a9c1d099e0d6de", "score": "0.6014965", "text": "def traversed_path(self) -> List[str]:\n if not self.redis_client_traversed.hget(self.status.root_path, self.start_path):\n return list()\n return json.loads(\n self.redis_client_traversed.hget(\n self.status.root_path, self.start_path\n ).decode()\n )", "title": "" }, { "docid": "74da75dbc15ec2db31fb417d74caa72c", "score": "0.6014804", "text": "def items(self, path_root=None):\n keys = list(self._dict.keys())\n keys.sort()\n ret = []\n\n for key in keys:\n # filter out expired items\n if self._dict[key].is_expired():\n continue\n\n # filter out keys that do not match the root path\n if path_root and not key.startswith(path_root):\n continue\n\n ret.append((key, self._dict[key].value))\n\n return ret", "title": "" }, { "docid": "dcee6b77dd54326aa5fd4caba48d350e", "score": "0.60106474", "text": "def keys(self):\n\t\treturn self.data.keys()", "title": "" }, { "docid": "0e6b77a08672b3cf8a7e3f6083ae5c93", "score": "0.6007989", "text": "def listkeys(self) -> requests.Response:\n url = \"{}/api/v1/keys\".format(self.url)\n return self._httpsession.get(url, headers=self._header())", "title": "" } ]
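The query block above asks for the keys under a path in a store; its positive passage delegates to zarr's storage `listdir`, while several of the negatives filter a flat key space by prefix. A minimal sketch of that prefix logic over a plain dict follows; the store contents and the helper name are hypothetical, not the API of any passage above.

```python
def listdir(store, path=""):
    """List the immediate children under `path` in a flat key/value store
    whose keys use '/' as a separator (a sketch of the prefix filtering
    several passages above perform)."""
    prefix = path.rstrip("/") + "/" if path else ""
    children = set()
    for key in store:
        if key.startswith(prefix):
            # Keep only the first path component below the prefix.
            children.add(key[len(prefix):].split("/", 1)[0])
    return sorted(children)

# Hypothetical store contents for illustration.
store = {"a/x": 1, "a/y": 2, "a/b/z": 3, "top": 4}
print(listdir(store, "a"))  # ['b', 'x', 'y']
print(listdir(store))       # ['a', 'top']
```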
444eac86840ee38810c270d702dc34e7
Return True if the node is significant at the 5% level according to the chi-squared distribution with 1 df.
[ { "docid": "8a857b67f48c7662b3e4bb4e57d94841", "score": "0.76875913", "text": "def _significant(self, node):\n\t\tx_square_cutoff = 3.841 #1df @ 5% level\n\t\tx_square_node = float(\"inf\") \n\n\t\treturn x_square_node > x_square_cutoff", "title": "" } ]
[ { "docid": "556cfbe6e05fad6380053e80fac12608", "score": "0.5665601", "text": "def techChange_sunny(p):\n return p[:, 0] > 0.325", "title": "" }, { "docid": "fc21c6804aef56340de033ee2840d9c3", "score": "0.5657123", "text": "def __feat_num_significant(self, col, df_uni_var_norm, df_uni_var_un_norm, target_col, group_col, add_null):\n df_target_col = df_uni_var_norm[[target_col, group_col, col]]\n df_bin_col = self.__bin_generator.create_percentile_bins(df_target_col.copy(), [col], \n num_bins=self.__config[Constants.num_bins_numerical],\n add_null=add_null)\n binned_feats = [feat for feat in df_bin_col.columns if col in feat]\n\n sig = False\n\n for feat in binned_feats:\n num_c, len_c, num_t, len_t = self.__feat_info(df_bin_col[[target_col, group_col, feat]],\n feat,\n target_col,\n group_col)\n sig = self.__sig_check(num_c, len_c, num_t, len_t)\n if sig:\n break\n\n # if none of the binned features are significant return False, 0 impact\n if not sig:\n return False, 0, 0, 0, 0, 0\n\n # contribution on the non-normalized data set\n df_target_col = df_uni_var_un_norm[[target_col, group_col, col]]\n df_bin_col = self.__bin_generator.create_percentile_bins(df_target_col.copy(), [col], \n num_bins=self.__config[Constants.num_bins_numerical],\n add_null=add_null)\n binned_feats = [feat for feat in df_bin_col.columns if col in feat]\n\n expected = []\n actual = []\n contribution = []\n is_sig = []\n\n for feat in binned_feats:\n num_c, len_c, num_t, len_t = self.__feat_info(df_bin_col[[target_col, group_col, feat]], feat, target_col,\n group_col)\n contribution.append(num_t - num_c * len_t / len_c)\n actual.append(num_t)\n expected.append(num_c * len_t / len_c)\n is_sig.append(self.__sig_check(num_c, len_c, num_t, len_t))\n\n return True, binned_feats, is_sig, expected, actual, contribution", "title": "" }, { "docid": "288f86a1ae143dbb536b20452075b74a", "score": "0.5455548", "text": "def has_percent_row(minerals: List[str], df: pd.DataFrame) -> bool:\n return any(abs(x - 100) < EPSILON for x in df[minerals].sum(axis=1))", "title": "" }, { "docid": "ff1e36da300e087e37992359ce0f5fef", "score": "0.5452519", "text": "def __feat_cat_significant(self, col, df_uni_var_norm, df_uni_var_un_norm, target_col, group_col):\n num_c, len_c, num_t, len_t = self.__feat_info_cat(col, df_uni_var_norm, target_col, group_col)\n\n sig = self.__sig_check(num_c, len_c, num_t, len_t)\n if not sig:\n return False, 0, 0, 0\n\n # If number of drops is significant return the number of impacted calls on the original data set.\n num_c, len_c, num_t, len_t = self.__feat_info_cat(col, df_uni_var_un_norm, target_col, group_col)\n\n return True, num_c * len_t / len_c, num_t, num_t - num_c * len_t / len_c", "title": "" }, { "docid": "b6fe87d44a0d2a0ee2b6fd0d2541e5b9", "score": "0.5441717", "text": "def SSA_sensitivity(database_size):\r\n return 9 + (5/database_size)", "title": "" }, { "docid": "5301dfea61004bf4bdbac600cf7216bc", "score": "0.5410713", "text": "def pravdepodobnost():\n cislo = np.random.rand()\n\n if cislo >= 0.99:\n return False\n else:\n return True", "title": "" }, { "docid": "5098cb0757edb133d8c30fea185d9d22", "score": "0.5357177", "text": "def __bool__(self):\n\n return self.fitness == 28", "title": "" }, { "docid": "683f5fdb57b992fb4a186835a57ea876", "score": "0.5337217", "text": "def safe(self):\n return math.fabs(numpy.linalg.det(self.scatter)) > 1e-6", "title": "" }, { "docid": "a5f1e777edb7f9d8a24ff1ec74a515fa", "score": "0.53181183", "text": "def weighted(self):\n\t\tmatrix = self.get_matrix()\n\t\tdim = 
self.get_nodes()\n\t\tfor i in range(dim):\n\t\t\tfor j in range(dim):\n\t\t\t\tif matrix[i][j] != 0 and matrix[i][j] != 1:\n\t\t\t\t\treturn True\n\t\treturn False", "title": "" }, { "docid": "0992ec16f99913effc93c5560db59ca2", "score": "0.5310074", "text": "def is_over_weight(self,df):\r\n return int(df['bmi'] > 25 and df['bmi'] < 29.9)", "title": "" }, { "docid": "0b7c3c8f1c1433adb239fa23ee9359f8", "score": "0.529007", "text": "def getUtileDistinction_KS(self, node, p_significanceLevel=float(0.001)):\n\n assert node.nodeType == NodeLeaf\n\n root_utils = self.getEFDRs(node) # Get all expected future discounted returns for all instances in a node\n\n cds = self.getCandidateDistinctions(node) # Get all the candidate distinctions\n\n p_min = float(1)\n cd_min = None\n\n for cd in cds: # test all possible distinctions until find the one satisfy KS test\n child_utils = []\n self.splitToFringe(node, cd) # split to fringe node with split candidate\n # self.split(node,cd)\n for c in node.children:\n if len(c.instances) < self.minSplitInstances: # if not enough instance in a node, stop split\n continue\n child_utils.append(\n self.getEFDRs(c)) # Get all expected future discounted returns for all instances in a children\n # self.unsplit(node) # delete split fringe node\n\n for i, cu in enumerate(child_utils):\n k, p = ks_2samp(root_utils, cu)\n # print p\n if p < p_significanceLevel and p < p_min: # significance_level=0.00005, if p below it, this distinction is significant\n p_min = p\n cd_min = cd\n\n if len(root_utils) == len(cu):\n print 'find you'\n\n print(\"KS passed, p={}, d = {}, back={}\".format(p, cd.dimension, cd.back_idx))\n # print(root_utils)\n # print(cu)\n # elif p< 0.1:\n # print(\"KS failed, p={}. d= {}, back={}\".format(p,cd.dimension,cd.back_idx))\n self.unsplit(node) # delete split fringe node\n\n if cd_min:\n print(\"Will be split, p={}, d={}, back={}\".format(p_min, cd_min.dimension_name, cd_min.back_idx))\n # else:\n # print \"Not found\"\n return cd_min\n else:\n return None", "title": "" }, { "docid": "698d753fdfdfbd3df9a67c0e551473ee", "score": "0.5286751", "text": "def __convergence_test(self, cost):\n return cost <= self.threshold", "title": "" }, { "docid": "d3c6761a824f02195d5148bb3216555d", "score": "0.52796507", "text": "def cf_true(x):\n return is_cf(x) and x > CF.cutoff", "title": "" }, { "docid": "1c938fb0c1bd723663ef85e4090583af", "score": "0.5265434", "text": "def _compute_gfes(self):\n return self._det_dcv is not None", "title": "" }, { "docid": "cf4ee9468f107cf84d46f7d39832f313", "score": "0.5253931", "text": "def test_Cacciato09Cens5():\n model = Cacciato09Cens(threshold=9.5)\n clf = model.clf(model.median_prim_galprop(prim_haloprop=1e12), prim_haloprop=1e12)\n assert np.isclose(\n np.sqrt(2 * np.pi * model.param_dict[\"sigma\"] ** 2) ** (-1),\n clf,\n rtol=1e-6,\n atol=1.0e-6,\n )\n model.param_dict[\"sigma\"] = 0.24\n clf = model.clf(model.median_prim_galprop(prim_haloprop=1e13), prim_haloprop=1e13)\n assert np.isclose(\n np.sqrt(2 * np.pi * model.param_dict[\"sigma\"] ** 2) ** (-1),\n clf,\n rtol=1e-6,\n atol=1.0e-6,\n )", "title": "" }, { "docid": "5dba6ffbdb1d51f49cb8e69eddf025bb", "score": "0.5246929", "text": "def validate_t(self, solution, t):\r\n no_sense_prob = 0\r\n active_nodes = [i for i, a in enumerate(solution) if a != cf.SLEEP and self.network.get_node(i).energy >= (cf.SENSING_ENERGY +cf.COMMUNICATION_ENERGY)]\r\n\r\n for n in active_nodes:\r\n no_sense_prob += self.sensing_log_matrix[t][n]\r\n if no_sense_prob >= 
self.sensing_log_threshold:\r\n return True\r\n if no_sense_prob < self.sensing_log_threshold:\r\n return False\r\n else:\r\n return True", "title": "" }, { "docid": "6f91ded376aa0411f143afd5996a6780", "score": "0.5231144", "text": "def is_statistically_significant(self):\n return self.fall_in_critical_region()", "title": "" }, { "docid": "a2f9b8f3d95405f594c4a6feed052ef0", "score": "0.52248955", "text": "def SSE_sensitivity():\r\n return 7", "title": "" }, { "docid": "69a5e030bab96f26aafc90941f1d4701", "score": "0.52139014", "text": "def oversampled(semibmaj, semibmin, x=30):\n return semibmaj > x or semibmin > x", "title": "" }, { "docid": "66d1cbd6c11e381fe8e633e2980fa584", "score": "0.5210077", "text": "def set_threshold(df, threshold):\n return df[\"rating\"] >= threshold", "title": "" }, { "docid": "be9ea8fc2921361876d9ca665f8d5279", "score": "0.52058774", "text": "def check_eigval(self):\n iok = True\n thresh = self.param.thresh", "title": "" }, { "docid": "5312fc342c14a75d5b5bcf167fbf75d2", "score": "0.5201662", "text": "def snowcover(self):\n return self.swe > 0.0", "title": "" }, { "docid": "b414b0df04d5a58e342c96c42f8e4cd1", "score": "0.51848173", "text": "def is_on(self) -> bool:\n return (\n self._func_channel.dimLevel is not None\n and self._func_channel.dimLevel > 0.0\n )", "title": "" }, { "docid": "d9404e8cc9aeaf2e2e7dff934e2b5ecd", "score": "0.51786757", "text": "def has_unit_root(self, data_array, sig=.05):\n adf = adfuller(data_array)\n pvalue = adf[1]\n # print(pvalue)\n if pvalue < sig:\n return False\n else:\n return", "title": "" }, { "docid": "d8633bc4986c4e6f82f4a311ce24ac23", "score": "0.51761365", "text": "def is_perfect(self):\n h = self._get_max_depth(self._root)\n num = self._get_num_of_node(self._root)\n return (num == (2 ** h - 1))", "title": "" }, { "docid": "49dff13affac125d2382cb69718c501a", "score": "0.5174382", "text": "def is_normal(r , level = 0.1):\n statistic, p_value=scipy.stats.jarque_bera(r)\n return p_value > level", "title": "" }, { "docid": "162d90b19b80cbf02fa982cc5c9759b6", "score": "0.51675516", "text": "def check_f(row):\n if row['TRTCD1'] == 50.0:\n return 1\n if row['TRTCD2'] == 50.0:\n return 1\n if row['TRTCD3'] == 50.0:\n return 1\n return 0", "title": "" }, { "docid": "b028c3185d861084186314f0ba48356d", "score": "0.5159486", "text": "def check_dataset(self, dataset):\n in_support = np.sum(\n (dataset.x >= self.min_x) * (dataset.x <= self.max_x),\n axis=1) == self.num_p\n print(\"percent in support\", np.sum(in_support) / dataset.num_obs)\n return np.sum(in_support) == dataset.num_obs", "title": "" }, { "docid": "f08a77fec455c5dbad94dc0effede95d", "score": "0.51545405", "text": "def test(self, df, target):\n\n model = DecisionTreeClassifier(random_state=0)\n\n kf = KFold(n_splits=10, shuffle=True, random_state=12345678)\n res = cross_val_score(model, df, target, cv=kf, scoring='f1_weighted')\n\n return res.mean()", "title": "" }, { "docid": "3bfe43c45443a2150c231e4242705995", "score": "0.5126107", "text": "def need_add_outlier3(self, node, shortened_path):\n if node.id not in self.outliers3:\n return True\n if shortened_path < self.outliers3[node.id].shortened:\n return True", "title": "" }, { "docid": "f20af5806bf23cb9e74cadb961322499", "score": "0.51235455", "text": "def get_decision(mean_diff):\n\n if mean_diff > -5:\n return True\n else:\n return False", "title": "" }, { "docid": "0c83d57c90ee944456989aff7ddda5de", "score": "0.51080835", "text": "def s_in_small_g(self):\n\n for node in self.core_s:\n if not self.core_s[node]:\n 
return False\n return True", "title": "" }, { "docid": "6c025a1d41543ea942da0893991f103f", "score": "0.51033574", "text": "def _node_cut(self, configs, wf):\n ne = configs.configs.shape[1]\n d2 = 0.0\n for e in range(ne):\n d2 += np.sum(wf.gradient(e, configs.electron(e)) ** 2, axis=0)\n r = 1.0 / (d2 * ne * ne)\n return r < self.nodal_cutoff ** 2", "title": "" }, { "docid": "ab2ca107a549b01047162c7415a92ab1", "score": "0.5095412", "text": "def FNratio_binned(E): \n if E <=0 :\n fnr = 0.0\n elif E > 0.0 and E <= 0.5 :\n fnr = 1.56980937443\n elif E > 0.5 and E <= 1.0 :\n fnr = 1.68239230683\n elif E > 1.0 and E <= 1.5 :\n fnr = 1.59944451298\n elif E> 1.5 and E<=2.0 :\n fnr = 1.4945939881\n elif E>2.0 and E<=2.5 :\n fnr = 1.35689923574\n elif E>2.5 and E<=3.0 :\n fnr = 1.23797378825\n elif E>3.0 and E<=3.5 :\n fnr = 1.17535790024\n elif E>3.5 and E<=4.0 :\n fnr = 1.23006538251\n elif E>4.0 and E<=4.5 :\n fnr = 1.41616622315\n elif E>4.5 and E<=5.0 :\n fnr = 1.55534133432\n elif E>5.0 and E<=5.5 :\n fnr = 1.57093938166\n elif E>5.5 and E<=6.0 :\n fnr = 1.46452790065\n elif E>6.0 and E<=6.5 :\n fnr = 1.3737572329\n elif E>6.5 and E<=7.0 :\n fnr = 1.29550053164\n elif E>7.0 and E<=7.5 :\n fnr = 1.27981366277\n elif E>7.5 and E<=8.0 :\n fnr = 1.24848762549\n elif E>8.0 and E<=9.0 :\n fnr = 1.26719442633\n elif E>9.0 and E<=10.0 :\n fnr = 1.24833629993\n elif E>10.0 and E<=11.0 :\n fnr = 1.26389158134\n elif E>11.0 and E<=12.0 :\n fnr = 1.34670943259\n elif E>12.0 and E<=14.0 :\n fnr = 1.41853479724\n elif E>14.0 and E<=16.0 :\n fnr = 1.56070845743\n elif E>16.0 and E<=18.0 :\n fnr = 1.55427712082\n elif E>18.0:\n fnr = 1.55427712082\n return (fnr*1.0e-6)", "title": "" }, { "docid": "3f9f1fcb4afbf62e2e877b2bcd1d36e5", "score": "0.5087331", "text": "def test_Cacciato09Sats5():\n model = Cacciato09Sats(threshold=9.0)\n lum_mc = model.mc_prim_galprop(prim_haloprop=np.ones(int(1e5)) * 5e13, seed=1)\n assert np.all(lum_mc >= 10 ** model.threshold)\n\n # Check that luminosities follow the expected distribution.\n def cdf(lum):\n return np.array(\n [\n (\n model.mean_occupation(prim_haloprop=5e13)\n - model.mean_occupation(prim_haloprop=5e13, prim_galprop_min=l)\n )\n / model.mean_occupation(prim_haloprop=5e13)\n for l in lum\n ]\n )\n\n p_value = kstest(lum_mc, cdf)[1]\n assert p_value > 0.001", "title": "" }, { "docid": "04afc103353679d41c0db2a774391d17", "score": "0.50792843", "text": "def sat(n: int, x=329437923.5):\n return abs(n - x) <= 0.5", "title": "" }, { "docid": "b3cd06496ec28e40ef1b7260e38aca1a", "score": "0.5044325", "text": "def _stat_chi2(self):\n model = self._model(self.data.e_ref)\n data = self.data.table[\"dnde\"].quantity\n sigma = self.data.table[\"dnde_err\"].quantity\n return ((data - model) / sigma).to(\"\").value ** 2", "title": "" }, { "docid": "bd060625ac1a8a4f67faa8d28ad9dee0", "score": "0.5042267", "text": "def has_solution(self, edges, dom_val):\n s = dom_val\n for e in edges:\n s = s | e\n return s == self.graph.dominant_value", "title": "" }, { "docid": "0ec2f5126db1b1de8baa297e7c548b0f", "score": "0.50391954", "text": "def chi_squared(df, target_col_str):\n\n print('Ho: No significant relationship, feature is independent of target\\n')\n\n categorical_cols = df.select_dtypes(['object']).columns.tolist()\n print('Features tested: {}'.format(categorical_cols))\n\n def chi_test(c):\n ct = pd.crosstab(df[target_col_str], df[c])\n chi2, p, dof, expected = scs.chi2_contingency(ct)\n exp_under_five_percent = np.sum(expected < 5) / len(expected.flat)\n\n if 
exp_under_five_percent > 0.2:\n return None\n else:\n return p\n\n chi_dict = {c: chi_test(c) for c in categorical_cols}\n\n chi_df = pd.DataFrame.from_dict(chi_dict, orient='index')\n chi_df.columns = ['p_value']\n\n chi_df = chi_df.sort_values(by='p_value', ascending=True)\n\n plt.style.use('fivethirtyeight')\n\n chi_df['p_value'].sort_values(ascending=True).plot(kind='barh', use_index=True, color='deepskyblue',\n title='Chi-squared Test for Independence with Target')\n plt.xlabel('p-value')\n plt.xscale('log')\n p_line = plt.axvline(x=0.05, color='darkorange', label='p-value cut-off', lw=2)\n plt.legend([p_line], ['Significant p-value\\ncut-off (95% CI)'], bbox_to_anchor=(1.05, 1), loc=2)\n plt.grid(False)\n plt.show()\n\n return chi_df", "title": "" }, { "docid": "606d65620831db9fc7dbc53be0e0d683", "score": "0.5024171", "text": "def classify(y):\r\n c = np.where(y < 0.5, 0, 1)\r\n return c", "title": "" }, { "docid": "3f1d9496fa1a47bebc3971643245d4a6", "score": "0.5011161", "text": "def check_dataset(self, dataset):\n new_x = self.pca.transform(dataset.x.reshape((dataset.x.shape[0], -1)))\n in_support = np.sum(\n (new_x >= self.min_x) * (new_x <= self.max_x),\n axis=1) == self.num_p\n print(\"percent in support\", np.sum(in_support) / dataset.num_obs)\n return np.sum(in_support) == dataset.num_obs", "title": "" }, { "docid": "a19bcab237bce40c63a9889fef3286ef", "score": "0.50060344", "text": "def true_decision_function(input_features):\n if input_features[0] <= n_bins // 2:\n return -1\n else:\n if input_features[1] <= n_bins // 3:\n return -1\n else:\n return 1", "title": "" }, { "docid": "11de9fca78d1d04b63910f6710ce9449", "score": "0.5001753", "text": "def is_quantized_weights(node: NNCFNode, nncf_graph: NNCFGraph) -> bool:\n for prev_node in nncf_graph.get_previous_nodes(node):\n if prev_node.node_type in OP_NAMES_QUANTIZE_NODE:\n return True\n return False", "title": "" }, { "docid": "94f154bce994db0008fcf9d2069cd4f7", "score": "0.50000864", "text": "def operator_test5():\n\n l = 5.0\n dx = 0.02\n x = np.arange(0, l+dx, dx)\n\n vx = pk.linear_ramp(x)\n H = pk.build_hamiltonian(x, vx, dx=dx)\n\n evals, evecs = np.linalg.eigh(H)\n\n for i in range(5):\n psi = evecs[:, i]\n psi = pk.normalize_wfn(x, psi)\n pdf = pk.prob_density(psi)\n\n plt.plot(x, psi)\n plt.plot(x, pdf)\n plt.show()", "title": "" }, { "docid": "079cd691420fc207745ae4d3c28ce298", "score": "0.49968618", "text": "def cond_exp_tree(self, x, tree, S, algo=\"shap\", N=50000, data=None):\n\n if type(data) != type(self.data):\n data = self.data\n\n a = tree.children_left\n b = tree.children_right\n f = tree.feature\n t = tree.threshold\n r_w = tree.weighted_n_node_samples\n v = np.array([tree.value[i][0][0] for i in range(tree.node_count)])\n index = range(x.shape[0])\n\n def G(i, w):\n\n if f[i] == -2:\n return float(w * v[i])\n else:\n if (f[i] in S):\n if x[f[i]] <= t[i]:\n return G(a[i], w)\n else:\n return G(b[i], w)\n else:\n return G(a[i], w * r_w[a[i]] / r_w[i]) + G(b[i], w * r_w[b[i]] / r_w[i])\n\n def explore(i, tab):\n\n if f[i] == -2:\n tab[i] = 1\n else:\n if f[i] in S:\n if x[f[i]] <= t[i]:\n explore(a[i], tab)\n else:\n explore(b[i], tab)\n else:\n explore(a[i], tab)\n explore(b[i], tab)\n\n def get_partition(leaf_id, part):\n\n left = np.where(tree.children_left == leaf_id)[0]\n right = np.where(tree.children_right == leaf_id)[0]\n\n if (len(left) == 0) * (len(right) == 0):\n return (part)\n\n else:\n if len(right) != 0:\n right = int(right[0])\n\n part[f[right]] = np.concatenate((part[f[right]], 
np.array([[t[right], np.inf]])))\n return get_partition(right, part)\n else:\n left = int(left[0])\n part[f[left]] = np.concatenate((part[f[left]], np.array([[-np.inf, t[left]]])))\n return get_partition(left, part)\n\n def get_final_partition(part):\n final_partition = {}\n for i, var_part in enumerate(part):\n final_partition[i] = [np.max(var_part[:, 0]), np.min(var_part[:, 1])]\n return final_partition\n\n def explore_part(i, tab, partition_leaves, partition_global, prob_global, S, S_bar, data, intv=False):\n if f[i] == -2:\n # tab[i] = 1\n compatible_leaves.append(i)\n partition_global[i] = partition_leaves\n partition_leaves = np.squeeze(np.array(partition_leaves))\n # partition_leaves = np.reshape(np.array(partition_leaves), (self.d + 2, 2)).T\n # partition_leaves = pd.DataFrame(partition_leaves)[S]\n low = FloatVector(partition_leaves[:, 0][S_bar])\n up = FloatVector(partition_leaves[:, 1][S_bar])\n\n prob_global[i] = cond.pcmvnorm(low, up, self.r_mean, self.r_cov, c_index, given, FloatVector(x[S]))[0]\n # print(prob_global[i])\n\n else:\n if f[i] in S:\n if x[f[i]] <= t[i]:\n part = partition_leaves.copy()\n part[f[i]] = np.concatenate((part[f[i]], np.array([[-np.inf, t[i]]])))\n part[f[i]] = np.array([[np.max(part[f[i]][:, 0]), np.min(part[f[i]][:, 1])]])\n explore_part(a[i], tab, part, partition_global, prob_global, S, S_bar, data, intv)\n else:\n part = partition_leaves.copy()\n part[f[i]] = np.concatenate((part[f[i]], np.array([[t[i], np.inf]])))\n part[f[i]] = np.array([[np.max(part[f[i]][:, 0]), np.min(part[f[i]][:, 1])]])\n explore_part(b[i], tab, part, partition_global, prob_global, S, S_bar, data, intv)\n else:\n part = partition_leaves.copy()\n part[f[i]] = np.concatenate((part[f[i]], np.array([[-np.inf, t[i]]])))\n part[f[i]] = np.array([[np.max(part[f[i]][:, 0]), np.min(part[f[i]][:, 1])]])\n\n part_2 = partition_leaves.copy()\n part_2[f[i]] = np.concatenate((part_2[f[i]], np.array([[t[i], np.inf]])))\n part_2[f[i]] = np.array([[np.max(part_2[f[i]][:, 0]), np.min(part_2[f[i]][:, 1])]])\n\n explore_part(a[i], tab, part, partition_global, prob_global, S, S_bar, data, intv)\n explore_part(b[i], tab, part_2, partition_global, prob_global, S, S_bar, data, intv)\n\n def explore_partition(i, tab, partition_leaves, partition_global, prob_global, S, S_bar, data, intv=False):\n\n if f[i] == -2:\n # tab[i] = 1\n compatible_leaves.append(i)\n partition_global[i] = partition_leaves\n partition_leaves = np.squeeze(np.array(partition_leaves))\n\n if not intv:\n section_x = np.prod(\n [(data[:, s] <= partition_leaves[s, 1]) * (data[:, s] >= partition_leaves[s, 0]) for s in S],\n axis=0)\n else:\n section_x = np.prod(\n [(data[:, s] <= partition_leaves[s, 1]) * (data[:, s] >= partition_leaves[s, 0]) for s in\n S_bar], axis=0)\n\n prob_global[i] = np.sum(section_x)\n\n else:\n if f[i] in S:\n if x[f[i]] <= t[i]:\n part = partition_leaves.copy()\n part[f[i]] = np.concatenate((part[f[i]], np.array([[-np.inf, t[i]]])))\n part[f[i]] = np.array([[np.max(part[f[i]][:, 0]), np.min(part[f[i]][:, 1])]])\n explore_partition(a[i], tab, part, partition_global, prob_global, S, S_bar, data, intv)\n else:\n part = partition_leaves.copy()\n part[f[i]] = np.concatenate((part[f[i]], np.array([[t[i], np.inf]])))\n part[f[i]] = np.array([[np.max(part[f[i]][:, 0]), np.min(part[f[i]][:, 1])]])\n explore_partition(b[i], tab, part, partition_global, prob_global, S, S_bar, data, intv)\n else:\n part = partition_leaves.copy()\n part[f[i]] = np.concatenate((part[f[i]], np.array([[-np.inf, t[i]]])))\n part[f[i]] = 
np.array([[np.max(part[f[i]][:, 0]), np.min(part[f[i]][:, 1])]])\n\n part_2 = partition_leaves.copy()\n part_2[f[i]] = np.concatenate((part_2[f[i]], np.array([[t[i], np.inf]])))\n part_2[f[i]] = np.array([[np.max(part_2[f[i]][:, 0]), np.min(part_2[f[i]][:, 1])]])\n\n explore_partition(a[i], tab, part, partition_global, prob_global, S, S_bar, data, intv)\n explore_partition(b[i], tab, part_2, partition_global, prob_global, S, S_bar, data, intv)\n\n if algo == 'shap':\n return G(0, 1)\n\n elif algo == 'plugin':\n if S == []:\n compatible_leaves = np.zeros(tree.node_count)\n explore(0, compatible_leaves)\n compatible_leaves = [i for i in range(tree.node_count) if compatible_leaves[i] == 1]\n p = r_w[compatible_leaves] / np.sum(r_w[compatible_leaves])\n\n elif len(S) == len(x):\n y_pred = tree.predict(np.expand_dims(np.array(x, dtype=np.float32), axis=0))\n return np.mean(y_pred)\n\n else:\n S_bar = [i for i in self.index if i not in S]\n partition_leaves = [np.array([[-np.inf, np.inf]]) for i in range(data.shape[1])]\n partition_global = {i: [np.array([[-np.inf, np.inf]]) for i in range(data.shape[1])]\n for i in range(tree.node_count)}\n prob_global = {}\n compatible_leaves = []\n explore_partition(0, compatible_leaves, partition_leaves, partition_global, prob_global, S, S_bar, data,\n False)\n\n nbs_leaf = np.array([prob_global[key] for key in compatible_leaves])\n p = r_w[compatible_leaves] / nbs_leaf\n\n return np.sum(p * v[compatible_leaves])\n\n\n elif algo == 'plugin_interventionalv2':\n return self.cond_exp_tree_intv(tree, x, S, v, explore, get_partition, get_final_partition, data)\n\n elif algo == 'plugin_interventional':\n S_bar = [i for i in self.index if i not in S]\n if S == []:\n compatible_leaves = np.zeros(tree.node_count)\n explore(0, compatible_leaves)\n compatible_leaves = [i for i in range(tree.node_count) if compatible_leaves[i] == 1]\n p = r_w[compatible_leaves] / np.sum(r_w[compatible_leaves])\n\n elif len(S) == len(x):\n y_pred = tree.predict(np.expand_dims(np.array(x, dtype=np.float32), axis=0))\n return np.mean(y_pred)\n\n else:\n partition_leaves = [np.array([[-np.inf, np.inf]]) for i in range(data.shape[1])]\n partition_global = {i: [np.array([[-np.inf, np.inf]]) for i in range(data.shape[1])]\n for i in range(tree.node_count)}\n prob_global = {}\n compatible_leaves = []\n explore_partition(0, compatible_leaves, partition_leaves, partition_global, prob_global, S, S_bar, data,\n True)\n p = np.array([prob_global[key] / len(data) for key in compatible_leaves])\n\n return np.sum(p * v[compatible_leaves])\n\n elif algo == 'monte_carlo':\n if len(S) != len(index):\n S_bar = [i for i in index if i not in S]\n rg_data = np.zeros(shape=(N, len(index)))\n rg_data[:, S] = x[S]\n rg = sampleMVN(N, self.mean, self.cov, S_bar, S, x[S])\n rg_data[:, S_bar] = rg\n\n y_pred = tree.predict(np.array(rg_data, dtype=np.float32))\n\n else:\n y_pred = tree.predict(np.expand_dims(np.array(x, dtype=np.float32), axis=0))\n\n return np.mean(y_pred)\n\n elif algo == 'monte_carlo_interventional':\n if len(S) != len(index):\n rg = sampleMarMVN(N, self.mean, self.cov, self.index, [])\n rg_data = pd.DataFrame(rg, columns=[str(i) for i in self.index])\n\n def get_given_data(idx):\n val = np.array([x[idx]])\n val = np.tile(val, N)\n return val\n\n for val_id in S:\n rg_data[str(val_id)] = get_given_data(val_id)\n\n rg_data = rg_data[sorted(rg_data.columns)]\n y_pred = tree.predict(np.array(rg_data.values, dtype=np.float32))\n\n else:\n y_pred = tree.predict(np.expand_dims(np.array(x, 
dtype=np.float32), axis=0))\n\n return np.mean(y_pred)\n\n elif algo == 'exact':\n S_bar = [i for i in self.index if i not in S]\n given = IntVector(np.array([i + 1 for i in S]))\n c_index = np.array([i + 1 for i in index if i not in S])\n # S_bar = [i for i in index if i not in S]\n # if S == []:\n # compatible_leaves = np.zeros(tree.node_count)\n # explore(0, compatible_leaves)\n # compatible_leaves = [i for i in range(tree.node_count) if compatible_leaves[i] == 1]\n # p = r_w[compatible_leaves] / np.sum(r_w[compatible_leaves])\n\n if len(S) == len(x):\n y_pred = tree.predict(np.expand_dims(np.array(x, dtype=np.float32), axis=0))\n return np.mean(y_pred)\n\n else:\n\n # S_bar = [i for i in self.index if i not in S]\n partition_leaves = [np.array([[-np.inf, np.inf]]) for i in range(data.shape[1])]\n partition_global = {i: [np.array([[-np.inf, np.inf]]) for i in range(data.shape[1])]\n for i in range(tree.node_count)}\n prob_global = {}\n compatible_leaves = []\n explore_part(0, compatible_leaves, partition_leaves, partition_global, prob_global, S, S_bar, data,\n False)\n\n p = np.array([prob_global[key] for key in compatible_leaves])\n # print(p)\n # p = r_w[compatible_leaves] / nbs_leaf\n\n return np.sum(p * v[compatible_leaves])\n\n elif algo == 'exact_v1':\n\n if len(S) != len(index):\n given = IntVector(np.array([i + 1 for i in S]))\n c_index = np.array([i + 1 for i in index if i not in S])\n S_bar = [i for i in index if i not in S]\n\n compatible_leaves = np.zeros(tree.node_count)\n explore(0, compatible_leaves)\n\n leaf_idx = [i for i in range(tree.node_count) if compatible_leaves[i] == 1]\n leaf_proba = []\n for leaf_id in leaf_idx:\n partition_leaves = [np.array([[-np.inf, np.inf]]) for i in range(x.shape[0])]\n partition_leaves = get_partition(leaf_id, partition_leaves)\n partition_leaves = pd.DataFrame(get_final_partition(partition_leaves))[S_bar]\n\n low = FloatVector(partition_leaves.iloc[0])\n up = FloatVector(partition_leaves.iloc[1])\n\n leaf_proba.append(\n cond.pcmvnorm(low, up, self.r_mean, self.r_cov, c_index, given, FloatVector(x[S]))[0])\n\n leaf_proba = np.array(leaf_proba)\n return np.sum(leaf_proba * v[compatible_leaves == 1])\n\n else:\n y_pred = tree.predict(np.expand_dims(np.array(x, dtype=np.float32), axis=0))\n return np.mean(y_pred)\n else:\n raise ValueError(\"This algo is not implemented. 
Available estimators are: plugin, exact, monte_carlo,\\\n monte_carlo_interventional, plugin_interventional\")", "title": "" }, { "docid": "1d085c7705ab79aad4831a67c51c9dcd", "score": "0.49960086", "text": "def _goodSN(self, sourceCat):\n if self.config.minSnr <= 0:\n return True\n else:\n with np.errstate(invalid=\"ignore\"): # suppress NAN warnings\n return sourceCat.get(self.fluxKey)/sourceCat.get(self.fluxSigmaKey) > self.config.minSnr", "title": "" }, { "docid": "12e8d9d7c98d0b896b71d5272f0ffd0b", "score": "0.49935928", "text": "def get_differentially_expressed_proteins_from_log2FC_df(method, sample1, sample2, specie, ratio, two_sided = False):\n ARATH_FC_matrix, CAEEL_FC_matrix, HUMAN_FC_matrix = get_log2FC_ratio_matrices()\n \n if specie == \"HUMAN\":\n FC_matrix = HUMAN_FC_matrix\n FC_treshold = ratio * FC_matrix[sample2][sample1] #The smaples are reversed because the log2FC in this function is reversed.\n file_name = method+\"_\"+\"log2FC\"+\"_\"+sample1+\"_\"+sample2+\"_\"+specie+\".csv\"\n df_log2FC = pd.read_csv(file_name, sep = \"\\t\")\n diffExp = np.sum(df_log2FC[df_log2FC > FC_treshold] > 0) #This comparison should be more then\n elif specie == \"ARATH\":\n FC_matrix = ARATH_FC_matrix\n FC_treshold = ratio * FC_matrix[sample2][sample1] #The smaples are reversed because the log2FC in this function is reversed.\n file_name = method+\"_\"+\"log2FC\"+\"_\"+sample1+\"_\"+sample2+\"_\"+specie+\".csv\"\n df_log2FC = pd.read_csv(file_name, sep = \"\\t\")\n diffExp = np.sum(df_log2FC[df_log2FC < -ratio] > 0) + np.sum(df_log2FC[df_log2FC > ratio] > 0) #This comparison should be if less or more than ratio\n elif specie == \"CAEEL\":\n FC_matrix = CAEEL_FC_matrix\n FC_treshold = ratio * FC_matrix[sample2][sample1] #The smaples are reversed because the log2FC in this function is reversed.\n file_name = method+\"_\"+\"log2FC\"+\"_\"+sample1+\"_\"+sample2+\"_\"+specie+\".csv\"\n df_log2FC = pd.read_csv(file_name, sep = \"\\t\")\n diffExp = np.sum(df_log2FC[df_log2FC < FC_treshold] > 0) #This comparison should be less than\n else:\n print(\"no species specificed\")\n # return \n return diffExp", "title": "" }, { "docid": "6cdf7774a775c38dc3bb1c50407afc16", "score": "0.49933684", "text": "def probable(self, threshold=0.3):\r\n return self.score > threshold", "title": "" }, { "docid": "e594385a28bad52ea0422bbc0ccaba45", "score": "0.49910995", "text": "def leaf_node_check(df):\n class_count = set(df['class'].values.tolist())\n if len(class_count) == 2:\n return False\n else:\n return True", "title": "" }, { "docid": "8d2dc40beacc1f3b48c20feae0a48073", "score": "0.49809405", "text": "def is_silent(self, data) -> bool:\n return np.sqrt(np.mean(data ** 2)) < self.threshold", "title": "" }, { "docid": "7176da4a18060754fd6873ff7f344235", "score": "0.4973127", "text": "def _is_pos_def(self, mat: np.ndarray) -> bool:\n return np.all(np.linalg.eigvals(mat) > 0)", "title": "" }, { "docid": "a5b22ae6142b2c791fc5a096ba9dd32f", "score": "0.49697605", "text": "def summarize_sensitivity(sens_df):\n cols = ['star', 'date', '[Fe/H]', 'logg', 'addmode', 'temperature', 'vsini']\n detrate = sens_df.groupby(cols).apply(lambda d: (d.significance > 5).sum() / float(len(d)))\n detrate = detrate.reset_index().rename(columns={0: 'detrate'})\n significance = sens_df.groupby(cols).apply(lambda d: np.nanmean(d.significance))\n significance = significance.reset_index().rename(columns={0: 'significance'})\n detrate['significance'] = significance['significance']\n return detrate", "title": "" }, { "docid": 
"ebbe54b6adec2df35a2cd4b3a46163d8", "score": "0.49613017", "text": "def test_distributions1(dist):\n assert entropy(dist) <= np.log2(len(dist.pmf))", "title": "" }, { "docid": "7fda52437ae6a85c4134d1741fa0a5f0", "score": "0.49526456", "text": "def test_unsupervised_density():\n # !TODO: Implement a suitable scenario.\n pass", "title": "" }, { "docid": "bd915870af46803e5ec1eb4d8dd16c49", "score": "0.4946569", "text": "def prob55():", "title": "" }, { "docid": "77b13ec72bb195ee995150e1b95e7433", "score": "0.49439466", "text": "def test_5vIQ(self):\n for i in range(1000):\n self.assertTrue(61 <= Consignment._get_percent(None, 5) <= 80)", "title": "" }, { "docid": "6fc001d90fc77d9d7be6395036917507", "score": "0.4943591", "text": "def _is_ok_feature(self, feature) -> bool:\n if feature.isnull().mean() >= self.max_nan_rate:\n return False\n if (feature.value_counts().values[0] / feature.shape[0]) >= self.max_constant_rate:\n return False\n return True", "title": "" }, { "docid": "d4f12f27ec44d7c85e9b5e30c30a9158", "score": "0.494273", "text": "def test_correct_score(self):\n dist = Bernoulli(VISIBLE_UNITS, HIDDEN_UNITS, weights=self.weights, bias_hidden=self.bias_hidden, bias_visible=self.bias_visible)\n target = np.sum(self.weights) + np.sum(self.bias_hidden) + np.sum(self.bias_visible)\n actual = np.abs(dist.score_energy(self.vis_inp, self.hid_inp))\n self.assertEqual(target, actual, \"Didn't match. Target: \" + str(target) + \", Actual: \" + str(actual))", "title": "" }, { "docid": "c0d027fa8cb6d3e97644e9593d97007b", "score": "0.493798", "text": "def get_consistence(self, node):\n return self.active[self.node_index[node]].sum(0) != 0", "title": "" }, { "docid": "7c5ada0c0b35792369c00069f44d3a45", "score": "0.4937976", "text": "def is_weight(w):\n \"*** YOUR CODE HERE ***\"\n return root(w)!=None", "title": "" }, { "docid": "1cc787c5105bf00a5c54960b299f26a7", "score": "0.4936937", "text": "def _test_dist(self,group1,elem1,criterion):\n\n test=True\n for elem0 in group1:\n distsq=np.sum(np.power(elem0-elem1,2))\n if distsq < criterion**2:\n test=False\n break\n\n return test", "title": "" }, { "docid": "920bf55811ff330da85305e32d152711", "score": "0.49358174", "text": "def detectability_calculator(Theta_CDF,min_CDF,max_CDF,SNR_in):\n \n #input type checking\n assert type(min_CDF) == float, 'min_CDF should be a float.'\n assert type(max_CDF) == float, 'max_CDF should be a float.'\n assert type(SNR_in) == float, 'SNR_in should be a float.'\n \n if SNR_in <= (8 / max_CDF):\n det = 0 #always undetectable\n #set separately so Theta_CDF doesn't get input above 1\n #the max_CDF (just below 1) has to be included because the function\n #Theta_dist is not defined for the small range between max_CDF and 1\n #and this can occasionally cause problems if just 8 is used\n elif SNR_in >= (8 / min_CDF):\n det = 1 #always detectable\n #similarly, Theta_CDF is not defined between 0 and min_CDF\n else:\n threshold = 8 / SNR_in\n #how much the SNR would need to be lowered to hit the threshold SNR=8\n undet = Theta_CDF(threshold)\n #the proportion of Theta values that would lower SNR below 8\n det = 1 - undet #proportion that would keep SNR above 8\n \n #output type conversion\n det = float(det)\n \n return det", "title": "" }, { "docid": "20f342a41fcc353b8e6e6b2622f6f4db", "score": "0.4930901", "text": "def crowded2(t, data):\n cell_score = 0\n cell_score += n_check(t, data)\n cell_score += s_check(t, data)\n cell_score += e_check(t, data)\n cell_score += w_check(t, data)\n cell_score += ne_check(t, data)\n 
cell_score += nw_check(t, data)\n cell_score += sw_check(t, data)\n cell_score += se_check(t, data)\n if cell_score >= 5:\n return True\n return False", "title": "" }, { "docid": "76d4ad7012679e0d0f613dbea0d73401", "score": "0.4929446", "text": "def too_many_significant(ts,len_data,criticalVal,acfVals,upper_significance,lower_significance,numSignificantAllowed=5): \n num_significant=0\n #determine how many points are significant based on upper and lower bound levels\n for i in range(0, len(acfVals)):\n if acfVals[i]>upper_significance[i] or acfVals[i]<lower_significance[i]:\n num_significant+=1\n \n return num_significant > numSignificantAllowed", "title": "" }, { "docid": "2eb74c9f4d717d40c19afb0e7d15d8ea", "score": "0.49262944", "text": "def gtd_logic(df, G, label_column, column_prefix, progress=True):\n\n # Create a collection of shortest Paths from the root node (VRN) to each of the leave nodes\n\n P = find_shortest_paths(G, progress=progress)\n\n # Calculate the Gain Ratio for each feature in the data (therby for each node in the Graph)\n\n gr_values = calc_gr(df, label_column, progress=progress)\n\n SF = set()\n\n # Create a dictionary to keep track of the availability of nodes\n\n node_availability = {}\n\n for node in list(G.nodes()):\n\n node_availability[node] = True\n\n if progress:\n iterator = tqdm(\n P, desc=\"Greedy Top Down - (3/3) Finding most relevant nodes.\")\n else:\n iterator = P\n\n for path in iterator:\n\n # Get candidate_nodes for that path. Candidate Nodes are nodes that are part of the path AND are still available\n\n candidate_nodes = [\n node for node in path if node_availability[node] == True]\n\n # Check if there are any available nodes in the path.\n\n if len(candidate_nodes) > 0:\n\n # Get the Candidate Node with the highest Gain Ratio\n\n max_node = get_max_node(candidate_nodes, gr_values, column_prefix)\n\n # Add that Node to the Selected Features Set (SF)\n\n SF.add(max_node)\n\n # Change the availability of that node, it's ancestors and it's descendants to False\n\n node_availability[max_node] = False\n\n for ancestor in nx.ancestors(G, max_node):\n\n node_availability[ancestor] = False\n\n for decendant in nx.descendants(G, max_node):\n\n node_availability[decendant] = False\n\n # Check if there are any available nodes left. 
If not, end the algorithm.\n\n if all(availability == False for availability in node_availability.values()):\n\n break\n\n return SF", "title": "" }, { "docid": "2baa67545bb3503fadc4575b425e5b58", "score": "0.4923733", "text": "def q7(self, infile):\n\t\treturn 0.1", "title": "" }, { "docid": "922def58e2ccb9565bb1d0dbc91069d1", "score": "0.4921992", "text": "def percent_heterozygous(self):\n return (self.genotypes==1).sum() / self.size", "title": "" }, { "docid": "076ab58fe92feca3846916edf0dc751e", "score": "0.4913728", "text": "def column_based_infogain_right(df):\n\n if entype == 'bag':\n individual_entropy_list_right = []\n for each_column in df.columns:\n if each_column != 'class' and each_column != 'bruises?-no':\n df_right = df[[each_column, \"class\"]][df[each_column] == 1].groupby(\"class\").count()\n df_right_sum = df_right.sum()\n entropy = 0.0\n outer_dist = df_right_sum[each_column] / len(df)\n for each_index in df_right.index:\n inner_element = (df_right.loc[each_index, each_column] / df_right_sum[each_column])\n entropy += inner_element * np.log2(inner_element)\n final_entropy = -entropy * outer_dist\n individual_entropy_list_right.append(final_entropy)\n return individual_entropy_list_right\n\n else:\n right_df = df\n header_list = get_column_list(right_df)\n individual_entropy_list_right = []\n for each_column in header_list:\n if each_column != 'class' and each_column != 'weight':\n right_child_entropy = []\n right_temp_df = pd.DataFrame(right_df[[each_column, \"class\", 'weight']][right_df[each_column] == 1],\n columns=[each_column, 'class', 'weight'])\n right_child_entropy.append(round(right_temp_df.loc[right_temp_df['class'] == 1, 'weight'].sum(), 2))\n right_child_entropy.append(round(right_temp_df.loc[right_temp_df['class'] == -1, 'weight'].sum(), 2))\n individual_entropy_list_right.append(entropy_calculation(right_child_entropy, right_df))\n return individual_entropy_list_right", "title": "" }, { "docid": "9bb4b4560a0684e56b16123ee244524f", "score": "0.49126238", "text": "def observed_class_distribution_is_pure(self):\n count = 0\n for _, weight in self._stats.items():\n if weight != 0:\n count += 1\n if count == 2: # No need to count beyond this point\n break\n return count < 2", "title": "" }, { "docid": "26926a84fc6132413beef3d47ac5e74a", "score": "0.4904977", "text": "def is_on(self) -> bool:\n func_channel = self._device.functionalChannels[self._channel]\n return func_channel.dimLevel is not None and func_channel.dimLevel > 0.0", "title": "" }, { "docid": "f1262293adbbb02348947abb2dc8207d", "score": "0.4897986", "text": "def omni(self):\r\n\t\treturn Sstats.normaltest(self.e)", "title": "" }, { "docid": "0f8e42511507492255de2e4ecd9dfa9a", "score": "0.48942423", "text": "def migrate(self):\r\n\r\n if sl.random() < self.params['mu'] * self.fitness():\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "07a58eefa3687e1c9710d2a8425a2e8b", "score": "0.4888489", "text": "def alifestd_has_multiple_roots(phylogeny_df: pd.DataFrame) -> bool:\n return (\n (phylogeny_df[\"ancestor_list\"] == \"[]\").sum()\n + (phylogeny_df[\"ancestor_list\"] == \"[NONE]\").sum()\n + (phylogeny_df[\"ancestor_list\"] == \"[none]\").sum()\n + (phylogeny_df[\"ancestor_list\"] == \"[None]\").sum()\n ) >= 2", "title": "" }, { "docid": "def834de491b51ef8e25d28b20cfb44d", "score": "0.48781985", "text": "def is_train_sample(df):\n entry_idx = df.index.names.index(\"entry\")\n is_odd = (df.index.get_level_values(entry_idx) % 2).to_numpy(dtype=bool)\n is_train = is_odd\n return 
is_train", "title": "" }, { "docid": "9970eba9ee6e5787493ea3e526abd2bf", "score": "0.48741898", "text": "def keep_going(sample,d=1,k=31):\n log(sample)\n if (statistics.stdev(sample)/math.sqrt(k))>= d:\n return True\n return False", "title": "" }, { "docid": "a615b6db2e3aec6716df3a141b88fdf0", "score": "0.48723033", "text": "def check_pos_def(self):\n return np.all(np.linalg.eigvals(self.A) > 0)", "title": "" }, { "docid": "43949de78d4c811f75ebc917618015f8", "score": "0.4871751", "text": "def test_logistic_sigmoid_derivative(self):\n f = self.logistic_sigmoid\n f_prime = self.logistic_sigmoid_derivative\n for x in self.test_points:\n self.assertAlmostEqual(f_prime(x),\n (f(x) * (1 - f(x))) / self.std_dev,\n msg=\"Logistic sigmoid satisfies f' = f(1-f)/sigma\")", "title": "" }, { "docid": "c5d105f66cc93024db4eb9213e2ca103", "score": "0.48715493", "text": "def is_normal(r, level=0.01):\n statistic, p_value = scipy.stats.jarque_bera(r)\n return p_value > level", "title": "" }, { "docid": "9bcc4e69a233a6c320c4fdc4a16164fe", "score": "0.48659343", "text": "def p_value(self):\n return 0.5 * chi2.sf(self.ts, 1)", "title": "" }, { "docid": "fb49264abd56def98310b7fee34412ef", "score": "0.4865289", "text": "def call_significant_sites(fg_counts, bg_counts, max_pvalue):\n print fg_counts.max(), fg_counts.min(), bg_counts\n return poisson.sf(fg_counts, bg_counts) < max_pvalue", "title": "" }, { "docid": "1c939c86348d25341fb2efedb64f7630", "score": "0.4860694", "text": "def column_based_infogain_left(df):\n\n if entype == \"bag\":\n\n individual_entropy_list_left = []\n for each_column in df.columns:\n if each_column != 'class' and each_column != 'bruises?-no':\n df_left = df[[each_column, \"class\"]][df[each_column] == 0].groupby(\"class\").count()\n df_left_sum = df_left.sum()\n entropy = 0.0\n outer_dist = df_left_sum[each_column] / len(df)\n for each_index in df_left.index:\n inner_element = (df_left.loc[each_index, each_column] / df_left_sum[each_column])\n entropy += inner_element * np.log2(inner_element)\n final_entropy = -entropy * outer_dist\n individual_entropy_list_left.append(final_entropy)\n return individual_entropy_list_left\n\n else:\n\n left_df = df\n header_list = get_column_list(left_df)\n individual_entropy_list_left = []\n for each_column in header_list:\n if each_column != 'class' and each_column != 'weight':\n left_child_entropy = []\n left_temp_df = pd.DataFrame(left_df[[each_column, \"class\", 'weight']][left_df[each_column] == 0],\n columns=[each_column, 'class', 'weight'])\n left_child_entropy.append(round(left_temp_df.loc[left_temp_df['class'] == 1, 'weight'].sum(), 2))\n left_child_entropy.append(round(left_temp_df.loc[left_temp_df['class'] == -1, 'weight'].sum(), 2))\n individual_entropy_list_left.append(entropy_calculation(left_child_entropy, left_df))\n return individual_entropy_list_left", "title": "" }, { "docid": "4bf5748a30421b620cf49808b20477b3", "score": "0.4857367", "text": "def is_converged(self):\n prob = np.array(self._prob.values())\n return np.all(np.logical_or(prob<THRESHOLD_LOWER_BOUND, prob>THRESHOLD_UPPER_BOUND))", "title": "" }, { "docid": "ff0aa15b762290ea7d30469e2fa573c5", "score": "0.4856261", "text": "def little_variation(M,m,p):\r\n return ( M*(1-float(p)/100) < m < M*(1+float(p)/100) )", "title": "" }, { "docid": "bab722b9d5d3d54dfb4e3df6a8ed5327", "score": "0.48550674", "text": "def decision(probability):\n\treturn random.random() > probability", "title": "" }, { "docid": "bc67082adc2ad74b8b71f37f6e98621c", "score": "0.48462313", "text": "def 
check_convergence(metrics):\n print(\"###### Evaluating sample size convergence ######\")\n trained = False\n direction = convergence_operator()\n threshold = settings[\"data\"][\"convergence_limit\"]\n \n if settings[\"data\"][\"convergence_relative\"]:\n window_size = 1 ###########\n if len(metrics) < 2:\n return False\n else:\n diff = np.diff(metrics)\n\n if direction(np.mean(diff[-window_size:]),0):\n if abs(np.mean(diff[-window_size:])) < threshold:\n trained = True\n else:\n if direction(metrics[-1],threshold):\n trained = True\n\n print(f\"Sample size convergence metric: {settings['data']['convergence']} - {metrics[-1]}\")\n\n return trained", "title": "" }, { "docid": "8ca3e27b67167e7f2666dab16fd5a798", "score": "0.48436168", "text": "def isSimilarTo(self, other):\n cython.declare(T=cython.double)\n cython.declare(Tdata=list)\n \n Tdata = [300,400,500,600,800,1000,1500,2000]\n for T in Tdata:\n if not (0.8 < self.getHeatCapacity(T) / other.getHeatCapacity(T) < 1.25):\n return False\n elif not (0.8 < self.getEnthalpy(T) / other.getEnthalpy(T) < 1.25):\n return False\n elif not (0.8 < self.getEntropy(T) / other.getEntropy(T) < 1.25):\n return False\n elif not (0.8 < self.getFreeEnergy(T) / other.getFreeEnergy(T) < 1.25):\n return False\n\n return True", "title": "" }, { "docid": "21af97891471a69dd66f8031370dca12", "score": "0.48369926", "text": "def deepCheckWeight( file ):\n from math import isnan\n from RootTools.core.Sample import Sample\n\n # convert dpm file pathes\n sample = Sample.fromFiles(name=\"sample\", treeName=\"Events\", files=file)\n # check for branch:\n l = sample.chain.GetListOfBranches()\n if not 'weight' in [ l.At(i).GetName() for i in range(l.GetSize()) ]:\n return 0\n val = sample.getYieldFromDraw(weightString=\"weight\" )['val']\n del sample\n #logger.debug(\"Val in deepCheckWeight: %r\", val) \n return not isnan(val)", "title": "" }, { "docid": "262e2256c8c79645ca80e4f534171a72", "score": "0.48350605", "text": "def revised_szeged_index(self):\n if not self.is_connected():\n return False\n s = 0.0\n D = self.distance_matrix()\n for u, v in self.edges():\n diff = D[u, :] - D[v, :]\n o = (diff == 0).sum()\n s += ((diff > 0).sum() + .5 * o) * ((diff < 0).sum() + .5 * o)\n return s", "title": "" }, { "docid": "141a0ee3c8bacdd041380cc8cf78e683", "score": "0.48326996", "text": "def eccenbin(df, thresh):\n col = 'pl_highecc'\n df[col] = None\n for i, row in df.iterrows():\n if row.pl_orbeccen + row.pl_orbeccenerr1 < thresh:\n df.ix[i,col] = False\n if row.pl_orbeccen + row.pl_orbeccenerr2 > thresh:\n df.ix[i,col] = True\n return df", "title": "" }, { "docid": "631de088809027f2138a2345a5cd0de0", "score": "0.48291457", "text": "def validate(self, s):\r\n nodes = [i for i, a in enumerate(s) if\r\n a != cf.SLEEP and self.network.get_node(i).energy >= (cf.SENSING_ENERGY +cf.COMMUNICATION_ENERGY)] # get list of the active nodes\r\n\r\n for t in range(cf.NUM_TARGETS):\r\n no_sense_prob = 0\r\n for n in nodes:\r\n no_sense_prob += self.sensing_log_matrix[t][n]\r\n if no_sense_prob >= self.sensing_log_threshold:\r\n break\r\n\r\n if no_sense_prob < self.sensing_log_threshold:\r\n return False\r\n\r\n return True", "title": "" }, { "docid": "5da2b496988fd6be3bc9e8a543ce23bf", "score": "0.48289847", "text": "def isNormal(self):\n for A in self.subsets:\n if self.function(A) < 0:\n return False\n return True", "title": "" }, { "docid": "39932d3df9d7d30eb30db4223c906b83", "score": "0.48279193", "text": "def test_Cacciato09Cens7():\n model = Cacciato09Cens(threshold=9.5)\n lum_mc = 
model.mc_prim_galprop(prim_haloprop=np.ones(int(1e5)) * 5e11, seed=1)\n assert np.all(lum_mc >= 10 ** model.threshold)\n\n # Check that luminosities follow the expected distribution.\n def cdf(lum):\n return np.array(\n [\n (\n model.mean_occupation(prim_haloprop=5e11)\n - model.mean_occupation(prim_haloprop=5e11, prim_galprop_min=l)\n )\n / model.mean_occupation(prim_haloprop=5e11)\n for l in lum\n ]\n )\n\n p_value = kstest(lum_mc, cdf)[1]\n\n assert p_value > 0.001", "title": "" }, { "docid": "43cc7959fef16c09edae3765d2865a57", "score": "0.48257014", "text": "def sconfitto(self):\n return self.vita <= 0", "title": "" }, { "docid": "3ca71d30d2546f4d73416375191400e6", "score": "0.48147565", "text": "def prob52():", "title": "" }, { "docid": "cea9e25bd2ef57be3a2c3708ab493ad9", "score": "0.4813539", "text": "def _likelihood():\n return True", "title": "" }, { "docid": "48faa7192845e3219bee90823c958449", "score": "0.4811759", "text": "def instance_assigned_simulations(self):\n if not isinstance(self.parentnode, M4_16):\n if any(self.parentnode.fpga_slots):\n return True\n return False", "title": "" }, { "docid": "79c69108d2758db2cec36bf7b66e975c", "score": "0.4807606", "text": "def do_a_decision(the_random_number):\n if the_random_number <= 5.0:\n return 'small decision'\n else:\n return 'big decision'", "title": "" }, { "docid": "f1a377cf80837ddfc35f2f9828f7f0ef", "score": "0.48062202", "text": "def check_is_gvh5074(self) -> bool:\n return self._mfg_data_check(9, 6)", "title": "" }, { "docid": "26c3b26971b975055f96264c569a0beb", "score": "0.48012188", "text": "def activate(self, values):\n\n # First calculate the strength with which the perceptron fires\n strength = np.dot(values, self.weights)\n\n # Then return 0 or 1 depending on strength compared to threshold\n return int(strength > self.threshold)", "title": "" }, { "docid": "27897bd2cadb1632642978bb57693c87", "score": "0.48001188", "text": "def is_normal(r, level=0.01):\n if isinstance(r, pd.DataFrame):\n return r.aggregate(is_normal)\n else:\n statistic, p_value = scipy.stats.jarque_bera(r)\n return p_value > level", "title": "" } ]
0e10ce96f1b52e7bb623b034794ee907
Get the primary database driver.
[ { "docid": "287097578a00ab75ea8ce1b57de8262b", "score": "0.8098757", "text": "def db_driver(self) -> DbDriver:\n\n primary_db_driver = None\n\n # noinspection PyUnusedLocal\n def filter_db_drivers(service_id, service, config):\n nonlocal primary_db_driver\n if not isinstance(service, DbDriver):\n return False\n if config.get('primary', False):\n if primary_db_driver is not None:\n raise RuntimeError('There can only be a single primary database driver')\n primary_db_driver = service\n return True\n\n drivers = self._db_drivers.find_services(service_filter=filter_db_drivers)\n if primary_db_driver is None:\n if len(drivers) == 1:\n primary_db_driver = drivers[0]\n elif len(drivers) == 0:\n raise RuntimeError('No database driver found')\n else:\n raise RuntimeError('With multiple database drivers, one must be configured to be the primary one')\n\n # noinspection PyTypeChecker\n return primary_db_driver", "title": "" } ]
[ { "docid": "ea9c2f86bbbbb9465beea3207fedac07", "score": "0.7569865", "text": "def driver(self):\n return self._c.driver", "title": "" }, { "docid": "00e26464acdf461fbf5fae34c90c48af", "score": "0.7451214", "text": "def _get_driver(self):\n if self._config and \"driver\" in self._config and \\\n \"name\" in self._config[\"driver\"]:\n return self._config[\"driver\"][\"name\"]\n return None", "title": "" }, { "docid": "b826cb94f0ed30aba10f9feb5ec471bc", "score": "0.7443861", "text": "def driver(self):\n return self._dialect.driver", "title": "" }, { "docid": "d3b559d3029683909163d6bf0b7df8b1", "score": "0.7383822", "text": "def _get_dbdriver_instance():\n if CONF.db_type == 'sql':\n return IMPL\n elif CONF.db_type == 'etcd':\n import zun.db.etcd.api as etcd_api\n return etcd_api.get_connection()\n else:\n raise exception.ConfigInvalid(\n _(\"db_type value of %s is invalid, \"\n \"must be sql or etcd\") % CONF.db_type)", "title": "" }, { "docid": "4d2a775aff21e4422057c3c5da7ae8b1", "score": "0.7305779", "text": "def driver(self) -> str:\n return pulumi.get(self, \"driver\")", "title": "" }, { "docid": "4d2a775aff21e4422057c3c5da7ae8b1", "score": "0.7305779", "text": "def driver(self) -> str:\n return pulumi.get(self, \"driver\")", "title": "" }, { "docid": "4d2a775aff21e4422057c3c5da7ae8b1", "score": "0.7305779", "text": "def driver(self) -> str:\n return pulumi.get(self, \"driver\")", "title": "" }, { "docid": "4d2a775aff21e4422057c3c5da7ae8b1", "score": "0.7305779", "text": "def driver(self) -> str:\n return pulumi.get(self, \"driver\")", "title": "" }, { "docid": "c9a3c6f85770664aa32b0eec808123ae", "score": "0.7287726", "text": "def driver(self) -> str:\n return self.__driver", "title": "" }, { "docid": "6ce23da40f54c7aabd97ffcba11073c7", "score": "0.71992856", "text": "def driver(self):\n\t\tif self.dev:\n\t\t\treturn Driver(pyf.fp_dev_get_driver(self.dev))\n\t\tif self.dscv:\n\t\t\treturn Driver(pyf.fp_dscv_dev_get_driver(self.dscv))", "title": "" }, { "docid": "61eb3dbadc66fefcee473a3acba55c01", "score": "0.71473116", "text": "def driver_id(self):\n\t\treturn pyf.fp_driver_get_driver_id(self.drv)", "title": "" }, { "docid": "099e179c479331282caf566dcce39a2a", "score": "0.6994633", "text": "def get_driver(self):\n if self.plaid is not None:\n driver = self.plaid.drivers\n elif IS_MACOS:\n driver = pynvx.cudaSystemGetDriverVersion(ignore=True)\n else:\n try:\n driver = pynvml.nvmlSystemGetDriverVersion().decode(\"utf-8\")\n except pynvml.NVMLError:\n driver = \"No Nvidia driver found\"\n if self.logger:\n self.logger.debug(\"GPU Driver: %s\", driver)\n return driver", "title": "" }, { "docid": "460279b03197b4aded83a2dda7a82ab9", "score": "0.6889848", "text": "def driver_id(self):\n return self._driver_id", "title": "" }, { "docid": "460279b03197b4aded83a2dda7a82ab9", "score": "0.6889848", "text": "def driver_id(self):\n return self._driver_id", "title": "" }, { "docid": "fc264c5da0fb911c4f1af47378da7587", "score": "0.6871765", "text": "def driver_protocol(self):\n return self._driver_protocol", "title": "" }, { "docid": "9ea33acf37bea67703d3d70b1b16b034", "score": "0.67590284", "text": "def get_driver(conf):\n return _get_driver(conf.storage.driver,\n conf.storage)", "title": "" }, { "docid": "9fd7baca2772dca05a4ad15202fac1c7", "score": "0.6708061", "text": "def driver(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"driver\")", "title": "" }, { "docid": "9fd7baca2772dca05a4ad15202fac1c7", "score": "0.6708061", "text": "def driver(self) -> Optional[pulumi.Input[str]]:\n 
return pulumi.get(self, \"driver\")", "title": "" }, { "docid": "9fd7baca2772dca05a4ad15202fac1c7", "score": "0.6708061", "text": "def driver(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"driver\")", "title": "" }, { "docid": "9fd7baca2772dca05a4ad15202fac1c7", "score": "0.6708061", "text": "def driver(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"driver\")", "title": "" }, { "docid": "313baa47fc4a233ccaef980cf1fc9979", "score": "0.66667956", "text": "def get_db():\n return DatabaseServer.get_instance()", "title": "" }, { "docid": "b077c475de5d840bc19797a77b29aac7", "score": "0.6631761", "text": "def get_backend_driver(self):\n if self.backend_driver is None:\n context = get_application_context()\n\n if context == CONTEXT_NORMAL:\n driver = TestBackendDriver\n elif context == CONTEXT_TEST:\n driver = TestBackendDriver\n else:\n raise RuntimeError(\"unknonw application context: %s\" % context)\n\n self.backend_driver = driver\n\n return self.backend_driver", "title": "" }, { "docid": "fe96f4d8ac3006493fdce764d97789a2", "score": "0.6545788", "text": "def get_default_engine():\n\n return get_component(DatabasePackage.COMPONENT_NAME).get_default_engine()", "title": "" }, { "docid": "1f8977be62ab87aae144f765fa556a0d", "score": "0.65122044", "text": "def get_driver():\n pass", "title": "" }, { "docid": "fcd1d36106cc80f0829044b41f4aca49", "score": "0.6484046", "text": "def driver(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"driver\")", "title": "" }, { "docid": "fcd1d36106cc80f0829044b41f4aca49", "score": "0.6484046", "text": "def driver(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"driver\")", "title": "" }, { "docid": "fcd1d36106cc80f0829044b41f4aca49", "score": "0.6484046", "text": "def driver(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"driver\")", "title": "" }, { "docid": "fcd1d36106cc80f0829044b41f4aca49", "score": "0.6484046", "text": "def driver(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"driver\")", "title": "" }, { "docid": "26d9ca1035448806024b894d36c6749e", "score": "0.64766526", "text": "def _get_driver(name, conf):\n d = driver.DriverManager('gnocchi.storage',\n name).driver\n return d(conf)", "title": "" }, { "docid": "4bcaddbc686ed78ab0567a9280989a6a", "score": "0.64453536", "text": "def driver_version(self):\n return self._driver_version", "title": "" }, { "docid": "fde50d6509d43221f0acd754b7ecdaac", "score": "0.6436149", "text": "def default_database(self) -> Optional[str]:\n return pulumi.get(self, \"default_database\")", "title": "" }, { "docid": "fde50d6509d43221f0acd754b7ecdaac", "score": "0.6436149", "text": "def default_database(self) -> Optional[str]:\n return pulumi.get(self, \"default_database\")", "title": "" }, { "docid": "f327b0818c9a5c9f7a7fb0b85da396e8", "score": "0.64345896", "text": "def driver_vendor(self):\n return self._driver_vendor", "title": "" }, { "docid": "76f7f45ccd9c16b7d23b9289bcc69fdc", "score": "0.64014137", "text": "def db_engine(self):\n if self.__connection is not None:\n return self.__db_name, self.__connection.version\n else:\n print(\"Not connected\")", "title": "" }, { "docid": "e7deb58f9bdab2a972269f126cfc3c94", "score": "0.63967466", "text": "def get_db():\n if not hasattr(g, 'oracle_db'):\n g.oracle_db = connect_db()\n return g.oracle_db", "title": "" }, { "docid": "375a5cef41b15eef2192ac2df8c67a3a", "score": "0.63757753", "text": "def get_db_name():\n\tconfig = SafeConfigParser()\n\tconfig.read('config.ini')\n\tdb = config.get('Mongo_Config', 
{ "docid": "48ec4937c08e08712b544de929038494", "score": "0.6345324", "text": "def driver_id(self):\n\t\tif self.data_ptr:\n\t\t\treturn pyf.fp_print_data_get_driver_id(self.data_ptr)\n\t\telif self.dscv_ptr:\n\t\t\treturn pyf.fp_dscv_print_get_driver_id(self.dscv_ptr)\n\t\traise \"no print\"", "title": "" },
{ "docid": "185da7a87920d3a9bbfc8d501d683351", "score": "0.6331698", "text": "def get_connection(self):\n return self._db", "title": "" },
{ "docid": "fbfd73bf5ce96ee551fd76117b708692", "score": "0.63186634", "text": "def get_driver(backend):\n host, port = get_neo4j_host_and_port()\n scheme = \"bolt://%s:%d\" % (host, port)\n return Driver(backend, scheme, get_authorization())", "title": "" },
{ "docid": "a06f60d16dfafbb82597f4ce753ac8bf", "score": "0.6311597", "text": "def get_driver(cls, name):\r\n\r\n LOG.debug('Looking for driver %s in %s', name, cls.__plugin_ns__)\r\n\r\n mgr = driver.DriverManager(cls.__plugin_ns__, name)\r\n\r\n return mgr.driver", "title": "" },
{ "docid": "88825e67a67039d31a776941f613cf69", "score": "0.63104516", "text": "def get_db():\n\n return db_wrapper.database", "title": "" },
{ "docid": "b324045365417bd6c28e4f3d33da5245", "score": "0.6293812", "text": "def get_db():\n\tif not hasattr(g, 'sqlite_db'):\n\t\tg.sqlite_db = connect_db()\n\n\treturn g.sqlite_db", "title": "" },
{ "docid": "819edf44e055902535c91115abcd34c9", "score": "0.62842745", "text": "def get_connection(self):\n return self._db_connection", "title": "" },
{ "docid": "6b7a4d5a7903c237f33675483627c924", "score": "0.62807256", "text": "def get_db():\n if not hasattr(g, \"sqlite_db\"):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "title": "" },
{ "docid": "42a923c024478558ce79f1773ad39b2f", "score": "0.6277844", "text": "def database(self) -> str:\n return pulumi.get(self, \"database\")", "title": "" },
{ "docid": "5d1fc753492278446ac1e3ed211f0f7d", "score": "0.62718475", "text": "def getdb(self):\n return self._database", "title": "" },
{ "docid": "9b08fecb3d74bdf9fbf7f400fdff4c75", "score": "0.62621504", "text": "def get_db():\r\n if not hasattr(g, 'sqlite_db'):\r\n g.sqlite_db = connect_db()\r\n return g.sqlite_db", "title": "" },
{ "docid": "9707f4a0631a0b305b746ea7c9346b40", "score": "0.6261481", "text": "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "title": "" },
{ "docid": "930d7ae3738ea14ac11e56ffde65f641", "score": "0.6240257", "text": "def getdriver(self):\n idn = self.idn()\n if not idn:\n logger.warning(\"IDN not found %s.\")\n return\n vendor = self.__namecleanup(idn[0])\n model = self.__namecleanup(idn[1])\n self.vendormodel = vendor + model\n return self.vendormodel", "title": "" },
{ "docid": "7ddabf3e66c156791d9ee8b574f88ed7", "score": "0.623206", "text": "def get_db():\n\tif not hasattr(g, 'sqlite_db'):\n\t\tg.sqlite_db = connect_db()\n\treturn g.sqlite_db", "title": "" },
{ "docid": "a72d16d940dd7c8d83d383602b67d820", "score": "0.6231732", "text": "def backend(self):\n return self.provider.get_backend(self.backend_name)", "title": "" },
{ "docid": "fe16e72d56ad3ed1ccf3dbcdc5105d5d", "score": "0.6230065", "text": "def get_database(self):\n if self.get('dbtype') == 'sqlite':\n return pw.SqliteDatabase(self.get('dbname'),\n pragmas={'journal_mode': 'wal',\n 'cache_size': -1 * 64000})\n else:\n if self.get('username') is None or self.get('password') is None:\n raise AttributeError('[ERROR] dbtype %s requires username and'\n ' password.\\n' % str(self.get('dbtype')))\n if self.get('hostname') is None:\n self.set('hostname', 'localhost')\n if self.get('dbtype') == 'mysql':\n if self.get('port') is None or self.get('port') == '':\n self.set('port', str(3306))\n return pw.MySQLDatabase(\n self.get('dbname'),\n user=self.get('username'),\n password=self.get('password'),\n host=self.get('hostname'),\n port=int(self.get('port')))\n elif self.get('dbtype') == 'postgres':\n if self.get('port') is None or self.get('port') == '':\n self.set('port', str(5432))\n return pw.PostgresqlDatabase(\n self.get('dbname'),\n user=self.get('username'),\n password=self.get('password'),\n host=self.get('hostname'),\n port=int(self.get('port')))", "title": "" },
{ "docid": "47570e6202f938364da72d5505982373", "score": "0.6204253", "text": "def get_default_database_name():\n\n return get_component(DatabasePackage.COMPONENT_NAME).get_default_database_name()", "title": "" },
{ "docid": "96e855f6a1253725e79bf5540b9a4070", "score": "0.61888283", "text": "def get_database(self, database=None):\n if not self.connection:\n raise ConnectionFailure('No connection')\n if not database:\n if not self._database:\n raise Exception('No database submitted')\n database = self._database\n return self.connection[database]", "title": "" },
{ "docid": "8caa5ad96118eb4b45416c937cf6044c", "score": "0.6181815", "text": "def database_name(self):\n return self.__db", "title": "" },
{ "docid": "f60e77c828d268907e32bb29dd75b701", "score": "0.616323", "text": "def get_database(self):\n\n return self.__database", "title": "" },
{ "docid": "981bc7087205722b9b2c024e67037393", "score": "0.6155437", "text": "def dbname(self):\n\n if self.isolate_db:\n return get_random_identifier(\n max_len=MAX_IDENTIFIER_LEN,\n prefix_str=DbSession.DB_PREFIX_NAME\n )\n else:\n return self._base_conn_params['dbname']", "title": "" },
{ "docid": "cbc8d0bf5c2e22c2681c2cabf39ae4e6", "score": "0.6142031", "text": "def get_database(self):\n return self.__c", "title": "" },
{ "docid": "6cb31a1bd2d06a29de506194ee3c99e7", "score": "0.61184365", "text": "def db_name(self):\n if self.__connection is not None:\n return self.__db_name\n else:\n print(\"Not connected\")", "title": "" },
{ "docid": "0a4680bb02cca30e7ffa42b1f229e458", "score": "0.6111468", "text": "def get_db():\n if not hasattr(g, 'pg_db'):\n g.pg_db = connect_db()\n return g.pg_db", "title": "" },
{ "docid": "fe73dc58c6df927526f37f85c88dcf3a", "score": "0.6088333", "text": "def get_database(self, database=None):\n database = database if database !=None else self.database\n \n if self._database is None:\n conn = self.get_connection()\n db = conn[database]\n self._database = db\n \n return self._database", "title": "" },
{ "docid": "e8a8e3595f5f19e15305c4161573d1cb", "score": "0.60825795", "text": "def getDatabaseName(self):\n return self._base.getDatabaseName()", "title": "" },
{ "docid": "34a13446b9632ae323261b298f130474", "score": "0.6080003", "text": "def get_db():\n if not hasattr(g, 'sqlite_db'): #store db in a global variable 'g'\n g.sqlite_db = connect_db()\n return g.sqlite_db", "title": "" },
{ "docid": "292564626349474c7eec3dfa443766de", "score": "0.60793155", "text": "def get_db():\n if 'app_db' not in g:\n g.sqlite_db = connect_db()\n return g.sqlite_db", "title": "" },
{ "docid": "e1439839b958f651e44403a553f35cd2", "score": "0.60618937", "text": "def get_db_client():\n db, user, password, host = get_db_parameters()\n return psycopg2.connect(database=db, user=user, host=host, password=password)", "title": "" },
{ "docid": "8adcb3f0dc5f64fa10bf010c80e2dea4", "score": "0.60596836", "text": "def db(self):\n if not self._database:\n if cfg.uri and 'replicaSet' in cfg.uri:\n conn = pymongo.MongoReplicaSetClient(cfg.uri)\n else:\n conn = pymongo.MongoClient(cfg.uri)\n\n self._database = conn[cfg.database]\n\n return self._database", "title": "" },
{ "docid": "1c0602f968e269866160a0b6f8d5b658", "score": "0.60579604", "text": "def get_db():\r\n # top = _app_ctx_stack.top\r\n # if not hasattr(top, 'sqlite_db'):\r\n # top.sqlite_db = connect_db()\r\n # return top.sqlite_db\r\n if not hasattr(g, 'sqlite_db'):\r\n g.sqlite_db = connect_db()\r\n return g.sqlite_db", "title": "" },
{ "docid": "67e1b128e7c4ecc63f1cf9de6ca95a3c", "score": "0.60555863", "text": "def dbname(self):\n return self._dbname", "title": "" },
{ "docid": "daedd58f99d639df264379f95bc6e118", "score": "0.60529375", "text": "def sql_engine(self) -> str:\n return pulumi.get(self, \"sql_engine\")", "title": "" },
{ "docid": "7cfdea3e647f215200e143568a04bff3", "score": "0.6052886", "text": "def get_db():\n ## todo: use connection pooling\n import psycopg2\n params = get_db_connection_params()\n return psycopg2.connect(**params)", "title": "" },
{ "docid": "01ec1dedd6d8d723f7b08b6ece50e4e1", "score": "0.60479516", "text": "def get_db():\n\n #return globalDB\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "title": "" },
{ "docid": "77ff73efddcfbcb35d499add2395163f", "score": "0.6044578", "text": "def get_db_host():\n if TEST_POSTGRESQL and not TEST_WITH_TLS:\n return PG_UNIX_HOST\n else:\n return DB_HOST", "title": "" } ]
c3b2cf2d9c71baccb00c198de43cf5af
Iterates columnwise over the marker alignment. Excludes samples.
[ { "docid": "300674607fabeb0d2f4d672f073bedbc", "score": "0.5351515", "text": "def iter_marker_sites(self, start=0, stop=None, size=1):\n if self.markers is None or self.markers.nrows == 0:\n return\n if stop is None:\n stop = self.nsites\n if (stop - start) % size != 0:\n raise ValueError('Alignment cannot be completely divided into '\n 'chucks of size {}'.format(size))\n for i in range(start, stop, size):\n yield [s[i:i+size] for s in self.markers.sequences]", "title": "" } ]
[ { "docid": "a0d44a244dde0b542e3a237cd14452b2", "score": "0.55822045", "text": "def others_in_column(self, row_, col_):\n for r in range(9):\n if r != row_:\n yield self[r, col_]", "title": "" }, { "docid": "5988acb9ea0c626347aba9bba02a519c", "score": "0.54252195", "text": "def iter_markers(self):\n for i in self.nmarkers:\n yield Record(\n self.markers.ids[i],\n self.markers.descriptions[i],\n self.markers.sequences[i],\n )", "title": "" }, { "docid": "f46c3bfebc4e391badd53d4010381023", "score": "0.52099943", "text": "def _get_me_the_result_by_looping_through_each_col(self, fkp, row, col, count_for_output, reference, output,\n alignment_result):\n # push same tokens to alignment_result (#3)\n # if it is not zero, which means the reference and the output are the same in this part.\n if fkp.get_n_match(n_row=row, n_col=col) > 0: # 3\n for i in range(fkp.get_n_match(n_row=row, n_col=col)):\n count_for_output -= 1\n self._alignment_add(reference[fkp.get_n(n_row=row, n_col=col)-1-i], output[count_for_output],\n alignment_result)\n # distinguish insert / del / sub according to #2\n # no matter if the third number is zero or not, we will have to look at the second number;\n return self._second_number(row, col, count_for_output, reference, output, fkp, alignment_result)", "title": "" }, { "docid": "eda9b179be47168f8af1ded2289e7306", "score": "0.513998", "text": "def _iter_offset(self):\n index = 0\n length = 1\n while True:\n for _ in range(2):\n for _l in range(length):\n yield [self._step * k for k in self._rotation[index]]\n index = (index + 1) % len(self._rotation)\n length += 1", "title": "" }, { "docid": "361f20cabe33898c81536afe79a1d3c3", "score": "0.508139", "text": "def iterate_across(padded_x, padded_y=None, batch_size=1):\n curr = 0\n data_len = len(padded_x[0])\n index_shuffler = list(range(data_len))\n random.shuffle(index_shuffler)\n while curr < data_len and curr < 1000:\n if padded_y is not None:\n yield [np.array([padded_x[i][j] \\\n for j in index_shuffler[curr:curr+batch_size]]) \\\n for i in range(len(padded_x))], \\\n [np.array([padded_y[i][j] \\\n for j in index_shuffler[curr:curr+batch_size]]) \\\n for i in range(len(padded_y))]\n else:\n yield [np.array([padded_x[i][j] \\\n for j in index_shuffler[curr:curr+batch_size]]) \\\n for i in range(len(padded_x))]\n curr += batch_size", "title": "" }, { "docid": "142e5a5f3fbf15888d339e113da700d9", "score": "0.5047755", "text": "def align():", "title": "" }, { "docid": "c44099a0e4d84015d6721334ebf9941d", "score": "0.4997019", "text": "def others_in_row(self, row_, col_):\n for c in range(9):\n if c != col_:\n yield self[row_, c]", "title": "" }, { "docid": "9c4b27d3bd6f934aabb3d79cc41acdc2", "score": "0.49119234", "text": "def get_markers(self) -> iter:\n for cm in self.mappings:\n yield cm.marker", "title": "" }, { "docid": "53ea1396a36f06157d874ab378077d78", "score": "0.4888976", "text": "def align_columns() -> Iterable[tuple[int, list[str]]]:\n for page, lines in extract_pages():\n # find a position that is blank in every line\n max_len = max(len(line) for line in lines)\n best_blank = -1\n for i in range(max_len):\n if not all(len(line) <= i or line[i] == \" \" for line in lines):\n continue\n num_lines = len([line for line in lines if len(line) > i])\n if num_lines < 10:\n continue\n best_blank = i\n assert best_blank != -1, f\"failed to find split for {page}\"\n first_column = [line[:best_blank].rstrip() for line in lines]\n second_column = [line[best_blank + 1 :].rstrip() for line in lines]\n yield page, first_column + 
second_column", "title": "" }, { "docid": "42422da18a8780865576df8848ca2049", "score": "0.48882017", "text": "def __iter__(self):\n for col in self._selection:\n yield self.df.icol(col)", "title": "" }, { "docid": "6366ee59f36eb689ab6ab9d20a09b0ed", "score": "0.48636395", "text": "def align(self, column, groups):\n raise NotImplementedError() # pragma: no cover", "title": "" }, { "docid": "04bef7898a1b96c617f1b85624da2194", "score": "0.48118094", "text": "def iter_rows(self):\n for i in self.nsamples:\n yield Record(\n self.samples.ids[i],\n self.samples.descriptions[i],\n self.samples.sequences[i],\n )\n for i in self.nmarkers:\n yield Record(\n self.markers.ids[i],\n self.markers.descriptions[i],\n self.markers.sequences[i],\n )", "title": "" }, { "docid": "46896471637ee32dc4dba4ef12c0c9ec", "score": "0.47870177", "text": "def alignment(path_muse, path_note, path_save, columns):\n files_muse = sorted(os.listdir(path_muse)) # read muse files\n files_notes = sorted(os.listdir(path_note)) # read annotation files\n\n for index in range(len(files_muse)): # for each file muse\n muse = pd.read_csv(path_muse + '/' + files_muse[index], index_col=False) # keep the file\n print(index)\n\n muse = muse[['TimeStamp'] + columns] # choose the columns\n if len(muse['TimeStamp'][0]) >= 12: # if there is the date\n muse['TimeStamp'] = list(map(lambda x: x[11:], muse['TimeStamp']))\n\n note = pd.read_csv(path_note + '/' + files_notes[index], index_col=False) #\n\n # fix tables\n first_check = note['check time'][1][:-3]\n note = note[2:-1]\n note['check time'] = list(map(lambda x: (x[:-3]), note['check time']))\n muse['TimeStamp'] = list(map(lambda x: str(x), muse['TimeStamp']))\n\n # fix dimension of csv muse\n print('Dataset size before cleaning samples:' + str(muse.shape))\n old_shape = muse.shape[0]\n muse = muse.dropna()\n muse = muse[(muse[columns] != 0).all(1)]\n print('Size of dataset after cleaning samples:' + str(muse.shape))\n print('Diference: ' + str(old_shape - muse.shape[0]))\n\n # muse csv must be start with the same time of note csv\n first_index = 0\n while pd.to_datetime(first_check) > pd.to_datetime(list(muse['TimeStamp'])[first_index]):\n first_index = first_index + 1\n print(first_index)\n muse = muse[first_index:]\n\n muse = create_new_muse_table(note, muse, first_check)\n\n muse.to_csv(path_save + \"/\" + str(index) + \"alignment.csv\", index=False)", "title": "" }, { "docid": "a2a0e6cc803539a0accb2a194bd02b8f", "score": "0.4769264", "text": "def iter_multialignments(self):\n sam_iter = iter(self)\n multialignment = [next(sam_iter)]\n for record in sam_iter:\n if record.qname == multialignment[0].qname: # is this expensive?\n multialignment.append(record)\n else:\n yield MultiAlignment(multialignment)\n multialignment = [record]\n yield MultiAlignment(multialignment)", "title": "" }, { "docid": "1fa56691dcc6453eacc6ba779f066afd", "score": "0.47459912", "text": "def remove_irrelevant_data_from_alignment(ali_plus_top_seq_plus_1):\n # Parse the input alignment.\n alignment = AlignIO.read(ali_plus_top_seq_plus_1, 'nexus')\n\n # Remove the original file.\n os.remove(ali_plus_top_seq_plus_1)\n\n # Remove all but the last two sequences from the alignment.\n alignment = alignment[-2:]\n\n\n # Remove positions in the alignment that have only gaps for both\n # sequences.\n\n # get a list of columns as strings in the original alignment.\n seq_len = alignment.get_alignment_length()\n columns = [alignment[:, col] for col in range(seq_len)] \n\n # Get a list of column indexes for columns that need 
to be removed.\n col_to_remove = []\n col_index = -1\n for col in columns:\n col_index += 1\n if col[0] == '-' and col[1] == '-':\n col_to_remove.append(col_index)\n\n \n #seq_len = alignment.get_alignment_length()\n #print(seq_len)\n #print(alignment[:,col_to_remove[0]])\n\n # Remove columns (positions) from alignment.\n num = 0\n for col_index in col_to_remove:\n num += 1\n adj_col_index = col_index - num\n alignment = alignment[:,:adj_col_index] + alignment[:,adj_col_index + 1:]\n #if col_index != (seq_len -1):\n # alignment = alignment[:,:adj_col_index] + alignment[:,adj_col_index + 1:]\n #else:\n # alignment = alignment[:,:adj_col_index]\n\n #seq_len = alignment.get_alignment_length()\n #print(seq_len)\n #print(alignment[:,col_to_remove[0]])\n\n # Write modified alignment to the same filepath as the original.\n AlignIO.write(alignment, ali_plus_top_seq_plus_1, 'nexus')", "title": "" }, { "docid": "90651807ce66f66bdf7c3a2fe35d9007", "score": "0.46938723", "text": "def _test_shifted(sequences):\n\n for i in range(1, len(sequences)):\n\n if sequences[i - 1][:-1].sum() == 0:\n # Change of user, all columns but one\n # are padding.\n continue\n\n assert np.all(sequences[i][1:] == sequences[i - 1][:-1])", "title": "" }, { "docid": "d3a03e5696aa7c858b18a732b64d26cd", "score": "0.4686958", "text": "def column(self, col_):\n for row in self.raw:\n yield row[col_]", "title": "" }, { "docid": "77b91c3138c977e9a0b69c8d144b7bc1", "score": "0.46589667", "text": "def _iterate_over_counter_columns(self, func):\n col_count = 0\n for row_index in xrange(self._params['num_counter_rows']):\n row_key = self._generate_row_key(row_index)\n for col_index in xrange(self._params['num_cols']):\n col_name = self._generate_col_name(col_index)\n func(row_key, col_name)\n col_count += 1\n debug(\"iterated over %d counter columns\" % col_count)", "title": "" }, { "docid": "ff8e979417a0fcf1739b14f6ea9f1830", "score": "0.4653444", "text": "def iterate_over_spatial(self):\n data = self[:]\n\n def calc_slice_idx(idx):\n slice_obj = list(idx[:3]) + [slice(None), ] * (data.ndim - 3)\n return tuple(slice_obj)\n\n for idx in np.ndindex(data.shape[:3]):\n yield self[idx], calc_slice_idx(idx)", "title": "" }, { "docid": "eea3e376a73398e4ab00914b35642bc1", "score": "0.46411833", "text": "def refine_all_alignment(original, limit=4):\n new_alignment = []\n for i, item in enumerate(original):\n new_alignment.append(refine_individual_alignment(original, i, limit = limit))\n return new_alignment", "title": "" }, { "docid": "7a6246840c523f1a633321e65830459c", "score": "0.4640228", "text": "def _align_columns(tab_stop_array):\r\n i = 0\r\n while i < len(tab_stop_array[0]):\r\n # Start with leftmost column and work right\r\n # Grab initial tab positions for this column\r\n column = sorted([row[i] for row in tab_stop_array])\r\n # To figure out if a given tab position exceeds MAX_OFFSET, we have to get an accurate value for min_tab first.\r\n # The logic is simplest if we just sort the list.\r\n min_tab = None\r\n max_tab = None\r\n\r\n # Figure out the left and rightmost tabs\r\n for tab in column:\r\n if tab is None:\r\n continue\r\n if min_tab is None or tab < min_tab:\r\n min_tab = tab\r\n if (max_tab is None or tab > max_tab) and tab - \\\r\n min_tab <= MAX_OFFSET:\r\n max_tab = tab\r\n\r\n # Recalculate the rest of the tab stops after offsetting one to align\r\n # with the rest\r\n for j, row in enumerate(tab_stop_array):\r\n aligned_row = row[:i]\r\n original_tab = row[i]\r\n offset = None if original_tab is None else max_tab - 
original_tab\r\n if offset is None:\r\n # This column is not being used on this row. Leave as None, and\r\n # don't change remaining columns.\r\n aligned_row += [None]\r\n aligned_row += row[i + 1:]\r\n elif offset < 0:\r\n # The field to the left was much wider than the other fields in that column. Shift the columns to the\r\n # right and insert an empty column\r\n aligned_row += [None]\r\n aligned_row += [original_tab]\r\n aligned_row += row[i + 1:]\r\n else:\r\n # Align this field with others in the column, and recalculate\r\n # the remaining columns for this row.\r\n aligned_row += [original_tab + offset]\r\n for tab in row[i + 1:]:\r\n aligned_row += [None if tab is None else tab + offset]\r\n tab_stop_array[j] = aligned_row\r\n\r\n _pad_array(tab_stop_array)\r\n i += 1\r\n return", "title": "" }, { "docid": "008bd0b88e5eed0546f4fabba8a5e83e", "score": "0.46262637", "text": "def iterating(self):\n\n for l_0, line in enumerate(self.layer):\n self.count = 0\n for c_0, column in enumerate(line):\n if l_0 % 2 == 0:\n self.check_line_1(l_0, c_0)\n else:\n self.check_line_2(l_0, c_0)\n if self.flag_end_line:\n self.flag_end_line = False\n if l_0 + 1 == len(self.layer):\n self.combinations.append(self.layer)\n return\n break", "title": "" }, { "docid": "e339d741d5a5a21b12213e0e2598b63c", "score": "0.46220663", "text": "def _iter_array_or_map(self):\n while self._block_count != 0:\n if self._block_count < 0:\n self._block_count = -self._block_count\n # Read block size, unused\n self.read_long()\n\n for i in range(self._block_count):\n yield\n self._block_count = self.read_long()", "title": "" }, { "docid": "80abe283cb9df061472336b7537ab8d3", "score": "0.46200323", "text": "def _adjust_alignment(self):\n col_diff = self._num_columns - len(self._align_list)\n if col_diff <= 0:\n self._align_list = self._align_list[:self._num_columns]\n\n else:\n self._align_list.extend(['>'] * col_diff)", "title": "" }, { "docid": "5acee35efaabe4f84585c8803b0b3aff", "score": "0.4612931", "text": "def mapaligns(self, pairs):\n # re-map ref_aligns using provided pairs\n # first index is to current (ref_align) sequence, second is to new ref\n # (should be numpy array)\n\n # clear ref_align\n refal = self.ref_align\n ra0 = refal > 0\n self.ref_align = 0*self.ref_align\n # now make the pairs unique in x (own seq)\n (_,uinds) = np.unique(pairs[:,0],return_index=True)\n pairs = pairs[uinds,:]\n # and now use interpolation to remap, return -1 out of range\n self.ref_align[ra0] = np.round(np.interp(refal[ra0],pairs[:,0],pairs[:,1],0,0))\n \n # make sure all of our arrays are in order\n self.makecontiguous()", "title": "" }, { "docid": "809b21ef8646189d16c261175fa49371", "score": "0.46081325", "text": "def _equalize_batch(padding, gen):\n for batch in gen:\n longest = 0\n for ngs in batch[0]:\n longest = max(longest, len(ngs))\n for i in range(len(batch[0])):\n batch[0][i] = pad_to_length(batch[0][i], longest, padding)\n yield batch", "title": "" }, { "docid": "3eb588113bad5582cf47511920473caf", "score": "0.45983833", "text": "def whiteIterator(self) :\n for i in range(self.n) :\n for j in range(self.m) :\n if not self.image[i][j] : yield [i,j]", "title": "" }, { "docid": "a6dfe6532a806ab9cf2370ff01d290db", "score": "0.4586409", "text": "def blackIterator(self) :\n for i in range(self.n) :\n for j in range(self.m) :\n if self.image[i][j] : yield [i,j]", "title": "" }, { "docid": "c31bbbea5c58091145ee2cb687feadf7", "score": "0.4584737", "text": "def offsetElements(self, i):\n\n #iterate over each tile and subtract\n #if 
the value is -1, indicating a blank tile, leave it as that\n for y in range(0, len(self.array)):\n for x in range(0, len(self.array[0])):\n if self.array[y][x] != -1:\n self.array[y][x] -= i", "title": "" }, { "docid": "09b39387823ec1a23601b7cc847db985", "score": "0.45449036", "text": "def main_extracting_features_per_atom(base_path, \n xyz_file, \n out_path, \n out_file_name, \n dic_encode_atom_type, \n lst_atom_types, \n max_vals = [1,1,1], \n centered = True,\n ordering = 'sorted',\n df_bond = pd.DataFrame([1.7], columns = ['bonds'])\n ):\n list_alfa = [0.05, 0.144, 0.5, 1]\n if not (os.path.isdir(out_path)):\n os.mkdir(out_path)\n\n atom_type_col = 'atom_type'\n xyz_cols = ['x', 'y', 'z']\n try:\n df_xyz_plain = xyz_to_df(base_path, xyz_file, dic_encode_atom_type, lst_atom_types, centered = True)\n except:\n print(f'{xyz_file}')\n return\n\n n_atom_types = len(lst_atom_types)\n\n df_xyz_plain['r_dist_min'] = np.linalg.norm((df_xyz_plain[xyz_cols] - df_xyz_plain[xyz_cols].min()).values, axis=1)\n df_xyz_plain['r_dist_max'] = np.linalg.norm((df_xyz_plain[xyz_cols] - df_xyz_plain[xyz_cols].max()).values, axis=1)\n df_xyz_plain['r_dist_mean'] = np.linalg.norm((df_xyz_plain[xyz_cols] - df_xyz_plain[xyz_cols].mean()).values, axis=1)\n\n for alfa in list_alfa:\n for i in range(n_atom_types):\n for j in range(i, n_atom_types):\n atom_1 = lst_atom_types[i]\n atom_2 = lst_atom_types[j]\n coord_1 = df_xyz_plain[xyz_cols][df_xyz_plain['atom_type'] == atom_1]\n coord_2 = df_xyz_plain[xyz_cols][df_xyz_plain['atom_type'] == atom_2]\n\n dist_mat = distance_matrix(coord_1, coord_2)\n density_loc = np.exp(-alfa * dist_mat**2).sum(axis=1)\n df_xyz_plain[f'{atom_1}_{atom_2}_{alfa}'] = density_loc\n # =================================================================\n # 1. Extracting XYZ positions, encoding atoms types, ordering atoms\n # =================================================================\n if ordering == 'org':\n df_xyz = xyz_normalize_centered(df_xyz_plain, max_vals = max_vals, centered = centered)\n\n elif ordering == 'sorted': \n df_xyz = xyz_normalize_centered(df_xyz_plain, max_vals = max_vals, centered = centered)\n df_xyz = df_xyz.sort_values(['x', 'y', 'z'])\n\n elif ordering == 'org_aligned':\n df_xyz_plain[xyz_cols] = xyz_align_PCA(df_xyz_plain[xyz_cols].values)\n df_xyz = xyz_normalize_centered(df_xyz_plain, max_vals = max_vals, centered = centered)\n\n elif ordering == 'sorted_aligned':\n df_xyz_plain[xyz_cols] = xyz_align_PCA(df_xyz_plain[xyz_cols].values)\n df_xyz = xyz_normalize_centered(df_xyz_plain, max_vals = max_vals, centered = centered)\n df_xyz = df_xyz.sort_values(['x', 'y', 'z'])\n\n # ================================\n # 2. 
Adding coordination of atoms\n # ================================ \n _, coords = return_neighbour_list(df_xyz_plain, df_bond, atom_type_col = 'atom_type', xyz_cols = ['x', 'y', 'z'])\n df_xyz['coordination'] = coords\n\n df_xyz.to_csv(os.path.join(out_path, out_file_name), index=False)", "title": "" }, { "docid": "8a19046787e8b6dba090652a2ab8b127", "score": "0.4524642", "text": "def column_align(self):\n return self._column_align", "title": "" }, { "docid": "9fb749300f2dff02083957687026b93d", "score": "0.45214546", "text": "def iter_all_cells(self):\n for row in range(self.table.nrows):\n for col in range(self.table.ncols):\n yield row, col", "title": "" }, { "docid": "62db8e4e2320b1399a973aadf88d224f", "score": "0.45199275", "text": "def _alignment_histogram_pruning(self, align_beam_size, align_model_probs, bucket_key,\n num_align_models, start, end):\n utils.check_condition(num_align_models == 1, \"Skip alignments only implemented for one alignment model\")\n skip_alignments = np.array([True] * bucket_key[0])\n np_align_model_probs = align_model_probs[0].asnumpy()\n for sent in range(self.batch_size * self.beam_size):\n source_sel = slice(start[sent].asscalar(), end[sent].asscalar())\n rows = slice(sent, (sent + 1))\n sliced_scores = np_align_model_probs[:, rows, source_sel] # .reshape(shape=(1, -1))\n if (source_sel.stop - source_sel.start) > align_beam_size:\n # returns: best_hyp_indices_, best_hyp_pos_indices , best_word_indices\n (_, _, best_word_indices), _ = utils.smallest_k( -1 * sliced_scores, align_beam_size, False)\n for k in best_word_indices:\n if 0 <= k < bucket_key[0]:\n skip_alignments[k] = False\n else:\n for k in range(source_sel.stop - source_sel.start):\n skip_alignments[k] = False\n\n return skip_alignments", "title": "" }, { "docid": "f600fdb9ef8c158e28610e161de5ddb0", "score": "0.4515573", "text": "def slice(self, aligned):\n slices = {}\n for (mseq, rname, pos), count in aligned.iteritems():\n # check where this read mapped\n read_start = int(pos)\n read_end = read_start + len(mseq.strip('-'))\n coords = self.coords[rname]\n if read_end < coords['Core'][0] or read_start > coords['NS5b'][1]:\n # read falls outside of ORF\n continue\n\n # did it map to one of the target genes?\n for target_gene, target_coords in self.targets.iteritems():\n if target_gene not in slices:\n slices.update({target_gene: {}})\n left, right = coords[target_gene]\n\n for tc in target_coords:\n if tc not in slices[target_gene]:\n slices[target_gene].update({tc: []})\n gene_left, gene_right = tc # unpack tuple\n\n # adjust gene coordinates to genome coordinates\n genome_left = gene_left + left\n genome_right = gene_right + left\n if read_start > genome_left or read_end < genome_right:\n # full coverage of target not possible\n continue\n\n slice = mseq[genome_left:genome_right]\n slices[target_gene][tc].append((rname, slice, count))\n\n return slices", "title": "" }, { "docid": "1858c8f74ce09109bd064850b1b046e4", "score": "0.45005003", "text": "def _iter_values(self):\n _slice = list(self._none_slices_except_domain_axis)\n slices = []\n for index in range(self.shape[self.labels_axis]):\n _slice[self.labels_axis] = index\n slices.append(tuple(_slice))\n return zip(\n slices,\n np.moveaxis(self.magnitude, self.labels_axis, 0)\n )", "title": "" }, { "docid": "7ea5ed663fbc1ff285496f5e78e963be", "score": "0.44990557", "text": "def _traverse_column_data_records(self,\n column_name: str,\n *,\n keys: bool = True,\n values: bool = True) -> Iterable[Union[bytes, Tuple[bytes, bytes]]]:\n try:\n datatxn 
= TxnRegister().begin_reader_txn(self._dataenv)\n schemaColumnRangeKey = schema_db_range_key_from_column_unknown_layout(column_name)\n with datatxn.cursor() as cur:\n if not cur.set_range(schemaColumnRangeKey):\n raise KeyError(f'Traversal of commit references failed. '\n f'No column named `{column_name}` exists.')\n schemaColumnKey = cur.key()\n column_record = schema_column_record_from_db_key(schemaColumnKey)\n startRangeKey = dynamic_layout_data_record_db_start_range_key(column_record)\n yield from self.cursor_range_iterator(datatxn, startRangeKey, keys, values)\n finally:\n TxnRegister().abort_reader_txn(self._dataenv)", "title": "" }, { "docid": "06fb292427ce719e3bfac454d30f0af8", "score": "0.4497053", "text": "def iter_sites(self, start=0, stop=None, size=1):\n if stop is None:\n stop = self.nsites\n if (stop - start) % size != 0:\n raise ValueError('Alignment cannot be completely divided into '\n 'chucks of size {}'.format(size))\n for i in range(start, stop, size):\n samples = [s[i:i+size] for s in self.samples.sequences]\n if not (self.markers is None or self.markers.nrows == 0):\n markers = [s[i:i+size] for s in self.markers.sequences]\n yield samples + markers\n else:\n yield samples", "title": "" }, { "docid": "0f599eda1779ea5e1588ff1283dbca19", "score": "0.4495233", "text": "def cols(self):\n for col_heading in self.col_hds:\n yield (self._data[row_heading][col_heading] for row_heading in self.row_hds)", "title": "" }, { "docid": "556f50071cc174a2c820bdb799d2a522", "score": "0.44883695", "text": "def score_alignment(self,chunk1,chunk2):\n self.tradition_alignment(chunk1,chunk2)\n\n def filter_both_chunks(tmp_a,tmp_b,filter_list):\n \"\"\"Filters two alignment_blocks based on residues that made it into both alignments\n \"\"\"\n filt_chunk1 = [i._resraw for i in tmp_a._res_list]\n filt_chunk2 = [i._resraw for i in tmp_b._res_list]\n tmp_lista = []\n tmp_listb = []\n for i,j in filter_list:\n if i in filt_chunk1 and j in filt_chunk2:\n tmp_lista.append(i)\n tmp_listb.append(j)\n return ([i for i in tmp_a._res_list if i._resraw in tmp_lista],[*tmp_a._res_list]),\\\n ([i for i in tmp_b._res_list if i._resraw in tmp_listb],[*tmp_b._res_list])\n\n self.pep1,self.pep2 = chunk1._chunklist,chunk2._chunklist\n self.len1,self.len2 = len(self.pep1)+1,len(self.pep2)+1\n\n self.init_matrix()\n tmp_dict = defaultdict(list)\n for i,j in self.alignment.alignkey:\n tmp_chunk1 = chunk1._chunklist_dict[i]\n tmp_chunk2 = chunk2._chunklist_dict[j]\n tmp_a,tmp_b = filter_both_chunks(tmp_chunk1,tmp_chunk2,self.alignment.alignkey)\n alignment_score = align_chunks(tmp_a,tmp_b,self.walk)\n for k,l in alignment_score.items():\n tmp_dict[k].append(l)\n for i,j in tmp_dict.items():\n self.rawm[i[0]][i[1]] = min(j)\n self.init_adjustmatrix()", "title": "" }, { "docid": "4f67a7c2d55845aba3a5ae0703a99a73", "score": "0.44797224", "text": "def _match(self):\n #disable optimized matching\n optimized_rows = None\n optimized_columns = None\n for match in self.__match_rows(optimized_rows):\n #match in rows\n yield match\n for match in self.__match_rows(optimized_columns,\n transpose=True):\n #match in columns and transpose coordinates\n yield match", "title": "" }, { "docid": "65b66a0dfd54b20bd4a14659306b1c97", "score": "0.44747886", "text": "def filter_multimappers(align_file, data):\n config = dd.get_config(data)\n type_flag = \"\" if bam.is_bam(align_file) else \"S\"\n base, ext = os.path.splitext(align_file)\n out_file = base + \".unique\" + ext\n bed_file = dd.get_variant_regions(data)\n bed_cmd = '-L 
{0}'.format(bed_file) if bed_file else \" \"\n if utils.file_exists(out_file):\n return out_file\n base_filter = '-F \"[XS] == null and not unmapped {paired_filter} and not duplicate\" '\n if bam.is_paired(align_file):\n paired_filter = \"and paired and proper_pair\"\n else:\n paired_filter = \"\"\n filter_string = base_filter.format(paired_filter=paired_filter)\n sambamba = config_utils.get_program(\"sambamba\", config)\n num_cores = dd.get_num_cores(data)\n with file_transaction(out_file) as tx_out_file:\n cmd = ('{sambamba} view -h{type_flag} '\n '--nthreads {num_cores} '\n '-f bam {bed_cmd} '\n '{filter_string} '\n '{align_file} '\n '> {tx_out_file}')\n message = \"Removing multimapped reads from %s.\" % align_file\n do.run(cmd.format(**locals()), message)\n bam.index(out_file, config)\n return out_file", "title": "" }, { "docid": "6d07347a453302f98fef4c1d2c5f73d6", "score": "0.44727546", "text": "def _alignment_threshold_pruning(self, align_skip_threshold, align_model_probs, bucket_key,\n num_align_models, start, end):\n\n utils.check_condition(num_align_models == 1, \"Skip alignments only implemented for one alignment model\")\n skip_jumps = mx.nd.zeros((self.batch_size * self.beam_size, bucket_key[0]))\n\n for idx in range(self.batch_size * self.beam_size):\n source_sel = slice(start[idx].asscalar(), end[idx].asscalar())\n target_sel = slice(0, source_sel.stop - source_sel.start)\n skip_jumps[idx, target_sel] = align_model_probs[0][0, idx, source_sel]\n\n skip_alignments = np.all((skip_jumps < align_skip_threshold).asnumpy(), axis=0)\n\n return skip_alignments", "title": "" }, { "docid": "e02a1b99a2af5d5096049a0ded75f0f8", "score": "0.44660947", "text": "def slice(self, aligned):\n slices = {}\n for (mseq, rname, pos), count in aligned.iteritems():\n # check where this read mapped\n read_start = int(pos) # 1based coord wrt subtype full genome corresponding to read start\n read_end = read_start + len(mseq.strip('-')) # 1based coord wrt subtype full genome corresponding to read end + 1\n if rname not in self.coords:\n print \"Read %s didn't have a corresponding map\" % rname\n continue\n coords = self.coords[rname]\n if read_end < coords['Core'][0] or read_start > coords['NS5b'][1]:\n # read falls outside of ORF\n continue\n\n # did it map to one of the target genes?\n for target_gene, target_coords in self.targets.iteritems():\n if target_gene not in slices:\n slices.update({target_gene: {}})\n left, right = coords[target_gene] # 0based nuc coordinates wrt subtype full genome corresponding to gene start, gene end+1\n\n for tc in target_coords: # 0based nuc coordinates wrt H77 gene corresponding to target start, target end + 1\n if tc not in slices[target_gene]:\n slices[target_gene].update({tc: []})\n # 0based nuc coordinates wrt H77 gene corresponding to target start, target end + 1\n gene_left, gene_right = tc # unpack tuple\n\n # adjust gene coordinates to genome coordinates. 
Assume no indels wrt H77\n genome_left = gene_left + left # 0based nuc coordinates wrt subtype full genome corresponding to H77 target start\n genome_right = gene_right + left # 0based nuc coordinates wrt subtype full genome corresponding to H77 target end + 1\n\n # If the read begins after the target region or ends before the target region, read_slice_size will be negative.\n read_slice_size = min(genome_right, read_end-1) - max(genome_left, read_start-1)\n target_slice_size = genome_right - genome_left\n if read_slice_size / float(target_slice_size) < self.min_target_width:\n # merged read does not cover the minimum required width of the target region\n continue\n\n slice = mseq[genome_left:genome_right] # mseq should be left-padded wrt subtype full genome\n\n # After slicing merged sequences, we may end up with duplicate slices for the same target gene & coords\n # against the same reference\n slices[target_gene][tc].append((rname, slice, count))\n\n return slices", "title": "" }, { "docid": "91838a895be3d319df0504b7548f3ff9", "score": "0.4461923", "text": "def test_iter():\n bg = ipythonblocks.BlockGrid(2, 2)\n\n coords = ((0, 0), (0, 1), (1, 0), (1, 1))\n\n for b, c in zip(bg, coords):\n assert b.row == c[0]\n assert b.col == c[1]", "title": "" }, { "docid": "a708c617e8d17c1f92957907c583cfa5", "score": "0.445352", "text": "def _listed_ea_column_check():\n\n def diff(li1, li2):\n return list(set(li1) - set(li2))\n\n for ea_row in unused_list:\n ddi_index = views_index[ea_row[15]]\n # This check is performed in\n # _ea_in_disposition_col0_and_empty_ipr_d_col\n if ea_row[1] not in ddi_data[ddi_index]:\n continue\n if ea_row[0] in ea_listed_values['IPR Designation'] and \\\n 'IPR Designation' not in \\\n ddi_data[ddi_index][ea_row[1]]['extattrs']:\n continue\n # Processing listable columns.\n for key, value in ea_index.items():\n # Skip's unused keys.\n if key not in ['Datacenter', 'IPR Designation']:\n continue\n # Check for blank column and blank source column.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] and \\\n ea_row[value] in ['', 'DDI']:\n continue\n # Check Disposition col, check for comma not in IPR D col\n # value, check value in IPR D col to ea ipr d attribute list,\n # check IPR D col value eq ddi value.\n # On not listed IPR D values.\n if key in ea_listed_values:\n ipr_temp_list = []\n ipam_temp_list = []\n # Building list for diff's against DDI data.\n if ea_row[0] in ea_listed_values[key] \\\n and ea_row[0] not in ea_row[value]:\n ipr_temp_list.append(ea_row[0])\n # Extend list if listed values in IPR D column.\n if ',' in ea_row[ea_index[key]]:\n ipr_temp_list.extend([x.strip()\n for x in ea_row[ea_index[key]].\n split(',')\n if x.strip() in\n ea_listed_values[key]])\n # Append list if non-listed values in IPR D column.\n if ',' not in ea_row[ea_index[key]] and \\\n ea_row[ea_index[key]] in \\\n ea_listed_values[key]:\n ipr_temp_list.append(ea_row[ea_index[key]])\n\n # Remove blank elements from list.\n ipr_temp_list = [x for x in ipr_temp_list if x]\n\n # If in IPR and no key listed in IPAM data.\n if ea_row[1] == '192.168.0.0':\n print('myvar: {}'.format(ea_row[1]))\n\n # If no data in IPAM but new data received.\n if key not in \\\n ddi_data[ddi_index][ea_row[1]]['extattrs'] \\\n and ipr_temp_list:\n if len(ipr_temp_list) > 1:\n import_report_data['update'].append(ea_row)\n import_override.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ','.join(\n ipr_temp_list)}])\n else:\n import_report_data['update'].append(ea_row)\n 
import_override.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ipr_temp_list[\n 0]}])\n continue\n\n # Building DDI list for diff against the Multi Att. Columns\n if isinstance(ddi_data[ddi_index][\n ea_row[1]]['extattrs'][\n key]['value'], list):\n ipam_temp_list.extend(ddi_data[ddi_index][\n ea_row[1]]['extattrs'][\n key]['value'])\n else:\n ipam_temp_list.append(ddi_data[ddi_index][\n ea_row[1]]['extattrs'][\n key]['value'])\n\n # Check for diff between listed sets.\n in_ipr_not_ipam = compare(ipr_temp_list, ipam_temp_list)\n if not in_ipr_not_ipam:\n import_report_data['update'].append(ea_row)\n import_override.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ','.join(\n ipr_temp_list)}])\n continue", "title": "" }, { "docid": "e3b5d92ab399f764c4a006f9d73bc711", "score": "0.44291773", "text": "def _traverse_column_schema_records(self, keys: bool = True, values: bool = True\n ) -> Iterable[Union[Tuple[bytes], Tuple[bytes, bytes]]]:\n startSchemaRangeKey = schema_record_count_start_range_key()\n try:\n datatxn = TxnRegister().begin_reader_txn(self._dataenv)\n yield from self.cursor_range_iterator(datatxn, startSchemaRangeKey, keys, values)\n finally:\n TxnRegister().abort_reader_txn(self._dataenv)", "title": "" }, { "docid": "1db63069a26bad34c3556090769af3ce", "score": "0.44239363", "text": "def _test_final_column_no_padding(sequences):\n\n assert np.all(sequences[:, -1] > 0)", "title": "" }, { "docid": "3031aa4345d39ab60ed5e9be81afab47", "score": "0.44170517", "text": "def getColumnAlignments(self):\n if self._columnAlignments is None:\n return None\n\n alignments = list()\n\n for col in self._visibleColumns:\n alignments.append( self.getColumnAlignment(col) )\n\n return alignments", "title": "" }, { "docid": "e2199c1859f95becc996735ee9c3d00a", "score": "0.44155955", "text": "def prep_alleles_table_compare(df_alleles,sample_name_1,sample_name_2,MAX_N_ROWS,MIN_FREQUENCY):\n\tdna_to_numbers={'-':0,'A':1,'T':2,'C':3,'G':4,'N':5}\n\tseq_to_numbers= lambda seq: [dna_to_numbers[x] for x in seq]\n\n\tX=[]\n\tannot=[]\n\ty_labels=[]\n\tinsertion_dict=defaultdict(list)\n\tper_element_annot_kws=[]\n\n\tre_find_indels=re.compile(\"(-*-)\")\n\tidx_row=0\n\tfor idx,row in df_alleles.ix[df_alleles['%Reads_'+sample_name_1] + df_alleles['%Reads_'+sample_name_2]>=MIN_FREQUENCY][:MAX_N_ROWS].iterrows():\n\t\tX.append(seq_to_numbers(str.upper(idx)))\n\t\tannot.append(list(idx))\n\t\ty_labels.append('%.2f%% (%d reads) %.2f%% (%d reads) ' % (row['%Reads_'+sample_name_1],row['#Reads_'+sample_name_1],\n\t\t\t\t\t\t\t\t\t\t\t\t\trow['%Reads_'+sample_name_2],row['#Reads_'+sample_name_2]))\n\n\n\t\tfor p in re_find_indels.finditer(row['Reference_Sequence']):\n\t\t\tinsertion_dict[idx_row].append((p.start(),p.end()))\n\n\t\tidx_row+=1\n\n\n\t\tidxs_sub= [i_sub for i_sub in range(len(idx)) if \\\n\t\t\t\t (row['Reference_Sequence'][i_sub]!=idx[i_sub]) and \\\n\t\t\t\t (row['Reference_Sequence'][i_sub]!='-') and\\\n\t\t\t\t (idx[i_sub]!='-')]\n\t\tto_append=np.array([{}]*len(idx),dtype=np.object)\n\t\tto_append[ idxs_sub]={'weight':'bold', 'color':'black','size':16}\n\t\tper_element_annot_kws.append(to_append)\n\n\treturn X,annot,y_labels,insertion_dict,per_element_annot_kws", "title": "" }, { "docid": "294bf27b8f1067ab809de007cb69131b", "score": "0.44148335", "text": "def compute_shift_neighbor_point(self, j, i, contributing_alignment_points):\n\n self.y_shifts[j][i] = 0.\n self.x_shifts[j][i] = 0.\n for ap in contributing_alignment_points:\n j_ap = 
self.alignment_points[ap['alignment_point_index']][0]\n i_ap = self.alignment_points[ap['alignment_point_index']][1]\n self.y_shifts[j][i] += ap['weight'] * self.y_shifts[j_ap][i_ap]\n self.x_shifts[j][i] += ap['weight'] * self.x_shifts[j_ap][i_ap]", "title": "" }, { "docid": "f6a826d0e604e0f8a1b594042c08262c", "score": "0.44122374", "text": "def indices_allpeople_with_allpeople_in_two_frames(meanPositions_b, meanPositions_c, meanPositions_a, printAuxFlag=False, distScale=8*30, b_d=-0.8, b_lr=-0.2):\r\n nb, nc = len(meanPositions_b), len(meanPositions_c)\r\n EDMatrix, EDLMatrix, LRMatrix, TAMatrix = np.ones((nb, nc)), np.ones((nb, nc)), np.ones((nb, nc)), np.ones((nb, nc))\r\n DBAMatrix, DBBMatrix, DBCMatrix = np.ones((nb, nc)), np.ones((nb, nc)), np.ones((nb, nc))\r\n for i, meanPosition_b in enumerate(meanPositions_b):\r\n meanPosition_a = meanPositions_a[i]\r\n for j, meanPosition_c in enumerate(meanPositions_c):\r\n indices = indices_person_with_person_in_three_frames(meanPosition_a, meanPosition_b, meanPosition_c)\r\n EDMatrix[i, j] = indices['ED']\r\n EDLMatrix[i, j] = indices['EDL']\r\n LRMatrix[i, j] = indices['LR']\r\n TAMatrix[i, j] = indices['TA']\r\n DBAMatrix[i, j] = indices['DBA']\r\n DBBMatrix[i, j] = indices['DBB']\r\n DBCMatrix[i, j] = indices['DBC']\r\n # when a person is new-coming, he has no poseVector_a information, and would lead to a constant-zero LR and angel\r\n for nrow in range(LRMatrix.shape[0]):\r\n if np.all(LRMatrix[nrow, :] == 0) and np.all(TAMatrix[nrow, :] == 0):\r\n for ncol in range(LRMatrix.shape[1]):\r\n if np.any(LRMatrix[:, ncol] > 0):\r\n LRMatrix[nrow, ncol] = np.min([x for x in LRMatrix[:, ncol] if x > 0])\r\n if np.any(TAMatrix[:, ncol] > 0):\r\n TAMatrix[nrow, ncol] = np.min([x for x in TAMatrix[:, ncol] if x > 0])\r\n # if EDMatrix.shape[1] == 1:\r\n # NED1Matrix = np.ones((EDMatrix.shape[0], 1))\r\n # else:\r\n # NED1Matrix = (EDMatrix - np.min(EDMatrix, axis=1).reshape(-1, 1)) / (0.00000000000000000000001 + (\r\n # np.max(EDMatrix, axis=1).reshape(-1, 1) - np.min(EDMatrix, axis=1).reshape(-1, 1)))\r\n # if EDMatrix.shape[0] == 1:\r\n # NED2Matrix = np.ones((1, EDMatrix.shape[1]))\r\n # else:\r\n # NED2Matrix = (EDMatrix - np.min(EDMatrix, axis=0).reshape(1, -1)) / (0.00000000000000000000001 + (\r\n # np.max(EDMatrix, axis=0).reshape(1, -1) - np.min(EDMatrix, axis=0).reshape(1, -1)))\r\n\r\n NED1Matrix = EDMatrix / distScale\r\n NED2Matrix = NED1Matrix\r\n \r\n R_ED1Matrix = np.argsort(np.argsort(EDMatrix))\r\n R_ED2Matrix = np.argsort(np.argsort(EDMatrix, axis=0), axis=0)\r\n indicesMatrix = {'ED': EDMatrix, 'EDL': EDLMatrix, 'LR': LRMatrix, 'TA': TAMatrix,\r\n 'DBA': DBAMatrix, 'DBB': DBBMatrix, 'DBC': DBCMatrix, 'R_ED1': R_ED1Matrix, 'R_ED2': R_ED2Matrix}\r\n\r\n VMatrix_toChooseNext = NED1Matrix * b_d + LRMatrix * b_lr\r\n VMatrix_toChoosePrevious = NED2Matrix * b_d + LRMatrix * b_lr\r\n\r\n # these V are the min the best, so we need to use -V to calculate P\r\n P_toChooseNext = V2P(VMatrix_toChooseNext, axis=1)\r\n P_toChoosePrevious = V2P(VMatrix_toChoosePrevious, axis=0)\r\n\r\n # P_toChooseNext = P_toChooseNext[:, :P_toChooseNext.shape[1] - 1]\r\n # P_toChoosePrevious = P_toChoosePrevious[:P_toChoosePrevious.shape[0] - 1, :]\r\n\r\n P = P_toChooseNext * P_toChoosePrevious\r\n if printAuxFlag:\r\n print('\\nDist: \\n{}'.format(EDMatrix))\r\n print('\\nNormarlized Dist: \\n{}'.format(NED1Matrix))\r\n print('\\nLength Ratio: \\n{}'.format(LRMatrix))\r\n print('\\nAngle: \\n{}'.format(TAMatrix))\r\n return P, indicesMatrix", "title": "" }, 
{ "docid": "c02e2eb5cdeda941eebd9a938f60bde9", "score": "0.4409557", "text": "def internal_iter(self):\n for idx, x in np.ndenumerate(self.internal_t):\n yield x, self.yout[idx, ...]", "title": "" }, { "docid": "6299bff0ad05e6c9ca04350907e91e17", "score": "0.4403628", "text": "def alignment_stop():\n\n smi = SMI_Beamline()\n yield from smi.modeMeasurement()\n proposal_id('2023_2', '311645_Zhang_1')", "title": "" }, { "docid": "d90fdd225b304891acff7bfe911d7686", "score": "0.44032338", "text": "def __iter__(self):\n batch = []\n for row in self.reader:\n # Default collate does not work nicely on namedtuples and treat them as lists\n # Using dict will result in the yielded structures being dicts as well\n row_as_dict = row._asdict()\n batch.append(self.transform(row_as_dict) if self.transform else row_as_dict)\n if len(batch) == self.batch_size:\n yield self.collate_fn(batch)\n batch = []\n if batch:\n yield self.collate_fn(batch)", "title": "" }, { "docid": "74cfef2ba0f48aedd7645db60e6e1612", "score": "0.44022062", "text": "def ialign(images, reference = None, mask = None, fill_value = 0.0, fast = True):\n images = iter(images)\n \n if reference is None:\n reference = next(images)\n yield reference\n\n yield from map(partial(align, reference = reference, mask = mask, fill_value = fill_value, fast = fast), images)", "title": "" }, { "docid": "89155a6f4a71a0e56e4ed835ceb1a325", "score": "0.4399901", "text": "def _next_column_iterator(previous_column, height, i = None):\n if i is None:\n i = height\n if i == 0:\n yield [-1]*height\n else:\n for column in _next_column_iterator(previous_column, height, i-1):\n min_value = previous_column[i-1]\n if i > 1:\n min_value = max(min_value, column[i-2]+1)\n for value in range(min_value, previous_column[i]+1):\n c = copy.copy(column)\n c[i-1] = value\n yield c", "title": "" }, { "docid": "59559f6d99a5f27fe18e26c27c8131af", "score": "0.43922815", "text": "def fragment_annot(df, barcodes):\n fw = []\n rv = []\n for i, b in enumerate(barcodes):\n df_b = df.loc[df['Barcode'] == b]\n cloud_idx = 0\n for j in df_b.Reference.unique().tolist():\n ref_idx = cloud_idx\n df_r = df_b.loc[df_b['Reference'] == j]\n start_pos = df_r.Start.tolist()[0] # Leftmost start on reference j. Should be pre-sorted from SAM\n for k in range(len(df_r)):\n if df_r.iloc[k,3] > start_pos + 200000: # Read start outside 2 x max. est. 
fragment size\n ref_idx += 1\n start_pos = df_r.iloc[k,3]\n read_info = [df_r.iloc[k,0], ref_idx, j + '-' + str(ref_idx)] # ref-enh_num\n rv.append(read_info) if df_r.iloc[k,1] else fw.append(read_info)\n cloud_idx = ref_idx + 1\n return fw, rv", "title": "" }, { "docid": "68d6f06e824b73b96be95020e674c0a0", "score": "0.4389992", "text": "def remove_element_column(self):\n for i in range(1, len(self.pdb_map)+1):\n ele = self.pdb_map[i][\"element\"]\n e = ele[11]\n self.pdb_map[i][\"element\"]=\" \"+e\n print \"Extra stuff in Element Columns Removed\"\n return self.pdb_map", "title": "" }, { "docid": "9ba00a38e0c18497f2468e44c98702b3", "score": "0.4381122", "text": "def filter_annoying_cell(self):\n\n idx_center = self.labels.index(self.label_center)\n not_neighbors = []\n for i, label in enumerate(self.labels):\n if label not in self.neighbors[idx_center] and i != idx_center:\n not_neighbors.append([i, self.angles[i]])\n not_neighbors = np.array(not_neighbors)\n if len(not_neighbors) > 2:\n to_delete = not_neighbors[not_neighbors[:, 1].argsort()][2:, 0]\n self.delete(to_delete.astype(int))", "title": "" }, { "docid": "4fcc8b9790e994e95d2df22dd53a85a6", "score": "0.4373849", "text": "def findMarkers(markerDictList, text):\n global stopWords\n\n rows = []\n for markerType, markerRe, markerDict in markerDictList:\n textLower = text.lower()\n #if markerType in [\"genbankList\", \"genbank\"]:\n #keywords = \n #if not textContainsAny(textLower, keywords):\n #continue\n \n if markerType==\"genbankList\":\n for row in iterGenbankRows(markerRe, markerType, text):\n #yield row\n rows.append(row)\n continue\n\n if markerType in neededWordDict:\n keywords = neededWordDict[markerType]\n if not textContainsAny(textLower, keywords):\n continue\n\n for match in markerRe.finditer(text):\n word = match.group(\"id\")\n if word in stopWords:\n continue\n\n\n if markerType==\"pdb\":\n word = word.lower()\n\n if markerType in [\"hg17\", \"hg18\", \"hg19\"]:\n word = word.replace(\",\", \"\").replace(\" \", \"\")\n\n if markerDict==None:\n idList = [word]\n else:\n idList = markerDict.get(word, None)\n\n if idList != None:\n start = match.start(\"id\")\n end = match.end(\"id\")\n for recogId in idList:\n if word==recogId:\n word=\"\"\n row = [ start, end, markerType, word, recogId]\n #yield result\n rows.append(row)\n\n if len(rows)<MAXROWS:\n return rows\n else:\n return []", "title": "" }, { "docid": "ca7df8187d285acc5433547dca0cbb43", "score": "0.4372176", "text": "def apply_offset(self):\n \n if self.x_offset or self.y_offset:\n for y_idx, row in enumerate(self.coordinates):\n for x_idx, coord in enumerate(row):\n self.coordinates[y_idx][x_idx] = [coord[0] + self.x_offset, coord[1] + self.y_offset]", "title": "" }, { "docid": "a5f5886c5074e18871ae560302d7925d", "score": "0.43685156", "text": "def align(self):\n\n # populate the score matrices based on the input parameters\n self.populate_score_matrices()\n\n # print(\"-------M MATRIX-------\")\n # self.m_matrix.print_scores()\n # # print(\"-------IX MATRIX-------\")\n # self.ix_matrix.print_scores()\n # # print(\"-------IY MATRIX-------\")\n # self.iy_matrix.print_scores()\n\n # print(self.find_traceback_start())\n\n # Perform traceback, then write the output to file\n self.traceback()\n self.write_output()\n\n # perform a traceback and write the output to an output file\n ### FILL IN ###", "title": "" }, { "docid": "99088bc9eae65a1377b577b6932371a1", "score": "0.43635973", "text": "def align(data):\n arr = []\n for d in data:\n # DO\n if d.shape[0] == 3:\n d 
= np.insert(d, [1, 2], 0, axis=0)\n arr.append(d)\n data = np.array(arr)\n dapi = data[:,0]\n return lasagna.bayer.register_and_offset(data, registration_images=data[:, 0])", "title": "" }, { "docid": "c31c69fa60d9f67aa1436c1069691c88", "score": "0.43540114", "text": "def _align(x, y, mean_xy, variance_xy, bead_costs):\n m = {}\n for i in range(len(x) + 1):\n for j in range(len(y) + 1):\n if i == j == 0:\n m[0, 0] = (0, 0, 0)\n else:\n m[i, j] = min((m[i-di, j-dj][0] +\n length_cost(x[i-di:i], y[j-dj:j], mean_xy, variance_xy) \\\n + bead_cost, di, dj)\n for (di, dj), bead_cost in BEAD_COSTS.items()\n if i-di>=0 and j-dj>=0)\n\n i, j = len(x), len(y)\n while True:\n (c, di, dj) = m[i, j]\n if di == dj == 0:\n break\n yield (i-di, i), (j-dj, j)\n i -= di\n j -= dj", "title": "" }, { "docid": "b1123b28913e51596181521fead73a7a", "score": "0.43500638", "text": "def unmap(data, count, inds, fill=0):", "title": "" }, { "docid": "9b2177638e43250dc7a567d67b659953", "score": "0.43485883", "text": "def alignment_extract(seed, setting = \"AWO\"):\n with open('./work/results_seed' + str(seed) + '/alignments/extracted_matrix.pkl', 'rb') as f:\n extract_matrix = pickle.load(f)\n\n with open(test_de_bpe, encoding=\"utf-8\") as fbpe:\n src_bpe_sents = fbpe.readlines()\n with open(test_de_word, encoding=\"utf-8\") as fword:\n src_word_sents = fword.readlines()\n\n with open(test_en_bpe, encoding=\"utf-8\") as fbpe:\n tgt_bpe_sents = fbpe.readlines()\n with open(test_en_word, encoding=\"utf-8\") as fword:\n tgt_word_sents = fword.readlines()\n\n for mode in [\"attn\", \"summed_afx\"]:\n for l in range(6):\n with open(\"./work/results_seed\" + str(seed) + \"/alignments/hypothesis-{}-{}-{}\".format(mode,l,setting), \"w\") as f:\n for i in range(len(src_bpe_sents)):\n src_bpe_sent = src_bpe_sents[i]\n src_word_sent = src_word_sents[i]\n src_word_to_bpe = convert_bpe_word(src_bpe_sent, src_word_sent)\n src_len = len(src_word_sent.split())\n\n tgt_bpe_sent = tgt_bpe_sents[i]\n tgt_word_sent = tgt_word_sents[i]\n tgt_word_to_bpe = convert_bpe_word(tgt_bpe_sent, tgt_word_sent)\n \n if mode == \"summed_afx\":\n attention_matrix = torch.squeeze(extract_matrix[i][mode][l]).detach().numpy()\n else:\n attention_matrix = torch.squeeze(extract_matrix[i][mode][l]).mean(dim=0).detach().numpy()\n\n if setting == \"AWI\":\n attention_matrix = attention_matrix[list(range(1,len(attention_matrix)))+[0]]\n attention_matrix = get_word_word_attention(attention_matrix, src_word_to_bpe, tgt_word_to_bpe)\n attention_matrix = np.argmax(attention_matrix, -1)\n\n for t, s_a in enumerate(attention_matrix):\n if s_a != src_len:\n f.write(\"{}-{} \".format(t+1, s_a+1))\n f.write(\"\\n\") \n\n for mode in [\"attn\", \"afx\"]:\n for l in range(6):\n for h in range(4):\n with open(\"./work/results_seed\" + str(seed) + \"/alignments/hypothesis-{}-{}-{}-{}\".format(mode,l,h,setting), \"w\") as f:\n for i in range(len(src_bpe_sents)):\n src_bpe_sent = src_bpe_sents[i]\n src_word_sent = src_word_sents[i]\n src_word_to_bpe = convert_bpe_word(src_bpe_sent, src_word_sent)\n src_len = len(src_word_sent.split())\n\n tgt_bpe_sent = tgt_bpe_sents[i]\n tgt_word_sent = tgt_word_sents[i]\n tgt_word_to_bpe = convert_bpe_word(tgt_bpe_sent, tgt_word_sent)\n\n attention_matrix = torch.squeeze(extract_matrix[i][mode][l])[h].detach().numpy()\n if setting == \"AWI\":\n attention_matrix = attention_matrix[list(range(1,len(attention_matrix)))+[0]]\n attention_matrix = get_word_word_attention(attention_matrix, src_word_to_bpe, tgt_word_to_bpe)\n attention_matrix = 
np.argmax(attention_matrix, -1)\n\n for t, s_a in enumerate(attention_matrix):\n if s_a != src_len:\n f.write(\"{}-{} \".format(t+1, s_a+1))\n f.write(\"\\n\")\n return None", "title": "" }, { "docid": "86c2007a4d655eaf33350a76b400111b", "score": "0.43483782", "text": "def zihan_alignment():\n proposal_id('2023_1', '000000_tests')\n\n try:\n yield from alignement_gisaxs_hex(angle=0.5, rough_y=0.5)\n except:\n yield from alignement_gisaxs_hex(angle=0.1, rough_y=0.5)\n\n proposal_id('2023_1', '311645_Zhang')", "title": "" }, { "docid": "7f0922a1b6e889b4803e6edc0da63ca6", "score": "0.43459523", "text": "def iterlabels(self):\n return iter(\n np.moveaxis(self.magnitude, self.labels_axis, 0)\n * self.units\n )", "title": "" }, { "docid": "0de6dbfcfc915d6816eb5c45f1b6c516", "score": "0.43381003", "text": "def align_cluster( cluster ):\n x_0 = cluster[0][:,0]\n x_f = cluster[0][:,-1]\n distance = lambda x : np.dot(x,x)\n reverse_or_not = lambda x: x[:,::-1] if distance(x[:,0]-x_0) > distance(x[:,0]-x_f) else x\n return [ reverse_or_not(c) for c in cluster ]", "title": "" }, { "docid": "c8adf025194219c181105f446c52690c", "score": "0.43354973", "text": "def iteridat():\n while True:\n try:\n type, data = self.chunk(lenient=lenient)\n except ValueError as e:\n raise ChunkError(e.args[0])\n if type == b'IEND':\n # http://www.w3.org/TR/PNG/#11IEND\n break\n if type != b'IDAT':\n continue\n # type == b'IDAT'\n # http://www.w3.org/TR/PNG/#11IDAT\n if self.colormap and not self.plte:\n warnings.warn(\"PLTE chunk is required before IDAT chunk\")\n yield data", "title": "" }, { "docid": "0b9a7ba2a402e26bc5c94a0e7f26bab2", "score": "0.43353194", "text": "def dlmat2imat(dlmat):\n\n for i, row in enumerate(dlmat):\n if i > 0:\n for j, v in enumerate(row):\n if j > 0:\n yield i-1, j-1, v", "title": "" }, { "docid": "0d8ee4ea567471feeceac879e534658d", "score": "0.43341258", "text": "def _extract_images(self, image):\n _shape_height = image.shape[0]#the height of the image\n _shape_width = image.shape[1]#the width of the image\n\n #search for the first column that is not black\n _x = np.min(np.nonzero(np.any(image, axis = (1,2))))\n\n # search along this column, we can obtain the start pixel row of each image row\n _row_mask = np.any(image[:,_x,:], axis = 1)\n _row_start_y = np.insert(np.nonzero(np.diff(_row_mask.astype(int)) == 1)[0]+1, 0, 0)\n # to be used in later loop\n _row_start_y = np.append(_row_start_y, _shape_height-1)\n _extracted_list = []\n #Search by all the starting rows\n for _y_index in range(_row_start_y.size-1):\n _starting_y = _row_start_y[_y_index]\n\n # we can find all the start and end column of each image\n # (i.e. 
they are horizonally bounded)\n _column_mask = np.any(image[_starting_y], axis = 1) # all the non-black point\n _column_start_x = np.insert(np.nonzero(np.diff(_column_mask.astype(int)) == 1)[0]+1, 0, 0)\n _column_end_x = np.nonzero(np.diff(_column_mask.astype(int)) == -1)[0]+1\n # check each horizontally bounded region\n for _x_index in range(_column_start_x.size):\n # check the leftmost column\n _image_starting_x = _column_start_x[_x_index]\n\n #search in this column between two starting y\n _image_y_mask = np.any(image[_starting_y:_row_start_y[_y_index + 1],_image_starting_x], axis=1)\n\n #get the end point of y\n _image_end_y = np.where(np.diff(_image_y_mask.astype(int)) == -1)[0][0]+1\n\n #append the image to the final list\n _extracted_list.append(\n image[_starting_y:_starting_y+_image_end_y,\n _image_starting_x:_column_end_x[_x_index]]\n )\n return _extracted_list", "title": "" }, { "docid": "59d83b140eb22b2dd755a449cbf00c28", "score": "0.4333457", "text": "def motif_pair_dfi(dfi_filtered, motif_pair):\n dfa = dfi_filtered[dfi_filtered.pattern_name == motif_pair[0]]\n dfb = dfi_filtered[dfi_filtered.pattern_name == motif_pair[1]]\n\n dfab = pd.merge(dfa, dfb, on='example_idx', how='outer')\n dfab = dfab[~dfab[['pattern_center_x', 'pattern_center_y']].isnull().any(1)]\n\n dfab['center_diff'] = dfab.pattern_center_y - dfab.pattern_center_x\n if \"pattern_center_aln_x\" in dfab:\n dfab['center_diff_aln'] = dfab.pattern_center_aln_y - dfab.pattern_center_aln_x\n dfab['strand_combination'] = dfab.strand_x + dfab.strand_y\n # assure the right strand combination\n dfab.loc[dfab.center_diff < 0, 'strand_combination'] = dfab[dfab.center_diff < 0]['strand_combination'].map(comp_strand_compbination).values\n\n if motif_pair[0] == motif_pair[1]:\n dfab.loc[dfab['strand_combination'] == \"--\", 'strand_combination'] = \"++\"\n dfab = dfab[dfab.center_diff > 0]\n else:\n dfab.center_diff = np.abs(dfab.center_diff)\n if \"center_diff_aln\" in dfab:\n dfab.center_diff_aln = np.abs(dfab.center_diff_aln)\n if \"center_diff_aln\" in dfab:\n dfab = dfab[dfab.center_diff_aln != 0] # exclude perfect matches\n return dfab", "title": "" }, { "docid": "462892548462bee1f446d8f04e6d1e15", "score": "0.43329483", "text": "def macs_peaks_knowngene_overall_analysis(macs_peaks_strand_filename,knowngene_genesymbol_filename,annotated_peaks_filename,unannotated_peaks_filename):\n macs_file=open(macs_peaks_strand_filename,'r')\n peaks=macs_file.readlines()\n \n gene_file=open(knowngene_genesymbol_filename,'r')\n genes=gene_file.readlines()\n \n list1=[]\n for i in range(20):\n list1.append(\"chr%s+\" % (i))\n list1.append(\"chr%s-\" % (i))\n list1.append(\"chrM+\")\n list1.append(\"chrM-\")\n list1.append(\"chrX+\")\n list1.append(\"chrX-\")\n list1.append(\"chrY+\")\n list1.append(\"chrY-\")\n \n dic1={}\n for j in list1:\n dic1[j]=[]\n \n for gene in genes[1:]: #the knowngene_genesymbol_file has a header\n chr=gene.split('\\t')[1]+gene.split('\\t')[2] # e.g.,\"chr1\" + \"-\"\n if chr in dic1.keys():\n dic1[chr].append(gene.strip('\\n').split('\\t'))\n dic1[chr][-1][3]=int(dic1[chr][-1][3]) #the txStart position is like \"12345\" and should be stored as 12345\n dic1[chr][-1][6]=int(dic1[chr][-1][6]) #the cdsEnd position is like \"12345\" and should be stored as 12345\n \n annotated_peaks=open(annotated_peaks_filename,'w')\n unannotated_peaks=open(unannotated_peaks_filename,'w')\n a=0 # used to store number of unannoted peaks\n b=0 #used to store number of annotated peaks\n for line in peaks:\n 
peak=line.strip('\\n').split('\\t')\n peak_chr=peak[0]+peak[10] #e.g., \"chr1\"+ \"-\"\n peak_start=int(peak[1])\n peak_end=int(peak[2])\n peak_summit=int(peak[1])+int(peak[4])\n low=0\n high=len(dic1[peak_chr])-1\n gene_pos=return_loci(dic1[peak_chr],3,6,peak_summit,low,high)#1 and 2 denote the positions in the list for txStart and cdsEnd\n if gene_pos==\"not found!\":\n unannotated_peaks.write(line)\n a=a+1\n else:\n peak.append(dic1[peak_chr][gene_pos][-1])\n new_peak='\\t'.join(peak)+'\\n'\n annotated_peaks.write(new_peak)\n b=b+1\n \n print \"unannotated peaks: %s\\t and annotated peaks: %s\\n\" %(a,b)\n macs_file.close()\n gene_file.close()\n annotated_peaks.close()\n unannotated_peaks.close()", "title": "" }, { "docid": "06e2a0b007927a14887adcd8904811be", "score": "0.4317422", "text": "def _get_alignment_result(self, fkp, row, col, reference, output):\n\n alignment_result = AlignmentResult()\n count_for_output = 0\n reach_first_cell = False\n while not reach_first_cell:\n # we will only stop when it is first cell\n reach_first_cell, row, col, count_for_output = self._get_me_the_result_by_looping_through_each_col(\n fkp, row, col, count_for_output, reference, output, alignment_result\n )\n\n alignment_result.merge_none_tokens()\n return alignment_result", "title": "" }, { "docid": "2e12e366846d985f622c6904a2250995", "score": "0.43162665", "text": "async def align(self, alignment = None):\n\t\tif alignment is None:\n\t\t\tif self.reader.sysinfo.ProcessorArchitecture == PROCESSOR_ARCHITECTURE.AMD64:\n\t\t\t\talignment = 8\n\t\t\telse:\n\t\t\t\talignment = 4\n\t\toffset = self.current_position % alignment\n\t\tif offset == 0:\n\t\t\treturn\n\t\toffset_to_aligned = (alignment - offset) % alignment\n\t\tawait self.seek(offset_to_aligned, 1)\n\t\treturn", "title": "" }, { "docid": "51fca61cdff1c4352be24f8e867db2fd", "score": "0.43139875", "text": "def __mark_new_columns_with_zeros_in_marked_rows(self):\n num_marked_columns = 0\n for index, column in enumerate(self._zero_locations.T):\n if index not in self._marked_columns:\n if column.any():\n row_indices, = np.where(column)\n zeros_in_marked_rows = (set(self._marked_rows) & set(row_indices)) != set([])\n if zeros_in_marked_rows:\n self._marked_columns.append(index)\n num_marked_columns += 1\n return num_marked_columns", "title": "" }, { "docid": "c7f3b5dec13f760f0c240b8acdad2ab4", "score": "0.43103832", "text": "def centroid_align(stars):\n center = int(stars[0].shape[0] / 2), int(stars[0].shape[1] / 2)\n aligned = []\n\n for star in stars:\n centroid = calculate_centroid(star)\n shift = np.subtract(center, centroid)\n star = fourier_shift(np.fft.fftn(star), shift)\n star = np.fft.ifftn(star).real\n aligned.append(star)\n\n return aligned", "title": "" }, { "docid": "90862dac95aa092d1af86bcb2c7d8a14", "score": "0.43101203", "text": "def __generateArtifactualLinkageMatrix(self, corrOnly=False):\n\n\t\tdef find_similar_peakwidth(featureMetadata, deltaMZ, deltaOverlap):\n\t\t\t\"\"\"Find 'identical' features based on m/z and peakwidth overlap\n\t\t\t\tinput:\n\t\t\t\t\tfeatureMetada msDataset.featureMetadata\n\t\t\t\t\tdeltaMZ\t\t m/z distance to consider two features identical [ <= ] (same unit as m/z)\n\t\t\t\t\tdelta overlap minimum peak overlap between two grouped features (0-100%)\n\t\t\t\toutput:\n\t\t\t\t\tpandas.DataFrame listing matched features based on deltaMZ and deltaOverlap\n\t\t\t\"\"\"\n\n\t\t\tdef get_match(i, ds, deltaMZ):\n\t\t\t\t\"\"\"Find identical features for a given 
variable\n\t\t\t\t\toutput:\n\t\t\t\t\t\tpandas.DataFrames listing the matching features based overlap of peakwidth\n\t\t\t\t\"\"\"\n\t\t\t\tmatch = (abs(ds.loc[i, 'Retention Time'] - ds.loc[:, 'Retention Time']) <= (\n\t\t\t\t\t\t\tds.loc[i, 'Peak Width'] + ds.loc[:, 'Peak Width']) / 2) & (\n\t\t\t\t\t\t\t\t\tabs(ds.loc[i, 'm/z'] - ds.loc[:, 'm/z']) <= deltaMZ) # find match\n\t\t\t\treturn (pandas.DataFrame(data={'node1': ds.index[i], 'node2': ds.index[match], 'Peak Overlap': ((((\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ds.loc[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t i, 'Peak Width'] +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ds.loc[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t match, 'Peak Width']) / 2) - abs(\n\t\t\t\t\tds.loc[i, 'Retention Time'] - ds.loc[match, 'Retention Time'])) / ((ds.loc[i, 'Peak Width'] +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tds.loc[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmatch, 'Peak Width']) / 2)) * 100})) # return the matching rows\n\n\t\t\t# get feature overlap\n\t\t\tds = featureMetadata[['Feature Name', 'Retention Time', 'm/z', 'Peak Width']]\n\t\t\t# By concatenating all the matches once, save ~22% compared to do it at each loop round\n\t\t\tmatches = [get_match(i, ds, deltaMZ) for i in range(ds.shape[0])] # get a list of matches\n\t\t\tres = pandas.concat(matches)\n\t\t\tres = res.loc[res.node1 < res.node2] # keeps feat1-feat2, removes feat1-feat1 and feat2-feat1\n\n\t\t\t# filter interactions by overlap\n\t\t\tres = res.loc[res.loc[:, 'Peak Overlap'] >= deltaOverlap, ['node1', 'node2']]\n\t\t\tres.reset_index(drop=True, inplace=True)\n\n\t\t\treturn (res)\n\n\t\t# end find_similar_peakwidth\n\n\t\tdef remove_min_corr_overlap(overlappingFeatures, intensityData, corrCutoff):\n\t\t\t\"\"\" Return the overlap match DataFrame with overlap of metabolites correlated < cut-off removed (and correlation added)\n\t\t\t\tinput:\n\t\t\t\t\toverlappingFeatures pandas.DataFrame as generated by find_similar_peakwidth\n\t\t\t\t\tintensityData\t pandas.DataFrame of data value for each sample (row) / feature (column)\n\t\t\t\t\tcorrCutoff\t\t minimum percentage of overlap (0-1)\n\t\t\t\toutput:\n\t\t\t\t\toverlapping features filtered\n\t\t\t\"\"\"\n\t\t\tlink_corr = numpy.zeros([overlappingFeatures.shape[0]])\n\t\t\tfor jrow in range(0, len(link_corr)):\n\t\t\t\tlink_corr[jrow] = numpy.corrcoef(intensityData[:, overlappingFeatures.loc[jrow, 'node1']],\n\t\t\t\t\t\t\t\t\t\t\t\t intensityData[:, overlappingFeatures.loc[jrow, 'node2']])[0, 1]\n\n\t\t\treturn (overlappingFeatures.loc[link_corr >= corrCutoff,])\n\n\t\t# end remove_min_corr_overlap\n\n\t\t# check required info in featureMetadata for artifactual filtering. If missing, sets self.Attributes['artifactualFilter'] to False\n\t\tif self.Attributes['featureFilters']['artifactualFilter'] == False:\n\t\t\traise ValueError(\n\t\t\t\t'Attributes[\\'artifactualFilter\\'] set to \\'False\\', artifactual filtering cannot be run, use \\'updateMasks(withArtifactualFiltering=False)\\' and \\'generateReport(data, reportType=\\'feature selection\\', withArtifactualFiltering=False)\\'')\n\t\tif 'Feature Name' not in self.featureMetadata.columns:\n\t\t\tself.Attributes['featureFilters']['artifactualFilter'] = False\n\t\t\traise LookupError(\n\t\t\t\t'Missing feature metadata \\\"Feature Name\\\". 
Artifactual filtering cannot be run, set MSDataset.Attributes[\\'artifactualFilter\\'] = \\'False\\', or use \\'updateMasks(withArtifactualFiltering=False)\\' and \\'generateReport(data, reportType=\\'feature selection\\', withArtifactualFiltering=False)\\'')\n\t\tif 'Retention Time' not in self.featureMetadata.columns:\n\t\t\tself.Attributes['featureFilters']['artifactualFilter'] = False\n\t\t\traise LookupError(\n\t\t\t\t'Missing feature metadata \\\"Retention Time\\\". Artifactual filtering cannot be run, set MSDataset.Attributes[\\'artifactualFilter\\'] = \\'False\\', or use \\'updateMasks(withArtifactualFiltering=False)\\' and \\'generateReport(data, reportType=\\'feature selection\\', withArtifactualFiltering=False)\\'')\n\t\tif 'm/z' not in self.featureMetadata.columns:\n\t\t\tself.Attributes['featureFilters']['artifactualFilter'] = False\n\t\t\traise LookupError(\n\t\t\t\t'Missing feature metadata \\\"m/z\\\". Artifactual filtering cannot be run, set MSDataset.Attributes[\\'artifactualFilter\\'] = \\'False\\', or use \\'updateMasks(withArtifactualFiltering=False)\\' and \\'generateReport(data, reportType=\\'feature selection\\', withArtifactualFiltering=False)\\'')\n\t\tif 'Peak Width' not in self.featureMetadata.columns:\n\t\t\tself.Attributes['featureFilters']['artifactualFilter'] = False\n\t\t\traise LookupError(\n\t\t\t\t'Missing feature metadata \\\"Peak Width\\\". Artifactual filtering cannot be run, set MSDataset.Attributes[\\'artifactualFilter\\'] = \\'False\\', or use \\'updateMasks(withArtifactualFiltering=False)\\' and \\'generateReport(data, reportType=\\'feature selection\\', withArtifactualFiltering=False)\\'')\n\n\t\tif ((not corrOnly) | (corrOnly & self._tempArtifactualLinkageMatrix.empty)):\n\t\t\tself._tempArtifactualLinkageMatrix = find_similar_peakwidth(featureMetadata=self.featureMetadata,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdeltaMZ=self.Attributes['filterParameters'][\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t'deltaMzArtifactual'], deltaOverlap=\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.Attributes['filterParameters'][\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t'overlapThresholdArtifactual'])\n\t\tartifactualLinkageMatrix = remove_min_corr_overlap(self._tempArtifactualLinkageMatrix, self._intensityData,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t self.Attributes['filterParameters'][\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'corrThresholdArtifactual'])\n\n\t\treturn (artifactualLinkageMatrix)", "title": "" }, { "docid": "8426a4c876d885b4757e926f40783a44", "score": "0.43041614", "text": "def get_alignments(self, species, start, stop, strand):\n files = dict()\n visited = set()\n query_size = stop - start\n idx = 0\n for iv in self.tree[species][start: stop]:\n iv_start, iv_end = iv.begin, iv.end\n if (iv_start, iv_end) not in visited:\n visited.add((iv_start, iv_end))\n block_start, block_stop, pathfile = iv.data\n # store all blocks related to a file\n #order.append((iv_start, iv_end, idx))\n files.setdefault(pathfile, list()).append((block_start, block_stop, idx))\n idx += 1\n # because segments can be in multiple files and are not necessary in the right order \n # we remember the segment positions to reorder the alignment at the end\n alignments = dict()\n ordered_alignments = list()\n order = list()\n if DEBUG:\n print(files)\n for pathfile in files:\n # final positions\n positions = sorted(files[pathfile])\n prev_pos = 0\n prev_stop = -1\n with open(pathfile) as handle:\n for file_pos_start, file_pos_stop, file_pos_idx in positions:\n # reading blocks\n assert(file_pos_start > 
prev_stop) # block cannot overlap, can they?\n file_pos_size = file_pos_stop - file_pos_start\n # we remove prev because we only move of the number of lines between the two blocks\n line = next(islice(handle, file_pos_start - 1 - prev_pos , file_pos_start - prev_pos)) \n align = next(MafIO.MafIterator(handle))\n # look for the sequence of the species that we are interested in\n found = False\n for record in align:\n if record.id == species:\n found = True\n break\n # only keep part of the alignment that we are interest of\n if found:\n start_seq = record.annotations[\"start\"]\n size_seq = record.annotations[\"size\"] \n stop_seq = start_seq + size_seq\n strand_seq = int(record.annotations[\"strand\"])\n srcSize_seq = record.annotations[\"srcSize\"]\n seq = record.seq\n msa_len = len(seq)\n \n qstart = start\n qstop = stop\n if strand_seq < 0:\n # convert query positions\n qstart = srcSize_seq - stop\n #qstop = srcSize_seq - tmp\n qstop = srcSize_seq - start\n # from this point everything is on the plus strand\n max_start = max(start_seq, qstart)\n max_stop = min(stop_seq, qstop)\n strand_p_start = max_start\n strand_p_stop = max_stop\n if strand_seq < 0:\n strand_p_start, strand_p_stop = srcSize_seq - strand_p_stop, srcSize_seq - strand_p_start\n\n abs_start = max_start - start_seq\n abs_stop = max_stop - start_seq\n\n # get new start of alignment \n msa_start, msa_stop, msa_size = seq2msa_startstop(str(seq), abs_start, abs_stop)\n \n if DEBUG:\n print(strand_seq, strand, file_pos_start, file_pos_stop, pathfile)\n print(start_seq, size_seq, srcSize_seq, qstart, qstop, max_start, max_stop, abs_start, abs_stop)\n print(msa_start, msa_stop)\n #print(\">\", record.seq[msa_start: msa_stop])\n #print(\" \", seq[msa_start: msa_stop])\n\n alignments[file_pos_idx] = (align[:, msa_start: msa_stop], strand_seq)\n order.append((strand_p_start, strand_p_stop, file_pos_idx))\n else:\n print(\"Unable to find SeqRecord for species {} in alignment:\".format(species))\n print(align)\n #file_pos_size = number of lines between two blocks\n prev_pos = file_pos_start + file_pos_size \n prev_stop = file_pos_stop\n # order segments and convert to the correct strand if necessary\n if DEBUG:\n print(order)\n order.sort()\n if DEBUG:\n print(order)\n if strand < 0:\n order = order[::-1]\n if DEBUG:\n print(order)\n for iv_start, iv_stop, i in order:\n align, strand_seq = alignments[i]\n if strand < 0:\n if strand_seq < 0:\n ordered_alignments.append(align)\n else:\n ordered_alignments.append(self._reverse_msa(align)) \n else:\n if strand_seq < 0:\n ordered_alignments.append(self._reverse_msa(align))\n else:\n ordered_alignments.append(align)\n return ordered_alignments", "title": "" }, { "docid": "4e0d3137c993e6e3f670d890450d0953", "score": "0.42939565", "text": "def feed_cells_island(self):\n for landscape in self.map.values():\n landscape.feed_all()", "title": "" }, { "docid": "0cfb6dc63dab0d8384f256528d781f48", "score": "0.4291958", "text": "def iterstraight(self, raw):\n\n # length of row, in bytes\n rb = self.row_bytes\n a = array('B')\n # The previous (reconstructed) scanline. 
None indicates first\n # line of image.\n recon = None\n for some in raw:\n a.extend(some)\n while len(a) >= rb + 1:\n filter_type = a[0]\n scanline = a[1:rb+1]\n del a[:rb+1]\n recon = self.undo_filter(filter_type, scanline, recon)\n yield recon\n if len(a) != 0:\n # :file:format We get here with a file format error:\n # when the available bytes (after decompressing) do not\n # pack into exact rows.\n raise FormatError(\n 'Wrong size for decompressed IDAT chunk.')\n assert len(a) == 0", "title": "" }, { "docid": "59a5bac148b6aa49b755f730da69144f", "score": "0.4291224", "text": "def align_seqs(seq1, seq2, map_qual):\n\n assert len(seq1) == len(seq2) == len(map_qual)\n \n mismatches = []\n for i in range(len(seq1)):\n if seq1[i] == seq2[i]:\n continue\n else:\n if map_qual[i] >= 30:\n mms = [seq1[i], seq2[i], i]\n mismatches.append(mms)\n\n return mismatches", "title": "" }, { "docid": "f861514d7511d76836992e79d9755b20", "score": "0.42891642", "text": "def flag_cells(self):\n for coordinate in self.get_coordinates():\n\n for cell in self.get_surrounding_cells(coordinate):\n if self.board[cell[0]][cell[1]] == '*':\n continue\n\n self.board[cell[0]][cell[1]] += 1", "title": "" }, { "docid": "d05207a804ff9a2d363afcfb36887169", "score": "0.42879254", "text": "def _iterate_groups(self):\n for self.groupID, g in enumerate(self.u_groups):\n self.df_g = self.df.loc[self.groups == g]\n if len(self.df_g) < self.min_grp_size:\n continue\n self.df_g.reset_index(inplace=True)\n self._pair_group()", "title": "" }, { "docid": "feecae6fe6b05b2e713df5d18a70f211", "score": "0.42837226", "text": "def iterate(Primate1, Primate2, side, Primate_inter=None):\n\n individuals = [i for i in os.listdir(Primate2) if '.' not in i]\n\n print('reading affine transformations')\n\n dir = Primate1 + '_to_' + Primate2\n if Primate_inter is not None:\n dir = Primate1 + '_to_' + Primate2 + '_via' + Primate_inter\n\n affine_model = os.path.join(dir, 'affine_trans_' + dir + '_' + side + '.txt')\n int_lon, int_lat, lon_transform, lat_transform = read_affine(affine_model)\n\n for ind in individuals:\n\n nameLon = os.path.join(Primate2, ind, ind + '_' + side + 'white_lon.gii')\n nameLat = os.path.join(Primate2, ind, ind + '_' + side + 'white_lat.gii')\n\n # print('reading coordinates')\n\n r = aims.Reader()\n texLatF = r.read(nameLat)\n texLonF = r.read(nameLon)\n texLat = np.array(texLatF[0])\n texLon = np.array(texLonF[0])\n\n # print('processing longitude')\n\n newLon = rescale(texLon, lon_transform, int_lon)\n\n # print('processing latitude')\n\n newLat = rescale(texLat, lat_transform, int_lat)\n\n # print('writing textures')\n\n nv = texLat.size\n newLatT = aims.TimeTexture_FLOAT(1, nv)\n newLonT = aims.TimeTexture_FLOAT(1, nv)\n\n for i in range(nv):\n newLatT[0][i] = newLat[i]\n newLonT[0][i] = newLon[i]\n\n outLat = os.path.join(dir, Primate1 + '_' + side + 'white_lat_to' + ind + '.gii')\n outLon = os.path.join(dir, Primate1 + '_' + side + 'white_lon_to' + ind + '.gii')\n\n r = aims.Writer()\n r.write(newLatT, outLat)\n r.write(newLonT, outLon)\n\n print('done')", "title": "" }, { "docid": "0ef576af79368e892d5af7bc09fc1b04", "score": "0.42796093", "text": "def extract(self, x, y, ys):\n#as the input function in dcpg_data.py, we get to know that:\n# x = chunk_pos; x.shape = (32768,)\n# y = cpg_table.pos.values; y.shape = (3971744,)\n#ys = cpg_table.value.values; ys.shape = (3971744,) This is the original input data information\n\n n = len(x) #target length\n m = len(y) #provided length\n k = self.k\n kk = 2 * self.k\n yc = 
self.__larger_equal(x, y)\n knn_cpg = np.empty((n, kk), dtype=np.float16)\n knn_cpg.fill(np.nan)\n knn_dist = np.empty((n, kk), dtype=np.float32)\n knn_dist.fill(np.nan)\n\n for i in range(n):\n # Left side\n yl = yc[i] - k\n yr = yc[i] - 1\n if yr >= 0:\n xl = 0\n xr = k - 1\n if yl < 0:\n xl += np.abs(yl)\n yl = 0\n xr += 1\n yr += 1\n knn_cpg[i, xl:xr] = ys[yl:yr]\n knn_dist[i, xl:xr] = np.abs(y[yl:yr] - x[i])\n\n # Right side\n yl = yc[i]\n if yl >= m:\n continue\n if x[i] == y[yl]:\n yl += 1\n if yl >= m:\n continue\n yr = yl + k - 1\n xl = 0\n xr = k - 1\n if yr >= m:\n xr -= yr - m + 1\n yr = m - 1\n xl += k\n xr += k + 1\n yr += 1\n knn_cpg[i, xl:xr] = ys[yl:yr]\n knn_dist[i, xl:xr] = np.abs(y[yl:yr] - x[i])\n\n return (knn_cpg, knn_dist)", "title": "" }, { "docid": "bf4a100704a01a5955c235d4efdc6b5c", "score": "0.4274558", "text": "def _iter_break_from_left_to_right(self):\n yield IdentityTransform(), self", "title": "" }, { "docid": "ff8723c1912591b71c55e55e87d6b9b0", "score": "0.42716604", "text": "def chromosomesOffsets(self, *args):\n pass", "title": "" }, { "docid": "7e8faccaec9fc773f4ed89fbad97276b", "score": "0.4269407", "text": "def alignment_csv(out_file):\n writer = csv.DictWriter(out_file, (\n\n 'chadh_slug',\n 'bpo_pub_title',\n 'bpo_title',\n 'bpo_article_type',\n\n 'a_start',\n 'b_start',\n 'size',\n 'a_token_count',\n\n 'a_prefix',\n 'a_snippet',\n 'a_suffix',\n\n 'b_prefix',\n 'b_snippet',\n 'b_suffix',\n ))\n\n writer.writeheader()\n\n alignments = (\n session\n .query(Alignment)\n .join(ChadhNovel, BPOArticle)\n .filter(Alignment.size >= 5)\n .yield_per(1000)\n )\n\n counts = {}\n\n for i, a in enumerate(alignments):\n\n slug = a.chadh_novel.slug\n\n if slug not in counts:\n text = Text(a.chadh_novel.text)\n counts[slug] = len(text.tokens)\n\n writer.writerow(dict(\n\n chadh_slug=a.chadh_novel.slug,\n bpo_pub_title=a.bpo_article.publication_title,\n bpo_title=a.bpo_article.record_title,\n bpo_article_type=a.bpo_article.object_type,\n\n a_start=a.a_start,\n b_start=a.b_start,\n size=a.size,\n a_token_count=counts[slug],\n\n a_prefix=a.a_prefix,\n a_snippet=a.a_snippet,\n a_suffix=a.a_suffix,\n\n b_prefix=a.b_prefix,\n b_snippet=a.b_snippet,\n b_suffix=a.b_suffix,\n\n ))\n\n if i % 1000 == 0:\n print(i)", "title": "" }, { "docid": "30f231716666c378e0ce3209b73f8768", "score": "0.42603087", "text": "def get_aligned_pairs(self, refset, targetset, unique=True, use_distance=True):\n global_mat, global_matched = self.align(refset, targetset, get_matrix=use_distance)\n for pair in iter_aligned_pairs(refset, targetset, global_mat, global_matched, unique):\n self.pairs_found += 1\n yield pair\n self.log_infos()", "title": "" }, { "docid": "bf685e297261c0138f9adc84a545b657", "score": "0.42578217", "text": "def _map_segment_pairs_inclined(\n self, i_pair, j_pair, k_pair, borehole_to_borehole):\n i_segment = np.concatenate(\n [i_pair + self._i0Segments[i] for (i, j) in borehole_to_borehole])\n j_segment = np.concatenate(\n [j_pair + self._i0Segments[j] for (i, j) in borehole_to_borehole])\n k_segment = np.tile(k_pair, len(borehole_to_borehole))\n return i_segment, j_segment, k_segment", "title": "" }, { "docid": "efc02674c6ec094fd8de81a0768b562c", "score": "0.42540073", "text": "def _iter_values(self):\n\n return zip([self._none_slices_except_domain_axis], [self.magnitude])", "title": "" }, { "docid": "9fafddce7d4a9691252df8f62f0057a0", "score": "0.4251796", "text": "def extract_alignments(self, alignment_output_file = None):\n\n # set the input file if not given\n if not 
alignment_output_file:\n alignment_output_file = self.alignment_output_file\n alignments = extract_alignments(alignment_output_file)\n self.protein_alignments = alignments", "title": "" }, { "docid": "df79407572c8571b2ec2c997b9dc877a", "score": "0.42514947", "text": "def tilegen(self):\n for row in self.cells:\n for col in row:\n yield col", "title": "" }, { "docid": "e70efc72557e75fbf8264af798d1a6b7", "score": "0.42489278", "text": "def _align(data, index_align=0, channel_offsets=None):\n\n # shapes might be different if stitched with different configs\n # keep shape consistent with DO\n\n shape = data[0].shape\n data = lasagna.utils.pile(data)\n data = data[..., :shape[-2], :shape[-1]]\n\n indices = range(len(data))\n indices.pop(index_align)\n indices_fwd = [index_align] + indices\n indices_rev = np.argsort(indices_fwd)\n aligned = lasagna.process.register_and_offset(data[indices_fwd], registration_images=data[indices_fwd,0])\n aligned = aligned[indices_rev]\n if channel_offsets:\n aligned = fix_channel_offsets(aligned, channel_offsets)\n\n return aligned", "title": "" }, { "docid": "e66dc91350c5e4d3f7cb8d857261da95", "score": "0.42437807", "text": "def inner_mask(self):\n for p in flex.nested_loop(self._grid_size):\n if self._inner_mask_binary[self._grid_idxr(p)]: yield p", "title": "" }, { "docid": "fe5299cf8473c10706cf8839d4cbf644", "score": "0.4241394", "text": "def outer_mask(self):\n for p in flex.nested_loop(self._grid_size):\n if self._outer_mask_binary[self._grid_idxr(p)]: yield p", "title": "" }, { "docid": "ec14f4d10c412f5629940f9e71a84489", "score": "0.4238155", "text": "def split_alignment(self):\n aln_list = []\n for name, start, stop in self._linspace.to_list():\n aln = self.get_sites(list(range(start, stop)))\n aln.name = name\n aln._linspace = self._subspaces[name]\n aln.metadata = deepcopy(self.metadata)\n\n aln_list.append(aln)\n return aln_list", "title": "" }, { "docid": "9891a7038593cfc7c1c20f03ad6a5d17", "score": "0.42331967", "text": "def data_iterator(self, data_info, params, shuffle=False):\n\n collate_fn = lambda d: collate_libri(d, self.mfcc, params, self.word2Idx)\n\n # make a list that decides the order in which we go over the data- this avoids explicit shuffling of data\n data_loader = tdata.DataLoader(self.datasets[data_info['type']], batch_size=params.batch_size, shuffle=shuffle, collate_fn=collate_fn)\n\n # one pass over data\n for data in enumerate(data_loader):\n # fetch waveforms and keywords\n\n yield data[1][0], data[1][1]", "title": "" } ]
db08e1177f874ec5a2073ffa3d3b937c
Parse swift-recon output into a list of lists, grouped by the content of the delimited blocks.
[
 { "docid": "c075bcdf0c76921e47a2d0a4551ff995", "score": "0.71614885", "text": "def parse_swift_recon(recon_out):\n\n lines = recon_out.splitlines()\n delimiter_regex = re.compile(r'^={79}')\n collection = []\n\n delimiter_positions = [ind for ind, x in enumerate(lines)\n if delimiter_regex.match(x)]\n\n for ind, delimiter_position in enumerate(delimiter_positions):\n if ind != len(delimiter_positions) - 1: # skip the final delimiter; no block follows it\n start = delimiter_position + 1\n end = delimiter_positions[ind + 1]\n collection.append(lines[start:end])\n return collection", "title": "" }
]
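A minimal usage sketch for the positive passage above (assumes the parse_swift_recon definition shown there is in scope; the report text is a synthetic stand-in rather than captured swift-recon output, with delimiter rows of 79 '=' characters matching what the function's regex expects):

    # Synthetic stand-in report: three delimiter rows bound two sections;
    # the final delimiter closes the last block and yields no extra group.
    sample_report = "\n".join([
        "=" * 79,
        "[section one] line A",
        "[section one] line B",
        "=" * 79,
        "[section two] line A",
        "=" * 79,
    ])
    blocks = parse_swift_recon(sample_report)
    # blocks == [['[section one] line A', '[section one] line B'],
    #            ['[section two] line A']]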
[ { "docid": "2d0bf5d3c0c116e3052fefa8030ca8ba", "score": "0.65458053", "text": "def processBlock(block):\n out = []\n a = \"\"\n for i in block:\n for j in i:\n if a == \"\":\n a = j\n else:\n a = a + \"|\" + j\n\n out = a.split(\"|\")\n return out", "title": "" }, { "docid": "8def8263cd4fa650d980b83ea61f310c", "score": "0.6182205", "text": "def parse(\n data: str,\n raw: bool = False,\n quiet: bool = False\n) -> List[Dict]:\n jc.utils.compatibility(__name__, info.compatible, quiet)\n jc.utils.input_type_check(data)\n\n raw_output: List = []\n\n if jc.utils.has_data(data):\n\n for line in filter(None, data.splitlines()):\n\n buddy_list = line.split()\n\n raw_output.append(\n {\n 'node': buddy_list[1][:-1],\n 'zone': buddy_list[3],\n 'free_chunks': buddy_list[4:]\n }\n )\n\n return raw_output if raw else _process(raw_output)", "title": "" }, { "docid": "af33e7d182eb59a5bd6714d2604d1e18", "score": "0.6090104", "text": "def _parse(self):\n res = []\n if not isfile(self.path):\n return res\n sep_pat = re.compile(r'==>\\s*(.+?)\\s*<==')\n with open(self.path, 'r') as f:\n lines = f.read().splitlines()\n for line in lines:\n line = line.strip()\n if not line:\n continue\n m = sep_pat.match(line)\n if m:\n res.append((m.group(1), set(), []))\n elif line.startswith('#'):\n res[-1][2].append(line)\n else:\n res[-1][1].add(line)\n return res", "title": "" }, { "docid": "d4390d0cebd4ce2ddb2035b692d393f1", "score": "0.60860026", "text": "def parse_conll(stream, filename=None, separator='\\t', is_empty=_line_is_empty):\n\n li, l = 0, None\n try:\n blocks = []\n current_block = []\n for l in stream:\n l = l.rstrip()\n li += 1\n if is_empty(l):\n blocks.append(current_block)\n current_block = []\n else:\n current_block.append(l.split(separator))\n except Exception:\n # whatever goes wrong\n raise ParseError(l, li)\n\n return blocks", "title": "" }, { "docid": "92530a8d4449d64bea3b27da912f8706", "score": "0.6025703", "text": "def loop_block_to_list(block):\n\n names = [l for l in block[\"lines\"] if l.startswith(\"_\" + block[\"category\"])]\n lines = [l for l in block[\"lines\"][1:] if l not in names]\n names = [l.split(\".\")[1].split()[0] for l in names]\n lines = [split_values(l) for l in lines]\n l = []\n for n in range(len(lines) - 1):\n while n < len(lines) - 1 and\\\n len(lines[n]) + len(lines[n + 1]) <= len(names):\n lines[n] += lines[n + 1]\n lines.pop(n + 1)\n for line in lines:\n l.append({\n name: value for name, value in zip(names, line)\n })\n return l", "title": "" }, { "docid": "f419dd927d7d914b03abc1d95f393f0d", "score": "0.5958842", "text": "def process_output(output):\n return [line.strip('^$').split('/') for line in output.split('\\n')]", "title": "" }, { "docid": "774b21217cb9181150d8ba1a91159f03", "score": "0.59145683", "text": "def _extract_multi_block_from_log(log_content, block_name):\n # Parse out the portion we need\n data_started = False\n data_content = []\n data_block_list = []\n model_names = []\n\n for line in log_content.split('\\n'):\n if line.startswith(\"SIMULTANEOUS\"):\n clean_str = line.replace(\"SIMULTANEOUS \", \"\")\n model_names = json.loads(clean_str)\n if line.startswith(\"%s_START\" % block_name):\n data_started = True\n elif line.startswith(\"%s_END\" % block_name):\n data_started = False\n if len(data_content) > 0:\n data_path = ''\n if len(model_names) > len(data_block_list):\n data_path = model_names[len(data_block_list)]\n data_block_list.append([data_path, '\\n'.join(data_content)])\n data_content = []\n elif data_started is True:\n 
data_content.append(line)\n return data_block_list", "title": "" }, { "docid": "f3ccfaa5e0c22f92fa37ec8565dac1e33", "score": "0.5887196", "text": "def parse(self, text):\r\n return []", "title": "" }, { "docid": "958fecb1816c8c467d193103d247d28f", "score": "0.58773464", "text": "def parse_list(content):\n csv_list = []\n # Convert into a list of strings\n unicode = content.decode()\n content_list = unicode.split('\\n')\n state = 0\n for line in content_list:\n # print(\"state, line:\", state, line)\n if state == 0:\n # Look for first <PRE>\n if line.startswith('<PRE>'):\n state = 1\n elif state == 1:\n # Look for header\n if line.startswith('No., Day,DD,MMM,YYYY,'):\n csv_list.append(line.split(','))\n state = 2\n elif state == 2:\n # Process rows until blank line\n if line:\n csv_list.append(line.split(','))\n else:\n # Processed all rows so quit\n break\n else:\n continue\n return csv_list", "title": "" }, { "docid": "0d368548f977d370eb3c3c8575391f6a", "score": "0.5766159", "text": "def separate_records(listofdata):\n\n\troottag = re.compile(r'rootTag')\n\trtlist = []\n\tfor i, v in enumerate(listofdata):\n\t\tif re.match(roottag, str(v)):\n\t\t\trtlist.append(i)\n\n\tlistofrecords = []\n\tind = 1\n\tfor r in rtlist:\n\t\tsingle_record = listofdata[r:rtlist[ind+1]]\n\t\tlistofrecords.append(single_record)\n\t\tif ind < len(rtlist)-2:\n\t\t\tind += 1\n\t\telse:\n\t\t\tbreak\n\n\treturn listofrecords", "title": "" }, { "docid": "45df3fd00d8018255d01ac47213efaed", "score": "0.5765179", "text": "def __process_gorn_list(self, s):\n if not s:\n return []\n parts = re.split(r\"\\s*;\\s*\", s)\n seqs = list(map((lambda x : list(map(int, re.split(r\"\\s*,\\s*\", x)))), parts))\n return seqs", "title": "" }, { "docid": "598f7244152fa5e10763c42a012048a4", "score": "0.57588154", "text": "def rnaforester_parser(lines):\n \n result = []\n \n for block in cluster_parser(lines):\n for struct in line_parser(block):\n result.append(struct)\n\n return result", "title": "" }, { "docid": "c80e989565bfc91cb03e2063b40f99ca", "score": "0.57435733", "text": "def parse_mol(self,):\n f = open(self.molfile, 'r')\n flag = True\n blocks = []\n block = []\n \n while(flag):\n flag = False\n for i in f:\n flag = True\n if i.strip().startswith('step'):\n if len(block) == 0:\n # append step to list as the first element\n block.append(i.strip().split()[0][4:])\n else:\n blocks.append(block)\n block = []\n # append step to list as the first element\n block.append(i.strip().split()[0][4:])\n break\n # ignore the white lines\n elif len(i.strip()) < 1:\n pass\n else:\n block.append(i.strip())\n blocks.append(block)\n return blocks", "title": "" }, { "docid": "a756a118fe526c1185328529402c0ac8", "score": "0.5712363", "text": "def get_records(text_block):\n #TODO: Refactor all of these into a classmethod for Record, then\n # set arguments for start, end, find, and non-single-line behavior\n start = text_block.find('(cid:87)(cid:73)(cid:84)(cid:72)(cid:68)'\\\n '(cid:82)(cid:65)(cid:87)(cid:65)(cid:76)(cid:83)(cid:32)'\\\n '(cid:38)(cid:32)(cid:68)(cid:69)(cid:80)(cid:79)(cid:83)'\\\n '(cid:73)(cid:84)(cid:83) (cid:32)')\n end = text_block.find('NET WITHDRAWALS (cid:38) DEPOSITS')\n text_block = text_block[start:end]\n deposits = []\n text_block = text_block.split('\\n')\n \"\"\"\n for line in text_block:\n print line\n \"\"\"\n for i, record in enumerate(text_block):\n if record.find('Deposit ACH') != -1:\n if Deposit.is_single_line(record):\n deposits.append(Deposit.from_record(record))\n elif len(record.split(' ')) < 1:\n 
continue\n else:\n record = ''.join([text_block[i], text_block[i+1], \n ' ', text_block[i-1]])\n record = Deposit.clean(record)\n record = Deposit.from_record(record)\n deposits.append(record)\n return deposits", "title": "" }, { "docid": "9b2be6e27c1f5ef9bfd32111bd7a6763", "score": "0.5686293", "text": "def splitBlock(self, stdout):\n results = []\n for line in stdout.splitlines(False):\n # Intentionally skips empty lines.\n if not line:\n continue\n if not line.startswith('__'):\n if results:\n results[-1].append(line)\n else:\n # TODO(maruel): gclient's git stdout is inconsistent.\n # This should fail the test instead!!\n pass\n continue\n\n match = re.match(r'^________ ([a-z]+) \\'(.*)\\' in \\'(.*)\\'$', line)\n if match:\n results.append([[match.group(1), match.group(2), match.group(3)]])\n continue\n\n match = re.match(r'^_____ (.*) is missing, synching instead$', line)\n if match:\n # Blah, it's when a dependency is deleted, we should probably not\n # output this message.\n results.append([line])\n continue\n\n # These two regexps are a bit too broad, they are necessary only for git\n # checkouts.\n if (re.match(r'_____ [^ ]+ at [^ ]+', line) or\n re.match(r'_____ [^ ]+ : Attempting rebase onto [0-9a-f]+...', line)):\n continue\n\n # Fail for any unrecognized lines that start with '__'.\n self.fail(line)\n return results", "title": "" }, { "docid": "05bbe76893762bac7d11ced2dd9f6e92", "score": "0.5671271", "text": "def parse(data):\n data = LookAheadStringIter(data)\n result = list(parse_recursive(data))\n if data.ahead:\n raise ParseError('Inconsistent nesting of lists', data)\n return result", "title": "" }, { "docid": "95e2f9adfb7a6ed16c1277c2f274ead2", "score": "0.5658487", "text": "def __process_trees(self, s):\n if not s:\n return []\n tree_strs = s.split(\"|||\")\n return list(map(Tree.fromstring, tree_strs)) ## important fix TS", "title": "" }, { "docid": "d4bd9478a3915907d3ae940892744d02", "score": "0.56497824", "text": "def parse(lines):\n lines = [i.decode('utf-8').rstrip() for i in lines]\n lines = strip_footer(lines)\n\n out = parse_header_block(lines)\n out['subitems'] = parse_content(lines)\n out['multiple'] = len(out['subitems']) > 1\n\n return out", "title": "" }, { "docid": "668db40667e1fdc1677dfad9c130549c", "score": "0.56468004", "text": "def sent_split(inlines, abbrevs=None):\n\n outlines = []\n lcont = \"\"\n for l in inlines:\n # if this line is empty, continue to next\n if l.strip() == \"\":\n continue\n\n # protect any abbrevs by using a tag string and the abbrevs index\n if abbrevs is not None:\n for i, a in enumerate(abbrevs):\n\n # if a has a dot then I need to escape it\n if a.find(\".\") != -1:\n a = a.replace(\".\", r\"\\.\")\n\n ab_re = re.compile(r\"\\b\" + a)\n abtag = \"#ABTAG{}#\".format(i)\n ab_re.sub(abtag, l)\n\n # split the current l into groups\n parts = S_RE.split(l.strip())\n lparts = []\n for i, p in enumerate(parts):\n if i > 0 and S_RE.match(p):\n lparts[-1] += \" \" + p\n else:\n lparts.append(p)\n\n # see if there's more than one part\n if len(lparts) > 1: # multi part\n\n # append any lcont to the first\n lparts[0] = lcont + \" \" + lparts[0]\n lcont = \"\"\n\n # see if the last segment will continue to the next\n if not S_RE.match(l.strip()[-1]):\n outlines.extend(\n [p.strip() for p in lparts[0:-1] if p.strip() != \"\"]\n )\n lcont = lparts[-1]\n else:\n outlines.extend([p.strip() for p in lparts if p.strip() != \"\"])\n\n else: # just one part here\n # see if this line will continue to the next\n if not S_RE.match(l.strip()[-1]):\n 
lcont = lcont + \" \" + l.strip()\n else:\n if lcont + \" \" + l.strip() != \"\":\n outlines.append(lcont + \" \" + l.strip())\n\n # go through outlines and put back strings from abtags\n if abbrevs is not None:\n ABTAG_NUM_RE = re.compile(\"#ABTAG([0-9]+)#\")\n cleanlines = []\n for o in outlines:\n logging.info(\"preclean: %s\", o)\n while o.find(\"#ABTAG\") > -1:\n i = int(ABTAG_NUM_RE.search(o).group(1))\n o = (\n o[0:o.find(\"#ABTAG\")]\n + abbrevs[i]\n + \" \"\n + o[o.find(\"#ABTAG\") + 7 + len(str(i)):]\n )\n cleanlines.append(o)\n logging.info(\"cleaned: %s\", o)\n return cleanlines\n else:\n return outlines", "title": "" }, { "docid": "12e49697d72620307fdcd596f164f242", "score": "0.5633962", "text": "def line_parser(block):\n odd = True\n record = False\n first = True\n seq = ''\n con_seq = ''\n struct = ''\n alignment = {}\n for line in block:\n #find alignments\n if line.startswith('seq'):\n if line.__contains__(')') or line.__contains__('('):\n continue\n else:\n sline = line.strip().split()\n name = sline[0]\n tmp_seq = sline[-1]\n if alignment.__contains__(name):\n seq = alignment[name]\n seq = ''.join([seq,tmp_seq])\n alignment[name] = seq\n else:\n alignment[name] = tmp_seq\n\n if line.startswith('Consensus sequence/structure:'): #start\n record = True\n if not first:\n struct = to_pairs(struct)\n yield [alignment,con_seq,struct]\n result = []\n first = True\n elif record:\n if line.startswith(' '):\n line = line.strip()\n if odd:\n con_seq = ''.join([con_seq,line])\n odd = False\n else:\n struct = ''.join([struct,line])\n odd = True\n\n struct = to_pairs(struct)\n yield [alignment,con_seq,struct]", "title": "" }, { "docid": "a35f8fd6e3293a985ac8c2b8b5b55b30", "score": "0.56085324", "text": "def parse(stream):\n content = stream.read()\n content = boneyard_re.sub('', content)\n lines = linebreak_re.split(content)\n del content\n return parse_lines(lines)", "title": "" }, { "docid": "ba107700108cb17b31c6891a10f1ef58", "score": "0.5605392", "text": "def parse(self):\n res = []\n if not isfile(self._log_path):\n return res\n sep_pat = re.compile(r'==>\s*(.+?)\s*<==')\n for line in open(self._log_path):\n line = line.strip()\n if not line or line.startswith('#'):\n continue\n m = sep_pat.match(line)\n if m:\n dt = m.group(1)\n res.append((dt, set()))\n else:\n res[-1][1].add(line)\n return res", "title": "" }, { "docid": "a1fb83c031879994c6b2d6bcb5d1fd0e", "score": "0.56053007", "text": "def __process_span_list(self, s):\n if not s:\n return []\n parts = re.split(r\"\\s*;\\s*\", s)\n seqs = list(map((lambda x : list(map(int, re.split(r\"\\s*\\.\\.\\s*\", x)))), parts))\n return seqs", "title": "" }, { "docid": "66d32cef6a6efa05332fa6fbc15386af", "score": "0.5528002", "text": "def deduplicate_delimiters(output):\n i = 0\n while i < len(output) - 1:\n if isinstance(output[i], OutputDelimiter) and isinstance(output[i + 1], OutputDelimiter):\n if output[i].string.isspace() and not output[i + 1].string.isspace():\n output.pop(i)\n continue\n elif output[i + 1].string.isspace() and not output[i].string.isspace():\n output.pop(i + 1)\n continue\n elif len(output[i].string) < len(output[i + 1].string):\n output.pop(i)\n continue\n elif len(output[i].string) > len(output[i + 1].string):\n output.pop(i + 1)\n continue\n elif output[i].string.isspace():\n output.pop(i)\n continue\n i += 1\n # Strip leading block delimiters.\n while output and isinstance(output[0], OutputDelimiter) and output[0].string.isspace():\n output.pop(0)\n # Strip trailing block 
delimiters.\n while output and isinstance(output[-1], OutputDelimiter) and output[-1].string.isspace():\n output.pop(-1)", "title": "" }, { "docid": "03ee338cf906a6665c539bfa0f3b5247", "score": "0.54913294", "text": "def parse_input(puzzle_data):\n structured_puzzle = list()\n for line in puzzle_data.split(\"\\n\"):\n structured_puzzle.append(line.split(\" \"))\n\n return structured_puzzle", "title": "" }, { "docid": "d81c2c1c335d308e721854cf29f34fd3", "score": "0.5482219", "text": "def processor(main_dict: dict):\n main_dict[\"group_contents\"] = \\\n main_dict[\"group_contents\"].replace(\"}{\", \"}|{\")\n code_strings_list = main_dict[\"group_contents\"].split(\"|\")\n return code_strings_list", "title": "" }, { "docid": "8bf95ad25f0d0ee9a777781993e56751", "score": "0.5477923", "text": "def parsed_output(self):\n data = self.output_data\n args = [iter(data)] * 3\n return zip(*args)", "title": "" }, { "docid": "deb5694485fa5bd9c6244f01e931cd13", "score": "0.5441826", "text": "def parse_and_extract(conllu_path) -> List[Tuple[Inp, Out]]:\n data = []\n with open(conllu_path, \"r\", encoding=\"utf-8\") as data_file:\n for token_list in conllu.parse_incr(data_file): # type: ignore\n data.append(extract(token_list))\n return data", "title": "" }, { "docid": "cfa59e9fd55254f80a663c4e81fd6105", "score": "0.54314446", "text": "def split_raw_data(raw_data):\n return [\n line if line == \"\\n\"\n else line.split(\"\\t\")\n for line in raw_data\n ]", "title": "" }, { "docid": "fa17a259d594e9a93523c6950b560d5b", "score": "0.5430212", "text": "def parse_tokens(self, tokens):\n valid_tokens = self.get_valid_tokens(tokens)\n\n if len(valid_tokens) <= 0:\n return []\n\n segmented_tokens = []\n current_tokens = [valid_tokens[0]]\n\n for t in valid_tokens[1:]:\n # Tokens have consecutive indices\n # so same segment\n if current_tokens[-1][0] + 1 == t[0]:\n current_tokens.append(t)\n\n # start new segment\n else:\n segmented_tokens.append(current_tokens)\n current_tokens = [t]\n\n segmented_tokens.append(current_tokens)\n\n segments = []\n\n for token_group in segmented_tokens:\n text = ' '.join(t[3].strip() for t in token_group)\n start = token_group[0][1]\n end = token_group[-1][2]\n segments.append((start, end, text))\n\n return segments", "title": "" }, { "docid": "fb3a1e250dac457027cba191c45d0428", "score": "0.54013276", "text": "def unpack(self, data):\n return [m.split(self.separator) for m in data.split(\"\\n\")]", "title": "" }, { "docid": "b2e8a57fa800b26fccd0fac20913c753", "score": "0.5373687", "text": "def group_tokens(self, blocks_and_lines):\n\n def horizontal_distance_between(token1, token2):\n if token1.coordinates[\"x\"] > token2.coordinates[\"x\"]:\n return (\n token1.coordinates[\"x\"]\n - token2.coordinates[\"x\"]\n - token2.coordinates[\"width\"]\n )\n else:\n return (\n token2.coordinates[\"x\"]\n - token1.coordinates[\"x\"]\n - token1.coordinates[\"width\"]\n )\n\n def combine_tokens_into_one_token(token_list):\n x = y = float(\"inf\")\n bottom_right_x = bottom_right_y = 0\n text = \"\"\n\n for token in token_list:\n if not text:\n text = token.text\n else:\n text = text + \" \" + token.text\n\n x = min(token.coordinates[\"x\"], x)\n y = min(token.coordinates[\"y\"], y)\n bottom_right_x = max(\n token.coordinates[\"x\"] + token.coordinates[\"width\"], bottom_right_x\n )\n bottom_right_y = max(\n token.coordinates[\"y\"] + token.coordinates[\"height\"], bottom_right_y\n )\n\n return Token(\n text,\n {\n \"x\": x,\n \"y\": y,\n \"width\": bottom_right_x - x,\n \"height\": bottom_right_y 
- y,\n },\n \"NA\",\n {**token.token_structure, \"word_num\": \"Grouped\"},\n )\n\n grouped_tokens = []\n\n for block in blocks_and_lines:\n for line in blocks_and_lines[block]:\n current_line = blocks_and_lines[block][line]\n current_group = []\n ADJUSTMENT_FACTOR = 0.25\n\n # take note of all the IS CURRENCY code that is meant to prevent currency and amount from grouping together\n for token in current_line:\n IS_CURRENCY = token.text in currencies\n if current_group: # If there exists tokens in the current group\n height_of_current_group = max(\n list(\n map(\n lambda token: token.coordinates[\"height\"],\n current_group,\n )\n )\n )\n TOO_FAR = (\n horizontal_distance_between(token, current_group[-1])\n > height_of_current_group / 2 + ADJUSTMENT_FACTOR*height_of_current_group\n )\n LAST_TOKEN_ENDS_WITH_COLON = current_group[-1].text[-1] == \":\"\n ALIGNED_HORIZONTALLY = token.is_horizontally_aligned_with(\n current_group[-1]\n )\n\n # When something is a currency, independently add it to the grouped tokens\n if IS_CURRENCY: \n grouped_tokens.append(\n combine_tokens_into_one_token(current_group)\n )\n grouped_tokens.append(token)\n current_group = []\n \n # If something should not belong to the current group, group the current group and start a new group\n elif (\n TOO_FAR\n or LAST_TOKEN_ENDS_WITH_COLON\n or not ALIGNED_HORIZONTALLY\n ): # This token should not be combined into the current group\n grouped_tokens.append(\n combine_tokens_into_one_token(current_group)\n )\n current_group = [token] # Reset the current group\n \n # It meets the criteria for normal append-ment of the token\n else:\n current_group.append(token)\n\n else: # Start a new group, since current_group is empty. Unless it is a currency, in which case, append directly to grouped tokens\n if IS_CURRENCY:\n grouped_tokens.append(token)\n else:\n current_group.append(token)\n\n # After finishing a line, just combine remaining tokens\n if current_group:\n grouped_tokens.append(combine_tokens_into_one_token(current_group))\n\n return grouped_tokens", "title": "" }, { "docid": "b45b7b7fc5810eaa0db8e94970899300", "score": "0.5351421", "text": "def parse(lines):\n pass", "title": "" }, { "docid": "2c51139f008c7389bc18da4bd16104dc", "score": "0.5342632", "text": "def parse_input(puzzle_data):\n structured_puzzle = list()\n for line in puzzle_data.split(\"\\n\"):\n structured_puzzle.append(line)\n return structured_puzzle", "title": "" }, { "docid": "17c9edc50893b332ea89ea63e951cff3", "score": "0.53323346", "text": "def sub_group_lines(lines):\n groups, sub_group = [], []\n\n for line in lines:\n if len(line.rstrip()) > 0:\n sub_group.append(line.rstrip())\n else:\n groups.append(sub_group)\n sub_group = []\n\n if len(sub_group) > 0:\n groups.append(sub_group)\n\n return groups", "title": "" }, { "docid": "45ee9a3e15a7ad670150aad3097bd7a8", "score": "0.531893", "text": "def parse(self) -> list[list[str]]:\n cells = []\n row = []\n text = self.text\n while text:\n try:\n cell, cell_end, row_done = self.parser.parse(text)\n except ParseError as e:\n if self.debug:\n error = ParseError(f'Error in parse. Cannot match quoted key. 
'\n f'Text is {text!r}\\ncells length is {len(cells)}\\n'\n f'row length is {len(row)}')\n raise error from e\n else:\n raise\n for formatter in self.formatters:\n cell = formatter.format(cell)\n row.append(cell)\n if row_done:\n cells.append(row)\n row = []\n text = text[cell_end:]\n return cells", "title": "" }, { "docid": "9272d9a18bc118ec1ad1e92a102298e3", "score": "0.5317757", "text": "def parse_list(parser=lambda x: x, delim=' '):\n return chain(parse_text, lambda value: value.split(delim), map_(parser))", "title": "" }, { "docid": "856fc1de97d3e1f53e335b31e0bff69a", "score": "0.5311281", "text": "def parse_input():\n\n wires = []\n\n with open('input.txt', 'r') as txt:\n for segments in txt:\n wire = Wire()\n for segment in segments.strip().split(','):\n wire.update(segment)\n wires.append(wire)\n\n return wires", "title": "" }, { "docid": "800f3616a9e08a3b0c53ca49c0977acc", "score": "0.5310942", "text": "def parseoutput(infilepath, has_headers):\n # open up the file\n f = open(infilepath)\n # this is my way to handle some files having headers and some not\n if has_headers == True:\n # read off the headers \n x = f.readline()\n # for each row in infile strip off newlines and split\n mylist = [r.strip().split('|') for r in f]\n return mylist", "title": "" }, { "docid": "76c1777b899ac82f1cb5e47bb04b1e3b", "score": "0.53012073", "text": "def parse(self):\n self.data = []\n self.extract_sections()\n for section in self._parsing['sections']:\n self.data.append(self.parse_section(section))\n return self.data", "title": "" }, { "docid": "5863d062b5d4b82b4fe09de2ed7af561", "score": "0.52934253", "text": "def parse_input_output_string(self, data: str):\n ret = []\n for line in data.split(\"\\n\"):\n if line:\n if line[1] == \" \":\n prev = ret[-1]\n n = (prev[0], prev[1] +\"\\n \" + line.strip())\n ret[-1] = n\n # ret[-1][1] += line.strip()\n else:\n vd = line.split(\" \", 1)\n ret.append((vd[0].strip(),vd[1].strip())) \n \n return ret", "title": "" }, { "docid": "1412ca944e60c113c85eccd508f4bb05", "score": "0.527264", "text": "def read_list(data):\n assert data.next() == '('\n result = list(parse_recursive(data))\n if not data.ahead or data.next() != ')':\n raise ParseError('Unexpected end of list', data)\n return result", "title": "" }, { "docid": "5169299abeabf63bc9f110e62362caec", "score": "0.52621293", "text": "def _commands_output_to_list(self, output):\n output = [x.split('=') for x in output.split('\\n')]\n\treturn output", "title": "" }, { "docid": "216ff80ba234f5eb6bd94172bd7d3c79", "score": "0.52569085", "text": "def parse(lines):\n output = dict()\n\n for line in lines:\n [start, pipes] = line.split(\"<->\")\n output[int(start)] = [int(pipe) for pipe in pipes.split(\",\")]\n\n return output", "title": "" }, { "docid": "d74ca577f5420687b9f96e4463f92cf5", "score": "0.5242003", "text": "def splitter(data, vendors, EC):\n final_list = []\n split_list = data.split(\"\\n\")\n\n del split_list[len(split_list) - 1]\n\n for item in split_list:\n\n final_list.append(item.split(\";\"))\n\n level_sorter(final_list, vendors, EC)", "title": "" }, { "docid": "c25d139e0b710040e29fe5b512697763", "score": "0.52353776", "text": "def custom_parser(output: str, command: str, device_type: str, logger_poller) -> list:\n # Install pip install pyATS and genie to use the parser in Netmiko. 
Now is not necessary for 1 command to parse\n parsed_output = []\n if command == \"show vlan\" and device_type == \"cisco_ios\":\n output = output.split(\"\\n\")\n pattern = r\"^(\\d+)\\s+(\\S+)\\s+\\S+.*$\"\n for line in output:\n match = re.search(pattern, line)\n if match:\n parsed_output.append({\"vlan_id\": match.group(1), \"vlan_name\": match.group(2)})\n else:\n logger_poller.info(f\"Parsed not implemented for command {command} on device_type {device_type}\")\n return parsed_output", "title": "" }, { "docid": "181baa6ca3a82ad9e99cddb505f43718", "score": "0.52280134", "text": "def parse_colon_record_list(self, response, err):\n results = []\n records = response.split('\\n\\n')\n for record in records:\n obj = self.parse_colon_record(record.strip(), err)\n\n if obj == None:\n continue\n\n results.append(obj)\n\n return results", "title": "" }, { "docid": "7557d97104cf193f6c6a2362cb6115fe", "score": "0.5226994", "text": "def parse(content: str) -> List[Sentence]:\n return ConlluParserBuilder.build().parse(content)", "title": "" }, { "docid": "2cad2a8522b146e539fb3334ce2a59c5", "score": "0.52262414", "text": "def parse_input(filename=\"input.txt\"):\n with open(filename, \"r\") as f:\n text = f.read()\n # Parse the groups\n groups = text.split(\"\\n\\n\")\n answers = [group.split(\"\\n\") for group in groups]\n return answers", "title": "" }, { "docid": "dce2742ce3e7af2c87257e6737ec0875", "score": "0.5214416", "text": "def groups(self):\n groups = []\n with open(self.fname) as reader:\n for line in reader.readlines():\n m = re.match(r\"\\[\\[([^\\[\\]]+)\\|([^\\[\\]]+)\\]\\]\", line)\n if m:\n group = (m.group(1), m.group(2))\n groups.append(group)\n return groups", "title": "" }, { "docid": "be08c7ed23720dab2344331390742144", "score": "0.51933235", "text": "def items(self):\n self.reset()\n\n for l in diff_split_lines(self.unified_diff, False):\n self._bytes_processed += len(l) + 1\n m = re.match(r'^--- ([^\\s]*)', l)\n if m:\n yield from self.empty_buffer()\n continue\n m = re.match(r'^\\+\\+\\+ ([^\\s]*)', l)\n if m:\n yield from self.empty_buffer()\n continue\n\n m = re.match(r\"@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*)\", l)\n if m:\n yield from self.empty_buffer()\n hunk_data = map(lambda x: x == \"\" and 1 or int(x), m.groups())\n self.hunk_off1, self.hunk_size1, self.hunk_off2, self.hunk_size2 = (\n hunk_data\n )\n self.line1, self.line2 = self.hunk_off1, self.hunk_off2\n yield \"H\", (\n self.hunk_off1,\n self.hunk_size1,\n self.hunk_off2,\n self.hunk_size2,\n )\n continue\n\n if re.match(r'^\\[', l):\n yield from self.empty_buffer()\n yield \"C\", l\n\n if re.match(r\"^\\\\ No newline\", l):\n if self.hunk_size2 == 0:\n self.buf[-1] = (\n self.buf[-1][0],\n self.buf[-1][1] + '\\n' + l[2:],\n )\n else:\n self.buf[-1] = (\n self.buf[-1][0] + '\\n' + l[2:],\n self.buf[-1][1],\n )\n continue\n\n if self.hunk_size1 <= 0 and self.hunk_size2 <= 0:\n yield from self.empty_buffer()\n continue\n\n m = re.match(r\"^\\+\\[ (\\d+) lines removed \\]$\", l)\n if m:\n self.add_cpt += int(m.group(1))\n self.hunk_size2 -= int(m.group(1))\n self.buf.append((None, l[1:]))\n continue\n\n if re.match(r\"^\\+\", l):\n self.add_cpt += 1\n self.hunk_size2 -= 1\n self.buf.append((None, l[1:]))\n continue\n\n m = re.match(r\"^-\\[ (\\d+) lines removed \\]$\", l)\n if m:\n self.del_cpt += int(m.group(1))\n self.hunk_size1 -= int(m.group(1))\n self.buf.append((l[1:], None))\n continue\n\n if re.match(r\"^-\", l):\n self.del_cpt += 1\n self.hunk_size1 -= 1\n self.buf.append((l[1:], None))\n continue\n\n if 
re.match(r\"^ \", l) and self.hunk_size1 and self.hunk_size2:\n yield from self.empty_buffer()\n self.hunk_size1 -= 1\n self.hunk_size2 -= 1\n self.buf.append((l[1:], l[1:]))\n continue\n\n yield from self.empty_buffer()\n\n yield from self.empty_buffer()", "title": "" }, { "docid": "bac22f7e817cba31d52d11a36d797ff0", "score": "0.5186413", "text": "def parser(self, inp):\n file = open(inp)\n ecotypes = []\n for line in file:\n if \"Ecotype\" in line and \"[\" in line and \"]\" in line:\n start = False\n ecotype = []\n sequence = \"\"\n for char in line:\n if char == '[':\n start = True\n if start == True:\n if char != '[' and char != ',' and char != ' ' and char != ']':\n sequence += char\n elif char == ' ':\n ecotype.append(sequence)\n sequence = \"\"\n elif char == ']':\n ecotype.append(sequence)\n ecotypes.append(ecotype)\n sequence = \"\"\n break\n return ecotypes", "title": "" }, { "docid": "7f7ae34386eb4185f4611582cdad64b4", "score": "0.51828325", "text": "def get_items(self, block):\r\n items = []\r\n for line in block.split('\\n'):\r\n m = self.CHILD_RE.match(line)\r\n if m:\r\n # This is a new item. Append\r\n items.append(m.group(3))\r\n elif self.INDENT_RE.match(line):\r\n # This is an indented (possibly nested) item.\r\n if items[-1].startswith(' '*markdown.TAB_LENGTH):\r\n # Previous item was indented. Append to that item.\r\n items[-1] = '%s\\n%s' % (items[-1], line)\r\n else:\r\n items.append(line)\r\n else:\r\n # This is another line of previous item. Append to that item.\r\n items[-1] = '%s\\n%s' % (items[-1], line)\r\n return items", "title": "" }, { "docid": "5f6549a2bab8f7c1bff90efd65f43693", "score": "0.51791257", "text": "def parse(self, format_string):\n\n\t\treturn [\n\t\t\t(\n\t\t\t\tbefore,\n\t\t\t\tidentifiant,\n\t\t\t\tstr(\n\t\t\t\t\tlen(\n\t\t\t\t\t\tre.search('\\t*$', before).group(0)\n\t\t\t\t\t)\n\t\t\t\t) + '\\t' + (param if param is not None else ''),\n\t\t\t\tmodif,\n\t\t\t)\n\t\t\tfor before, identifiant, param, modif\n\t\t\tin super().parse(format_string)\n\t\t]", "title": "" }, { "docid": "580a98f211a45ad7955c8983c132bded", "score": "0.5173377", "text": "def chunk(lines):\n chunks = []\n chunk = []\n for lineno, line in lines:\n stripped = line.strip()\n if not stripped or stripped[0] == '#':\n if chunk:\n chunks.append(chunk)\n chunk = []\n else:\n chunk.append((lineno, stripped))\n\n if chunk:\n chunks.append(chunk)\n\n return chunks", "title": "" }, { "docid": "60f5a47b3205bb266f8db9785a65e9d2", "score": "0.5162477", "text": "def get_output_list(self, d):\n output_list = [] \n\n # base output = id | timestamp | body\n output_list.append( Field_id(d).value )\n output_list.append( Field_postedtime(d).value )\n output_list.append( Field_body(d).value )\n\n # urls \n if self.options_urls:\n #\n # TODO: add back this exception handling for -x option\n # https://github.com/DrSkippy/Gnacs/blob/16dd146fb05d02d7c1e3f282254e6718fd13303f/acscsv/twacscsv.py#L97 \n #\n # gnip \n val = Field_gnip_urls(d).value\n if isinstance(val, list): \n try:\n output_list.append( self.buildListString( [ x[\"expanded_url\"] for x in val ] ) ) \n except KeyError:\n output_list.append(Field_gnip_urls(d).default_value)\n else: \n output_list.append( val ) \n # twitter\n val = Field_twitter_entities_urls(d).value \n if isinstance(val, list):\n url_list = self.buildListString( [ x[\"url\"] for x in val ] ) \n exp_url_list = self.buildListString( [ x[\"expanded_url\"] for x in val ] ) \n else:\n url_list = val\n exp_url_list = val \n output_list.append( url_list ) \n 
output_list.append( exp_url_list ) \n \n # languages \n if self.options_lang:\n # actor\n # - this field has *very* infrequently contained unicode chars. drop them.\n output_list.append( Field_actor_language(d).value.encode('ascii', 'ignore') ) \n # classifications\n output_list.append( Field_gnip_language_value(d).value ) \n output_list.append( Field_twitter_lang(d).value ) \n \n # rules\n if self.options_rules:\n val = Field_gnip_rules(d).value \n if isinstance(val, list):\n # output: '[\" value (tag)\", ... ]'\n output_list.append( \n self.buildListString( \n [ \"{} ({})\".format( x[\"value\"], x[\"tag\"] ) for x in Field_gnip_rules(d).value ]\n )\n ) \n else: \n output_list.append( val ) \n\n # geo-related fields\n if self.options_geo:\n # geo-tag \n val = Field_geo_coordinates(d).value\n # keep self.geoCoordsList for backward compatibility\n self.geoCoordsList = None\n if isinstance(val, list):\n output_list.append( str(val) ) \n self.geoCoordsList = val \n else:\n output_list.append( val ) \n output_list.append( Field_geo_type(d).value )\n val = Field_location_geo_coordinates(d).value \n if isinstance(val, list): \n output_list.append( str(val) ) \n else:\n output_list.append( val ) \n output_list.append( Field_location_geo_type(d).value )\n output_list.append( Field_location_displayname(d).value ) \n output_list.append( Field_location_twitter_country_code(d).value ) \n # user \n output_list.append( Field_actor_utcoffset(d).value ) \n output_list.append( Field_actor_location_displayname(d).value ) \n # profileLocations\n output_list.append( Field_gnip_profilelocations_displayname(d).value ) \n output_list.append( Field_gnip_profilelocations_objecttype(d).value ) \n output_list.append( Field_gnip_profilelocations_address_country(d).value ) \n output_list.append( Field_gnip_profilelocations_address_region(d).value ) \n output_list.append( Field_gnip_profilelocations_address_countrycode(d).value ) \n output_list.append( Field_gnip_profilelocations_address_locality(d).value ) \n output_list.append( Field_gnip_profilelocations_geo_type(d).value ) \n output_list.append( Field_gnip_profilelocations_geo_coordinates(d).value ) \n\n # user\n if self.options_user:\n output_list.append( Field_actor_displayname(d).value ) \n output_list.append( Field_actor_preferredusername(d).value ) \n output_list.append( Field_actor_id(d).value ) \n \n # user connections, klout\n if self.options_influence:\n output_list.append( Field_gnip_klout_score(d).value ) \n output_list.append( Field_actor_followerscount(d).value ) \n output_list.append( Field_actor_friendscount(d).value ) \n output_list.append( Field_actor_listedcount(d).value ) \n output_list.append( Field_actor_statusesCount(d).value ) \n \n # structure\n if self.options_struct:\n output_list.append( Field_activity_type(d).value ) \n\n # done building output list \n return output_list", "title": "" }, { "docid": "54c9aeb9113235841a192435e9b759ca", "score": "0.5161918", "text": "def _extract_results(self, response):\r\n res = re.compile(self.pattern, re.DOTALL).findall(response)\r\n results = list()\r\n for r in res:\r\n results.append(r)\r\n return results", "title": "" }, { "docid": "87bdfeb9b018da62afb88a5ddc597e41", "score": "0.5153912", "text": "def test_extract_blocks(self):\n f = StringIO.StringIO(\"\"\"\n// No block\n.foobar {}\n\n// Block1\n//\n// foobar\n//\n// Styleguide\ndiv {}\n\n// Block2\n//\n// Styleguide\n \"\"\")\n\n blocks = list(_extract_blocks(f))\n self.assertEqual(len(blocks), 2)\n self.assertEqual(blocks[0], [\"Block1\", \"\", 
\"foobar\", \"\", \"Styleguide\"])\n self.assertEqual(blocks[1], [\"Block2\", \"\", \"Styleguide\"])", "title": "" }, { "docid": "95121c78fff829854cb2fd773c48be30", "score": "0.51532143", "text": "def parseDataFile(self, data):\n\n block = 0\n clientsbool = False\n self.dataDicts = []\n self.dataList = []\n for line in data:\n try:\n if '!CLIENTS:' in line:\n clientsbool = True\n else:\n if line.find(';') == 0:\n if not line.find('; ') == 0:\n block = block + 1\n if not line.find(';\\n') == 0:\n block = block + 1\n if block > 1:\n clientsbool = False\n block = 0\n if clientsbool:\n splitline = line.split(u':', 40)\n self.dataList.append(splitline)\n self.dataDicts.append(profilemapper.profileMapper(splitline))\n except:\n pass", "title": "" }, { "docid": "2d4e5ea8804ccd15d68bd90e6c32b951", "score": "0.5145597", "text": "def mmcif_lines_to_mmcif_blocks(lines):\n\n category = None\n block, blocks = [], []\n while lines:\n line = lines.pop(0)\n if line.startswith(\"data_\"): continue\n if line.startswith(\"_\"):\n line_category = line.split(\".\")[0]\n if line_category != category:\n if category:\n blocks.append({\"category\": category[1:], \"lines\": block})\n category = line_category\n block = []\n if line.startswith(\"loop_\"):\n if category:\n blocks.append({\"category\": category[1:], \"lines\": block})\n category = lines[0].split(\".\")[0]\n block = []\n block.append(line)\n if block: blocks.append({\"category\": category[1:], \"lines\": block})\n return blocks", "title": "" }, { "docid": "86132f917a800ac0305419ccedc4e10b", "score": "0.5145506", "text": "def yield_records(handle):\n record = []\n for line in handle:\n if line.startswith('//'):\n if len(record) > 0:\n yield record\n record = []\n else:\n record.append(line.strip())\n if len(record) > 0:\n yield record", "title": "" }, { "docid": "dfd046d167e1e06b4dfb9540eb38f77c", "score": "0.5143903", "text": "def _to_list(self, text):\n r=[]\n for x in text.split(\"$\"):\n x = x.strip()\n assert x\n r.append(x)\n return tuple(r)", "title": "" }, { "docid": "4153c39511f757926cd1684a4963a3aa", "score": "0.5143742", "text": "def asList( self ):\r\n out = []\r\n for res in self.__toklist:\r\n if isinstance(res,ParseResults):\r\n out.append( res.asList() )\r\n else:\r\n out.append( res )\r\n return out", "title": "" }, { "docid": "26cede90ab44af2c00ecd9216eb4535e", "score": "0.51401764", "text": "def parse(self) -> List[Version]:\n versions = []\n while self.next_line():\n assert self.current_line is not None\n if '{' in self.current_line:\n versions.append(self.parse_version())\n else:\n raise ParseError(\n f'Unexpected contents at top level: {self.current_line}')\n\n self.check_no_duplicate_symbols(versions)\n return versions", "title": "" }, { "docid": "08b9e25eaff61e953fbeafdd38239d1c", "score": "0.513816", "text": "def groupByField (listOfTabbedStrings, fieldGroupedBy):\n\n # Convert listOfTabbedStrings to a list of lists with 'lowest' list\n # representing a list of original tab seperated fields\n listOfListsByTab = []\n while listOfTabbedStrings != []:\n listOfListsByTab.append(listOfTabbedStrings.pop(0).split('\\t'))\n \n exonList = []\n\n listOfExonLists = []\n \n while listOfListsByTab != 'end':\n\n # If there is a fresh and clean exonList:\n # add the first coding region of the first/next gene to exonList\n if exonList == []:\n exonList.append(listOfListsByTab.pop(0))\n\n # If the next BioMart record list matches the one(s) in exonList:\n # add it to exonList\n elif listOfListsByTab[0][fieldGroupedBy] == 
exonList[0][fieldGroupedBy]:\n exonList.append(listOfListsByTab.pop(0))\n\n # Check to see if you just popped the last record:\n # - export last exonList\n # - cull exonList\n # - set listOfListsByTab to 'end' to stop the loop\n if listOfListsByTab == []:\n listOfExonLists.append(exonList)\n #print len(listOfExonLists), '\\n'\n print exonList[0][0]\n exonList = []\n listOfListsByTab = 'end'\n \n # Otherwise append whole exonList to listOfExonLists and clean exonList for next record group\n else:\n listOfExonLists.append(exonList)\n #print len(listOfExonLists), '\\n'\n print exonList[0][0]\n exonList = []\n \n print 'The groupByField function produced ',len(listOfExonLists),' groups.\\n\\n'\n return listOfExonLists", "title": "" }, { "docid": "83fb01b5c7b0ebf28e38ad7902534ab2", "score": "0.5135255", "text": "def _pre_process(src_file):\r\n slices = []\r\n handled_slices = []\r\n with open(src_file) as fs:\r\n for line in fs:\r\n slices.extend(re.split(r'({[%{#].+?[%}#]})', line))\r\n\r\n flg_is_comment = False\r\n handled_line = ''\r\n for slice in slices:\r\n if slice.startswith('{#'):\r\n flg_is_comment = True\r\n elif slice.startswith('#}'):\r\n flg_is_comment = False\r\n slice = slice[2:].strip()\r\n\r\n if flg_is_comment:\r\n continue\r\n elif slice.startswith('{%') or slice.startswith('{{'):\r\n if handled_line:\r\n handled_slices.append(handled_line)\r\n handled_line = ''\r\n handled_slices.append(slice)\r\n elif slice:\r\n handled_line += slice\r\n if handled_line:\r\n handled_slices.append(handled_line)\r\n return handled_slices", "title": "" }, { "docid": "0eed58c93ccb327bfdc21cac039903ff", "score": "0.5134003", "text": "def parse_simple_namelist(filePath, commentchar='!', condense=False ):\n \n lines = []\n with open(filePath, 'r') as f:\n if condense:\n pad = ''\n else:\n pad = ' '\n \n for line in f:\n ulines = unroll_namelist_line(line, commentchar=commentchar, condense=condense)\n lines = lines + ulines\n\n \n return lines", "title": "" }, { "docid": "60a4af7c95339a3859cf5353c33471d5", "score": "0.5131401", "text": "def parse(self):\n standard_formatters = re.compile(r'\\((.+?)\\)', re.IGNORECASE)\n return standard_formatters.findall(self._fmt)", "title": "" }, { "docid": "04cbccf92ec6ad2e5cc085144aa4a7c8", "score": "0.5128008", "text": "def process_data(data: str) -> list[list[str]]:\n return [re.findall(r\"\\d+\", line) for line in data.strip().split(\"\\n\")]", "title": "" }, { "docid": "704586cb6d035209facdc55183c05d32", "score": "0.51262045", "text": "def parse(raspa_output):\n # Reads the string into a newline-separated list, skipping useless lines\n data = [row.strip() for row in raspa_output.splitlines() if row and\n all(d not in row for d in [\"-----\", \"+++++\"])]\n\n # Generally, categories in the output are delimited by equal signs\n delimiters = [i for i, row in enumerate(data) if \"=====\" in row\n and \"Exclusion constraints energy\" not in data[i - 1]]\n\n # Append a row for \"absolute adsorption:\" and \"excess adsorption:\"\n # These values are separated into two rows\n abs_adsorp_rows = [i for i, row in enumerate(data)\n if \"absolute adsorption:\" in row]\n for row in abs_adsorp_rows:\n data[row] += \" \" + data[row + 1]\n data[row + 2] += data[row + 3]\n data[row + 1], data[row + 3] = \" \", \" \"\n\n # Use the delimiters to make a high-level dict. 
Title is row before\n # delimiter, and content is every row after delimiter, up to the next title\n info = {data[n - 1].strip(\":\"): data[n + 1: delimiters[i + 1] - 1]\n for i, n in enumerate(delimiters[:-1])}\n\n # Let's PARSE!\n for key, values in info.items():\n d, note_index = {}, 1\n for item in values:\n # Takes care of all \"Blocks[ #]\", skipping hard-to-parse parts\n if (\"Block\" in item and \"Box-lengths\" not in key\n and \"Van der Waals:\" not in item):\n blocks = _clean(item.split())\n d[\"\".join(blocks[:2])] = blocks[2:]\n\n # Most of the average data values are parsed in this section\n elif (any(s in item for s in [\"Average \", \"Surface area:\"])\n and \"desorption\" not in key):\n average_data = _clean(item.split())\n # Average values organized by its unit, many patterns here\n if len(average_data) == 8:\n del average_data[2:4]\n d[\" \".join(average_data[4:6])] = average_data[1:4]\n elif len(average_data) == 5:\n d[average_data[-1]] = average_data[1:4]\n elif \"Surface\" in average_data[0]:\n d[average_data[-1]] = average_data[2:5]\n # This is the common case\n else:\n del average_data[2]\n d[average_data[-1]] = average_data[1:4]\n\n # Average box-lengths has its own pattern\n elif \"Box-lengths\" in key:\n box_lengths = _clean(item.split())\n i = 3 if \"angle\" in item else 2\n d[\" \".join(box_lengths[:i])] = box_lengths[i:]\n\n # \"Heat of Desorption\" section\n elif \"desorption\" in key:\n if \"Note\" in item:\n notes = re.split(\"[:\\s]{2,}\", item)\n d[\"%s %d\" % (notes[0], note_index)] = notes[1]\n note_index += 1\n else:\n heat_desorp = _clean(item.split())\n # One line has \"Average\" in front, force it to be normal\n if \"Average\" in item:\n del heat_desorp[0]\n d[heat_desorp[-1]] = heat_desorp[0:3]\n\n # Parts where Van der Waals are included\n elif (\"Host-\" in key or \"-Cation\" in key or\n \"Adsorbate-Adsorbate\" in key) and \"desorption\" not in key:\n van_der = item.split()\n # First Column\n if \"Block\" in van_der[0]:\n sub_data = [_clean(s.split(\":\"))\n for s in re.split(\"\\s{2,}\", item)[1:]]\n sub_dict = {s[0]: s[1] for s in sub_data[:2]}\n d[\"\".join(van_der[:2])] = [float(van_der[2]), sub_dict]\n # Average for each columns\n elif \"Average\" in item:\n avg = _clean(re.split(\"\\s{2,}\", item))\n vdw, coulomb = [_clean(s.split(\": \")) for s in avg[2:4]]\n d[avg[0]] = avg[1]\n d[\"Average %s\" % vdw[0]] = vdw[1]\n d[\"Average %s\" % coulomb[0]] = coulomb[1]\n else:\n d[\"standard deviation\"] = _clean(van_der)\n\n # IMPORTANT STUFF\n elif \"Number of molecules\" in key:\n adsorb_data = _clean(item.rsplit(\" \", 12))\n if \"Component\" in item:\n gas_name = adsorb_data[2].strip(\"[]\")\n d[gas_name] = {}\n else:\n d[gas_name][adsorb_data[0]] = adsorb_data[1:]\n\n # Henry and Widom\n elif \"Average Widom\" in item:\n d[\"Widom\"] = _clean(item.rsplit(\" \", 5))[1:]\n\n elif \"Average Henry\" in item:\n d[\"Henry\"] = _clean(item.rsplit(\" \", 5))[1:]\n\n # Ignore these\n elif any(s in item for s in [\"=====\", \"Starting simulation\",\n \"Finishing simulation\"]):\n continue\n\n # Other strings\n else:\n parsed_data = _clean(re.split(\"[()[\\]:,\\t]\", item))\n d[parsed_data[0]] = parsed_data[1:]\n # Putting subdictionary back into main object\n info[key] = d\n\n return info", "title": "" }, { "docid": "560b8874f8949f62cc58722347c5344d", "score": "0.5124573", "text": "def get_unformatted_output(raw_data):\n out = []\n\n for entry in raw_data:\n if 'Federalist' in entry['pp']:\n continue\n\n out.append({\n 'sort_by': entry['nm'],\n 'name': 
format_name(entry['nm']),\n 'party': create_acronym(entry['pp']),\n 'term_start_date': '01-20-%s' % entry['tm'][:4]\n })\n\n return out", "title": "" }, { "docid": "ed614c11901cf38d239c0b504e29b48c", "score": "0.5121098", "text": "def parse(data, raw=False, quiet=False):\n jc.utils.compatibility(__name__, info.compatible, quiet)\n jc.utils.input_type_check(data)\n\n raw_output = []\n\n # Clear any blank lines\n cleandata = list(filter(None, data.splitlines()))\n\n if not jc.utils.has_data(data):\n return raw_output\n\n # linux output\n if cleandata[0].startswith(' File: '):\n output_line = {}\n\n # stats output contains 8 lines\n for line in cleandata:\n # line #1\n if line.find('File:') == 2:\n if output_line: # Reached a new file stat info.\n raw_output.append(output_line)\n output_line = {}\n\n line_list = line.split(maxsplit=1)\n output_line['file'] = line_list[1]\n\n # populate link_to field if -> found\n if ' -> ' in output_line['file']:\n filename = output_line['file'].split(' -> ')[0].strip('\\u2018').rstrip('\\u2019')\n link = output_line['file'].split(' -> ')[1].strip('\\u2018').rstrip('\\u2019')\n output_line['file'] = filename\n output_line['link_to'] = link\n else:\n filename = output_line['file'].split(' -> ')[0].strip('\\u2018').rstrip('\\u2019')\n output_line['file'] = filename\n\n continue\n\n # line #2\n if line.startswith(' Size:'):\n line_list = line.split(maxsplit=7)\n output_line['size'] = line_list[1]\n output_line['blocks'] = line_list[3]\n output_line['io_blocks'] = line_list[6]\n output_line['type'] = line_list[7]\n continue\n\n # line #3\n if line.startswith('Device:'):\n line_list = line.split()\n output_line['device'] = line_list[1]\n output_line['inode'] = line_list[3]\n output_line['links'] = line_list[5]\n continue\n\n # line #4\n if line.startswith('Access: ('):\n line = line.replace('(', ' ').replace(')', ' ').replace('/', ' ')\n line_list = line.split()\n output_line['access'] = line_list[1]\n output_line['flags'] = line_list[2]\n output_line['uid'] = line_list[4]\n output_line['user'] = line_list[5]\n output_line['gid'] = line_list[7]\n output_line['group'] = line_list[8]\n continue\n\n # line #5\n if line.startswith('Access: 2'):\n line_list = line.split(maxsplit=1)\n output_line['access_time'] = line_list[1]\n continue\n\n # line #6\n if line.startswith('Modify:'):\n line_list = line.split(maxsplit=1)\n output_line['modify_time'] = line_list[1]\n continue\n\n # line #7\n if line.startswith('Change:'):\n line_list = line.split(maxsplit=1)\n output_line['change_time'] = line_list[1]\n continue\n\n # line #8\n if line.startswith(' Birth:'):\n line_list = line.split(maxsplit=1)\n output_line['birth_time'] = line_list[1]\n continue\n\n if output_line:\n raw_output.append(output_line)\n\n # FreeBSD/OSX output\n else:\n for line in cleandata:\n value = shlex.split(line)\n output_line = {\n 'file': ' '.join(value[15:]),\n 'unix_device': value[0],\n 'inode': value[1],\n 'flags': value[2],\n 'links': value[3],\n 'user': value[4],\n 'group': value[5],\n 'rdev': value[6],\n 'size': value[7],\n 'access_time': value[8],\n 'modify_time': value[9],\n 'change_time': value[10],\n 'birth_time': value[11],\n 'block_size': value[12],\n 'blocks': value[13],\n 'unix_flags': value[14]\n }\n\n raw_output.append(output_line)\n\n return raw_output if raw else _process(raw_output)", "title": "" }, { "docid": "307139b65c5c6a3a0cab40a14eac6c9e", "score": "0.5116401", "text": "def unpack(self, string):\n parsed_list = []\n\n for post in string.split(self.delimiter):\n parsed = 
self.parse(post)\n if parsed:\n parsed_list.append(parsed)\n return parsed_list", "title": "" }, { "docid": "3a01c0f6e9f0ad94551b4acf956ce922", "score": "0.5112231", "text": "def _docstring_getblocks(self):\n #If there are no lines to look at, we have nothing to do here.\n if self.ibuffer[0] == self.ibuffer[1]:\n return []\n\n lines = self.context.bufferstr[self.ibuffer[0]:self.ibuffer[1]]\n docblock = []\n result = []\n self._doclines = []\n\n #We need to keep track of the line number for the start of the\n #documentation strings.\n docline = 0\n doclength = 0\n\n first = self.docparser.RE_DOCS.match(lines[0])\n if first is not None:\n docblock.append(first.group(\"docstring\"))\n docline = self.ibuffer[0]\n self._doclines.append(docline)\n doclength += len(lines[0]) + 1 # + 1 for \\n removed by split.\n\n #We need to search backwards in the main buffer string for\n #additional tags to add to the block\n i = self.ibuffer[0] - 1\n while i > 0:\n current = self.context.bufferstr[i]\n docmatch = self.docparser.RE_DOCS.match(current)\n if docmatch is not None:\n docblock.append(docmatch.group(\"docstring\"))\n docline = i\n doclength += len(current) + 1\n else:\n break\n i -= 1\n\n #Reverse the docblock list since we were going backwards and appending.\n if len(docblock) > 0:\n docblock.reverse()\n\n #Now handle the lines following the first line. Also handle the\n #possibility of multiple, separate blocks that are still valid XML.\n #We have to keep going until we have exceed the operational changes\n #or found the decorating element.\n i = self.ibuffer[0] + 1\n while (i < len(self.context.bufferstr) and \n (i < self.ibuffer[1] or len(docblock) > 0)):\n line = self.context.bufferstr[i]\n docmatch = self.docparser.RE_DOCS.match(line)\n if docmatch is not None:\n docblock.append(docmatch.group(\"docstring\"))\n doclength += len(line)\n if docline == 0:\n docline = i\n #Only track actual documentation lines that are within the \n #operations list of lines.\n if i < self.ibuffer[1]:\n self._doclines.append(i)\n\n elif len(docblock) > 0:\n key = self._docstring_key(line)\n result.append((docblock, docline, doclength, key))\n docblock = []\n docline = 0\n doclength = 0\n\n #We need to exit the loop if we have exceeded the length of\n #the operational changes\n if len(docblock) == 0 and i > self.ibuffer[1]:\n break\n i += 1\n\n return result", "title": "" }, { "docid": "77360134491b6ef46a448534416bbd10", "score": "0.511177", "text": "def parse_body(source):\n\n paragraphs = []\n for blank, input_lines in itertools.groupby(source, _is_blank):\n if not blank:\n as_string = note_re.sub('', '\\n'.join(input_lines))\n if _is_blank(as_string):\n continue\n paragraph = InputParagraph(as_string.split('\\n'))\n paragraph.update_list(paragraphs)\n\n return paragraphs", "title": "" }, { "docid": "c2a9ff947a847a1e19f01fb638e302e7", "score": "0.5111172", "text": "def _parse_minute_into_tasks(content, group):\n missing_colon_lines = []\n unknown_task_ids = []\n unknown_user = []\n\n task_list = []\n done_list = []\n remove_list = []\n\n for i, line in enumerate(content.splitlines()):\n try:\n if MISSING_COLON_REGEX.search(line):\n missing_colon_lines.append((i, line))\n continue\n\n # Single task for multiple users.\n for names, task in TASK_REGEX.findall(line):\n users = get_list_of_users_from_string(group.id, names)\n task_list.append((i, task, users))\n\n # Single task for individual users.\n for names, task in TASKS_REGEX.findall(line):\n users = get_list_of_users_from_string(group.id, names)\n for user in 
users:\n task_list.append((i, task, [user]))\n\n # Mark a comma separated list as done.\n for task_id_list in DONE_REGEX.findall(line):\n for b32_task_id in task_id_list.strip().split(\",\"):\n b32_task_id = b32_task_id.strip()\n try:\n task_id = b32.decode(b32_task_id)\n except ValueError:\n unknown_task_ids.append((i, b32_task_id))\n continue\n\n task = pimpy_repository. \\\n find_task_in_group_by_id(task_id, group.id)\n if not task:\n unknown_task_ids.append((i, b32_task_id))\n else:\n done_list.append((i, task))\n\n # Mark a comma separated list as removed.\n for task_id_list in REMOVE_REGEX.findall(line):\n for b32_task_id in task_id_list.strip().split(\",\"):\n b32_task_id = b32_task_id.strip()\n try:\n task_id = b32.decode(b32_task_id)\n except ValueError:\n unknown_task_ids.append((i, b32_task_id))\n continue\n\n task = pimpy_repository. \\\n find_task_in_group_by_id(task_id, group.id)\n if not task:\n unknown_task_ids.append((i, b32_task_id))\n else:\n remove_list.append((i, task))\n\n # Catch invalid user.\n except ValidationException:\n unknown_user.append((i, line))\n\n if len(missing_colon_lines) or len(unknown_task_ids) or len(unknown_user):\n raise InvalidMinuteException(missing_colon_lines, unknown_task_ids,\n unknown_user)\n\n return task_list, done_list, remove_list", "title": "" }, { "docid": "a7c5ea87e48462dd7e7fa67b2f7ae793", "score": "0.51104", "text": "def parse_data(block):\n lines = block.strip().splitlines()\n indices = []\n symbols = []\n charges = []\n for line in lines[1:]:\n words = line.split()\n indices.append(int(words[0]))\n symbols.append(words[1])\n charges.append(float(words[2]))\n return (np.array(indices), np.array(symbols), np.array(charges))", "title": "" }, { "docid": "40aa9b7d37e59916f5d19911d60a7aa7", "score": "0.50997615", "text": "def tokenizer(self):\n all_tokens = []\n index = 0 #TODO remove\n # for section, index in zip(self.zettel, range(len(self.zettel))): #TODO\n # if index == 3:\n # self.given_tags = re.split(\";\", section) #TODO make sure to use ';' \n for section in self.zettel:\n if index == len(self.zettel) - 1:\n self.given_tags = re.split(\";\", section)\n else:\n tokens = re.split('\\W+', section)\n tokens = list(filter(None, tokens))\n all_tokens.append(tokens)\n index += 1 #TODO remove\n return all_tokens", "title": "" }, { "docid": "d48816a8237379718698109d4dede931", "score": "0.50961757", "text": "def parse_lines(self):\n\n\t\tself.lines_raw = Text.DATE_PATTERN.findall(self.data)\n\n\t\tself.lines = []\n\t\tfor i in self.lines_raw:\n\t\t\tdate, rest = i.split(\" - \", 1)\n\t\t\tif ':' in rest:\n\t\t\t\tuser, message = rest.split(\": \", 1)\n\t\t\t\t# message_type = 1 if message == \"<Media omitted>\" else 0\n\t\t\t\tmessage_type = int(message == \"<Media omitted>\")\n\t\t\telse:\n\t\t\t\tuser = \"system\"\n\t\t\t\tmessage = rest\n\t\t\t\tmessage_type = 2\n\n\t\t\ttemp_date = re.findall(\"^(\\d{1,2}/\\d{1,2}/\\d\\d), \", date)[0]\n\t\t\ttemp_hour = date[date.find(',')+2:]\n\t\t\tself.lines.append((\n\t\t\t\t\t\t\tutils.date.parse_date(temp_date), # date\n\t\t\t\t\t\t\tint(temp_hour[:2]) * 60 + int(temp_hour[-2:]), # minutes\n\t\t\t\t\t\t\tuser,\n\t\t\t\t\t\t\tmessage,\n\t\t\t\t\t\t\tmessage_type\n\t\t\t\t\t\t))\n\t\treturn self.lines", "title": "" }, { "docid": "2ef8fdfdf6cfb834d2ff9b32240d3be5", "score": "0.5094201", "text": "def extract_data(data_list):\n data = []\n for block in data_list:\n state = get_state(block[0])\n date = get_date(block[0])\n b_type = get_type(block[0])\n category = get_category(block[0])\n records = 
get_records(block[0])\n\n extracted_text = (' ').join([state, date, b_type, category, records])\n entity = get_entity(block, extracted_text)\n\n source = get_source(block[-2])\n url = get_url(block[-1])\n\n data.append([entity, state, date, b_type,\n category, records, source, url])\n return data", "title": "" }, { "docid": "6beb61879e656a61a2ff17aa82c8670d", "score": "0.5091759", "text": "def extract_tokens(representation, separators=SEPARATOR_CHARACTERS):\n buff = \"\"\n elements = []\n last_token = None\n\n for index, c in enumerate(representation):\n # if separator character is found, push\n # the content of the buffer in the elements list\n if c in separators:\n if buff:\n # If the last found token is invalid,\n # raise and InvalidTokenError\n if not valid_token(buff):\n raise InvalidTokenError(\n \"Duration representation {0} contains \"\n \"an invalid token: {1}\".format(representation, buff)\n )\n\n # If buffer content is a separator word, for example\n # \"and\", just ignore it\n if not buff.strip() in SEPARATOR_TOKENS:\n elements.append(buff)\n\n # Anyway, reset buffer and last token marker\n # to their zero value\n buff = \"\"\n last_token = None\n else:\n token = compute_char_token(c)\n if token is not None \\\n and last_token is not None \\\n and token != last_token:\n elements.append(buff)\n buff = c\n else:\n buff += c\n\n last_token = token\n\n # push the content left in representation\n # in the elements list\n elements.append(buff)\n\n return list(zip(elements[::2], elements[1::2]))", "title": "" }, { "docid": "8ab2eb313b528d432236ade01923b6d3", "score": "0.507784", "text": "def parse(source):", "title": "" }, { "docid": "b6b49eec132ec12019d9d17be4960870", "score": "0.50764745", "text": "def _prettytable_to_list(self, str):\n retval = []\n if str is not None and len(str) > 0:\n rows = re.findall(r'\\|(.*?)\\n', str)\n # Remove header\n header_row = rows.pop(0)\n key_names = re.findall(r'\\s*(.*?)\\s*\\|', header_row)\n for row in rows:\n values = re.findall(r'\\s*(.*?)\\s*\\|', row)\n entry_dict = dict(zip(key_names, values))\n retval.append(entry_dict)\n return retval", "title": "" }, { "docid": "27865cb21475e9d05ac6cf9e275e820f", "score": "0.50761974", "text": "def parse_clamscan(scanned_file: str, raw_output: str) -> list:\n\n summary_regex = re.compile(\"-+ SUMMARY -+\")\n results = []\n\n undefined_path_signatures = set()\n defined_path_signatures = set()\n for line in raw_output.splitlines():\n if summary_regex.match(line):\n # Output complete\n break\n\n result = parse_clamscan_output_line(scanned_file, line)\n if result:\n path = result[0]\n signature = result[1]\n # Ensure that we have a unique set of signatures when the path is undefined (see parse_clamscan_output_line)\n if path == UNKNOWN and signature not in undefined_path_signatures:\n undefined_path_signatures.add(signature)\n elif path != UNKNOWN:\n results.append({\"path\": path, \"signature\": signature})\n defined_path_signatures.add(signature)\n\n # For each signature that doesn't have a defined path, append it.\n for signature in undefined_path_signatures.difference(defined_path_signatures):\n results.append({\"path\": UNKNOWN, \"signature\": signature})\n return results", "title": "" }, { "docid": "01f7b125dcac1634c401862b4caea196", "score": "0.5072473", "text": "def getBlocks(self, tokzer=None):\n\n def consume_extent(i, tokens, extent=None, detect_change=False):\n \"\"\"Return tokens that belong to the given extent.\n\n It parses all the tokens that follow tokens[i], until getting out\n of the 
extent. When detect_change is True, it may terminate early\n when detecting preprocessing directives inside the extent.\n \"\"\"\n\n result = []\n if extent is None:\n extent = tokens[i].cursor.extent\n\n while i < len(tokens) and tokens[i].location in extent:\n t = tokens[i]\n if debugBlockParser:\n print(' ' * 2, t.id, t.kind, t.cursor.kind)\n if (detect_change and t.cursor.extent != extent and\n t.cursor.kind == CursorKind.PREPROCESSING_DIRECTIVE):\n break\n result.append(t)\n i += 1\n return (i, result)\n\n def consume_line(i, tokens):\n \"\"\"Return tokens that follow tokens[i] in the same line.\"\"\"\n result = []\n line = tokens[i].location.line\n while i < len(tokens) and tokens[i].location.line == line:\n if tokens[i].cursor.kind == CursorKind.PREPROCESSING_DIRECTIVE:\n break\n result.append(tokens[i])\n i += 1\n return (i, result)\n\n if tokzer is None:\n tokzer = self._tokzer\n tokens = tokzer.tokens\n\n blocks = []\n buf = []\n i = 0\n\n while i < len(tokens):\n t = tokens[i]\n cursor = t.cursor\n\n if debugBlockParser:\n print (\"%d: Processing [%s], kind=[%s], cursor=[%s], \"\n \"extent=[%s]\" % (t.location.line, t.spelling, t.kind,\n cursor.kind,\n self._short_extent(cursor.extent)))\n\n if cursor.kind == CursorKind.PREPROCESSING_DIRECTIVE:\n if buf:\n blocks.append(Block(buf))\n buf = []\n\n j = i\n if j + 1 >= len(tokens):\n raise BadExpectedToken(\"### BAD TOKEN at %s\" % (t.location))\n directive = tokens[j+1].id\n\n if directive == 'define':\n if i+2 >= len(tokens):\n raise BadExpectedToken(\"### BAD TOKEN at %s\" %\n (tokens[i].location))\n\n # Skip '#' and 'define'.\n extent = tokens[i].cursor.extent\n i += 2\n id = ''\n # We need to separate the id from the remaining of\n # the line, especially for the function-like macro.\n if (i + 1 < len(tokens) and tokens[i+1].id == '(' and\n (tokens[i].location.column + len(tokens[i].spelling) ==\n tokens[i+1].location.column)):\n while i < len(tokens):\n id += tokens[i].id\n if tokens[i].spelling == ')':\n i += 1\n break\n i += 1\n else:\n id += tokens[i].id\n # Advance to the next token that follows the macro id\n i += 1\n\n (i, ret) = consume_extent(i, tokens, extent=extent)\n blocks.append(Block(ret, directive=directive,\n lineno=t.location.line, identifier=id))\n\n else:\n (i, ret) = consume_extent(i, tokens)\n blocks.append(Block(ret[2:], directive=directive,\n lineno=t.location.line))\n\n elif cursor.kind == CursorKind.INCLUSION_DIRECTIVE:\n if buf:\n blocks.append(Block(buf))\n buf = []\n directive = tokens[i+1].id\n (i, ret) = consume_extent(i, tokens)\n\n blocks.append(Block(ret[2:], directive=directive,\n lineno=t.location.line))\n\n elif cursor.kind == CursorKind.VAR_DECL:\n if buf:\n blocks.append(Block(buf))\n buf = []\n\n (i, ret) = consume_extent(i, tokens, detect_change=True)\n buf += ret\n\n elif cursor.kind == CursorKind.FUNCTION_DECL:\n if buf:\n blocks.append(Block(buf))\n buf = []\n\n (i, ret) = consume_extent(i, tokens, detect_change=True)\n buf += ret\n\n else:\n (i, ret) = consume_line(i, tokens)\n buf += ret\n\n if buf:\n blocks.append(Block(buf))\n\n # _parsed=True indicates a successful parsing, although may result an\n # empty BlockList.\n self._parsed = True\n\n return BlockList(blocks)", "title": "" }, { "docid": "86b5e15dfc92e4c23fcabeffab0aaaa7", "score": "0.50715864", "text": "def getContentsAsList(self,showHeader=0,includeLineContents=0):\n\t\titems = []\n\t\tif showHeader == 1: items.append('%s\\n' % (self.sStartOfBlock))\n\t\tif includeLineContents==0:\n\t\t\tallkeys = 
self.aKeywords.keys()\n\t\t\tallkeys.sort()\n\t\t\tfor k in allkeys:\n\t\t\t\tv = self.aKeywords[k]\n\t\t\t\titems.append(\"%s %s\\n\" % (k,v))\n\t\telse:\n\t\t\tfor ilo in self.aLineContents: # Okay, the guys wants it all\n\t\t\t\tif ilo.isComment == 1: continue\n\t\t\t\titems.append(ilo.getRawLine())\n\t\tfor ilo in self.aLineContents: # fd\n\t\t\tif ilo.isComment == 1: items.append(ilo.getRawLine())\n\t\tif showHeader == 1: items.append(\"%s\\n\" % self.sEndOfBlock)\n\t\treturn items", "title": "" }, { "docid": "ddc19bc83520245a90ca1e9fbfcdbbc9", "score": "0.5071156", "text": "def parse_packets(bytestream):\r\n\r\n container = packet_stream.parse(bytestream)\r\n\r\n l = [(i.header, i.payload) for i in container.full_packet]\r\n leftovers = \"\".join(chr(i) for i in container.leftovers)\r\n\r\n if DUMP_ALL_PACKETS:\r\n for header, payload in l:\r\n print \"Parsed packet 0x%.2x\" % header\r\n print payload\r\n\r\n return l, leftovers", "title": "" }, { "docid": "3009dad22f514ae7ddb2da351b83318b", "score": "0.50671506", "text": "def flatten_content_list(content_list: List[bs4.element.NavigableString]) -> str:\n out = ''\n for c in content_list:\n stack = [c]\n while len(stack) > 0:\n tag = stack.pop()\n if tag.string is not None:\n out += tag.string.replace('\\n', ' ').replace(' ', '')\n else:\n stack.extend(tag.contents)\n return out", "title": "" }, { "docid": "49bbf3cbf92bb4636b8ba5721968358d", "score": "0.50620216", "text": "def getBlocks(self, tokzer=None):\n\n def consume_extent(i, tokens, extent=None, detect_change=False):\n \"\"\"Return tokens that belong to the given extent.\n\n It parses all the tokens that follow tokens[i], until getting out\n of the extent. When detect_change is True, it may terminate early\n when detecting preprocessing directives inside the extent.\n \"\"\"\n\n result = []\n if extent is None:\n extent = tokens[i].cursor.extent\n\n while i < len(tokens) and tokens[i].location in extent:\n t = tokens[i]\n if debugBlockParser:\n print ' ' * 2, t.id, t.kind, t.cursor.kind\n if (detect_change and t.cursor.extent != extent and\n t.cursor.kind == CursorKind.PREPROCESSING_DIRECTIVE):\n break\n result.append(t)\n i += 1\n return (i, result)\n\n def consume_line(i, tokens):\n \"\"\"Return tokens that follow tokens[i] in the same line.\"\"\"\n result = []\n line = tokens[i].location.line\n while i < len(tokens) and tokens[i].location.line == line:\n if tokens[i].cursor.kind == CursorKind.PREPROCESSING_DIRECTIVE:\n break\n result.append(tokens[i])\n i += 1\n return (i, result)\n\n if tokzer is None:\n tokzer = self._tokzer\n tokens = tokzer.tokens\n\n blocks = []\n buf = []\n i = 0\n\n while i < len(tokens):\n t = tokens[i]\n cursor = t.cursor\n\n if debugBlockParser:\n print (\"%d: Processing [%s], kind=[%s], cursor=[%s], \"\n \"extent=[%s]\" % (t.location.line, t.spelling, t.kind,\n cursor.kind,\n self._short_extent(cursor.extent)))\n\n if cursor.kind == CursorKind.PREPROCESSING_DIRECTIVE:\n if buf:\n blocks.append(Block(buf))\n buf = []\n\n j = i\n if j + 1 >= len(tokens):\n raise BadExpectedToken(\"### BAD TOKEN at %s\" % (t.location))\n directive = tokens[j+1].id\n\n if directive == 'define':\n if i+2 >= len(tokens):\n raise BadExpectedToken(\"### BAD TOKEN at %s\" %\n (tokens[i].location))\n\n # Skip '#' and 'define'.\n extent = tokens[i].cursor.extent\n i += 2\n id = ''\n # We need to separate the id from the remaining of\n # the line, especially for the function-like macro.\n if (i + 1 < len(tokens) and tokens[i+1].id == '(' and\n (tokens[i].location.column + 
len(tokens[i].spelling) ==\n tokens[i+1].location.column)):\n while i < len(tokens):\n id += tokens[i].id\n if tokens[i].spelling == ')':\n i += 1\n break\n i += 1\n else:\n id += tokens[i].id\n # Advance to the next token that follows the macro id\n i += 1\n\n (i, ret) = consume_extent(i, tokens, extent=extent)\n blocks.append(Block(ret, directive=directive,\n lineno=t.location.line, identifier=id))\n\n else:\n (i, ret) = consume_extent(i, tokens)\n blocks.append(Block(ret[2:], directive=directive,\n lineno=t.location.line))\n\n elif cursor.kind == CursorKind.INCLUSION_DIRECTIVE:\n if buf:\n blocks.append(Block(buf))\n buf = []\n directive = tokens[i+1].id\n (i, ret) = consume_extent(i, tokens)\n\n blocks.append(Block(ret[2:], directive=directive,\n lineno=t.location.line))\n\n elif cursor.kind == CursorKind.VAR_DECL:\n if buf:\n blocks.append(Block(buf))\n buf = []\n\n (i, ret) = consume_extent(i, tokens, detect_change=True)\n buf += ret\n\n elif cursor.kind == CursorKind.FUNCTION_DECL:\n if buf:\n blocks.append(Block(buf))\n buf = []\n\n (i, ret) = consume_extent(i, tokens, detect_change=True)\n buf += ret\n\n else:\n (i, ret) = consume_line(i, tokens)\n buf += ret\n\n if buf:\n blocks.append(Block(buf))\n\n # _parsed=True indicates a successful parsing, although may result an\n # empty BlockList.\n self._parsed = True\n\n return BlockList(blocks)", "title": "" }, { "docid": "39c11d0136c2832cb21f968dfc218f4e", "score": "0.5059111", "text": "def _read_data(cls, input_file):\n with open(input_file, 'r', encoding=\"utf-8\") as f:\n lines = []\n for line in f:\n content = line.split('|')\n sent = content[0].strip()\n label = content[1].replace('\\n', '')\n lines.append([sent, label])\n return lines", "title": "" }, { "docid": "d9f401c12679be2385faa6457db05ee2", "score": "0.5056553", "text": "def rowParse(self):\n self.rowData = self.RAWdata.split(\"\\n\")\n\n # If final splitted character is EOF delete that void element\n if self.rowData[-1] == \"\":\n del self.rowData[-1]\n\n self.rowData = self.parseComment(self.rowData)", "title": "" }, { "docid": "8419761b95f6e153b2a3d174299d4401", "score": "0.50522107", "text": "def parse_counters_packets(sketch):\n\tparsed_sketch = []\n\tfor i in range(len(sketch)):\n\t\tparsed_sketch.append([])\n\t\tfor j in range(len(sketch[i])):\n\t\t\tparsed_sketch[i].append(str(sketch[i][j]).split(\"=\")[1].split(\",\")[0])\n\treturn parsed_sketch", "title": "" }, { "docid": "06c7f6939fec7dce7680fc0ef18285d0", "score": "0.5051958", "text": "def parse_multiple(source):\n return [parse(exp) for exp in split_exps(source)]", "title": "" }, { "docid": "19e8b18d129504fc5643062564df8058", "score": "0.5050841", "text": "def flatten_parse(parse):\n no_brackets = re.sub(r'[\\[\\], ]+', ' ', str(parse))\n return no_brackets.strip().split(' ')", "title": "" }, { "docid": "41b5addf4a9decdcc17ec16dc0aad5bd", "score": "0.5048182", "text": "def parse(self, buff, delimiter=DELIMITER, separator=SEPARATOR):\n def pushback_generator(iterator):\n \"\"\"\n Generator which allows to push back a previously picked item\n for example:\n gen = pushback_generator(range(10))\n print next(gen)\n print next(gen)\n v = next(gen)\n print v\n gen.send(v)\n print next(gen)\n :param iterator:\n :return:\n \"\"\"\n for value in iterator:\n back = yield value\n if back is not None:\n yield back\n yield back\n\n custom_r = re.compile(FIX_REGEX_STRING.format(d=re.escape(delimiter),\n s=re.escape(separator)).encode('UTF-8'),\n re.DOTALL)\n tagvals = custom_r.findall(buff)\n\n msg_type = None\n if not 
self._no_groups and self.spec is not None:\n for i in range(4):\n if tagvals[i][0] == b'35':\n msg_type = self.spec.msg_types.get(tagvals[i][1])\n\n if self.encoding is not None:\n tagvals = ((int_or_str(tval[0], self.encoding), tval[1].decode(self.encoding)) for tval in tagvals)\n elif self.decode_all_as_347:\n tagvals = [(int_or_str(tval[0]), tval[1]) for tval in tagvals]\n encoding = None\n for tag, val in tagvals:\n if tag == 347:\n encoding = val.decode('UTF-8')\n break\n if encoding is not None:\n tagvals = ((t[0], t[1].decode(encoding)) for t in tagvals)\n else:\n tagvals = ((int_or_str(tval[0]), tval[1]) for tval in tagvals)\n # Need to add logic to parse Encoded* tags according to 347\n\n if self._no_groups or self.spec is None or msg_type is None:\n # no groups can be found without a spec, so no point looking up the msg type.\n return self._frg_class(tagvals)\n msg = self._frg_class()\n groups = msg_type.groups\n tagvals = pushback_generator(tagvals)\n for tag, value in tagvals:\n if tag not in groups:\n msg[tag] = value\n else:\n if value == '0':\n msg[tag] = RepeatingGroup.create_repeating_group(tag)\n else:\n contents, last_tagval = self._process_group(tag, tagvals,\n msg_type=msg_type,\n group=groups[tag])\n msg[tag] = contents\n if last_tagval:\n tagvals.send(last_tagval)\n return msg", "title": "" }, { "docid": "cf63c656b8e0cac13faebc2a01d9e92b", "score": "0.5044811", "text": "def split_bars(self, body):\r\n body = \"\".join(body)\r\n bars = re.split(self.regexPattern, body)\r\n while(\"\" in bars):\r\n bars.remove(\"\")\r\n if bars[0] in self.delimiters:\r\n bars[1] = bars[0]+bars[1]\r\n bars = bars[1:]\r\n bars = [bars[i*2]+bars[i*2+1] for i in range(int(len(bars)/2))]\r\n\r\n return bars", "title": "" }, { "docid": "252bd4859572f726ed18e0c8027780e3", "score": "0.50384027", "text": "def stream_processing(file_path):\n\n\twith open(file_path, 'r') as f:\n\n\t\t# prepares the date from the file\n\t\tfor line in f:\n\t\t\told_stream = line.strip()\n\t\t\t# a string with the cancelled chars removed \n\t\t\tcleaner_stream = ''\n\t\t\t# \n\t\t\tcleanest_stream = ''\n\t\t\t# cancelled chars flag\n\t\t\tskip_next = False\n\t\t\t# garbage flag\n\t\t\tskip_while = False\n\t\t\t# nesting info for counting the groups\n\t\t\tnesting = 0\n\t\t\t# groups\n\t\t\tgroups = 0\n\n\t\t\t# Part 2\n\t\t\t# garbage chars counter\n\t\t\tgarbage_count = 0\n\n\t\t\t# removes the '!' 
and the next char from the stream\n\t\t\tfor char in range(0, len(old_stream)):\n\t\t\t\tif skip_next:\n\t\t\t\t\tskip_next = False\n\t\t\t\t\tcontinue\n\n\t\t\t\tif old_stream[char] == '!':\n\t\t\t\t\tskip_next = True\n\t\t\t\telse:\n\t\t\t\t\tcleaner_stream += old_stream[char]\n\n\t\t\t# removes the garbage\n\t\t\tfor char in range(0, len(cleaner_stream)):\n\t\t\t\t# while skip_while is True if the char is not '>' the iteration continues\n\t\t\t\tif skip_while:\n\t\t\t\t\tif cleaner_stream[char] == '>':\n\t\t\t\t\t\tskip_while = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tgarbage_count += 1\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t# if the char is '<', skip while is set to True\n\t\t\t\tif cleaner_stream[char] == '<':\n\t\t\t\t\tskip_while = True\n\t\t\t\telif cleaner_stream[char] == '>':\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tcleanest_stream += cleaner_stream[char]\n\n\t\t\t# counts the groups\n\t\t\tfor char in cleanest_stream:\n\n\t\t\t\tif char == '{':\n\t\t\t\t\tnesting += 1\n\n\t\t\t\tif char == '}':\n\t\t\t\t\tgroups += nesting\n\t\t\t\t\tnesting -= 1\n\n\n\twith open('output_91', 'w') as f:\n\t\tf.write(str(groups))\n\n\twith open('output_92', 'w') as f:\n\t\tf.write(str(garbage_count))", "title": "" }, { "docid": "134e535f3a50148e34d531d30a00b1cd", "score": "0.50335705", "text": "def preprocess_lines(self, lines: List[str]) -> List[str]:\n\n def repl(m: re.Match) -> str:\n \"\"\"\n Split lines, adding leading whitespace to the second line.\n *Don't* separate tags if the tags open and close the same element.\n \"\"\"\n m2 = self.tag_name_pat.match(m.group(2))\n m3 = self.tag_name_pat.match(m.group(3))\n tag_name2 = m2 and m2.group(1) or ''\n tag_name3 = m3 and m3.group(1) or ''\n same_element = (\n tag_name2 == tag_name3\n and not m.group(2).startswith('</')\n and m.group(3).startswith('</')\n )\n lws = g.get_leading_ws(m.group(1))\n sep = '' if same_element else '\\n' + lws\n return m.group(1) + m.group(2).rstrip() + sep + m.group(3)\n\n result_lines = []\n for i, line in enumerate(lines):\n s = re.sub(self.adjacent_tags_pat, repl, line)\n result_lines.extend(g.splitLines(s))\n return result_lines", "title": "" }, { "docid": "278c388bea9a38b5a26011502f8b22ef", "score": "0.50301296", "text": "def parse_slurm(squeue_output: str) -> List[Dict]:\n table = [[str(i) for i in line.split()] for line in squeue_output.splitlines()]\n \"\"\"\n [['JOBID', 'NAME', 'STATE', 'SUBMIT_TIME', 'START_TIME', 'EXEC_HOST', 'REASON'], \n ['418774', 'V_REL_130234420_7_0', 'RUNNING', '2018-05-09T23:34:36', '2018-05-10T09:41:36', 'shangrila01', 'None']]\n \"\"\"\n list_of_dict: List[Dict] = [dict(zip(table[0], row)) for row in table[1:]]\n \"\"\"\n [{'NAME': 'V_REL_130234420_7_0', \n 'START_TIME': '2018-05-10T09:41:36', \n 'REASON': 'None', \n 'JOBID': '418774', \n 'STATE': 'RUNNING', \n 'SUBMIT_TIME': '2018-05-09T23:34:36', \n 'EXEC_HOST': 'shangrila01'}\n ]\n \"\"\"\n return list_of_dict", "title": "" }, { "docid": "43b748036515324d5dc6c9c3f3972916", "score": "0.50250113", "text": "def _parse_block(self, line_feeder, end_marker, parent, deps,\n visible_if_deps, res=None):\n\n block = [] if res is None else res\n\n while 1:\n # Do we already have a tokenized line that we determined wasn't\n # part of whatever we were parsing earlier? 
See comment in\n # Config.__init__().\n if self.end_line is not None:\n line = self.end_line\n tokens = self.end_line_tokens\n tokens.unget_all()\n\n self.end_line = None\n self.end_line_tokens = None\n else:\n line = line_feeder.get_next()\n if line is None:\n if end_marker is not None:\n raise Kconfig_Syntax_Error(\"Unexpected end of file {0}\"\n .format(line_feeder.filename))\n return block\n\n tokens = self._tokenize(line, False, line_feeder.filename,\n line_feeder.linenr)\n\n t0 = tokens.get_next()\n if t0 is None:\n continue\n\n # Cases are ordered roughly by frequency, which speeds things up a\n # bit\n\n if t0 == T_CONFIG or t0 == T_MENUCONFIG:\n # The tokenizer will automatically allocate a new Symbol object\n # for any new names it encounters, so we don't need to worry\n # about that here.\n sym = tokens.get_next()\n\n # Symbols defined in multiple places get the parent of their\n # first definition. However, for symbols whose parents are\n # choice statements, the choice statement takes precedence.\n if not sym.is_defined_ or isinstance(parent, Choice):\n sym.parent = parent\n\n sym.is_defined_ = True\n\n self.kconfig_syms.append(sym)\n block.append(sym)\n\n self._parse_properties(line_feeder, sym, deps, visible_if_deps)\n\n elif t0 == T_SOURCE:\n kconfig_file = tokens.get_next()\n exp_kconfig_file = self._expand_sym_refs(kconfig_file)\n f = os.path.join(self.base_dir, exp_kconfig_file)\n if not os.path.exists(f):\n raise IOError('{0}:{1}: sourced file \"{2}\" (expands to '\n '\"{3}\") not found. Perhaps base_dir '\n '(argument to Config.__init__(), currently '\n '\"{4}\") is set to the wrong value.'\n .format(line_feeder.filename,\n line_feeder.linenr,\n kconfig_file, exp_kconfig_file,\n self.base_dir))\n # Add items to the same block\n self._parse_file(f, parent, deps, visible_if_deps, block)\n\n elif t0 == end_marker:\n # We have reached the end of the block\n return block\n\n elif t0 == T_IF:\n # If statements are treated as syntactic sugar for adding\n # dependencies to enclosed items and do not have an explicit\n # object representation.\n\n dep_expr = self._parse_expr(tokens, None, line,\n line_feeder.filename,\n line_feeder.linenr)\n # Add items to the same block\n self._parse_block(line_feeder, T_ENDIF, parent,\n _make_and(dep_expr, deps),\n visible_if_deps, block)\n\n elif t0 == T_COMMENT:\n comment = Comment()\n\n comment.config = self\n comment.parent = parent\n comment.filename = line_feeder.filename\n comment.linenr = line_feeder.linenr\n comment.text = tokens.get_next()\n\n self.comments.append(comment)\n block.append(comment)\n\n self._parse_properties(line_feeder, comment, deps,\n visible_if_deps)\n\n elif t0 == T_MENU:\n menu = Menu()\n\n menu.config = self\n menu.parent = parent\n menu.filename = line_feeder.filename\n menu.linenr = line_feeder.linenr\n menu.title = tokens.get_next()\n\n self.menus.append(menu)\n block.append(menu)\n\n # Parse properties and contents\n self._parse_properties(line_feeder, menu, deps,\n visible_if_deps)\n menu.block = self._parse_block(line_feeder, T_ENDMENU, menu,\n menu.dep_expr,\n _make_and(visible_if_deps,\n menu.visible_if_expr))\n\n elif t0 == T_CHOICE:\n name = tokens.get_next()\n if name is None:\n choice = Choice()\n self.choices.append(choice)\n else:\n # Named choice\n choice = self.named_choices.get(name)\n if choice is None:\n choice = Choice()\n choice.name = name\n self.named_choices[name] = choice\n self.choices.append(choice)\n\n choice.config = self\n choice.parent = parent\n\n 
choice.def_locations.append((line_feeder.filename,\n line_feeder.linenr))\n\n # Parse properties and contents\n self._parse_properties(line_feeder, choice, deps,\n visible_if_deps)\n choice.block = self._parse_block(line_feeder, T_ENDCHOICE,\n choice, deps, visible_if_deps)\n\n choice._determine_actual_symbols()\n\n # If no type is specified for the choice, its type is that of\n # the first choice item with a specified type\n if choice.type == UNKNOWN:\n for item in choice.actual_symbols:\n if item.type != UNKNOWN:\n choice.type = item.type\n break\n\n # Each choice item of UNKNOWN type gets the type of the choice\n for item in choice.actual_symbols:\n if item.type == UNKNOWN:\n item.type = choice.type\n\n block.append(choice)\n\n elif t0 == T_MAINMENU:\n text = tokens.get_next()\n if self.mainmenu_text is not None:\n self._warn(\"overriding 'mainmenu' text. \"\n 'Old value: \"{0}\", new value: \"{1}\".'\n .format(self.mainmenu_text, text),\n line_feeder.filename, line_feeder.linenr)\n self.mainmenu_text = text\n\n else:\n _parse_error(line, \"unrecognized construct\",\n line_feeder.filename, line_feeder.linenr)", "title": "" } ]
f21ec1a454b7684d26c31c403a205e32
A callback called when device setup fails to connect after repeated tries
[ { "docid": "3d4150e47b5c449f1fe6ef81e9f5672e", "score": "0.6424305", "text": "def devices_scan_failed(self):\n pass", "title": "" } ]
[ { "docid": "6ef6703ff92b4c3939b494a6ae277183", "score": "0.6651631", "text": "def test_on_connect_error(self):\n # this assumed the ssdb server being tested against doesn't use 1023\n # port. An error should be raised on connect \n bad_connection = Connection(port=1023)\n bad_connection.connect()", "title": "" }, { "docid": "b46d3244f1db949eed1735145acad8d0", "score": "0.66482955", "text": "def connection_refused_handler(spawn):\n if spawn.device:\n spawn.device.api.execute_clear_line()\n spawn.device.connect()\n return\n raise Exception('Connection refused to device %s' % (str(spawn)))", "title": "" }, { "docid": "7c1f852634a59fe3c39994590ed7631c", "score": "0.659285", "text": "def connectSensorErrback(self, reason):\n print('Connection attempt to sensor failed: %s' %(reason.getErrorMessage()))", "title": "" }, { "docid": "b8ceb3aad3a1bd81a7265cf050c34873", "score": "0.6562177", "text": "async def test_setup_failure_on_connection(opp, caplog):\n\n patch_product_identify(None, side_effect=blebox_uniapi.error.ConnectionError)\n\n entry = mock_config()\n entry.add_to_opp(opp)\n\n caplog.set_level(logging.ERROR)\n await opp.config_entries.async_setup(entry.entry_id)\n await opp.async_block_till_done()\n\n assert \"Identify failed at 172.100.123.4:80 ()\" in caplog.text\n assert entry.state is ConfigEntryState.SETUP_RETRY", "title": "" }, { "docid": "6e6ca924d699daf28724dc244acb29b6", "score": "0.6475644", "text": "def addConnectionFailedCallback(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "64b69eefe5679ddcc5280459033850fb", "score": "0.644714", "text": "def connectionLost(reason):", "title": "" }, { "docid": "a3329a33416f7bf4146fce5169260f14", "score": "0.6442218", "text": "def handle_connect_failure(self):\n self._connected = False\n if not self._shutdown:\n _LOGGER.error('Unable to connect to IP Module. 
Reconnecting...')\n self._alarmPanel._loginTimeoutCallback(False)\n ensure_future(self.reconnect(self._alarmPanel.connection_timeout), loop=self._eventLoop)", "title": "" }, { "docid": "5f47c3cfd01e183bdc7a0f44851180d0", "score": "0.6427711", "text": "def connectionLost(self, reason):", "title": "" }, { "docid": "3fbd8d9a1e1821c186d11ab059269b92", "score": "0.6407843", "text": "def on_connection_failure(self, exc):\n self._record_exception(exc)\n time.sleep(self._RECONNECT_DELAY)\n sys.exit()", "title": "" }, { "docid": "998494a7ec477718693842f631779fbb", "score": "0.62489235", "text": "async def test_setup_failed_connect(opp, caplog):\n\n respx.get(\"http://localhost\").mock(\n side_effect=httpx.RequestError(\"server offline\", request=MagicMock())\n )\n assert await async_setup_component(\n opp,\n binary_sensor.DOMAIN,\n {\n \"binary_sensor\": {\n \"platform\": \"rest\",\n \"resource\": \"http://localhost\",\n \"method\": \"GET\",\n }\n },\n )\n await opp.async_block_till_done()\n assert len(opp.states.async_all()) == 0\n assert \"server offline\" in caplog.text", "title": "" }, { "docid": "0c348be3a2014fb3d90b00f8e01bd8e1", "score": "0.6194147", "text": "def connectionLost(self, reason):\n pass", "title": "" }, { "docid": "8f50797131aa438a7052dc4f99b4ff5e", "score": "0.6179521", "text": "def _connection_failed(self, failure, *args, **kwargs):\n log_msg(\"Port test failed to connect: %s\" % (failure), 1)\n self.probeRunning = False\n self.ignoreExits.append(self.circ.get_exit())\n if not self.nextScheduledTest or not self.nextScheduledTest.active():\n #dont count as a failure if the circuit is the thing that failed, not the connection:\n if self.circ.status != \"BUILT\":\n self.circuitFailures += 1\n #otherwise, make sure we get a new circuit to try with\n else:\n self.circ.close()\n self.circ = None\n #dont retry more than self.MAX_TESTS times in a row:\n if self.numProbes - self.circuitFailures < self.MAX_TESTS:\n self._schedule_next_test()\n else:\n log_msg(\"Retried self testing of reachability too many times, stopping.\", 1)\n self._on_unreachable(False)", "title": "" }, { "docid": "307b57c8eddaed0ea53af6180e432de3", "score": "0.611997", "text": "async def test_setup_failure(opp, caplog):\n\n patch_product_identify(None, side_effect=blebox_uniapi.error.ClientError)\n\n entry = mock_config()\n entry.add_to_opp(opp)\n\n caplog.set_level(logging.ERROR)\n await opp.config_entries.async_setup(entry.entry_id)\n await opp.async_block_till_done()\n\n assert \"Identify failed at 172.100.123.4:80 ()\" in caplog.text\n assert entry.state is ConfigEntryState.SETUP_RETRY", "title": "" }, { "docid": "c25746fe2bfa9dccdf0dea17066df518", "score": "0.6088787", "text": "def on_connect(client, userdata, flags, rc):\n if rc == 0:\n print(\"connected OK with returned code=\", rc)\n else:\n print(\"Bad connection with returned code=\", rc)", "title": "" }, { "docid": "e67b550e46046a3005bc4c2fdffc0d64", "score": "0.6082641", "text": "def conn_callback(self):\n print 'Connected to a random stranger!\\n'", "title": "" }, { "docid": "82e07f22e16b2dc6ad90b1bb162a0065", "score": "0.6041003", "text": "def Connect(self, retry):\n pass", "title": "" }, { "docid": "e2a11272666187a9074e2e008f18d0a0", "score": "0.6035678", "text": "def test_connect_exception(self):\n poller = MiFloraPoller(self.TEST_MAC, ConnectExceptionBackend)\n with self.assertRaises(BluetoothBackendException):\n poller.firmware_version()\n with self.assertRaises(BluetoothBackendException):\n poller.name()\n with 
self.assertRaises(BluetoothBackendException):\n poller.parameter_value(MI_MOISTURE)\n with self.assertRaises(BluetoothBackendException):\n poller.parameter_value(MI_MOISTURE)", "title": "" }, { "docid": "95f36bfc7940848a8bcbf8bccad9b6d9", "score": "0.6025926", "text": "def clientConnectionFailed(self, connector, reason):\n self._onConnection.errback(reason)", "title": "" }, { "docid": "8674e25b2cd3a1a658f3340d15d7c314", "score": "0.6003622", "text": "def on_connect(self, unused_client, unused_userdata, unused_flags, rc):\n self.print_debug('Connection Result:', error_str(rc))\n self.connected = True", "title": "" }, { "docid": "d0d446c2885413ad1e84f82b13269dd0", "score": "0.59826756", "text": "async def test_async_setup_entry_not_ready(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n entry: ConfigEntry = await async_init_integration(\n hass, aioclient_mock, error=\"cannot_connect\"\n )\n await hass.async_block_till_done()\n assert entry.state == ConfigEntryState.SETUP_RETRY", "title": "" }, { "docid": "0f480747e8747440742fbe871d120159", "score": "0.5979643", "text": "def on_reconnect(self):", "title": "" }, { "docid": "cb89c6af5489bebbc2e3e2bda220da47", "score": "0.595108", "text": "def test_connect_exception(self):\n poller = MiTempBtPoller(self.TEST_MAC, ConnectExceptionBackend, retries=0)\n with self.assertRaises(BluetoothBackendException):\n poller.firmware_version()\n with self.assertRaises(BluetoothBackendException):\n poller.name()\n with self.assertRaises(BluetoothBackendException):\n poller.parameter_value(MI_TEMPERATURE)\n with self.assertRaises(BluetoothBackendException):\n poller.parameter_value(MI_HUMIDITY)", "title": "" }, { "docid": "c8135fbe57e761e32d166ebef6ef50df", "score": "0.5943264", "text": "def clientConnectionLost(self, connector, reason): #{{{\n connector.connect()", "title": "" }, { "docid": "eca39338d01be2705630dd5b9fff588d", "score": "0.5935057", "text": "def test_failed_config(self):\n self._conn.execute(\"configure terminal\", allow_state_change=True)\n self._conn.execute(\"test failed\")\n self._conn.spawn.timeout = 60\n self._conn.enable()", "title": "" }, { "docid": "bc8f50cc745c6b3e1e03d7c134df275d", "score": "0.5917956", "text": "def check_for_setup_error(self):\n if not self.ra.is_pool_exists(self.pool):\n LOG.error(\"Setup is incorrect, please check connection settings.\")\n raise exception.VolumeDriverException(\"Bad configuration expected\")", "title": "" }, { "docid": "631654a538a1ec540f042da500487cd5", "score": "0.5916882", "text": "def connectionBroken(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "24b9a391eb61e807cb74f89725510e55", "score": "0.5900489", "text": "async def test_error_on_connection_failure(\n hass: HomeAssistant, mock_api: MagicMock\n) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n mock_api.side_effect = TransmissionConnectError()\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n MOCK_CONFIG_DATA,\n )\n\n assert result2[\"type\"] == FlowResultType.FORM\n assert result2[\"errors\"] == {\"base\": \"cannot_connect\"}", "title": "" }, { "docid": "2378f9c9799121c77bf66454ff88b974", "score": "0.58909976", "text": "async def test_device_setup_os_error(hass: HomeAssistant) -> None:\n device = get_device(\"Office\")\n mock_api = device.get_mock_api()\n mock_api.auth.side_effect = OSError()\n\n with patch.object(\n hass.config_entries, \"async_forward_entry_setup\"\n ) as mock_forward, 
patch.object(\n hass.config_entries.flow, \"async_init\"\n ) as mock_init:\n mock_setup = await device.setup_entry(hass, mock_api=mock_api)\n\n assert mock_setup.entry.state is ConfigEntryState.SETUP_RETRY\n assert mock_setup.api.auth.call_count == 1\n assert mock_forward.call_count == 0\n assert mock_init.call_count == 0", "title": "" }, { "docid": "480aae69089cb6801bb313cf4dad7b2d", "score": "0.5890543", "text": "def clientConnectionLost(self, connector, reason):\n connector.connect()", "title": "" }, { "docid": "480aae69089cb6801bb313cf4dad7b2d", "score": "0.5890543", "text": "def clientConnectionLost(self, connector, reason):\n connector.connect()", "title": "" }, { "docid": "480aae69089cb6801bb313cf4dad7b2d", "score": "0.5890543", "text": "def clientConnectionLost(self, connector, reason):\n connector.connect()", "title": "" }, { "docid": "480aae69089cb6801bb313cf4dad7b2d", "score": "0.5890543", "text": "def clientConnectionLost(self, connector, reason):\n connector.connect()", "title": "" }, { "docid": "480aae69089cb6801bb313cf4dad7b2d", "score": "0.5890543", "text": "def clientConnectionLost(self, connector, reason):\n connector.connect()", "title": "" }, { "docid": "480aae69089cb6801bb313cf4dad7b2d", "score": "0.5890543", "text": "def clientConnectionLost(self, connector, reason):\n connector.connect()", "title": "" }, { "docid": "480aae69089cb6801bb313cf4dad7b2d", "score": "0.5890543", "text": "def clientConnectionLost(self, connector, reason):\n connector.connect()", "title": "" }, { "docid": "480aae69089cb6801bb313cf4dad7b2d", "score": "0.5890543", "text": "def clientConnectionLost(self, connector, reason):\n connector.connect()", "title": "" }, { "docid": "6423d2face0ba7560cb851368bda102b", "score": "0.5879564", "text": "def Connect(self):\n error_msg = \"Failed to connect to remote device: \" + self.UNSUPPORTED_REASON\n logging.error(error_msg)\n raise BluefruitLEException(error_msg)", "title": "" }, { "docid": "fa3d5464c3e68a7d3cc7b4e67cc91b44", "score": "0.58723104", "text": "def _failure_handler(self) -> None:\n self._log.debug('DHCP request failed')\n self._write_ret(ovpn.CC_RET_FAILED)", "title": "" }, { "docid": "1c2036ce04e3102c35dd22553b110d05", "score": "0.58659947", "text": "def __initialConnectionRefused(self):\n self.__setConnected(False)", "title": "" }, { "docid": "efb70ce6a47a1eba0e7a2cddfec19ce0", "score": "0.58597976", "text": "async def test_device_setup_network_timeout(hass: HomeAssistant) -> None:\n device = get_device(\"Office\")\n mock_api = device.get_mock_api()\n mock_api.auth.side_effect = blke.NetworkTimeoutError()\n\n with patch.object(\n hass.config_entries, \"async_forward_entry_setup\"\n ) as mock_forward, patch.object(\n hass.config_entries.flow, \"async_init\"\n ) as mock_init:\n mock_setup = await device.setup_entry(hass, mock_api=mock_api)\n\n assert mock_setup.entry.state is ConfigEntryState.SETUP_RETRY\n assert mock_setup.api.auth.call_count == 1\n assert mock_forward.call_count == 0\n assert mock_init.call_count == 0", "title": "" }, { "docid": "32da673b59b16992899728759d40fdf4", "score": "0.58591765", "text": "def _finishInit(self, whenDone, skt, error, reactor):\n if whenDone:\n self._commonConnection.__init__(self, skt, None, reactor)\n reactor.callLater(0, whenDone)\n else:\n reactor.callLater(0, self.failIfNotConnected, error)", "title": "" }, { "docid": "7e4c265b9abf0129d59e9dcffddcd740", "score": "0.58291787", "text": "def clientConnectionLost(self, connector, reason):\n print \"connection lost: \", reason\n connector.connect()", 
"title": "" }, { "docid": "eab6e0cf299e658d3ff774b0bcafc618", "score": "0.5828817", "text": "def on_connect(client,userdata,flags,rc):\n if rc != 0:\n print(\"connection to MQTT failed: \" + connack_string(rc))\n exit(1)\n else:\n print('connected to MQTT!')", "title": "" }, { "docid": "dc5a8ce7ea6f37693079150ff48139ea", "score": "0.58196956", "text": "def connection_lost(self, exc):\n if exc:\n print(exc)\n print(\"Lost connection to serial device\")", "title": "" }, { "docid": "fb8a4edd959ae5fd6beb330e1923a797", "score": "0.58190507", "text": "def after_send_error(self, err):\n pass", "title": "" }, { "docid": "0f8f8a4b940f186af776fb4799df13d5", "score": "0.58111495", "text": "def on_connect(client, userdata, flags, rc):\r\n if rc == 0:\r\n print(\"Connected to \", LOCALHOST)\r\n else:\r\n print(\"Bad connection\")", "title": "" }, { "docid": "344ee7c9870714785f7400dd02693d0c", "score": "0.57919157", "text": "def check_for_setup_error(self):\n if self.lun_ostype not in self.ALLOWED_LUN_OS_TYPES:\n msg = _(\"Invalid value for NetApp configuration\"\n \" option netapp_lun_ostype.\")\n LOG.error(msg)\n raise na_utils.NetAppDriverException(msg)\n if self.host_type not in self.ALLOWED_IGROUP_HOST_TYPES:\n msg = _(\"Invalid value for NetApp configuration\"\n \" option netapp_host_type.\")\n LOG.error(msg)\n raise na_utils.NetAppDriverException(msg)\n lun_list = self.zapi_client.get_lun_list()\n self._extract_and_populate_luns(lun_list)\n LOG.debug(\"Success getting list of LUNs from server.\")\n self.loopingcalls.start_tasks()", "title": "" }, { "docid": "58917727f2131d697489f5e31959cfb8", "score": "0.5762713", "text": "def test_driver_check_for_setup_error_exception(self):\n\n # Since HPE3ParMediator is mocked, we'll hit the except/log.\n self.mock_object(hpe3pardriver, 'LOG')\n self.init_driver()\n self.driver.check_for_setup_error()\n expected_calls = [\n mock.call.debug('HPE3ParShareDriver SHA1: %s', mock.ANY),\n mock.call.debug('Source code SHA1 not logged due to: %s', mock.ANY)\n ]\n hpe3pardriver.LOG.assert_has_calls(expected_calls)", "title": "" }, { "docid": "410a74fdf6aa3a323ec3e35b2b084b1a", "score": "0.5761829", "text": "def _handler_lost_connection_enter(self, *args, **kwargs):\n super(PlatformAgent, self)._common_state_enter(*args, **kwargs)\n log.error(\"%r: (LC) lost connection to the device. 
Will attempt to reconnect...\",\n self._platform_id)\n\n self._event_publisher.publish_event(\n event_type='ResourceAgentConnectionLostErrorEvent',\n origin_type=self.ORIGIN_TYPE,\n origin=self.resource_id)\n\n # Setup reconnect timer.\n self._autoreconnect_greenlet = spawn(self._autoreconnect)", "title": "" }, { "docid": "9f353f3e29a2785f629a49f9795aeaaa", "score": "0.5735981", "text": "async def test_setup_failed(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n mocked_device = _create_mocked_device(throw_exception=True)\n entry = MockConfigEntry(domain=songpal.DOMAIN, data=CONF_DATA)\n entry.add_to_hass(hass)\n\n with _patch_media_player_device(mocked_device):\n await hass.config_entries.async_setup(entry.entry_id)\n await hass.async_block_till_done()\n all_states = hass.states.async_all()\n assert len(all_states) == 0\n assert \"[name(http://0.0.0.0:10000/sony)] Unable to connect\" in caplog.text\n assert \"Platform songpal not ready yet: Unable to do POST request\" in caplog.text\n assert not any(x.levelno == logging.ERROR for x in caplog.records)\n caplog.clear()\n\n utcnow = dt_util.utcnow()\n type(mocked_device).get_supported_methods = AsyncMock()\n with _patch_media_player_device(mocked_device):\n async_fire_time_changed(hass, utcnow + timedelta(seconds=30))\n await hass.async_block_till_done()\n all_states = hass.states.async_all()\n assert len(all_states) == 1\n assert not any(x.levelno == logging.WARNING for x in caplog.records)\n assert not any(x.levelno == logging.ERROR for x in caplog.records)", "title": "" }, { "docid": "fddf1257d77e9110022c8cb51a7282dc", "score": "0.57325023", "text": "async def test_config_entry_not_ready(hass: HomeAssistant) -> None:\n config_entry = mock_config_entry(unique_id=\"id_123_not_ready\")\n config_entry.add_to_hass(hass)\n with patch(\n \"homeassistant.components.rituals_perfume_genie.Account.get_devices\",\n side_effect=aiohttp.ClientError,\n ):\n await hass.config_entries.async_setup(config_entry.entry_id)\n assert config_entry.state is ConfigEntryState.SETUP_RETRY", "title": "" }, { "docid": "00a1dee6520e19e8674629cfac2d135f", "score": "0.57131076", "text": "def on_connect(client, userdata, flags, rc):\n print(\"connected with result code {}\".format(rc))", "title": "" }, { "docid": "330ebb43db70d24c45de90fdb1c2f633", "score": "0.57082", "text": "async def test_connection_failed(self):\n with patch(\n \"pyinsteon.protocol.protocol.publish_topic\", self.mock_publish_topic\n ):\n try:\n async with async_protocol_manager(connect=False, retry=False):\n await asyncio.sleep(0.1)\n assert False\n except ConnectionError:\n await asyncio.sleep(0.1)\n assert self.topic == \"connection.failed\"", "title": "" }, { "docid": "52317eedbce371c9fd92b82a4a388f0b", "score": "0.57081854", "text": "def onError(self, conn, exc):", "title": "" }, { "docid": "7462592c810b67da3c920c1361f76dc6", "score": "0.5680518", "text": "async def test_connection_error(hass):\n\n with patch(\n \"aioairzone.localapi_device.AirzoneLocalApi.validate_airzone\",\n side_effect=ClientConnectorError(MagicMock(), MagicMock()),\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_USER}, data=CONFIG\n )\n\n assert result[\"errors\"] == {\"base\": \"cannot_connect\"}", "title": "" }, { "docid": "0bcc08166bb77942566f0809e5217359", "score": "0.5672495", "text": "def DeviceException(self):\n self.dev.DeviceException()", "title": "" }, { "docid": "5de20bf45e13ce926f5c2ceb04bd9eed", "score": "0.56696653", "text": "async def 
test_dhcp_fails_to_connect(hass: HomeAssistant) -> None:\n\n with patch(\n \"homeassistant.components.radiotherm.data.radiotherm.get_thermostat\",\n side_effect=RadiothermTstatError,\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": config_entries.SOURCE_DHCP},\n data=dhcp.DhcpServiceInfo(\n hostname=\"radiotherm\",\n ip=\"1.2.3.4\",\n macaddress=\"aa:bb:cc:dd:ee:ff\",\n ),\n )\n await hass.async_block_till_done()\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.ABORT\n assert result[\"reason\"] == \"cannot_connect\"", "title": "" }, { "docid": "0c285dd86319782dc70c0d5b38adaf18", "score": "0.5658114", "text": "def on_connection_success(self):\n\n pass", "title": "" }, { "docid": "e72eea797f65e797945db8c63cd83e4e", "score": "0.56492656", "text": "def handle_not_connected(self):\n LOG.error('msm failed, network connection not available')\n self.next_download = time() + FIVE_MINUTES", "title": "" }, { "docid": "62f6f82d502a3061ef478d91f68226e1", "score": "0.5642949", "text": "def _handler_lost_connection_enter(self, *args, **kwargs):\n super(DataSetAgent, self)._common_state_enter(*args, **kwargs)\n log.error('Dataset agent %s lost connection to the device.',\n self._proc_name)\n\n self._event_publisher.publish_event(\n event_type='ResourceAgentConnectionLostErrorEvent',\n origin_type=self.ORIGIN_TYPE,\n origin=self.resource_id)\n\n # Setup reconnect timer.\n self._autoreconnect_greenlet = gevent.spawn(self._autoreconnect)", "title": "" }, { "docid": "9c5b3df988d0c98224bafe5f91375ccd", "score": "0.5627677", "text": "def register_ad_error_cb(error):\n print('Failed to register advertisement: ' + str(error))\n mainloop.quit()", "title": "" }, { "docid": "bbf4ccc0873d9e92da439b29f84b73b2", "score": "0.5625665", "text": "def onConnectionRefuse(self):", "title": "" }, { "docid": "6d236c806ca11a27e6f2eebc5b4cc7ed", "score": "0.5622576", "text": "def callback_connect(self):\n pass", "title": "" }, { "docid": "7c5897be5640089d7a586d06fa1a2220", "score": "0.5610685", "text": "def got_error(self, error):\n if isinstance(error, self.dbus.exceptions.DBusException) and \\\n error.get_dbus_name() == DBUS_UNKNOWN_SERVICE:\n logger.debug(\"Network Manager not present\")\n self.call_result_cb(UNKNOWN)\n else:\n logger.error(\"Error contacting NetworkManager: %s\" % \\\n str(error))\n self.call_result_cb(UNKNOWN)", "title": "" }, { "docid": "b000b5d2e3bac6c0670e3fe307acc35c", "score": "0.56089234", "text": "def _connection_failed(self, link_uri, msg):\n print('Connection to %s failed: %s' % (link_uri, msg))\n self.is_connected = False", "title": "" }, { "docid": "daf763ced6db021fbc1ff497e6b0943b", "score": "0.56058013", "text": "def failIfNotConnected(self, err):\n if (self.connected or self.disconnected or\n not hasattr(self, \"connector\")):\n return\n\n self._stopReadingAndWriting()\n try:\n self._closeSocket(True)\n except AttributeError:\n pass\n else:\n self._collectSocketDetails()\n self.connector.connectionFailed(failure.Failure(err))\n del self.connector", "title": "" }, { "docid": "81ced1aaf1fe94efa75ce25608893355", "score": "0.5605212", "text": "def _connection_failed(self, link_uri, msg):\n print(\"Connection to %s failed: %s\" % (link_uri, msg))\n self.is_connected = False", "title": "" }, { "docid": "3cb096a849659ddbc2f48eec8d344d62", "score": "0.55960256", "text": "def on_connect(self):\r\n return None", "title": "" }, { "docid": "3cb096a849659ddbc2f48eec8d344d62", "score": "0.55960256", "text": "def on_connect(self):\r\n return None", 
"title": "" }, { "docid": "82a45ccd2e4e4b3a7a3a1974bfd4093e", "score": "0.5592014", "text": "def connectionFailed(self, reason=None):\n msg = '[%s] Connection Failed: %s/%s'%(\n self.request.backend.base,\n self.request.backendServer.path, self.request.backend_uri)\n\n if reason:\n msg = '%s (%s)'%(msg, reason.getErrorMessage())\n log.debug(\"Connection Failed: \"+str(reason), 'Fetcher')\n log.err(msg)\n\n # Look for alternative fetchers\n if not self.request.activateNextBackendServer(self):\n # No more backends, send error response back to client\n if reason.check(error.ConnectError):\n self.setResponseCode(http.SERVICE_UNAVAILABLE, \"Connect Error\")\n else:\n self.setResponseCode(http.SERVICE_UNAVAILABLE)\n self.apDataReceived(\"\")\n self.apDataEnd(self.transfered, False)\n #Because of a bug in tcp.Client we may be called twice,\n #Make sure that next time nothing will happen\n #FIXME: This hack is probably not anymore pertinent.\n self.connectionFailed = lambda : log.debug('connectionFailed(2)',\n 'Fetcher','9')", "title": "" }, { "docid": "99ca652745e7cc4c8cd093c1e3ab6660", "score": "0.5584071", "text": "def test_driver_check_for_setup_error_success(self):\n\n # Generally this is always mocked, but here we reference the class.\n hpe3parmediator.HPE3ParMediator = self.real_hpe_3par_mediator\n\n self.mock_object(hpe3pardriver, 'LOG')\n self.init_driver()\n self.driver.check_for_setup_error()\n expected_calls = [\n mock.call.debug('HPE3ParShareDriver SHA1: %s', mock.ANY),\n mock.call.debug('HPE3ParMediator SHA1: %s', mock.ANY)\n ]\n hpe3pardriver.LOG.assert_has_calls(expected_calls)", "title": "" }, { "docid": "c44e996a2f9b1b96dd1896119fbd29a6", "score": "0.55826896", "text": "def exception_callback(self, exception):\n log.error('Exception detected in the driver', exc_info=True)\n self._fsm.on_event(ResourceAgentEvent.LOST_CONNECTION)", "title": "" }, { "docid": "df861c7b76afdff058e76553f064380c", "score": "0.5571946", "text": "async def test_device_setup_update_network_timeout(hass: HomeAssistant) -> None:\n device = get_device(\"Office\")\n mock_api = device.get_mock_api()\n mock_api.check_sensors.side_effect = blke.NetworkTimeoutError()\n\n with patch.object(\n hass.config_entries, \"async_forward_entry_setup\"\n ) as mock_forward, patch.object(\n hass.config_entries.flow, \"async_init\"\n ) as mock_init:\n mock_setup = await device.setup_entry(hass, mock_api=mock_api)\n\n assert mock_setup.entry.state is ConfigEntryState.SETUP_RETRY\n assert mock_setup.api.auth.call_count == 1\n assert mock_setup.api.check_sensors.call_count == 1\n assert mock_forward.call_count == 0\n assert mock_init.call_count == 0", "title": "" }, { "docid": "a497fb8d7c964038a1a5d24eb2f531ce", "score": "0.5570269", "text": "async def test_device_setup_authentication_error(hass: HomeAssistant) -> None:\n device = get_device(\"Living Room\")\n mock_api = device.get_mock_api()\n mock_api.auth.side_effect = blke.AuthenticationError()\n\n with patch.object(\n hass.config_entries, \"async_forward_entry_setup\"\n ) as mock_forward, patch.object(\n hass.config_entries.flow, \"async_init\"\n ) as mock_init:\n mock_setup = await device.setup_entry(hass, mock_api=mock_api)\n\n assert mock_setup.entry.state is ConfigEntryState.SETUP_ERROR\n assert mock_setup.api.auth.call_count == 1\n assert mock_forward.call_count == 0\n assert mock_init.call_count == 1\n assert mock_init.mock_calls[0][2][\"context\"][\"source\"] == \"reauth\"\n assert mock_init.mock_calls[0][2][\"data\"] == {\n \"name\": device.name,\n 
**device.get_entry_data(),\n }", "title": "" }, { "docid": "e2e8b9843877ad3d0599119f28c95953", "score": "0.55674", "text": "def test_connection_failure(self):\n self.assertTrue(Enigma2Error, lambda: enigma2.api.Enigma2Connection(host='1.1.1.1'))", "title": "" }, { "docid": "f270323f113d3126b4ad2f7975732386", "score": "0.5562889", "text": "async def test_async_setup_entry_auth_failed(hass: HomeAssistant) -> None:\n entry = create_entry(hass)\n with patch_interface() as interface:\n interface.side_effect = steam.api.HTTPError(\"401\")\n await hass.config_entries.async_setup(entry.entry_id)\n await hass.async_block_till_done()\n assert entry.state == ConfigEntryState.SETUP_ERROR\n assert not hass.data.get(DOMAIN)", "title": "" }, { "docid": "f447a97342cd26887d90165464282130", "score": "0.55579406", "text": "def connectionLost(self, reason):\n if self.delegate is not None:\n self.delegate.didLoseConnection(reason.getErrorMessage())", "title": "" }, { "docid": "2b8b579918a53b5438af4706310a2427", "score": "0.5549847", "text": "def post_connect_handler(self):\n pass", "title": "" }, { "docid": "2f2ed6b88820afb070fe6ed46ebaae2b", "score": "0.5541294", "text": "def connectionLost(self, reason):\n if not self.connected:\n self.failIfNotConnected(error.ConnectError(string=reason))\n else:\n self._commonConnection.connectionLost(self, reason)\n self.connector.connectionLost(reason)", "title": "" }, { "docid": "c21214779c2d6a66108cfcdffaddcd11", "score": "0.55395067", "text": "def on_fail(self, cntx):", "title": "" }, { "docid": "042acef581ab149bb5195d53a68b1e4b", "score": "0.553431", "text": "async def test_connection_errors_raise_not_ready(\n hass, config_entry, smartthings_mock):\n setattr(hass.config_entries, '_entries', [config_entry])\n api = smartthings_mock.return_value\n api.app.return_value = mock_coro(\n exception=ClientConnectionError())\n\n with pytest.raises(ConfigEntryNotReady):\n await smartthings.async_setup_entry(hass, config_entry)", "title": "" }, { "docid": "972eb9b76aac68a94ce9d9514767c42e", "score": "0.55272996", "text": "def __on_connect(self, client, userdata, flags, rc):\n logger.debug(\"Connected with rc code: \" + str(rc))\n if rc == 0:\n if self.is_initial_connect:\n self.__publish_bootinfo()\n self.is_initial_connect = False\n self.on_connect_callback()\n else:\n logger.error(\"Failed to connect to mqtt broker: \" + str(rc))", "title": "" }, { "docid": "099df4da2ad5cd5f0133747bf757bd0a", "score": "0.5499527", "text": "def clientConnectionFailed(self, connector, reason):\r\n if hasattr(self, \"server_restart_mode\"):\r\n self.maxDelay = 1\r\n else:\r\n self.maxDelay = 10\r\n self.portal.sessions.announce_all(\" ...\")\r\n protocol.ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)", "title": "" }, { "docid": "5697ba07f085e4851c4a57648fa7b067", "score": "0.5495648", "text": "def register_app_error_cb(error):\n print('Failed to register application: ' + str(error))\n mainloop.quit()", "title": "" }, { "docid": "527a980fa1c0d78bcce82092fa59eba7", "score": "0.54914784", "text": "async def _async_connect(self) -> None:\n LOGGER.debug(\"_async_connect: location: %s\", self._location)\n assert self._location, \"self._location has not been set before connect\"\n\n domain_data = get_domain_data(self.hass)\n try:\n device = await domain_data.upnp_factory.async_create_device(self._location)\n except UpnpError as err:\n raise ConnectError(\"cannot_connect\") from err\n\n if not DmrDevice.is_profile_device(device):\n raise ConnectError(\"not_dmr\")\n\n device = 
find_device_of_type(device, DmrDevice.DEVICE_TYPES)\n\n if not self._udn:\n self._udn = device.udn\n await self.async_set_unique_id(self._udn)\n\n # Abort if already configured, but update the last-known location\n self._abort_if_unique_id_configured(\n updates={CONF_URL: self._location}, reload_on_update=False\n )\n\n if not self._device_type:\n self._device_type = device.device_type\n\n if not self._name:\n self._name = device.name\n\n if not self._mac and (host := urlparse(self._location).hostname):\n self._mac = await _async_get_mac_address(self.hass, host)", "title": "" }, { "docid": "720b730e78a4c6f0a4d8cb236dcf5938", "score": "0.54846907", "text": "def on_mqtt_connect(client, userdata, flags, rc):\n print(\"MQTT connection successful\")", "title": "" }, { "docid": "c9a38cf5ba2d50f3e95cef7b40cac857", "score": "0.54777217", "text": "def connection_lost(self, ex: Optional[Exception]) -> None:\n _LOGGER.debug(\"WizProtocol connection lost: %s\", ex)", "title": "" }, { "docid": "ceea3844dadcbc463ec4081c101eb3e3", "score": "0.5477571", "text": "def clientConnectionFailed(self, connector, reason):\n reactor.stop()", "title": "" }, { "docid": "093be4f57b8b627e4273624eba16feb7", "score": "0.5475054", "text": "async def test_config_entry_retry(hass):\n already_migrated_config_entry = MockConfigEntry(\n domain=DOMAIN, data={CONF_HOST: IP_ADDRESS}, unique_id=MAC_ADDRESS\n )\n already_migrated_config_entry.add_to_hass(hass)\n with _patch_discovery(no_device=True), _patch_single_discovery(no_device=True):\n await async_setup_component(hass, tplink.DOMAIN, {tplink.DOMAIN: {}})\n await hass.async_block_till_done()\n assert already_migrated_config_entry.state == ConfigEntryState.SETUP_RETRY", "title": "" }, { "docid": "e0e05b60f952aa20b9c914b583240d3f", "score": "0.54732364", "text": "def on_retry(self, exc_info):\n ...", "title": "" }, { "docid": "a959dc74a92d947b160856c1cbc46aa6", "score": "0.5469504", "text": "def on_connect(client, userd, flags, rc):\n syslog.syslog(f\"MQTT connect rc: {str(rc)}\")", "title": "" }, { "docid": "83d85e3369c301b897490170e142e2fe", "score": "0.5468484", "text": "def test_connectionLost(self):\r\n self.request.connectionLost(Failure(CONNECTION_DONE))\r\n self.handler.lostReason.trap(ConnectionDone)", "title": "" }, { "docid": "34e810d79dc5af0db27ad342e186c235", "score": "0.54684293", "text": "def connectionInitialized():", "title": "" }, { "docid": "8c02916e987ae978a5d194b436db06d9", "score": "0.5462366", "text": "def connection_lost(self, exc):\n if not self.suspendAllOperations:\n log.error(\"ERROR Connection Lost : disconnected because the Ethernet/USB connection was externally terminated.\")\n self._performDisconnect(reason=\"termination\", exc=exc)", "title": "" }, { "docid": "97ddde240ce639e24419bce9dc87bbf5", "score": "0.5454659", "text": "def check_connection(self):\n self.connect()", "title": "" }, { "docid": "ee333a1db1a87d647a564224e6c30635", "score": "0.54460835", "text": "def test_connection_lost(gateway, connection_transport, reconnect_callback):\n assert gateway.tasks.transport.protocol.transport is None\n gateway.tasks.transport.connect()\n assert gateway.tasks.transport.protocol.transport is connection_transport\n gateway.tasks.transport.protocol.connection_lost(\"error\")\n assert connection_transport.serial.close.call_count == 1\n assert reconnect_callback.call_count == 1\n assert gateway.tasks.transport.protocol.transport is None", "title": "" }, { "docid": "2b240bb606ba7d77f90a5927c27907b0", "score": "0.5443004", "text": "def 
test_check_connect(nodaq):\n logger.debug('test_check_connect')\n with pytest.raises(RuntimeError):\n nodaq.wait()", "title": "" }, { "docid": "e63e4d159c35631c28ff284eb8cd375c", "score": "0.5441226", "text": "def check_device_state(self):\n\n if not (self.switch.cli.conn.check_client() and self.switch.cli.conn.check_shell()):\n try:\n self.switch.ui.connect()\n except (CLISSHException, SSHException, SocketError):\n self.switch.ui.disconnect()\n raise Exception(\"Device is not ready.\")\n\n # Add cli application check", "title": "" }, { "docid": "f7912fe97387787fb5827348571a222e", "score": "0.54365784", "text": "def connectError(self, reason):\n if self.stream_error_called:\n return\n # Before Twisted 11.x the xmlstream object was passed instead of the\n # disconnect reason. See http://twistedmatrix.com/trac/ticket/2618\n if not isinstance(reason, failure.Failure):\n reason_str = 'Reason unknown'\n else:\n reason_str = str(reason)\n\n # If the connection was established and lost, then we need to report\n # the error back to the client, since he needs to reauthenticate.\n # FIXME: If the connection was lost before anything happened, we could\n # silently retry instead.\n if self.verbose:\n log.msg('connect ERROR: %s' % reason_str)\n\n self.stopTrying()\n\n e = error.Error('remote-connection-failed')\n\n do_expire = True\n\n if self.waiting_requests:\n wr = self.waiting_requests.pop(0)\n wr.doErrback(e)\n else:\n # need to wait for a new request and then expire\n do_expire = False\n\n if self.pint and self.sid in self.pint.sessions:\n if do_expire:\n try:\n self.expire()\n except Exception:\n self.onExpire()\n else:\n s = self.pint.sessions.get(self.sid)\n s.stream_error = e", "title": "" }, { "docid": "16c81482f599a6d7eeef1194ab53f772", "score": "0.54336935", "text": "async def test_color_palette_select_connection_error(\n hass: HomeAssistant,\n mock_wled: MagicMock,\n) -> None:\n mock_wled.segment.side_effect = WLEDConnectionError\n\n with pytest.raises(HomeAssistantError, match=\"Error communicating with WLED API\"):\n await hass.services.async_call(\n SELECT_DOMAIN,\n SERVICE_SELECT_OPTION,\n {\n ATTR_ENTITY_ID: \"select.wled_rgb_light_segment_1_color_palette\",\n ATTR_OPTION: \"Icefire\",\n },\n blocking=True,\n )\n\n assert (state := hass.states.get(\"select.wled_rgb_light_segment_1_color_palette\"))\n assert state.state == STATE_UNAVAILABLE\n assert mock_wled.segment.call_count == 1\n mock_wled.segment.assert_called_with(segment_id=1, palette=\"Icefire\")", "title": "" } ]
63e2a9a9fcbb147fd55d6f76d423eb0b
Cache potential symbols in the symbol table.
[ { "docid": "f242891fa3149bdb7fe2262b204e019f", "score": "0.5748251", "text": "def _enumerateSymbols(self, machoCtx) -> None:\n\n\t\tsymtab: symtab_command = machoCtx.getLoadCommand(\n\t\t\t(LoadCommands.LC_SYMTAB,)\n\t\t)\n\t\tif not symtab:\n\t\t\tself._logger.warning(\"Unable to find LC_SYMTAB.\")\n\t\t\treturn\n\n\t\tlinkeditFile = machoCtx.ctxForAddr(\n\t\t\tmachoCtx.segments[b\"__LINKEDIT\"].seg.vmaddr\n\t\t)\n\n\t\tfor i in range(symtab.nsyms):\n\t\t\tself._statusBar.update()\n\n\t\t\t# Get the symbol and its address\n\t\t\tentryOff = symtab.symoff + (i * nlist_64.SIZE)\n\t\t\tsymbolEntry = nlist_64(linkeditFile.file, entryOff)\n\n\t\t\tsymbolAddr = symbolEntry.n_value\n\t\t\tsymbol = linkeditFile.readString(symtab.stroff + symbolEntry.n_strx)\n\n\t\t\tif symbolAddr == 0:\n\t\t\t\tcontinue\n\t\t\tif not machoCtx.containsAddr(symbolAddr):\n\t\t\t\tself._logger.warning(f\"Invalid address: {symbolAddr}, for symbol entry: {symbol}.\") # noqa\n\t\t\t\tcontinue\n\n\t\t\t# save it to the cache\n\t\t\tif symbolAddr in self._symbolCache:\n\t\t\t\tself._symbolCache[symbolAddr].append(bytes(symbol))\n\t\t\telse:\n\t\t\t\tself._symbolCache[symbolAddr] = [bytes(symbol)]\n\t\t\tpass\n\t\tpass", "title": "" } ]
[ { "docid": "5d0760fc5481f25d465307101e3bd6f3", "score": "0.66358835", "text": "def _load_symbols_cache(self, cache):\n\n #\n # Symbols\n #\n\n for sym, address in cache['symbols'].items():\n if self._process.file_type == \"PE\" or \\\n (self.elf is not None and self.elf.type_str == 'DYN'):\n address = address + self.base\n\n sym = symbols.Symbol(self._process, name=sym, address=address)\n self._process.modules._symbol_to_address[self.name][sym.name] = sym\n self._process.modules._address_to_symbol[address] = sym\n\n #\n # Sections\n #\n\n if 'plt_offset' in cache:\n self.plt = cache['plt_offset']\n\n #\n # PLT\n #\n\n if 'plt' in cache:\n for i, sym in enumerate(cache['plt']):\n addr = self.plt + ((i+1)*0x10)\n name = 'plt.' + sym\n sym = symbols.Symbol(self._process, name=name, address=addr)\n self._process.modules._symbol_to_address[self.name][name] = sym\n self._process.modules._address_to_symbol[addr] = sym", "title": "" }, { "docid": "fc0072c550ca48cc70aaac6d930d7679", "score": "0.65396005", "text": "def _symbols_update(self):\n self.symbols_sting = [str(sym) for sym in list(self.expression.free_symbols)]\n self.symbols_sting.sort()\n self.symbols = [symbols(char) for char in self.symbols_sting]", "title": "" }, { "docid": "d2fac262d57dfbd7413fb0e1458ac0e5", "score": "0.58945626", "text": "def update_local_symbols_dict(self, comp):\n pass", "title": "" }, { "docid": "324c3cf79b34a2def8b3b2a9e35f87fb", "score": "0.5756938", "text": "def makeMapSymbols(self):\n n = self.keepAnalysisCount\n pen = self.colorList.next()\n filledbrush = pen\n emptybrush = None\n symbol = self.symbolList.next()\n if n == 0:\n clearFlag = True\n else:\n clearFlag = False\n self.currentSymDict = {'pen': pen, 'filledbrush': filledbrush,\n 'emptybrush': emptybrush, 'symbol': symbol,\n 'n': n, 'clearFlag': clearFlag}", "title": "" }, { "docid": "de66b2e3a9f3773a3e811b658b69d887", "score": "0.56086206", "text": "def symbols_with_same_path(self):\n a, b = Symbol('spam.ham.eggs'), Symbol().spam.ham.eggs\n self.symbs = [a, b]\n return self.symbs", "title": "" }, { "docid": "9940dd1a84f08f221365a89a03aa1e00", "score": "0.5607992", "text": "def _load_symbols(self):\n\n # Either we're loading everything or what we're looking at right now __should__ be loaded\n if self._process._load_symbols is None or any(True for x in self._process._load_symbols if fnmatch(self.name, x)):\n\n # Clear out old symbols if needed\n self._process.modules._symbol_to_address[self.name] = {}\n\n file_io = common.load_file(self._process, self.path)\n cache = self._read_symbols_cache_dict(file_io)\n\n if cache is not None:\n return self._load_symbols_cache(cache)\n\n if self._process.file_type == 'ELF':\n self._load_symbols_elf(file_io)\n\n elif self._process.file_type == \"PE\":\n self._load_symbols_pe(file_io)\n\n # TODO: Windows\n # TODO: Mac\n\n # If we didn't resolve anything, make sure we noted we tried\n if self.name not in self._process.modules._symbol_to_address:\n self._process.modules._symbol_to_address[self.name] = {}", "title": "" }, { "docid": "8f157c87d5d45117e5803321caad471b", "score": "0.558471", "text": "def initialize_local_symbols_dict(self, comp, assigned_var):\n pass", "title": "" }, { "docid": "75423c89f4625aada9920528546243c0", "score": "0.5582418", "text": "def free_symbols(self) -> Set[str]:\n raise NotImplementedError('free_symbols not implemented by \"%s\"' % type(self).__name__)", "title": "" }, { "docid": "965c91085d8c0bf31b4959d99fb5b169", "score": "0.5560573", "text": "def _resolve_symbols(self):\n # TODO: 
Should attempt to fill self.imports in accordance with angr's expectations\n # note: no rebasing support\n\n # trigger parsing of exports\n self._parse_exports(self.export_blob)\n\n # Set up search tables for searching\n section_tab = [None] # first section is NO_SECT\n for seg in self.segments:\n section_tab.extend(seg.sections)\n\n # A new memory area is created to hold external (undefined) symbols\n ext_symbol_start = 0xff00000000000000 if self.arch.bits == 64 else 0xff000000\n ext_symbol_end = ext_symbol_start\n\n # Update symbol properties\n for sym in self.symbols:\n\n sym._is_export = sym.name in self.exports_by_name\n\n if sym.is_stab: # stab symbols are debugging information - don't care!\n l.debug(\"Symbol %r is debugging information, skipping\", sym.name)\n continue\n\n if sym.name in self.exports_by_name:\n l.debug(\"Symbol %r is an export\", sym.name)\n sym.is_export = True\n else:\n l.debug(\"Symbol %r is not an export\", sym.name)\n sym.is_export = False\n\n if sym.is_common:\n l.debug(\"Symbol %r is common, updating size\", sym.name)\n sym.size = sym.n_value\n\n if sym.sym_type == SYMBOL_TYPE_SECT:\n l.debug(\"Symbol %r is N_SECT, updating names and address\", sym.name)\n sec = section_tab[sym.n_sect]\n if sec is not None:\n sym.segment_name = sec.segname\n sym.section_name = sec.sectname\n\n sym.relative_addr = sym.n_value\n\n elif sym.is_import():\n l.debug(\"Symbol %r is imported!\", sym.name)\n sym.library_name = self.imported_libraries[sym.library_ordinal]\n\n # if the symbol has no address we assign one from the special memory area\n if sym.relative_addr is None:\n sym.relative_addr = ext_symbol_end\n ext_symbol_end += sym.size\n l.debug(\"Assigning address %#x to symbol %r\", sym.relative_addr, sym.name)\n\n # Add special memory area for symbols:\n self.memory.add_backer(ext_symbol_start, \"\\x00\" * (ext_symbol_end - ext_symbol_start))\n\n # Perform binding\n bh = BindingHelper(self) # TODO: Make this configurable\n bh.do_normal_bind(self.binding_blob)\n bh.do_lazy_bind(self.lazy_binding_blob)\n if self.weak_binding_blob is not None and len(self.weak_binding_blob) > 0:\n l.info(\"Found weak binding blob. 
According to current state of knowledge, weak binding \"\n \"is only sensible if multiple binaries are involved and is thus skipped.\")\n\n # Add to symbols_by_addr\n # All (resolvable) symbols should be resolved by now\n for sym in self.symbols:\n if not sym.is_stab:\n if sym.relative_addr is not None:\n self._symbols_by_addr[sym.relative_addr] = sym\n else:\n # todo: this might be worth an error\n l.warn(\"Non-stab symbol %r @ %#x has no address.\", sym.name, sym.symtab_offset)", "title": "" }, { "docid": "d3ad03e670aa88ff288689847e04e29b", "score": "0.5556712", "text": "def set_is_created_containing_these_symbols(self):\n self.set = set(self.symbs)", "title": "" }, { "docid": "caf0f32edf1c8d82e2ccdc12a58f6c73", "score": "0.5525948", "text": "def IncludingErased(self) -> SymbolTable:", "title": "" }, { "docid": "f4a6d64ce17e563d13be0060e89a0d4b", "score": "0.5494512", "text": "def warm_cache(self):\n words = self.good.keys() + self.rejected.keys()\n for word in words:\n self.rprobs[word] = self.rejected_given_word(word)\n \n return {'rprobs' : self.rprobs,\n 'good' : self.good,\n 'rejected' : self.rejected}", "title": "" }, { "docid": "605e4a336fe6563cbea85e4f3783316a", "score": "0.54501534", "text": "def _clear_cache(self):\n self.__symbolic_jacobian = None", "title": "" }, { "docid": "b2575be7d1b8a7fb0a119b9d250533d9", "score": "0.5398685", "text": "def prepare(self):\n self.find_symbol_table()\n if self._has_symbol is False:\n return None\n self.logger.debug(\"has_symbol: %s\" % self._has_symbol)\n self.get_symbol_table()", "title": "" }, { "docid": "3f7309b7112d6acecd555c8459b37ad8", "score": "0.5389967", "text": "def _read_symbol_table(self):\n section = self.get_section_by_name(\".symtab\")\n if not section:\n raise Exception(\"Missing symbol table\")\n\n if not isinstance(section, SymbolTableSection):\n raise Exception(\"Invalid symbol table section\")\n\n symbols = {}\n for symbol in section.iter_symbols():\n name_str = symbol.name\n if name_str in symbols:\n logging.debug(\"Duplicate symbol %s\", name_str)\n symbols[name_str] = SymbolSimple(name_str, symbol[\"st_value\"],\n symbol[\"st_size\"])\n return symbols", "title": "" }, { "docid": "070c063cecf57669722560a0981a8c7f", "score": "0.5354533", "text": "def free_symbols(self) -> Iterable[sympy.Symbol]:\n return get_free_symbols(self.params)", "title": "" }, { "docid": "b932cb1f8beb2aeb9636a4e4ae74432c", "score": "0.53413147", "text": "def cache_all_data(cls):\n from ir_log import IRProgressBar\n from ir_config import IRConfig\n from ir_mongodb_helper import IRCollection\n bug_id_name = IRConfig.get_instance().get('bug_id_name')\n summary_name = IRConfig.get_instance().get('bug_summary_name')\n description_name = IRConfig.get_instance().get('bug_description_name')\n tfidf_collection = IRCollection(\n 'bug_db_name', 'bug_tfidf_collection_name', 'r')\n cls.set_is_cache(True)\n cls.__cache = {}\n def iter_tfidf(bug):\n cls.__cache[bug[bug_id_name]] = (bug[summary_name],\n bug[description_name])\n IRProgressBar.execute_iteration_for_cursor(tfidf_collection.find(),\n iter_tfidf, \"Caching TFIDF\")", "title": "" }, { "docid": "ffa5e2ae072eba1df0a931b74f25cf0a", "score": "0.53243756", "text": "def symbolizeAddr(self, addr: int) -> List[bytes]:\n\t\tif addr in self._symbolCache:\n\t\t\treturn self._symbolCache[addr]\n\t\telse:\n\t\t\treturn None", "title": "" }, { "docid": "e731dd71a4be8fb6244ea9fd844f35a1", "score": "0.5317222", "text": "def non_trivial_symbol(self):\n a, b = Symbol('os.path.isfile'), Symbol('sys.executable')\n 
self.symbs = [a, b]\n return self.symbs", "title": "" }, { "docid": "9c88c13f1bb7c417c36d2ba46485d50e", "score": "0.5298955", "text": "def free_symbols(self) -> Set[sp.Basic]:\n return set(\n chain.from_iterable(\n state.get_free_symbols()\n for state in self.states() + self.algebraic_equations()\n )\n )", "title": "" }, { "docid": "d691e4e77a8abc1e93b5e4779cadd38e", "score": "0.52566", "text": "def set_symbols(self, symbols):\n\t\tself.symbols = symbols", "title": "" }, { "docid": "7b80bd13f5791b52d86bd5ea0f3cca52", "score": "0.5239692", "text": "def symbols(self):\n return self._symbols", "title": "" }, { "docid": "b28fafedbb403ba7dd3e4099813258b7", "score": "0.5223729", "text": "def getUncachedGameKeys(self):", "title": "" }, { "docid": "58eede2f0004b3c04cb3294c1207ac05", "score": "0.52194476", "text": "def getSymbols(self) -> List[ghidra.program.model.symbol.Symbol]:\n ...", "title": "" }, { "docid": "6a47dc84ed9e8628929172e06e72f2dd", "score": "0.5188628", "text": "def intern(self, string):\n if string in self.oblist:\n return self.oblist[string]\n sym = symbol(string)\n self.oblist[string] = sym\n return sym", "title": "" }, { "docid": "cee80301dcbdd3338e473d782cbbfed5", "score": "0.5178622", "text": "def scan_cache(force_update=False):\n\n global __CACHED_SCAN__\n if force_update or __CACHED_SCAN__ is None:\n __CACHED_SCAN__ = scan_system()\n return __CACHED_SCAN__", "title": "" }, { "docid": "bf685ba8aa951ff9e726e9efb8c31ad4", "score": "0.5176204", "text": "def symbols(self) -> set[Symbol]:\n\n # this fails because `free_symbols` is a dict on NumExpr but `set` on Expr\n\n symbols = set()\n for rhs in self.values():\n if isinstance(rhs, (Expr, MatrixBase)):\n symbols |= rhs.free_symbols\n else:\n try:\n symbols |= set(rhs.symbols)\n except AttributeError:\n # RHS doesnt have any symbols; for example might be a numpy array\n pass\n\n # symbols = getattr(rhs, 'free_symbols')\n # try:\n # # rhs is a sympy `Expr` and has `free_symbols` as a set\n # symbols |= rhs.free_symbols\n # except TypeError:\n # # rhs is a slimfit `NumExpr`\n # symbols |= set(rhs.symbols)\n # except AttributeError:\n # # RHS doesnt have any symbols; for example might be a numpy array\n # pass\n return symbols", "title": "" }, { "docid": "2e4416089cae9958605ad2d84b3bc489", "score": "0.51651824", "text": "def __init__(self):\n self._cache = {0: 0, 1: 1, 2: 1}", "title": "" }, { "docid": "4070f67b1073d8e761fc82f8cf7656e6", "score": "0.5159139", "text": "def get_priceinfo_cached(self, symbol, thresh):\n cached = self._get_cache_priceinfo(symbol, thresh)\n if not cached:\n cached = self.fetch_priceinfo(symbol)\n if cached:\n self._set_cache_priceinfo(symbol, cached)\n\n numfields = set(['open', 'high', 'low', 'price', 'volume', 'change', 'previous close'])\n return {k: Decimal(v) if k in numfields else v for k, v in cached.items()}", "title": "" }, { "docid": "3a89c9bd2dbf0876f75fb50d2b843d44", "score": "0.5138711", "text": "def symbol_values(self):\n return dict(zip(self.symbols, self.get_weights()[0]))", "title": "" }, { "docid": "a48824c4d9d1c838a72db8b7c5d0b4fd", "score": "0.51371515", "text": "def symbols(self):\n symbols = set()\n\n for word, transcriptions in self.entries.items():\n for t in transcriptions:\n symbols.update(t)\n\n return symbols", "title": "" }, { "docid": "d37d540188094bb50310ce54102d922c", "score": "0.51330703", "text": "def _enumerateSymbolPointers(self) -> Dict[bytes, Tuple[int]]:\n\n\t\t# read all the bind records as they're a source of symbolic info\n\t\tbindRecords: Dict[int, _BindRecord] 
= {}\n\t\tdyldInfo: dyld_info_command = self._machoCtx.getLoadCommand(\n\t\t\t(LoadCommands.LC_DYLD_INFO, LoadCommands.LC_DYLD_INFO_ONLY)\n\t\t)\n\n\t\tlinkeditFile = self._machoCtx.ctxForAddr(\n\t\t\tself._machoCtx.segments[b\"__LINKEDIT\"].seg.vmaddr\n\t\t)\n\n\t\tif dyldInfo:\n\t\t\trecords: List[_BindRecord] = []\n\t\t\ttry:\n\t\t\t\tif dyldInfo.weak_bind_size:\n\t\t\t\t\t# usually contains records for c++ symbols like \"new\"\n\t\t\t\t\trecords.extend(\n\t\t\t\t\t\t_bindReader(\n\t\t\t\t\t\t\tlinkeditFile,\n\t\t\t\t\t\t\tdyldInfo.weak_bind_off,\n\t\t\t\t\t\t\tdyldInfo.weak_bind_size\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t\t\tpass\n\n\t\t\t\tif dyldInfo.lazy_bind_off:\n\t\t\t\t\trecords.extend(\n\t\t\t\t\t\t_bindReader(\n\t\t\t\t\t\t\tlinkeditFile,\n\t\t\t\t\t\t\tdyldInfo.lazy_bind_off,\n\t\t\t\t\t\t\tdyldInfo.lazy_bind_size\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t\t\tpass\n\n\t\t\t\tfor record in records:\n\t\t\t\t\t# check if we have the info needed\n\t\t\t\t\tif (\n\t\t\t\t\t\trecord.symbol is None\n\t\t\t\t\t\tor record.segment is None\n\t\t\t\t\t\tor record.offset is None\n\t\t\t\t\t):\n\t\t\t\t\t\tself._logger.warning(f\"Incomplete lazy bind record: {record}\")\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tbindAddr = self._machoCtx.segmentsI[record.segment].seg.vmaddr\n\t\t\t\t\tbindAddr += record.offset\n\t\t\t\t\tbindRecords[bindAddr] = record\n\t\t\t\t\tpass\n\t\t\texcept KeyError as e:\n\t\t\t\tself._logger.error(f\"Unable to read bind records, reasons: {e}\")\n\t\t\tpass\n\n\t\t# enumerate all symbol pointers\n\t\tsymbolPtrs: Dict[bytes, List[int]] = {}\n\n\t\tdef _addToMap(ptrSymbol: bytes, ptrAddr: int, section: section_64):\n\t\t\tif ptrSymbol in symbolPtrs:\n\t\t\t\t# give priority to ptrs in the __auth_got section\n\t\t\t\tif section.sectname == b\"__auth_got\":\n\t\t\t\t\tsymbolPtrs[ptrSymbol].insert(0, ptrAddr)\n\t\t\t\telse:\n\t\t\t\t\tsymbolPtrs[ptrSymbol].append(ptrAddr)\n\t\t\telse:\n\t\t\t\tsymbolPtrs[ptrSymbol] = [ptrAddr]\n\t\t\tpass\n\n\t\tfor segment in self._machoCtx.segmentsI:\n\t\t\tfor sect in segment.sectsI:\n\t\t\t\tsectType = sect.flags & SECTION_TYPE\n\t\t\t\tif (\n\t\t\t\t\tsectType == S_NON_LAZY_SYMBOL_POINTERS\n\t\t\t\t\tor sectType == S_LAZY_SYMBOL_POINTERS\n\t\t\t\t):\n\t\t\t\t\tfor i in range(int(sect.size / 8)):\n\t\t\t\t\t\tself._statusBar.update(status=\"Caching Symbol Pointers\")\n\n\t\t\t\t\t\tptrAddr = sect.addr + (i * 8)\n\n\t\t\t\t\t\t# Try to symbolize through bind records\n\t\t\t\t\t\tif ptrAddr in bindRecords:\n\t\t\t\t\t\t\t_addToMap(bindRecords[ptrAddr].symbol, ptrAddr, sect)\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t# Try to symbolize though indirect symbol entries\n\t\t\t\t\t\tsymbolIndex = linkeditFile.readFormat(\n\t\t\t\t\t\t\t\"<I\",\n\t\t\t\t\t\t\tself._dysymtab.indirectsymoff + ((sect.reserved1 + i) * 4)\n\t\t\t\t\t\t)[0]\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\tsymbolIndex != 0\n\t\t\t\t\t\t\tand symbolIndex != INDIRECT_SYMBOL_ABS\n\t\t\t\t\t\t\tand symbolIndex != INDIRECT_SYMBOL_LOCAL\n\t\t\t\t\t\t\tand symbolIndex != (INDIRECT_SYMBOL_ABS | INDIRECT_SYMBOL_LOCAL)\n\t\t\t\t\t\t):\n\t\t\t\t\t\t\tsymbolEntry = nlist_64(\n\t\t\t\t\t\t\t\tlinkeditFile.file,\n\t\t\t\t\t\t\t\tself._symtab.symoff + (symbolIndex * nlist_64.SIZE)\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\tsymbol = linkeditFile.readString(\n\t\t\t\t\t\t\t\tself._symtab.stroff + symbolEntry.n_strx\n\t\t\t\t\t\t\t)\n\n\t\t\t\t\t\t\t_addToMap(symbol, ptrAddr, sect)\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t# Try to symbolize though the pointers target\n\t\t\t\t\t\tptrTarget = 
self._slider.slideAddress(ptrAddr)\n\t\t\t\t\t\tptrFunc = self._arm64Utils.resolveStubChain(ptrTarget)\n\t\t\t\t\t\tif symbols := self._symbolizer.symbolizeAddr(ptrFunc):\n\t\t\t\t\t\t\tfor sym in symbols:\n\t\t\t\t\t\t\t\t_addToMap(sym, ptrAddr, sect)\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t# Skip special cases like __csbitmaps in CoreFoundation\n\t\t\t\t\t\tif self._machoCtx.containsAddr(ptrTarget):\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\tself._logger.warning(f\"Unable to symbolize pointer at {hex(ptrAddr)}, with indirect entry index {hex(sect.reserved1 + i)}, with target function {hex(ptrFunc)}\") # noqa\n\t\t\t\t\t\tpass\n\t\t\t\t\tpass\n\t\t\t\tpass\n\t\t\tpass\n\n\t\treturn symbolPtrs", "title": "" }, { "docid": "bc18719db059b3a7738e714477916e7a", "score": "0.5131824", "text": "def clear_cache(self):\n self.phrase2vec_cache = {}\n self.membership_cache = {}\n self.interpret_cache = {}", "title": "" }, { "docid": "913b95798a15f24d84e6b7f4fe58be36", "score": "0.5119424", "text": "def reset_caches(self):\r\n with common.ignored(AttributeError):\r\n del self._used_names", "title": "" }, { "docid": "e3ad59819e704474430d4505e7607173", "score": "0.51084596", "text": "def get_sym_data(self, objective, constraints, cached_data):\n self.validate_cache(objective, constraints, cached_data)\n prob_data = cached_data[self.name()]\n if prob_data.sym_data is None:\n prob_data.sym_data = SymData(objective, constraints, self)\n return prob_data.sym_data", "title": "" }, { "docid": "c09a463f172d3ba34111a74651468369", "score": "0.5107634", "text": "def _reset_cache(self):\n\n\t\tsuper()._reset_cache()\n\t\tif self._initialized == False:\n\t\t\treturn\n\n\t\tself.register_buffer(\"_log_sqrt_dofs_pi_cov\", torch.log(torch.sqrt(\n\t\t\tself.dofs * math.pi * self.covs)))", "title": "" }, { "docid": "be98e8d62b3feafb7ef7f3559e6a8896", "score": "0.51061755", "text": "def _addSymbol(self, symbol):\n setattr(self, symbol, len(vars(self).keys()))", "title": "" }, { "docid": "49a36b30096f5ac81df973e5553a5f40", "score": "0.5098728", "text": "def update_symbols(self, current_thread_only):\n logging.info(\"Updating symbols\")\n mapped_files = _get_mapped_files()\n # Map all symbols from native libraries packages with the APK.\n for file_mappings in mapped_files:\n filename = file_mappings[0].filename\n if ((filename.startswith('/data/data/') or\n filename.startswith('/data/app')) and\n not filename.endswith('.apk') and\n not filename.endswith('.dex')):\n logging.info('Pre-mapping: %s' % file_mappings[0].filename)\n self._try_to_map(file_mappings)\n\n if current_thread_only:\n self._map_symbols_on_current_thread(mapped_files)\n else:\n logging.info('Updating all threads\\' symbols')\n current_thread = gdb.selected_thread()\n nb_threads = len(_gdb_execute(\"info threads\").split(\"\\n\")) - 2\n for i in xrange(nb_threads):\n try:\n _gdb_execute(\"thread %d\" % (i + 1))\n self._map_symbols_on_current_thread(mapped_files)\n except gdb.error:\n traceback.print_exc()\n current_thread.switch()", "title": "" }, { "docid": "bcf4fdc57e80ec04cb9f4678b9529ffc", "score": "0.50945777", "text": "def symbol_table(func):\n\n @functools.wraps(func)\n def inner(*args, **kwargs):\n return func(*args, symbol_table=_SYMBOL_TABLE, **kwargs)\n return inner", "title": "" }, { "docid": "eb24d4bf9b2c40aac6d559fbf5143a44", "score": "0.5071513", "text": "def __dirty(self): \n # You're a dirty, dirty slot table and you should be\n # ashamed of having outdated caches!\n self.availabilitycache = {}\n self.awcache_time = None\n self.awcache = None", 
"title": "" }, { "docid": "7e77509da934ca4c1ffc4716287111c5", "score": "0.50685626", "text": "def gensym ():\n global symcount\n sym = 'sym%i' % symcount\n symcount += 1\n return sym", "title": "" }, { "docid": "19a4efcf44e7f053bd4673c0e3e4fd18", "score": "0.50659806", "text": "def all_symbols(self):", "title": "" }, { "docid": "fc89c3a98c7f988cd8e6e978e44b1492", "score": "0.50572646", "text": "def new_symbol(s):\n if s not in symbol_table:\n symbol_table[s] = SAtom(s)\n return symbol_table[s]", "title": "" }, { "docid": "44a28ba2735621ae39b0fc0ed743bef5", "score": "0.50570285", "text": "def encodeSymbol(self, sym):\n return Hmm.__dicLookup(self._symbol_dic, sym)", "title": "" }, { "docid": "51813efb13dc378f21f0acf2645d4d84", "score": "0.5043085", "text": "def getSymbolsCache(self) -> \"DyldContext\":\n\n\t\tif not self._subCaches or not self.headerContainsField(\"symbolFileUUID\"):\n\t\t\treturn self\n\n\t\tfor cache in self._subCaches:\n\t\t\tif bytes(self.header.symbolFileUUID) == bytes(cache.header.uuid):\n\t\t\t\treturn cache\n\t\t\tpass\n\n\t\treturn None", "title": "" }, { "docid": "92ac29ff2f082812cd51450b38518a1c", "score": "0.5038338", "text": "def symbols(self):\n return [sympy.Symbol(x) for x in self._symbols_list]", "title": "" }, { "docid": "22b0c43122e94509c8d9bcf28e54a136", "score": "0.5021292", "text": "def get_symbol_table(self):\n if self.symbol_table_start and self.symbol_table_end:\n self._check_vxworks_endian()\n\n else:\n return False\n\n for i in range(self.symbol_table_start, self.symbol_table_end, self._symbol_interval):\n symbol_name_addr = self._firmware[i + 4:i + 8]\n symbol_dest_addr = self._firmware[i + 8:i + 12]\n if self.big_endian:\n unpack_format = '>I'\n else:\n unpack_format = '<I'\n symbol_name_addr = int(struct.unpack(unpack_format, symbol_name_addr)[0])\n self.logger.debug(\"symbol_name_addr: %s\" % symbol_name_addr)\n symbol_dest_addr = int(struct.unpack(unpack_format, symbol_dest_addr)[0])\n self.logger.debug(\"symbol_dest_addr: %s\" % symbol_dest_addr)\n self._symbol_table.append({\n 'symbol_name_addr': symbol_name_addr,\n 'symbol_name_length': None,\n 'symbol_dest_addr': symbol_dest_addr,\n 'offset': i\n })\n # self.logger.debug(\"self._symbol_table: %s\" % self._symbol_table)\n self.logger.debug(\"len(self._symbol_table): %s\".format(len(self._symbol_table)))\n self._symbol_table = sorted(self._symbol_table, key=lambda x: x['symbol_name_addr'])\n for i in range(len(self._symbol_table) - 1):\n self._symbol_table[i]['symbol_name_length'] = self._symbol_table[i + 1]['symbol_name_addr'] - \\\n self._symbol_table[i]['symbol_name_addr']\n self.logger.debug(\"len(self._symbol_table): %s\".format(len(self._symbol_table)))\n return True", "title": "" }, { "docid": "8e0a1f016789fa9491981cf70c8dc49e", "score": "0.50072587", "text": "def _pdb_sccs_cache(pdbid):\n global db,pdb,chain_lengths\n \n # Each process has to keep it's own global dictionary because of\n # concurrency issues.\n if pdb == None:\n pdb = {}\n if chain_lengths == None:\n chain_lengths = {}\n \n # Search for the pdb\n if not pdb.has_key(pdbid):\n if db==None:\n db = _connection()\n cursor = db.cursor()\n try:\n query = \"select part_text, sccs from scop_cla where pdb='%s'\" % pdbid\n cursor.execute(query)\n all = cursor.fetchall()\n # Parse parts and chains\n domains = []\n for txt, sccs in all:\n sccs = sccs.strip()\n if len(sccs) == 0:\n sccs = None\n s = SCOPDomain(pdbid, sccs, part_text=txt)\n for part in s.parts:\n # Determine the length of the chain for parts that might not\n # have 
a start/stop defined.\n if not chain_lengths.has_key((pdbid,part.chain)):\n query = \"select length(sequence) from pdbIndex i join pdbSeqRes r on i.id=r.pdb_key join sequence s on r.sequence_key=s.id where i.pdbId='%s' and r.chain='%s'\" % (pdbid,part.chain)\n #print query\n cursor.execute(query)\n chain_len = cursor.fetchone()[0]\n chain_lengths[(pdbid,part.chain)]=chain_len\n else:\n chain_len = chain_lengths[(pdbid,part.chain)]\n # Tell the par the length of the chains\n part.chain_len = chain_len\n domains.append(s)\n pdb[pdbid]=list(set(domains))\n finally:\n cursor.close()", "title": "" }, { "docid": "650880fe41cb4f84c784371c398c1536", "score": "0.5006173", "text": "def symbol_fiducials(self):\n return dict(zip(self.symbol_map.sym_list, self.symbol_map.ival_list))", "title": "" }, { "docid": "f373137e1511f0f4611c8ccd33fd5322", "score": "0.49984825", "text": "def enlistSymbols(self ):\n for e in self.Edges:\n if e[2] not in self.symbols and e[2]!='ε':\n self.symbols.append(e[2])", "title": "" }, { "docid": "80be30f6a5b6d16a3c5b3a896299f9b2", "score": "0.4996974", "text": "async def cache_instruments(\n self,\n require: Optional[Dict[Mount, PipetteName]] = None,\n ) -> None:\n ...", "title": "" }, { "docid": "5f665bf932835ff2bbeb7ef5b879a8a9", "score": "0.4989114", "text": "def _reset(self):\n self.cache = SmtLibExecutionCache()\n self.logic = None\n self._current_env = self.pysmt_env\n\n mgr = self.pysmt_env.formula_manager\n self.cache.update({'+':mgr.Plus,\n '-':self._minus_or_uminus,\n '*':mgr.Times,\n '/':self._division,\n '>':mgr.GT,\n '<':mgr.LT,\n '>=':mgr.GE,\n '<=':mgr.LE,\n '=':self._equals_or_iff,\n 'not':mgr.Not,\n 'and':mgr.And,\n 'or':mgr.Or,\n 'xor':mgr.Xor,\n '=>':mgr.Implies,\n '<->':mgr.Iff,\n 'ite':mgr.Ite,\n 'false':mgr.FALSE(),\n 'true':mgr.TRUE(),\n 'to_real':mgr.ToReal,\n 'concat':mgr.BVConcat,\n 'bvnot':mgr.BVNot,\n 'bvand':mgr.BVAnd,\n 'bvor':mgr.BVOr,\n 'bvneg':mgr.BVNeg,\n 'bvadd':mgr.BVAdd,\n 'bvmul':mgr.BVMul,\n 'bvudiv':mgr.BVUDiv,\n 'bvurem':mgr.BVURem,\n 'bvshl':mgr.BVLShl,\n 'bvlshr':mgr.BVLShr,\n 'bvsub':mgr.BVSub,\n 'bvult':mgr.BVULT,\n 'bvxor':mgr.BVXor,\n '_':self._smtlib_underscore,\n # Extended Functions\n 'bvnand':mgr.BVNand,\n 'bvnor':mgr.BVNor,\n 'bvxnor':mgr.BVXnor,\n 'bvcomp':mgr.BVComp,\n 'bvsdiv':mgr.BVSDiv,\n 'bvsrem':mgr.BVSRem,\n 'bvsmod':mgr.BVSMod,\n 'bvashr':mgr.BVAShr,\n 'bvule':mgr.BVULE,\n 'bvugt':mgr.BVUGT,\n 'bvuge':mgr.BVUGE,\n 'bvslt':mgr.BVSLT,\n 'bvsle':mgr.BVSLE,\n 'bvsgt':mgr.BVSGT,\n 'bvsge':mgr.BVSGE,\n })", "title": "" }, { "docid": "a25a01fd803bb24f56de3aedb69ef122", "score": "0.4986482", "text": "def _save_symbols_cache(self, file_io, cache):\n\n file_io.seek(0, 0)\n h = hashlib.sha256(file_io.read()).hexdigest()\n file_io.seek(0, 0)\n\n this_cache_file = os.path.join(symbol_cache_path, h)\n with open(this_cache_file, \"w\") as f:\n f.write(json.dumps(cache))", "title": "" }, { "docid": "8f64f2a05537ba8241e7cd04ded4dc41", "score": "0.4980934", "text": "def unset_symbol(self):\n\t\tself.symbols = None", "title": "" }, { "docid": "0cb36f386662591ece2d36147dc26e2f", "score": "0.4958083", "text": "def _reset_caches(self):\n self._total_nb_possibilities = None\n self._cached_examples = []", "title": "" }, { "docid": "5d722e08f7b423f336e40835591af460", "score": "0.49553323", "text": "def intern(name):\n if isinstance(name, SchemeSymbol):\n if str(name) in _all_symbols:\n return _all_symbols[str(name)]\n else:\n _all_symbols[str(name)] = name\n return name\n if name in _all_symbols:\n return _all_symbols[name]\n else:\n sym 
= SchemeSymbol(name)\n _all_symbols[name] = sym\n return sym", "title": "" }, { "docid": "4545e9ddbf6793f87760186b40dc183b", "score": "0.49532443", "text": "def fill_cache(self, X):\n cache = dict(lX=self._predict(X))\n cache[\"aX\"] = self.activation_(cache[\"lX\"])\n return cache", "title": "" }, { "docid": "897ffa50017b0af2a87db6cdc4972570", "score": "0.49398854", "text": "def init_cache():\n global memory_cache\n with session_scope() as session:\n last_req_dict = dict(get_last_requested(session))\n hits_dict = dict(get_endpoints_hits(session))\n averages_dict = dict(get_endpoint_averages(session))\n for rule in get_rules():\n memory_cache[rule.endpoint] = EndpointInfo(\n last_requested=last_req_dict.get(rule.endpoint),\n average_duration=averages_dict.get(rule.endpoint),\n hits=hits_dict.get(rule.endpoint),\n )", "title": "" }, { "docid": "d82a1e7eae31a22371aaa9f3ea91fcb2", "score": "0.4927952", "text": "def _calculate_initial_bought(self):\r\n bought = {} \r\n for s in self.symbol_list:\r\n bought[s] = 'OUT'\r\n return bought", "title": "" }, { "docid": "332c1a1ae5a5c92d9cc46a99be8dc6be", "score": "0.49105838", "text": "def symbols(self):\n raise NotImplementedError()", "title": "" }, { "docid": "fd9b7c2e38a94bdf7c4e2745766f7ef1", "score": "0.49104315", "text": "def find_symbol_table(self):\n for offset in range(len(self._firmware)):\n if self.symbol_table_start is None:\n if self._check_symbol_format(offset):\n self.logger.info(\"symbol table start offset: %s\" % (hex(offset)))\n self.symbol_table_start = offset\n self._has_symbol = True\n break\n else:\n break\n\n if self.symbol_table_start:\n for i in range(self.symbol_table_start, len(self._firmware), self._symbol_interval):\n check_data = self._firmware[i:i + self._symbol_interval]\n if len(check_data) < self._symbol_interval:\n self.logger.debug(\"Check_data length is too small\")\n break\n\n if len(check_data) < self._symbol_interval:\n self.logger.debug(\"Check_data length is too small\")\n break\n\n if self._check_symbol_format_simple(check_data):\n self.symbol_table_end = i + self._symbol_interval\n self.logger.debug(\"self.symbol_table_end: {:010x}\".format(self.symbol_table_end))\n\n else:\n self.logger.info(\"symbol table end offset: %s\" % hex(self.symbol_table_end))\n break\n else:\n self.logger.error(\"didn't find symbol table in this image\")\n self._has_symbol = False", "title": "" }, { "docid": "52da5b440a1b2e652cc17bcc84e5f433", "score": "0.49103576", "text": "def freeze(self) -> None:\n self.__frozen = True\n if self.__key_expr:\n fce = make_lambda(self.__key_expr)\n self.__map = dict(((key, index) for index, key in enumerate((fce(item) for item in self))))", "title": "" }, { "docid": "27119c3b7afe9e18f823dc9b91cbee54", "score": "0.48837", "text": "def symbol_cost(self, symbol, seen=set()):\n expansions = self.v['grammar'][symbol]\n return min(self.expansion_cost(e, seen | {symbol}) for e in expansions)", "title": "" }, { "docid": "a4c988225c3599bacdbb01f6ff463cfb", "score": "0.48803112", "text": "def gen_unused_symbols(used, n):\n i = cnt = 0\n while cnt < n:\n s = get_symbol(i)\n i += 1\n if s in used:\n continue\n yield s\n cnt += 1", "title": "" }, { "docid": "2443fcee79c9db7f847b7ec04d5397f3", "score": "0.48796088", "text": "def manin_symbols(self):\n try:\n return self.__manin_symbols\n except AttributeError:\n self.__manin_symbols = ManinSymbolList_gamma_h(\n group=self.group(), weight=self.weight())\n return self.__manin_symbols", "title": "" }, { "docid": "0626a4682147c14e09a7ab19fb2fbdff", "score": 
"0.48670635", "text": "def init_word_cache(self, length=10000):\n for i in range(0, length):\n self.word_cache.append(self.gen_word())", "title": "" }, { "docid": "ff9958340cc4cd6f309cc930fc287a99", "score": "0.4865642", "text": "def order_collected_historical_data(self):\n for symbol_name in SYMBOLS:\n self.fetched_data[symbol_name] = OrderedDict(sorted(self.fetched_data[symbol_name].items()))", "title": "" }, { "docid": "1e8ff1c58af90a0b648852b5b3e536af", "score": "0.48649782", "text": "def lookup_symbol(self, symbol):\n try:\n s = self.reverse_grammar[symbol]\n return s\n except KeyError:\n return set()", "title": "" }, { "docid": "4f38bd984bb87eafb2a65e68f1f1337b", "score": "0.48638475", "text": "def clearcache():\r\n\r\n global cache\r\n cache = {}", "title": "" }, { "docid": "49c43897503206fba647c250c27e88ec", "score": "0.48610923", "text": "def do_symbols(self, arg):\n symbols = db(db.symbols).select()\n if len(symbols) == 0:\n print('%s %s' % (self.out, 'no symbols stored'))\n return\n for i, sym in enumerate(symbols):\n print('%s %3s: %s - %s' % (self.out, i, sym.ticker, sym.name))", "title": "" }, { "docid": "4b8a32c88ff779907f03763270b410f2", "score": "0.4859931", "text": "def _load_symbol_data(self):\n return False", "title": "" }, { "docid": "5cc3762142a1e458e72d97eaa8e1277f", "score": "0.48566782", "text": "def _reset_caches(self):\n self._initialized = False\n self._is_hermitian = None", "title": "" }, { "docid": "049bc1db08196bea951d633202c589db", "score": "0.48480937", "text": "def gen_symbol_table(min_size=1,max_size=50):\n table = {}\n\n i = randint(min_size,max_size)\n\n \n while i > 0:\n symbol = gen_rand_symbol(randint(3,3))\n if not symbol in table:\n table[symbol] = 0\n i -= 1\n return table", "title": "" }, { "docid": "0a3e8c26f0db20c4764de56c61f25d66", "score": "0.48405343", "text": "def store_matches(\n self,\n atom_slots: dict[TopologyKey, PotentialKey],\n topology: \"Topology\",\n ) -> None:\n for proper in topology.propers:\n atom_indices = tuple(topology.atom_index(atom) for atom in proper)\n top_key = TopologyKey(atom_indices=atom_indices)\n\n pot_key_ids = tuple(\n _get_potential_key_id(atom_slots, idx) for idx in atom_indices\n )\n\n self.key_map[top_key] = PotentialKey(\n id=POTENTIAL_KEY_SEPARATOR.join(pot_key_ids),\n )", "title": "" }, { "docid": "83176714dca3497dc482b278cbb20df0", "score": "0.4837721", "text": "def sym_names(self) -> List[str]:\n return list(self._syms.keys())", "title": "" }, { "docid": "deea379dd8c3c83f12c64ca872b77167", "score": "0.48372594", "text": "def optimize_lookups(self):\n if self._lookup_dict is not None:\n return\n self._list = sorted(self._list, key=lambda x: x.begin)\n if len(self._list) > 1000:\n self._lookup_dict = {}\n cur = 0\n while cur < len(self._list):\n self._lookup_dict[cur] = self._list[cur].begin\n cur += 1000", "title": "" }, { "docid": "d31d56f47fae019ab516ba7a67746e70", "score": "0.48329672", "text": "def cache(self):\n cached_data = self.state_dict()\n cached_data['epoch'] = self.epoch\n cached_data['best_val_error'] = self._best_val_error\n cached_data['best_loss'] = self._best_loss\n self._cacher.save(cached_data)\n self._cacher_params.save(self.hyperparams.hashable_str)", "title": "" }, { "docid": "cc63e587e1ed72bdef9f2f0965265c05", "score": "0.48305044", "text": "def _reset_cache(self):\n\n self._noint_mask = None\n self._escat_mask = None\n self._escatonly_mask = None\n self._line_mask = None\n self._lam_escat = None\n self._lam_noint = None\n self._weights_escat = None\n self._weights_noint = None\n 
self._line_in_infos = None\n self._line_in_nu = None\n self._line_in_L = None\n self._line_out_infos = None\n self._line_out_nu = None\n self._line_out_L = None", "title": "" }, { "docid": "763413c7d471d5ac0c75ff50214fc6b2", "score": "0.48247024", "text": "def _read_symbols_cache_dict(self, f):\n\n if f is None:\n return None\n\n assert isinstance(f, io.IOBase), 'Unhandled symbol cache load type of {}'.format(type(f))\n\n f.seek(0, 0)\n h = hashlib.sha256(f.read()).hexdigest()\n f.seek(0, 0)\n\n this_cache_file = os.path.join(symbol_cache_path, h)\n\n # Cache miss\n if not os.path.isfile(this_cache_file):\n return None\n\n # Cache hit\n with open(this_cache_file, \"r\") as f:\n return json.loads(f.read())", "title": "" }, { "docid": "cd5a8eb9d63818ecb3fdf6139075ac87", "score": "0.4820459", "text": "def construct_symbol_table(filename):\n pass", "title": "" }, { "docid": "bdd9db309fa25353d7cf8af32d86bbcc", "score": "0.48204294", "text": "def replace_with_cached(\n self, expr: expressions.Expression) -> Dict[str, expressions.Expression]:\n\n replaced_inputs: Dict[str, expressions.Expression] = {}\n self._replace_with_cached_recur(expr, replaced_inputs)\n return replaced_inputs", "title": "" }, { "docid": "eecf32044d99d6808e3c23db2667223b", "score": "0.48199284", "text": "def manin_symbols(self):\n try:\n return self.__manin_symbols\n except AttributeError:\n self.__manin_symbols = ManinSymbolList_gamma0(\n level=self.level(), weight=self.weight())\n return self.__manin_symbols", "title": "" }, { "docid": "bdfca7e1bf42123fc0ddc4ca98b749d4", "score": "0.48153427", "text": "def reinit(self):\n self.mem = {i: [] for i in range(self.env.nbarms)}", "title": "" }, { "docid": "567c52896377a51900de203c1648781c", "score": "0.48125237", "text": "def __hash__(self):\n return hash(('Atom', self.symbol))", "title": "" }, { "docid": "32aadcdefb03a2e9c5fb704dd49d91ba", "score": "0.48118928", "text": "def _calculate_initial_bought(self):\n bought = {}\n for s in self.symbol_list:\n bought[s] = 'OUT'\n return bought", "title": "" }, { "docid": "7945e6f18fde97ced93d36260aa22fba", "score": "0.48061186", "text": "def create_default_symbol_table():\n\n st = SymbolTable({\n 'SP': 0,\n 'LCL': 1,\n 'ARG': 2,\n 'THIS': 3,\n 'THAT': 4,\n 'SCREEN': 16384,\n 'KBD': 24576,\n })\n\n st.update({'R{}'.format(r): r\n for r in range(16)})\n\n return st", "title": "" }, { "docid": "f7029f3237c4bce00f5971226320d92e", "score": "0.48026088", "text": "def _populateCache(self):\n cacheSize = self.len/3 + 1\n self.sort(self._compareBy(\"hits\"))\n for i in range(cacheSize):\n self.cache.append(self.words[i])", "title": "" }, { "docid": "f3fdca96dec7badeeb66bfc23282910c", "score": "0.4795915", "text": "def store_matches(\n self,\n atom_slots: dict[TopologyKey, PotentialKey],\n topology: \"Topology\",\n ) -> None:\n for bond in topology.bonds:\n atom_indices = (\n topology.atom_index(bond.atom1),\n topology.atom_index(bond.atom2),\n )\n top_key = TopologyKey(atom_indices=atom_indices)\n\n pot_key_ids = tuple(\n _get_potential_key_id(atom_slots, idx) for idx in atom_indices\n )\n\n self.key_map[top_key] = PotentialKey(\n id=POTENTIAL_KEY_SEPARATOR.join(pot_key_ids),\n )", "title": "" }, { "docid": "04096965f8cfa97e4588833da73b4295", "score": "0.47878587", "text": "def isBackedByLocalSymbolMap(self) -> bool:\n ...", "title": "" }, { "docid": "c485b9b330cbfdf325ca87146a0bbbd3", "score": "0.47812495", "text": "def find_symbols(symbol_dirs):\r\n known_symbols = {}\r\n for symbol_dir in symbol_dirs:\r\n if os.path.exists(symbol_dir):\r\n 
for dirpath, dirnames, filenames in os.walk(symbol_dir):\r\n dirnames.sort()\r\n for filename in filenames:\r\n if filename.endswith('.sym'):\r\n filepath = os.path.join(dirpath, filename)\r\n filename, _ = os.path.splitext(filename)\r\n if filename.lower() not in known_symbols:\r\n known_symbols[filename.lower()] = filepath\r\n\r\n return known_symbols", "title": "" }, { "docid": "129d62e7da921ea76970fdc1088aa338", "score": "0.47739196", "text": "def createCache(self):\n \n for i in range(2**self.index):\n self._cache.append({})", "title": "" }, { "docid": "ba273d0dd88c988724380e1a8b93c076", "score": "0.47723415", "text": "def UpdateAndAttachSymbolTable(self, oldTable):\n\n symbols = list(self.ReturnSymbols())\n if len(symbols) == 0:\n self.symTable = oldTable\n return\n\n newProperties = {sym:[False, -1] for sym in symbols}\n\n outSymbol = self.dest.value\n\n if self.dest.is_SCALAR_VARIABLE():\n newProperties[outSymbol] = [False, -1]\n\n # Rest of the symbols count as a \"use\"\n for sym in symbols:\n if sym == outSymbol:\n continue\n\n newProperties[sym] = [True, self.lineID]\n\n # Check if the outSymbol is used as an input as well\n if self.dest.is_SCALAR_VARIABLE():\n if (self.inp1.value == outSymbol or \n self.inp2.value == outSymbol or \n outSymbol in [i.value for i in self.IOArgs]):\n\n newProperties[outSymbol] = [True, self.lineID] \n\n\n self.symTable = BB.SymSetProperties(oldTable, newProperties)", "title": "" }, { "docid": "5b16a34a45e8737d3f6d13717c611a97", "score": "0.47612625", "text": "def add_symbol(self, symbol):\n\t\tsymbol = symbol.lower()\n\t\tif symbol not in self.symbols:\n\t\t\tself.symbols.append(symbol)\n\t\treturn True", "title": "" }, { "docid": "70908542bd1317d7797375be2ecddf4b", "score": "0.47581547", "text": "def bomb_with_cache(self, x, y):\n try:\n return self.cache[(x, y)]\n except AttributeError:\n self.cache = {}\n return self.bomb(x, y)\n except KeyError:\n r = self._bomb(x, y)\n self.cache[(x, y)] = r\n return r", "title": "" }, { "docid": "f322550399b2a0f8de51b149ab475a76", "score": "0.47405848", "text": "def manin_symbols(self):\n try:\n return self.__manin_symbols\n except AttributeError:\n self.__manin_symbols = ManinSymbolList_gamma1(\n level=self.level(), weight=self.weight())\n return self.__manin_symbols", "title": "" }, { "docid": "ecc786710b87dc9540082526dafa6058", "score": "0.47348714", "text": "def _symbol(self, symbol):\n\n if self.current_const != \"\":\n if len(self.current_vars) == 1: # Is a coefficient\n self.current_vars[0].val = str(int(self.current_vars[0].val) *\n int(self.current_const))\n else: # Is an exponent\n self.current_vars[-1].power = int(self.current_const)\n self.current_const = \"\"\n\n if len(self.current_vars) > 0:\n current_term = Term({})\n while len(self.current_vars) > 0:\n current_term.set_var(self.current_vars.pop())\n self.poly.add_term(current_term)\n current_var = Variable(val=symbol+\"1\")\n self.current_vars.append(current_var)", "title": "" }, { "docid": "c63c35d1bcf98b0b00bf74ae94690cd6", "score": "0.47338057", "text": "def set_symbols(self, symbols_str=None):\n symbols_list = []\n if symbols_str:\n for i in symbols_str:\n symbols_list.append(i)\n self.symbols_list = symbols_list", "title": "" }, { "docid": "ab527b3ac5988047f866b348aa4c04bf", "score": "0.47334912", "text": "def collect(self):\n for func in self._caches:\n cache = {}\n for key in self._caches[func]:\n if (time.time() - self._caches[func][key][1]) < self._timeouts[func]:\n cache[key] = self._caches[func][key]\n self._caches[func] = cache", 
"title": "" }, { "docid": "74f36c5e66f8968332ba8ae255a959c8", "score": "0.47306302", "text": "def _clear_caches():\n _ghi_cache.clear()\n _prf_cache.clear()", "title": "" }, { "docid": "a1a196add6434995e90ffc5ded390fc1", "score": "0.4728394", "text": "def __iter__(self):\n return iter(list(self._symbols.items()))", "title": "" } ]
dd99e7bd4990e39891a390e9335bdb98
Plot the learning curve of the different networks trained
[ { "docid": "3ffe038286369685daca2d21ff9f7913", "score": "0.6725776", "text": "def plot_learning(steps, stats_mean_list=None, stats_std_list=None,\n labels=[], title=\"Learning Curve\", ylabel=\"Loss\",\n y_lim=[0.0, 1.7], share_x=False, share_y=False,\n legend_loc=\"best\", filter_ws=0, step_size=10000,\n xlabel=r\"$\\times 10^4$ Batch Iterations\"):\n fig, ax = plt.subplots(1, 1, figsize=(8, 5))\n # Loop over individual lines, smooth the arrays &\n for i in range(len(labels)):\n if filter_ws > 0:\n temp_mean = savgol_filter(stats_mean_list[i], filter_ws, 3)\n if stats_std_list is not None:\n temp_std = savgol_filter(stats_std_list[i], filter_ws, 3)\n else:\n temp_mean = stats_mean_list[i]\n if stats_std_list is not None:\n temp_std = stats_std_list[i]\n ax.plot(steps, temp_mean, label=labels[i])\n if stats_std_list is not None:\n ax.fill_between(steps,\n temp_mean - 0.5*temp_std,\n temp_mean + 0.5*temp_std, alpha=0.5)\n\n its_ticks = np.arange(step_size, np.max(steps), step_size)\n its_labels_temp = [str(int(it/step_size)) for it in its_ticks]\n its_labels = [it_l for it_l in its_labels_temp]\n its_labels[0] = r\"$1$\"\n ax.set_xticks(its_ticks, minor=False)\n ax.set_xticklabels(its_labels)\n\n ax.legend(loc=legend_loc, fontsize=12)\n ax.set_title(title)\n if not share_y:\n ax.set_ylabel(ylabel)\n else:\n ax.get_yaxis().set_ticks([])\n if not share_x:\n ax.set_xlabel(xlabel)\n else:\n ax.get_xaxis().set_ticks([])\n ax.set_ylim(bottom=y_lim[0], top=y_lim[1])\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.legend(fontsize=25)\n return", "title": "" } ]
[ { "docid": "c021a9f6d6801419b68365d93bbefc7a", "score": "0.7735428", "text": "def plot_learning_curve(training_losses, validation_losses): \n plt.ylabel('Loss')\n plt.xlabel('Training Steps')\n plt.plot(training_losses, label=\"training\")\n plt.plot(validation_losses, label=\"validation\")\n plt.legend(loc=1)", "title": "" }, { "docid": "78275ffe2d1cb5c5812d89bcae3e4d5d", "score": "0.75846165", "text": "def plot_learning_curve(self):\n plt.figure()\n plt.plot(range(self.noi),self.Jv)\n plt.xlabel('Number of iterations')\n plt.ylabel('Cost')\n plt.title('Cost vs Number of iterations curve')", "title": "" }, { "docid": "9cf983abe2d065dbae806a52cd1ff8b9", "score": "0.73652726", "text": "def plot_lr(learning_rates):\n plt.plot(learning_rates)\n plt.title('Change of learning rate over time')\n plt.xlabel('Epoch')\n plt.ylabel('Learning rate')\n plt.show()", "title": "" }, { "docid": "90a600749107f22359b2581e8270d052", "score": "0.73337096", "text": "def plot(self):\n fig = plt.figure(figsize=[10, 4])\n\n plt.subplot(121)\n self.plot_lr()\n plt.xlabel('n iter')\n plt.ylabel('learning rate')\n\n plt.subplot(122)\n self.plot_loss(c='r')\n plt.xlabel('n iter')\n plt.ylabel('loss')\n fig.subplots_adjust(wspace=0.3, bottom=0.18)", "title": "" }, { "docid": "c9bb8a2098f33e2fb9265a305f1f6e3c", "score": "0.7284015", "text": "def train_model(self) -> None:\n self.NN = NeuralNetwork()\n costs = self.network_training(self.NN)\n plt.plot(costs)\n plt.show()", "title": "" }, { "docid": "7f3c8bb6177e1a0774176957da2bbbe5", "score": "0.7243582", "text": "def plot_learning_curve(MODEL):\n train_accs = np.squeeze(MODEL['train_accs'])\n test_accs = np.squeeze(MODEL['test_accs'])\n fig, ax = plt.subplots()\n ax.plot(100 - train_accs, label='Training set error')\n ax.plot(100 - test_accs, label='Test set error')\n ax.set_ylabel('Prediction error (%)')\n plt.legend()\n ax.set_xlabel('Hundreds of iterations')\n ax.set_title(\"Learning rate =\" + str(MODEL[\"learning_rate\"]))", "title": "" }, { "docid": "acb76bdc2341b630ed30417dc3534fcc", "score": "0.72194725", "text": "def learning_curves(X_train, y_train, X_test, y_test):\n \n print \"Creating learning curve graphs for max_depths of 1, 3, 6, and 10.....\"\n \n # Create the figure window\n fig = plt.figure(figsize=(10,8))\n \n # We will vary the training set size so that we have 50 different sizes.\n sizes = np.rint(np.linspace(1, len(X_train),50)).astype(int)\n train_err = np.zeros(len(sizes))\n test_err = np.zeros(len(sizes))\n \n # Create four different models based on max_depth\n\n for k, depth in enumerate([1,3,6,10]):\n \n for i,s in enumerate(sizes):\n \n # Setup a decision tree regressor so that it learns a tree with max_depth = depth\n regressor = DecisionTreeRegressor(max_depth =depth)\n \n # Fit the learner to the training data\n regressor.fit(X_train[:s], y_train[:s])\n \n # Find the performance on the training set\n train_err[i] = performance_metric(y_train[:s],regressor.predict(X_train[:s]))\n \n # Find the performance on the testing set\n test_err[i] = performance_metric(y_test,regressor.predict(X_test))\n \n # Subplot the learning curve graph\n ax = fig.add_subplot(2,2, k+1)\n ax.plot(sizes,test_err, lw = 2, label = \"Testing Error\")\n ax.plot(sizes, train_err, lw = 2, label = \"Training Error\")\n ax.legend()\n ax.set_title('max_depth = %s'%(depth))\n ax.set_xlabel(\"Number of Data Points in Training Set\")\n ax.set_ylabel('Total Error')\n ax.set_xlim([0, len(X_train)])\n \n # Visual aesthetics\n fig.suptitle('Decision Tree Regressor Learning 
Performances', fontsize = 18, y=1.03)\n fig.tight_layout()\n fig.show()", "title": "" }, { "docid": "adf43744f83cac76d420fe7db3bf1b65", "score": "0.71618503", "text": "def plot_learning(train_loss, eval_loss, path_to_save):\n plt.plot(train_loss)\n plt.plot(eval_loss)\n plt.legend(['Train loss', 'Dev loss'])\n path = os.path.join(path_to_save, 'learning_plot.png')\n plt.savefig(path)\n plt.clf()", "title": "" }, { "docid": "be3c9091e702298fdee0c3a0ddd65f60", "score": "0.70671624", "text": "def show_learning_curves(rew_matrix, naive_rew):\n plt.plot(range(len(rew_matrix[0])), rew_matrix[0], \"-b\", label = \"Feature 1\")\n plt.plot(range(len(rew_matrix[1])), rew_matrix[1], \"-g\", label = \"Feature 7\")\n plt.plot(range(len(rew_matrix[2])), rew_matrix[2], \"-r\", label = \"Feature 12\")\n plt.plot(range(len(rew_matrix[3])), rew_matrix[3], \"-c\", label = \"Feature 13\")\n plt.plot(range(len(naive_rew)), naive_rew, \"-m\", label = \"Naive policy\")\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Reward (learning phase)\")\n plt.legend()", "title": "" }, { "docid": "8f23da3e535be143870e33a7c2b44233", "score": "0.70591086", "text": "def draw_learning_curve(numbers, metric):\r\n\r\n plt.xlabel('Simulation Epoch')\r\n plt.ylabel(metric)\r\n plt.title('Learning Curve')\r\n plt.grid(True)\r\n\r\n plt.plot(numbers['x'], numbers[metric], 'r', lw=1)\r\n plt.show()", "title": "" }, { "docid": "4a2e3d953d6903dd83eada7bc67add9c", "score": "0.6997576", "text": "def plot_learning_curve(classification_method, classification_method_params, data, labels):\n if classification_method == KNeighborsClassifier:\n train_sizes, train_scores, valid_scores = learning_curve(\n classification_method(n_neighbors=classification_method_params[0]), data, labels,\n train_sizes=np.linspace(0.1, 1.0, 25))\n elif classification_method == LogisticRegression:\n train_sizes, train_scores, valid_scores = learning_curve(\n classification_method(C=classification_method_params[0]), data, labels,\n train_sizes=np.linspace(0.1, 1.0, 25))\n else:\n train_sizes, train_scores, valid_scores = learning_curve(\n classification_method(), data, labels, train_sizes=np.linspace(0.1, 1.0, 25))\n plt.figure()\n plt.title(\"Learning Curve ({0})\".format(classification_method.__name__))\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n plt.grid()\n # Compute the mean and std to also plot them on the learning curve.\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(valid_scores, axis=1)\n test_scores_std = np.std(valid_scores, axis=1)\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,\n alpha=0.1, color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std,\n alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\", label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\", label=\"Cross-validation score\")\n plt.legend(loc=\"best\")\n plt.show()", "title": "" }, { "docid": "9af31db2b0dee82bc1bf530fd754f4a6", "score": "0.69334346", "text": "def plot_cost_and_acc(self):\n x = list(range(1, len(self.cost_hist_tr) + 1))\n plt.plot(x, self.cost_hist_tr, label = \"train loss\")\n plt.plot(x, self.cost_hist_val, label = \"val loss\")\n plt.title(\"Loss over epochs\")\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Loss\")\n plt.legend()\n plt.show()\n plt.plot(x, self.acc_hist_tr, label = \"Train accuracy\")\n 
plt.plot(x, self.acc_hist_val, label = \"Val accuracy\")\n plt.title(\"Accuracy over epochs\")\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Accuracy\")\n plt.legend()\n plt.show()", "title": "" }, { "docid": "d508a117025eb2f127cf2a40bab7e917", "score": "0.69325346", "text": "def plot(self):\n plt.clf()\n\n plt.plot(np.linspace(0, 1, self.N+1), self.sb_policy, c='tab:orange', label=f\"{self.model_name} Learner\")\n plt.plot(np.linspace(0, 1, self.N+1), self.vi_policy, c='B', label=\"Value Iteration\")\n\n plt.title(\n f\"ACER Train Time: {self.sb_train_time}, VI Train Time: {self.vi_train_time}\\n\"\n f\"tt/ts: {self.params['movement_cost']}/{self.params['sample_cost']}, \"\n f\"N: {int(1/self.params['delta'])}\")\n plt.xlabel(\"Size of Hypothesis Space\")\n plt.ylabel(\"Gym Policy\")\n plt.legend()\n\n plt.xlim((0, 1))\n plt.ylim((0, self.N))\n\n plt.savefig(self.img_path)", "title": "" }, { "docid": "3490ab7802cd264199290c1f38fa36ba", "score": "0.6920023", "text": "def plot_learning_curves(x_train, y_train, x_test, y_test, clf):\n errors = []\n\n def misclf_err(y_predict, y):\n return (y_predict != y).sum() / float(len(y))\n\n rng = [int(i) for i in np.linspace(0, x_train.shape[0], 11)][1:]\n for r in rng:\n print(f\"Running {r / x_train.shape[0]:.0%}\")\n clf.fit(x_train[:r], y_train[:r])\n\n y_test_predict = clf.predict(x_test)\n\n error = misclf_err(y_test, y_test_predict)\n errors.append(error*100)\n\n _, ax = plt.subplots(1, 1, figsize=(8, 5))\n ax.plot(np.arange(10, 101, 10), errors, label=\"Error\", marker=\"o\")\n\n ax.set_xlim([0, 110])\n ax.set_xticks(np.arange(0, 101, 10))\n ax.yaxis.set_major_formatter(PercentFormatter())\n ax.xaxis.set_major_formatter(PercentFormatter())\n\n plt.ylabel(\"Error\")\n plt.xlabel(\"Training set size in percent\")\n plt.title(\"Learning Curve\")\n plt.grid()\n plt.tight_layout()", "title": "" }, { "docid": "5ee3ef41896c39395171d975f95590ac", "score": "0.69093424", "text": "def plot_learning_rates():\n # Choose some alpha value - change this\n alpha = 0.\n num_iters = 100\n\n # init theta and run gradient descent\n theta = np.zeros(3)\n theta, J_history = gradientDescentMulti(X, y, theta, alpha, num_iters)\n\n # Plot the convergence graph\n pyplot.plot(np.arange(len(J_history)), J_history, lw=2)\n pyplot.xlabel('Number of iterations')\n pyplot.ylabel('Cost J')\n\n # Display the gradient descent's result\n print('theta computed from gradient descent: {:s}'.format(str(theta)))\n\n # Estimate the price of a 1650 sq-ft, 3 br house\n # ======================= YOUR CODE HERE ===========================\n # Recall that the first column of X is all-ones.\n # Thus, it does not need to be normalized.\n\n price = 0 # You should change this\n\n # ===================================================================\n\n print('Predicted price of a 1650 sq-ft, 3 br house (using gradient descent): ${:.0f}'.format(price))", "title": "" }, { "docid": "5bb6df2fde4efcba7faf73f33ac502db", "score": "0.6860948", "text": "def plot_cost_per_lr(X, y):\n plt.figure()\n plt.xlabel(\"Number of iterations\")\n plt.ylabel(\"Cost J\")\n\n lr_list = [0.3, 0.1, 0.03, 0.01, 0.003, 0.001]\n losses_per_lr_list = []\n nb_iterations = 50\n for i in range(len(lr_list)):\n lr = lr_list[i]\n model = Model(X.shape[1])\n print('Training model using ', nb_iterations, 'iterations and learning rate=', lr)\n model.train(nb_iterations, X, y, learning_rate=lr)\n J_history = model.losses\n losses_per_lr_list.append(J_history)\n plt.plot(range(nb_iterations), losses_per_lr_list[i], label='alpha= 
{:.3f}'.format(lr))\n plt.legend()\n plt.savefig('figs/different_learning_rates')", "title": "" }, { "docid": "e1fd06e138e4cb2d23ac6cee1af944b8", "score": "0.6855946", "text": "def plotGraphs(history):\n\n acc = history.history['acc']\n val_acc = history.history['val_acc']\n\n loss = history.history['loss']\n val_loss = history.history['val_loss'] \n\n epochs = range(1, len(acc) + 1)\n plt.plot(epochs, acc, 'bo', label='Training acc')\n plt.plot(epochs, val_acc, 'b', label='Validation acc')\n\n plt.title('Training and validation accuracy')\n plt.legend() \n plt.figure() \n\n plt.plot(epochs, loss, 'bo', label='Training loss')\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n plt.show()", "title": "" }, { "docid": "527395ebae2613580c45cadf981dc556", "score": "0.6851773", "text": "def plot_learning_curve(data, prefix):\n mean, lower_bound, upper_bound = utils.mean_confidence_interval(data)\n x_lim = len(data[0])\n plt.plot(mean, color='red')\n plt.fill_between(range(x_lim), lower_bound, upper_bound, color='red', alpha=0.4)\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Avg. number of steps\")\n plt.savefig(prefix + 'learning_curve.png')\n plt.clf()", "title": "" }, { "docid": "3a52cc6f6f07005cb6377c2a6082a5bf", "score": "0.6830098", "text": "def Graph_Costs_Over_Time(self):\n plt.plot(np.squeeze(self.costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title(\"Learning rate =\" + str(self.learning_rate))\n plt.show()", "title": "" }, { "docid": "ea36c3ff0efb092b09449baccd690860", "score": "0.6811258", "text": "def plot_objectives(self,betas_cyc,betas_rnd,X_train,y_train):\n plt.figure(figsize = (10, 5))\n plt.plot([self.computeobj(betas_cyc[::20][x], X_train, y_train) for x in range(50)], label=\"Cyclic Coordinate Descent\")\n plt.plot([self.computeobj(betas_rnd[::20][x], X_train, y_train) for x in range(50)], label=\"Random Coordinate Descent\")\n plt.xlabel('Iteration')\n plt.ylabel('Objective value')\n plt.title(\"Objective Value vs. 
Iteration (Lambda = 0.11069)\")\n plt.legend()", "title": "" }, { "docid": "bb5711cfbe8dd3bb4c12d1f08b638d57", "score": "0.67987454", "text": "def plot_train_metrics(losses, train_acc, precision):\n plt.figure(figsize=(10, 12))\n plt.subplot(311)\n plt.title('Loss')\n plt.plot(losses, color='c')\n plt.subplot(312)\n plt.title('Accuracy')\n plt.plot(train_acc, color='c')\n plt.subplot(313)\n plt.title('Precision')\n plt.plot(precision, color='c')\n plt.show()", "title": "" }, { "docid": "6f89459f9020a92452a96fa11188b218", "score": "0.6797175", "text": "def plot_learning(self, x, y):\r\n objective_iter = [self.f_beta(x, y, coef) for coef in self.coeff_list]\r\n fig = plt.figure()\r\n plt.plot(range(len(self.coeff_list)), objective_iter)\r\n plt.xlabel('Iteration')\r\n plt.ylabel('Objective Function')\r\n \r\n return fig", "title": "" }, { "docid": "3f9ecb8ff30164dd210a092450c21446", "score": "0.67761576", "text": "def plot_loss(set_loss, training_loss, parameters, set_name): \n\n plt.clf();\n \n plt.plot(set_loss, color='k', label=set_name);\n plt.plot(training_loss, color='r', linestyle=\":\", label=\"Training Set\");\n\n plt.legend(framealpha=1);\n plt.yscale('log');\n plt.title(f'{parameters[\"dataset\"]}');\n plt.xlabel(\"Epochs\", fontsize=16);\n plt.ylabel(\"Loss\", fontsize=16);\n\n plt.show()", "title": "" }, { "docid": "5128e4f041fe882b585ae76b186b8636", "score": "0.6761979", "text": "def plot(history):\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train','validation'],loc='upper right')\n plt.show()\n \n # YOUR CODE HERE", "title": "" }, { "docid": "3485b50690f2e171364716eaa60610c4", "score": "0.67561424", "text": "def plot_training(history, savedir):\n\n acc = history.history['acc']\n val_acc = history.history['val_acc']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n epochs = range(len(acc))\n\n plt.plot(epochs, acc, 'r.')\n plt.plot(epochs, val_acc, 'r')\n plt.title('Training and validation accuracy')\n plt.savefig(f'{savedir}/acc_vs_epochs.png')\n\n plt.figure()\n plt.plot(epochs, loss, 'r.')\n plt.plot(epochs, val_loss, 'r-')\n plt.title('Training and validation loss')\n plt.savefig(f'{savedir}/loss_vs_epochs.png')", "title": "" }, { "docid": "9f37da37068724f3771e23f01ce0a585", "score": "0.67085624", "text": "def plot_learning_curve(X_train=None,\n y_train=None,\n X_test=None,\n y_test=None,\n kernel=None,\n n_run=10,\n title=\"Learning curve\"):\n n_data = len(self.X_train)\n n_samp = np.linspace(n_data//n_run, n_run*(n_data//n_run),\n n_run, dtype=int)\n est = import_module('sklearn.gaussian_process')\n estimator = getattr(est, 'GaussianProcessRegressor')\n\n\n _MAE_train = []\n _MAE_test = []\n for i, n in enumerate(n_samp):\n xtrain = self.X_train[:n]\n if self.scaling:\n scaled_part_train = self.alpha * self.scaling_y_train[:n] \\\n + self.gamma\n scaled_part_test = self.alpha * self.scaling_y_test \\\n + self.gamma\n ytrain = self.y_train[:n] - scaled_part_train\n estimator = estimator(kernel=kernel,\n n_restarts_optimizer=4,\n alpha=0)\n estimator.fit(xtrain, ytrain)\n _MAE_train += [mean_absolute_error(self.y_train[:n],\n estimator.predict(xtrain) + scaled_part_train)]\n _MAE_test += [mean_absolute_error(self.y_test,\n estimator.predict(self.X_test) + scaled_part_test)]\n kernel = estimator.kernel_\n else:\n ytrain = self.y_train[:n]\n estimator = estimator(kernel=kernel,\n n_restarts_optimizer=4,\n alpha=0)\n 
estimator.fit(xtrain, ytrain)\n _MAE_train += [mean_absolute_error(ytrain,\n estimator.predict(xtrain))]\n _MAE_test += [mean_absolute_error(self.y_test,\n estimator.predict(self.X_test))]\n kernel = estimator.kernel_\n\n plt.figure()\n plt.title(title)\n plt.grid(color='b', linestyle='-', linewidth=0.5)\n plt.xlabel(\"# of training samples\")\n plt.ylabel(\"MAE [eV]\")\n plt.plot(n_samp, _MAE_train, 'o-', color='r',\n label=\"MAE for training set\")\n plt.plot(n_samp, _MAE_test, 'o-', color='g',\n label=\"MAE for testing set\")\n plt.legend(loc='best')\n\n return plt", "title": "" }, { "docid": "970f1b92c648d8455021f1d0b03638b0", "score": "0.6684306", "text": "def plot_learning_curves(estimators, titles, X, y, ylim=None, cv=None,\n n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 2)):\n nrows = len(estimators)//2\n ncols = (len(estimators)//nrows)+ (0 if len(estimators) % nrows == 0 else 1)\n plt.figure(1)\n fig, axes = plt.subplots(nrows, ncols, figsize=(10, 10))\n \n n = 0\n for col in range(ncols):\n for row in range(nrows):\n estimator = estimators[n]\n title = titles[n]\n axes[row, col].set_title(title) \n if ylim is not None:\n axes[row, col].set_ylim(*ylim) \n axes[row, col].set_xlabel(\"Training examples\")\n axes[row, col].set_ylabel(\"Score\") \n train_sizes, train_scores, test_scores = learning_curve(estimator,\n X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes,\n scoring=\"neg_mean_squared_error\") \n train_scores = np.sqrt(-train_scores)\n test_scores = np.sqrt(-test_scores) \n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n axes[row, col].grid() \n axes[row, col].fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n axes[row, col].fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n axes[row, col].plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n axes[row, col].plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n axes[row, col].legend(loc=\"best\")\n \n n += 1\n plt.tight_layout()\n plt.show()\n plt.gcf().clear()", "title": "" }, { "docid": "8b34e5cdd6c4c329bdb2d359077871f4", "score": "0.6667418", "text": "def create_train_plots(training_losses):\n x = [i for i in range(len(training_losses))]\n plt.clf()\n plt.plot(x, training_losses)\n plt.title('Training Loss per epoch')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.xticks(np.arange(1, len(x)+1, 1))\n plt.savefig('../results/train_plot.png')", "title": "" }, { "docid": "d8d6a87f7ef5c34e3df5280d349da6fe", "score": "0.6653885", "text": "def plot_training(hist, i: int):\n acc = hist.history[\"acc\"]\n val_acc = hist.history[\"val_acc\"]\n epochs = range(len(acc))\n plt.plot(epochs, acc, label=\"Training Accuracy\")\n plt.plot(epochs, val_acc, label=\"Validation Accuracy\")\n plt.title(\"Training and Validation Accuracy\")\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Accuracy\")\n plt.title(f\"Model {i} Training Accuracy\")\n plt.savefig(f\"figs/{experiment_name}/training_accuracy_m{i}\")\n plt.figure()\n loss = hist.history[\"loss\"]\n val_loss = hist.history[\"val_loss\"]\n plt.plot(epochs, loss, label=\"Training Loss\")\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Loss\")\n plt.plot(epochs, val_loss, label=\"Validation Loss\")\n plt.title(f\"Model {i} Training 
Loss\")\n plt.savefig(f\"figs/{experiment_name}/training_loss_m{i}.png\")\n plt.figure()", "title": "" }, { "docid": "e7bcff285ed14d3c4bfbe639a640bd82", "score": "0.6644706", "text": "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n\t\t\t\t\t\tn_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):\n\tplt12.figure()\n\tplt12.title(title)\n\tif ylim is not None:\n\t\tplt12.ylim(*ylim)\n\tplt12.xlabel(\"Training examples\")\n\tplt12.ylabel(\"Score\")\n\ttrain_sizes, train_scores, test_scores = learning_curve(\n\t\testimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n\ttrain_scores_mean = np.mean(train_scores, axis=1)\n\ttrain_scores_std = np.std(train_scores, axis=1)\n\ttest_scores_mean = np.mean(test_scores, axis=1)\n\ttest_scores_std = np.std(test_scores, axis=1)\n\tplt12.grid()\n\n\t#plt12.fill_between(train_sizes, train_scores_mean - train_scores_std,train_scores_mean + train_scores_std, alpha=0.1, color=\"r\")\n\t#plt12.fill_between(train_sizes, test_scores_mean - test_scores_std,test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n\tplt12.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n\t\t\t label=\"Training score\")\n\tplt12.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n\t\t\t label=\"Cross-validation score\")\n\n\tplt12.legend(loc=\"best\")\n\treturn plt12", "title": "" }, { "docid": "2541e6e3dd6ee6d202aee516cd6f428b", "score": "0.6633816", "text": "def plot_learning(\n data, # dictionary of dictionaries of data\n title = None, # title for plot\n iter_key = \"iter\", # iteration key word\n val_key = \"f1\", # validation key word\n score_type = \"F1\", # score type for label name\n linewidth = 1.0, # linewidth for plot\n offset = 10, # pixel offset for axes\n label_x_offset = 200, # x direction offset for label\n label_y_offset = 1, # y direction offset for label\n x_size = 10, # x size of figure\n y_size = 5, # y size of figure\n x_label = \"training iteration\", # name for x-axis\n x_tick_int = 20000, # interval for ticks on x-axis\n iterations = 100000, # max number of training iterations\n y_label = \"score\", # name for y-axis\n y_tick_int = 10, # interval for ticks on y-axis\n max_score = 100 # max score\n ):\n \n # Create plot figure\n fig = plt.figure(figsize = (x_size, y_size))\n ax = fig.add_subplot(111)\n \n # Plot Style\n# =============================================================================\n# plt.style.use('seaborn-deep')\n# =============================================================================\n \n # Format x and y axes\n plt.xlabel(x_label)\n plt.xlim(0,iterations)\n plt.xticks(np.arange(0,iterations+1,x_tick_int))\n \n plt.ylabel(y_label)\n plt.ylim(0,max_score)\n plt.yticks(np.arange(0,max_score+1,y_tick_int))\n \n # Adjust plot borders\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['left'].set_position(('outward', offset))\n ax.spines['bottom'].set_position(('outward', offset))\n \n # Iterate through keys\n for key in data.keys():\n \n assert key in colors.keys(), \"{} is not a support key in: {}\".format(key, colors.keys())\n \n # Get data set\n experiment = data.get(key)\n \n # Get x and y values for plotting\n temp_iter = experiment.get(iter_key)\n temp_val = experiment.get(val_key)\n \n # Plot data on figure\n ax.plot(temp_iter, temp_val, linestyle='-', marker=markers.get(key),\n color=colors.get(key), mec='w', ms=8, clip_on=False)\n \n # Add dataset labels\n plt.text(temp_iter[-1]+label_x_offset, temp_val[-1]-label_y_offset,\n names.get(key)+\" 
\"+score_type, color=colors.get(key))\n \n # Add title\n if not title is None:\n plt.title(title)\n \n # Adjust layout\n fig.tight_layout()\n \n return fig", "title": "" }, { "docid": "c8257df28ec55cf1b0eb568db6c4f055", "score": "0.661555", "text": "def print_cost(self):\n costs = self.cost\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per hundreds)')\n plt.title(\"Learning rate =\" + str(self.learning_rate))\n plt.show()", "title": "" }, { "docid": "f77371b675f0504eed404be81751fdad", "score": "0.6612863", "text": "def show_learning_curve(file_paths: List[str], model_names: List[str], **kwargs) -> tuple:\n import matplotlib.pyplot as plt\n from matplotlib.ticker import MaxNLocator\n from matplotlib.lines import Line2D\n from matplotlib import colors as mcolors\n\n available_markers = list(Line2D.markers.keys())[2:]\n available_colors = list(dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS).keys())\n\n total_files = len(file_paths)\n # print(available_markers)\n # markers = available_markers[:total_files]\n # markers_grouped = [markers[n:n + 2] for n in range(0, len(markers), 2)]\n\n markers = kwargs.get(\"markers\", [\"o\", \"v\", \"d\", \"x\", \"*\", \"+\", \"P\", \"s\"])\n markers = markers[:total_files]\n\n if len(markers) != total_files:\n raise ValueError(\n f\"Not enough values for markers. markers contains {len(markers)} elements while it is supposed to\"\n f\"contains at least {total_files} elements.\"\n )\n\n colors = available_colors[:total_files]\n\n fig, axes = plt.subplots(1, 1, figsize=kwargs.get(\"figsize\", (8, 6)))\n\n for idx, (file_path, model_name, marker, color) in enumerate(zip(file_paths, model_names, markers, colors)):\n history: dict = read_log_file(file_path=file_path, model_name=model_name)\n\n epochs: list = history['epoch']\n\n axes.plot(\n epochs,\n history['loss'],\n marker=marker,\n markersize=kwargs.get(\"markersize\", 4),\n linestyle='-',\n lw=1.8,\n label=f'{model_name}',\n color=color\n )\n\n axes.plot(\n epochs,\n history['val_loss'],\n marker=marker,\n markersize=kwargs.get(\"markersize\", 4),\n linestyle='--',\n lw=1.8,\n color=color\n )\n\n fontsize = kwargs.get(\"font_size\", 14)\n axes.set_ylabel('Loss [-]', fontsize=fontsize)\n axes.set_xlabel('Epochs [-]', fontsize=fontsize)\n axes.xaxis.set_major_locator(MaxNLocator(nbins=20, integer=True))\n axes.set_yscale(kwargs.get(\"scale\", \"linear\"))\n axes.tick_params(axis=\"both\", which=\"major\", labelsize=fontsize)\n axes.legend(fontsize=fontsize)\n axes.grid()\n plt.tight_layout()\n\n if kwargs.get(\"save\", True):\n os.makedirs(\"figures/\", exist_ok=True)\n plt.savefig(kwargs.get(\"save_name\", f\"figures/model.png\"), dpi=300)\n if kwargs.get(\"show\", True):\n plt.show()\n\n return fig, axes", "title": "" }, { "docid": "ec1ab34b67710e721f77e0f04cb8e360", "score": "0.6609935", "text": "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve( estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes = train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n \n print(\"train_scores_mean:\")\n print(np.mean(train_scores, axis=1))\n print(\"test_scores_mean:\")\n print 
(test_scores_mean)\n\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt", "title": "" }, { "docid": "20205dff9ce7f52fbee690482eb3588c", "score": "0.66068953", "text": "def plot_learning_curve(self, estimator, title, X, y, save_file_path, ylim=None, cv=None,\n\t\t\t\t\t\t\t\t\t\t\t\t\ttrain_sizes=np.linspace(.1, 1.0, 5)):\n\t\tlogger.info(\"Plotting {}\".format(title))\n\t\tplt.figure()\n\t\tplt.title(title)\n\t\tif ylim is not None:\n\t\t\tplt.ylim(*ylim)\n\t\tplt.xlabel(\"Training examples\")\n\t\tplt.ylabel(\"Score\")\n\t\ttrain_sizes, train_scores, test_scores = learning_curve(\n\t\t\testimator, X, y, cv=cv, train_sizes=train_sizes)\n\t\ttrain_scores_mean = np.mean(train_scores, axis=1)\n\t\ttrain_scores_std = np.std(train_scores, axis=1)\n\t\ttest_scores_mean = np.mean(test_scores, axis=1)\n\t\ttest_scores_std = np.std(test_scores, axis=1)\n\t\tplt.grid()\n\n\t\tplt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1,\n\t\t\t\t\t\t\t\t\t\t color=\"r\")\n\t\tplt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1,\n\t\t\t\t\t\t\t\t\t\t color=\"g\")\n\t\tplt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\", label=\"Training score\")\n\t\tplt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\", label=\"Cross-validation score\")\n\n\t\tplt.legend(loc=\"best\")\n\t\tplt.savefig(\"{}\".format(save_file_path))\n\t\tplt.close()", "title": "" }, { "docid": "14dad150472da33b1b28071d6f9eba2c", "score": "0.6601268", "text": "def plot(self):\r\n points_to_plot = [[I[self.FEATURES_TO_PLOT[0]], I[self.FEATURES_TO_PLOT[1]], I[-1]] for I in self.TRAINING_DATA]\r\n self.plot_2d_points(points_to_plot)\r\n self.plot_weight_vectors(self.tp.W)\r\n plt.xlabel(self.FEATURES[self.FEATURES_TO_PLOT[0]])\r\n plt.ylabel(self.FEATURES[self.FEATURES_TO_PLOT[1]])\r\n plt.title(\"Iris data with three weight vectors from multi-class perceptron training.\")\r\n plt.show()", "title": "" }, { "docid": "ab2e8b4863826fd64d6f122f3073a6ae", "score": "0.6590201", "text": "def plot(network):\n\tplt.figure(figsize=(16, 8))\n\tnetwork.plot()\n\tplt.show()", "title": "" }, { "docid": "aa4f6dafc2e0f7f6d36481366059b026", "score": "0.658493", "text": "def plot_training(history_name):\n acc = history_name.history['acc']\n test_acc = history_name.history['val_acc']\n loss = history_name.history['loss']\n test_loss = history_name.history['val_loss']\n\n epochs = range(1,len(acc) + 1)\n\n plt.plot(epochs, acc, 'bo', label='Training accuracy')\n plt.plot(epochs, test_acc, 'r', label='Validation accuracy')\n plt.title('Training and test accuracy')\n plt.legend()\n plt.axhline(y=0.9, color='black', linestyle='--')\n plt.figure()\n\n plt.plot(epochs, loss, 'bo', label='Training loss')\n plt.plot(epochs, test_loss, 'r', label='Test loss')\n plt.title('Training and test loss')\n plt.legend()\n plt.show()", "title": "" }, { "docid": "c029173e38c4132285925996bf3f9869", "score": "0.6583262", "text": "def generate_plot(data):\n epochs = list(range(1, len(data) + 1))\n 
plt.ylabel(\"Loss\")\n plt.xlabel(\"Epochs\")\n plt.plot(epochs, data)\n plt.savefig(\"images/loss.png\")\n plt.show()", "title": "" }, { "docid": "7de3dd97f97b93783806a52dfb16bb65", "score": "0.65776277", "text": "def plot_learning_curve(metrics_csv_file, x_attribute, y_attribute):\n df = pd.read_csv(metrics_csv_file)[[x_attribute, y_attribute, \"alpha\"]]\n color_list = ['sienna', 'black', 'darkgreen', 'red', 'blue', 'gold',\n 'silver', 'navy', 'orange', 'm', \"lime\", \"grey\"]\n sns.lmplot(x_attribute, y_attribute, data=df, hue='alpha',\n palette=color_list, fit_reg=False, legend_out=False, markers='o',\n scatter_kws={\"s\": 50})\n sns.plt.suptitle(\"Simulated user on validation set\")\n plt.show()", "title": "" }, { "docid": "356d7df7226595b96a1d8fa320289f25", "score": "0.6573321", "text": "def plot_learning_curve(\n estimator, title, X, y, cv=None, train_sizes=np.linspace(.1, 1.0, 10)\n):\n plt.figure()\n plt.title(title)\n plt.ylim(0.45, 1.01)\n plt.xlabel('Number of samples', labelpad=20)\n plt.ylabel('Score', labelpad=20)\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=-1, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label='Train score')\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label='Cross-validation score')\n\n plt.legend(loc='best')\n return plt", "title": "" }, { "docid": "8a41de375a0d3ab553e41eb169c682b0", "score": "0.65684664", "text": "def __plot(self):\n\n if not self.accuracies or not self.losses:\n raise ValueError(\"ERROR: self.accuracies or self.losses not defined. 
Make sure self.fit() is called with visible=True or that you called self.update_fit()\")\n plt.plot([i+1 for i in range(len(self.accuracies))],self.accuracies)\n plt.xlabel(\"Number of estimators\")\n plt.ylabel(\"Accuracy\")\n plt.show()\n plt.plot([i+1 for i in range(len(self.losses))], self.losses)\n plt.xlabel(\"Number of estimators\")\n plt.ylabel(\"Loss\")\n plt.show()", "title": "" }, { "docid": "e08c3a35211accafbdfeb2b09b7a966c", "score": "0.65595156", "text": "def plot_cost(train_cost, val_cost, epochs):\n epochs_label = np.arange(0, epochs+1, 1)\n\n fig, ax = plt.subplots()\n\n ax.plot(epochs_label, train_cost, label=\"Training Data\")\n ax.plot(epochs_label, val_cost, label=\"Validation Data\")\n ax.legend()\n ax.set(xlabel='Epochs', ylabel='Cost', ylim=(0, 4), xlim=(0, 22))\n ax.grid()\n\n plt.show()", "title": "" }, { "docid": "272c01e451d339a873f684af02317476", "score": "0.6546069", "text": "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Accuracy\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation\")\n\n plt.legend(loc=\"best\")\n return plt", "title": "" }, { "docid": "e1674e91ec60ccb1d1f11af3ccdcd682", "score": "0.6542977", "text": "def plot_loss(self):\n history_dict = self.history.history\n history_dict.keys()\n loss = history_dict['loss']\n val_loss = history_dict['val_loss']\n\n epochs = range(1, len(loss) + 1)\n\n # \"bo\" is for \"blue dot\"\n plt.plot(epochs, loss, 'bo', label='Training loss')\n # b is for \"solid blue line\"\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n\n plt.show()", "title": "" }, { "docid": "9ba9f10d9151afb9802d84a677b84e16", "score": "0.65290445", "text": "def plot_learning_accuracy(self):\n x = [i for i in range(0,len(self._epoch_accuracy))]\n \n plt.plot(x, self._epoch_accuracy, label='Accuracy')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.title('Learning Accuracy\\n')\n plt.legend()\n plt.show()", "title": "" }, { "docid": "023fd3e20414ece902f0b09dc936013d", "score": "0.6525452", "text": "def plotter(train, val, title):\n plt.style.use('seaborn')\n plt.xlabel('epoch')\n plt.ylabel(str(title))\n plt.plot(train, color = 'green', label = \"training \" + str(title))\n plt.plot(val, color = 'red', label = \"validation \" + str(title))\n plt.legend()\n plt.show()\n #plt.savefig('result/loss')\n #plt.close()", "title": "" }, { "docid": "81d81e38ebd617805455b3e93687c6ef", "score": "0.65207815", "text": "def plot_training_curve(path):\n import matplotlib.pyplot as plt\n train_err = 
np.loadtxt(\"{}_train_err.csv\".format(path))\n val_err = np.loadtxt(\"{}_val_err.csv\".format(path))\n train_loss = np.loadtxt(\"{}_train_loss.csv\".format(path))\n val_loss = np.loadtxt(\"{}_val_loss.csv\".format(path))\n plt.title(\"Train vs Validation Error\")\n n = len(train_err) # number of epochs\n plt.plot(range(1,n+1), train_err, label=\"Train\")\n plt.plot(range(1,n+1), val_err, label=\"Validation\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Error\")\n plt.legend(loc='best')\n plt.show()\n plt.title(\"Train vs Validation Loss\")\n plt.plot(range(1,n+1), train_loss, label=\"Train\")\n plt.plot(range(1,n+1), val_loss, label=\"Validation\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.legend(loc='best')\n plt.show()", "title": "" }, { "docid": "1a679329bb7ae6543872bfa4f1835fe0", "score": "0.65168494", "text": "def plot(self, n_epochs, title=\"Learning Rate Schedule\"):\n # Compute the set of learning rates for each corresponding\n # Epoch\n epochs = np.arange(0, n_epochs)\n lrs = []\n for i in range(n_epochs):\n lrs.append(self())\n # the learning rate schedule\n plt.style.use(\"ggplot\")\n plt.figure()\n plt.plot(epochs, lrs)\n plt.title(title)\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Learning Rate\")\n plt.show()", "title": "" }, { "docid": "812823d7b039d34de2c17bb6dde63873", "score": "0.6508551", "text": "def accuracy_curve_training(history):\n \n fig = plt.figure(figsize=[8,6])\n plt.plot(history.history['acc'],'r',linewidth=3.0)\n plt.legend(['Training Accuracy'],fontsize=18)\n plt.xlabel('Epochs ',fontsize=16)\n plt.ylabel('Accuracy',fontsize=16)\n plt.title('Accuracy Curves',fontsize=16)\n fig.savefig('/services/scratch/perception/cdamhieu/results/curves/accuracy_loss_per_epoch/accuracy_curves_training.png')", "title": "" }, { "docid": "b7ae5f7fe4c4fc9a2975794cb89bd4d2", "score": "0.64923316", "text": "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n\t\t\t\t\t\tn_jobs=1, train_sizes=numpy.linspace(.1, 1.0, 5)):\n\n\tplt.figure()\n\tplt.title(title)\n\tif ylim is not None:\n\t\tplt.ylim(*ylim)\n\tplt.xlabel(\"Training examples\")\n\tplt.ylabel(\"Score\")\n\ttrain_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n\n\ttrain_scores_mean = numpy.mean(train_scores, axis=1)\n\ttrain_scores_std = numpy.std(train_scores, axis=1)\n\ttest_scores_mean = numpy.mean(test_scores, axis=1)\n\ttest_scores_std = numpy.std(test_scores, axis=1)\n\tplt.grid()\n\n\tplt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n\t\t\t\t\t train_scores_mean + train_scores_std, alpha=0.1,\n\t\t\t\t\t color=\"r\")\n\tplt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n\t\t\t\t\t test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n\tplt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n\t\t\t label=\"Training score\")\n\tplt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n\t\t\t label=\"Cross-validation score\")\n\n\tplt.legend(loc=\"best\")\n\treturn plt", "title": "" }, { "docid": "db919439563f163aabe298f0bbbe7cfa", "score": "0.64802206", "text": "def loss_curve_training(history):\n \n fig = plt.figure(figsize=[8,6])\n plt.plot(history.history['loss'],'r',linewidth=3.0)\n plt.legend(['Training loss'],fontsize=18)\n plt.xlabel('Epochs ',fontsize=16)\n plt.ylabel('Loss',fontsize=16)\n plt.title('Loss Curves',fontsize=16)\n fig.savefig('/services/scratch/perception/cdamhieu/results/curves/accuracy_loss_per_epoch/loss_curves_training.png')", "title": "" }, { "docid": 
"7b8f07d04711f360455bd392fd7351b3", "score": "0.64736956", "text": "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt", "title": "" }, { "docid": "7b8f07d04711f360455bd392fd7351b3", "score": "0.64736956", "text": "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt", "title": "" }, { "docid": "6414421bfed99521f4e17f4e4ce2044a", "score": "0.6466813", "text": "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt", "title": "" }, { 
"docid": "6414421bfed99521f4e17f4e4ce2044a", "score": "0.6466813", "text": "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt", "title": "" }, { "docid": "6414421bfed99521f4e17f4e4ce2044a", "score": "0.6466813", "text": "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt", "title": "" }, { "docid": "6414421bfed99521f4e17f4e4ce2044a", "score": "0.6466813", "text": "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt", "title": "" }, { 
"docid": "a620d3a1effbe372db273e526432213c", "score": "0.64666253", "text": "def plot_learning(remaining_pegs):\n episode = [i for i in range(len(remaining_pegs))]\n plt.plot(episode, remaining_pegs)\n plt.xlabel(\"Episode number\")\n plt.ylabel(\"Remaining pegs\")\n plt.show()", "title": "" }, { "docid": "c6d1a41d25320aa94acc92cd23a2ba2a", "score": "0.6466579", "text": "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt", "title": "" }, { "docid": "072799999cacae5916320e963a05181d", "score": "0.64643675", "text": "def update_training_plot(self):\n self.axes1training.cla()\n self.axes2training.cla()\n self.axes3training.cla()\n\n self.axes3training.set_xlabel('Episode')\n\n reward, v, d = self.tr.read_averages()\n\n self.axes1training.plot(reward)\n self.axes2training.plot(v)\n self.axes3training.plot(d)\n\n self.canvas_training.draw()", "title": "" }, { "docid": "b8f41fe53f2b966ef2e9e72c2f18c99b", "score": "0.6459311", "text": "def show_learning_curve_v2(file_paths: List[str], model_names: List[str], **kwargs) -> tuple:\n import matplotlib.pyplot as plt\n from matplotlib.ticker import MaxNLocator\n from matplotlib.lines import Line2D\n from matplotlib import colors as mcolors\n\n available_markers = list(Line2D.markers.keys())[2:]\n available_colors = list(dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS).keys())\n\n total_files = len(file_paths)\n # print(available_markers)\n # markers = available_markers[:total_files]\n # markers_grouped = [markers[n:n + 2] for n in range(0, len(markers), 2)]\n\n markers = kwargs.get(\"markers\", [\"o\", \"v\", \"d\", \"x\", \"*\", \"+\", \"P\", \"s\"])\n markers = markers[:total_files]\n\n if len(markers) != total_files:\n raise ValueError(\n f\"Not enough values for markers. 
markers contains {len(markers)} elements while it is supposed to\"\n f\"contains at least {total_files} elements.\"\n )\n\n colors = available_colors[:total_files]\n\n fig, axes = plt.subplots(1, 1, figsize=kwargs.get(\"figsize\", (8, 6)))\n\n for idx, (file_path, model_name, marker, color) in enumerate(zip(file_paths, model_names, markers, colors)):\n with open(file_path, \"r\") as log_file:\n lines: List[str] = log_file.readlines()\n\n for line_idx, line in enumerate(lines):\n if line.__contains__(\"epoch\"):\n first_index: int = line_idx - 1\n break\n\n epochs: np.ndarray = np.loadtxt(file_path, usecols=1, skiprows=first_index)\n loss: np.ndarray = np.loadtxt(file_path, usecols=3, skiprows=first_index)\n val_loss: np.ndarray = np.loadtxt(file_path, usecols=5, skiprows=first_index)\n\n axes.plot(\n epochs,\n loss,\n # marker=marker,\n # markersize=kwargs.get(\"markersize\", 4),\n linestyle='-',\n lw=1.8,\n label=f'{model_name}',\n color=color\n )\n\n axes.plot(\n epochs,\n val_loss,\n # marker=marker,\n # markersize=kwargs.get(\"markersize\", 4),\n linestyle='--',\n lw=1.8,\n color=color\n )\n\n fontsize = kwargs.get(\"font_size\", 14)\n axes.set_ylabel('Loss RMSE [-]', fontsize=fontsize)\n axes.set_xlabel('Epochs [-]', fontsize=fontsize)\n axes.xaxis.set_major_locator(MaxNLocator(nbins=10, integer=True))\n axes.set_xscale(kwargs.get(\"scale\", \"linear\"))\n axes.set_yscale(kwargs.get(\"scale\", \"linear\"))\n axes.tick_params(axis=\"both\", which=\"major\", labelsize=fontsize)\n axes.legend(fontsize=fontsize)\n axes.grid()\n\n axes.axhline(y=0.00022, xmin=0.0001, xmax=1, lw=2, color=\"k\")\n axes.annotate(s=r\"deepx score ($2.2 \\times 10^{-4}$)\", xy=(1, 0.00023), fontsize=16)\n\n plt.tight_layout()\n\n if kwargs.get(\"save\", True):\n os.makedirs(\"figures/\", exist_ok=True)\n plt.savefig(kwargs.get(\"save_name\", f\"figures/model.png\"), dpi=300)\n if kwargs.get(\"show\", True):\n plt.show()\n\n return fig, axes", "title": "" }, { "docid": "10845d45e8e17f5885f239b3e6352d1c", "score": "0.64541495", "text": "def plot_graphs(history, component):\n plt.title('Train ' + ' and Validation ' + component)\n plt.plot(history.history[component])\n plt.plot(history.history['val_'+component])\n plt.xlabel('Epochs')\n plt.ylabel(component)\n plt.legend([component, 'val_'+component])\n plt.grid()\n plt.show()", "title": "" }, { "docid": "a739d7b119eb64978aea124e0ed068bb", "score": "0.6453572", "text": "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\r\n n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):\r\n plt.figure(1)\r\n plt.title(title)\r\n if ylim is not None:\r\n plt.ylim(*ylim)\r\n plt.xlabel(\"Training examples\",fontsize = fontsize)\r\n plt.ylabel(\"Score\",fontsize = fontsize)\r\n train_sizes, train_scores, test_scores = learning_curve(\r\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\r\n train_scores_mean = np.mean(train_scores, axis=1)\r\n train_scores_std = np.std(train_scores, axis=1)\r\n test_scores_mean = np.mean(test_scores, axis=1)\r\n test_scores_std = np.std(test_scores, axis=1)\r\n plt.grid()\r\n\r\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\r\n train_scores_mean + train_scores_std, alpha=0.1,\r\n color=\"r\")\r\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\r\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\r\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\r\n label=\"Training score\")\r\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\r\n 
label=\"Cross-validation score\")\r\n plt.xticks(size = fontsize)\r\n plt.yticks(size = fontsize)\r\n plt.legend(loc=\"best\")\r\n return plt", "title": "" }, { "docid": "4553e7cbf098de1565f12cdd3ffa3bf3", "score": "0.64534795", "text": "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\", label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\", label=\"Cross-validation score\")\n plt.legend(loc=\"best\")\n return plt", "title": "" }, { "docid": "37852c16230e65af81b1bc776da51a6f", "score": "0.6443604", "text": "def plot_loss(train_loss, val_loss, epochs):\n epochs_label = np.arange(0, epochs+1, 1)\n\n fig, ax = plt.subplots()\n\n ax.plot(epochs_label, train_loss, label=\"Training Data\")\n ax.plot(epochs_label, val_loss, label=\"Validation Data\")\n ax.legend()\n ax.set(xlabel='Epochs', ylabel='Loss', ylim=(0, 3), xlim=(0, 22))\n ax.grid()\n\n plt.show()", "title": "" }, { "docid": "c7d340bd84d65d1779c15c4254770da7", "score": "0.6431484", "text": "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=1, train_sizes=np.linspace(0.1, 1.0, 5), heldout_score=None):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n\n print \"train scores\\n\", train_scores\n\n print \"test_scores \\n\", test_scores\n \n return plt", "title": "" }, { "docid": "b630a0d5430ecf79a4340cfc0d173906", "score": "0.642106", "text": "def mat_plot():\n line_x = np.linspace(- 5, 5, 100)\n line_y = square_func(line_x)\n x_start = - 5\n iterations = 50\n learning_rate = 0.7\n x = gradient_descent(x_start, derivative_func, iterations, learning_rate)\n\n # 绘制二阶曲线\n plt.plot(line_x, line_y, c='b')\n\n # 绘制梯度下降过程的点\n plt.scatter(x, square_func(x), c='r', )\n plt.plot(x, square_func(x), c='r',\n label='learning 
rate={}'.format(learning_rate))\n\n # legend函数显示图例\n plt.legend()\n # show函数显示\n plt.show()", "title": "" }, { "docid": "fe72fc138ed41fcdacd6efda4246bd65", "score": "0.64100766", "text": "def show_learning_rate(file_paths: List[str], model_names: List[str], **kwargs) -> tuple:\n import matplotlib.pyplot as plt\n from matplotlib.ticker import MaxNLocator\n from matplotlib.lines import Line2D\n from matplotlib import colors as mcolors\n\n available_markers = list(Line2D.markers.keys())[2:]\n available_colors = list(dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS).keys())\n\n total_files = len(file_paths)\n # print(available_markers)\n # markers = available_markers[:total_files]\n # markers_grouped = [markers[n:n + 2] for n in range(0, len(markers), 2)]\n\n markers = kwargs.get(\"markers\", [\"o\", \"v\", \"d\", \"x\", \"*\", \"+\", \"P\", \"s\"])\n markers = markers[:total_files]\n\n if len(markers) != total_files:\n raise ValueError(\n f\"Not enough values for markers. markers contains {len(markers)} elements while it is supposed to\"\n f\"contains at least {total_files} elements.\"\n )\n\n colors = available_colors[:total_files]\n\n fig, axes = plt.subplots(1, 1, figsize=kwargs.get(\"figsize\", (8, 6)))\n\n for idx, (file_path, model_name, marker, color) in enumerate(zip(file_paths, model_names, markers, colors)):\n with open(file_path, \"r\") as log_file:\n lines: List[str] = log_file.readlines()\n\n for line_idx, line in enumerate(lines):\n if line.__contains__(\"epoch\"):\n first_index: int = line_idx - 1\n break\n\n epochs: np.ndarray = np.loadtxt(file_path, usecols=1, skiprows=first_index)\n lr: np.ndarray = np.loadtxt(file_path, usecols=7, skiprows=first_index)\n\n axes.plot(\n epochs,\n lr,\n # marker=marker,\n # markersize=kwargs.get(\"markersize\", 4),\n linestyle='-',\n lw=1.8,\n label=f'{model_name}',\n color=color\n )\n\n fontsize = kwargs.get(\"font_size\", 14)\n axes.set_ylabel('Learning rate [-]', fontsize=fontsize)\n axes.set_xlabel('Epochs [-]', fontsize=fontsize)\n axes.xaxis.set_major_locator(MaxNLocator(nbins=10, integer=True))\n axes.set_yscale(kwargs.get(\"scale\", \"linear\"))\n axes.tick_params(axis=\"both\", which=\"major\", labelsize=fontsize)\n axes.legend(fontsize=fontsize)\n axes.grid()\n plt.tight_layout()\n\n if kwargs.get(\"save\", True):\n os.makedirs(\"figures/\", exist_ok=True)\n plt.savefig(kwargs.get(\"save_name\", f\"figures/model.png\"), dpi=300)\n if kwargs.get(\"show\", True):\n plt.show()\n\n return fig, axes", "title": "" }, { "docid": "688640d2e8c0fe5fd2cee47fb25a91c2", "score": "0.64025366", "text": "def dual_plot_accuracy(trainingAccuracy, testingAccuracy, lr):\n plt.xlabel('Number of Epochs')\n plt.plot(trainingAccuracy, color = \"blue\", label = 'training accuracy')\n plt.plot(testingAccuracy, color = \"green\", label = 'testing accuracy')\n plt.title('Training vs Testing Accuracy for Learning Rate : ' + str(lr))\n lgd = plt.legend(loc=9, bbox_to_anchor=(0.5, -0.1),ncol=2)\n plt.savefig('training_vs_testing_'+str(lr)+'.png',bbox_extra_artists=(lgd,), bbox_inches='tight')", "title": "" }, { "docid": "97641d969d7c320f1325b0aae10ed825", "score": "0.63984543", "text": "def plot_acc(acc_v, acc_t, save_plots_path):\n\n plt.figure()\n plt.plot(acc_v, label='Validation acc')\n plt.plot(acc_t, label='Training acc')\n plt.legend()\n title = 'Accuracy per epoch'\n plt.title(title)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Accuracy\")\n plt.savefig(save_plots_path + \"sgd_accuracy_plot.png\")", "title": "" }, { "docid": "e7361f09473514bd94291907f1dbd4e1", 
"score": "0.6388655", "text": "def plot_xgboost_learning_curve(iterations, scorestrain, scorescv) -> None:\n plt.figure(figsize=(12, 7))\n plt.plot(iterations,scorestrain, 'r')\n plt.plot(iterations,scorescv, 'b')\n plt.xlabel('# training examples')\n plt.ylabel('MAE')\n plt.legend(['Training Set', 'CV set'], loc='lower right')\n plt.show()", "title": "" }, { "docid": "628e486b012325d2958b9a0a66a600ad", "score": "0.63880914", "text": "def plotting_graphs(y, tx, k_fold, initial_w, max_iters, gammas, lambdas, optimal_lambda_, optimal_gamma, i, seed=1, model=\"LOG_REG_GD\"):\n training_losses, testing_losses, training_accuracy, testing_accuracy = [], [], [], []\n \n for gamma in range(len(gammas)):\n loss_tr, loss_te, ca_tr, ca_te = total_cross_validation(y, tx, k_fold, initial_w, max_iters, gammas[gamma], optimal_lambda_, seed=1, batch_size=1, model=\"LOG_REG_GD\")\n training_losses.append(loss_tr)\n testing_losses.append(loss_te)\n training_accuracy.append(ca_tr)\n testing_accuracy.append(ca_te)\n cross_validation_visualization(gammas, training_losses, testing_losses, i, parameter = \"gamma_\") # doing only for losses\n\n training_losses, testing_losses, training_accuracy, testing_accuracy = [], [], [], []\n for lambda_ in range(len(lambdas)):\n loss_tr, loss_te, ca_tr, ca_te = total_cross_validation(y, tx, k_fold, initial_w, max_iters, lambdas[lambda_], optimal_gamma, seed=1, batch_size=1, model=\"LOG_REG_GD\")\n training_losses.append(loss_tr)\n testing_losses.append(loss_te)\n training_accuracy.append(ca_tr)\n testing_accuracy.append(ca_te)\n cross_validation_visualization(lambdas, training_losses, testing_losses, i, parameter = \"lambda_\")", "title": "" }, { "docid": "fc049ccea867dacc2fc651de7b8e1382", "score": "0.6367363", "text": "def plot_lr_func(lr_func: Callable, total_steps: int):\n ys = simulate_lr_func(lr_func, total_steps)\n plot_scatter(ys, ytitle='Learning rate', xtitle='Training step')", "title": "" }, { "docid": "26abfb62c6f3793bd5569d596683fcd8", "score": "0.6360308", "text": "def plot_cost(c_v, c_t, save_plots_path):\n\n plt.figure()\n plt.plot(c_v, label='Validation loss')\n plt.plot(c_t, label='Training loss')\n plt.legend()\n title = 'Loss per epoch'\n plt.title(title)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.savefig(save_plots_path + \"sgd_loss_plot.png\")", "title": "" }, { "docid": "09723e80b7c6529f0b74df7c37864f8b", "score": "0.635458", "text": "def plot_training_summary(filePath='',savePath='.s/imgs'):\n assert os.path.isfile(filePath) , f'{filePath} does not exist'\n try:\n with open(filePath, 'rb') as f:\n x = pickle.load(f)\n except :\n print(f'could not open {filePath}')\n exit(1)\n\n d_l = np.array(x['discriminator_loss']).ravel()\n g_l = np.array(x['Generator_loss']).ravel()\n acc_history = np.array(x['acc_history'])\n acc = acc_history.sum(axis=1) * 0.5\n acc_real = acc_history[:,1]\n acc_gen = acc_history[:,0]\n kl = np.array(x[\"kl_divergence\"]).ravel()\n\n n = np.arange(len(d_l))\n figname = os.path.split(filePath)[1].replace('.pickle','')\n title = 'Loss and Accuracy plot'+'\\n'+ figname\n\n fig = plt.figure(figsize=(19.20,10.80))\n fig.suptitle(title, fontsize=15,fontweight=\"bold\")\n\n axs1 = plt.subplot(222)\n # axs1.set_title(title,fontsize=5.0,fontweight=\"bold\")\n axs1.plot(n, g_l,label='Generator loss',linewidth=3)\n axs1.plot(n, d_l,label='Discriminator loss',linewidth=3)\n axs1.legend(loc=0, prop={'size': 13})\n axs1.set_ylabel('Loss',fontsize=15.0,fontweight=\"bold\")\n axs1.tick_params(labelsize=10)\n # 
axs1.tick_params(axis='x',which='both',bottom=False,top=False,labelbottom=False,labelsize=20)\n\n # axs2.plot(n, acc,'r',label='Discriminator accuracy',linewidth=4)\n axs2 = plt.subplot(221)\n axs2.plot(n, acc_gen,label='Accuracy on Generated',linewidth=3)\n axs2.plot(n, acc_real,label='Accuracy on Real',linewidth=3)\n axs2.legend(loc=0,prop={'size': 13})\n axs2.set_ylabel('Accuracy',fontsize=15.0,fontweight=\"bold\")\n # axs2.set_xlabel('Epoch',fontsize=15.0,fontweight=\"bold\")\n axs2.tick_params(labelsize=10)\n\n axs3 = plt.subplot(212)\n n = np.arange(0,(len(kl)*10),10)\n\n axs3.plot(n, kl,label='KL',linewidth=3)\n axs3.legend(loc=0,prop={'size': 13})\n axs3.set_ylabel('KL-Divergence',fontsize=15.0,fontweight=\"bold\")\n axs3.set_xlabel('Epoch',fontsize=15.0,fontweight=\"bold\")\n axs3.tick_params(labelsize=10)\n\n # plt.tight_layout()\n fig.tight_layout(rect=[0, 0.03, 1, 0.95])\n if not os.path.exists(savePath):\n os.makedirs(savePath)\n\n plt.savefig(os.path.join(savePath,figname+'.png'),dpi = 300)\n plt.close('all') #plt.close(fig)", "title": "" }, { "docid": "083d9a6126bd5a94725480fc7cc1e06b", "score": "0.6354436", "text": "def epoch_vizual(self, train_logs, path):\n fig = plt.figure(figsize=(20, 20))\n plt.rc(\"text\", usetex=True)\n ax1 = fig.add_subplot(2, 1, 1)\n ax2 = fig.add_subplot(2, 1, 2)\n\n ax1.semilogy(train_logs[\"g_grad\"], linewidth=7.0)\n ax1.set_title(\"Norm of Generator gradient\", fontsize=45)\n ax1.set_xlabel(\"Training Iteration\", fontsize=42)\n ax1.set_ylabel(\"Norm of gradient\", fontsize=42)\n ax1.tick_params(axis=\"x\", labelsize=40)\n ax1.tick_params(axis=\"y\", labelsize=40)\n ax1.grid()\n\n ax2.semilogy(train_logs[\"c_grad\"], linewidth=7.0)\n ax2.set_title(\"Norm of Discriminator gradient\", fontsize=45)\n ax2.set_xlabel(\"Training Iteration\", fontsize=42)\n ax2.set_ylabel(\"Norm of gradient\", fontsize=42)\n ax2.tick_params(axis=\"x\", labelsize=40)\n ax2.tick_params(axis=\"y\", labelsize=40)\n ax2.grid()\n\n plt.savefig(path)\n plt.show()", "title": "" }, { "docid": "9b65ffa06b3056b5bc3f64eaf29cd038", "score": "0.635388", "text": "def visualize_epoch(history):\n # Summarize history for accuracy\n plt.plot(history['acc']) # Training accuracy\n plt.plot(history['val_acc']) # Validation accuracy\n plt.title('Epoch Iteration vs. 
Accuracy')\n plt.xlabel('Epoch Number')\n plt.ylabel('Accuracy')\n plt.legend(['train', 'val'], loc='upper left')\n plt.show()", "title": "" }, { "docid": "733dc15b79e629a3ffa4cf15031feb6f", "score": "0.63411385", "text": "def plot_learning_curve(self, resolution=100):\n self.calculate_tour_distance()\n # Get the divisor to limit the number of samples.\n step = len(self.samples) // resolution\n if step == 0:\n step = 1\n # Get samples.\n x = list(range(0, len(self.samples) + 1, step))\n y = [(self.samples + [self.chromosomes[0].tour_distance])[i] for i in x]\n # Setup plot.\n plt.grid(True)\n plt.title('TSP Learning Curve: {0}'.format(str(self.chromosomes[0])))\n plt.xlabel('Samples (every {0} generations)'.format(step))\n plt.ylabel('Lowest Distance')\n plt.plot(x, y)\n # Save plot.\n timestamp = datetime.now().strftime('%m-%d-%Y_%I-%M-%S-%p')\n name = 'learning_curve_{0}.png'.format(timestamp)\n save_dir = os.path.join(os.path.abspath(PLOT_DIR), name)\n plt.savefig(save_dir, dpi=200)", "title": "" }, { "docid": "ce59f4b1b4c650026be0209c9773dc5d", "score": "0.63339585", "text": "def plot_learning_curve(model, X_train, y_train, scoring='recall'):\n\n cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=42)\n\n train_sizes, train_scores, test_scores = learning_curve(model, \n X_train, \n y_train, \n train_sizes=np.linspace(0.05, 1, 20),\n cv=cv,\n scoring=scoring,\n n_jobs=-1\n )\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n\n plt.plot(train_sizes, train_scores_mean, label = 'Train')\n plt.fill_between(train_sizes, \n train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, \n alpha=0.1)\n\n plt.plot(train_sizes, test_scores_mean, label = 'Val')\n plt.fill_between(train_sizes, \n test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, \n alpha=0.1)\n plt.legend()\n plt.ylabel('score')\n plt.xlabel('train sizes')\n if scoring=='recall':\n plt.ylim(0.6, 1)", "title": "" }, { "docid": "0c249dd68937df33a9202e5c3cded280", "score": "0.63328385", "text": "def _draw(train_accuracy_list, valid_accuracy_list):\n fig, ax = plt.subplots(1, 1)\n x_axis = np.arange(1, 21)\n ax.plot(x_axis, train_accuracy_list, label='Training')\n ax.plot(x_axis, valid_accuracy_list, label='Validation')\n ax.set_xlabel('K value')\n ax.set_ylabel('Accuracy')\n plt.legend(loc='best')\n plt.show()\n plt.close()", "title": "" }, { "docid": "b853c8a7b0b345faed311cf62880933d", "score": "0.633202", "text": "def plot_history(history=None):\r\n acc = history.history['acc']\r\n val_acc = history.history['val_acc']\r\n loss = history.history['loss']\r\n val_loss = history.history['val_loss']\r\n epochs = range(1, len(acc) + 1)\r\n plt.plot(epochs, acc, 'bo', label='Training acc')\r\n plt.plot(epochs, val_acc, 'r', label='Validation acc')\r\n plt.title('Training and Validation accuracy')\r\n plt.legend()\r\n\r\n plt.figure()\r\n plt.plot(epochs, loss, 'bo', label='Training loss')\r\n plt.plot(epochs, val_loss, 'r', label='Validation loss')\r\n plt.title('Training and Validation loss')\r\n plt.legend()\r\n plt.show()", "title": "" }, { "docid": "a4919aa76c64c12df39f0a238207283a", "score": "0.63313687", "text": "def plot_losses(self):\n\n plot_x = range(len(self.losses_log['train']))\n\n for type_data, type_plot, color, marker in [\n ('train', plt.plot, None, None),\n ('valid', plt.plot, None, None),\n ('saved', plt.scatter, 'g', 'x'),\n 
]:\n if type_data in self.losses_log.keys():\n plot_y = self.losses_log[type_data]\n if len(plot_y) > 0:\n type_plot(plot_x, plot_y, label=type_data, c=color, marker=marker)\n\n\n plt.legend()\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.grid()\n plt.gcf().patch.set_facecolor('white')", "title": "" }, { "docid": "0cc10a02de0562854d72dc0e342be7f5", "score": "0.6329032", "text": "def plot_learning_curve(classifier, X, y, measurements=[0.1, 0.325, 0.55, 0.775, 1.], metric=None, n_jobs=-1,\n save_to_folder=os.path.join(os.path.dirname(__file__), \"ExperimentResults\")):\n sns.set_style(\"whitegrid\")\n\n plt.figure()\n plt.title(\"Learning curves\")\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = skms.learning_curve(classifier, X, y, n_jobs=n_jobs,\n train_sizes=measurements, scoring=metric)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n\n file_name = _get_file_name(\"\", classifier, \"learning_curve\", \"png\")\n if not os.path.exists(save_to_folder):\n os.mkdir(save_to_folder)\n plt.savefig(os.path.join(save_to_folder, file_name))\n plt.close()", "title": "" }, { "docid": "93c89bc0818f3dd895b6dce09e41d5e7", "score": "0.63228965", "text": "def plot_acc_and_loss():\n # Accuracy results per epoch for training and validation data sets\n acc = history.history['accuracy']\n val_acc = history.history['val_accuracy']\n\n # Loss results per epoch for training and validation data sets\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n # Plot training and validation accuracy\n plt.figure(figsize=(8, 8))\n plt.subplot(1, 2, 1)\n plt.plot(range(len(acc)), acc, color='#9ACD32',\n lw=3, label='Training Accuracy')\n plt.plot(range(len(acc)), val_acc, color='#1E90FF',\n lw=3, label='Validation Accuracy')\n plt.xlabel(\"Epoch\")\n plt.legend(loc='lower right')\n plt.title('Training and Validation Accuracy')\n\n # Plot training and validation loss\n plt.subplot(1, 2, 2)\n plt.plot(range(len(loss)), loss, color='#9ACD32',\n lw=3, label='Training Loss')\n plt.plot(range(len(loss)), val_loss, color='#1E90FF',\n lw=3, label='Validation Loss')\n plt.xlabel(\"Epoch\", labelpad=8)\n plt.legend(loc='lower left')\n plt.title('Training and Validation Loss')\n\n plt.savefig('acc_and_loss.png')", "title": "" }, { "docid": "7128363289948844668684bb1c175725", "score": "0.63189256", "text": "def Plot_Lift_Curve(X, y, Estimators, Target_Class = None, Number_of_Splits = 50, Train_Test_Split = True, Test_Size =0.3, Random_State = None, Stratify = None):\n #Split the Data,if wanted, so that results aren't subject to overfitting\n #Set up figure for axes\n \n if Train_Test_Split:\n X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = Test_Size, random_state = Random_State, stratify = Stratify)\n else:\n X_train, X_test, y_train, y_test = X, X, y, y\n\n \n \n \n if isinstance(Estimators, 
(list, type(np.array))):\n number_of_axes = len(Estimators)\n \n if number_of_axes%2 ==0:\n length_axis_0 = number_of_axes/2\n else:\n length_axis_0 = (number_of_axes//2)+1\n\n fig, axes = plt.subplots(nrows = int(length_axis_0), ncols= 2, sharex= False, sharey= False, figsize = (12,int(length_axis_0)*4)) \n\n\n else:\n Estimators = np.array([Estimators])\n fig, axes = plt.subplots(1,1, figsize = (12,8))\n axes = np.array([axes])\n \n for ax, Estimator in zip(axes.ravel(), Estimators):\n\n #Fit the model\n model = Estimator.fit(X_train,y_train)\n\n #Get predicted classifications\n predictions = model.predict(X_test)\n #Predict scores so that samples can be ranked\n if hasattr(model, \"predict_proba\"):\n prediction_certainties_NDA = model.predict_proba(X_test)\n\n elif hasattr(model, \"decision_function\"):\n prediction_certainties_NDA = model.decision_function(X_test)\n\n else:\n print(\"Model doesn't have attribute: decision_function or predict_proba\")\n\n #Find which column in predictions to look at for ranking\n class_index = np.where(model.classes_ == Target_Class)[0][0]\n\n #Get the target column to rank by certainty\n if prediction_certainties_NDA.ndim >1:\n prediction_certainty_array = prediction_certainties_NDA[:, class_index]\n else:\n prediction_certainty_array = prediction_certainties_NDA\n\n\n #Get indicies of predictions sorted by certainty of correct classification\n sorted_certainty_indicies = np.argsort(prediction_certainty_array)\n \n #Rank the actual labels by certainty they are of class: \"Target_Class\"\n sorted_actual_labels = y_test[sorted_certainty_indicies]\n \n\n class_prior = sum(y_test == Target_Class)/len(y_test)\n \n\n def Lift(Sorted_Labels_, Portion_Targeted_, Class_Prior_, Target_Class_):\n\n #Work out how many samples we want to target\n number_to_target = np.floor(Portion_Targeted_ * len(Sorted_Labels_))\n number_to_target = int(number_to_target)\n\n #Create boolean for who we correctly targeted\n correctly_targeted_bool = (Sorted_Labels_[:number_to_target] == Target_Class_)\n \n #How many did we get correct\n number_correctly_classified = np.sum(correctly_targeted_bool)\n \n\n #Calculate lift\n lift_score = (number_correctly_classified)/(number_to_target*Class_Prior_) #Denominator is how many we would have got correct if random\n\n return lift_score\n\n #initialise list where lift scores go \n lift_scores_list = [] \n \n X_percent_targeted = np.arange(0,100, 100/Number_of_Splits)\n\n for i in X_percent_targeted:\n portion = (i+1)/(100)\n lift = Lift(Sorted_Labels_ = sorted_actual_labels, Portion_Targeted_ = portion, Class_Prior_ = class_prior, Target_Class_= Target_Class)\n\n lift_scores_list.append(lift)\n \n \n ax.plot(X_percent_targeted, lift_scores_list, \"r-\")\n \n ax.set_xlabel(\"Percentage of people Targeted\")\n ax.set_title(\"Lift Curve for: {}\".format(Estimator))\n ax.set_ylabel(\"Lift\")\n ax.set_yticks(np.arange(max(lift_scores_list)+2)) \n plt.tight_layout()\n\n\n return fig", "title": "" }, { "docid": "119d8040d186ceac358330dcb003c955", "score": "0.63074523", "text": "def plot_model_training_history(history_object):\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.plot(history_object.history['loss'])\n plt.plot(history_object.history['val_loss'])\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()", "title": "" }, { "docid": "ce99cb5b11ba6587b49c039efbe3daaa", "score": "0.63073224", "text": "def plot_accuracy(self):\n plt.clf() # clear figure\n history_dict = self.history.history\n 
history_dict.keys()\n acc = history_dict['acc']\n val_acc = history_dict['val_acc']\n epochs = range(1, len(acc) + 1)\n plt.plot(epochs, acc, 'bo', label='Training acc')\n plt.plot(epochs, val_acc, 'b', label='Validation acc')\n plt.title('Training and validation accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n\n plt.show()", "title": "" }, { "docid": "77cae9211f0e7d8112b5a2eeeec7b228", "score": "0.63038343", "text": "def plot_learning_curve(model, X_train, y_train, name='test', scoring='recall'):\n\n \n cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=42)\n\n train_sizes, train_scores, test_scores = learning_curve(model.best_estimator_, \n X_train, \n y_train, \n train_sizes=np.linspace(0.05, 1, 20),\n cv=cv,\n scoring=scoring,\n n_jobs=-1\n )\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n\n plt.plot(train_sizes, train_scores_mean, label = 'Train')\n plt.fill_between(train_sizes, \n train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, \n alpha=0.1)\n\n plt.plot(train_sizes, test_scores_mean, label = 'Val')\n plt.fill_between(train_sizes, \n test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, \n alpha=0.1)\n plt.legend()\n plt.ylabel('score')\n plt.xlabel('train sizes')\n if scoring=='recall':\n plt.ylim(0.6, 1)\n plt.savefig('../tree_figs/' + name + '.png', bbox_inches='tight')\n plt.close()", "title": "" }, { "docid": "3e6d91618a6b15768221aadda489d80c", "score": "0.6299842", "text": "def learning_curve(ww_training, rw_training, ww_test, rw_test):\n np.random.shuffle(ww_training)\n np.random.shuffle(rw_training)\n dif_int = len(ww_training) - len(rw_training)\n if dif_int < 0:\n dif_int = 0\n lower_size_int = len(ww_training) - dif_int\n accuracy_array = np.empty((lower_size_int, 2))\n \n for n_trial in range(1, lower_size_int + 1):\n accuracy_array[n_trial - 1] = [n_trial, 100 * experiment(ww_training[:n_trial], rw_training[:n_trial], ww_test, rw_test)]\n \n plt.title(\"Learning Curve\")\n plt.xlabel(\"Number of Training Items\")\n plt.ylabel(\"Accuracy (%)\")\n plt.plot(accuracy_array[:, 0], accuracy_array[:, 1])", "title": "" }, { "docid": "e7530cb297483c53f83dc100d73434ae", "score": "0.6299744", "text": "def train(self):\n\n steps = []\n print(\"Training the agent...\")\n for i in tqdm(range(self.n_episodes)):\n # let agent play\n episode_stats = self.play()\n steps.append(episode_stats[\"steps\"])\n\n if i % 10 == 0:\n self.eps = self.eps - i * self.reduction\n\n # learn from experience\n self.learn()\n\n plt.plot(steps)\n plt.show()", "title": "" }, { "docid": "51d14e0aebc445409e2ce9b5ec0dc91a", "score": "0.6295639", "text": "def training_curve(train_loss, perc_iter, val_loss, psnr, ssim, x, lr, epoch, iters_per_epoch, \n fig: matplotlib.figure.Figure, axis: matplotlib.axes.Axes, linewidth=0.25):\n # Linear scale of loss curve\n ax = axis[0]\n ax.clear()\n line1, = ax.plot(x, val_loss, label=\"Validation Loss\", color='red', linewidth=linewidth)\n line2, = ax.plot(x, train_loss, label=\"Train Loss\", color='blue', linewidth=linewidth)\n ax.plot(x, np.repeat(np.amin(val_loss), len(x)), linestyle=':', linewidth=linewidth)\n ax.set_xlabel(\"Epoch(s) / Iteration: {}\".format(iters_per_epoch))\n ax.set_ylabel(\"Image Loss\")\n ax.set_title(\"Loss\")\n\n if not np.isnan(perc_iter).all():\n ax = axis[4]\n ax.clear()\n line4, = ax.plot(x, perc_iter, 
label=\"Perceptual Loss\", color='green', linewidth=linewidth)\n ax.set_ylabel(\"Perceptual Loss\")\n\n ax.legend(handles=(line1, line2, line4, )) if not np.isnan(perc_iter).all() else ax.legend(handles=(line1, line2, ))\n\n # Log scale of loss curve\n ax = axis[1]\n ax.clear()\n line1, = ax.plot(x, val_loss, label=\"Validation Loss\", color='red', linewidth=linewidth)\n line2, = ax.plot(x, train_loss, label=\"Train Loss\", color='blue', linewidth=linewidth)\n ax.plot(x, np.repeat(np.amin(val_loss), len(x)), linestyle=':', linewidth=linewidth)\n ax.set_xlabel(\"Epoch(s) / Iteration: {}\".format(iters_per_epoch))\n ax.set_yscale('log')\n ax.set_title(\"Loss(Log scale)\")\n\n if not np.isnan(perc_iter).all():\n ax = axis[5]\n ax.clear()\n line4, = ax.plot(x, perc_iter, label=\"Perceptual Loss\", color='green', linewidth=linewidth)\n ax.set_ylabel(\"Perceptual Loss\")\n\n ax.legend(handles=(line1, line2, line4, )) if not np.isnan(perc_iter).all() else ax.legend(handles=(line1, line2, ))\n\n # Linear scale of PSNR, SSIM\n ax = axis[2]\n ax.clear()\n line1, = ax.plot(x, psnr, label=\"PSNR\", color='blue', linewidth=linewidth)\n ax.plot(x, np.repeat(np.amax(psnr), len(x)), linestyle=':', linewidth=linewidth)\n ax.set_xlabel(\"Epochs(s) / Iteration: {}\".format(iters_per_epoch))\n ax.set_ylabel(\"Average PSNR\")\n ax.set_title(\"Validation Performance\")\n\n ax.legend(handles=(line1, ))\n\n # Learning Rate Curve\n ax = axis[3]\n ax.clear()\n line1, = ax.plot(x, lr, label=\"Learning Rate\", color='cyan', linewidth=linewidth)\n ax.set_xlabel(\"Epochs(s) / Iteration: {}\".format(iters_per_epoch))\n ax.set_title(\"Learning Rate\")\n ax.set_yscale('log')\n\n ax.legend(handles=(line1, ))\n \n return fig, axis", "title": "" }, { "docid": "7f041d83c1a08843dffef745d5adbcd2", "score": "0.62874764", "text": "def training(override=None):\n name = BEST if override is None else override\n data = read(name)\n\n plt.figure(1)\n\n size = 5\n chunks = list(chunk_list(data[:, 1], size))\n averages = [sum(chunk) / len(chunk) for chunk in chunks]\n\n plt.plot(range(0, len(data), size), averages)\n plt.axhline(y=200, color='g', linestyle='--')\n\n try:\n success = [i[0] for i in data if i[2] >= 200][0] - 200\n plt.axvline(x=success, color='r')\n except IndexError:\n print('[WARN] Learner did not succeed')\n\n plt.title('Learning Performance')\n plt.xlabel('Episode')\n plt.ylabel('Episode Total Reward (Averaged Per {})'.format(size))\n\n plt.savefig(PLOT_DIR + 'training.{}.png'.format(name))", "title": "" }, { "docid": "b9e766f64fb7f208f87f0d8d13661b12", "score": "0.6285915", "text": "def plot(self, **kwargs):\n plt.plot(self.epochs, self.kl_weights, **kwargs)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"KL Loss Weight\")", "title": "" }, { "docid": "4e8b3436ae7471fa1b6c26da3f9311fc", "score": "0.6276074", "text": "def plot_loss(train_log,valid_log):\n \n num_log = len(train_log) # number of element in input array\n \n fig = plt.figure()\n fig.suptitle(\"Loss vs. 
Epoch\")\n ax = fig.add_subplot(111)\n ax.set_xlabel(\"Epoch\")\n ax.set_ylabel(\"Loss\")\n ax.plot(np.arange(0,num_log),train_log,'r-',label='train_loss')\n ax.plot(np.arange(0,num_log),valid_log,'b-',label='validation_loss')\n ax.legend(loc=4,prop={'size':15})", "title": "" }, { "docid": "e17734e3a4bdc51a40a4b05d75d42eb6", "score": "0.6271056", "text": "def part_b(t_lst, loss_test_lst, loss_train_lst):\n\n plt.figure()\n plot_graph(loss_test_lst, t_lst, \"q5_part_b_test\", \"\", \"Iteration vs Loss Test\", \"Loss\", \"Iteration\")\n plt.figure()\n plot_graph(loss_train_lst, t_lst, \"q5_part_b_train\", \"\", \"Iteration vs Loss Train\", \"Loss\", \"Iteration\")", "title": "" }, { "docid": "ca9b7d62ac761328a408f091d495eed2", "score": "0.62491584", "text": "def populate_graph():\n \"\"\"\n plt.figure()\n x = [2,3,4,5,6,7,8,9,10,11,12]\n #train = [0.989662, 0.997180, 0.999060, 0.999060, 0.999060, 1.0, 1.0]\n #test = [0.939850, 0.954887, 0.939850, 0.947368, 0.947368, 0.932331, 0.939850]\n train = [0.986842,0.996241,0.997180,0.998120,0.999060,0.999060,0.999060,0.999060,0.999060,0.999060,0.999060]\n test = [0.932331,0.932331,0.947368,0.947368,0.947368,0.947368,0.947368,0.954887,0.954887,0.954887,0.954887]\n \n\n plt.plot(x,train,'-r', label=\"Train Accuracy\")\n plt.plot(x,test,'-b', label=\"Test Accuracy\")\n plt.title(\"TF-IDF Passes vs Accuracy\")\n plt.xlabel(\"Passes\")\n plt.ylabel(\"Accuracy\")\n plt.axis([2,12,0.93,1.02])\n plt.legend(loc=\"center right\")\n plt.show()\n plt.close()\n \"\"\"\n plt.figure()\n x=[1,2,3,4,5,6,7,8]\n train =[0.965226,0.985902,0.989662,0.990602,0.995301,0.997241,1.0,1.0]\n test = [0.932331,0.954887,0.954887,0.954887,0.962406,0.962406,0.962406,0.962406]\n plt.plot(x,train,'-r', label=\"Train Accuracy\")\n plt.plot(x,test,'-b', label=\"Test Accuracy\")\n plt.title(\"Passes vs Accuracy\")\n plt.xlabel(\"Passes\")\n plt.ylabel(\"Accuracy\")\n plt.axis([1,8,0.93,1.02])\n plt.legend()\n plt.show()\n plt.close()", "title": "" }, { "docid": "d35ac155396f7209e187ac4f38b8de15", "score": "0.6243018", "text": "def plot_graphs(self):\n\n plt.plot(self.history.history[self.metric])\n plt.plot(self.history.history['val_' + self.metric], '')\n plt.xlabel(\"Epochs\")\n plt.ylabel(self.metric)\n plt.legend([self.metric, 'val_'+self.metric])\n plt.show()", "title": "" }, { "docid": "33e2f6a994e0ade5ccad224d56f48b35", "score": "0.6230619", "text": "def neural_net_implementation(x, y, tag=\"\"):\n from keras.models import Sequential\n from keras.layers import Dense\n from keras.callbacks import EarlyStopping\n from sklearn.model_selection import train_test_split\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)\n\n model = Sequential()\n model.add(Dense(50, input_dim=x.shape[1], activation='relu'))\n model.add(Dense(150, activation='relu'))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])\n monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, mode='auto')\n history = model.fit(x, y, validation_data=(x_test, y_test), callbacks=[monitor], verbose=0, epochs=5000)\n\n score, acc = model.evaluate(x_test, y_test)\n print(\"Score (RMSE) : {}\".format(score))\n print(\"accuracy is :{}\".format(acc))\n\n df = pd.DataFrame(history.history)\n df.plot(subplots=True, grid=True, figsize=(10, 15))\n plt.savefig(tag)", "title": "" }, { "docid": "b108737f98f9f52d38eb216dd1cb473d", "score": "0.6223654", "text": "def plot_learning_curve(estimator,fitted_dt, title, X, y, 
ylim=None, cv=None,\n n_jobs=1, train_sizes=np.linspace(.1, 1.0, 10)):\n\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n\n return train_sizes, train_scores_mean, test_scores_mean", "title": "" } ]
b0235b98074062dc84c332d6568a79c7
Flip a bit and win!!!!!!
[ { "docid": "c4a9a8e4c27026a69c7db13d18888223", "score": "0.0", "text": "def flip_bit_to_win(num):\n \"\"\" Convert to binary \"\"\"\n binary = int(num, 2)\n current = 0\n last = 0\n longest = 0\n for i in range(sequence_length):\n bit = bit_manipulation.getbit(binary, i)\n if bit:\n current += 1\n longest = max(longest, current + last + 1)\n else:\n last = current\n current = 0\n return longest", "title": "" } ]
[ { "docid": "ce646fdea7446fd490869ab2c3aa4a03", "score": "0.7966279", "text": "def flip(self):\n self._repeat(table=bit_flipped)", "title": "" }, { "docid": "a5472cfb27a488c804cc7408fd66cca3", "score": "0.7797152", "text": "def test_bit_flip(self):\n\n self.run_steane_test(\"bit flip\", True, False)", "title": "" }, { "docid": "07a66b6896f903a57e010ffbd046655a", "score": "0.74029297", "text": "def _flip_bit(self, seed):\n flipper = 1 << random.randint(0, _n_bits - 2)\n return seed ^ flipper", "title": "" }, { "docid": "a8cb1559daf488ffdc1b51e13254ea87", "score": "0.7287755", "text": "def flipbits(byte):\n\n bytecopy = 0\n for i in range(8):\n if (byte >> i) & 1 == 1:\n bytecopy += 1\n bytecopy = bytecopy << 1\n\n return bytecopy", "title": "" }, { "docid": "3ce4c60af288319dfd9ba332248b0df5", "score": "0.7200212", "text": "def flip(num):\n return num * -1", "title": "" }, { "docid": "e56b4301fa05185d589d0bff376157e5", "score": "0.7164742", "text": "def flip(array, i):\n array[i] ^= 1", "title": "" }, { "docid": "d57685a31359612e4048fa9902584ffd", "score": "0.7127928", "text": "def flip(self):\n self.flipped = not self.flipped", "title": "" }, { "docid": "333d362f2fed38df49481300e5fa5bab", "score": "0.70924306", "text": "def flip(self, im):\n return im", "title": "" }, { "docid": "fff3997b1079c60f81d9954b32e4124b", "score": "0.7054641", "text": "def __perform_flip(self, image):\n image = np.fliplr(image)\n return image", "title": "" }, { "docid": "fc11d1d9a704f40cf0f81fa4d7ac8078", "score": "0.7006961", "text": "def __flip(self):\n self.board_arr = np.rot90(self.board_arr, 2)\n self.board_arr *= -1 # converts all black to white + v.v.", "title": "" }, { "docid": "2f3140257ad983c84703193a05f55d6c", "score": "0.69987965", "text": "def flip(self,c):\n l=[0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15];\n return ((l[c&0x0F]) << 4) + l[(c & 0xF0) >> 4];", "title": "" }, { "docid": "c7d2cd26b369ee4e392734be011fd491", "score": "0.6993518", "text": "def setFlip(self, flip):\n flipParity = 0\n i = Edge.BR - 1\n while i >= Edge.UR:\n self.eo[i] = int((flip % 2))\n flipParity = flipParity + self.eo[i]\n flip = flip / 2\n i = i - 1\n self.eo[Edge.BR] = int(((2 - flipParity % 2) % 2))\n print \"Done with setFlip()\"\n print self.eo", "title": "" }, { "docid": "fa97627a80ad00b896ff4af8b0b4e919", "score": "0.693533", "text": "def flip(self, flip): \n self.data.d = flip_array(self.im, flip)", "title": "" }, { "docid": "cd807fc28ad553acae84b719687679ba", "score": "0.6930386", "text": "def flip(S): #Used to help convert an 8 bit number to a TC number\r\n if S == '':\r\n return ''\r\n if S[0] == '0':\r\n return '1' + flip(S[1:])\r\n else:\r\n return '0' + flip(S[1:])", "title": "" }, { "docid": "438cbd5100d376b7761514e24d88e499", "score": "0.69281137", "text": "def flip(self):\n self._flip = not self._flip", "title": "" }, { "docid": "07de94ffb3fb776101f6437e54687333", "score": "0.6923269", "text": "def flip_color(self):\r\n\r\n pass", "title": "" }, { "docid": "eac0e47ce7cb781a84104e98ae0c98be", "score": "0.6909622", "text": "def flip_bit(bit):\n\n\tif type(bit) != str:\n\t\traise TypeError(\"The input parameter should be of string type.\")\n\n\tif bit != \"0\" and bit != \"1\":\n\t\traise ValueError(\"The parameter should be a 0 or 1, encoded as a string\")\n\n\tif bit == \"0\":\n\t\treturn \"1\"\n\telse:\n\t\treturn \"0\"", "title": "" }, { "docid": "1430a62a423d2e5e1e82c89a0324b81d", "score": "0.6888029", "text": "def flip_one_bit(x: float):\n bit_array = bitstring.BitArray(float=x, length=32)\n 
original_bits = np.array(bit_array).astype(bool)\n\n    bit_to_flip = int(np.random.uniform(0, 1) * 32)\n    original_bits[bit_to_flip] = np.invert(original_bits[bit_to_flip])\n\n    bit_array.bin = ''.join(['0' if not bit else '1' for bit in original_bits])\n\n    return bit_array.float", "title": "" }, { "docid": "649a3fbf5131903919e8edc28a906c9b", "score": "0.68879527", "text": "def invert_bit(bit):\n    if bit:\n        bit = 0\n    else:\n        bit = 1\n    return bit", "title": "" }, { "docid": "f4da891c7d019848286e0b02ccd31065", "score": "0.68744814", "text": "def flip_tile(tile):\n    return np.fliplr(tile)", "title": "" }, { "docid": "c78684111ff0b27fe6103ef5e5cbb92b", "score": "0.6811382", "text": "def flip_int(num, b):\n    o = 0\n    for i in range(b/2):\n        o |= ((num & (0xff << i*8)) << (b-(2*i+1))*8)\n        o |= ((num & (0xff << (b-(i+1))*8)) >> (b-(2*i+1)) * 8)\n    if b % 2 == 1:\n        o |= (num & (0xff << (b/2)*8))\n    return o", "title": "" }, { "docid": "0f733e72894a2d1e261718d3994a150a", "score": "0.6782888", "text": "def flip(self, bev_direction='horizontal'):\n        pass", "title": "" }, { "docid": "e78e7d059ad1c5105a6fec56c870ba2d", "score": "0.6776725", "text": "def bit_flip(x: torch.Tensor, p: float) -> torch.Tensor:\n    # language=rst\n    x = x.float()\n    i = torch.bernoulli(p * torch.ones_like(x)).byte()\n    x = x.byte()\n    x[i] = ~x[i]\n    return x.float()", "title": "" }, { "docid": "7df152a542293294f60edb176925bfcc", "score": "0.67738813", "text": "def Flip(src, dst=None, flip_mode=None): # real signature unknown; restored from __doc__\n    pass", "title": "" }, { "docid": "b336fe80ea67d9581cbd2ff1c389c93a", "score": "0.6757504", "text": "def flip(self, x):\n\t\tif x: pass #pygame.transform.flip(tmpimage,1,0)\n\t\telse: pass #pygame.transform.flip(tmpimage,0,1)\n\t\tpass", "title": "" }, { "docid": "549c89c24bbf98736755b7ec7c6fcb0a", "score": "0.67574906", "text": "def __invert__(self):\n        if self.flip:\n            return Isometry(self.shift, self.flip)\n        else:\n            return Isometry(-self.shift, self.flip)", "title": "" }, { "docid": "268c2bd5f5a797c2377dbd6ba39c4d38", "score": "0.6709418", "text": "def _fliplr(im):\n    return np.fliplr(im)", "title": "" }, { "docid": "9341ff03e373414899b4e8ef5a9f6c55", "score": "0.66236055", "text": "def flip_a_coin(self):\n        if random.randint(0, 1) == 0:\n            self.__mode = \"ECB\"\n        else:\n            self.__mode = \"CBC\"", "title": "" }, { "docid": "1dad58e9836ff2c2c3bda470ca9a46bd", "score": "0.6607879", "text": "def getFlip(self):\n        ret = 0\n        i = Edge.UR\n        while i < Edge.BR:\n            ret = int((2 * ret + self.eo[i]))\n            i += 1\n        return ret", "title": "" }, { "docid": "d40cf07cebef22ca469d5aa82b4c8ae3", "score": "0.6575785", "text": "def bswap1(i):\n\n    return i & 0xff", "title": "" }, { "docid": "33cb744026f5441f198e633b6a38f72d", "score": "0.65521175", "text": "def turn(n):\r\n    return (((n & -n) << 1) & n) != 0", "title": "" }, { "docid": "93e211d5397c98af8fa509855f8e7e2e", "score": "0.65465194", "text": "def Invert(self):", "title": "" }, { "docid": "e3cd4164d3bfd6193145ee7fba15c300", "score": "0.65242046", "text": "def flip_ver(w):\n    return [s[::-1] for s in w]", "title": "" }, { "docid": "fe502485059d6f83734fcd50d82cbc06", "score": "0.65087336", "text": "def flip_array_state(array_state):\n    if array_state == 1:\n        array_state = -1\n    else:\n        array_state = 1\n    return array_state", "title": "" }, { "docid": 
"4cb682b90805e30a41247d07175dbce7", "score": "0.6489366", "text": "def __flip(self, img):\n return [img.flip(2), img.flip(3)]", "title": "" }, { "docid": "291d0bff41113b7ce4e94d90520c74d9", "score": "0.6459794", "text": "def flip(self):\n try:\n S=copy.deepcopy(self.S)\n self.S.S11=S.S22\n self.S.S22=S.S11\n self.S.S12=S.S21\n self.S.S21=S.S12\n except AttributeError:\n raise RuntimeError('structure not solved yet')\n self.layers = self.layers[::-1]\n self.d = self.d[::-1]", "title": "" }, { "docid": "3eda56c84b2f1c68efce53f33fb54d07", "score": "0.6453623", "text": "def double_flip(image_array):\n return vertical_flip(horizontal_flip(image_array))", "title": "" }, { "docid": "ebaaff58c0b6227a198ae0df5e7867b8", "score": "0.6423945", "text": "def vflip(self) -> None:\n lib.TCOD_image_vflip(self.image_c)", "title": "" }, { "docid": "e495067156897ea67ac12b29171f99a3", "score": "0.642288", "text": "def flip(x, y):\n return y, x", "title": "" }, { "docid": "08f160ffe1610cb5c1171a6174ad3b48", "score": "0.6421534", "text": "def flip_image(image):\n flipped_image = np.fliplr(image)\n return flipped_image", "title": "" }, { "docid": "dab8c9f10b6cd5ae0bdfbf6f44216bd6", "score": "0.641418", "text": "def flip(self):\r\n self.hidden = not self.hidden", "title": "" }, { "docid": "e32fdc60ce16fe211c005ccb82a6c93d", "score": "0.64125526", "text": "def reverseBits1(self, n):\n # basically, swapping the first halp and the second half\n n = (n >> 16) | (n << 16)\n n = ((n & 0xff00ff00) >> 8) | ((n & 0x00ff00ff) << 8)\n n = ((n & 0xf0f0f0f0) >> 4) | ((n & 0x0f0f0f0f) << 4)\n n = ((n & 0xcccccccc) >> 2) | ((n & 0x33333333) << 2)\n n = ((n & 0xaaaaaaaa) >> 1) | ((n & 0x55555555) << 1)\n return n", "title": "" }, { "docid": "58d6cf3a7a2ee8fd98d795069d02c4b9", "score": "0.6402065", "text": "def torch_fliplr(x: Tensor):\n return x.flip(3)", "title": "" }, { "docid": "9a2291fa9dfd5328ed7799c782a51c02", "score": "0.6359118", "text": "def flip_bits(seq, width):\n out = []\n h = pow(2, width//2)\n for x in seq:\n high_bits = x//h\n low_bits = x % h\n out.append(high_bits + low_bits * h)\n return out", "title": "" }, { "docid": "417b59144fbc91e5bec5d48e84a056ee", "score": "0.6343225", "text": "def flip_player(self):\n if self.player == 1:\n self.player = -1\n else:\n self.player = 1", "title": "" }, { "docid": "9328a0951e7d7f8f061f32e1ec35cfe2", "score": "0.63378704", "text": "def _flip(self, direction):\n if self.direction != direction:\n self.image = pygame.transform.flip(self.image, True, False)\n self.direction = direction", "title": "" }, { "docid": "5158e176444433bd018efda493e087c0", "score": "0.6337502", "text": "def flip(self):\n if self.filter_mask is None:\n return self.raw_flip\n else:\n return self.raw_flip[self.filter_mask]", "title": "" }, { "docid": "ccd37b5a2d5d44d26ce9232c728105a9", "score": "0.6332241", "text": "def flip(self, a, axis=None):\n raise NotImplementedError()", "title": "" }, { "docid": "1eb0950b3004d106b5e2a491563a8115", "score": "0.63109", "text": "def flip_right():\n # TODO: Assert battery is high enough to perform flip before attempting\n send_and_wait(\"flip r\")", "title": "" }, { "docid": "a14c86188e2b886acfa969dec867bfec", "score": "0.6303685", "text": "def flip(self, axis=\"y\"):\n return self.mirror(axis=axis)", "title": "" }, { "docid": "4620a9591fe83ea47782397cba8ad8e0", "score": "0.62975", "text": "def flip(self, bot, event):\n if not event.text:\n bot.message(\"(╯°□°)╯︵ ┻━┻\", target=bot.channel)\n else:\n normal = 
\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890.,!?))(_><\"\n upsidedown = \"∀ᗺƆpƎℲפHIſʞ˥WNOԀQɹS┴∩ΛMX⅄ZɐqɔpǝɟƃɥᴉɾʞlɯuodbɹsʇnʌʍxʎzƖᄅƐㄣϛ9ㄥ860˙'¡¿(()¯<>\"\n mapping = {a: b for a, b in zip(normal, upsidedown)}\n flipped = \"\".join(mapping.get(c, c) for c in reversed(event.text))\n bot.message(\"(╯°□°)╯︵ \" + flipped)", "title": "" }, { "docid": "b251e9ff7d187fbc691bd4d5d2bf4cb4", "score": "0.6292544", "text": "def flip(self, mode='h'):\n # TODO: Implement the flip function. Remember to record the boolean values is_horizontal_flip and\n # is_vertical_flip.\n \n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n x = self.x\n params = self.params\n \n if mode=='h':\n params['is_horizontal_flip'] = not params['is_horizontal_flip']\n x = np.flip(x, axis=2)\n if mode=='v':\n params['is_vertical_flip'] = not params['is_vertical_flip']\n x = np.flip(x, axis=1)\n if mode=='hv':\n params['is_horizontal_flip'] = not params['is_horizontal_flip']\n x = np.flip(x, axis=2)\n params['is_vertical_flip'] = not params['is_vertical_flip']\n x = np.flip(x, axis=1)\n \n self.x = x", "title": "" }, { "docid": "72806623c263b028502b5ce5c37ab63a", "score": "0.62729913", "text": "def flip_image(image):\n\n return pygame.transform.flip(image, True, False)", "title": "" }, { "docid": "683cd9e93bb70d688ccaa351c9bf832d", "score": "0.62667066", "text": "def flipImage(image):\n return cv2.flip(image, 1)", "title": "" }, { "docid": "9a9990487129e329051a960a968185e7", "score": "0.6265247", "text": "def restore_2complement(msb, lsb):\n ...", "title": "" }, { "docid": "2a2bde6e369d5dda8b13398ea63e57c7", "score": "0.62650806", "text": "def flip_image(self, img, mode='h'):\n\n if mode == 'h':\n aug = iaa.Fliplr()\n else:\n aug = iaa.Flipud()\n \n flipped_img = aug(image=img)\n return flipped_img", "title": "" }, { "docid": "75380059610786d2dbc3b3ed56c45d46", "score": "0.6259178", "text": "def flip_ud(self,data):\n\n pass", "title": "" }, { "docid": "91a5f969e41341e2c19bc7c36e248bdb", "score": "0.62308806", "text": "def flip80(data: bytes) -> bytes:\n if len(data) % 4 != 0:\n raise ValueError('data must have a size multiple of 4')\n return b''.join(x[::-1] for x in ichunks(data, 4))", "title": "" }, { "docid": "1e4dd6276985a0398f8988350e985c2c", "score": "0.62290126", "text": "def flip_hor(w):\n return w[::-1]", "title": "" }, { "docid": "b627bfb49948b4c4f66ab165e1012526", "score": "0.6221524", "text": "def flip_backward():\n # TODO: Assert battery is high enough to perform flip before attempting\n send_and_wait(\"flip b\")", "title": "" }, { "docid": "8215f0821080aebaeab86ed679799b0f", "score": "0.621459", "text": "def byteswap( ):", "title": "" }, { "docid": "628a4b38b881ec61c1ad6c34ff777316", "score": "0.6213563", "text": "def flip(self, x=False, y=False):\n self._flipx = x\n self._flipy = y", "title": "" }, { "docid": "f445ef18329e25c50675b29a56616f5d", "score": "0.6210618", "text": "def swap16(x):\n return ((x & 0xff00) >> 8) | \\\n ((x & 0x00ff) << 8)", "title": "" }, { "docid": "ac5013259009fbb7a38644fe0f44ca8a", "score": "0.6208504", "text": "def flipVert():\n\n Im_pix = getRGB('in.png') # read in the in.png image\n print(\"The first two pixels of the first row are\", Im_pix[0][0:2])\n\n New_pix = Im_pix[::-1]\n\n\n saveRGB(New_pix, 'out.png')", "title": "" }, { "docid": "d80dabb5203c857685c9f565a8081536", "score": "0.6208115", "text": "def 
vflip(val=None):\r\n\t\tCamera.modified[0] = True\r\n\t\tif Camera.opened:\r\n\t\t\t# print(\"Vflip %d\"%val)\r\n\t\t\treturn camera.vflip(val)\r\n\t\treturn None", "title": "" }, { "docid": "ab82fc69023971940328eddf5892517e", "score": "0.61914253", "text": "def flip(self):\r\n # Flip the card and update the image\r\n self.set[self.index].flip()\r\n self.update_image()", "title": "" }, { "docid": "9d1a5d2324c49310a562d0658e0d016e", "score": "0.6190427", "text": "def flip(x):\n return np.sign(np.random.rand(1)-0.5) * x", "title": "" }, { "docid": "16ba589744430eba8ebb3a7cde1b2fb8", "score": "0.6181124", "text": "def flip_x(self):\n return self._flip_x", "title": "" }, { "docid": "7c33441ed607357f8ef47bce59d07a6b", "score": "0.6179949", "text": "def test_roundtrip_flip(self):\n # Compress and decompressed a flipped LDR image\n decompFile = self.get_tmp_image_path(\"LDR\", \"decomp\")\n\n command = [\n self.binary, \"-tl\",\n \"./Test/Data/Tiles/ldr.png\",\n decompFile, \"4x4\", \"-fast\", \"-yflip\"]\n\n self.exec(command)\n\n # Compare TL (0, 0) with TL - should match - i.e. no flip\n colorRef = self.get_color_refs(\"LDR\", \"TL\")\n\n img = tli.Image(decompFile)\n colorVal = img.get_colors([(0, 0)])\n\n self.assertColorSame(colorRef[0], colorVal[0])", "title": "" }, { "docid": "b5bc0969eadf8ac4b83d2d5001aa30cd", "score": "0.61564654", "text": "def flip_v(self):\n lst_t = self.m\n self.m = lst_t[::-1]\n self.clean_all_but_m()", "title": "" }, { "docid": "49aefdb7fd9e3b08d38c75210e61335a", "score": "0.6145693", "text": "def flip_effect(image: Image.Image) -> Image.Image:\n image = ImageOps.mirror(image)\n\n return image", "title": "" }, { "docid": "848d56ce66575af50e2356731a6c33cb", "score": "0.6134909", "text": "def flip_lr(img):\n return np.flip(img.copy(), 4)", "title": "" }, { "docid": "6158a59e135e2b7e70a4cb59b398c617", "score": "0.6128792", "text": "def vertical_flip(self, picture, output_encoding):\n img = images.Image(picture.data)\n img.vertical_flip()\n transformed = img.execute_transforms(output_encoding)\n return transformed", "title": "" }, { "docid": "dc9e623de2ff7cce69d1ca07d1394e27", "score": "0.6115801", "text": "def flip_alleles(self):\n assert(len(self.alt_alleles) == 1)\n self.ref_allele, self.alt_alleles[0] = self.alt_alleles[0], self.ref_allele\n self.alt_allele = self.alt_alleles[0]\n self.flipped = True", "title": "" }, { "docid": "237f2cd4a1b3ee96e0b983e660266821", "score": "0.6113052", "text": "def bswap2(i):\n\n return (i >> 8) & 0xff, i & 0xff", "title": "" }, { "docid": "324d3665cac1291f7d1126015a566ef5", "score": "0.6107709", "text": "def two_bit_flip(node):\r\n\r\n node_list = list(node)\r\n out = set()\r\n bit_length = len(node_list)\r\n for i in range(bit_length):\r\n for j in range(bit_length):\r\n new_node = node_list[:]\r\n if i != j:\r\n new_node[i] = ('1' if node[i] == '0' else '0')\r\n new_node[j] = ('1' if node[j] == '0' else '0')\r\n else:\r\n new_node[i] = ('1' if node[i] == '0' else '0')\r\n out.add(''.join(new_node))\r\n return out", "title": "" }, { "docid": "5ebd1f8b92354eb971e6205d8df5149a", "score": "0.6102803", "text": "def pairwiseSwap(n):\n #put odd at even places then even at odd places\n return ((n & 0xAAAAAAAA) >> 1) | ((n & 0x55555555) << 1)", "title": "" }, { "docid": "ee0e00c5aa68fed60bda000a08cd99cd", "score": "0.6102466", "text": "def __flip__(x):\n return np.flip(x, axis = 1)", "title": "" }, { "docid": "e4855c6ca70e4fe028908b595a26e6ca", "score": "0.60916454", "text": "def invert_bitstring(string):\n return string.replace(\"1\", 
\"2\").replace(\"0\", \"1\").replace(\"2\", \"0\")", "title": "" }, { "docid": "2aac649f5f256757daa47d1d938a3ca4", "score": "0.6083563", "text": "def flip ( self, direction, position, color ):\n \n if direction == 1:\n # north\n row_inc = -1\n col_inc = 0\n elif direction == 2:\n # northeast\n row_inc = -1\n col_inc = 1\n elif direction == 3:\n # east\n row_inc = 0\n col_inc = 1\n elif direction == 4:\n # southeast\n row_inc = 1\n col_inc = 1\n elif direction == 5:\n # south\n row_inc = 1\n col_inc = 0\n elif direction == 6:\n # southwest\n row_inc = 1\n col_inc = -1\n elif direction == 7:\n # west\n row_inc = 0\n col_inc = -1\n elif direction == 8:\n # northwest\n row_inc = -1\n col_inc = -1\n \n places = [] # pieces to flip\n i = position[0] + row_inc\n j = position[1] + col_inc \n\n if color == WHITE:\n other = BLACK\n else:\n other = WHITE\n \n if i in range( 8 ) and j in range( 8 ) and self.board[i][j] == other:\n # assures there is at least one piece to flip\n places = places + [(i,j)]\n i = i + row_inc\n j = j + col_inc\n while i in range( 8 ) and j in range( 8 ) and self.board[i][j] == other:\n # search for more pieces to flip\n places = places + [(i,j)]\n i = i + row_inc\n j = j + col_inc\n if i in range( 8 ) and j in range( 8 ) and self.board[i][j] == color:\n # found a piece of the right color to flip the pieces between\n for pos in places:\n # flips\n self.board[pos[0]][pos[1]] = color", "title": "" }, { "docid": "a6e38d49dca2e80287ba7c1506df3419", "score": "0.608276", "text": "def flip_forward():\n # TODO: Assert battery is high enough to perform flip before attempting\n send_and_wait(\"flip f\")", "title": "" }, { "docid": "572c35cc26040ffe2cd10d5f29384115", "score": "0.60784805", "text": "def bswap3(i):\n\n return (i >> 16) & 0xff, (i >> 8) & 0xff, i & 0xff", "title": "" }, { "docid": "1585ff409341421811fd930a873ba95d", "score": "0.606961", "text": "def _flip(shp):\n for ring in workflow.utils.generate_rings(shp):\n for i,c in enumerate(ring):\n ring[i] = c[1],c[0]\n return shp", "title": "" }, { "docid": "7671783ff28736f076a276364df8bae7", "score": "0.6061207", "text": "def reverse_bit(num):\n result = 0\n for _ in range(8):\n result <<= 1\n result += (num & 1)\n num >>= 1\n return result", "title": "" }, { "docid": "b3bc6bff7f45526183b2a3d8428875fc", "score": "0.6051063", "text": "def _flip(self):\n time = self._get_ticks()\n self._ticks = time - self._time\n self._elapsed += self._ticks\n self._time = time\n # Once per second...\n if self._elapsed < 1:\n return\n elif self._elapsed >= self._reset_threshold:\n self.reset()\n else:\n self._elapsed %= 1\n # Save stats and clear counters.\n self._tps = 0.0\n self._fps = self._frame_count\n self._ups = self._update_count\n self._frame_count = self._update_count = 0", "title": "" }, { "docid": "c8ffb1679636e4236000a7f54b8da12c", "score": "0.6049809", "text": "def doflip(dec,inc):\n if inc <0:\n inc=-inc\n dec=dec+180.\n if dec > 360: dec=dec-360.\n return dec,inc", "title": "" }, { "docid": "7c479c196d50309c47a8f24bfec5ea6c", "score": "0.6039424", "text": "def prob_flip(self):\n random_num = random.randint(1, 10)\n\n if random_num == 1:\n self.flip = True", "title": "" }, { "docid": "60141950f85200d6b49139f7cf3a72ee", "score": "0.60368615", "text": "def auto_flip_normal(self):\n return False", "title": "" }, { "docid": "b680b75fdb31343f3c23a15665d31117", "score": "0.60357386", "text": "def flip(stack, n):\n return inverse(stack[:n][::-1]) + stack[n:]", "title": "" }, { "docid": "c62b005fcf730347399136d7478e1430", "score": 
"0.6029708", "text": "def random_flip(image, steering):\n if np.random.randint(2) == 0:\n image = np.fliplr(image)\n steering = -steering\n\n return image, steering", "title": "" }, { "docid": "b751ba9a19b9c482e71da95b4ceeed5b", "score": "0.6020218", "text": "def flip_board(self):\n\n self.board = self.get_flipped_board()", "title": "" }, { "docid": "beca608e9a88236973aca21ba7824d9a", "score": "0.6015986", "text": "def flip_bytes(self, bytes):\n byte_list = list(bytes)\n if binascii.hexlify(self.magic_number) in [MAGIC_SWAP, MAGIC_SWAP_NANO]:\n byte_list.reverse()\n return ''.join(byte_list)", "title": "" }, { "docid": "866d2e67526175406fad301f86587134", "score": "0.60141283", "text": "def doFlip(board, color, pos, direction):\n\tcurrX = pos[0] + direction[0]\n\tcurrY = pos[1] + direction[1]\n\twhile board[currX][currY] == opponent(color):\n\t\tboard[currX][currY] = color\n\t\t(currX, currY) = (currX + direction[0], currY + direction[1])", "title": "" }, { "docid": "7e381c139205b97cb1d82fd901888094", "score": "0.60135734", "text": "def flip (self):\r\n return Link(self[1], self[0])", "title": "" }, { "docid": "9c6d159fb28e2bcedf2ada70fa296e38", "score": "0.60082066", "text": "def __invert__(self):\n product = Bitmap(self.cardinality)\n for i in range(0, len(self.bit_array)):\n product.bit_array[i] = ~self.bit_array[i]\n return product", "title": "" }, { "docid": "3fff317a8f3556cda00b2306676b04f9", "score": "0.60057265", "text": "def flip(point):\n \n x, y = point\n return (x, -y)", "title": "" }, { "docid": "cab5ff7e17e6df6147a6f8dbeb5fba01", "score": "0.6002178", "text": "def _invert_sub_bytes(state: List[int]) -> List[int]:\n return SimpleAES._substitute(state, SimpleAES.INVERSE_SBOX)", "title": "" }, { "docid": "e001c860e560f1e9698e6906da81f4eb", "score": "0.6000485", "text": "def mirror(array):\n return np.rot90(np.flipud(array), 3)", "title": "" }, { "docid": "fb8b6635cb630c222711252a987441e3", "score": "0.59873736", "text": "def reverse_bits(x: int) -> int:\n i = 0\n j = 63\n while i < j:\n x = swap_bits(x, i, j)\n i += 1\n j -= 1\n\n return x", "title": "" } ]
a596b7b2e171e5f8cb5c520718640e74
Takes in a save directory and outputs a list of dictionaries with the
[ { "docid": "1d4acaacaf0b082acc2d5fa21ec896ba", "score": "0.0", "text": "def get_escapes(save_dir):\n\n escapes = []\n with open(os.path.join(save_dir, \"esc.11\"), 'r') as f:\n try:\n # Skip the header\n next(f)\n except StopIteration: # Empty file\n return []\n \n for line in f:\n data_line = line.split()\n escapes.append({\n \"id\": int(data_line[5]), # id is the 6th item in row\n \"time\": float(data_line[0]), # Physical time is 1st item in row\n \"type\": int(data_line[4]) # Star type is the 5th item in row\n })\n\n print 'parsed escapes from ' + save_dir\n\n return escapes", "title": "" } ]
[ { "docid": "b3150e0f984564995f68996ee9055be9", "score": "0.66876465", "text": "def parsed_files():\n files = []\n for filename in os.listdir(SAVE_DIRECTORY):\n path = os.path.join(SAVE_DIRECTORY, filename)\n if os.path.isfile(path):\n files.append(filename)\n return files", "title": "" }, { "docid": "ebd7bb813b4191ed668c986047fb6b3e", "score": "0.6362293", "text": "def load_json(filename, savepath='/'):\n if filename in os.listdir(savepath):\n return json.load(open(savepath+filename, 'r'))\n else:\n print(\"{} not in directory\".format(filename))\n return {}", "title": "" }, { "docid": "1f098d7dddfa8e6cc051797b01c6a235", "score": "0.62440974", "text": "def get_final_dicts(folder_path: str,\n without: List[str] = (\"learn-embeddings\",)) \\\n -> List[Dict[str, Any]]:\n run_folder_paths = [os.path.abspath(os.path.join(folder_path, x))\n for x in sorted(os.listdir(folder_path))\n if x not in without]\n\n return [get_entry_dict_from_json(os.path.join(path, \"results_val.json\"))\n for path in run_folder_paths]", "title": "" }, { "docid": "d9ab37fbd222c0decc3fe133bc73abbd", "score": "0.61829424", "text": "def list_dic(path):\n return [ dic for dic in listdir(path) if not isfile(join(path,dic))]", "title": "" }, { "docid": "fb61303537c9e0537b8cd5cdd6c6107d", "score": "0.6160078", "text": "def get_saved_model_files(model_save_dir: str, model_name=\"model\") -> List:\n return sorted(\n [\n # Split off generation_ prefix to return just number, which is enough to sort all\n # of the tuples lexicographically (there is at most one model for each generation)\n (int(os.path.dirname(model_path).split(\"generation_\", 1)[1]), model_path)\n for model_path in glob.glob(\n os.path.join(\n get_model_save_path(model_save_dir, \"*\"), f\"{model_name}_best.npz\"\n )\n )\n ]\n )", "title": "" }, { "docid": "c995fb1f4dc5366d6e6cdab48da7ac36", "score": "0.6137265", "text": "def save_file(save_dict,filename):\n\n #Finding repository home directory\n data_dir = os.path.dirname(os.path.abspath(__file__))\n data_dir = os.path.dirname(data_dir)\n \n #Saving trapping data to JSON file\n data_dir = os.path.dirname(data_dir) + \"/trapping_data\"\n data_file = data_dir + \"/{}.json\".format(filename)\n \n with open(data_file,\"w\") as write_file:\n json.dump(save_dict,write_file)\n \n return True", "title": "" }, { "docid": "39c572ed6e89d01d75e5ac3ab91af6fd", "score": "0.603015", "text": "def export_songs(self):\n exportdict={}\n consongs=self.csongs\n for song in consongs:\n currsong=consongs[song]\n currsong.check_streak()\n currsong.check_level()\n exportdict[currsong.song_name]=currsong.get_song()\n with open('songs2.json', 'w') as json_file:\n json.dump(exportdict, json_file)", "title": "" }, { "docid": "e2d17f87bd91c638e97c91700ae2067e", "score": "0.60284495", "text": "def get_save_files():\n dir_path = get_dir()\n files = os.listdir(dir_path)\n save_files = []\n for file in files:\n if file.endswith(\".txt\"):\n save_files.append(file)\n return save_files", "title": "" }, { "docid": "f74b297f2317c1c3322c1294df097b83", "score": "0.59729433", "text": "def get_dict(path):\n field = {}\n contents = os.listdir(path)\n for item in contents:\n key = item.split('.')[0]\n field[key] = get_data(item, path)\n\n return field", "title": "" }, { "docid": "987670a454b236137692116d750161ca", "score": "0.59442055", "text": "def make_dict(experiment_dir, basedir):\n results = {}\n for filename in os.listdir(os.path.join(basedir, experiment_dir)):\n if filename[-3:] == 'npy':\n path = os.path.join(basedir, experiment_dir, filename)\n 
results[filename[:-4]] = np.load(path)\n    \n    return results", "title": "" }, { "docid": "1a877d1da2db40b9a3cce02da6093003", "score": "0.59196275", "text": "def getSetsFromDirDict(path):\n    import os\n    import h5py\n    # if the path is not to a dictionary, return empty directory\n    if not os.path.isdir(path):\n        return {}\n    # form empty directory\n    data = {}\n    # convert path to directory object\n    dir = os.fsencode(path)\n    # iterate through files in directory\n    for fn in os.listdir(dir):\n        # decode the filename to string\n        filename = os.fsdecode(fn)\n        # if the file name ends with .hdf5\n        if filename.endswith(\".hdf5\"):\n            # attempt to open HDf5 in read mode\n            h5_file = h5py.File(filename,'r')\n            # call function to get all datasets in file\n            datasets = getAllDatasets(h5_file)\n            # update dictionary with found datasets\n            data.update(datasets)\n            h5_file.close()\n    return data", "title": "" }, { "docid": "8f327994d8aa45ec29b206f511fab95e", "score": "0.5914333", "text": "def list_filenames():\n    _, filenames = default_storage.listdir('saved_data')\n    return [filename for filename in filenames]", "title": "" }, { "docid": "89e2a5d6ca5948fd8d07fd2f0ac5ec3e", "score": "0.5868946", "text": "def toJSON(directory):\n    for filename in os.listdir(directory):\n        input_file = directory + filename\n        output_file = directory + \"json/\" + filename[:-4] + \".json\"\n        if filename.endswith(\".txt\"):\n            writeDist(input_file, output_file)", "title": "" }, { "docid": "a5f01c688a893c018755693b63d6d7bc", "score": "0.5847419", "text": "def save_parses(corpus, lexicon, savepath):\n    parse_dict = dict() \n    for i, (ch_sent, en_sent) in enumerate(corpus):\n        parses = parse_forests(ch_sent, en_sent, lexicon) \n        parse_dict[i] = parses \n    f = open(savepath + 'parse-dict.pkl', 'wb')\n    pickle.dump(parse_dict, f, protocol=4)\n    f.close()", "title": "" }, { "docid": "111033d062ece2cb814772e1cf74d7c88", "score": "0.5819619", "text": "def read(self) -> List[Dict]:\n    file_list = []\n    # generate a list of files in subdirectories\n    for root, _dirs, files in os.walk(self.path):\n        # for pattern in self.file_filters:\n        for match in filter(self.file_filters.match, files):\n            # for match in fnmatch.filter(files, pattern):\n            path = Path(os.path.join(root, match))\n            # ignore the .json file created by the Store\n            if path.is_file() and path.name != self.json_name:\n                # filter based on depth\n                depth = len(path.relative_to(self.path).parts) - 1\n                if self.max_depth is None or depth <= self.max_depth:\n                    file_list.append(self._create_record_from_file(path))\n\n    return file_list", "title": "" }, { "docid": "b9dd8efc80d6a84236b6dd7a4a4f524e", "score": "0.58180135", "text": "def create_dictionary(self):\n        for filename in self.path:\n            with open(filename, 'rt', encoding='utf-8') as f:\n                contents = f.read()\n            if filename == 'data/disambiguations_en.tql':\n                print('processing disambigution file')\n                contents = (contents.replace(\"<http://dbpedia.org/ontology/wikiPageDisambiguates>\", ''))\n                save_file = \"output/disambiguate_offline_dict.p\"\n\n            elif filename == 'data/redirects_en.tql':\n                print('processing redirects file')\n                contents = (contents.replace(\"<http://dbpedia.org/ontology/wikiPageRedirects>\", ''))\n                save_file = 'output/redirect_offline_dict.p'\n\n            else:\n                print('it skips the file ' + filename)\n                continue\n\n            contents = (contents.replace(\"<http://en.wikipedia.org/wiki/\", ''))\n            contents = contents.split('\\n')\n\n            for content in contents:\n                array = []\n                array.append(content)\n                self.big_list.append(array[0].split(' '))\n\n        for list in self.big_list:\n            key = 
(list[0].replace('<http://dbpedia.org/resource/', '').replace('>', '').replace(\n '_(disambiguation)', '').replace('_', ' '))\n # print(key)\n if len(list) > 2:\n self.list_key.append(key)\n value = (list[2].replace('<', '').replace('>', ''))\n self.list_value.append(value)\n\n for key in self.list_key:\n self.dict[key] = []\n\n for i, key in enumerate(self.list_key):\n self.dict[key].append(self.list_value[i])\n\n if not os.path.isdir('output/'):\n os.makedirs('output')\n with open(save_file, \"wb\") as f:\n pickle.dump(self.dict, f)", "title": "" }, { "docid": "c4c8569939213cdeb7cc6d5d229e3e32", "score": "0.58179826", "text": "def generate_data_dict(root_path):\n\n file_dict = {}\n for (dirpath, dirnames, filenames) in os.walk(root_path):\n for file in filenames:\n file_dict[file] = os.path.join(dirpath, file)\n\n return file_dict", "title": "" }, { "docid": "1a82b066aed929f99801918b55bdea97", "score": "0.57961017", "text": "def get_eurostats_file_list(s): \n rdf_path_prefix = \"data/rdf/eurostats/\"\n observation_list = []\n for file in os.listdir(rdf_path_prefix):\n observation = {}\n observation_name = str(os.path.basename(file).split('.')[0])\n observation['id'] = observation_name\n observation_list.append(observation)\n return observation_list", "title": "" }, { "docid": "9fc14d67d65dbd57bad428f1196a290c", "score": "0.5788976", "text": "def list_files_info():\n _, filenames = default_storage.listdir('saved_data')\n files = [{'name': filename,\n 'created': default_storage.get_created_time(f'saved_data/{filename}'),\n 'modified': default_storage.get_modified_time(f'saved_data/{filename}')\n } for filename in filenames if re.search('^[a-z0-9]', filename)]\n\n return files", "title": "" }, { "docid": "5096a3ec26994f88ec3ba6c712bb9fda", "score": "0.5774086", "text": "def _load_data():\n data = {}\n for file_name in os.listdir(dir_data):\n name, extension = os.path.splitext(file_name)\n if extension != '.json':\n continue\n file_path = os.path.join(dir_data, file_name)\n with open(file_path, 'r') as file_obj:\n data[name] = json.load(file_obj)\n\n data['model_paths'] = [os.path.join(dir_models, f)\n for f in os.listdir(dir_models)]\n data['2D_files'] = [os.path.join(dir_2D, f) for f in os.listdir(dir_2D)]\n return data", "title": "" }, { "docid": "522067b406854e6b31bcd6f042a892a5", "score": "0.5773998", "text": "def get_archive_details(self):\n\t\tpath = os.path.join(self.paths[\"raw\"])\n\t\titems = os.listdir(path)\n\t\textensions = self.get_supported_archive_extensions()\n\t\tarchives = {}\n\n\t\t# iterate through directory contents\n\t\tfor item in items:\n\t\t\tfor extension in extensions:\n\t\t\t\tif item.endswith(extension):\n\t\t\t\t\tname = item[:-len(extension)]\n\t\t\t\t\tarchives[name] = {\n\t\t\t\t\t\t\"name\": item,\n\t\t\t\t\t\t\"extension\": extension,\n\t\t\t\t\t\t\"path\": os.path.join(path, item)\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\n\t\treturn archives", "title": "" }, { "docid": "5c41295ca0bcd03447586b93ed6ff172", "score": "0.5767471", "text": "def story_dict(self):\n global SCREENPLAY_DIR\n\n screenplay_dir = SCREENPLAY_DIR + self.currentGame.get()[:-4]\n\n path_dict = {}\n for splay_name in os.listdir(screenplay_dir):\n splay_path = os.path.join(screenplay_dir, splay_name)\n\n if os.path.isfile(splay_path) and splay_name.endswith(\".txt\"):\n path_dict[splay_name[:-4]] = splay_path\n\n return path_dict", "title": "" }, { "docid": "9b9ae490567e845d768f4c41dd9002e6", "score": "0.5732806", "text": "def save(self):\n my_obj_dict = {}\n for key in FileStorage.__objects:\n 
my_obj_dict[key] = FileStorage.__objects[key].to_dict()\n with open(FileStorage.__file_path, 'w') as file_path:\n json.dump(my_obj_dict, file_path)", "title": "" }, { "docid": "a6faeb562d8ba73b6c9f0cde1ab99a73", "score": "0.57288295", "text": "def retrieve_imgs(dir_path, filename):\n # List of folders contained in dir_path folder\n Dir = [f'{dir_path}{dir}' for dir in next(os.walk(dir_path))[1]]\n try:\n # On some OS the root dict is also in the list, must be removed\n Dir.remove(dir_path)\n except ValueError:\n pass\n\n path_dict = dict()\n for dir in Dir:\n name = os.path.basename(os.path.normpath(dir)) # Extract name of study\n path = os.path.abspath(dir) # Turn into absolute paths\n path_dict[name] = f'{path}/{filename}'\n\n return path_dict", "title": "" }, { "docid": "83adf1e83824c6f9c179cf5f05abfab6", "score": "0.57074124", "text": "def get_mediums(folder, prefix=DATA_PATH):\r\n directory = prefix + folder\r\n mediums = dict()\r\n for filename in os.listdir(directory):\r\n with open(directory + SEPERATOR + filename, \"r\") as f:\r\n mediums[filename] = json.load(f)\r\n return mediums", "title": "" }, { "docid": "c6e8fb5e37594db3bee6625d9a7c6678", "score": "0.5695201", "text": "def make_records(self,directory):\n npz_files = glob.glob(os.path.join(directory, '*.npz'))\n meta_files = glob.glob(os.path.join(directory,'*.geojson'))\n\n records = []\n for npz in npz_files:\n ii = npz.split('/')[-1].split('.')[0]\n meta = [m for m in meta_files if m.split('/')[-1].split('.')[0]==ii][0]\n records.append({'data':npz, 'meta':meta})\n\n shuffle(records)\n pickle.dump(records, open(os.path.join(directory,'records.pickle'),'wb'))", "title": "" }, { "docid": "8327c94df7df0e2f919538c8ea965036", "score": "0.56923634", "text": "def get_avail_worlds(self):\n worlds = {}\n savesdir = mclevelbase.saveFileDir\n if os.path.exists(savesdir):\n for dirent in os.listdir(savesdir):\n dirent_path = os.path.join(savesdir, dirent)\n if os.path.isdir(dirent_path):\n test_path = os.path.join(dirent_path, 'level.dat')\n if os.path.exists(test_path):\n worlds[dirent] = test_path\n return worlds", "title": "" }, { "docid": "716f5f2d385239579fb82cda2b821cfd", "score": "0.5688074", "text": "def json_files():\n for (i, file_) in enumerate(os.listdir(DATA_DIR)):\n if file_.endswith('.json'):\n yield os.path.join(DATA_DIR, file_)", "title": "" }, { "docid": "a6d35504a25ef3d654f87b69d4d62619", "score": "0.56841433", "text": "def test_wg_save_to_file(self):\n r1 = Rectangle(1, 1, 1, 1, 1)\n r2 = Rectangle(2, 2, 2, 2, 2)\n l = [r1, r2]\n Rectangle.save_to_file(l)\n with open(\"Rectangle.json\", \"r\") as f:\n ls = [r1.to_dictionary(), r2.to_dictionary()]\n self.assertEqual(json.dumps(ls), f.read())", "title": "" }, { "docid": "d37c7aaeb2dfd345677d76294faa7140", "score": "0.56829023", "text": "def FSaveOutput(cls, path, fileName):\n print(cls.archive)\n print(cls.objects)\n data = dict()\n for n in cls.archive['objects'].keys():\n data[str(n)] = {\n \"ID\" : cls.archive['objects'][n].id,\n \"locations\" : tuple(map(tuple, cls.archive['objects'][n].locations)),\n \"frames\" : cls.archive['objects'][n].frames,\n \"left\": list(map(int, cls.archive['objects'][n].left)),\n \"top\": list(map(int, cls.archive['objects'][n].top)),\n \"width\": list(map(int, cls.archive['objects'][n].width)),\n \"height\" : list(map(int, cls.archive['objects'][n].height)),\n \"area\": list(map(int, cls.archive['objects'][n].area))\n\n }\n for n in cls.objects['objects'].keys():\n if str(n) not in data:\n data[str(n)] = {\n \"ID\" : 
cls.objects['objects'][n].id,\n                    \"locations\" : tuple(map(tuple, cls.objects['objects'][n].locations)),\n                    \"frames\" : cls.objects['objects'][n].frames,\n                    \"left\": list(map(int, cls.objects['objects'][n].left)),\n                    \"top\": list(map(int, cls.objects['objects'][n].top)),\n                    \"width\": list(map(int, cls.objects['objects'][n].width)),\n                    \"height\" : list(map(int, cls.objects['objects'][n].height)),\n                    \"area\": list(map(int, cls.objects['objects'][n].area))\n\n                }\n        path = os.path.join(path, fileName) \n        with open(path, 'w') as outFile:\n            json.dump(data, outFile)\n        return data", "title": "" }, { "docid": "35b952eed32c715126a006bcb64763e8", "score": "0.5679103", "text": "def get_files_list(self):\n        list_of_files = os.listdir(os.getcwd() + \"/dictionary\")\n        return list_of_files", "title": "" }, { "docid": "23796e940fc0b1f9c07e688bb36a5f31", "score": "0.5651889", "text": "def get_all_final_dicts(folder_path: str) -> List[List[Dict[str, Any]]]:\n    all_final_tuples = list()\n\n    for folder_path in [os.path.abspath(os.path.join(folder_path, x))\n                        for x in sorted(os.listdir(folder_path))]:\n        all_final_tuples.append(get_final_dicts(folder_path))\n\n    return all_final_tuples", "title": "" }, { "docid": "9d6a22dc854b005f247954e7776b320a", "score": "0.5649538", "text": "def get_file_list_from_dict_file(filename):\n    previousSearchDictionary = {}\n    try:\n        with open(filename,'rb') as fp:\n            previousSearchDictionary = pickle.load(fp)\n    except IOError, e:\n        print \"Failed to find previous file search backup. %s\" % e\n    return previousSearchDictionary", "title": "" }, { "docid": "4cad9745ece421d33715d96776506855", "score": "0.56466347", "text": "def books_dict():\n\n    import os\n    \n    books_dir = \"./Data/Books\"\n    \n    book_titles = {}\n    for language in os.listdir(books_dir):\n        \n        authors = {}\n        \n        for author in os.listdir(books_dir + \"/\" + language):\n            titles = []\n            for title in os.listdir(books_dir + \"/\" + language + \"/\" + author):\n                titles.append(title.replace(\".txt\", \"\"))\n            \n            authors[author] = titles\n        \n        book_titles[language] = authors\n\n    return book_titles", "title": "" }, { "docid": "9a6ab32a2dbfcbf14239cad08581d23d", "score": "0.5646264", "text": "def save(self):\n        this_dict = {}\n        for key in FileStorage.__objects:\n            this_dict[key] = FileStorage.__objects[key].to_dict()\n        with open(FileStorage.__file_path, 'wt') as json_file:\n            json.dump(this_dict, json_file)", "title": "" }, { "docid": "11033d062ece2cb814772e1cf74d7c88", "score": "0.5642413", "text": "def save(self):\n        sdict = {}\n        for k, v in FileStorage.__objects.items():\n            sdict[k] = v.to_dict()\n        with open(self.__file_path, mode=\"w\") as f:\n            json.dump(sdict, f)", "title": "" }, { "docid": "dd6c9dce54b2de496af733a161f1e478", "score": "0.5638316", "text": "def load_saves():\n    if os.path.exists(\"mydict.dict\"):\n        my_dict = corpora.Dictionary.load(\"mydict.dict\")\n    if os.path.exists(\"mycorpus.corp\"):\n        my_corpus = corpora.MmCorpus(\"mycorpus.corp\")\n    if os.path.exists(\"mydocs.json\"):\n        with open(\"mydocs.json\", \"r\") as afile:\n            doc_map = json.load(afile)\n    else:\n        return None\n    return (doc_map, my_dict, my_corpus)", "title": "" }, { "docid": "9661eb83680eca2df8e86ba7ecc337aa", "score": "0.5635972", "text": "def save_json_profile(self, dic):\n\n        for k in self.sound_buttons.keys():\n            files = str(Path(\"/\".join([str(self.parent_path), self.sound_buttons[k].text()])))\n            if files not in dic['Files']:\n                dic['Files'].append(files)\n        os.chdir(self.config_path)\n        with open(f'{dic[\"Name\"]}.json', 'w') as json_file:\n            json.dump(dic, json_file)", 
"title": "" }, { "docid": "28c80da4ba35519ab7d3b737548d9593", "score": "0.56190675", "text": "def scan_data() -> dict:\n geo_files = {}\n for file in os.listdir(__data_folder__):\n if file.endswith(\".csv\"):\n geo_files[file] = os.path.join(__data_folder__, file)\n return geo_files", "title": "" }, { "docid": "ed1888d9277d2589b12a5efa3edaabdc", "score": "0.55890954", "text": "def save(self):\n dic = {}\n for k, v in FileStorage.__objects.items():\n dic[k] = v.to_dict()\n with open(FileStorage.__file_path, \"w\") as f:\n json.dump(dic, f)", "title": "" }, { "docid": "e0d1c00cf3f82413e3c22483649504b5", "score": "0.55861855", "text": "def dir2_dict(path: Path) -> dict:\n return {parse_datafile_name(file.name): file for file in path.iterdir() if is_datafile_path(file)}", "title": "" }, { "docid": "010541d738438ca340e7cd849f3d6fe2", "score": "0.5581375", "text": "def save(self):\r\n dic_json = {}\r\n for key, value in self.__objects.items():\r\n dic_json[key] = value.to_dict()\r\n with open(self.__file_path, 'w') as f:\r\n json.dump(dic_json, f)", "title": "" }, { "docid": "c29ab1e78de6a2303d100539aa19eb66", "score": "0.5576172", "text": "def js_list_dicts(url):\n\n\ttry:\n\t\tld=os.listdir(url)\n\texcept: return []\n\n\tld=[i for i in ld if i[-5:]==\".json\" and os.access(\"{}/{}\".format(url,i),os.R_OK)]\n\n\treturn ld", "title": "" }, { "docid": "5c8ef57dd7576fec984c9d996229cf64", "score": "0.5569854", "text": "def get_paper(paths):\n data = []\n for path in paths:\n with open(path) as json_file:\n data.append(json.load(json_file))\n return data", "title": "" }, { "docid": "42d8c1d77ec4c8c53a3a734ea92c3bc9", "score": "0.556464", "text": "def _save_data(self, data_dict: dict) -> None: \r\n \r\n sub_directories = ['train', 'val', 'test']\r\n \r\n file_counter = 0\r\n\r\n for sub in sub_directories:\r\n for dat in data_dict[sub].items():\r\n file_path = self.output_dir / sub / dat[0]\r\n for fp in dat[1]:\r\n output_name = file_path / \"{:05d}.npy\".format(file_counter)\r\n np.save(output_name, fp)\r\n file_counter += 1", "title": "" }, { "docid": "0023a6c46583861dea1ddc0d6a207262", "score": "0.5560187", "text": "def save_args_as_json(path_save_dir: str, args: argparse.Namespace) -> None:\n path_json = os.path.join(path_save_dir, 'predict_options.json')\n while os.path.exists(path_json):\n number = path_json.split('.')[-2]\n if not number.isdigit():\n number = '-1'\n number = str(int(number) + 1)\n path_json = os.path.join(\n path_save_dir, '.'.join(['predict_options', number, 'json'])\n )\n with open(path_json, 'w') as fo:\n json.dump(vars(args), fo, indent=4, sort_keys=True)\n print('Saved:', path_json)", "title": "" }, { "docid": "3f3ae3c79ebc463169f02cfa77300469", "score": "0.55545866", "text": "def save_list(outlist,fname):\r\n with open(fname,'wb') as f: \r\n pickle.dump(outlist, f)", "title": "" }, { "docid": "fcf6446b2ce4363ad04df096337be293", "score": "0.5552494", "text": "def save_json(ls, file_path):\n with open(file_path, 'w') as fp:\n fp.write('[\\n ' + ',\\n '.join(json.dumps(i) for i in ls) + '\\n]')", "title": "" }, { "docid": "40e9469712103675af5b20d4c0d20eea", "score": "0.5536901", "text": "def _load_data(self) -> dict: \r\n\r\n print(\"sort data into new directories\")\r\n data = {x : [] for x in self.class_names}\r\n\r\n for i in self.input_dir.rglob('*.npy'):\r\n obs = np.load(i, allow_pickle=True).item()\r\n\r\n file_class = self.class_names[obs['action']-1]\r\n\r\n if file_class not in self.exclusion_list:\r\n\r\n new_file = {\r\n \"obs\": obs['observation'],\r\n 
\"condition\": obs['condition']\r\n }\r\n\r\n data[file_class].append(new_file)\r\n \r\n return data", "title": "" }, { "docid": "d2abbac9db3052f826e856b8782fd154", "score": "0.553421", "text": "def model(self):\n if not self.trained:\n raise ValueError, \"Not trained\"\n paths=[\n \"model-final.others\",\n \"model-final.phi\",\n \"model-final.theta\",\n \"model-final.tassign\",\n \"model-final.twords\",\n \"wordmap.txt\",\n ]\n retval = {}\n for path in paths:\n full_path = os.path.join(self.workdir, path)\n if os.path.exists(full_path):\n with open(full_path) as f:\n retval[path] = f.read()\n return retval", "title": "" }, { "docid": "c641ee8b217c32e7562c06d4f3c3ee77", "score": "0.55215305", "text": "def dir_to_dict(rootdir):\n try:\n subdirs = next(os.walk(rootdir))[1]\n except StopIteration:\n raise FileNotFoundError(\n 'The path provided \\'{}\\' '\n 'does not exist or '\n 'does not match the desired file structure.'.format(rootdir)\n )\n \n if not subdirs:\n return [os.path.join(rootdir, file)\n for file in os.listdir(rootdir)]\n d = dict()\n for s_dir in subdirs:\n d[s_dir] = dir_to_dict(os.path.join(rootdir, s_dir))\n return d", "title": "" }, { "docid": "7f28b99ced001edbb00d648d7e89ee00", "score": "0.55203843", "text": "def save(self):\n dict_obj = {}\n for key, value in FileStorage.__objects.items():\n dict_obj[key] = value.to_dict()\n with open(FileStorage.__file_path, \"w\") as f:\n json.dump(dict_obj, f)", "title": "" }, { "docid": "ecf694101d979196781b9d1a222b30c9", "score": "0.5520274", "text": "def directories_data_to_metax(files):\n metax_directories = []\n for file in files:\n metax_directory_object = {}\n metax_directory_object[\"identifier\"] = file[\"identifier\"]\n metax_directory_object[\"title\"] = file[\"title\"]\n metax_directory_object[\"description\"] = file[\"description\"] if \"description\" in file else \"\"\n metax_directory_object[\"use_category\"] = file[\"useCategory\"]\n metax_directories.append(metax_directory_object)\n return metax_directories", "title": "" }, { "docid": "7f3e7b20604e742292b5fe3448c8691b", "score": "0.5519393", "text": "def extract_files(directory):\n dic = {}\n folders = []\n for folder in os.listdir(directory):\n folder = os.path.join(directory, folder)\n if os.path.isdir(folder):\n folders.append(folder)\n for folder in folders:\n archives_folder = os.path.join(folder, \"archives\")\n files = os.listdir(archives_folder)\n iterate_files(files, archives_folder, os.path.join(folder, \"wav\"))\n return dic", "title": "" }, { "docid": "a18301182cfb89e429d7c4ce703031ed", "score": "0.55163425", "text": "def load_files(directory):\n result = {}\n for filename in os.listdir(directory):\n with open(os.path.join(directory, filename)) as f:\n result[filename] = f.read()\n\n return result", "title": "" }, { "docid": "5f8bdb1aec975144b6ca89305e4be8c1", "score": "0.551416", "text": "def _save_file(self):\n file = open(self.file_name, \"w\")\n json.dump([x.to_dict() for x in self._grade_list], file, indent=4)\n file.close()", "title": "" }, { "docid": "094c0123689a48ad570f536dd78d0576", "score": "0.55115896", "text": "def dump_dicts(self):\n if not os.path.exists(self.pickle_dir):\n os.makedirs(self.pickle_dir)\n data = {\n 'item2num': self.encoder.item2num,\n 'num2item': self.encoder.num2item\n }\n with open(self.pickle_path, 'wb') as file:\n pickle.dump(data, file, pickle.HIGHEST_PROTOCOL)", "title": "" }, { "docid": "943a6b1c094ba099ccc509ff47b8dced", "score": "0.55070764", "text": "def get_env():\r\n\r\n dirs_files = {}\r\n if 'ToSort' in 
os.listdir():\r\n for item in os.walk(os.path.join(os.getcwd(), 'ToSort')):\r\n file_list = []\r\n directory = str(item[0])\r\n for i in item[2]:\r\n file_list.append(i)\r\n dirs_files[directory] = file_list\r\n return dirs_files\r\n else:\r\n print('There is no ToSort directory present here!')", "title": "" }, { "docid": "6b4f16ed9345eb8c510061cb114a3831", "score": "0.5496495", "text": "def __restore(self,d, basepath=\"/\", filterDirs=None):\n try:\n for key in d.GetListOfKeys():\n kname = key.GetName()\n if key.IsFolder():\n if filterDirs and kname not in filterDirs: \n continue\n for i in self.__restore(d.Get(kname), basepath+kname+\"/\"):\n yield i\n else:\n yield basepath+kname, d.Get(kname)\n except AttributeError, e:\n self._logger.debug(\"Ignore reading object of type %s.\",type(d))", "title": "" }, { "docid": "0c146ac8d97af076180f9fdb2903ee11", "score": "0.5489158", "text": "def list_files():\n entries = [meta.__json__() for meta in FILES.values()]\n return json.dumps({\"files\": entries})", "title": "" }, { "docid": "2388620505254402580e19332958d144", "score": "0.5474785", "text": "def load_files(directory): # successfully implemented\n files = os.listdir(directory)\n file_data_list = list()\n\n for file in files:\n with open(os.path.join(directory, file), encoding='utf-8') as f:\n text = f.read()\n file_data_list.append((file, text))\n print(f\"loaded {file}\")\n\n file_data = dict(file_data_list)\n \n return file_data", "title": "" }, { "docid": "fa2a04f36473ca7e6258acb718143704", "score": "0.5472414", "text": "def save(data, directory):\n\n with open(directory, \"w\") as f:\n f.write(json.dumps(data))\n f.close()\n print(\"Saved to \" + directory)", "title": "" }, { "docid": "28eecd9f04e699f7f016b01d42ec0871", "score": "0.54713994", "text": "def save_file_as(self):\n filepath = str(QtGui.QFileDialog.getSaveFileName(self, 'Save File', self.filepath, \"JSON (*.json)\"))\n filename = os.path.basename(filepath)\n if filename and filepath:\n self.filename = filename\n self.filepath = filepath\n # -- Generate a dictionary of all items in the launch list, each item being a dictionary itself with that items all robot launch data --\n savedata_launch_tree_dict = {}\n # Iterate over all existing (top level) items (a.k.a. robots) in the robotTree and add their column values to the launch_tree_dict.\n root = self.robotTree.invisibleRootItem()\n child_count = root.childCount()\n for i in range(child_count):\n item = root.child(i)\n savedata_launch_tree_dict[unicode(item.text(0))] = {\n \"robot_ID\": unicode(item.text(0)),\n \"navigation\": unicode(item.text(1)),\n \"robot_type\": unicode(item.text(2)),\n \"ds\": unicode(item.text(3))\n }\n\n # -- Generate a dictionary of all docking station objects, each docking station being a dictionary itself with all ds' data. --\n savedata_docking_station_dict = {}\n for ds in self.docking_station_dict.values():\n ds_id, ds_origin_x, ds_origin_y, ds_origin_theta_rad, ds_rows, ds_columns, ds_cell_offset_x, ds_cell_offset_y, ds_cell_theta_rad = ds.get_attributes()\n savedata_docking_station_dict[ds_id] = {\n \"id\": ds_id,\n \"origin_x\": ds_origin_x,\n \"origin_y\": ds_origin_y,\n \"origin_theta_rad\": ds_origin_theta_rad,\n \"rows\": ds_rows,\n \"columns\": ds_columns,\n \"cell_offset_x\": ds_cell_offset_x,\n \"cell_offset_y\": ds_cell_offset_y,\n \"cell_offset_theta_rad\": ds_cell_theta_rad\n }\n \n # -- Generate top level dictionary which contains all launch input fields, a dict of the launch list and a dict of the docking station settings. 
--\n savedata_dict = {\n # Launch tab\n unicode(self.comboBoxRobotType.objectName()): unicode(self.comboBoxRobotType.itemText(self.comboBoxRobotType.currentIndex())),\n unicode(self.comboBoxDockingStationLaunch.objectName()): unicode(self.comboBoxDockingStationLaunch.itemText(self.comboBoxDockingStationLaunch.currentIndex())),\n unicode(self.spinBoxRobotID.objectName()): self.spinBoxRobotID.value(),\n unicode(self.checkBoxSFMMPDM.objectName()): self.checkBoxSFMMPDM.isChecked(),\n unicode(self.robotTree.objectName()): savedata_launch_tree_dict,\n \n # Docking station tab\n unicode(self.comboBoxDockingStationID.objectName()): unicode(self.comboBoxDockingStationID.itemText(self.comboBoxDockingStationID.currentIndex())),\n \"docking_stations\": savedata_docking_station_dict\n }\n with open(filepath, 'w') as json_savefile:\n json.dump(savedata_dict, json_savefile, indent=4)\n \n self.unsaved_changes_inactive()", "title": "" }, { "docid": "c2fdec46313dc78d4e40a013a7bf2d28", "score": "0.54627717", "text": "def get_all(folder = '/home/pandyr/Dropbox (MIT)/Research/Plasmids'):\n all_data = {} # initialize storage \n # Iterate through all files in fasta folder\n for file in os.listdir(folder):\n if file.endswith(\".dna\"):\n seq = get(folder + '/' + file)\n seqname = get_seqname(file)\n all_data[seqname] = Dseqrecord(seq,linear = False)\n return all_data", "title": "" }, { "docid": "a1273862645cee6c40a8a235337e9403", "score": "0.5461379", "text": "def getFileDict(case):\n return files", "title": "" }, { "docid": "127c356dd3a430fcfef215290d3315ac", "score": "0.54609305", "text": "def folderlist_gen(fname):\n folders = [] # list of folders\n with open(fname) as file:\n for line in file:\n line = line.strip() # get rid of \"\\n\"\n line = \"\\\\\".join([\".\", \"Modified Data\", line]) # get a full path\n folders.append(line)\n files = [] # list of files in all folders\n for folder in folders:\n files = files + dlist_gen(folder + \"\\\\\") # dlist_gen by folder\n return files", "title": "" }, { "docid": "bb00423bb3af99f8c315017249501fe5", "score": "0.54605085", "text": "def loadwtfs(folder):\n wtfbyname = {}\n for wtfpath in wtffiles(folder):\n try:\n name = wtfname(wtfpath)\n wtfdata = Simulate.readwtf(wtfpath)\n wtfbyname[name] = {}\n wtfbyname[name]['wtfpath'] = wtfpath\n wtfbyname[name]['wtfdata'] = wtfdata\n except Exception as exc:\n print exc\n\n return wtfbyname", "title": "" }, { "docid": "52ae031f80a4a38e47efde76ce659840", "score": "0.54601353", "text": "def get_dir_file(self, path):\n result = {}\n dir_list = bucket.list()\n for o in dir_list:\n info = self.get_info(o['name'])\n result[o['name']] = info\n return result", "title": "" }, { "docid": "c3bf5366d6b10e1af5c32ecf75830811", "score": "0.54583526", "text": "def export_listDirectory( self, dir_path, mode ):\n conn , error, userDict = self.__irodsClient( )\n if not conn:\n return S_ERROR( error )\n\n file_path = self.__resolveFileID( dir_path, userDict )\n irodsHome = userDict.get( 'iRodsHome', IRODS_HOME ) \n irodsPath = irodsHome + file_path\n gLogger.debug( \"file_path to read: %s\" % irodsPath )\n\n is_file = False\n irodsFile = irodsOpen( conn , irodsPath , \"r\" )\n if not irodsFile:\n is_file = True\n else:\n irodsDir = os.path.dirname( irodsPath )\n coll = irodsCollection( conn, irodsDir )\n if not coll:\n return S_ERROR( 'Directory not found' )\n objects = coll.getObjects()\n fileList = [ x[0] for x in objects ]\n dirList = coll.getSubCollections() \n\n resultDict = {}\n if mode == 'l':\n if is_file:\n result = 
self.__getFileStat( dir_path )\n if result['OK']:\n resultDict[dir_path] = result['Value']\n return S_OK( resultDict )\n else:\n return S_ERROR( 'Failed to get the file stat info' )\n else:\n failed_list = []\n one_OK = False\n for fname in dirList+fileList:\n result = self.__getFileStat( dir_path + '/' + fname )\n if result['OK']:\n resultDict[fname] = result['Value']\n one_OK = True\n else:\n failed_list.append( fname )\n if failed_list:\n if one_OK:\n result = S_ERROR( 'Failed partially to get the file stat info' )\n else:\n result = S_ERROR( 'Failed to get the file stat info' )\n result['FailedList'] = failed_list\n result['Value'] = resultDict\n else:\n result = S_OK( resultDict )\n\n return result\n else:\n return S_OK( dirList )", "title": "" }, { "docid": "ec378cc7a2b10329dd5b33d19ffe390b", "score": "0.5448974", "text": "def fetch_reads(self):\n reads = []\n # walk through the raw directory\n for root, dirs, files in os.walk(self.raw_dir):\n for fname in files:\n # skip files that are not one of the recognized extensions.\n extensions = Alaska.RAW_EXT + Alaska.ARCH_EXT\n if not fname.endswith(extensions):\n continue\n\n # otherwise, save info about the file\n path = '{}/{}/{}'.format(Alaska.ROOT_PATH, root, fname)\n folder = root.replace(self.raw_dir, '')\n filename = fname\n size = os.path.getsize(path)\n read = {\n 'folder': folder,\n 'filename': fname,\n 'size': '{} bytes'.format(size),\n 'path': path\n }\n reads.append(read)\n return reads", "title": "" }, { "docid": "b686c243b030034e4ef40bb8b8974196", "score": "0.54465556", "text": "def load_parses(savepath):\n f = open(savepath + 'parse-dict.pkl', 'rb')\n parse_dict = pickle.load(f)\n f.close()\n return parse_dict", "title": "" }, { "docid": "6235661e0f9994b4fe581b6ea774fa9d", "score": "0.5446284", "text": "def create_data_lists(path, output_folder):\n path = os.path.abspath(path)\n\n train_images = list()\n train_objects = list()\n n_objects = 0\n\n # Training data\n # Find IDs of images in training data\n with open(os.path.join(path, 'train.txt')) as f:\n ids = f.read().splitlines()\n\n for id in ids:\n # Parse annotation's XML file\n objects = parse_annotation(os.path.join(path, 'Annotations', id + '.xml'))\n if len(objects['labels']) == 0:\n continue\n n_objects += len(objects['labels'])\n train_objects.append(objects)\n train_images.append(os.path.join(path, id + '.ppm'))\n\n assert len(train_objects) == len(train_images)\n\n # Save to file\n with open(os.path.join(output_folder, 'TRAIN_images.json'), 'w') as j:\n json.dump(train_images, j)\n with open(os.path.join(output_folder, 'TRAIN_objects.json'), 'w') as j:\n json.dump(train_objects, j)\n with open(os.path.join(output_folder, 'label_map.json'), 'w') as j:\n json.dump(label_map, j) # save label map too\n\n print('\\nThere are %d training images containing a total of %d objects. Files have been saved to %s.' 
% (\n        len(train_images), n_objects, os.path.abspath(output_folder)))\n\n    # Test data\n    test_images = list()\n    test_objects = list()\n    n_objects = 0\n\n    # Find IDs of images in the test data\n    with open(os.path.join(path, 'test.txt')) as f:\n        ids = f.read().splitlines()\n\n    for id in ids:\n        # Parse annotation's XML file\n        objects = parse_annotation(os.path.join(path, 'Annotations', id + '.xml'))\n        if len(objects['labels']) == 0:  # might change this later\n            continue\n        test_objects.append(objects)\n        n_objects += len(objects['labels'])\n        test_images.append(os.path.join(path, id + '.ppm'))\n\n    assert len(test_objects) == len(test_images)\n\n    # Save to file\n    with open(os.path.join(output_folder, 'TEST_images.json'), 'w') as j:\n        json.dump(test_images, j)\n    with open(os.path.join(output_folder, 'TEST_objects.json'), 'w') as j:\n        json.dump(test_objects, j)\n\n    print('\\nThere are %d test images containing a total of %d objects. Files have been saved to %s.' % (\n        len(test_images), n_objects, os.path.abspath(output_folder)))", "title": "" }, { "docid": "ae84ed29a4c48bf21a92fcf3c173f12d", "score": "0.5445115", "text": "def save_to_file(cls, list_objs):\n        lst = []\n        if list_objs:\n            for i in list_objs:\n                lst.append(i.to_dictionary())\n        with open(cls.__name__ + '.json', 'w') as f:\n            f.write(Base.to_json_string(lst))", "title": "" }, { "docid": "0415d69dd9b68c8f50d9dbdff3ccefae", "score": "0.544258", "text": "def collect_pickle_data():\n\tphrase_names = []\n\tphrase_labels = []\n\tphrases = {}\n\tphrase_counter = 0\n\tfor phrase_name in os.listdir(ASL_FRAME_PATH): # for each phrase\n\t\tphrase_dir = \"\"\n\t\tphrase_dir = ASL_FRAME_PATH+\"\\\\\"+phrase_name\n\t\tpickle_dir = phrase_dir+\"\\\\pickle\"\n\t\tphrase_names.append(phrase_name)\n\t\tphrase_labels.append(phrase_counter)\n\t\tphrases[phrase_counter] = []\n\t\tfor phrase_file in os.listdir(phrase_dir): # for each mp4 file of the phrase\n\t\t\tphrase_file_dir = phrase_dir+\"\\\\\"+phrase_file\n\t\t\thand_data_file_name = pickle_dir+\"\\\\\"+phrase_file+\".pickle\"\n\t\t\t# load array of hands for this phrase file\n\t\t\thand_data = pickle.load(open(hand_data_file_name, \"rb\"))\n\t\t\tphrases[phrase_counter].append(hand_data)\n\t\tphrase_counter += 1", "title": "" }, { "docid": "e119e5c0dc41f5ca50e4e47ad78cc032", "score": "0.544183", "text": "def persist(self, model_dir):\n        regex_file = os.path.join(model_dir, DICT_NER_MODEL_FILE_NAME)\n        utils.write_json_to_file(regex_file, self.known_patterns, indent=4)\n\n        return {\"dict_ner_file\": DICT_NER_MODEL_FILE_NAME}", "title": "" }, { "docid": "dbf2040ae4d6b1f0115443bb98da1a16", "score": "0.5433404", "text": "def read_decade(folder):\n\n    path = \"/home/artoo/Workspace_local/Python/Final_Project_02806/data/wiki/\" + folder + \"/\"\n    file_list = os.listdir(path)\n    decade = {}\n\n    for each_file in file_list:\n\n        each_file = codecs.open(path + each_file, 'r', 'utf-8')\n        try:\n            source = json.loads(each_file.read())\n        except ValueError as ve:\n            print \"\\nError while reading series...\"\n            print ve.message + \"\\n\"\n\n        title = source['query']['pages'].itervalues().next()['title']\n        content = source['query']['pages'].itervalues().next()['revisions'][0]['*']\n\n\n        decade[title] = content\n\n    return decade", "title": "" }, { "docid": "3a54ea74aa985188fc2e73283dc49914", "score": "0.54311633", "text": "def read(self):\n\n        valid = self.find_valid_directories()\n\n        mapping = dict()\n        for directory in valid:\n            documents = JaguarCalcDrone.get_documents_calc_dir(directory)\n            mapping[directory] = 
compute_state_hash(documents)\n\n        return mapping", "title": "" }, { "docid": "d1f7f80c621ea768e3365c48c8e5931d", "score": "0.54308647", "text": "def path_list2dict(file_list: list, suffix: str) -> dict:\n    file_dict = {}\n    for path in file_list:\n        key = path[path.rfind('/') + 1:len(path) - len(suffix)]\n        file_dict[key] = path\n    return file_dict", "title": "" }, { "docid": "37770f31c2a904df8720da6a33e45c02", "score": "0.543051", "text": "def read(self):\n\n        valid = self.find_valid_directories()\n\n        mapping = dict()\n        for directory in valid:\n            documents = AutoTSCalcDrone.get_documents_calc_dir(directory)\n            mapping[directory] = compute_state_hash(documents)\n\n        return mapping", "title": "" }, { "docid": "703e4a634ac7bd1098e8c6a7afc0b131", "score": "0.5427156", "text": "def available_saved(self):\n        return [*os.listdir(self._mem_file)]", "title": "" }, { "docid": "ac0244ca685e1d1652a4b534d9f1bb99", "score": "0.54217535", "text": "def get_dicts():\n    dicts_list = ['abbreviations', 'us_uk']\n    all_dics = [ampersand, clean_symbols, convert_sgml, single_space]\n\n    some_dics = {}\n    \n    for dictionary in dicts_list:\n        with open(dictionary, 'r') as f:\n            some_dics[dictionary] = pickle.load(f)\n\n    all_dics.extend(some_dics.values())\n    return all_dics", "title": "" }, { "docid": "f6f220fb72e7bc81bb59caafbe3d6ba4", "score": "0.5415177", "text": "def dump(self, folder: str) -> None:", "title": "" }, { "docid": "edbed9cfe25f31c741610d20130c8761", "score": "0.5414246", "text": "def save(self):\n        json_objects = {}\n        for key in self.__objects:\n            json_objects[key] = self.__objects[key].to_dict()\n        with open(self.__file_path, 'w') as f:\n            json.dump(json_objects, f)", "title": "" }, { "docid": "4f823e468138c3ab55a298897f786bd4", "score": "0.54107964", "text": "def save_to_file(cls, list_objs):\n        listj = []\n        f = cls.__name__ + '.json'\n        with open(f, 'w', encoding=\"UTF-8\") as file:\n            if list_objs:\n                for k in list_objs:\n                    listj.append(k.to_dictionary())\n                file.write(cls.to_json_string(listj))\n            else:\n                file.write(cls.to_json_string(listj))", "title": "" }, { "docid": "fcd66bb3c9d5f69a9ccec6038d1ab292", "score": "0.54071915", "text": "def get_summaries_dict(summaries_dir):\n    summaries_dict_data = defaultdict(dict)\n\n    for d in os.scandir(summaries_dir):\n        if d.is_dir():\n            for f in os.listdir(d.path):\n                absolute_path = d.path + '/' + f\n\n                with open(absolute_path, encoding=\"utf8\", errors='ignore') as article_file:\n                    data = article_file.read()\n                summaries_dict_data[d.name][f] = data\n\n    return summaries_dict_data", "title": "" }, { "docid": "6429bfe2d479b3b39eeac1ebd4d416a7", "score": "0.5407166", "text": "def all_files_data(dir_name):\n    skip_list = ['.DS_Store']\n    all_files_data_list = []\n    all_files = os.listdir(dir_name)\n    all_files.sort(key=natural_keys)\n    for f in all_files:\n        if f not in skip_list:\n            with open(dir_name + f) as data_file:\n                all_files_data_list.append(json.load(data_file))\n\n    keys_sorted = []\n    for d in all_files_data_list:\n        sorted_keys = [x for x in d['blocks']]\n        sorted_keys.sort(key=natural_keys)\n        keys_sorted.append(sorted_keys)\n\n    return all_files_data_list, keys_sorted", "title": "" }, { "docid": "82b3b5e7180697298630a31e3105c608", "score": "0.54049057", 
"text": "def contents(self):\n rv = {}\n Dhosts = self.dir.dirs()\n for Dhost in Dhosts:\n host = str(Dhost.basename())\n if host not in rv:\n rv[host] = {}\n Dnames = Dhost.dirs()\n for Dname in Dnames:\n name = str(Dname.basename())\n if name not in rv[host]:\n rv[host][name] = {}\n Dids = self.non_dot(Dname)\n for Did in Dids:\n id = str(Did.basename())\n props = self.props(Did)\n props[\"active\"] = \"unknown\"\n rv[host][name][id] = props\n return rv", "title": "" }, { "docid": "b6d89e81d0ed109b398a1ce21fc399b1", "score": "0.5401883", "text": "def savefiles(self):\n return self.options['savefiles']", "title": "" }, { "docid": "25150655e5241147f8078536e2837ee9", "score": "0.53987396", "text": "def loadData(vocab, directory):\n top_level = os.listdir(directory)\n dataset = []\n for d in top_level:\n if d[-1] == '/':\n label = d[:-1]\n subdir = d\n else:\n label = d\n subdir = d+\"/\"\n files = os.listdir(directory+subdir)\n for f in files:\n bow = create_bow(vocab, directory+subdir+f)\n dataset.append({'label': label, 'bow': bow})\n return dataset", "title": "" }, { "docid": "393cc4edda01e097e4c98a86c2ad8153", "score": "0.53958565", "text": "def save_list(path, lst):\n output = open(\"{}.pkl\".format(path), \"wb\")\n pickle.dump(avg, output)\n output.close()\n\n return True", "title": "" }, { "docid": "826846232adecf0b78490048383cf189", "score": "0.53956956", "text": "def dump_obj(name, data, directory, it=-1):\n for d in data:\n # Save list\n if isinstance(data[d], list):\n print '\\titem: ', d, '[', len(data[d]), ']'\n # File name\n fstr = '/' + name + '_' + d + ('_' + str(it) if it != -1 else '') + '.txt'\n\n # Create file and dump all items\n with open(directory + fstr, 'w') as fd:\n \tfd.write('\\n'.join(map(lambda x: str(x), data[d])) + \"\\n\")\n\t\t\n # Save global\n else:\n print '\\titem: ', d\n fstr = '/' + name + '_global' + ('_' + str(it) if it != -1 else '') + '.txt'\n\n # Create file and dump all items\n with open(directory + fstr, 'a') as fd:\n \tfd.write(d + ':' + str(data[d]) + '\\n')", "title": "" }, { "docid": "5932f38cf8c8b7c14dd8573ab10f9d00", "score": "0.53907824", "text": "def load_files(directory):\n dictionary={}\n for file in os.listdir(directory):\n dictionary[file]=open(directory+os.sep+file,encoding='utf-8').read()\n return dictionary", "title": "" }, { "docid": "246e95c121f82a7f9c86d0ce3274e9c8", "score": "0.5386953", "text": "def ListDataFiles():\n print \"Listing Data Files\"\n htmlReturn = \"\"\n files = os.listdir(\"Data\")\n print \"Found files: \", files\n return json.dumps( files )", "title": "" }, { "docid": "3372854f5f8a0eaf56d5064d2e2c9081", "score": "0.5383845", "text": "def file_list(file_name):\n global dict_artists\n output = []\n\n print('Creating file \"' + file_name + '\"...')\n output.append(\"MUSIC LIST\")\n output.append('----------\\n')\n for k_artist in sorted(dict_artists.keys()):\n for k_album in sorted(dict_artists[k_artist].keys()):\n output.append('ARTIST: ' + k_artist)\n output.append('ALBUM: ' + k_album)\n output.append('TRACKS:')\n for track in sorted(dict_artists[k_artist][k_album]):\n output.append(\" \" + track)\n output.append(\"\")\n\n write_file(file_name, output)\n print('File created')\n\n return", "title": "" }, { "docid": "fa69c3e5f51b14f285c2544f593a90d2", "score": "0.53769517", "text": "def getFileNamesAndPaths():\r\n os.chdir(json_file_dir)\r\n files_in_JSON_dir = os.listdir(json_file_dir)\r\n for file in glob.glob(\"*.txt\"):\r\n files_to_zip.append(file)", "title": "" }, { "docid": 
"9d19063a7726884c40bcf475bfafa9a5", "score": "0.5375869", "text": "def generate_folders_and_files():\r\n\r\n if params['EXPERIMENT_NAME']:\r\n params['FILE_PATH'] = getcwd() + \"/results/\" + params[\r\n 'EXPERIMENT_NAME'] + \"/\"\r\n else:\r\n params['FILE_PATH'] = getcwd() + \"/results/\"\r\n\r\n # Generate save folders\r\n if not path.isdir(params['FILE_PATH']):\r\n mkdir(params['FILE_PATH'])\r\n if not path.isdir(params['FILE_PATH'] + str(params['TIME_STAMP'])):\r\n mkdir(params['FILE_PATH'] + str(params['TIME_STAMP']))\r\n\r\n save_params()\r\n save_stats_headers()", "title": "" }, { "docid": "feb9a4834357d3b75268d16b2bb58550", "score": "0.5374786", "text": "def make_atlas_list():\n jsondict = {}\n jsondict['Atlas'] = []\n jsondict['ShortDesc'] = []\n jsondict['Maps'] = []\n jsondict['MapDesc'] = []\n jsondict['Type'] = []\n jsondict[\"LongDesc\"]=[]\n jsondict[\"ReferencesAndLinks\"]=[]\n\n directories = ['Diedrichsen_2009','Buckner_2011','Xue_2021','Ji_2019','King_2019']\n for name in directories:\n with open(name +'/atlas_description.json') as jsonfile:\n file = json.load(jsonfile)\n jsondict[\"Atlas\"].append(name)\n jsondict[\"ShortDesc\"].append(file[\"ShortDesc\"])\n jsondict[\"Maps\"].append(file[\"Maps\"])\n jsondict[\"Type\"].append(file[\"Type\"])\n jsondict[\"MapDesc\"].append(file[\"MapDesc\"])\n jsondict[\"LongDesc\"].append(file[\"LongDesc\"])\n jsondict[\"ReferencesAndLinks\"].append(file[\"ReferencesAndLinks\"])\n with open('package_description.json','w') as outfile:\n json.dump(jsondict,outfile,indent = 5)", "title": "" }, { "docid": "702e51beecf4279a6ad1993aff2f6fb3", "score": "0.53741115", "text": "def save(self):\n my_dict = {}\n for k, v in self.__objects.items():\n my_dict[k] = v.to_dict()\n with open(FileStorage.__file_path, mode='w', encoding='UTF-8') as f:\n json.dump(my_dict, f)", "title": "" }, { "docid": "89135d4969110decd230e561a368cbf3", "score": "0.5371747", "text": "def copys():\n for fname in os.listdir(Result.froot):\n if fname.startswith('wiki_p'):\n result = Result('', '', fname)\n logging.info(\"copying %s\" % fname)\n with open(fname + '.s', 'w') as fout:\n cPickle.dump([None, result.s], fout, protocol=-1)", "title": "" }, { "docid": "03414003afbc84e37c02bf847b9df0fc", "score": "0.5371028", "text": "def crawlFolder(path):\n # path =\"dataset/plot/\"\n files = [f for f in listdir(path) if isfile(join(path, f))]\n files.sort()\n dataset = []\n for item in files:\n file_name = join(path, item)\n file = open(file_name, \"r\")\n data = file.read()\n file.close()\n dataset.append([data, file_name])\n return dataset", "title": "" } ]