text_prompt | code_prompt
---|---|
<SYSTEM_TASK:>
Print something on screen when self.verbose == True
<END_TASK>
<USER_TASK:>
Description:
def _screen(self, s, newline=False):
"""Print something on screen when self.verbose == True""" |
if self.verbose:
if newline:
print(s)
else:
print(s, end=' ') |
<SYSTEM_TASK:>
Perform a line search along the current direction
<END_TASK>
<USER_TASK:>
Description:
def _line_opt(self):
"""Perform a line search along the current direction""" |
direction = self.search_direction.direction
if self.constraints is not None:
try:
direction = self.constraints.project(self.x, direction)
except ConstraintError:
self._screen("CONSTRAINT PROJECT FAILED", newline=True)
return False
direction_norm = np.linalg.norm(direction)
if direction_norm == 0:
return False
self.line.configure(self.x, direction/direction_norm)
success, wolfe, qopt, fopt = \
self.line_search(self.line, self.initial_step_size, self.epsilon)
if success:
self.step = qopt*self.line.axis
self.initial_step_size = np.linalg.norm(self.step)
self.x = self.x + self.step
self.f = fopt
if wolfe:
self._screen("W")
else:
self._screen(" ")
self.search_direction.reset()
return True
else:
if self.debug_line:
import matplotlib.pyplot as pt
import datetime
pt.clf()
qs = np.arange(0.0, 100.1)*(5*self.initial_step_size/100.0)
fs = np.array([self.line(q) for q in qs])
pt.plot(qs, fs)
pt.xlim(qs[0], qs[-1])
fdelta = fs.max() - fs.min()
if fdelta == 0.0:
fdelta = fs.mean()
fmargin = fdelta*0.1
pt.ylim(fs.min() - fmargin, fs.max() + fmargin)
pt.title('fdelta = %.2e fmean = %.2e' % (fdelta, fs.mean()))
pt.xlabel('Line coordinate, q')
pt.ylabel('Function value, f')
pt.savefig('line_failed_%s.png' % (datetime.datetime.now().isoformat()))
self._reset_state()
return False |
<SYSTEM_TASK:>
A map to look up the index of an edge
<END_TASK>
<USER_TASK:>
Description:
def edge_index(self):
"""A map to look up the index of a edge""" |
return dict((edge, index) for index, edge in enumerate(self.edges)) |
<SYSTEM_TASK:>
A dictionary with neighbors
<END_TASK>
<USER_TASK:>
Description:
def neighbors(self):
"""A dictionary with neighbors
The dictionary will have the following form:
``{vertexX: (vertexY1, vertexY2, ...), ...}``
This means that vertexX and vertexY1 are connected etc. This also
implies that the following elements are part of the dictionary:
``{vertexY1: (vertexX, ...), vertexY2: (vertexX, ...), ...}``.
""" |
neighbors = dict(
(vertex, []) for vertex
in range(self.num_vertices)
)
for a, b in self.edges:
neighbors[a].append(b)
neighbors[b].append(a)
# turn lists into frozensets
neighbors = dict((key, frozenset(val)) for key, val in neighbors.items())
return neighbors |
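For illustration, a minimal usage sketch (assuming molmod's ``Graph`` class is importable from ``molmod.graphs`` and its constructor accepts an iterable of vertex-index pairs as edges, as suggested by the code elsewhere in this file):
from molmod.graphs import Graph  # assumed import path
# A path graph 0-1-2; neighbors is a property, so no call parentheses.
g = Graph([(0, 1), (1, 2)])
print(g.neighbors)  # expected: {0: frozenset({1}), 1: frozenset({0, 2}), 2: frozenset({1})}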
<SYSTEM_TASK:>
The matrix with the all-pairs shortest path lengths
<END_TASK>
<USER_TASK:>
Description:
def distances(self):
"""The matrix with the all-pairs shortest path lenghts""" |
from molmod.ext import graphs_floyd_warshall
distances = np.zeros((self.num_vertices,)*2, dtype=int)
#distances[:] = -1 # set all -1, which is just a very big integer
#distances.ravel()[::len(distances)+1] = 0 # set diagonal to zero
for i, j in self.edges: # set edges to one
distances[i, j] = 1
distances[j, i] = 1
graphs_floyd_warshall(distances)
return distances |
<SYSTEM_TASK:>
Vertices that have the lowest maximum distance to any other vertex
<END_TASK>
<USER_TASK:>
Description:
def central_vertices(self):
"""Vertices that have the lowest maximum distance to any other vertex""" |
max_distances = self.distances.max(0)
max_distances_min = max_distances[max_distances > 0].min()
return (max_distances == max_distances_min).nonzero()[0] |
<SYSTEM_TASK:>
Lists of vertices that are only interconnected within each list
<END_TASK>
<USER_TASK:>
Description:
def independent_vertices(self):
"""Lists of vertices that are only interconnected within each list
This means that there is no path from a vertex in one list to a
vertex in another list. In case of a molecular graph, this would
yield the atoms that belong to individual molecules.
""" |
candidates = set(range(self.num_vertices))
result = []
while len(candidates) > 0:
pivot = candidates.pop()
group = [
vertex for vertex, distance
in self.iter_breadth_first(pivot)
]
candidates.difference_update(group)
# this sort makes sure that the order of the vertices is respected
group.sort()
result.append(group)
return result |
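A brief sketch (same ``Graph`` assumptions as above): for a graph with two disconnected components, each component ends up in its own sorted list.
from molmod.graphs import Graph  # assumed import path
g = Graph([(0, 1), (2, 3)])
print(g.independent_vertices)  # expected: [[0, 1], [2, 3]] (component order may depend on set iteration)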
<SYSTEM_TASK:>
A total graph fingerprint
<END_TASK>
<USER_TASK:>
Description:
def fingerprint(self):
"""A total graph fingerprint
The result is invariant under permutation of the vertex indexes. The
chance that two different (molecular) graphs yield the same
fingerprint is small but not zero. (See unit tests.)""" |
if self.num_vertices == 0:
return np.zeros(20, np.ubyte)
else:
return sum(self.vertex_fingerprints) |
<SYSTEM_TASK:>
A fingerprint for each vertex
<END_TASK>
<USER_TASK:>
Description:
def vertex_fingerprints(self):
"""A fingerprint for each vertex
The result is invariant under permutation of the vertex indexes.
Vertices that are symmetrically equivalent will get the same
fingerprint, e.g. the hydrogens in methane would get the same
fingerprint.
""" |
return self.get_vertex_fingerprints(
[self.get_vertex_string(i) for i in range(self.num_vertices)],
[self.get_edge_string(i) for i in range(self.num_edges)],
) |
<SYSTEM_TASK:>
A dictionary with symmetrically equivalent vertices.
<END_TASK>
<USER_TASK:>
Description:
def equivalent_vertices(self):
"""A dictionary with symmetrically equivalent vertices.""" |
level1 = {}
for i, row in enumerate(self.vertex_fingerprints):
key = row.tobytes()
l = level1.get(key)
if l is None:
l = set([i])
level1[key] = l
else:
l.add(i)
level2 = {}
for key, vertices in level1.items():
for vertex in vertices:
level2[vertex] = vertices
return level2 |
<SYSTEM_TASK:>
The vertices in a canonical or normalized order.
<END_TASK>
<USER_TASK:>
Description:
def canonical_order(self):
"""The vertices in a canonical or normalized order.
This routine will return a list of vertices in an order that does not
depend on the initial order, but only depends on the connectivity and
the return values of the function self.get_vertex_string.
Only the vertices that are involved in edges will be included. The
result can be given as first argument to self.get_subgraph, with
reduce=True as second argument. This will return a complete canonical
graph.
The routine is designed not to use symmetry relations that are
obtained with the GraphSearch routine. We also tried to create an
ordering that feels natural, i.e. starting in the center and
pushing vertices with few equivalents to the front. If necessary, the
nature of the vertices and their bonds to atoms closer to the center
will also play a role, but only as a last resort.
""" |
# A) find an appropriate starting vertex.
# Here we take a central vertex that has a minimal number of symmetrical
# equivalents, 'the highest atom number', and the highest fingerprint.
# Note that the symmetrical equivalents are computed from the vertex
# fingerprints, i.e. without the GraphSearch.
starting_vertex = max(
(
-len(self.equivalent_vertices[vertex]),
self.get_vertex_string(vertex),
self.vertex_fingerprints[vertex].tobytes(),
vertex
) for vertex in self.central_vertices
)[-1]
# B) sort all vertices based on
# 1) distance from central vertex
# 2) number of equivalent vertices
# 3) vertex string, (higher atom numbers come first)
# 4) fingerprint
# 5) vertex index
# The last field is only included to collect the result of the sort.
# The fingerprint on itself would be sufficient, but the three first are
# there to have a naturally appealing result.
l = [
[
-distance,
-len(self.equivalent_vertices[vertex]),
self.get_vertex_string(vertex),
self.vertex_fingerprints[vertex].tobytes(),
vertex
] for vertex, distance in self.iter_breadth_first(starting_vertex)
if len(self.neighbors[vertex]) > 0
]
l.sort(reverse=True)
        # C) The order of some vertices is still not completely determined, e.g.
        # consider the case of allene. The four hydrogen atoms are equivalent,
        # but one can have two different orders: make geminal atoms consecutive
        # or don't. It is more tricky than one would think at first sight. In
        # the case of allene, geminality could easily solve the problem. Consider
        # a big flat rotationally symmetric molecule (order 2). The first five
        # shells are order 4 and one would just give a random order to four
        # segments in the first shell. Only when one reaches the outer part that
        # has order two, it turns out that the arbitrary choices in the inner
        # shell play a role. So it does not help to look at relations with
        # vertices at inner or current shells only. One has to consider the
        # whole picture. (Unit testing reveals troubles like these.)
        # I need some sleep now. The code below checks for potential fuzz and
        # will raise an error if the ordering is not fully determined yet. One
        # day, I'll need this code more than I do now, and I'll fix things up.
        # I know how to do this, but I don't care enough right now.
        # -- Toon
for i in range(1, len(l)):
if l[i][:-1] == l[i-1][:-1]:
raise NotImplementedError
# D) Return only the vertex indexes.
return [record[-1] for record in l] |
<SYSTEM_TASK:>
Iterate over the vertices with the breadth first algorithm.
<END_TASK>
<USER_TASK:>
Description:
def iter_breadth_first(self, start=None, do_paths=False, do_duplicates=False):
"""Iterate over the vertices with the breadth first algorithm.
See http://en.wikipedia.org/wiki/Breadth-first_search for more info.
If no start vertex is given, the central vertex is taken.
By default, the distance to the starting vertex is also computed. If
the path to the starting vertex should be computed instead, set
do_paths to True.
When do_duplicates is True, vertices that can be reached through
different paths of equal length will be iterated twice. This
typically only makes sense when do_paths==True.
""" |
if start is None:
start = self.central_vertex
else:
try:
start = int(start)
except ValueError:
raise TypeError("First argument (start) must be an integer.")
if start < 0 or start >= self.num_vertices:
raise ValueError("start must be in the range [0, %i[" %
self.num_vertices)
from collections import deque
work = np.zeros(self.num_vertices, int)
work[:] = -1
work[start] = 0
if do_paths:
result = (start, 0, (start, ))
else:
result = (start, 0)
yield result
todo = deque([result])
while len(todo) > 0:
if do_paths:
parent, parent_length, parent_path = todo.popleft()
else:
parent, parent_length = todo.popleft()
current_length = parent_length + 1
for current in self.neighbors[parent]:
visited = work[current]
if visited == -1 or (do_duplicates and visited == current_length):
work[current] = current_length
if do_paths:
current_path = parent_path + (current, )
result = (current, current_length, current_path)
else:
result = (current, current_length)
#print "iter_breadth_first", result
yield result
todo.append(result) |
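A usage sketch (same ``Graph`` assumptions as above): traverse a small branched graph from vertex 0 and print each vertex with its distance.
from molmod.graphs import Graph  # assumed import path
g = Graph([(0, 1), (1, 2), (1, 3)])
for vertex, distance in g.iter_breadth_first(0):
    print(vertex, distance)
# 0 0, then 1 1, then 2 and 3 at distance 2 (their mutual order may vary)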
<SYSTEM_TASK:>
Iterate over the edges with the breadth first convention.
<END_TASK>
<USER_TASK:>
Description:
def iter_breadth_first_edges(self, start=None):
"""Iterate over the edges with the breadth first convention.
We need this for the pattern matching algorithms, but a quick look at
Wikipedia did not result in a known and named algorithm.
The edges are yielded one by one, together with the distance of the
edge from the starting vertex and a flag that indicates whether the
yielded edge connects two vertices that are at the same distance from
the starting vertex. If that flag is False, the distance from the
starting vertex to edge[0] is equal to the distance variable and the
distance from edge[1] to the starting vertex is equal to distance+1.
One item has the following format: ((i, j), distance, flag)
""" |
if start is None:
start = self.central_vertex
else:
try:
start = int(start)
except ValueError:
raise TypeError("First argument (start) must be an integer.")
if start < 0 or start >= self.num_vertices:
raise ValueError("start must be in the range [0, %i[" %
self.num_vertices)
from collections import deque
work = np.zeros(self.num_vertices, int)
work[:] = -1
work[start] = 0
todo = deque([start])
while len(todo) > 0:
parent = todo.popleft()
distance = work[parent]
for current in self.neighbors[parent]:
if work[current] == -1:
yield (parent, current), distance, False
work[current] = distance+1
todo.append(current)
elif work[current] == distance and current > parent:
                    # second condition in the elif avoids duplicates
yield (parent, current), distance, True
elif work[current] == distance+1:
yield (parent, current), distance, False |
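A sketch for a triangle graph (same ``Graph`` assumptions): the edge that closes the cycle connects two vertices at the same distance from the start, so its flag is True.
from molmod.graphs import Graph  # assumed import path
g = Graph([(0, 1), (0, 2), (1, 2)])
for edge, distance, flag in g.iter_breadth_first_edges(0):
    print(edge, distance, flag)
# (0, 1) 0 False / (0, 2) 0 False / (1, 2) 1 True (order of the first two may vary)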
<SYSTEM_TASK:>
Constructs a subgraph of the current graph
<END_TASK>
<USER_TASK:>
Description:
def get_subgraph(self, subvertices, normalize=False):
"""Constructs a subgraph of the current graph
Arguments:
| ``subvertices`` -- The vertices that should be retained.
| ``normalize`` -- Whether or not the vertices should be renumbered and
reduced to the given set of subvertices. When True, the
edges are also sorted. In the end, this means that the new order of the
edges does not depend on the original order, but only on the
order of the argument subvertices.
This option is False by default. When False, only edges will be
discarded, but the retained data remain unchanged. Also the
parameter num_vertices is not affected.
The returned graph will have an attribute ``old_edge_indexes`` that
relates the positions of the new and the old edges as follows::
>>> self.edges[result._old_edge_indexes[i]] = result.edges[i]
In derived classes, the following should be supported::
>>> self.edge_property[result._old_edge_indexes[i]] = result.edge_property[i]
When ``normalize==True``, also the vertices are affected and the
derived classes should make sure that the following works::
>>> self.vertex_property[result._old_vertex_indexes[i]] = result.vertex_property[i]
The attribute ``old_vertex_indexes`` is only constructed when
``normalize==True``.
""" |
if normalize:
revorder = dict((j, i) for i, j in enumerate(subvertices))
new_edges = []
old_edge_indexes = []
for counter, (i, j) in enumerate(self.edges):
new_i = revorder.get(i)
if new_i is None:
continue
new_j = revorder.get(j)
if new_j is None:
continue
new_edges.append((new_i, new_j))
old_edge_indexes.append(counter)
# sort the edges
order = list(range(len(new_edges)))
# argsort in pure python
order.sort( key=(lambda i: tuple(sorted(new_edges[i]))) )
new_edges = [new_edges[i] for i in order]
old_edge_indexes = [old_edge_indexes[i] for i in order]
result = Graph(new_edges, num_vertices=len(subvertices))
result._old_vertex_indexes = np.array(subvertices, dtype=int)
#result.new_vertex_indexes = revorder
result._old_edge_indexes = np.array(old_edge_indexes, dtype=int)
else:
subvertices = set(subvertices)
old_edge_indexes = np.array([
i for i, edge in enumerate(self.edges)
if edge.issubset(subvertices)
], dtype=int)
new_edges = tuple(self.edges[i] for i in old_edge_indexes)
result = Graph(new_edges, self.num_vertices)
result._old_edge_indexes = old_edge_indexes
# no need for old and new vertex_indexes because they remain the
# same.
return result |
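A sketch (same ``Graph`` assumptions): keep vertices 1, 2 and 3 of a path graph and renumber them.
from molmod.graphs import Graph  # assumed import path
g = Graph([(0, 1), (1, 2), (2, 3)])
sub = g.get_subgraph([1, 2, 3], normalize=True)
print(sub.num_vertices)         # 3
print(sub.edges)                # the two retained edges, renumbered to 0..2
print(sub._old_edge_indexes)    # [1 2]: their positions in g.edges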
<SYSTEM_TASK:>
Return an array with fingerprints for each vertex
<END_TASK>
<USER_TASK:>
Description:
def get_vertex_fingerprints(self, vertex_strings, edge_strings, num_iter=None):
"""Return an array with fingerprints for each vertex""" |
import hashlib
def str2array(x):
"""convert a hash string to a numpy array of bytes"""
if len(x) == 0:
return np.zeros(0, np.ubyte)
elif sys.version_info[0] == 2:
return np.frombuffer(x, np.ubyte)
else:
return np.frombuffer(x.encode(), np.ubyte)
hashrow = lambda x: np.frombuffer(hashlib.sha1(x.data).digest(), np.ubyte)
# initialization
result = np.zeros((self.num_vertices, 20), np.ubyte)
for i in range(self.num_vertices):
result[i] = hashrow(str2array(vertex_strings[i]))
for i in range(self.num_edges):
a, b = self.edges[i]
tmp = hashrow(str2array(edge_strings[i]))
result[a] += tmp
result[b] += tmp
work = result.copy()
# iterations
if num_iter is None:
num_iter = self.max_distance
for i in range(num_iter):
for a, b in self.edges:
work[a] += result[b]
work[b] += result[a]
#for a in xrange(self.num_vertices):
# for b in xrange(self.num_vertices):
# work[a] += hashrow(result[b]*self.distances[a, b])
for a in range(self.num_vertices):
result[a] = hashrow(work[a])
return result |
<SYSTEM_TASK:>
List all vertices that are connected to vertex_in, but are not
<END_TASK>
<USER_TASK:>
Description:
def get_part(self, vertex_in, vertices_border):
"""List all vertices that are connected to vertex_in, but are not
included in or 'behind' vertices_border.
""" |
vertices_new = set(self.neighbors[vertex_in])
vertices_part = set([vertex_in])
while len(vertices_new) > 0:
pivot = vertices_new.pop()
if pivot in vertices_border:
continue
vertices_part.add(pivot)
pivot_neighbors = set(self.neighbors[pivot])
pivot_neighbors -= vertices_part
vertices_new |= pivot_neighbors
return vertices_part |
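A sketch (same ``Graph`` assumptions): in the path graph 0-1-2-3, everything connected to vertex 3 that does not lie behind the border vertex 1 is {2, 3}.
from molmod.graphs import Graph  # assumed import path
g = Graph([(0, 1), (1, 2), (2, 3)])
print(g.get_part(3, [1]))   # expected: {2, 3}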
<SYSTEM_TASK:>
Find the mapping between vertex indexes in self and other.
<END_TASK>
<USER_TASK:>
Description:
def full_match(self, other):
"""Find the mapping between vertex indexes in self and other.
This also works on disconnected graphs. Derived classes should just
implement get_vertex_string and get_edge_string to make this method
aware of the different nature of certain vertices. In case molecules,
this would make the algorithm sensitive to atom numbers etc.
""" |
        # we need to normalize the subgraphs because these graphs are used as patterns.
graphs0 = [
self.get_subgraph(group, normalize=True)
for group in self.independent_vertices
]
graphs1 = [
other.get_subgraph(group)
for group in other.independent_vertices
]
if len(graphs0) != len(graphs1):
return
matches = []
for graph0 in graphs0:
pattern = EqualPattern(graph0)
found_match = False
for i, graph1 in enumerate(graphs1):
local_matches = list(GraphSearch(pattern)(graph1, one_match=True))
if len(local_matches) == 1:
match = local_matches[0]
# we need to restore the relation between the normalized
# graph0 and its original indexes
old_to_new = OneToOne((
(j, i) for i, j
in enumerate(graph0._old_vertex_indexes)
))
matches.append(match * old_to_new)
del graphs1[i]
found_match = True
break
if not found_match:
return
result = OneToOne()
for match in matches:
result.add_relations(match.forward.items())
return result |
<SYSTEM_TASK:>
Add multiple relations to a bijection
<END_TASK>
<USER_TASK:>
Description:
def add_relations(self, relations):
"""Add multiple relations to a bijection""" |
for source, destination in relations:
self.add_relation(source, destination) |
<SYSTEM_TASK:>
Initialize a fresh match based on the first relation
<END_TASK>
<USER_TASK:>
Description:
def from_first_relation(cls, vertex0, vertex1):
"""Intialize a fresh match based on the first relation""" |
result = cls([(vertex0, vertex1)])
result.previous_ends1 = set([vertex1])
return result |
<SYSTEM_TASK:>
Get new edges from the subject graph for the graph search algorithm
<END_TASK>
<USER_TASK:>
Description:
def get_new_edges(self, subject_graph):
"""Get new edges from the subject graph for the graph search algorithm
The Graph search algorithm extends the matches iteratively by adding
matching vertices that are one edge further from the starting vertex
at each iteration.
""" |
result = []
#print "Match.get_new_edges self.previous_ends1", self.previous_ends1
for vertex in self.previous_ends1:
for neighbor in subject_graph.neighbors[vertex]:
if neighbor not in self.reverse:
result.append((vertex, neighbor))
return result |
<SYSTEM_TASK:>
Create a new match object extended with new relations
<END_TASK>
<USER_TASK:>
Description:
def copy_with_new_relations(self, new_relations):
"""Create a new match object extended with new relations""" |
result = self.__class__(self.forward.items())
result.add_relations(new_relations.items())
result.previous_ends1 = set(new_relations.values())
return result |
<SYSTEM_TASK:>
Given a match, iterate over all related equivalent matches
<END_TASK>
<USER_TASK:>
Description:
def iter_final_matches(self, canonical_match, subject_graph, one_match):
"""Given a match, iterate over all related equivalent matches
When criteria sets are defined, the iterator runs over all symmetrically
equivalent matches that fulfill one of the criteria sets. When no
criteria sets are defined, the iterator only yields the input match.
""" |
if self.criteria_sets is None or one_match:
yield canonical_match
else:
for criteria_set in self.criteria_sets:
satisfied_match_tags = set([])
for symmetry in self.pattern_graph.symmetries:
final_match = canonical_match * symmetry
#print final_match
if criteria_set.test_match(final_match, self.pattern_graph, subject_graph):
match_tags = tuple(
self.vertex_tags.get(symmetry.reverse[vertex0])
for vertex0
in range(self.pattern_graph.num_vertices)
)
if match_tags not in satisfied_match_tags:
final_match.__dict__.update(criteria_set.info)
yield final_match
satisfied_match_tags.add(match_tags) |
<SYSTEM_TASK:>
Return the closed cycles corresponding to this permutation
<END_TASK>
<USER_TASK:>
Description:
def get_closed_cycles(self):
"""Return the closed cycles corresponding to this permutation
The cycle will be normalized to facilitate the elimination of
duplicates. The following is guaranteed:
1) If this permutation is represented by disconnected cycles, the
cycles will be sorted by the lowest index they contain.
2) Each cycle starts with its lowest index. (unique starting point)
3) Singletons are discarded. (because they are boring)
""" |
# A) construct all the cycles
closed_cycles = []
todo = set(self.forward.keys())
if todo != set(self.forward.values()):
raise GraphError("The subject and pattern graph must have the same "
"numbering.")
current_vertex = None
while len(todo) > 0:
            if current_vertex is None:
current_vertex = todo.pop()
current_cycle = []
else:
todo.discard(current_vertex)
current_cycle.append(current_vertex)
next_vertex = self.get_destination(current_vertex)
if next_vertex == current_cycle[0]:
if len(current_cycle) > 1:
# bring the lowest element in front
pivot = np.argmin(current_cycle)
current_cycle = current_cycle[pivot:] + \
current_cycle[:pivot]
closed_cycles.append(current_cycle)
current_vertex = None
else:
current_vertex = next_vertex
# B) normalize the cycle representation
closed_cycles.sort() # a normal sort is sufficient because only the
# first item of each cycle is considered
# transform the structure into a tuple of tuples
closed_cycles = tuple(tuple(cycle) for cycle in closed_cycles)
return closed_cycles |
<SYSTEM_TASK:>
Returns true when the two vertices are of the same kind
<END_TASK>
<USER_TASK:>
Description:
def compare(self, vertex0, vertex1, subject_graph):
"""Returns true when the two vertices are of the same kind""" |
return (
self.pattern_graph.vertex_fingerprints[vertex0] ==
subject_graph.vertex_fingerprints[vertex1]
).all() |
<SYSTEM_TASK:>
Check the completeness of a ring match
<END_TASK>
<USER_TASK:>
Description:
def complete(self, match, subject_graph):
"""Check the completeness of a ring match""" |
size = len(match)
# check whether we have an odd strong ring
if match.forward[size-1] in subject_graph.neighbors[match.forward[size-2]]:
# we have an odd closed cycle. check if this is a strong ring
order = list(range(0, size, 2)) + list(range(1, size-1, 2))[::-1]
ok = True
for i in range(len(order)//2):
# Count the number of paths between two opposite points in the
# ring. Since the ring has an odd number of vertices, each
# vertex has two semi-opposite vertices.
count = len(list(subject_graph.iter_shortest_paths(
match.forward[order[i]],
match.forward[order[(i+size//2)%size]]
)))
if count > 1:
ok = False
break
count = len(list(subject_graph.iter_shortest_paths(
match.forward[order[i]],
match.forward[order[(i+size//2+1)%size]]
)))
if count > 1:
ok = False
break
if ok:
match.ring_vertices = tuple(match.forward[i] for i in order)
#print "RingPattern.complete: found odd ring"
return True
#print "RingPattern.complete: no odd ring"
# check whether we have an even strong ring
paths = list(subject_graph.iter_shortest_paths(
match.forward[size-1],
match.forward[size-2]
))
#print "RingPattern.complete: even paths", paths
if (size > 3 and len(paths) == 1 and len(paths[0]) == 3) or \
(size == 3 and len(paths) == 2 and len(paths[0]) == 3):
path = paths[0]
if size == 3 and path[1] == match.forward[0]:
path = paths[1]
# we have an even closed cycle. check if this is a strong ring
match.add_relation(size, path[1])
size += 1
order = list(range(0, size, 2)) + list(range(size-1, 0, -2))
ok = True
for i in range(len(order)//2):
count = len(list(subject_graph.iter_shortest_paths(
match.forward[order[i]],
match.forward[order[(i+size//2)%size]]
)))
if count != 2:
ok = False
break
if ok:
# also check if this does not violate the requirement for a
# unique origin:
if match.forward[size-1] < match.forward[0]:
ok = False
if not ok:
vertex1 = match.forward[size-1]
del match.forward[size-1]
del match.reverse[vertex1]
size -= 1
#print "RingPattern.complete: no even ring"
else:
match.ring_vertices = tuple(match.forward[i] for i in order)
#print "RingPattern.complete: found even ring"
return ok
#print "RingPattern.complete: not at all"
return False |
<SYSTEM_TASK:>
Only prints debug info on screen when self.debug == True.
<END_TASK>
<USER_TASK:>
Description:
def print_debug(self, text, indent=0):
"""Only prints debug info on screen when self.debug == True.""" |
if self.debug:
if indent > 0:
print(" "*self.debug, text)
self.debug += indent
if indent <= 0:
print(" "*self.debug, text) |
<SYSTEM_TASK:>
Given an onset for a match, iterate over all possible new key-value pairs
<END_TASK>
<USER_TASK:>
Description:
def _iter_new_relations(self, init_match, subject_graph, edges0, constraints0, edges1):
"""Given an onset for a match, iterate over all possible new key-value pairs""" |
# Count the number of unique edges0[i][1] values. This is also
# the number of new relations.
num_new_relations = len(set(j for i, j in edges0))
def combine_small(relations, num):
"""iterate over all compatible combinations within one set of relations"""
if len(relations) == 0:
return
for i, pivot in enumerate(relations):
if num == 1:
yield (pivot, )
else:
compatible_relations = list(
item for item in relations[:i]
if pivot[0]!=item[0] and pivot[1]!=item[1]
)
for tail in combine_small(compatible_relations, num-1):
yield (pivot, ) + tail
# generate candidate relations
candidate_relations = []
icg = self._iter_candidate_groups(init_match, edges0, edges1)
for end_vertices0, end_vertices1 in icg:
if len(end_vertices0) > len(end_vertices1):
return # this can never work, the subject graph is 'too small'
elif not self.pattern.sub and \
len(end_vertices0) != len(end_vertices1):
return # an exact match is sought, this can never work
l = []
for end_vertex0 in end_vertices0:
for end_vertex1 in end_vertices1:
if self.pattern.compare(end_vertex0, end_vertex1, subject_graph):
l.append((end_vertex0, end_vertex1))
# len(end_vertices0) = the total number of relations that must be
# made in this group
if len(l) > 0:
# turn l into a list of sets of internally compatible candidate
# relations in this group
l = list(combine_small(l, len(end_vertices0)))
candidate_relations.append(l)
if len(candidate_relations) == 0:
return
self.print_debug("candidate_relations: %s" % candidate_relations)
def combine_big(pos=0):
"""Iterate over all possible sets of relations"""
# pos is an index in candidate_relations
crs = candidate_relations[pos]
if pos == len(candidate_relations)-1:
for relations in crs:
yield relations
else:
for tail in combine_big(pos+1):
for relations in crs:
yield relations + tail
# final loop
for new_relations in combine_big():
new_relations = set(new_relations)
self.print_debug("new_relations: %s" % (new_relations, ))
# check the total number of new relations
if len(new_relations) != num_new_relations:
continue
# check sanity of relations
forward = dict(new_relations)
if len(forward) != num_new_relations:
continue
reverse = dict((j, i) for i, j in new_relations)
if len(reverse) != num_new_relations:
continue
# check the constraints
for a0, b0 in constraints0:
if forward[a0] not in subject_graph.neighbors[forward[b0]]:
forward = None
break
if forward is None:
continue
yield forward |
<SYSTEM_TASK:>
Given an onset for a match, iterate over all completions of that match
<END_TASK>
<USER_TASK:>
Description:
def _iter_matches(self, input_match, subject_graph, one_match, level=0):
"""Given an onset for a match, iterate over all completions of that match
This iterator works recursively. At each level the match is extended
with a new set of relations based on vertices in the pattern graph
that are at a distance 'level' from the starting vertex.
""" |
self.print_debug("ENTERING _ITER_MATCHES", 1)
self.print_debug("input_match: %s" % input_match)
# A) collect the new edges in the pattern graph and the subject graph
# to extend the match.
#
# Note that the edges are ordered. edge[0] is always in the match.
# edge[1] is never in the match. The constraints contain information
# about the end points of edges0. It is a list of two-tuples where
# (a, b) means that a and b must be connected.
#
# Second note: suffix 0 indicates the pattern graph and suffix 1
# is used for the subject graph.
edges0, constraints0 = self.pattern.get_new_edges(level)
edges1 = input_match.get_new_edges(subject_graph)
self.print_debug("edges0: %s" % edges0)
self.print_debug("constraints0: %s" % constraints0)
self.print_debug("edges1: %s" % edges1)
# B) iterate over the sets of new relations: [(vertex0[i], vertex1[j]),
# ...] that contain all endpoints of edges0, that satisfy the
# constraints0 and where (vertex0[i], vertex1[j]) only occurs if these
        # are end points of an edge0 and an edge1 whose starting points are already
        # in init_match. These conditions are implemented in an iterator so as to
        # separate concerns. This iterator also calls the routines that check
        # whether vertex1[j] also satisfies additional conditions inherent to
        # vertex0[i].
inr = self._iter_new_relations(input_match, subject_graph, edges0,
constraints0, edges1)
for new_relations in inr:
# for each set of new_relations, construct a next_match and recurse
next_match = input_match.copy_with_new_relations(new_relations)
if not self.pattern.check_next_match(next_match, new_relations, subject_graph, one_match):
continue
if self.pattern.complete(next_match, subject_graph):
yield next_match
else:
for match in self._iter_matches(next_match, subject_graph, one_match, level+1):
yield match
self.print_debug("LEAVING_ITER_MATCHES", -1) |
<SYSTEM_TASK:>
Writes a single molecule to a pdb file.
<END_TASK>
<USER_TASK:>
Description:
def dump_pdb(filename, molecule, atomnames=None, resnames=None, chain_ids=None, occupancies=None, betas=None):
"""Writes a single molecule to a pdb file.
This function is based on the pdb file specification:
http://www.wwpdb.org/documentation/format32/sect9.html
For convenience, the relevant table is copied and the character indexes are
transformed to C-style (starting from zero)
======= ============ ========== ==========================================
COLUMNS DATA TYPE FIELD DEFINITION
======= ============ ========== ==========================================
0 - 5 Record name "ATOM "
6 - 10 Integer serial Atom serial number.
12 - 15 Atom name Atom name.
16 Character altLoc Alternate location indicator.
17 - 19 Residue name resName Residue name.
21 Character chainID Chain identifier.
22 - 25 Integer resSeq Residue sequence number.
26 AChar iCode Code for insertion of residues.
30 - 37 Real(8.3) x Orthogonal coordinates for X in Angstroms.
38 - 45 Real(8.3) y Orthogonal coordinates for Y in Angstroms.
46 - 53 Real(8.3) z Orthogonal coordinates for Z in Angstroms.
54 - 59 Real(6.2) occupancy Occupancy.
60 - 65 Real(6.2) tempFactor Temperature factor.
76 - 77 LString(2) element Element symbol, right-justified.
78 - 79 LString(2) charge Charge on the atom.
======= ============ ========== ==========================================
""" |
with open(filename, "w") as f:
res_id = 1
old_resname = None
for i in range(molecule.size):
symbol = periodic[molecule.numbers[i]].symbol
if atomnames is None:
atomname = symbol
else:
atomname = atomnames[i]
if resnames is None:
resname = "OXO"
else:
resname = resnames[i]
if resname != old_resname:
res_id += 1
if chain_ids is None:
chain_id = "A"
else:
chain_id = chain_ids[i]
if occupancies is None:
occupancy = 1.0
else:
occupancy = occupancies[i]
if betas is None:
beta = 1.0
else:
beta = betas[i]
print("ATOM %4i %3s %3s %1s%4i %8.3f%8.3f%8.3f%6.2f%6.2f %2s " % (
i+1, atomname.ljust(3), resname.ljust(3), chain_id, res_id,
molecule.coordinates[i, 0]/angstrom,
molecule.coordinates[i, 1]/angstrom,
molecule.coordinates[i, 2]/angstrom,
occupancy, beta, symbol.ljust(2)
), file=f)
old_resname = resname |
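A hedged usage sketch: write a water molecule to a pdb file and read it back. This assumes molmod's ``Molecule`` class and the ``angstrom`` unit constant are importable from the top-level ``molmod`` package; coordinates are stored in atomic units internally, hence the multiplication by ``angstrom``.
import numpy as np
from molmod import Molecule, angstrom  # assumed import path
coords = np.array([[0.000, 0.000, 0.000],
                   [0.757, 0.586, 0.000],
                   [-0.757, 0.586, 0.000]])*angstrom
mol = Molecule([8, 1, 1], coords)
dump_pdb("water.pdb", mol, resnames=["HOH", "HOH", "HOH"])
mol2 = load_pdb("water.pdb")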
<SYSTEM_TASK:>
Loads a single molecule from a pdb file.
<END_TASK>
<USER_TASK:>
Description:
def load_pdb(filename):
"""Loads a single molecule from a pdb file.
This function supports only a small fragment of the pdb specification.
It assumes that there is only one molecular geometry in the pdb file.
""" |
with open(filename) as f:
numbers = []
coordinates = []
occupancies = []
betas = []
for line in f:
if line.startswith("ATOM"):
symbol = line[76:78].strip()
numbers.append(periodic[symbol].number)
coordinates.append([float(line[30:38])*angstrom, float(line[38:46])*angstrom, float(line[46:54])*angstrom])
occupancies.append(float(line[54:60]))
betas.append(float(line[60:66]))
if len(numbers) > 0:
molecule = Molecule(numbers, coordinates)
molecule.occupancies = np.array(occupancies)
molecule.betas = np.array(betas)
return molecule
else:
raise FileFormatError("No molecule found in pdb file %s" % filename) |
<SYSTEM_TASK:>
Get a new reference atom for a row in the ZMatrix
<END_TASK>
<USER_TASK:>
Description:
def _get_new_ref(self, existing_refs):
"""Get a new reference atom for a row in the ZMatrix
The reference atoms should obey the following conditions:
- They must be different
- They must be neighbours in the bond graph
- They must have an index lower than the current atom
If multiple candidate refs can be found, take the heaviest atom
""" |
# ref0 is the atom whose position is defined by the current row in the
# zmatrix.
ref0 = existing_refs[0]
for ref in existing_refs:
# try to find a neighbor of the ref that can serve as the new ref
result = None
for n in sorted(self.graph.neighbors[ref]):
if self.new_index[n] > self.new_index[ref0]:
# index is too high, zmatrix rows can't refer to future
# atoms
continue
if n in existing_refs:
# ref is already in use
continue
if result is None or self.graph.numbers[n] <= self.graph.numbers[result]:
# acceptable ref, prefer heaviest atom
result = n
if result is not None:
return result
raise RuntimeError("Could not find new reference.") |
<SYSTEM_TASK:>
Convert cartesian coordinates to ZMatrix format
<END_TASK>
<USER_TASK:>
Description:
def cart_to_zmat(self, coordinates):
"""Convert cartesian coordinates to ZMatrix format
Argument:
coordinates -- Cartesian coordinates (numpy array Nx3)
The coordinates must match with the graph that was used to initialize
the ZMatrixGenerator object.
""" |
N = len(self.graph.numbers)
if coordinates.shape != (N, 3):
raise ValueError("The shape of the coordinates must be (%i, 3)" % N)
result = np.zeros(N, dtype=self.dtype)
for i in range(N):
ref0 = self.old_index[i]
rel1 = -1
rel2 = -1
rel3 = -1
distance = 0
angle = 0
dihed = 0
if i > 0:
ref1 = self._get_new_ref([ref0])
distance = np.linalg.norm(coordinates[ref0]-coordinates[ref1])
rel1 = i - self.new_index[ref1]
if i > 1:
ref2 = self._get_new_ref([ref0, ref1])
angle, = ic.bend_angle(coordinates[[ref0, ref1, ref2]])
rel2 = i - self.new_index[ref2]
if i > 2:
ref3 = self._get_new_ref([ref0, ref1, ref2])
dihed, = ic.dihed_angle(coordinates[[ref0, ref1, ref2, ref3]])
rel3 = i - self.new_index[ref3]
result[i] = (self.graph.numbers[i], distance, rel1, angle, rel2, dihed, rel3)
return result |
<SYSTEM_TASK:>
Return ``True`` if the mapper can map all the IPA characters
<END_TASK>
<USER_TASK:>
Description:
def can_map_ipa_string(self, ipa_string):
"""
Return ``True`` if the mapper can map all the IPA characters
in the given IPA string.
:param IPAString ipa_string: the IPAString to be parsed
:rtype: bool
""" |
canonical = [(c.canonical_representation, ) for c in ipa_string]
split = split_using_dictionary(canonical, self, self.max_key_length, single_char_parsing=False)
for sub in split:
            if sub not in self.ipa_canonical_representation_to_mapped_str:
return False
return True |
<SYSTEM_TASK:>
Convert the given IPAString to a string
<END_TASK>
<USER_TASK:>
Description:
def map_ipa_string(self, ipa_string, ignore=False, return_as_list=False, return_can_map=False):
"""
Convert the given IPAString to a string
containing the corresponding ASCII IPA representation.
:param IPAString ipa_string: the IPAString to be parsed
:param bool ignore: if ``True``, ignore Unicode characters that are not IPA valid
:param bool return_as_list: if ``True``, return as a list of strings, one for each IPAChar,
instead of their concatenation (single str)
:param bool return_can_map: if ``True``, return a pair ``(bool, str)``, where the first element
says if the mapper can map all the IPA characters in the given IPA string,
and the second element is either ``None`` or the mapped string/list
:rtype: str or (bool, str) or (bool, list)
""" |
acc = []
can_map = True
canonical = [(c.canonical_representation, ) for c in ipa_string]
split = split_using_dictionary(canonical, self, self.max_key_length, single_char_parsing=False)
for sub in split:
try:
acc.append(self.ipa_canonical_representation_to_mapped_str[sub])
except KeyError:
if ignore:
can_map = False
else:
raise ValueError("The IPA string contains an IPA character that is not mapped: %s" % sub)
mapped = acc if return_as_list else u"".join(acc)
if return_can_map:
return (can_map, mapped)
return mapped |
<SYSTEM_TASK:>
Convert the given Unicode string, representing an IPA string,
<END_TASK>
<USER_TASK:>
Description:
def map_unicode_string(self, unicode_string, ignore=False, single_char_parsing=False, return_as_list=False, return_can_map=False):
"""
Convert the given Unicode string, representing an IPA string,
to a string containing the corresponding mapped representation.
Return ``None`` if ``unicode_string`` is ``None``.
:param str unicode_string: the Unicode string to be parsed
:param bool ignore: if ``True``, ignore Unicode characters that are not IPA valid
:param bool single_char_parsing: if ``True``, parse one Unicode character at a time
:param bool return_as_list: if ``True``, return as a list of strings, one for each IPAChar,
instead of their concatenation (single str)
:param bool return_can_map: if ``True``, return a pair ``(bool, str)``, where the first element
says if the mapper can map all the IPA characters in the given IPA string,
and the second element is either ``None`` or the mapped string/list
:rtype: str or (bool, str) or (bool, list)
""" |
if unicode_string is None:
return None
ipa_string = IPAString(unicode_string=unicode_string, ignore=ignore, single_char_parsing=single_char_parsing)
return self.map_ipa_string(
ipa_string=ipa_string,
ignore=ignore,
return_as_list=return_as_list,
return_can_map=return_can_map
) |
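A usage sketch with one of the mappers defined further below in this file (import path assumed from the ipapy package layout):
from ipapy.kirshenbaummapper import KirshenbaumMapper  # assumed import path
mapper = KirshenbaumMapper()
print(mapper.map_unicode_string(u"pata", ignore=True))
# prints the Kirshenbaum ASCII representation of the IPA string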
<SYSTEM_TASK:>
Print Unicode characters that are not IPA valid,
<END_TASK>
<USER_TASK:>
Description:
def print_invalid_chars(invalid_chars, vargs):
"""
    Print Unicode characters that are not IPA valid,
if requested by the user.
:param list invalid_chars: a list (possibly empty) of invalid Unicode characters
:param dict vargs: the command line parameters
""" |
if len(invalid_chars) > 0:
if vargs["print_invalid"]:
print(u"".join(invalid_chars))
if vargs["unicode"]:
for u_char in sorted(set(invalid_chars)):
print(u"'%s'\t%s\t%s" % (u_char, hex(ord(u_char)), unicodedata.name(u_char, "UNKNOWN"))) |
<SYSTEM_TASK:>
Print the canonical representation of the given string.
<END_TASK>
<USER_TASK:>
Description:
def command_canonize(string, vargs):
"""
Print the canonical representation of the given string.
It will replace non-canonical compound characters
with their canonical synonym.
:param str string: the string to act upon
:param dict vargs: the command line arguments
""" |
try:
ipa_string = IPAString(
unicode_string=string,
ignore=vargs["ignore"],
single_char_parsing=vargs["single_char_parsing"]
)
print(vargs["separator"].join([(u"%s" % c) for c in ipa_string]))
except ValueError as exc:
print_error(str(exc)) |
<SYSTEM_TASK:>
Print a list of all IPA characters in the given string.
<END_TASK>
<USER_TASK:>
Description:
def command_chars(string, vargs):
"""
Print a list of all IPA characters in the given string.
It will print the Unicode representation, the full IPA name,
and the Unicode "U+"-prefixed hexadecimal codepoint representation
of each IPA character.
:param str string: the string to act upon
:param dict vargs: the command line arguments
""" |
try:
ipa_string = IPAString(
unicode_string=string,
ignore=vargs["ignore"],
single_char_parsing=vargs["single_char_parsing"]
)
for c in ipa_string:
print(u"'%s'\t%s (%s)" % (c.unicode_repr, c.name, unicode_to_hex(c.unicode_repr)))
except ValueError as exc:
print_error(str(exc)) |
<SYSTEM_TASK:>
Check if the given string is IPA valid.
<END_TASK>
<USER_TASK:>
Description:
def command_check(string, vargs):
"""
Check if the given string is IPA valid.
If the given string is not IPA valid,
print the invalid characters.
:param str string: the string to act upon
:param dict vargs: the command line arguments
""" |
is_valid = is_valid_ipa(string)
print(is_valid)
if not is_valid:
valid_chars, invalid_chars = remove_invalid_ipa_characters(
unicode_string=string,
return_invalid=True
)
print_invalid_chars(invalid_chars, vargs) |
<SYSTEM_TASK:>
Remove characters that are not IPA valid from the given string,
<END_TASK>
<USER_TASK:>
Description:
def command_clean(string, vargs):
"""
Remove characters that are not IPA valid from the given string,
and print the remaining string.
:param str string: the string to act upon
:param dict vargs: the command line arguments
""" |
valid_chars, invalid_chars = remove_invalid_ipa_characters(
unicode_string=string,
return_invalid=True,
single_char_parsing=vargs["single_char_parsing"]
)
print(u"".join(valid_chars))
print_invalid_chars(invalid_chars, vargs) |
<SYSTEM_TASK:>
Print the ARPABET ASCII string corresponding to the given Unicode IPA string.
<END_TASK>
<USER_TASK:>
Description:
def command_u2a(string, vargs):
"""
    Print the ARPABET ASCII string corresponding to the given Unicode IPA string.
:param str string: the string to act upon
:param dict vargs: the command line arguments
""" |
try:
l = ARPABETMapper().map_unicode_string(
unicode_string=string,
ignore=vargs["ignore"],
single_char_parsing=vargs["single_char_parsing"],
return_as_list=True
)
print(vargs["separator"].join(l))
except ValueError as exc:
print_error(str(exc)) |
<SYSTEM_TASK:>
Print the Kirshenbaum ASCII string corresponding to the given Unicode IPA string.
<END_TASK>
<USER_TASK:>
Description:
def command_u2k(string, vargs):
"""
Print the Kirshenbaum ASCII string corresponding to the given Unicode IPA string.
:param str string: the string to act upon
:param dict vargs: the command line arguments
""" |
try:
l = KirshenbaumMapper().map_unicode_string(
unicode_string=string,
ignore=vargs["ignore"],
single_char_parsing=vargs["single_char_parsing"],
return_as_list=True
)
print(vargs["separator"].join(l))
except ValueError as exc:
print_error(str(exc)) |
<SYSTEM_TASK:>
Set the list of IPAChar objects composing the IPA string
<END_TASK>
<USER_TASK:>
Description:
def ipa_chars(self, value):
"""
Set the list of IPAChar objects composing the IPA string
:param list value: list of IPAChar objects
""" |
if value is None:
self.__ipa_chars = []
else:
if is_list_of_ipachars(value):
self.__ipa_chars = value
else:
raise TypeError("ipa_chars only accepts a list of IPAChar objects") |
<SYSTEM_TASK:>
Return ``True`` if the IPA string is equivalent to the ``other`` object.
<END_TASK>
<USER_TASK:>
Description:
def is_equivalent(self, other, ignore=False):
"""
Return ``True`` if the IPA string is equivalent to the ``other`` object.
The ``other`` object can be:
1. a Unicode string,
2. a list of IPAChar objects, and
3. another IPAString.
:param variant other: the object to be compared against
:param bool ignore: if other is a Unicode string, ignore Unicode characters not IPA valid
:rtype: bool
""" |
def is_equivalent_to_list_of_ipachars(other):
"""
Return ``True`` if the list of IPAChar objects
in the canonical representation of the string
is the same as the given list.
:param list other: list of IPAChar objects
:rtype: bool
"""
my_ipa_chars = self.canonical_representation.ipa_chars
if len(my_ipa_chars) != len(other):
return False
for i in range(len(my_ipa_chars)):
if not my_ipa_chars[i].is_equivalent(other[i]):
return False
return True
if is_unicode_string(other):
try:
return is_equivalent_to_list_of_ipachars(IPAString(unicode_string=other, ignore=ignore).ipa_chars)
except:
return False
if is_list_of_ipachars(other):
try:
return is_equivalent_to_list_of_ipachars(other)
except:
return False
if isinstance(other, IPAString):
return is_equivalent_to_list_of_ipachars(other.canonical_representation.ipa_chars)
return False |
<SYSTEM_TASK:>
Return a new IPAString, containing only the IPA characters specified
<END_TASK>
<USER_TASK:>
Description:
def filter_chars(self, chars=u""):
"""
Return a new IPAString, containing only the IPA characters specified
by the ``chars`` string.
Valid values for ``chars`` are:
* ``consonants`` or ``cns``
* ``vowels`` or ``vwl``
* ``letters`` or ``cns_vwl``
* ``cns_vwl_pstr`` or ``cvp``
* ``cns_vwl_pstr_long`` or ``cvpl``
* ``cns_vwl_str`` or ``cvs``
* ``cns_vwl_str_len`` or ``cvsl``
* ``cns_vwl_str_len_wb`` or ``cvslw``
* ``cns_vwl_str_len_wb_sb`` or ``cvslws``
:rtype: IPAString
""" |
if chars in [u"cns", u"consonants"]:
return self.consonants
elif chars in [u"vwl", u"vowels"]:
return self.vowels
elif chars in [u"cns_vwl", u"letters"]:
return self.letters
elif chars in [u"cns_vwl_pstr", u"cvp"]:
return self.cns_vwl_pstr
elif chars in [u"cns_vwl_pstr_long", u"cvpl"]:
return self.cns_vwl_pstr_long
elif chars in [u"cns_vwl_str", u"cvs"]:
return self.cns_vwl_str
elif chars in [u"cns_vwl_str_len", u"cvsl"]:
return self.cns_vwl_str_len
elif chars in [u"cns_vwl_str_len_wb", u"cvslw"]:
return self.cns_vwl_str_len_wb
elif chars in [u"cns_vwl_str_len_wb_sb", u"cvslws"]:
return self.cns_vwl_str_len_wb_sb
return self |
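A usage sketch (assuming ipapy's ``IPAString`` constructor, shown elsewhere in this file, accepts a ``unicode_string`` keyword and that printing an IPAString yields its Unicode representation):
from ipapy.ipastring import IPAString  # assumed import path
s = IPAString(unicode_string=u"ˈfut")
print(s.filter_chars(u"letters"))   # consonants and vowels only, stress mark dropped
print(s.filter_chars(u"cns"))       # consonants only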
<SYSTEM_TASK:>
Return a new IPAString, containing only the consonants in the current string.
<END_TASK>
<USER_TASK:>
Description:
def consonants(self):
"""
Return a new IPAString, containing only the consonants in the current string.
:rtype: IPAString
""" |
return IPAString(ipa_chars=[c for c in self.ipa_chars if c.is_consonant]) |
<SYSTEM_TASK:>
Return a new IPAString, containing only the vowels in the current string.
<END_TASK>
<USER_TASK:>
Description:
def vowels(self):
"""
Return a new IPAString, containing only the vowels in the current string.
:rtype: IPAString
""" |
return IPAString(ipa_chars=[c for c in self.ipa_chars if c.is_vowel]) |
<SYSTEM_TASK:>
Convert a Unicode field into the corresponding list of Unicode strings.
<END_TASK>
<USER_TASK:>
Description:
def convert_unicode_field(string):
"""
Convert a Unicode field into the corresponding list of Unicode strings.
The (input) Unicode field is a Unicode string containing
one or more Unicode codepoints (``xxxx`` or ``U+xxxx`` or ``xxxx_yyyy``),
separated by a space.
:param str string: the (input) Unicode field
:rtype: list of Unicode strings
""" |
values = []
for codepoint in [s for s in string.split(DATA_FILE_CODEPOINT_SEPARATOR) if (s != DATA_FILE_VALUE_NOT_AVAILABLE) and (len(s) > 0)]:
values.append(u"".join([hex_to_unichr(c) for c in codepoint.split(DATA_FILE_CODEPOINT_JOINER)]))
return values |
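An illustrative call, assuming (as the docstring suggests) that the codepoint separator is a space and the joiner is an underscore; the second field below combines U+0074 and U+02B0 into one string:
print(convert_unicode_field(u"0070 0074_02B0"))
# expected: [u'p', u'tʰ']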
<SYSTEM_TASK:>
Convert an ASCII field into the corresponding list of Unicode strings.
<END_TASK>
<USER_TASK:>
Description:
def convert_ascii_field(string):
"""
Convert an ASCII field into the corresponding list of Unicode strings.
The (input) ASCII field is a Unicode string containing
one or more ASCII codepoints (``00xx`` or ``U+00xx`` or
an ASCII string not starting with ``00`` or ``U+``),
separated by a space.
:param str string: the (input) ASCII field
:rtype: list of Unicode strings
""" |
values = []
for codepoint in [s for s in string.split(DATA_FILE_CODEPOINT_SEPARATOR) if (s != DATA_FILE_VALUE_NOT_AVAILABLE) and (len(s) > 0)]:
#if DATA_FILE_CODEPOINT_JOINER in codepoint:
# values.append(u"".join([hex_to_unichr(c) for c in codepoint.split(DATA_FILE_CODEPOINT_JOINER)]))
if (codepoint.startswith(DATA_FILE_ASCII_NUMERICAL_CODEPOINT_START)) or (codepoint.startswith(DATA_FILE_ASCII_UNICODE_CODEPOINT_START)):
values.append(hex_to_unichr(codepoint))
else:
values.append(codepoint)
return values |
<SYSTEM_TASK:>
Convert a tuple of raw values, according to the given line format.
<END_TASK>
<USER_TASK:>
Description:
def convert_raw_tuple(value_tuple, format_string):
"""
Convert a tuple of raw values, according to the given line format.
:param tuple value_tuple: the tuple of raw values
:param str format_string: the format of the tuple
:rtype: list of tuples
""" |
values = []
for v, c in zip(value_tuple, format_string):
if v is None:
# append None
values.append(v)
elif c == u"s":
# string
values.append(v)
elif c == u"S":
# string, split using space as delimiter
values.append([s for s in v.split(u" ") if len(s) > 0])
elif c == u"i":
# int
values.append(int(v))
elif c == u"U":
# Unicode
values.append(convert_unicode_field(v))
elif c == u"A":
# ASCII
values.append(convert_ascii_field(v))
#elif c == u"x":
# # ignore
# pass
return tuple(values) |
<SYSTEM_TASK:>
Load a data file, with one record per line and
<END_TASK>
<USER_TASK:>
Description:
def load_data_file(
file_path,
file_path_is_relative=False,
comment_string=DATA_FILE_COMMENT,
field_separator=DATA_FILE_FIELD_SEPARATOR,
line_format=None
):
"""
Load a data file, with one record per line and
fields separated by ``field_separator``,
returning a list of tuples.
It ignores lines starting with ``comment_string`` or empty lines.
If ``line_format`` is not ``None``,
check that each line (tuple)
has the prescribed number of values.
:param str file_path: path of the data file to load
:param bool file_path_is_relative: if ``True``, ``file_path`` is relative to this source code file
:param str comment_string: ignore lines starting with this string
:param str field_separator: fields are separated by this string
:param str line_format: if not ``None``, parses each line according to the given format
(``s`` = string, ``S`` = split string using spaces,
``i`` = int, ``x`` = ignore, ``U`` = Unicode, ``A`` = ASCII)
:rtype: list of tuples
""" |
raw_tuples = []
if file_path_is_relative:
file_path = os.path.join(os.path.dirname(__file__), file_path)
with io.open(file_path, "r", encoding="utf-8") as f:
for line in f:
line = line.strip()
if (len(line) > 0) and (not line.startswith(comment_string)):
raw_list = line.split(field_separator)
if len(raw_list) != len(line_format):
raise ValueError("Data file '%s' contains a bad line: '%s'" % (file_path, line))
raw_tuples.append(tuple(raw_list))
if (line_format is None) or (len(line_format) < 1):
return raw_tuples
return [convert_raw_tuple(t, line_format) for t in raw_tuples] |
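A hedged sketch with a hypothetical file name (``counts.dat`` is not part of the library): parse a file whose lines carry a string field and an integer field (format ``u"si"``); lines are split on the default field separator and must contain exactly two fields.
rows = load_data_file("counts.dat", line_format=u"si")  # hypothetical file
for name, count in rows:
    print(name, count)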
<SYSTEM_TASK:>
Return the list of Unicode characters
<END_TASK>
<USER_TASK:>
Description:
def invalid_ipa_characters(unicode_string, indices=False):
"""
Return the list of Unicode characters
in the given Unicode string
that are not IPA valid.
Return ``None`` if ``unicode_string`` is ``None``.
:param str unicode_string: the Unicode string to be parsed
:param bool indices: if ``True``, return a list of pairs (index, invalid character),
instead of a set of characters.
:rtype: set of str or list of (int, str)
""" |
if unicode_string is None:
return None
if indices:
return [(i, unicode_string[i]) for i in range(len(unicode_string)) if unicode_string[i] not in UNICODE_TO_IPA]
return set([c for c in unicode_string if c not in UNICODE_TO_IPA]) |
<SYSTEM_TASK:>
Return a list containing the descriptors in the given object.
<END_TASK>
<USER_TASK:>
Description:
def variant_to_list(obj):
"""
Return a list containing the descriptors in the given object.
The ``obj`` can be a list or a set of descriptor strings, or a Unicode string.
If ``obj`` is a Unicode string, it will be split using spaces as delimiters.
:param variant obj: the object to be parsed
:rtype: list
:raise TypeError: if the ``obj`` has a type not listed above
""" |
if isinstance(obj, list):
return obj
elif is_unicode_string(obj):
return [s for s in obj.split() if len(s) > 0]
elif isinstance(obj, set) or isinstance(obj, frozenset):
return list(obj)
raise TypeError("The given value must be a list or a set of descriptor strings, or a Unicode string.") |
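A brief sketch: the three calls below are expected to yield the same descriptors (modulo ordering for the set input).
print(variant_to_list(u"voiceless bilabial plosive"))
print(variant_to_list(["voiceless", "bilabial", "plosive"]))
print(variant_to_list({"voiceless", "bilabial", "plosive"}))  # set order is arbitrary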
<SYSTEM_TASK:>
Return the canonical string for the given object.
<END_TASK>
<USER_TASK:>
Description:
def variant_to_canonical_string(obj):
"""
    Return the canonical string for the given object.
The ``obj`` can be a list or a set of descriptor strings, or a Unicode string.
If ``obj`` is a Unicode string, it will be split using spaces as delimiters.
:param variant obj: the object to be parsed
:rtype: str
:raise TypeError: if the ``obj`` has a type not listed above
""" |
acc = [DG_ALL_DESCRIPTORS.canonical_value(p) for p in variant_to_list(obj)]
acc = sorted([a for a in acc if a is not None])
return u" ".join(acc) |
<SYSTEM_TASK:>
Return ``True`` if the given object is a list of IPAChar objects.
<END_TASK>
<USER_TASK:>
Description:
def is_list_of_ipachars(obj):
"""
Return ``True`` if the given object is a list of IPAChar objects.
:param object obj: the object to test
:rtype: bool
""" |
if isinstance(obj, list):
for e in obj:
if not isinstance(e, IPAChar):
return False
return True
return False |
<SYSTEM_TASK:>
Return ``True`` if the IPA character is equivalent to the ``other`` object.
<END_TASK>
<USER_TASK:>
Description:
def is_equivalent(self, other):
"""
Return ``True`` if the IPA character is equivalent to the ``other`` object.
The ``other`` object can be:
1. a Unicode string, containing the representation of the IPA character,
2. a Unicode string, containing a space-separated list of descriptors,
3. a list of Unicode strings, containing descriptors, and
4. another IPAChar.
:rtype: bool
""" |
if (self.unicode_repr is not None) and (is_unicode_string(other)) and (self.unicode_repr == other):
return True
if isinstance(other, IPAChar):
return self.canonical_representation == other.canonical_representation
try:
return self.canonical_representation == IPAChar(name=None, descriptors=other).canonical_representation
except:
return False |
<SYSTEM_TASK:>
Return the canonical value of a descriptor of the character,
<END_TASK>
<USER_TASK:>
Description:
def dg_value(self, descriptor_group):
"""
Return the canonical value of a descriptor of the character,
provided it is present in the given descriptor group.
If not present, return ``None``.
:param IPADescriptorGroup descriptor_group: the descriptor group to be checked against
:rtype: str
""" |
for p in self.descriptors:
if p in descriptor_group:
return descriptor_group.canonical_value(p)
return None |
<SYSTEM_TASK:>
Return ``True`` if the character has the given descriptor.
<END_TASK>
<USER_TASK:>
Description:
def has_descriptor(self, descriptor):
"""
Return ``True`` if the character has the given descriptor.
:param IPADescriptor descriptor: the descriptor to be checked against
:rtype: bool
""" |
for p in self.descriptors:
if p in descriptor:
return True
return False |
<SYSTEM_TASK:>
Set the voicing of the consonant.
<END_TASK>
<USER_TASK:>
Description:
def voicing(self, value):
"""
Set the voicing of the consonant.
:param str value: the value to be set
""" |
if (value is not None) and (not value in DG_C_VOICING):
raise ValueError("Unrecognized value for voicing: '%s'" % value)
self.__voicing = value |
<SYSTEM_TASK:>
Set the place of articulation of the consonant.
<END_TASK>
<USER_TASK:>
Description:
def place(self, value):
"""
Set the place of articulation of the consonant.
:param str value: the value to be set
""" |
if (value is not None) and (not value in DG_C_PLACE):
raise ValueError("Unrecognized value for place: '%s'" % value)
self.__place = value |
<SYSTEM_TASK:>
Set the manner of articulation of the consonant.
<END_TASK>
<USER_TASK:>
Description:
def manner(self, value):
"""
Set the manner of articulation of the consonant.
:param str value: the value to be set
""" |
if (value is not None) and (not value in DG_C_MANNER):
raise ValueError("Unrecognized value for manner: '%s'" % value)
self.__manner = value |
<SYSTEM_TASK:>
Set the height of the vowel.
<END_TASK>
<USER_TASK:>
Description:
def height(self, value):
"""
Set the height of the vowel.
:param str value: the value to be set
""" |
if (value is not None) and (value not in DG_V_HEIGHT):
raise ValueError("Unrecognized value for height: '%s'" % value)
self.__height = value |
<SYSTEM_TASK:>
Set the backness of the vowel.
<END_TASK>
<USER_TASK:>
Description:
def backness(self, value):
"""
Set the backness of the vowel.
:param str value: the value to be set
""" |
if (value is not None) and (value not in DG_V_BACKNESS):
raise ValueError("Unrecognized value for backness: '%s'" % value)
self.__backness = value |
<SYSTEM_TASK:>
Set the roundness of the vowel.
<END_TASK>
<USER_TASK:>
Description:
def roundness(self, value):
"""
Set the roundness of the vowel.
:param str value: the value to be set
""" |
if (value is not None) and (value not in DG_V_ROUNDNESS):
raise ValueError("Unrecognized value for roundness: '%s'" % value)
self.__roundness = value |
<SYSTEM_TASK:>
Load the Kirshenbaum ASCII IPA data from the built-in database.
<END_TASK>
<USER_TASK:>
Description:
def _load_data(self):
"""
Load the Kirshenbaum ASCII IPA data from the built-in database.
""" |
ipa_canonical_string_to_ascii_str = dict()
for line in load_data_file(
file_path=self.DATA_FILE_PATH,
file_path_is_relative=True,
line_format=u"sxA"
):
i_desc, i_ascii = line
if len(i_ascii) == 0:
raise ValueError("Data file '%s' contains a bad line: '%s'" % (self.DATA_FILE_PATH, line))
key = (variant_to_canonical_string(i_desc),)
ipa_canonical_string_to_ascii_str[key] = i_ascii[0]
return ipa_canonical_string_to_ascii_str |
<SYSTEM_TASK:>
Return the canonical value corresponding to the given query value.
<END_TASK>
<USER_TASK:>
Description:
def canonical_value(self, query):
"""
Return the canonical value corresponding to the given query value.
Return ``None`` if the query value is not present in any descriptor of the group.
:param str query: the descriptor value to be checked against
""" |
for d in self.descriptors:
if query in d:
return d.canonical_label
return None |
<SYSTEM_TASK:>
Load the ARPABET ASCII IPA data from the built-in database.
<END_TASK>
<USER_TASK:>
Description:
def _load_data(self):
"""
Load the ARPABET ASCII IPA data from the built-in database.
""" |
ipa_canonical_string_to_ascii_str = dict()
for line in load_data_file(
file_path=self.DATA_FILE_PATH,
file_path_is_relative=True,
line_format=u"UA"
):
i_unicode, i_ascii = line
if (len(i_unicode) == 0) or (len(i_ascii) == 0):
raise ValueError("Data file '%s' contains a bad line: '%s'" % (self.DATA_FILE_PATH, line))
i_unicode = i_unicode[0]
i_ascii = i_ascii[0]
key = tuple([UNICODE_TO_IPA[c].canonical_representation for c in i_unicode])
ipa_canonical_string_to_ascii_str[key] = i_ascii
return ipa_canonical_string_to_ascii_str |
<SYSTEM_TASK:>
Return ``True`` if the given string is a Unicode string,
<END_TASK>
<USER_TASK:>
Description:
def is_unicode_string(string):
"""
Return ``True`` if the given string is a Unicode string,
that is, of type ``unicode`` in Python 2 or ``str`` in Python 3.
Return ``None`` if ``string`` is ``None``.
:param str string: the string to be checked
:rtype: bool
""" |
if string is None:
return None
if PY2:
return isinstance(string, unicode)
return isinstance(string, str) |
<SYSTEM_TASK:>
Return a Unicode string out of the given string.
<END_TASK>
<USER_TASK:>
Description:
def to_unicode_string(string):
"""
Return a Unicode string out of the given string.
On Python 2, it calls ``unicode`` with ``utf-8`` encoding.
On Python 3, it just returns the given string.
Return ``None`` if ``string`` is ``None``.
:param str string: the string to convert to Unicode
:rtype: (Unicode) str
""" |
if string is None:
return None
if is_unicode_string(string):
return string
# if reached here, string is a byte string
if PY2:
return unicode(string, encoding="utf-8")
return string.decode(encoding="utf-8") |
<SYSTEM_TASK:>
Return the Unicode character with the given codepoint,
<END_TASK>
<USER_TASK:>
Description:
def hex_to_unichr(hex_string):
"""
Return the Unicode character with the given codepoint,
given as an hexadecimal string.
Return ``None`` if ``hex_string`` is ``None`` or is empty.
Example::
"0061" => a
"U+0061" => a
:param str hex_string: the Unicode codepoint of the desired character
:rtype: (Unicode) str
""" |
if (hex_string is None) or (len(hex_string) < 1):
return None
if hex_string.startswith("U+"):
hex_string = hex_string[2:]
return int_to_unichr(int(hex_string, base=16)) |
<SYSTEM_TASK:>
Return a string containing the Unicode hexadecimal codepoint
<END_TASK>
<USER_TASK:>
Description:
def unicode_to_hex(unicode_string):
"""
Return a string containing the Unicode hexadecimal codepoint
of each Unicode character in the given Unicode string.
Return ``None`` if ``unicode_string`` is ``None``.
Example::
a => U+0061
ab => U+0061 U+0062
:param str unicode_string: the Unicode string to convert
:rtype: (Unicode) str
""" |
if unicode_string is None:
return None
acc = []
for c in unicode_string:
s = hex(ord(c)).replace("0x", "").upper()
acc.append("U+" + ("0" * (4 - len(s))) + s)
return u" ".join(acc) |
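A quick round trip with the two codepoint helpers, following the behaviour documented in the docstrings above:

print(unicode_to_hex(u"ab"))     # "U+0061 U+0062"
print(hex_to_unichr("U+0062"))   # "b"
print(hex_to_unichr("0061"))     # "a"
print(hex_to_unichr(None))       # None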
<SYSTEM_TASK:>
Convert a cleaned parsel.Selector to text.
<END_TASK>
<USER_TASK:>
Description:
def selector_to_text(sel, guess_punct_space=True, guess_layout=True):
""" Convert a cleaned parsel.Selector to text.
See html_text.extract_text docstring for description of the approach
and options.
""" |
import parsel
if isinstance(sel, parsel.SelectorList):
# if selecting a specific xpath
text = []
for s in sel:
extracted = etree_to_text(
s.root,
guess_punct_space=guess_punct_space,
guess_layout=guess_layout)
if extracted:
text.append(extracted)
return ' '.join(text)
else:
return etree_to_text(
sel.root,
guess_punct_space=guess_punct_space,
guess_layout=guess_layout) |
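A small sketch combining this helper with cleaned_selector (defined just below); parsel must be installed, the markup is invented, and the exact whitespace of the result depends on the layout heuristics.

sel = cleaned_selector(u"<div><p>First</p><p>Second</p></div>")
print(selector_to_text(sel.css("p")))  # roughly "First Second"
print(selector_to_text(sel))           # whole-document text, one block per <p>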
<SYSTEM_TASK:>
Clean parsel.selector.
<END_TASK>
<USER_TASK:>
Description:
def cleaned_selector(html):
""" Clean parsel.selector.
""" |
import parsel
try:
tree = _cleaned_html_tree(html)
sel = parsel.Selector(root=tree, type='html')
except (lxml.etree.XMLSyntaxError,
lxml.etree.ParseError,
lxml.etree.ParserError,
UnicodeEncodeError):
# likely plain text
sel = parsel.Selector(html)
return sel |
<SYSTEM_TASK:>
Convert html to text, cleaning invisible content such as styles.
<END_TASK>
<USER_TASK:>
Description:
def extract_text(html,
guess_punct_space=True,
guess_layout=True,
newline_tags=NEWLINE_TAGS,
double_newline_tags=DOUBLE_NEWLINE_TAGS):
"""
Convert html to text, cleaning invisible content such as styles.
Almost the same as normalize-space xpath, but this also
adds spaces between inline elements (like <span>) which are
often used as block elements in html markup, and adds appropriate
newlines to make output better formatted.
html should be a unicode string or an already parsed lxml.html element.
``html_text.etree_to_text`` is a lower-level function which only accepts
an already parsed lxml.html Element, and is not doing html cleaning itself.
When guess_punct_space is True (default), no extra whitespace is added
for punctuation. This has a slight (around 10%) performance overhead
and is just a heuristic.
When guess_layout is True (default), a newline is added
before and after ``newline_tags`` and two newlines are added before
and after ``double_newline_tags``. This heuristic makes the extracted
text more similar to how it is rendered in the browser.
Default newline and double newline tags can be found in
`html_text.NEWLINE_TAGS` and `html_text.DOUBLE_NEWLINE_TAGS`.
""" |
if html is None:
return ''
cleaned = _cleaned_html_tree(html)
return etree_to_text(
cleaned,
guess_punct_space=guess_punct_space,
guess_layout=guess_layout,
newline_tags=newline_tags,
double_newline_tags=double_newline_tags,
) |
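A minimal usage sketch; the sample markup is invented and the output shown is approximate.

import html_text  # the package these helpers are assumed to belong to

sample = u"<h1>Title</h1><style>p {color: red}</style><p>Hello, <span>world</span>!</p>"
print(html_text.extract_text(sample))
# With the default guess_layout=True this yields roughly:
# Title
#
# Hello, world!
print(html_text.extract_text(sample, guess_layout=False))  # roughly "Title Hello, world!"

Note how the <style> element is dropped entirely and no extra space is inserted before the exclamation mark, thanks to guess_punct_space.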
<SYSTEM_TASK:>
Returns the standard JSON descriptor for the layer. There is a lot of
<END_TASK>
<USER_TASK:>
Description:
def get_descriptor_for_layer(self, layer):
"""
Returns the standard JSON descriptor for the layer. There is a lot of
useful information in there.
""" |
if not layer in self._layer_descriptor_cache:
params = {'f': 'pjson'}
if self.token:
params['token'] = self.token
response = requests.get(self._build_request(layer), params=params)
self._layer_descriptor_cache[layer] = response.json()
return self._layer_descriptor_cache[layer] |
<SYSTEM_TASK:>
Pulls out all of the field names for a layer.
<END_TASK>
<USER_TASK:>
Description:
def enumerate_layer_fields(self, layer):
"""
Pulls out all of the field names for a layer.
""" |
descriptor = self.get_descriptor_for_layer(layer)
return [field['name'] for field in descriptor['fields']] |
<SYSTEM_TASK:>
Gets a layer and returns it as honest to God GeoJSON.
<END_TASK>
<USER_TASK:>
Description:
def get(self, layer, where="1 = 1", fields=[], count_only=False, srid='4326'):
"""
Gets a layer and returns it as honest to God GeoJSON.
WHERE 1 = 1 causes us to get everything. We use OBJECTID in the WHERE clause
to paginate, so don't use OBJECTID in your WHERE clause unless you're going to
query under 1000 objects.
""" |
base_where = where
# By default we grab all of the fields. Technically I think
# we can just do "*" for all fields, but I found this was buggy in
# the KMZ mode. I'd rather be explicit.
fields = fields or self.enumerate_layer_fields(layer)
jsobj = self.get_json(layer, where, fields, count_only, srid)
# Sometimes you just want to know how far there is to go.
if count_only:
return jsobj.get('count')
# If there is no geometry, we default to assuming it's a Table type
# data format, and we dump a simple (non-geo) json of all of the data.
if not jsobj.get('geometryType', None):
return self.getTable(layer, where, fields, jsobj=jsobj)
# From what I can tell, the entire layer tends to be of the same type,
# so we only have to determine the parsing function once.
geom_parser = self._determine_geom_parser(jsobj.get('geometryType'))
features = []
# We always want to run once, and then break out as soon as we stop
# getting exceededTransferLimit.
while True:
features += [self.esri_to_geojson(feat, geom_parser) for feat in jsobj.get('features')]
if not jsobj.get('exceededTransferLimit', False):
break
# If we've hit the transfer limit we offset by the last OBJECTID
# returned and keep moving along.
where = "%s > %s" % (self.object_id_field, features[-1]['properties'].get(self.object_id_field))
if base_where != "1 = 1":
# If we have another WHERE filter we needed to tack that back on.
where += " AND %s" % base_where
jsobj = self.get_json(layer, where, fields, count_only, srid)
return {
'type': "FeatureCollection",
'features': features
} |
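A hypothetical driving sketch for this getter; the import path, constructor signature, and service URL are all assumptions, not confirmed by the snippet above.

from arcgis import ArcGIS  # import path is an assumption

arc = ArcGIS("http://example.com/arcgis/rest/services/SomeMap/MapServer")
total = arc.get(0, count_only=True)             # just the feature count of layer 0
fc = arc.get(0, where="STATUS = 'ACTIVE'")      # a GeoJSON FeatureCollection
print(total, len(fc["features"]))

Because pagination is driven by OBJECTID, the WHERE clause here deliberately avoids filtering on OBJECTID itself.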
<SYSTEM_TASK:>
Get a bunch of layers and concatenate them together into one. This is useful if you
<END_TASK>
<USER_TASK:>
Description:
def getMultiple(self, layers, where="1 = 1", fields=[], srid='4326', layer_name_field=None):
"""
Get a bunch of layers and concatenate them together into one. This is useful if you
have a map with layers for, say, every year, named stuff_2014, stuff_2013, stuff_2012, and so on.
Optionally, you can stuff the source layer name into a field of your choosing.
>>> arc.getMultiple([0, 3, 5], layer_name_field='layer_src_name')
""" |
features = []
for layer in layers:
get_fields = fields or self.enumerate_layer_fields(layer)
this_layer = self.get(layer, where, get_fields, False, srid).get('features')
if layer_name_field:
descriptor = self.get_descriptor_for_layer(layer)
layer_name = descriptor.get('name')
for feature in this_layer:
feature['properties'][layer_name_field] = layer_name
features += this_layer
return {
'type': "FeatureCollection",
'features': features
} |
<SYSTEM_TASK:>
Returns the version of the file and the file extension.
<END_TASK>
<USER_TASK:>
Description:
def wf_info(workflow_path):
"""
Returns the version of the file and the file extension.
Assumes that the file path points to the file directly, i.e. ends with a valid file extension. Supports checking local
files as well as files at http:// and https:// locations. Files at these remote locations are recreated locally to
enable our approach to version checking, then removed after version is extracted.
""" |
supported_formats = ['py', 'wdl', 'cwl']
file_type = workflow_path.lower().split('.')[-1] # Grab the file extension
workflow_path = workflow_path if ':' in workflow_path else 'file://' + workflow_path
if file_type in supported_formats:
if workflow_path.startswith('file://'):
version = get_version(file_type, workflow_path[7:])
elif workflow_path.startswith('https://') or workflow_path.startswith('http://'):
# If file not local go fetch it.
html = urlopen(workflow_path).read()
local_loc = os.path.join(os.getcwd(), 'fetchedFromRemote.' + file_type)
with open(local_loc, 'w') as f:
f.write(html.decode())
version = wf_info('file://' + local_loc)[0] # Don't take the file_type here, found it above.
os.remove(local_loc) # TODO: Find a way to avoid recreating file before version determination.
else:
raise NotImplementedError('Unsupported workflow file location: {}. Must be local or HTTP(S).'.format(workflow_path))
else:
raise TypeError('Unsupported workflow type: .{}. Must be {}.'.format(file_type, '.py, .cwl, or .wdl'))
return version, file_type.upper() |
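A hedged usage sketch; the paths and the returned values are hypothetical.

version, wf_type = wf_info("/tmp/hello.wdl")
print(version, wf_type)   # e.g. ("draft-2", "WDL")
version, wf_type = wf_info("https://example.com/workflows/align.cwl")
print(version, wf_type)   # e.g. ("v1.0", "CWL")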
<SYSTEM_TASK:>
Composes and sends a post request that signals the wes server to run a workflow.
<END_TASK>
<USER_TASK:>
Description:
def run(self, wf, jsonyaml, attachments):
"""
Composes and sends a post request that signals the wes server to run a workflow.
:param str wf: A local/http/https path to a cwl/wdl/python workflow file.
:param str jsonyaml: A local path to a json or yaml file.
:param list attachments: A list of local paths to files that will be uploaded to the server.
:param str auth: String to send in the auth header.
:param proto: Scheme under which the server is reachable (http or https).
:param host: Host (and optionally port) to which the request is sent; the wes server listens there (default port 8080).
:return: The body of the post result as a dictionary.
""" |
attachments = list(expand_globs(attachments))
parts = build_wes_request(wf, jsonyaml, attachments)
postresult = requests.post("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
files=parts,
headers=self.auth)
return wes_reponse(postresult) |
<SYSTEM_TASK:>
Cancel a running workflow.
<END_TASK>
<USER_TASK:>
Description:
def cancel(self, run_id):
"""
Cancel a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Scheme under which the server is reachable (http or https).
:param host: Host (and optionally port) to which the request is sent; the wes server listens there (default port 8080).
:return: The body of the cancel result as a dictionary.
""" |
postresult = requests.post("%s://%s/ga4gh/wes/v1/runs/%s/cancel" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult) |
<SYSTEM_TASK:>
Get detailed info about a running workflow.
<END_TASK>
<USER_TASK:>
Description:
def get_run_log(self, run_id):
"""
Get detailed info about a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Scheme under which the server is reachable (http or https).
:param host: Host (and optionally port) to which the request is sent; the wes server listens there (default port 8080).
:return: The body of the get result as a dictionary.
""" |
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult) |
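A hedged sketch of a full client round trip (submit, poll, cancel); the WESClient import path and constructor arguments are assumptions about the surrounding package, and the file names are hypothetical.

from wes_client.util import WESClient  # import path is an assumption

client = WESClient({"auth": {}, "proto": "http", "host": "localhost:8080"})
submitted = client.run("workflow.cwl", "inputs.json", [])
run_id = submitted["run_id"]
print(client.get_run_log(run_id)["state"])   # e.g. "RUNNING"
client.cancel(run_id)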
<SYSTEM_TASK:>
Writes a cwl, wdl, or python file as appropriate from the request dictionary.
<END_TASK>
<USER_TASK:>
Description:
def write_workflow(self, request, opts, cwd, wftype='cwl'):
"""Writes a cwl, wdl, or python file as appropriate from the request dictionary.""" |
workflow_url = request.get("workflow_url")
# link the cwl and json into the cwd
if workflow_url.startswith('file://'):
os.link(workflow_url[7:], os.path.join(cwd, "wes_workflow." + wftype))
workflow_url = os.path.join(cwd, "wes_workflow." + wftype)
os.link(self.input_json, os.path.join(cwd, "wes_input.json"))
self.input_json = os.path.join(cwd, "wes_input.json")
extra_options = self.sort_toil_options(opts.getoptlist("extra"))
if wftype == 'cwl':
command_args = ['toil-cwl-runner'] + extra_options + [workflow_url, self.input_json]
elif wftype == 'wdl':
command_args = ['toil-wdl-runner'] + extra_options + [workflow_url, self.input_json]
elif wftype == 'py':
command_args = ['python'] + extra_options + [workflow_url]
else:
raise RuntimeError('workflow_type is not "cwl", "wdl", or "py": ' + str(wftype))
return command_args |
<SYSTEM_TASK:>
Calls a command with Popen.
<END_TASK>
<USER_TASK:>
Description:
def call_cmd(self, cmd, cwd):
"""
Calls a command with Popen.
Writes stdout, stderr, and the command to separate files.
:param cmd: A string or array of strings.
:param cwd: The working directory in which to run the command.
:return: The pid of the command.
""" |
with open(self.cmdfile, 'w') as f:
f.write(str(cmd))
stdout = open(self.outfile, 'w')
stderr = open(self.errfile, 'w')
logging.info('Calling: ' + ' '.join(cmd))
process = subprocess.Popen(cmd,
stdout=stdout,
stderr=stderr,
close_fds=True,
cwd=cwd)
stdout.close()
stderr.close()
return process.pid |
<SYSTEM_TASK:>
Returns the first option value stored that matches p or default.
<END_TASK>
<USER_TASK:>
Description:
def getopt(self, p, default=None):
"""Returns the first option value stored that matches p or default.""" |
for k, v in self.pairs:
if k == p:
return v
return default |
<SYSTEM_TASK:>
Returns all option values stored that match p as a list.
<END_TASK>
<USER_TASK:>
Description:
def getoptlist(self, p):
"""Returns all option values stored that match p as a list.""" |
optlist = []
for k, v in self.pairs:
if k == p:
optlist.append(v)
return optlist |
<SYSTEM_TASK:>
Convert the file name to ASCII and normalize the string.
<END_TASK>
<USER_TASK:>
Description:
def _safe_name(file_name, sep):
"""Convert the file name to ASCII and normalize the string.""" |
file_name = stringify(file_name)
if file_name is None:
return
file_name = ascii_text(file_name)
file_name = category_replace(file_name, UNICODE_CATEGORIES)
file_name = collapse_spaces(file_name)
if file_name is None or not len(file_name):
return
return file_name.replace(WS, sep) |
<SYSTEM_TASK:>
Create a secure filename for plain file system storage.
<END_TASK>
<USER_TASK:>
Description:
def safe_filename(file_name, sep='_', default=None, extension=None):
"""Create a secure filename for plain file system storage.""" |
if file_name is None:
return decode_path(default)
file_name = decode_path(file_name)
file_name = os.path.basename(file_name)
file_name, _extension = os.path.splitext(file_name)
file_name = _safe_name(file_name, sep=sep)
if file_name is None:
return decode_path(default)
file_name = file_name[:MAX_LENGTH]
extension = _safe_name(extension or _extension, sep=sep)
if extension is not None:
file_name = '.'.join((file_name, extension))
file_name = file_name[:MAX_LENGTH]
return file_name |
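Illustrative calls; exact output depends on the transliteration backend installed, so the results shown are approximate.

print(safe_filename(u"Annual Report (final).pdf"))   # e.g. "Annual_Report_final.pdf"
print(safe_filename(u"notes.txt", extension="md"))   # "notes.md"
print(safe_filename(None, default="untitled"))       # "untitled"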
<SYSTEM_TASK:>
Brute-force convert a given object to a string.
<END_TASK>
<USER_TASK:>
Description:
def stringify(value, encoding_default='utf-8', encoding=None):
"""Brute-force convert a given object to a string.
This will attempt an increasingly mean set of conversions to make a given
object into a unicode string. It is guaranteed to either return unicode or
None, if all conversions failed (or the value is indeed empty).
""" |
if value is None:
return None
if not isinstance(value, six.text_type):
if isinstance(value, (date, datetime)):
return value.isoformat()
elif isinstance(value, (float, Decimal)):
return Decimal(value).to_eng_string()
elif isinstance(value, six.binary_type):
if encoding is None:
encoding = guess_encoding(value, default=encoding_default)
value = value.decode(encoding, 'replace')
value = remove_byte_order_mark(value)
value = remove_unsafe_chars(value)
else:
value = six.text_type(value)
# XXX: is this really a good idea?
value = value.strip()
if not len(value):
return None
return value |
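A few calls illustrating the conversions described above:

from datetime import date

print(stringify(date(2019, 1, 1)))   # "2019-01-01"
print(stringify(3.5))                # "3.5"
print(stringify("  padded  "))       # "padded" (whitespace stripped)
print(stringify("   "))              # None (empty after stripping)
print(stringify(None))               # None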
<SYSTEM_TASK:>
Guess string encoding.
<END_TASK>
<USER_TASK:>
Description:
def guess_encoding(text, default=DEFAULT_ENCODING):
"""Guess string encoding.
Given a piece of text, apply character encoding detection to
guess the appropriate encoding of the text.
""" |
result = chardet.detect(text)
return normalize_result(result, default=default) |
<SYSTEM_TASK:>
Guess encoding from a file handle.
<END_TASK>
<USER_TASK:>
Description:
def guess_file_encoding(fh, default=DEFAULT_ENCODING):
"""Guess encoding from a file handle.""" |
start = fh.tell()
detector = chardet.UniversalDetector()
while True:
data = fh.read(1024 * 10)
if not data:
detector.close()
break
detector.feed(data)
if detector.done:
break
fh.seek(start)
return normalize_result(detector.result, default=default) |
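A usage sketch; the file name is hypothetical and the detected encoding depends entirely on the bytes found.

import io

with io.open("legacy_export.csv", "rb") as fh:
    encoding = guess_file_encoding(fh)          # the file position is restored afterwards
    text = fh.read().decode(encoding, "replace")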
<SYSTEM_TASK:>
Wrapper to open that damn file for you, lazy bastard.
<END_TASK>
<USER_TASK:>
Description:
def guess_path_encoding(file_path, default=DEFAULT_ENCODING):
"""Wrapper to open that damn file for you, lazy bastard.""" |
with io.open(file_path, 'rb') as fh:
return guess_file_encoding(fh, default=default) |
<SYSTEM_TASK:>
Perform unicode compatibility decomposition.
<END_TASK>
<USER_TASK:>
Description:
def decompose_nfkd(text):
"""Perform unicode compatibility decomposition.
This will replace some non-standard value representations in unicode and
normalise them, while also separating characters and their diacritics into
two separate codepoints.
""" |
if text is None:
return None
if not hasattr(decompose_nfkd, '_tr'):
decompose_nfkd._tr = Transliterator.createInstance('Any-NFKD')
return decompose_nfkd._tr.transliterate(text) |
<SYSTEM_TASK:>
Remove characters from a string based on unicode classes.
<END_TASK>
<USER_TASK:>
Description:
def category_replace(text, replacements=UNICODE_CATEGORIES):
"""Remove characters from a string based on unicode classes.
This is a method for removing non-text characters (such as punctuation,
whitespace, marks and diacritics) from a piece of text by class, rather
than specifying them individually.
""" |
if text is None:
return None
characters = []
for character in decompose_nfkd(text):
cat = category(character)
replacement = replacements.get(cat, character)
if replacement is not None:
characters.append(replacement)
return u''.join(characters) |
<SYSTEM_TASK:>
Remove unsafe unicode characters from a piece of text.
<END_TASK>
<USER_TASK:>
Description:
def remove_unsafe_chars(text):
"""Remove unsafe unicode characters from a piece of text.""" |
if isinstance(text, six.string_types):
text = UNSAFE_RE.sub('', text)
return text |
<SYSTEM_TASK:>
Remove newlines, tabs and multiple spaces with single spaces.
<END_TASK>
<USER_TASK:>
Description:
def collapse_spaces(text):
"""Remove newlines, tabs and multiple spaces with single spaces.""" |
if not isinstance(text, six.string_types):
return text
return COLLAPSE_RE.sub(WS, text).strip(WS) |
<SYSTEM_TASK:>
The main normalization function for text.
<END_TASK>
<USER_TASK:>
Description:
def normalize(text, lowercase=True, collapse=True, latinize=False, ascii=False,
encoding_default='utf-8', encoding=None,
replace_categories=UNICODE_CATEGORIES):
"""The main normalization function for text.
This will take a string and apply a set of transformations to it so
that it can be processed more easily afterwards. Arguments:
* ``lowercase``: not very mysterious.
* ``collapse``: replace multiple whitespace-like characters with a
single whitespace. This is especially useful with category replacement
which can lead to a lot of whitespace.
* ``decompose``: apply a unicode normalization (NFKD) to separate
simple characters and their diacritics.
* ``replace_categories``: This will perform a replacement of whole
classes of unicode characters (e.g. symbols, marks, numbers) with a
given character. It is used to replace any non-text elements of the
input string.
""" |
text = stringify(text, encoding_default=encoding_default,
encoding=encoding)
if text is None:
return
if lowercase:
# Yeah I made a Python package for this.
text = text.lower()
if ascii:
# A stricter form of transliteration that leaves only ASCII
# characters.
text = ascii_text(text)
elif latinize:
# Perform unicode-based transliteration, e.g. of cyrillic
# or CJK scripts into latin.
text = latinize_text(text)
if text is None:
return
# Perform unicode category-based character replacement. This is
# used to filter out whole classes of characters, such as symbols,
# punctuation, or whitespace-like characters.
text = category_replace(text, replace_categories)
if collapse:
# Remove consecutive whitespace.
text = collapse_spaces(text)
return text |
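Illustrative calls; outputs are approximate and depend on the ICU transliteration data available.

print(normalize(u"  Mörder   GmbH\n"))         # e.g. "morder gmbh"
print(normalize(u"Пример", latinize=True))     # e.g. "primer"
print(normalize(u"Straße 42!", ascii=True))    # e.g. "strasse 42"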
<SYSTEM_TASK:>
Transliterate the given text to the latin script.
<END_TASK>
<USER_TASK:>
Description:
def latinize_text(text, ascii=False):
"""Transliterate the given text to the latin script.
This attempts to convert a given text to latin script using the
closest match of characters vis a vis the original script.
""" |
if text is None or not isinstance(text, six.string_types) or not len(text):
return text
if ascii:
if not hasattr(latinize_text, '_ascii'):
# Transform to latin, separate accents, decompose, remove
# symbols, compose, push to ASCII
latinize_text._ascii = Transliterator.createInstance('Any-Latin; NFKD; [:Symbol:] Remove; [:Nonspacing Mark:] Remove; NFKC; Accents-Any; Latin-ASCII') # noqa
return latinize_text._ascii.transliterate(text)
if not hasattr(latinize_text, '_tr'):
latinize_text._tr = Transliterator.createInstance('Any-Latin')
return latinize_text._tr.transliterate(text) |
<SYSTEM_TASK:>
Transliterate the given text and make sure it ends up as ASCII.
<END_TASK>
<USER_TASK:>
Description:
def ascii_text(text):
"""Transliterate the given text and make sure it ends up as ASCII.""" |
text = latinize_text(text, ascii=True)
if isinstance(text, six.text_type):
text = text.encode('ascii', 'ignore').decode('ascii')
return text |
<SYSTEM_TASK:>
Return default message for this element
<END_TASK>
<USER_TASK:>
Description:
def message(self):
"""Return default message for this element
""" |
if self.code != 200:
for code in self.response_codes:
if code.code == self.code:
return code.message
raise ValueError("Unknown response code \"%s\" in \"%s\"." % (self.code, self.name))
return "OK" |