<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_dot(docgraph):
""" converts a document graph into a dot file and returns it as a string. If this function call is prepended by %dotstr, it will display the given document graph as a dot/graphviz graph in the currently running IPython notebook session. To use this function, the gvmagic IPython notebook extension needs to be installed once:: %install_ext https://raw.github.com/cjdrake/ipython-magic/master/gvmagic.py In order to visualize dot graphs in your currently running IPython notebook, run this command once:: %load_ext gvmagic """
|
stripped_graph = preprocess_for_pydot(docgraph)
return nx.drawing.nx_pydot.to_pydot(stripped_graph).to_string()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def relabel_nodes(G, mapping, copy=True):
"""Relabel the nodes of the graph G. Parameters G : graph A NetworkX graph mapping : dictionary A dictionary with the old labels as keys and new labels as values. A partial mapping is allowed. copy : bool (optional, default=True) If True return a copy, or if False relabel the nodes in place. Examples -------- ['a', 'b', 'c'] Partial in-place mapping: print(G.nodes()) [2, 'b', 'a'] Mapping as function: [0, 1, 4] Notes ----- Only the nodes specified in the mapping will be relabeled. The keyword setting copy=False modifies the graph in place. This is not always possible if the mapping is circular. In that case use copy=True. See Also -------- convert_node_labels_to_integers """
|
# you can pass a function f(old_label)->new_label
# but we'll just make a dictionary here regardless
if not hasattr(mapping, "__getitem__"):
m = dict((n, mapping(n)) for n in G)
else:
m = mapping
if copy:
return _relabel_copy(G, m)
else:
return _relabel_inplace(G, m)
|
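A quick usage sketch for relabel_nodes, assuming the standard networkx import; a partial mapping leaves unmapped nodes untouched:

import networkx as nx

G = nx.path_graph(3)  # nodes 0, 1, 2
H = nx.relabel_nodes(G, {0: 'a', 1: 'b'})  # partial mapping: node 2 keeps its label
assert set(H.nodes()) == {'a', 'b', 2}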
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_node_labels_to_integers(G, first_label=0, ordering="default", label_attribute=None):
"""Return a copy of the graph G with the nodes relabeled with integers. Parameters G : graph A NetworkX graph first_label : int, optional (default=0) An integer specifying the offset in numbering nodes. ordering : string "default" : inherit node ordering from G.nodes() "sorted" : inherit node ordering from sorted(G.nodes()) "increasing degree" : nodes are sorted by increasing degree "decreasing degree" : nodes are sorted by decreasing degree label_attribute : string, optional (default=None) Name of node attribute to store old label. If None no attribute is created. Notes ----- Node and edge attribute data are copied to the new (relabeled) graph. See Also -------- relabel_nodes """
|
N = G.number_of_nodes() + first_label
if ordering == "default":
mapping = dict(zip(G.nodes(), range(first_label, N)))
elif ordering == "sorted":
nlist = sorted(G.nodes())
mapping = dict(zip(nlist, range(first_label, N)))
elif ordering == "increasing degree":
dv_pairs = [(d, n) for (n, d) in G.degree_iter()]
dv_pairs.sort() # in-place sort from lowest to highest degree
mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N)))
elif ordering == "decreasing degree":
dv_pairs = [(d, n) for (n, d) in G.degree_iter()]
dv_pairs.sort() # in-place sort from lowest to highest degree
dv_pairs.reverse()
mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N)))
else:
raise nx.NetworkXError('Unknown node ordering: {0}'.format(ordering))
H = relabel_nodes(G, mapping)
H.name = "(" + G.name + ")_with_int_labels"
# create node attribute with the old label
if label_attribute is not None:
nx.set_node_attributes(H, label_attribute,
dict((v, k) for k, v in mapping.items()))
return H
|
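A minimal sketch of the integer relabeling, assuming the standard networkx import; label_attribute preserves the original node names:

import networkx as nx

G = nx.Graph([('a', 'b'), ('b', 'c')])
H = nx.convert_node_labels_to_integers(G, first_label=1, label_attribute='old_label')
assert set(H.nodes()) == {1, 2, 3}
# each new node stores its original name under the 'old_label' attribute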
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(self, output_filepath):
""" serialize the ExmaraldaFile instance and write it to a file. Parameters output_filepath : str relative or absolute path to the Exmaralda file to be created """
|
with open(output_filepath, 'w') as out_file:
out_file.write(self.__str__())
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __create_document_header(self):
""" Look, mum! XML generation without string concatenation!1!! This creates an empty, but functional header for an Exmaralda *.exb file. """
|
E = self.E
root = E('basic-transcription')
head = E('head')
meta = E('meta-information')
project = E('project-name')
tname = E('transcription-name')
ref_file = E('referenced-file', url="")
ud = E('ud-meta-information')
comment = E('comment')
tconvention = E('transcription-convention')
meta.append(project)
meta.append(tname)
meta.append(ref_file)
meta.append(ud)
meta.append(comment)
meta.append(tconvention)
speakers = E('speakertable')
head.append(meta)
head.append(speakers)
root.append(head)
return root
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __add_document_structure(self, docgraph, remove_redundant_layers=True):
"""return an Exmaralda XML etree representation a docgraph"""
|
E = self.E
root = self.__create_document_header()
body = E('basic-body')
timeline = E('common-timeline')
# for n tokens we need to create n+1 timeline indices
for i in xrange(len(docgraph.tokens)+1):
idx = str(i)
# example: <tli id="T0" time="0"/>
timeline.append(E('tli', {'id': 'T'+idx, 'time': idx}))
body.append(timeline)
body = self.__add_token_tiers(docgraph, body)
annotation_layers = get_annotation_layers(docgraph)
for layer in annotation_layers:
if not remove_redundant_layers: # add all layers
self.__add_annotation_tier(docgraph, body, layer)
elif is_informative(layer): # only add informative layers
self.__add_annotation_tier(docgraph, body, layer)
self.__add_coreference_chain_tiers(docgraph, body)
root.append(body)
return root
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __add_tokenization(self, tree):
"""adds a node for each token ID in the document"""
|
for token_id in self.get_token_ids(tree):
self.add_node(token_id, layers={self.ns})
self.tokens.append(token_id)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_token_annotation_tier(self, tier):
""" returns True, iff all events in the given tier annotate exactly one token. """
|
for i, event in enumerate(tier.iter('event')):
if self.indexdelta(event.attrib['end'], event.attrib['start']) != 1:
return False
return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __add_token_annotation_tier(self, tier):
""" adds a tier to the document graph, in which each event annotates exactly one token. """
|
for i, event in enumerate(tier.iter('event')):
anno_key = '{0}:{1}'.format(self.ns, tier.attrib['category'])
anno_val = event.text if event.text else ''
self.node[event.attrib['start']][anno_key] = anno_val
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __add_span_tier(self, tier):
""" adds a tier to the document graph in which each event annotates a span of one or more tokens. """
|
tier_id = tier.attrib['id']
# add the tier's root node with an inbound edge from the document root
self.add_node(
tier_id, layers={self.ns, self.ns+':tier'},
attr_dict={self.ns+':category': tier.attrib['category'],
self.ns+':type': tier.attrib['type'],
self.ns+':display-name': tier.attrib['display-name']})
self.add_edge(self.root, tier_id, edge_type=EdgeTypes.dominance_relation)
# add a node for each span, containing an annotation.
# add an edge from the tier root to each span and an edge from each
# span to the tokens it represents
for i, event in enumerate(tier.iter('event')):
span_id = '{}_{}'.format(tier_id, i)
span_tokens = self.gen_token_range(event.attrib['start'], event.attrib['end'])
annotation = event.text if event.text else ''
self.add_node(
span_id, layers={self.ns, self.ns+':span'},
attr_dict={self.ns+':annotation': annotation,
'label': annotation})
self.add_edge(tier_id, span_id, edge_type=EdgeTypes.dominance_relation)
for token_id in span_tokens:
self.add_edge(span_id, token_id,
edge_type=EdgeTypes.spanning_relation)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_token_ids(tree):
""" returns a list of all token IDs occuring the the given exmaralda file, sorted by their time stamp in ascending order. """
|
def tok2time(token_element):
'''
extracts the time (float) of a <tli> element
(i.e. the absolute position of a token in the document)
'''
return float(token_element.attrib['time'])
timeline = tree.find('//common-timeline')
return (tok.attrib['id']
for tok in sorted((tli for tli in timeline.iterchildren()),
key=tok2time))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_token_mapping(docgraph_with_old_names, docgraph_with_new_names, verbose=False):
""" given two document graphs which annotate the same text and which use the same tokenization, creates a dictionary with a mapping from the token IDs used in the first graph to the token IDs used in the second graph. Parameters docgraph_with_old_names : DiscourseDocumentGraph a document graph with token IDs that will be replaced later on docgraph_with_new_names : DiscourseDocumentGraph a document graph with token IDs that will replace the token IDs used in ``docgraph_with_old_names`` later on Returns ------- old2new : dict maps from a token ID used in ``docgraph_with_old_names`` to the token ID used in ``docgraph_with_new_names`` to reference the same token """
|
def kwic_string(docgraph, keyword_index):
tokens = [tok for (tokid, tok) in list(docgraph.get_tokens())]
before, keyword, after = get_kwic(tokens, keyword_index)
return "{0} (Index: {1}): {2} [[{3}]] {4}\n".format(
docgraph.name, keyword_index, ' '.join(before), keyword,
' '.join(after))
# generators of (token ID, token) tuples
old_token_gen = docgraph_with_old_names.get_tokens()
new_token_gen = docgraph_with_new_names.get_tokens()
old2new = {}
for i, (new_tok_id, new_tok) in enumerate(new_token_gen):
old_tok_id, old_tok = old_token_gen.next()
if new_tok != old_tok: # token mismatch
if verbose:
raise ValueError(u"Tokenization mismatch:\n{0}{1}".format(
kwic_string(docgraph_with_old_names, i),
kwic_string(docgraph_with_new_names, i)))
raise ValueError(
u"Tokenization mismatch: {0} ({1}) vs. {2} ({3})\n"
"\t{4} != {5}".format(
docgraph_with_new_names.name, docgraph_with_new_names.ns,
docgraph_with_old_names.name, docgraph_with_old_names.ns,
new_tok, old_tok).encode('utf-8'))
else:
old2new[old_tok_id] = new_tok_id
return old2new
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_kwic(tokens, index, context_window=5):
""" keyword in context Parameters tokens : list of str a text represented as a list of tokens index : int the index of the keyword in the token list context_window : int the number of preceding/succeding words of the keyword to be retrieved Returns ------- before : list of str the tokens preceding the keyword keyword : str the token at the index position after : list of str the tokens succeding the keyword """
|
text_length = len(tokens)
start_before = max(0, index-context_window)
end_before = max(0, index)
before = tokens[start_before:end_before]
start_after = min(text_length, index+1)
end_after = min(text_length, index+context_window+1)
after = tokens[start_after:end_after]
return before, tokens[index], after
|
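A worked example of the slicing above: with index=4 and context_window=2, both windows fall cleanly inside the text (near the edges they are clamped to the text boundaries):

tokens = "the quick brown fox jumps over the lazy dog".split()
before, keyword, after = get_kwic(tokens, index=4, context_window=2)
assert before == ['brown', 'fox']
assert keyword == 'jumps'
assert after == ['over', 'the']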
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_span_offsets(docgraph, node_id):
""" returns the character start and end position of the span of text that the given node spans or dominates. Returns ------- offsets : tuple(int, int) character onset and offset of the span """
|
try:
span = get_span(docgraph, node_id)
# workaround for issue #138
# TODO: when #138 is fixed, just take the first onset / last offset
onsets, offsets = zip(*[docgraph.get_offsets(tok_node)
for tok_node in span])
return (min(onsets), max(offsets))
except KeyError:
raise KeyError("Node '{}' doesn't span any tokens.".format(node_id))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_span(docgraph, node_id, debug=False):
""" returns all the tokens that are dominated or in a span relation with the given node. If debug is set to True, you'll get a warning if the graph is cyclic. Returns ------- span : list of str sorted list of token nodes (token node IDs) """
|
if debug is True and is_directed_acyclic_graph(docgraph) is False:
warnings.warn(
("Can't reliably extract span '{0}' from cyclical graph'{1}'."
"Maximum recursion depth may be exceeded.").format(node_id,
docgraph))
span = []
if docgraph.ns+':token' in docgraph.node[node_id]:
span.append(node_id)
for src_id, target_id, edge_attribs in docgraph.out_edges_iter(node_id,
data=True):
if src_id == target_id:
continue # ignore self-loops
# ignore pointing relations
if edge_attribs['edge_type'] != EdgeTypes.pointing_relation:
span.extend(get_span(docgraph, target_id))
return sorted(span, key=natural_sort_key)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def istoken(docgraph, node_id, namespace=None):
"""returns true, iff the given node ID belongs to a token node. Parameters node_id : str the node to be checked namespace : str or None If a namespace is given, only look for tokens in the given namespace. Otherwise, look for tokens in the default namespace of the given document graph. """
|
if namespace is None:
namespace = docgraph.ns
return namespace+':token' in docgraph.node[node_id]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_continuous(docgraph, dominating_node):
"""return True, if the tokens dominated by the given node are all adjacent"""
|
first_onset, last_offset = get_span_offsets(docgraph, dominating_node)
span_range = xrange(first_onset, last_offset+1)
token_offsets = (docgraph.get_offsets(tok)
for tok in get_span(docgraph, dominating_node))
char_positions = set(itertools.chain.from_iterable(xrange(on, off+1)
for on, off in token_offsets))
for item in span_range:
if item not in char_positions:
return False
return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def select_edges(docgraph, conditions, data):
"""yields all edges that meet the conditions given as eval strings"""
|
for (src_id, target_id, edge_attribs) in docgraph.edges(data=True):
# if all conditions are fulfilled
# we need to add edge_attribs to the namespace eval is working in
if all((eval(cond, {'edge_attribs': edge_attribs})
for cond in conditions)):
if data:
yield (src_id, target_id, edge_attribs)
else:
yield (src_id, target_id)
|
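A sketch of how the eval-based conditions are meant to look: each condition is a Python expression that only has edge_attribs in scope, so it must be self-contained (the attribute name and threshold below are hypothetical):

conditions = ["edge_attribs.get('weight', 0) > 0.5",
              "'layers' in edge_attribs"]
for src, target in select_edges(docgraph, conditions, data=False):
    pass  # only edges satisfying every condition are yielded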
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_offsets(self, offset_ns=None):
""" adds the onset and offset to each token in the document graph, i.e. the character position where each token starts and ends. """
|
if offset_ns is None:
offset_ns = self.ns
onset = 0
offset = 0
for token_id, token_str in self.get_tokens():
offset = onset + len(token_str)
self.node[token_id]['{0}:{1}'.format(offset_ns, 'onset')] = onset
self.node[token_id]['{0}:{1}'.format(offset_ns, 'offset')] = offset
onset = offset + 1
|
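To make the onset/offset arithmetic concrete, a hand trace for a hypothetical two-token document; the onset = offset + 1 step accounts for a single space between adjacent tokens:

# tokens: ('tok_0', 'Hello'), ('tok_1', 'world')
# 'Hello': onset = 0,         offset = 0 + 5 = 5
# 'world': onset = 5 + 1 = 6, offset = 6 + 5 = 11
# i.e. the offsets index into the reconstructed string 'Hello world'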
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_node(self, n, layers=None, attr_dict=None, **attr):
"""Add a single node n and update node attributes. Parameters n : node A node can be any hashable Python object except None. layers : set of str or None the set of layers the node belongs to, e.g. {'tiger:token', 'anaphoricity:annotation'}. Will be set to {self.ns} if None. attr_dict : dictionary, optional (default= no attributes) Dictionary of node attributes. Key/value pairs will update existing data associated with the node. attr : keyword arguments, optional Set or change attributes using key=value. See Also -------- add_nodes_from Examples -------- # adding the same node with a different layer [(1, {'layers': {'node', 'number'}})] Use keywords set/change node attributes: [(1, {'layers': {'node', 'number'}, 'size': 10}), (3, {'UTM': ('13S', 382), 'layers': {'num'}, 'weight': 0.4})] Notes ----- A hashable object is one that can be used as a key in a Python dictionary. This includes strings, numbers, tuples of strings and numbers, etc. On many platforms hashable items also include mutables such as NetworkX Graphs, though one should be careful that the hash doesn't change on mutables. """
|
if not layers:
layers = {self.ns}
assert isinstance(layers, set), \
"'layers' parameter must be given as a set of strings."
assert all((isinstance(layer, str) for layer in layers)), \
"All elements of the 'layers' set must be strings."
# add layers to keyword arguments dict
attr.update({'layers': layers})
# set up attribute dict
if attr_dict is None:
attr_dict = attr
else:
assert isinstance(attr_dict, dict), \
"attr_dict must be a dictionary, not a '{}'".format(type(attr_dict))
attr_dict.update(attr)
# if there's no node with this ID in the graph, yet
if n not in self.succ:
self.succ[n] = {}
self.pred[n] = {}
self.node[n] = attr_dict
else: # update attr even if node already exists
# if a node exists, its attributes will be updated, except
# for the layers attribute. the value of 'layers' will
# be the union of the existing layers set and the new one.
existing_layers = self.node[n]['layers']
all_layers = existing_layers.union(layers)
attrs_without_layers = {k: v for (k, v) in attr_dict.items()
if k != 'layers'}
self.node[n].update(attrs_without_layers)
self.node[n].update({'layers': all_layers})
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_nodes_from(self, nodes, **attr):
"""Add multiple nodes. Parameters nodes : iterable container A container of nodes (list, dict, set, etc.). OR A container of (node, attribute dict) tuples. Node attributes are updated using the attribute dict. attr : keyword arguments, optional (default= no attributes) Update attributes for all nodes in nodes. Node attributes specified in nodes as a tuple take precedence over attributes specified generally. See Also -------- add_node Examples -------- (2, {'layers':{'token'}, 'word':'world'})]) [(1, {'layers': {'token'}, 'word': 'hello'}), (2, {'layers': {'token'}, 'word': 'world'})] Use keywords to update specific node attributes for every node. [(1, {'layers': {'token'}, 'weight': 1.0, 'word': 'hello'}), (2, {'layers': {'token'}, 'weight': 1.0, 'word': 'world'})] Use (node, attrdict) tuples to update attributes for specific nodes. [(1, {'layers': {'tiger', 'token'}, 'size': 10, 'weight': 1.0, 'word': 'hello'}), (2, {'layers': {'token'}, 'weight': 1.0, 'word': 'world'})] """
|
additional_attribs = attr # will be added to each node
for n in nodes:
try: # check, if n is a node_id or a (node_id, attrib dict) tuple
newnode = n not in self.succ # is node in the graph, yet?
except TypeError: # n is a (node_id, attribute dict) tuple
node_id, ndict = n
if 'layers' not in ndict:
ndict['layers'] = {self.ns}
layers = ndict['layers']
assert isinstance(layers, set), \
"'layers' must be specified as a set of strings."
assert all((isinstance(layer, str) for layer in layers)), \
"All elements of the 'layers' set must be strings."
if node_id not in self.succ: # node doesn't exist, yet
self.succ[node_id] = {}
self.pred[node_id] = {}
newdict = additional_attribs.copy()
newdict.update(ndict) # all given attribs incl. layers
self.node[node_id] = newdict
else: # node already exists
existing_layers = self.node[node_id]['layers']
all_layers = existing_layers.union(layers)
self.node[node_id].update(ndict)
self.node[node_id].update(additional_attribs)
self.node[node_id].update({'layers': all_layers})
continue # process next node
# newnode check didn't raise an exception
if newnode: # n is a node_id and it's not in the graph, yet
self.succ[n] = {}
self.pred[n] = {}
self.node[n] = attr.copy()
# since the node isn't represented as a
# (node_id, attribute dict) tuple, we don't know which layers
# it is part of. Therefore, we'll add the namespace of the
# graph as the node layer
self.node[n].update({'layers': set([self.ns])})
else: # n is a node_id and it's already in the graph
self.node[n].update(attr)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_layer(self, element, layer):
""" add a layer to an existing node or edge Parameters element : str, int, (str/int, str/int) the ID of a node or edge (source node ID, target node ID) layer : str the layer that the element shall be added to """
|
assert isinstance(layer, str), "Layers must be strings!"
if isinstance(element, tuple): # edge repr. by (source, target)
assert len(element) == 2
assert all(isinstance(node, (str, int)) for node in element)
source_id, target_id = element
# this class is based on a multi-digraph, so we'll have to iterate
# over all edges between the two nodes (even if there's just one)
edges = self.edge[source_id][target_id]
for edge in edges:
existing_layers = edges[edge]['layers']
existing_layers.add(layer)
edges[edge]['layers'] = existing_layers
if isinstance(element, (str, int)): # node
existing_layers = self.node[element]['layers']
existing_layers.add(layer)
self.node[element]['layers'] = existing_layers
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_token(self, token_node_id, token_attrib='token'):
""" given a token node ID, returns the token unicode string. Parameters token_node_id : str the ID of the token node token_attrib : str name of the node attribute that contains the token string as its value (default: token). Returns ------- token : unicode the token string """
|
return self.node[token_node_id][self.ns+':'+token_attrib]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_rootnodes(self, other_docgraph):
""" Copy all the metadata from the root node of the other graph into this one. Then, move all edges belonging to the other root node to this one. Finally, remove the root node of the other graph from this one. """
|
# copy metadata from other graph, cf. #136
if 'metadata' in other_docgraph.node[other_docgraph.root]:
other_meta = other_docgraph.node[other_docgraph.root]['metadata']
self.node[self.root]['metadata'].update(other_meta)
assert not other_docgraph.in_edges(other_docgraph.root), \
"root node in graph '{}' must not have any ingoing edges".format(
other_docgraph.name)
for (root, target, attrs) in other_docgraph.out_edges(
other_docgraph.root, data=True):
self.add_edge(self.root, target, attr_dict=attrs)
self.remove_node(other_docgraph.root)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def paula_etree_to_string(tree, dtd_filename):
"""convert a PAULA etree into an XML string."""
|
return etree.tostring(
tree, pretty_print=True, xml_declaration=True,
encoding="UTF-8", standalone='no',
doctype='<!DOCTYPE paula SYSTEM "{0}">'.format(dtd_filename))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gen_paula_etree(paula_id):
""" creates an element tree representation of an empty PAULA XML file. """
|
E = ElementMaker(nsmap=NSMAP)
tree = E('paula', version='1.1')
tree.append(E('header', paula_id=paula_id))
return E, tree
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_paula(docgraph, output_root_dir, human_readable=False):
""" converts a DiscourseDocumentGraph into a set of PAULA XML files representing the same document. Parameters docgraph : DiscourseDocumentGraph the document graph to be converted """
|
paula_document = PaulaDocument(docgraph, human_readable=human_readable)
error_msg = ("Please specify an output directory.\nPaula documents consist"
" of multiple files, so we can't just pipe them to STDOUT.")
assert isinstance(output_root_dir, str), error_msg
document_dir = os.path.join(output_root_dir, paula_document.name)
if not os.path.isdir(document_dir):
create_dir(document_dir)
for paula_id in paula_document.files:
with open(os.path.join(document_dir, paula_id+'.xml'), 'w') as outfile:
outfile.write(
paula_etree_to_string(paula_document.files[paula_id],
paula_document.file2dtd[paula_id]))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __make_xpointer_compatible(self):
""" ensure that all node and IDs in the document graph are valid xpointer IDs. this will relabel all node IDs in place in the discourse graph and change its ``.tokens`` list accordingly. """
|
node_id_map = {node: ensure_xpointer_compatibility(node)
for node in self.dg.nodes_iter()}
old_token_ids = self.dg.tokens
# replace document graph with node relabeled version
self.dg = relabel_nodes(self.dg, node_id_map, copy=True)
self.dg.tokens = [node_id_map[tok] for tok in old_token_ids]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_etree(cls, etree_element):
""" creates a ``SaltLabel`` from an etree element representing a label element in a SaltXMI file. A label element in SaltXMI looks like this:: <labels xsi:type="saltCore:SFeature" namespace="salt" name="SNAME" value="ACED0005740007735370616E3139" valueString="sSpan19"/> Parameters etree_element : lxml.etree._Element an etree element parsed from a SaltXMI document """
|
return cls(name=etree_element.attrib['name'],
value=etree_element.attrib['valueString'],
xsi_type=get_xsi_type(etree_element),
namespace=etree_element.attrib.get('namespace'),
hexvalue=etree_element.attrib['value'])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_brackets(docgraph, output_file, layer='mmax'):
""" converts a document graph into a plain text file with brackets. Parameters layer : str or None The layer from which the pointing chains/relations (i.e. coreference relations) should be extracted. If no layer is selected, all pointing relations will be considered. (This might lead to errors, e.g. when the document contains Tiger syntax trees with secondary edges.) """
|
bracketed_str = gen_bracketed_output(docgraph, layer=layer)
assert isinstance(output_file, (str, file))
if isinstance(output_file, str):
path_to_file = os.path.dirname(output_file)
if not os.path.isdir(path_to_file):
create_dir(path_to_file)
with codecs.open(output_file, 'w', 'utf-8') as outfile:
outfile.write(bracketed_str)
else: # output_file is a file object
output_file.write(bracketed_str)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def node2geoff(node_name, properties, encoder):
"""converts a NetworkX node into a Geoff string. Parameters node_name : str or int the ID of a NetworkX node properties : dict a dictionary of node attributes encoder : json.JSONEncoder an instance of a JSON encoder (e.g. `json.JSONEncoder`) Returns ------- geoff : str a Geoff string """
|
if properties:
return '({0} {1})'.format(node_name,
encoder.encode(properties))
else:
return '({0})'.format(node_name)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def edge2geoff(from_node, to_node, properties, edge_relationship_name, encoder):
"""converts a NetworkX edge into a Geoff string. Parameters from_node : str or int the ID of a NetworkX source node to_node : str or int the ID of a NetworkX target node properties : dict a dictionary of edge attributes edge_relationship_name : str string that describes the relationship between the two nodes encoder : json.JSONEncoder an instance of a JSON encoder (e.g. `json.JSONEncoder`) Returns ------- geoff : str a Geoff string """
|
edge_string = None
if properties:
args = [from_node, edge_relationship_name,
encoder.encode(properties), to_node]
edge_string = '({0})-[:{1} {2}]->({3})'.format(*args)
else:
args = [from_node, edge_relationship_name, to_node]
edge_string = '({0})-[:{1}]->({2})'.format(*args)
return edge_string
|
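A small sketch of the Geoff strings both helpers produce, using the stdlib JSON encoder:

import json

encoder = json.JSONEncoder()
node2geoff('n1', {'label': 'token'}, encoder)     # '(n1 {"label": "token"})'
node2geoff('n2', {}, encoder)                     # '(n2)'
edge2geoff('n1', 'n2', {}, 'dominates', encoder)  # '(n1)-[:dominates]->(n2)'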
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_edu_text(text_subtree):
"""return the text of the given EDU subtree"""
|
assert text_subtree.label() == SubtreeType.text
return u' '.join(word.decode('utf-8') for word in text_subtree.leaves())
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_edus_to_tree(parented_tree, edus):
"""replace EDU indices with the text of the EDUs in a parented tree. Parameters parented_tree : nltk.ParentedTree a parented tree that only contains EDU indices as leaves edus : list(list(unicode)) a list of EDUs, where each EDU is represented as a list of tokens """
|
for i, child in enumerate(parented_tree):
if isinstance(child, nltk.Tree):
_add_edus_to_tree(child, edus)
else:
edu_index = int(child)
edu_tokens = edus[edu_index]
parented_tree[i] = u" ".join(edu_tokens)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def execute(self, controller_id, command, *args, **kwargs):
""" Execute a single command, and sets sleep times properly. - controller_id = index of controller, zero-based - command is normal LedController command as a string - *args and **kwargs are passed to command For example, .execute(0, "on", 1) sends "on" command to group 1 on controller 0 (first IP passed to constructor). """
|
controller_instance = self.controllers[controller_id]
controller_instance.last_command_at = self.last_command_at
ret_val = getattr(controller_instance, command)(*args, **kwargs)
self.last_command_at = controller_instance.last_command_at
return ret_val
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_group_type(self, group, bulb_type):
""" Set bulb type for specified group. Group must be int between 1 and 4. Type must be "rgbw" or "white". Alternatively, use constructor keywords group_1, group_2 etc. to set bulb types. """
|
if bulb_type not in ("rgbw", "white"):
raise AttributeError("Bulb type must be either rgbw or white")
self.group[group] = bulb_type
self.has_white = "white" in self.group.values()
self.has_rgbw = "rgbw" in self.group.values()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_brightness_level(cls, percent):
""" Convert percents to bulbs internal range. percent should be integer from 0 to 100. Return value is 2 (minimum brightness) - 27 (maximum brightness) """
|
# Clamp to appropriate range.
percent = min(100, max(0, percent))
# Map 0-100 to 2-27
value = int(2 + ((float(percent) / 100) * 25))
return percent, value
|
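The 0-100 percent to 2-27 value mapping, traced by hand:

# percent = 0   -> value = int(2 + 0.00 * 25) = 2   (minimum)
# percent = 50  -> value = int(2 + 0.50 * 25) = 14
# percent = 100 -> value = int(2 + 1.00 * 25) = 27  (maximum)
# out-of-range input is clamped first, e.g. 150 -> 100 -> 27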
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_brightness(self, percent, group=None):
""" Set brightness. Percent is int between 0 (minimum brightness) and 100 (maximum brightness), or float between 0.0 (minimum brightness) and 1.0 (maximum brightness). See also .nightmode(). If group (1-4) is not specified, brightness of all four groups will be adjusted. """
|
# If input is float, assume it is percent value from 0 to 1.
if isinstance(percent, float):
if percent > 1:
percent = int(percent)
else:
percent = int(percent * 100)
percent, value = self.get_brightness_level(percent)
self.on(group)
self._send_command((b"\x4e", struct.pack("B", value)))
return percent
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def batch_run(self, *commands):
""" Run batch of commands in sequence. Input is positional arguments with (function pointer, *args) tuples. This method is useful for executing commands to multiple groups with retries, without having too long delays. For example, - Set group 1 to red and brightness to 10% - Set group 2 to red and brightness to 10% - Set group 3 to white and brightness to 100% - Turn off group 4 With three repeats, running these consecutively takes approximately 100ms * 13 commands * 3 times = 3.9 seconds. With batch_run, execution takes same time, but first loop - each command is sent once to every group - is finished within 1.3 seconds. After that, each command is repeated two times. Most of the time, this ensures slightly faster changes for each group. Usage: """
|
original_retries = self.repeat_commands
self.repeat_commands = 1
for _ in range(original_retries):
for command in commands:
cmd = command[0]
args = command[1:]
cmd(*args)
self.repeat_commands = original_retries
|
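The elided usage from the docstring might look like the sketch below; led is a hypothetical controller instance, and set_color/off are assumed command names alongside the set_brightness shown above:

led.batch_run(
    (led.set_color, 'red', 1),   (led.set_brightness, 10, 1),    # hypothetical set_color
    (led.set_color, 'red', 2),   (led.set_brightness, 10, 2),
    (led.set_color, 'white', 3), (led.set_brightness, 100, 3),
    (led.off, 4),                                                # hypothetical off
)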
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send(self, msg_dict):
"""Send a message through the websocket client and wait for the answer if the message being sent contains an id attribute."""
|
message = ejson.dumps(msg_dict)
super(DDPSocket, self).send(message)
self._debug_log('<<<{}'.format(message))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _debug_log(self, msg):
"""Debug log messages if debug=True"""
|
if not self.debug:
return
sys.stderr.write('{}\n'.format(msg))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _init_socket(self):
"""Initialize the ddp socket"""
|
# destroy the connection if it already exists
if self.ddpsocket:
self.ddpsocket.remove_all_listeners('received_message')
self.ddpsocket.remove_all_listeners('closed')
self.ddpsocket.remove_all_listeners('opened')
self.ddpsocket.close_connection()
self.ddpsocket = None
# create a ddp socket and subscribe to events
self.ddpsocket = DDPSocket(self.url, self.debug)
self.ddpsocket.on('received_message', self.received_message)
self.ddpsocket.on('closed', self.closed)
self.ddpsocket.on('opened', self.opened)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _recover_network_failure(self):
"""Recover from a network failure"""
|
if self.auto_reconnect and not self._is_closing:
connected = False
while not connected:
log_msg = "* ATTEMPTING RECONNECT"
if self._retry_new_version:
log_msg = "* RETRYING DIFFERENT DDP VERSION"
self.ddpsocket._debug_log(log_msg)
time.sleep(self.auto_reconnect_timeout)
self._init_socket()
try:
self.connect()
connected = True
if self._retry_new_version:
self._retry_new_version = False
else:
self._is_reconnecting = True
except (socket.error, WebSocketException):
pass
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def opened(self):
"""Send the connect message to the server."""
|
# give up if there are no more ddp versions to try
if self._ddp_version_index == len(DDP_VERSIONS):
self.ddpsocket._debug_log('* DDP VERSION MISMATCH')
self.emit('version_mismatch', DDP_VERSIONS)
return
# use server recommended version if we support it
if self._retry_new_version in DDP_VERSIONS:
self._ddp_version_index = [i for i, x in enumerate(DDP_VERSIONS)
if x == self._retry_new_version][0]
connect_msg = {
"msg": "connect",
"version": DDP_VERSIONS[self._ddp_version_index],
"support": DDP_VERSIONS
}
# if we've already got a session token then reconnect
if self._session:
connect_msg["session"] = self._session
self.send(connect_msg)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def closed(self, code, reason=None):
"""Called when the connection is closed"""
|
self.emit('socket_closed', code, reason)
self._recover_network_failure()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call(self, method, params, callback=None):
"""Call a method on the server Arguments: method - the remote server method params - an array of commands to send to the method Keyword Arguments: callback - a callback function containing the return data"""
|
cur_id = self._next_id()
if callback:
self._callbacks[cur_id] = callback
self.send({'msg': 'method', 'id': cur_id, 'method': method, 'params': params})
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_edus(merge_file_str):
"""Extract EDUs from DPLPs .merge output files. Returns ------- edus : dict from EDU IDs (int) to words (list(str)) """
|
lines = merge_file_str.splitlines()
edus = defaultdict(list)
for line in lines:
if line.strip(): # ignore empty lines
token = line.split('\t')[2]
edu_id = int(line.split('\t')[9])
edus[edu_id].append(token)
return edus
|
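A minimal sketch of the expected .merge layout: tab-separated lines with the token at 0-based column 2 and the EDU ID at column 9 (the '_' fields are placeholders for columns this function ignores):

merge_str = ('1\t1\tHello\t_\t_\t_\t_\t_\t_\t0\n'
             '1\t2\tworld\t_\t_\t_\t_\t_\t_\t0\n')
edus = extract_edus(merge_str)
assert edus[0] == ['Hello', 'world']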
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dplptree2dgparentedtree(self):
"""Convert the tree from DPLP's format into a conventional binary tree, which can be easily converted into output formats like RS3. """
|
def transform(dplp_tree):
"""Transform a DPLP parse tree into a more conventional parse tree."""
if isinstance(dplp_tree, basestring) or not hasattr(dplp_tree, 'label'):
return dplp_tree
assert len(dplp_tree) == 2, "We can only handle binary trees."
match = DPLP_REL_RE.match(dplp_tree.label())
assert match, "Relation '{}' does not match regex '{}'".format(dplp_tree.label(), DPLP_REL_RE)
left_child_nuc, right_child_nuc, relname = match.groups()
dplp_tree._label = relname
for i, child_nuclearity in enumerate([left_child_nuc, right_child_nuc]):
child = dplp_tree[i]
dplp_tree[i] = Tree(child_nuclearity, [transform(child)])
return dplp_tree
tree = transform(self.parsetree)
return DGParentedTree.convert(tree)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_terminals_and_nonterminals(sentence_graph):
""" Given a TigerSentenceGraph, returns a sorted list of terminal node IDs, as well as a sorted list of nonterminal node IDs. Parameters sentence_graph : TigerSentenceGraph a directed graph representing one syntax annotated sentence from a TigerXML file Returns ------- terminals, nonterminals : list of str a sorted list of terminal node IDs and a sorted list of nonterminal node IDs """
|
terminals = set()
nonterminals = set()
for node_id in sentence_graph.nodes_iter():
if sentence_graph.out_degree(node_id) > 0:
# all nonterminals (incl. root)
nonterminals.add(node_id)
else: # terminals
terminals.add(node_id)
return sorted(list(terminals), key=natural_sort_key), \
sorted(list(nonterminals), key=natural_sort_key)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_unconnected_nodes(sentence_graph):
""" Takes a TigerSentenceGraph and returns a list of node IDs of unconnected nodes. A node is unconnected, if it doesn't have any in- or outgoing edges. A node is NOT considered unconnected, if the graph only consists of that particular node. Parameters sentence_graph : TigerSentenceGraph a directed graph representing one syntax annotated sentence from a TigerXML file Returns ------- unconnected_node_ids : list of str a list of node IDs of unconnected nodes """
|
return [node for node in sentence_graph.nodes_iter()
if sentence_graph.degree(node) == 0 and
sentence_graph.number_of_nodes() > 1]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_subordinate_clauses(tiger_docgraph):
""" given a document graph of a TIGER syntax tree, return all node IDs of nodes representing subordinate clause constituents. Parameters tiger_docgraph : DiscourseDocumentGraph or TigerDocumentGraph document graph from which subordinate clauses will be extracted Returns ------- subord_clause_nodes : list(str) list of node IDs of nodes directly dominating subordinate clauses """
|
subord_clause_rels = \
dg.select_edges_by_attribute(
tiger_docgraph, attribute='tiger:label',
value=['MO', 'RC', 'SB'])
subord_clause_nodes = []
for src_id, target_id in subord_clause_rels:
src_cat = tiger_docgraph.node[src_id].get('tiger:cat')
if src_cat == 'S' and not dg.istoken(tiger_docgraph, target_id):
subord_clause_nodes.append(target_id)
return subord_clause_nodes
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_token_to_document(self, token_string, token_attrs=None):
"""add a token node to this document graph"""
|
token_feat = {self.ns+':token': token_string}
if token_attrs:
token_attrs.update(token_feat)
else:
token_attrs = token_feat
token_id = 'token_{}'.format(self.token_count)
self.add_node(token_id, layers={self.ns, self.ns+':token'},
attr_dict=token_attrs)
self.token_count += 1
self.tokens.append(token_id)
return token_id
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_dominance_relation(self, source, target):
"""add a dominance relation to this docgraph"""
|
# TODO: fix #39, so we don't need to add nodes by hand
self.add_node(target, layers={self.ns, self.ns+':unit'})
self.add_edge(source, target,
layers={self.ns, self.ns+':discourse'},
edge_type=EdgeTypes.dominance_relation)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_spanning_relation(self, source, target):
"""add a spanning relation to this docgraph"""
|
self.add_edge(source, target, layers={self.ns, self.ns+':unit'},
edge_type=EdgeTypes.spanning_relation)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(data):
""" Query data and result data must have keys who's values are strings. """
|
if not isinstance(data, dict):
error('Data must be a dictionary.')
for value in data.values():
if not isinstance(value, basestring):
error('Values must be strings.')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def terraform_external_data(function):
""" Query data is received on stdin as a JSON object. Result data must be returned on stdout as a JSON object. The wrapped function must expect its first positional argument to be a dictionary of the query data. """
|
@wraps(function)
def wrapper(*args, **kwargs):
query = json.loads(sys.stdin.read())
validate(query)
try:
result = function(query, *args, **kwargs)
except Exception as e:
# Terraform wants one-line errors so we catch all exceptions and trim down to just the message (no trace).
error('{}: {}'.format(type(e).__name__, e))
validate(result)
sys.stdout.write(json.dumps(result))
return wrapper
|
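A hedged end-to-end sketch of the decorator in use; the module path in the import and the 'name' query key are assumptions for illustration:

from terraform_external_data import terraform_external_data  # assumed import path

@terraform_external_data
def lookup(query):
    # all values in and out must be strings (enforced by validate above)
    return {'greeting': 'hello ' + query['name']}  # 'name' key is hypothetical

if __name__ == '__main__':
    lookup()  # reads the query from stdin, writes the result to stdout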
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def n_wrap(tree, debug=False, root_id=None):
"""Ensure the given tree has a nucleus as its root. If the root of the tree is a nucleus, return it. If the root of the tree is a satellite, replace the satellite with a nucleus and return the tree. If the root of the tree is a relation, place a nucleus on top and return the tree. """
|
root_label = tree.label()
expected_n_root = debug_root_label('N', debug=debug, root_id=tree.root_id)
expected_s_root = debug_root_label('S', debug=debug, root_id=tree.root_id)
if root_label == expected_n_root:
return tree
elif root_label == expected_s_root:
tree.set_label(expected_n_root)
return tree
else:
return t('N', [tree], debug=debug, root_id=root_id)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_relations(dgtree, relations=None):
"""Extracts relations from a DGParentedTree. Given a DGParentedTree, returns a (relation name, relation type) dict of all the RST relations occurring in that tree. """
|
if hasattr(dgtree, 'reltypes'):
# dgtree is an RSTTree or a DisTree that contains a DGParentedTree
return dgtree.reltypes
if relations is None:
relations = {}
if is_leaf(dgtree):
return relations
root_label = dgtree.label()
if root_label == '':
assert dgtree == DGParentedTree('', []), \
"The tree has no root label, but isn't empty: {}".format(dgtree)
return relations
elif root_label in NUCLEARITY_LABELS:
for child in dgtree:
relations.update(extract_relations(child, relations))
else: # dgtree is a 'relation' node
child_labels = [child.label() for child in dgtree]
assert all(label in NUCLEARITY_LABELS for label in child_labels)
if 'S' in child_labels:
relations[root_label] = 'rst'
else:
relations[root_label] = 'multinuc'
for child in dgtree:
relations.update(extract_relations(child, relations))
return relations
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def elem_wrap(self, tree, debug=False, root_id=None):
"""takes a DGParentedTree and puts a nucleus or satellite on top, depending on the nuclearity of the root element of the tree. """
|
if root_id is None:
root_id = tree.root_id
elem = self.elem_dict[root_id]
if elem['nuclearity'] == 'nucleus':
return n_wrap(tree, debug=debug, root_id=root_id)
else:
return s_wrap(tree, debug=debug, root_id=root_id)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def release():
"Cut a new release"
version = run('python setup.py --version').stdout.strip()
assert version, 'No version found in setup.py?'
print('### Releasing new version: {0}'.format(version))
run('git tag {0}'.format(version))
run('git push --tags')
run('python setup.py sdist bdist_wheel')
run('twine upload -s dist/*')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def bgwrite(fileObj, data, closeWhenFinished=False, chainAfter=None, ioPrio=4):
'''
bgwrite - Start a background writing process
@param fileObj <stream> - A stream backed by an fd
@param data <str/bytes/list> - The data to write. If a list is given, each successive element will be written to the fileObj and flushed. If a string/bytes is provided, it will be chunked according to the #BackgroundIOPriority chosen. If you would like a different chunking than the chosen ioPrio provides, use #bgwrite_chunk function instead.
Chunking makes the data available quicker on the other side, reduces iowait on this side, and thus increases interactivity (at penalty of throughput).
@param closeWhenFinished <bool> - If True, the given fileObj will be closed after all the data has been written. Default False.
@param chainAfter <None/BackgroundWriteProcess> - If a BackgroundWriteProcess object is provided (the return of bgwrite* functions), this data will be held for writing until the data associated with the provided object has completed writing.
Use this to queue several background writes, but retain order within the resulting stream.
@return - BackgroundWriteProcess - An object representing the state of this operation. @see BackgroundWriteProcess
'''
thread = BackgroundWriteProcess(fileObj, data, closeWhenFinished, chainAfter, ioPrio)
thread.start()
return thread
|
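A usage sketch for ordered background writes; chainAfter keeps the second write queued until the first one finishes (treating the returned object as a joinable thread is an assumption about BackgroundWriteProcess):

import sys

p1 = bgwrite(sys.stdout, 'first chunk\n')
p2 = bgwrite(sys.stdout, 'second chunk\n', chainAfter=p1)
p2.join()  # assumed: BackgroundWriteProcess subclasses threading.Thread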
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def run(self):
'''
run - Starts the thread. bgwrite and bgwrite_chunk automatically start the thread.
'''
# If we are chaining after another process, wait for it to complete.
# We use a flag here instead of joining the thread for various reasons
chainAfter = self.chainAfter
if chainAfter is not None:
chainPollTime = self.backgroundIOPriority.chainPollTime
while chainAfter.finished is False:
time.sleep(chainPollTime)
# Pull class data into locals
fileObj = self.fileObj
bandwidthPct = self.backgroundIOPriority.bandwidthPct
bandwidthPctDec = bandwidthPct / 100.0
# Number of blocks, total
numBlocks = len(self.remainingData)
# Bytes written
dataWritten = 0
# Mark that we have started writing data
self.startedWriting = True
# Create a conditional lambda for flushing. I'd rather just only support flushable streams, but
# some unfortunately just aren't. This should be cheaper than testing with hasattr at each iteration
if hasattr(fileObj, 'flush'):
doFlush = lambda obj : obj.flush()
else:
doFlush = lambda obj : 1
# numChunksRateSmoothing - How often we stop for a short bit to be gracious to other running tasks.
# float for division below
numChunksRateSmoothing = float(self.backgroundIOPriority.numChunksRateSmoothing)
# i will be the counter from 1 to numChunksRateSmoothing, and then reset
i = 1
# We start with using max bandwidth until we hit #numChunksRateSmoothing , at which case we recalculate
# sleepTime. We sleep after every block written to maintain a desired average throughput based on
# bandwidthPct
sleepTime = 0
# Before represents the "start" time. When we sleep, we will increment this value
# such that [ delta = (after - before) ] only accounts for time we've spent writing,
# not in charity.
before = time.time()
# timeSlept - Amount of time slept, which must be subtracted from total time spend
# to get an accurate picture of throughput.
timeSlept = 0
# firstPass - Mark the first pass through, so we can get a rough calculation
# of speed from the first write, and recalculate after #numChunksRateSmoothing
firstPass = True
if bandwidthPct == 100:
shouldRecalculate = lambda i, numChunksRateSmoothing, firstPass : False
else:
shouldRecalculate = lambda i, numChunksRateSmoothing, firstPass : firstPass or i == numChunksRateSmoothing
while len(self.remainingData) > 0:
# pop, write, flush
nextData = self.remainingData.popleft()
fileObj.write(nextData)
doFlush(fileObj)
dataWritten += len(nextData)
if sleepTime:
sleepBefore = time.time()
time.sleep(sleepTime)
sleepAfter = time.time()
timeSlept += (sleepAfter - sleepBefore)
if shouldRecalculate(i, numChunksRateSmoothing, firstPass) is True:
# if not sleeptime, we are on first
# We've completed a full period, time for charity
after = time.time()
delta = after - before - timeSlept
rate = dataWritten / delta
# if DEBUG is True:
# sys.stdout.write('\t I have written %d bytes in %3.3f seconds and slept %3.3f sec (%4.5f M/s over %3.3fs)\n' %(dataWritten, delta, timeSlept, (rate) / (1024*1024), delta + timeSlept ))
# sys.stdout.flush()
# Calculate how much time we should give up on each block to other tasks
sleepTime = delta * (1.00 - bandwidthPctDec)
sleepTime /= numChunksRateSmoothing
# if DEBUG is True:
# sys.stdout.write('Calculated new sleepTime to be: %f\n' %(sleepTime,))
timeSlept = 0
before = time.time()
i = 0
# elif DEBUG is True and i == numChunksRateSmoothing:
# # When bandwidth pct is 100 (prio=1), the above DEBUG will never be hit.
# after = time.time()
#
# delta = after - before - timeSlept
#
# rate = dataWritten / delta
#
# sys.stdout.write('\t I have written %d bytes in %3.3f seconds and slept %3.3f sec (%4.5f M/s over %3.3fs)\n' %(dataWritten, delta, timeSlept, (rate) / (1024*1024), delta + timeSlept ))
# sys.stdout.flush()
#
# timeSlept = 0
# before = time.time()
# i = 0
firstPass = False
i += 1
if self.closeWhenFinished is True:
fileObj.close()
self.finished = True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_sorted_counter(counter, tab=1):
"""print all elements of a counter in descending order"""
|
for key, count in sorted(counter.items(), key=itemgetter(1), reverse=True):
print "{0}{1} - {2}".format('\t'*tab, key, count)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_most_common(counter, number=5, tab=1):
"""print the most common elements of a counter"""
|
for key, count in counter.most_common(number):
print "{0}{1} - {2}".format('\t'*tab, key, count)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def info(docgraph):
"""print node and edge statistics of a document graph"""
|
print networkx.info(docgraph), '\n'
node_statistics(docgraph)
print
edge_statistics(docgraph)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _sum_cycles_from_tokens(self, tokens: List[str]) -> int: """Sum the total number of cycles over a list of tokens."""
|
return sum((int(self._nonnumber_pattern.sub('', t)) for t in tokens))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def template_cycles(self) -> int: """The number of cycles dedicated to template."""
|
return sum((int(re.sub(r'\D', '', op)) for op in self.template_tokens))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def skip_cycles(self) -> int: """The number of cycles dedicated to skips."""
|
return sum((int(re.sub(r'\D', '', op)) for op in self.skip_tokens))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def umi_cycles(self) -> int: """The number of cycles dedicated to UMI."""
|
return sum((int(re.sub(r'\D', '', op)) for op in self.umi_tokens))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def total_cycles(self) -> int: """The total number of cycles in the structure."""
|
return sum((int(re.sub(r'\D', '', op)) for op in self.tokens))
|
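All of these cycle properties apply the same digit-summing pattern to different token subsets. A hedged illustration, assuming a read-structure string such as '151T8M8B151T' in which each token is a cycle count followed by a type letter:

import re

structure = '151T8M8B151T'  # hypothetical read structure
tokens = re.findall(r'\d+[A-Za-z]', structure)
print(tokens)  # ['151T', '8M', '8B', '151T']
print(sum(int(re.sub(r'\D', '', t)) for t in tokens))  # total cycles: 318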
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def experimental_design(self) -> Any: """Return a markdown summary of the samples on this sample sheet. This property supports displaying rendered markdown only when running within an IPython interpreter. If we are not running in an IPython interpreter, then print out a nicely formatted ASCII table. Returns: Markdown, str: A visual table of IDs and names for all samples. """
|
if not self.samples:
raise ValueError('No samples in sample sheet')
markdown = tabulate(
[[getattr(s, h, '') for h in DESIGN_HEADER] for s in self.samples],
headers=DESIGN_HEADER,
tablefmt='pipe',
)
return maybe_render_markdown(markdown)
|
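A hedged usage sketch, assuming a populated sample sheet object:

sheet = SampleSheet('SampleSheet.csv')  # hypothetical sheet with samples
design = sheet.experimental_design
# In IPython this renders as a Markdown table; in a plain interpreter it
# is the pipe-delimited table as a string.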
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _repr_tty_(self) -> str: """Return a summary of this sample sheet in a TTY compatible codec."""
|
header_description = ['Sample_ID', 'Description']
header_samples = [
'Sample_ID',
'Sample_Name',
'Library_ID',
'index',
'index2',
]
header = SingleTable([], 'Header')
setting = SingleTable([], 'Settings')
sample_main = SingleTable([header_samples], 'Identifiers')
sample_desc = SingleTable([header_description], 'Descriptions')
# All key:value pairs found in the [Header] section.
max_header_width = max(MIN_WIDTH, sample_desc.column_max_width(-1))
for key in self.Header.keys():
if 'Description' in key:
value = '\n'.join(
wrap(getattr(self.Header, key), max_header_width)
)
else:
value = getattr(self.Header, key)
header.table_data.append([key, value])
# All key:value pairs found in the [Settings] and [Reads] sections.
for key in self.Settings.keys():
setting.table_data.append((key, getattr(self.Settings, key) or ''))
setting.table_data.append(('Reads', ', '.join(map(str, self.Reads))))
# Descriptions are wrapped to the allowable space remaining.
description_width = max(MIN_WIDTH, sample_desc.column_max_width(-1))
for sample in self.samples:
# Add all key:value pairs for this sample
sample_main.table_data.append(
[getattr(sample, title) or '' for title in header_samples]
)
# Wrap and add the sample description
sample_desc.table_data.append(
(
sample.Sample_ID,
'\n'.join(
wrap(sample.Description or '', description_width)
),
)
)
# These tables do not have horizontal headers so remove the frame.
header.inner_heading_row_border = False
setting.inner_heading_row_border = False
table = '\n'.join(
[header.table, setting.table, sample_main.table, sample_desc.table]
)
return table
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_prep_value(self, value):
"""Converts timezone instances to strings for db storage."""
|
# pylint: disable=newstyle
value = super(TimeZoneField, self).get_prep_value(value)
if isinstance(value, tzinfo):
return value.zone
return value
|
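For illustration: pytz timezone instances expose their name on the `zone` attribute, which is the string that ends up in the database:

import pytz
from datetime import tzinfo

tz = pytz.timezone('America/New_York')
assert isinstance(tz, tzinfo)
print(tz.zone)  # 'America/New_York'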
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_python(self, value):
"""Returns a datetime.tzinfo instance for the value."""
|
# pylint: disable=newstyle
value = super(TimeZoneField, self).to_python(value)
if not value:
return value
try:
return pytz.timezone(str(value))
except pytz.UnknownTimeZoneError:
raise ValidationError(
message=self.error_messages['invalid'],
code='invalid',
params={'value': value}
)
|
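A hedged sketch of the three paths through this method, with `field` standing in for a TimeZoneField instance:

field.to_python('Europe/Berlin')  # returns pytz.timezone('Europe/Berlin')
field.to_python('')               # falsy values pass through unchanged
field.to_python('Not/AZone')      # raises ValidationError (code='invalid')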
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def formfield(self, **kwargs):
"""Returns a custom form field for the TimeZoneField."""
|
defaults = {'form_class': forms.TimeZoneField}
defaults.update(**kwargs)
return super(TimeZoneField, self).formfield(**defaults)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check(self, **kwargs):
# pragma: no cover """Calls the TimeZoneField's custom checks."""
|
errors = super(TimeZoneField, self).check(**kwargs)
errors.extend(self._check_timezone_max_length_attribute())
errors.extend(self._check_choices_attribute())
return errors
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_timezone_max_length_attribute(self):
# pragma: no cover """ Checks that the `max_length` attribute covers all possible pytz timezone lengths. """
|
# Retrieve the maximum possible length for the time zone string
possible_max_length = max(map(len, pytz.all_timezones))
# Make sure that the max_length attribute will handle the longest time
# zone string
if self.max_length < possible_max_length: # pragma: no cover
return [
checks.Error(
msg=(
"'max_length' is too short to support all possible "
"pytz time zones."
),
hint=(
"pytz {version}'s longest time zone string has a "
"length of {value}, although it is recommended that "
"you leave room for longer time zone strings to be "
"added in the future.".format(
version=pytz.VERSION,
value=possible_max_length
)
),
obj=self,
)
]
# When no error, return an empty list
return []
|
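The bound being checked can be reproduced by hand; at the time of writing the longest pytz zone name is 32 characters, though this may change between pytz releases:

import pytz

longest = max(pytz.all_timezones, key=len)
print(longest, len(longest))  # e.g. 'America/Argentina/ComodRivadavia' 32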
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_choices_attribute(self):
# pragma: no cover """Checks to make sure that choices contains valid timezone choices."""
|
if self.choices:
warning_params = {
'msg': (
"'choices' contains an invalid time zone value '{value}' "
"which was not found as a supported time zone by pytz "
"{version}."
),
'hint': "Values must be found in pytz.all_timezones.",
'obj': self,
}
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key in map(lambda x: x[0], option_value):
if optgroup_key not in pytz.all_timezones:
# Make sure we don't raise this error on empty
# values
if optgroup_key not in self.empty_values:
# Update the error message by adding the value
warning_params.update({
'msg': warning_params['msg'].format(
value=optgroup_key,
version=pytz.VERSION
)
})
# Return the warning
return [
checks.Warning(**warning_params)
]
elif option_key not in pytz.all_timezones:
# Make sure we don't raise this error on empty
# values
if option_key not in self.empty_values:
# Update the error message by adding the value
warning_params.update({
'msg': warning_params['msg'].format(
value=option_key,
version=pytz.VERSION
)
})
# Return the warning
return [
checks.Warning(**warning_params)
]
# When no error, return an empty list
return []
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_python(self, value):
"""Convert the value to the appropriate timezone."""
|
# pylint: disable=newstyle
value = super(LinkedTZDateTimeField, self).to_python(value)
if not value:
return value
return value.astimezone(self.timezone)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pre_save(self, model_instance, add):
""" Converts the value being saved based on `populate_from` and `time_override` """
|
# pylint: disable=newstyle
# Retrieve the currently entered datetime
value = super(
LinkedTZDateTimeField,
self
).pre_save(
model_instance=model_instance,
add=add
)
# Convert the value to the correct time/timezone
value = self._convert_value(
value=value,
model_instance=model_instance,
add=add
)
setattr(model_instance, self.attname, value)
return value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deconstruct(self):
# pragma: no cover """Add our custom keyword arguments for migrations."""
|
# pylint: disable=newstyle
name, path, args, kwargs = super(
LinkedTZDateTimeField,
self
).deconstruct()
# Only include kwarg if it's not the default
if self.populate_from is not None:
# Since populate_from requires a model instance and Django does
# not allow lambda, we hope that we have been provided a
# function that can be parsed
kwargs['populate_from'] = self.populate_from
# Only include kwarg if it's not the default
if self.time_override is not None:
if hasattr(self.time_override, '__call__'):
# Call the callable datetime.time instance
kwargs['time_override'] = self.time_override()
else:
kwargs['time_override'] = self.time_override
return name, path, args, kwargs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_populate_from(self, model_instance):
""" Retrieves the timezone or None from the `populate_from` attribute. """
|
if hasattr(self.populate_from, '__call__'):
tz = self.populate_from(model_instance)
else:
from_attr = getattr(model_instance, self.populate_from)
tz = from_attr() if callable(from_attr) else from_attr
try:
tz = pytz.timezone(str(tz))
except pytz.UnknownTimeZoneError:
# It was a valiant effort. Resistance is futile.
raise
# If we have a timezone, set the instance's timezone attribute
self.timezone = tz
return tz
|
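A hedged sketch of the two `populate_from` forms this method supports; the field arguments and model attribute names are hypothetical:

def tz_for_instance(instance):
    return instance.venue_timezone  # anything pytz.timezone() accepts

# as a callable receiving the model instance:
start = LinkedTZDateTimeField(populate_from=tz_for_instance)
# as the name of an attribute (or zero-argument method) on the instance:
start = LinkedTZDateTimeField(populate_from='venue_timezone')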
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_time_override(self):
""" Retrieves the datetime.time or None from the `time_override` attribute. """
|
if callable(self.time_override):
time_override = self.time_override()
else:
time_override = self.time_override
if not isinstance(time_override, datetime_time):
raise ValueError(
'Invalid type. Must be a datetime.time instance.'
)
return time_override
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _convert_value(self, value, model_instance, add):
""" Converts the value to the appropriate timezone and time as declared by the `time_override` and `populate_from` attributes. """
|
if not value:
return value
# Retrieve the default timezone as the default
tz = get_default_timezone()
# If populate_from exists, override the default timezone
if self.populate_from is not None:
tz = self._get_populate_from(model_instance)
if is_naive(value):
value = make_aware(value=value, timezone=tz)
# Convert the value to a datetime object in the correct timezone. This
# ensures that we will have the correct date if we are performing a
# time override below.
value = value.astimezone(tz)
# Do not convert the time to the time override if auto_now or
# auto_now_add is set
if self.time_override is not None and not (
self.auto_now or (self.auto_now_add and add)
):
# Retrieve the time override
time_override = self._get_time_override()
# Convert the value to the date/time with the appropriate timezone
value = make_aware(
value=datetime.combine(
date=value.date(),
time=time_override
),
timezone=tz
)
return value
|
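A worked sketch of the conversion, assuming `populate_from` resolved to 'America/New_York' and `time_override` to datetime.time(23, 59):

from datetime import datetime, time as datetime_time
import pytz
from django.utils.timezone import make_aware

tz = pytz.timezone('America/New_York')
naive = datetime(2020, 6, 1, 12, 0)           # naive input value
aware = make_aware(value=naive, timezone=tz)  # 2020-06-01 12:00-04:00
final = make_aware(
    value=datetime.combine(date=aware.date(), time=datetime_time(23, 59)),
    timezone=tz,
)                                             # 2020-06-01 23:59-04:00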
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_multinuc(relname, nucleii):
"""Creates a rst.sty Latex string representation of a multi-nuclear RST relation."""
|
nuc_strings = []
for nucleus in nucleii:
nuc_strings.append(MULTINUC_ELEMENT_TEMPLATE.substitute(nucleus=nucleus))
nucleii_string = "\n\t" + "\n\t".join(nuc_strings)
return MULTINUC_TEMPLATE.substitute(relation=relname, nucleus_segments=nucleii_string)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_multisat(nucsat_tuples):
"""Creates a rst.sty Latex string representation of a multi-satellite RST subtree (i.e. a set of nucleus-satellite relations that share the same nucleus. """
|
nucsat_tuples = [tup for tup in nucsat_tuples] # unpack the iterable, so we can check its length
assert len(nucsat_tuples) > 1, \
"A multisat relation bundle must contain more than one relation"
result = "\\dirrel\n\t"
first_relation, remaining_relations = nucsat_tuples[0], nucsat_tuples[1:]
relname, nuc_types, elements = first_relation
first_nucleus_pos = current_nucleus_pos = nuc_types.index('N')
result_segments = []
for i, nuc_type in enumerate(nuc_types):
if nuc_type == 'N':
result_segments.append(NUC_TEMPLATE.substitute(nucleus=elements[i]))
else:
result_segments.append(SAT_TEMPLATE.substitute(satellite=elements[i], relation=relname))
for (relname, nuc_types, elements) in remaining_relations:
for i, nuc_type in enumerate(nuc_types):
if nuc_type == 'N': # all relations share the same nucleus, so we don't need to reprocess it.
continue
else:
result_segment = SAT_TEMPLATE.substitute(satellite=elements[i], relation=relname)
if i < first_nucleus_pos: # satellite comes before the nucleus
result_segments.insert(current_nucleus_pos, result_segment)
current_nucleus_pos += 1
else:
result_segments.append(result_segment)
return result + '\n\t'.join(result_segments)
|
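A hedged sketch of the expected input shape, inferred from the loop above: each tuple is (relname, nuc_types, elements), and all tuples share one nucleus:

nucsat_tuples = [
    ('elaboration', ['N', 'S'], ['the nucleus text', 'first satellite']),
    ('background', ['S', 'N'], ['second satellite', 'the nucleus text']),
]
latex = make_multisat(nucsat_tuples)
# a \dirrel string containing the nucleus once plus both satellites,
# ordered around the nucleus position of the first relation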
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def indent(text, amount, ch=' '):
"""Indents a string by the given amount of characters."""
|
padding = amount * ch
return ''.join(padding+line for line in text.splitlines(True))
|
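Two quick examples; because splitlines(True) keeps the line endings, multi-line strings survive intact:

indent('first line\nsecond line\n', 4)
# -> '    first line\n    second line\n'
indent('x', 2, ch='-')
# -> '--x'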
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def document_ids(self):
"""returns a list of document IDs used in the PCC"""
|
matches = [PCC_DOCID_RE.match(os.path.basename(fname))
for fname in self.tokenization]
return sorted(match.groups()[0] for match in matches)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_document(self, doc_id):
""" given a document ID, returns a merged document graph containng all available annotation layers. """
|
layer_graphs = []
for layer_name in self.layers:
layer_files, read_function = self.layers[layer_name]
for layer_file in layer_files:
if fnmatch.fnmatch(layer_file, '*{}.*'.format(doc_id)):
layer_graphs.append(read_function(layer_file))
if not layer_graphs:
raise TypeError("There are no files with that document ID.")
else:
doc_graph = layer_graphs[0]
for layer_graph in layer_graphs[1:]:
doc_graph.merge_graphs(layer_graph)
return doc_graph
|
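A hedged usage sketch; the reader class name and paths are hypothetical stand-ins for whatever object carries `self.layers`:

pcc = PCC('/path/to/potsdam-commentary-corpus')  # hypothetical reader
docgraph = pcc.get_document('4282')  # merges e.g. syntax, RST, coreference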
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_files_by_layer(self, layer_name, file_pattern='*'):
""" returns a list of all files with the given filename pattern in the given PCC annotation layer """
|
layer_path = os.path.join(self.path, layer_name)
return list(dg.find_files(layer_path, file_pattern))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def maybe_render_markdown(string: str) -> Any: """Render a string as Markdown only if in an IPython interpreter."""
|
if is_ipython_interpreter(): # pragma: no cover
from IPython.display import Markdown # type: ignore # noqa: E501
return Markdown(string)
else:
return string
|
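The effect in both environments:

maybe_render_markdown('**bold**')
# IPython: an IPython.display.Markdown object that renders as bold text
# plain interpreter: the string '**bold**' unchanged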
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generic_converter_cli(docgraph_class, file_descriptor=''):
""" generic command line interface for importers. Will convert the file specified on the command line into a dot representation of the corresponding DiscourseDocumentGraph and write the output to stdout or a file specified on the command line. Parameters docgraph_class : class a DiscourseDocumentGraph (or a class derived from it), not an instance of it! file_descriptor : str string descring the input format, e.g. 'TigerXML (syntax)' """
|
parser = argparse.ArgumentParser()
parser.add_argument('input_file',
help='{} file to be converted'.format(file_descriptor))
parser.add_argument('output_file', nargs='?', default=sys.stdout)
args = parser.parse_args(sys.argv[1:])
assert os.path.isfile(args.input_file), \
"'{}' isn't a file".format(args.input_file)
docgraph = docgraph_class(args.input_file)
write_dot(docgraph, args.output_file)
|
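A hedged sketch of how an importer module might expose this; the graph class name is an assumption:

if __name__ == '__main__':
    generic_converter_cli(TigerDocumentGraph, 'TigerXML (syntax)')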
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump_sensor_memory(self, cb_compress=False, custom_compress=False, custom_compress_file=None, auto_collect_result=False):
"""Customized function for dumping sensor memory. :arguments cb_compress: If True, use CarbonBlack's built-in compression. :arguments custom_compress_file: Supply path to lr_tools/compress_file.bat to fork powershell compression :collect_mem_file: If True, wait for memdump + and compression to complete, then use cbapi to collect """
|
print("~ dumping contents of memory on {}".format(self.sensor.computer_name))
local_file = remote_file = "{}.memdmp".format(self.sensor.computer_name)
if not self.lr_session:
self.go_live()
try:
if cb_compress and auto_collect_result:
logging.info("CB compression and auto-collection set")
self.lr_session.memdump(remote_filename=remote_file, compress=cb_compress)
return True
dump_object = self.lr_session.start_memdump(remote_filename=remote_file, compress=cb_compress)
dump_object.wait()
if cb_compress:
print("+ Memory dump compressed at -> C:\windows\carbonblack\{}.zip".format(remote_file))
if auto_collect_result:
self.getFile_with_timeout("C:\\Windows\\CarbonBlack\\{}.zip".format(remote_file))
return True
print("+ Memory dump complete on host -> C:\windows\carbonblack\{}".format(remote_file))
except LiveResponseError as e:
raise Exception("LiveResponseError: {}".format(e))
if custom_compress: # compress with powershell?
if custom_compress_file is None or not os.path.exists(custom_compress_file):
logging.debug("{} not found.".format(custom_compress_file))
HOME_DIR = os.path.abspath(os.path.join(os.path.realpath(__file__),'..','..'))
custom_compress_file = os.path.join(HOME_DIR, 'lr_tools', 'compress_file.bat')
if not os.path.exists(custom_compress_file):
logging.error("{} not found.".format(custom_compress_file))
return False
logging.info("Using {}".format(custom_compress_file))
bat_filename = custom_compress_file[custom_compress_file.rfind('/')+1:]
filedata = None
with open(custom_compress_file, 'rb') as f:
filedata = f.read()
try:
self.lr_session.put_file(filedata, "C:\\Windows\\CarbonBlack\\" + bat_filename)
except LiveResponseError as e:
if 'ERROR_FILE_EXISTS' not in str(e):
logging.error("Error puting compress_file.bat")
return False
else:
self.lr_session.delete_file("C:\\Windows\\CarbonBlack\\" + bat_filename)
self.lr_session.put_file(filedata, "C:\\Windows\\CarbonBlack\\" + bat_filename)
print("~ Launching "+ bat_filename +" to create C:\\windows\\carbonblack\\_memdump.zip")
compress_cmd = "C:\\Windows\\CarbonBlack\\" + bat_filename + " " + remote_file
self.lr_session.create_process(compress_cmd, wait_for_output=False, wait_for_completion=False)
if auto_collect_result:
print("~ waiting for {} to complete.".format(bat_filename))
self.wait_for_process_to_finish(bat_filename)
self.getFile_with_timeout("C:\\windows\\carbonblack\\_memdump.zip")
print("[!] If compression successful, _memdump.zip will exist, and {} should be deleted.".format(remote_file))
# here, they didn't want to use cb or custom compression, but they did want to auto collect results
if auto_collect_result:
self.getFile_with_timeout("C:\\Windows\\CarbonBlack\\{}".format(remote_file))
return True
|
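A hedged call sketch; `session` stands in for whatever object carries the sensor and live-response state:

# CarbonBlack-side compression plus automatic collection of the .zip:
session.dump_sensor_memory(cb_compress=True, auto_collect_result=True)
# batch/powershell compression via the bundled compress_file.bat:
session.dump_sensor_memory(custom_compress=True, auto_collect_result=True)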
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump_process_memory(self, pid, working_dir="c:\\windows\\carbonblack\\", path_to_procdump=None):
"""Use sysinternals procdump to dump process memory on a specific process. If only the pid is specified, the default behavior is to use the version of ProcDump supplied with cbinterface's pip3 installer. :requires: SysInternals ProcDump v9.0 included with cbinterface==1.1.0 :arguments pid: Process id to dump memory for :arguments working_dir: Specify a directoy on the windows sensor to work out of. Default: C:\\Windows\\CarbonBlack\\ :arguments path_to_procdump: Specify the path to a version of procdump you want to use. Default is included copy """
|
self.go_live()
print("~ dumping memory where pid={} for {}".format(pid, self.sensor.computer_name))
# need to make sure procdump.exe is on the sensor
procdump_host_path = None
dir_output = self.lr_session.list_directory(working_dir)
for dir_item in dir_output:
if dir_item['filename'] == 'procdump.exe':
logging.info("procdump.exe already on host.")
procdump_host_path = working_dir + "procdump.exe"
break
else:
logging.info("Dropping procdump.exe on host.")
if not procdump_host_path:
if path_to_procdump is None or not os.path.exists(path_to_procdump):
HOME_DIR = os.path.abspath(os.path.join(os.path.realpath(__file__),'..','..'))
path_to_procdump = os.path.join(HOME_DIR, 'lr_tools', 'procdump.exe')
if not os.path.exists(path_to_procdump):
logging.warning("{} not found".format(path_to_procdump))
return False
print("~ dropping procdump.exe on host.")
filedata = None
with open(path_to_procdump, 'rb') as f:
filedata = f.read()
try:
self.lr_session.create_directory(working_dir)
except LiveResponseError:
logging.debug("working directory already exists")
self.lr_session.put_file(filedata, working_dir + "procdump.exe")
procdump_host_path = working_dir + "procdump.exe"
print("~ Executing procdump..")
command_str = procdump_host_path + " -accepteula -ma " + str(pid)
result = self.lr_session.create_process(command_str)
time.sleep(1)
print("+ procdump output:\n-------------------------")
result = result.decode('utf-8')
print(result + "\n-------------------------")
# cut off the carriage return and line feed from filename
dumpfile_name = result[result.rfind('\\')+1:result.rfind('.dmp')+4]
while True:
if 'procdump.exe' not in str(self.lr_session.list_processes()):
break
else:
time.sleep(1)
# download dumpfile to localdir
self.getFile_with_timeout(working_dir + dumpfile_name)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_relationtypes(urml_xml_tree):
""" extracts the allowed RST relation names and relation types from an URML XML file. Parameters urml_xml_tree : lxml.etree._ElementTree lxml ElementTree representation of an URML XML file Returns ------- relations : dict of (str, str) Returns a dictionary with RST relation names as keys (str) and relation types (either 'par' or 'hyp') as values (str). """
|
return {rel.attrib['name']: rel.attrib['type']
for rel in urml_xml_tree.iterfind('//header/reltypes/rel')
if 'type' in rel.attrib}
|
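A hedged usage sketch with a hypothetical URML file:

from lxml import etree

tree = etree.parse('maz-4282.urml')  # hypothetical URML document
extract_relationtypes(tree)
# e.g. {'elaboration': 'hyp', 'concession': 'hyp', 'joint': 'par'}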
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filters(self):
"""List of filters available for the dataset."""
|
if self._filters is None:
self._filters, self._attributes = self._fetch_configuration()
return self._filters
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def default_attributes(self):
"""List of default attributes for the dataset."""
|
if self._default_attributes is None:
self._default_attributes = {
name: attr
for name, attr in self.attributes.items()
if attr.default is True
}
return self._default_attributes
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_attributes(self):
"""Lists available attributes in a readable DataFrame format. Returns: pd.DataFrame: Frame listing available attributes. """
|
def _row_gen(attributes):
for attr in attributes.values():
yield (attr.name, attr.display_name, attr.description)
return pd.DataFrame.from_records(
_row_gen(self.attributes),
columns=['name', 'display_name', 'description'])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_filters(self):
"""Lists available filters in a readable DataFrame format. Returns: pd.DataFrame: Frame listing available filters. """
|
def _row_gen(attributes):
for attr in attributes.values():
yield (attr.name, attr.type, attr.description)
return pd.DataFrame.from_records(
_row_gen(self.filters), columns=['name', 'type', 'description'])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def query(self, attributes=None, filters=None, only_unique=True, use_attr_names=False, dtypes=None):
"""Queries the dataset to retrieve the contained data. Args:
attributes (list[str]): Names of attributes to fetch in the query. Attribute names must correspond to valid attributes. See the attributes property for a list of valid attributes.
filters (dict[str,any]): Dictionary of filters --> values to filter the dataset by. Filter names and values must correspond to valid filters and filter values. See the filters property for a list of valid filters.
only_unique (bool): Whether to return only rows containing unique values (True) or to include duplicate rows (False).
use_attr_names (bool): Whether to use the attribute names as column names in the result (True) or the attribute display names (False).
dtypes (dict[str,any]): Dictionary of attributes --> data types that describes to pandas how the columns should be handled. Returns: pandas.DataFrame: DataFrame containing the query results. """
|
# Example query from Ensembl biomart:
#
# <?xml version="1.0" encoding="UTF-8"?>
# <!DOCTYPE Query>
# <Query virtualSchemaName = "default" formatter = "TSV" header = "0"
# uniqueRows = "0" count = "" datasetConfigVersion = "0.6" >
# <Dataset name = "hsapiens_gene_ensembl" interface = "default" >
# <Filter name = "chromosome_name" value = "1,2"/>
# <Filter name = "end" value = "10000000"/>
# <Filter name = "start" value = "1"/>
# <Attribute name = "ensembl_gene_id" />
# <Attribute name = "ensembl_transcript_id" />
# </Dataset>
# </Query>
# Setup query element.
root = ElementTree.Element('Query')
root.set('virtualSchemaName', self._virtual_schema)
root.set('formatter', 'TSV')
root.set('header', '1')
root.set('uniqueRows', native_str(int(only_unique)))
root.set('datasetConfigVersion', '0.6')
# Add dataset element.
dataset = ElementTree.SubElement(root, 'Dataset')
dataset.set('name', self.name)
dataset.set('interface', 'default')
# Default to default attributes if none requested.
if attributes is None:
attributes = list(self.default_attributes.keys())
# Add attribute elements.
for name in attributes:
try:
attr = self.attributes[name]
self._add_attr_node(dataset, attr)
except KeyError:
raise BiomartException(
'Unknown attribute {}, check dataset attributes '
'for a list of valid attributes.'.format(name))
if filters is not None:
# Add filter elements.
for name, value in filters.items():
try:
filter_ = self.filters[name]
self._add_filter_node(dataset, filter_, value)
except KeyError:
raise BiomartException(
'Unknown filter {}, check dataset filters '
'for a list of valid filters.'.format(name))
# Fetch response.
response = self.get(query=ElementTree.tostring(root))
# Raise exception if an error occurred.
if 'Query ERROR' in response.text:
raise BiomartException(response.text)
# Parse results into a DataFrame.
try:
result = pd.read_csv(StringIO(response.text), sep='\t', dtype=dtypes)
# A TypeError is raised if a data type is not understood by pandas
except TypeError:
raise ValueError("Invalid data type used in dtypes")
if use_attr_names:
# Rename columns with attribute names instead of display names.
column_map = {
self.attributes[attr].display_name: attr
for attr in attributes
}
result.rename(columns=column_map, inplace=True)
return result
|
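A hedged usage sketch mirroring the XML comment above; the constructor follows pybiomart's public API, but treat the exact names as assumptions:

from pybiomart import Dataset

dataset = Dataset(name='hsapiens_gene_ensembl',
                  host='http://www.ensembl.org')
df = dataset.query(
    attributes=['ensembl_gene_id', 'ensembl_transcript_id'],
    filters={'chromosome_name': ['1', '2'], 'start': 1, 'end': 10000000},
)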