<SYSTEM_TASK:>
Returns lists of files which are needed by this device in order
<END_TASK>
<USER_TASK:>
Description:
def need(self, folder, page=None, perpage=None):
""" Returns lists of files which are needed by this device in order
for it to become in sync.
Args:
folder (str): Folder ID.
page (int): If defined, applies pagination across the
collection of results.
perpage (int): If defined, applies pagination across the
collection of results.
Returns:
dict
""" |
assert isinstance(page, int) or page is None
assert isinstance(perpage, int) or perpage is None
return self.get('need', params={'folder': folder,
'page': page,
'perpage': perpage}) |
<SYSTEM_TASK:>
Request immediate rescan of a folder, or a specific path within a
<END_TASK>
<USER_TASK:>
Description:
def scan(self, folder, sub=None, next_=None):
""" Request immediate rescan of a folder, or a specific path within a
folder.
Args:
folder (str): Folder ID.
sub (str): Path relative to the folder root. If sub is omitted,
the entire folder is scanned for changes; otherwise only
the given path (and its children) is scanned.
next_ (int): Delays Syncthing's next automated rescan by the
given number of seconds.
Returns:
str
""" |
if not sub:
sub = ''
assert isinstance(sub, string_types)
assert isinstance(next_, int) or next_ is None
return self.post('scan', params={'folder': folder,
'sub': sub,
'next': next_}) |
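A minimal usage sketch for the two wrappers above (hedged: `db` is an illustrative stand-in for an instance of the API class these methods belong to):

db.need('default', page=1, perpage=50)   # dict of files still needed for sync
db.scan('default', sub='photos/2015')    # rescan only a subpath of the folder
db.scan('default', next_=600)            # delay the next automated rescan by 600s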
<SYSTEM_TASK:>
A long-polling method that queries Syncthing for events.
<END_TASK>
<USER_TASK:>
Description:
def _events(self, using_url, filters=None, limit=None):
""" A long-polling method that queries Syncthing for events..
Args:
using_url (str): REST HTTP endpoint
filters (List[str]): Creates an "event group" in Syncthing to
only receive events that have been subscribed to.
limit (int): The number of events to query in the history
to catch up to the current state.
Returns:
generator[dict]
""" |
# coerce
if not isinstance(limit, (int, NoneType)):
limit = None
# coerce
if filters is None:
filters = []
# format our list into the correct expectation of string with commas
if isinstance(filters, string_types):
filters = filters.split(',')
# reset the state if the loop was broken with `stop`
if not self.blocking:
self.blocking = True
# block/long-poll for updates to the events api
while self.blocking:
params = {
'since': self._last_seen_id,
'limit': limit,
}
if filters:
params['events'] = ','.join(map(str, filters))
try:
data = self.get(using_url, params=params, raw_exceptions=True)
except (ConnectTimeout, ConnectionError) as e:
# swallow timeout errors for long polling
data = None
except Exception as e:
reraise('', e)
if data:
# update our last_seen_id to move our event counter forward
self._last_seen_id = data[-1]['id']
for event in data:
# handle potentially multiple events returned in a list
self._count += 1
yield event |
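A hedged sketch of consuming the long-polling generator above (`client` and the endpoint name 'events' are illustrative, not confirmed by the source):

for event in client._events('events', filters=['ItemFinished'], limit=10):
    print(event['id'])
    if event['type'] == 'ItemFinished':
        client.blocking = False  # breaks the while loop on the next iteration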
<SYSTEM_TASK:>
Return tree positions of all leaves of a subtree.
<END_TASK>
<USER_TASK:>
Description:
def subtree_leaf_positions(subtree):
"""Return tree positions of all leaves of a subtree.""" |
relative_leaf_positions = subtree.treepositions('leaves')
subtree_root_pos = subtree.treeposition()
absolute_leaf_positions = []
for rel_leaf_pos in relative_leaf_positions:
absolute_leaf_positions.append(subtree_root_pos + rel_leaf_pos)
return absolute_leaf_positions |
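A runnable sketch of subtree_leaf_positions; note that .treeposition() requires an nltk.ParentedTree, not a plain nltk.Tree:

import nltk

tree = nltk.ParentedTree.fromstring('(S (NP (DT the) (NN cat)) (VP (VBD sat)))')
np_subtree = tree[0]  # the NP subtree, at tree position (0,)
print(subtree_leaf_positions(np_subtree))
# [(0, 0, 0), (0, 1, 0)], i.e. the absolute positions of 'the' and 'cat'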
<SYSTEM_TASK:>
Return a string representation of the tree in .dis format.
<END_TASK>
<USER_TASK:>
Description:
def to_dis_format(self):
"""Return a string representation of the tree in .dis format.""" |
dis_raw_str = self.disfiletree.pformat()
return re.sub('_!(.*?)_!', join_lines, dis_raw_str, flags=re.DOTALL) |
<SYSTEM_TASK:>
takes a document graph, converts it into GraphML format and writes it to
<END_TASK>
<USER_TASK:>
Description:
def write_graphml(docgraph, output_file):
"""
takes a document graph, converts it into GraphML format and writes it to
a file.
""" |
dg_copy = deepcopy(docgraph)
layerset2str(dg_copy)
attriblist2str(dg_copy)
remove_root_metadata(dg_copy)
nx_write_graphml(dg_copy, output_file) |
<SYSTEM_TASK:>
Add a discourse relation to the document graph.
<END_TASK>
<USER_TASK:>
Description:
def add_discrel(self, discrel):
"""
Add a discourse relation to the document graph.
Parameters
----------
discrel : etree.Element
etree representation of a <discRel> element which describes the
relation between two EDUs.
The ID of the other EDU is given in the arg2 attribute.
Note that arg2 can either reference an EDU (e.g. edu_9_3_2)
or an EDU range (e.g. edus9_3_1-5_0).
Example
-------
<edu xml:id="edu_9_3_0">
<discRel relation="Explanation-Speechact" marking="-" arg2="edus9_3_1-5_0"/>
<node xml:id="s128_504" cat="SIMPX" func="--">
...
</node>
<word xml:id="s128_3" form=":" pos="$." lemma=":" func="--" deprel="ROOT"/>
</edu>
<edu xml:id="edu_9_3_1">
<discRel relation="Continuation" marking="-" arg2="edu_9_3_2"/>
<node xml:id="s128_506" cat="VF" func="-" parent="s128_525">
...
</node>
...
</edu>
""" |
if self.ignore_relations is False:
arg1_id = self.get_element_id(discrel)
arg2_id = discrel.attrib['arg2']
reltype = discrel.attrib['relation']
discrel_attribs = self.element_attribs_to_dict(discrel)
self.node[arg1_id].update(discrel_attribs)
self.add_layer(arg1_id, self.ns+':discourse')
self.add_layer(arg1_id, self.ns+':relation')
self.add_edge(arg1_id, arg2_id,
layers={self.ns, self.ns+':discourse', self.ns+':relation'},
edge_type=dg.EdgeTypes.pointing_relation,
relation=reltype,
label='discourse:'+reltype) |
<SYSTEM_TASK:>
returns the ID of the parent of the given element
<END_TASK>
<USER_TASK:>
Description:
def get_parent_id(element):
"""returns the ID of the parent of the given element""" |
if 'parent' in element.attrib:
return element.attrib['parent']
else:
return element.getparent().attrib[add_ns('id')] |
<SYSTEM_TASK:>
returns the ID of the sentence the given element belongs to.
<END_TASK>
<USER_TASK:>
Description:
def get_sentence_id(self, element):
"""returns the ID of the sentence the given element belongs to.""" |
try:
sentence_elem = element.iterancestors('sentence').next()
except StopIteration as e:
warnings.warn("<{}> element is not a descendant of a <sentence> "
"We'll try to extract the sentence ID from the "
"prefix of the element ID".format(element.tag))
return self.get_element_id(element).split('_')[0]
return self.get_element_id(sentence_elem) |
<SYSTEM_TASK:>
Returns the type of the root node of a DGParentedTree.
<END_TASK>
<USER_TASK:>
Description:
def get_node_type(dgtree):
"""Returns the type of the root node of a DGParentedTree.""" |
if is_leaf(dgtree):
return TreeNodeTypes.leaf_node
root_label = dgtree.label()
if root_label == '':
assert dgtree == DGParentedTree('', []), \
"The tree has no root label, but isn't empty: {}".format(dgtree)
return TreeNodeTypes.empty_tree
elif root_label in NUCLEARITY_LABELS:
return TreeNodeTypes.nuclearity_node
else:
assert isinstance(dgtree, (RSTTree, DGParentedTree)), type(dgtree)
return TreeNodeTypes.relation_node |
<SYSTEM_TASK:>
Given a treeposition, return the treepositions of its children.
<END_TASK>
<USER_TASK:>
Description:
def get_children_treepos(self, treepos):
"""Given a treeposition, return the treepositions of its children.""" |
children_treepos = []
for i, child in enumerate(self.dgtree[treepos]):
if isinstance(child, nltk.Tree):
children_treepos.append(child.treeposition())
elif is_leaf(child):
# we can't call .treeposition() on a leaf node
treepos_list = list(treepos)
treepos_list.append(i)
leaf_treepos = tuple(treepos_list)
children_treepos.append(leaf_treepos)
return children_treepos |
<SYSTEM_TASK:>
Given a treeposition, return the treepositions of its siblings.
<END_TASK>
<USER_TASK:>
Description:
def get_siblings_treepos(self, treepos):
"""Given a treeposition, return the treepositions of its siblings.""" |
parent_pos = self.get_parent_treepos(treepos)
siblings_treepos = []
if parent_pos is not None:
for child_treepos in self.get_children_treepos(parent_pos):
if child_treepos != treepos:
siblings_treepos.append(child_treepos)
return siblings_treepos |
<SYSTEM_TASK:>
Given a treeposition, return the treepositions of its cousins.
<END_TASK>
<USER_TASK:>
Description:
def get_cousins_treepos(self, treepos):
"""Given a treeposition, return the treeposition of its siblings.""" |
cousins_pos = []
mother_pos = self.get_parent_treepos(treepos)
if mother_pos is not None:
aunts_pos = self.get_siblings_treepos(mother_pos)
for aunt_pos in aunts_pos:
cousins_pos.extend(self.get_children_treepos(aunt_pos))
return cousins_pos |
<SYSTEM_TASK:>
Given the treeposition of a node, return the label of its parent.
<END_TASK>
<USER_TASK:>
Description:
def get_parent_label(self, treepos):
"""Given the treeposition of a node, return the label of its parent.
Returns None, if the tree has no parent.
""" |
parent_pos = self.get_parent_treepos(treepos)
if parent_pos is not None:
parent = self.dgtree[parent_pos]
return parent.label()
else:
return None |
<SYSTEM_TASK:>
Given the treeposition of a node, return the labels of its children.
<END_TASK>
<USER_TASK:>
Description:
def get_children_labels(self, treepos):
"""Given the treeposition of a node, return the labels of its children.""" |
children_labels = []
node = self.dgtree[treepos]
for child in node:
if is_leaf(child):
# we can't call .label() on a leaf node
children_labels.append(child)
else:
children_labels.append(child.label())
return children_labels |
<SYSTEM_TASK:>
returns a key that can be used in sort functions.
<END_TASK>
<USER_TASK:>
Description:
def natural_sort_key(s):
"""
returns a key that can be used in sort functions.
Example:
>>> items = ['A99', 'a1', 'a2', 'a10', 'a24', 'a12', 'a100']
The normal sort function will ignore the natural order of the
integers in the string:
>>> print sorted(items)
['A99', 'a1', 'a10', 'a100', 'a12', 'a2', 'a24']
When we use this function as a key to the sort function,
the natural order of the integer is considered.
>>> print sorted(items, key=natural_sort_key)
['A99', 'a1', 'a2', 'a10', 'a12', 'a24', 'a100']
""" |
return [int(text) if text.isdigit() else text
for text in re.split(INTEGER_RE, str(s))] |
<SYSTEM_TASK:>
Tests if the input is ``str`` or ``unicode``. If it is ``str``, it
<END_TASK>
<USER_TASK:>
Description:
def ensure_unicode(str_or_unicode):
"""
Tests if the input is ``str`` or ``unicode``. If it is ``str``, it
will be decoded from ``UTF-8`` to ``unicode``.
""" |
if isinstance(str_or_unicode, str):
return str_or_unicode.decode('utf-8')
elif isinstance(str_or_unicode, unicode):
return str_or_unicode
else:
raise ValueError("Input '{0}' should be a string or unicode, "
"but its of type {1}".format(str_or_unicode,
type(str_or_unicode))) |
<SYSTEM_TASK:>
Tests if the input is ``str`` or ``unicode``. If it is ``unicode``,
<END_TASK>
<USER_TASK:>
Description:
def ensure_utf8(str_or_unicode):
"""
Tests if the input is ``str`` or ``unicode``. If it is ``unicode``,
it will be encoded from ``unicode`` to ``utf-8``. Otherwise, the
input string is returned.
""" |
if isinstance(str_or_unicode, str):
return str_or_unicode
elif isinstance(str_or_unicode, unicode):
return str_or_unicode.encode('utf-8')
else:
raise ValueError(
"Input '{0}' should be a string or unicode, but it is of "
"type {1}".format(str_or_unicode, type(str_or_unicode))) |
<SYSTEM_TASK:>
Creates a directory. Warns if the directory can't be accessed. Passes
<END_TASK>
<USER_TASK:>
Description:
def create_dir(path):
"""
Creates a directory. Warns if the directory can't be accessed. Passes
if the directory already exists.
modified from http://stackoverflow.com/a/600612
Parameters
----------
path : str
path to the directory to be created
""" |
import sys
import errno
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST:
if os.path.isdir(path):
pass
else: # if something exists at the path, but it's not a dir
raise
elif exc.errno == errno.EACCES:
sys.stderr.write("Cannot create [{0}]! Check Permissions".format(path))
raise
else:
raise |
<SYSTEM_TASK:>
Appends the node ID to each node label and appends the edge type to each
<END_TASK>
<USER_TASK:>
Description:
def make_labels_explicit(docgraph):
"""
Appends the node ID to each node label and appends the edge type to each
edge label in the given document graph. This can be used to debug a
graph visually with ``write_dot``.
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
Returns
-------
explicit_docgraph : DiscourseDocumentGraph
document graph with explicit node and edge labels
""" |
def make_nodelabels_explicit(docgraph):
for node_id, node_attribs in docgraph.nodes(data=True):
if 'label' in docgraph.node[node_id]:
docgraph.node[node_id]['label'] = \
u"{0}_{1}".format(node_attribs['label'], node_id)
return docgraph
def make_edgelabels_explicit(docgraph):
for from_id, to_id, edge_attribs in docgraph.edges(data=True):
for edge_num in docgraph.edge[from_id][to_id]:
if 'label' in docgraph.edge[from_id][to_id][edge_num]:
docgraph.edge[from_id][to_id][edge_num]['label'] = \
u"{0}_{1}".format(edge_attribs['label'],
edge_attribs['edge_type'])
else:
docgraph.edge[from_id][to_id][edge_num]['label'] = \
edge_attribs['edge_type']
return docgraph
return make_edgelabels_explicit(make_nodelabels_explicit(docgraph)) |
<SYSTEM_TASK:>
You can call this function and pass it a dictionary, or any other
<END_TASK>
<USER_TASK:>
Description:
def create_multiple_replace_func(*args, **kwds):
"""
You can call this function and pass it a dictionary, or any other
combination of arguments you could pass to the built-in dict in order
to construct a dictionary. The function will return an xlat closure
that takes a single argument, the text on which the substitutions are
desired, and returns a copy of that text with all the substitutions
performed.
Source: Python Cookbook 2nd ed, Chapter 1.18. Replacing Multiple Patterns
in a Single Pass.
https://www.safaribooksonline.com/library/view/python-cookbook-2nd/0596007973/ch01s19.html
""" |
adict = dict(*args, **kwds)
rx = re.compile('|'.join(map(re.escape, adict)))
def one_xlat(match):
return adict[match.group(0)]
def xlat(text):
return rx.sub(one_xlat, text)
return xlat |
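A runnable sketch of the returned closure; because all substitutions happen in a single regex pass, swapped keys don't cascade into each other's replacements:

replace = create_multiple_replace_func({'cat': 'dog', 'dog': 'cat'})
print(replace('the cat chased the dog'))
# prints: the dog chased the cat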
<SYSTEM_TASK:>
returns the lowercased string of the connective used in the given Conano unit.
<END_TASK>
<USER_TASK:>
Description:
def get_connective(docgraph, unit_id):
"""
returns the lowercased string of the connective used in the given Conano unit.
""" |
unit_index, _unit_type = unit_id.split(':')
connective_id = unit_index+':connective'
return ' '.join(docgraph.get_token(tok_id).lower()
for tok_id in get_span(docgraph, connective_id)) |
<SYSTEM_TASK:>
add a token to this docgraph
<END_TASK>
<USER_TASK:>
Description:
def _add_token(self, token, parent_node='root'):
"""add a token to this docgraph""" |
if parent_node == 'root':
parent_node = self.root
token_node_id = 'token:{}'.format(self.token_count)
self.add_node(token_node_id, layers={self.ns, self.ns+':token'},
attr_dict={self.ns+':token': token})
self.add_edge(parent_node, token_node_id,
layers={self.ns},
edge_type=EdgeTypes.spanning_relation)
self.tokens.append(token_node_id)
self.token_count += 1 |
<SYSTEM_TASK:>
adds a token to the document graph as a node with the given ID.
<END_TASK>
<USER_TASK:>
Description:
def __add_token_to_document(self, token, token_id, connected):
"""
adds a token to the document graph as a node with the given ID.
Parameters
----------
token : str
the token to be added to the document graph
token_id : int
the node ID of the token to be added, which must not yet
exist in the document graph
connected : bool
Make the graph connected, i.e. add an edge from the root to this token.
""" |
regex_match = ANNOTATED_ANAPHORA_REGEX.search(token)
if regex_match: # token is annotated
unannotated_token = regex_match.group('token')
unicode_token = ensure_unicode(unannotated_token)
annotation = regex_match.group('annotation')
anno_type = ANNOTATION_TYPES[annotation]
certainty = "1.0" if not regex_match.group('uncertain') else "0.5"
self.add_node(
token_id,
layers={self.ns, self.ns+':token', self.ns+':annotated'},
attr_dict={
self.ns+':annotation': anno_type,
self.ns+':certainty': certainty,
self.ns+':token': unicode_token,
'label': u"{0}_{1}".format(unicode_token, anno_type)})
else: # token is not annotated
self.add_node(
token_id,
layers={self.ns, self.ns+':token'},
attr_dict={self.ns+':token': ensure_unicode(token),
'label': ensure_unicode(token)})
if connected:
self.add_edge(self.root, token_id,
layers={self.ns, self.ns+':token'}) |
<SYSTEM_TASK:>
throw away all node attributes, except for 'label'
<END_TASK>
<USER_TASK:>
Description:
def _preprocess_nodes_for_pydot(nodes_with_data):
"""throw away all node attributes, except for 'label'""" |
for (node_id, attrs) in nodes_with_data:
if 'label' in attrs:
yield (quote_for_pydot(node_id),
{'label': quote_for_pydot(attrs['label'])})
else:
yield (quote_for_pydot(node_id), {}) |
<SYSTEM_TASK:>
throw away all edge attributes, except for 'label'
<END_TASK>
<USER_TASK:>
Description:
def _preprocess_edges_for_pydot(edges_with_data):
"""throw away all edge attributes, except for 'label'""" |
for (source, target, attrs) in edges_with_data:
if 'label' in attrs:
yield (quote_for_pydot(source), quote_for_pydot(target),
{'label': quote_for_pydot(attrs['label'])})
else:
yield (quote_for_pydot(source), quote_for_pydot(target), {}) |
<SYSTEM_TASK:>
converts a document graph into a dot file and returns it as a string.
<END_TASK>
<USER_TASK:>
Description:
def print_dot(docgraph):
"""
converts a document graph into a dot file and returns it as a string.
If this function call is prepended by %dotstr,
it will display the given document graph as a dot/graphviz graph
in the currently running IPython notebook session.
To use this function, the gvmagic IPython notebook extension
needs to be installed once::
%install_ext https://raw.github.com/cjdrake/ipython-magic/master/gvmagic.py
In order to visualize dot graphs in your currently running
IPython notebook, run this command once::
%load_ext gvmagic
""" |
stripped_graph = preprocess_for_pydot(docgraph)
return nx.drawing.nx_pydot.to_pydot(stripped_graph).to_string() |
<SYSTEM_TASK:>
Relabel the nodes of the graph G.
<END_TASK>
<USER_TASK:>
Description:
def relabel_nodes(G, mapping, copy=True):
"""Relabel the nodes of the graph G.
Parameters
----------
G : graph
A NetworkX graph
mapping : dictionary
A dictionary with the old labels as keys and new labels as values.
A partial mapping is allowed.
copy : bool (optional, default=True)
If True return a copy, or if False relabel the nodes in place.
Examples
--------
>>> G=nx.path_graph(3) # nodes 0-1-2
>>> mapping={0:'a',1:'b',2:'c'}
>>> H=nx.relabel_nodes(G,mapping)
>>> print(sorted(H.nodes()))
['a', 'b', 'c']
>>> G=nx.path_graph(26) # nodes 0..25
>>> mapping=dict(zip(G.nodes(),"abcdefghijklmnopqrstuvwxyz"))
>>> H=nx.relabel_nodes(G,mapping) # nodes a..z
>>> mapping=dict(zip(G.nodes(),range(1,27)))
>>> G1=nx.relabel_nodes(G,mapping) # nodes 1..26
Partial in-place mapping:
>>> G=nx.path_graph(3) # nodes 0-1-2
>>> mapping={0:'a',1:'b'} # 0->'a' and 1->'b'
>>> G=nx.relabel_nodes(G,mapping, copy=False)
>>> print(G.nodes())
[2, 'b', 'a']
Mapping as function:
>>> G=nx.path_graph(3)
>>> def mapping(x):
... return x**2
>>> H=nx.relabel_nodes(G,mapping)
>>> print(H.nodes())
[0, 1, 4]
Notes
-----
Only the nodes specified in the mapping will be relabeled.
The keyword setting copy=False modifies the graph in place.
This is not always possible if the mapping is circular.
In that case use copy=True.
See Also
--------
convert_node_labels_to_integers
""" |
# you can pass a function f(old_label)->new_label
# but we'll just make a dictionary here regardless
if not hasattr(mapping, "__getitem__"):
m = dict((n, mapping(n)) for n in G)
else:
m = mapping
if copy:
return _relabel_copy(G, m)
else:
return _relabel_inplace(G, m) |
<SYSTEM_TASK:>
Return a copy of the graph G with the nodes relabeled with integers.
<END_TASK>
<USER_TASK:>
Description:
def convert_node_labels_to_integers(G, first_label=0, ordering="default",
label_attribute=None):
"""Return a copy of the graph G with the nodes relabeled with integers.
Parameters
----------
G : graph
A NetworkX graph
first_label : int, optional (default=0)
An integer specifying the offset in numbering nodes.
The n new integer labels are numbered first_label, ..., n-1+first_label.
ordering : string
"default" : inherit node ordering from G.nodes()
"sorted" : inherit node ordering from sorted(G.nodes())
"increasing degree" : nodes are sorted by increasing degree
"decreasing degree" : nodes are sorted by decreasing degree
label_attribute : string, optional (default=None)
Name of node attribute to store old label. If None no attribute
is created.
Notes
-----
Node and edge attribute data are copied to the new (relabeled) graph.
See Also
--------
relabel_nodes
""" |
N = G.number_of_nodes() + first_label
if ordering == "default":
mapping = dict(zip(G.nodes(), range(first_label, N)))
elif ordering == "sorted":
nlist = G.nodes()
nlist.sort()
mapping = dict(zip(nlist, range(first_label, N)))
elif ordering == "increasing degree":
dv_pairs = [(d, n) for (n, d) in G.degree_iter()]
dv_pairs.sort() # in-place sort from lowest to highest degree
mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N)))
elif ordering == "decreasing degree":
dv_pairs = [(d, n) for (n, d) in G.degree_iter()]
dv_pairs.sort() # in-place sort from lowest to highest degree
dv_pairs.reverse()
mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N)))
else:
raise nx.NetworkXError('Unknown node ordering: {0}'.format(ordering))
H = relabel_nodes(G, mapping)
H.name = "(" + G.name + ")_with_int_labels"
# create node attribute with the old label
if label_attribute is not None:
nx.set_node_attributes(H, label_attribute,
dict((v, k) for k, v in mapping.items()))
return H |
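A usage sketch, assuming networkx 1.x (whose API, e.g. degree_iter(), this code targets):

import networkx as nx

G = nx.path_graph(3)  # nodes 0-1-2
H = convert_node_labels_to_integers(G, first_label=10, label_attribute='old')
print(sorted(H.nodes()))  # [10, 11, 12]; each node keeps its old label in 'old'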
<SYSTEM_TASK:>
serialize the ExmaraldaFile instance and write it to a file.
<END_TASK>
<USER_TASK:>
Description:
def write(self, output_filepath):
"""
serialize the ExmaraldaFile instance and write it to a file.
Parameters
----------
output_filepath : str
relative or absolute path to the Exmaralda file to be created
""" |
with open(output_filepath, 'w') as out_file:
out_file.write(self.__str__()) |
<SYSTEM_TASK:>
Look, mum! XML generation without string concatenation!1!!
<END_TASK>
<USER_TASK:>
Description:
def __create_document_header(self):
"""
Look, mum! XML generation without string concatenation!1!!
This creates an empty, but functional header for an Exmaralda *.exb
file.
""" |
E = self.E
root = E('basic-transcription')
head = E('head')
meta = E('meta-information')
project = E('project-name')
tname = E('transcription-name')
ref_file = E('referenced-file', url="")
ud = E('ud-meta-information')
comment = E('comment')
tconvention = E('transcription-convention')
meta.append(project)
meta.append(tname)
meta.append(ref_file)
meta.append(ud)
meta.append(comment)
meta.append(tconvention)
speakers = E('speakertable')
head.append(meta)
head.append(speakers)
root.append(head)
return root |
<SYSTEM_TASK:>
return an Exmaralda XML etree representation of a docgraph
<END_TASK>
<USER_TASK:>
Description:
def __add_document_structure(self, docgraph,
remove_redundant_layers=True):
"""return an Exmaralda XML etree representation a docgraph""" |
E = self.E
root = self.__create_document_header()
body = E('basic-body')
timeline = E('common-timeline')
# for n tokens we need to create n+1 timeline indices
for i in xrange(len(docgraph.tokens)+1):
idx = str(i)
# example: <tli id="T0" time="0"/>
timeline.append(E('tli', {'id': 'T'+idx, 'time': idx}))
body.append(timeline)
body = self.__add_token_tiers(docgraph, body)
annotation_layers = get_annotation_layers(docgraph)
for layer in annotation_layers:
if not remove_redundant_layers: # add all layers
self.__add_annotation_tier(docgraph, body, layer)
elif is_informative(layer): # only add informative layers
self.__add_annotation_tier(docgraph, body, layer)
self.__add_coreference_chain_tiers(docgraph, body)
root.append(body)
return root |
<SYSTEM_TASK:>
adds a node for each token ID in the document
<END_TASK>
<USER_TASK:>
Description:
def __add_tokenization(self, tree):
"""adds a node for each token ID in the document""" |
for token_id in self.get_token_ids(tree):
self.add_node(token_id, layers={self.ns})
self.tokens.append(token_id) |
<SYSTEM_TASK:>
returns True iff all events in the given tier annotate exactly one
<END_TASK>
<USER_TASK:>
Description:
def is_token_annotation_tier(self, tier):
"""
returns True iff all events in the given tier annotate exactly one
token.
""" |
for i, event in enumerate(tier.iter('event')):
if self.indexdelta(event.attrib['end'], event.attrib['start']) != 1:
return False
return True |
<SYSTEM_TASK:>
adds a tier to the document graph, in which each event annotates
<END_TASK>
<USER_TASK:>
Description:
def __add_token_annotation_tier(self, tier):
"""
adds a tier to the document graph, in which each event annotates
exactly one token.
""" |
for i, event in enumerate(tier.iter('event')):
anno_key = '{0}:{1}'.format(self.ns, tier.attrib['category'])
anno_val = event.text if event.text else ''
self.node[event.attrib['start']][anno_key] = anno_val |
<SYSTEM_TASK:>
adds a tier to the document graph in which each event annotates a span
<END_TASK>
<USER_TASK:>
Description:
def __add_span_tier(self, tier):
"""
adds a tier to the document graph in which each event annotates a span
of one or more tokens.
""" |
tier_id = tier.attrib['id']
# add the tier's root node with an inbound edge from the document root
self.add_node(
tier_id, layers={self.ns, self.ns+':tier'},
attr_dict={self.ns+':category': tier.attrib['category'],
self.ns+':type': tier.attrib['type'],
self.ns+':display-name': tier.attrib['display-name']})
self.add_edge(self.root, tier_id, edge_type=EdgeTypes.dominance_relation)
# add a node for each span, containing an annotation.
# add an edge from the tier root to each span and an edge from each
# span to the tokens it represents
for i, event in enumerate(tier.iter('event')):
span_id = '{}_{}'.format(tier_id, i)
span_tokens = self.gen_token_range(event.attrib['start'], event.attrib['end'])
annotation = event.text if event.text else ''
self.add_node(
span_id, layers={self.ns, self.ns+':span'},
attr_dict={self.ns+':annotation': annotation,
'label': annotation})
self.add_edge(tier_id, span_id, edge_type=EdgeTypes.dominance_relation)
for token_id in span_tokens:
self.add_edge(span_id, token_id,
edge_type=EdgeTypes.spanning_relation) |
<SYSTEM_TASK:>
returns the IDs of all tokens occurring in the given Exmaralda file,
<END_TASK>
<USER_TASK:>
Description:
def get_token_ids(tree):
"""
returns the IDs of all tokens occurring in the given Exmaralda file,
sorted by their time stamp in ascending order.
""" |
def tok2time(token_element):
'''
extracts the time (float) of a <tli> element
(i.e. the absolute position of a token in the document)
'''
return float(token_element.attrib['time'])
timeline = tree.find('//common-timeline')
return (tok.attrib['id']
for tok in sorted((tli for tli in timeline.iterchildren()),
key=tok2time)) |
<SYSTEM_TASK:>
given two document graphs which annotate the same text and which use the
<END_TASK>
<USER_TASK:>
Description:
def create_token_mapping(docgraph_with_old_names, docgraph_with_new_names,
verbose=False):
"""
given two document graphs which annotate the same text and which use the
same tokenization, creates a dictionary with a mapping from the token
IDs used in the first graph to the token IDs used in the second graph.
Parameters
----------
docgraph_with_old_names : DiscourseDocumentGraph
a document graph with token IDs that will be replaced later on
docgraph_with_new_names : DiscourseDocumentGraph
a document graph with token IDs that will replace the token IDs
used in ``docgraph_with_old_names`` later on
Returns
-------
old2new : dict
maps from a token ID used in ``docgraph_with_old_names`` to the token
ID used in ``docgraph_with_new_names`` to reference the same token
""" |
def kwic_string(docgraph, keyword_index):
tokens = [tok for (tokid, tok) in list(docgraph.get_tokens())]
before, keyword, after = get_kwic(tokens, keyword_index)
return "{0} (Index: {1}): {2} [[{3}]] {4}\n".format(
docgraph.name, keyword_index, ' '.join(before), keyword,
' '.join(after))
# generators of (token ID, token) tuples
old_token_gen = docgraph_with_old_names.get_tokens()
new_token_gen = docgraph_with_new_names.get_tokens()
old2new = {}
for i, (new_tok_id, new_tok) in enumerate(new_token_gen):
old_tok_id, old_tok = old_token_gen.next()
if new_tok != old_tok: # token mismatch
if verbose:
raise ValueError(u"Tokenization mismatch:\n{0}{1}".format(
kwic_string(docgraph_with_old_names, i),
kwic_string(docgraph_with_new_names, i)))
raise ValueError(
u"Tokenization mismatch: {0} ({1}) vs. {2} ({3})\n"
"\t{4} != {5}".format(
docgraph_with_new_names.name, docgraph_with_new_names.ns,
docgraph_with_old_names.name, docgraph_with_old_names.ns,
new_tok, old_tok).encode('utf-8'))
else:
old2new[old_tok_id] = new_tok_id
return old2new |
<SYSTEM_TASK:>
keyword in context
<END_TASK>
<USER_TASK:>
Description:
def get_kwic(tokens, index, context_window=5):
"""
keyword in context
Parameters
----------
tokens : list of str
a text represented as a list of tokens
index : int
the index of the keyword in the token list
context_window : int
the number of preceding/succeding words of the keyword to be
retrieved
Returns
-------
before : list of str
the tokens preceding the keyword
keyword : str
the token at the index position
after : list of str
the tokens succeding the keyword
""" |
text_length = len(tokens)
start_before = max(0, index-context_window)
end_before = max(0, index)
before = tokens[start_before:end_before]
start_after = min(text_length, index+1)
end_after = min(text_length, index+context_window+1)
after = tokens[start_after:end_after]
return before, tokens[index], after |
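A runnable sketch of get_kwic:

tokens = 'the quick brown fox jumps over the lazy dog'.split()
before, keyword, after = get_kwic(tokens, 4, context_window=2)
# before == ['brown', 'fox'], keyword == 'jumps', after == ['over', 'the']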
<SYSTEM_TASK:>
returns the character start and end position of the span of text that
<END_TASK>
<USER_TASK:>
Description:
def get_span_offsets(docgraph, node_id):
"""
returns the character start and end position of the span of text that
the given node spans or dominates.
Returns
-------
offsets : tuple(int, int)
character onset and offset of the span
""" |
try:
span = get_span(docgraph, node_id)
# workaround for issue #138
# TODO: when #138 is fixed, just take the first onset / last offset
onsets, offsets = zip(*[docgraph.get_offsets(tok_node)
for tok_node in span])
return (min(onsets), max(offsets))
except KeyError as _:
raise KeyError("Node '{}' doesn't span any tokens.".format(node_id)) |
<SYSTEM_TASK:>
returns all the tokens that are dominated or in a span relation with
<END_TASK>
<USER_TASK:>
Description:
def get_span(docgraph, node_id, debug=False):
"""
returns all the tokens that are dominated or in a span relation with
the given node. If debug is set to True, you'll get a warning if the
graph is cyclic.
Returns
-------
span : list of str
sorted list of token nodes (token node IDs)
""" |
if debug is True and is_directed_acyclic_graph(docgraph) is False:
warnings.warn(
("Can't reliably extract span '{0}' from cyclical graph'{1}'."
"Maximum recursion depth may be exceeded.").format(node_id,
docgraph))
span = []
if docgraph.ns+':token' in docgraph.node[node_id]:
span.append(node_id)
for src_id, target_id, edge_attribs in docgraph.out_edges_iter(node_id,
data=True):
if src_id == target_id:
continue # ignore self-loops
# ignore pointing relations
if edge_attribs['edge_type'] != EdgeTypes.pointing_relation:
span.extend(get_span(docgraph, target_id))
return sorted(span, key=natural_sort_key) |
<SYSTEM_TASK:>
returns True iff the given node ID belongs to a token node.
<END_TASK>
<USER_TASK:>
Description:
def istoken(docgraph, node_id, namespace=None):
"""returns true, iff the given node ID belongs to a token node.
Parameters
----------
node_id : str
the node to be checked
namespace : str or None
If a namespace is given, only look for tokens in the given namespace.
Otherwise, look for tokens in the default namespace of the given
document graph.
""" |
if namespace is None:
namespace = docgraph.ns
return namespace+':token' in docgraph.node[node_id] |
<SYSTEM_TASK:>
return True iff the tokens dominated by the given node are all adjacent
<END_TASK>
<USER_TASK:>
Description:
def is_continuous(docgraph, dominating_node):
"""return True, if the tokens dominated by the given node are all adjacent""" |
first_onset, last_offset = get_span_offsets(docgraph, dominating_node)
span_range = xrange(first_onset, last_offset+1)
token_offsets = (docgraph.get_offsets(tok)
for tok in get_span(docgraph, dominating_node))
char_positions = set(itertools.chain.from_iterable(xrange(on, off+1)
for on, off in token_offsets))
for item in span_range:
if item not in char_positions:
return False
return True |
<SYSTEM_TASK:>
yields all edges that meet the conditions given as eval strings
<END_TASK>
<USER_TASK:>
Description:
def select_edges(docgraph, conditions, data):
"""yields all edges that meet the conditions given as eval strings""" |
for (src_id, target_id, edge_attribs) in docgraph.edges(data=True):
# if all conditions are fulfilled
# we need to add edge_attribs to the namespace eval is working in
if all((eval(cond, {'edge_attribs': edge_attribs})
for cond in conditions)):
if data:
yield (src_id, target_id, edge_attribs)
else:
yield (src_id, target_id) |
<SYSTEM_TASK:>
adds the onset and offset to each token in the document graph, i.e.
<END_TASK>
<USER_TASK:>
Description:
def add_offsets(self, offset_ns=None):
"""
adds the onset and offset to each token in the document graph, i.e.
the character position where each token starts and ends.
""" |
if offset_ns is None:
offset_ns = self.ns
onset = 0
offset = 0
for token_id, token_str in self.get_tokens():
offset = onset + len(token_str)
self.node[token_id]['{0}:{1}'.format(offset_ns, 'onset')] = onset
self.node[token_id]['{0}:{1}'.format(offset_ns, 'offset')] = offset
onset = offset + 1 |
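The onset/offset arithmetic above, extracted as a standalone sketch (tokens are assumed to be separated by exactly one character):

spans = []
onset = 0
for tok in ['Hello', 'world', '!']:
    offset = onset + len(tok)
    spans.append((tok, onset, offset))
    onset = offset + 1  # skip the single separating character
# spans == [('Hello', 0, 5), ('world', 6, 11), ('!', 12, 13)]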
<SYSTEM_TASK:>
Add a single node n and update node attributes.
<END_TASK>
<USER_TASK:>
Description:
def add_node(self, n, layers=None, attr_dict=None, **attr):
"""Add a single node n and update node attributes.
Parameters
----------
n : node
A node can be any hashable Python object except None.
layers : set of str or None
the set of layers the node belongs to,
e.g. {'tiger:token', 'anaphoricity:annotation'}.
Will be set to {self.ns} if None.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of node attributes. Key/value pairs will
update existing data associated with the node.
attr : keyword arguments, optional
Set or change attributes using key=value.
See Also
--------
add_nodes_from
Examples
--------
>>> from discoursegraphs import DiscourseDocumentGraph
>>> d = DiscourseDocumentGraph()
>>> d.add_node(1, {'node'})
# adding the same node with a different layer
>>> d.add_node(1, {'number'})
>>> d.nodes(data=True)
[(1, {'layers': {'node', 'number'}})]
Use keywords set/change node attributes:
>>> d.add_node(1, {'node'}, size=10)
>>> d.add_node(3, layers={'num'}, weight=0.4, UTM=('13S',382))
>>> d.nodes(data=True)
[(1, {'layers': {'node', 'number'}, 'size': 10}),
(3, {'UTM': ('13S', 382), 'layers': {'num'}, 'weight': 0.4})]
Notes
-----
A hashable object is one that can be used as a key in a Python
dictionary. This includes strings, numbers, tuples of strings
and numbers, etc.
On many platforms hashable items also include mutables such as
NetworkX Graphs, though one should be careful that the hash
doesn't change on mutables.
""" |
if not layers:
layers = {self.ns}
assert isinstance(layers, set), \
"'layers' parameter must be given as a set of strings."
assert all((isinstance(layer, str) for layer in layers)), \
"All elements of the 'layers' set must be strings."
# add layers to keyword arguments dict
attr.update({'layers': layers})
# set up attribute dict
if attr_dict is None:
attr_dict = attr
else:
assert isinstance(attr_dict, dict), \
"attr_dict must be a dictionary, not a '{}'".format(type(attr_dict))
attr_dict.update(attr)
# if there's no node with this ID in the graph, yet
if n not in self.succ:
self.succ[n] = {}
self.pred[n] = {}
self.node[n] = attr_dict
else: # update attr even if node already exists
# if a node exists, its attributes will be updated, except
# for the layers attribute. the value of 'layers' will
# be the union of the existing layers set and the new one.
existing_layers = self.node[n]['layers']
all_layers = existing_layers.union(layers)
attrs_without_layers = {k: v for (k, v) in attr_dict.items()
if k != 'layers'}
self.node[n].update(attrs_without_layers)
self.node[n].update({'layers': all_layers}) |
<SYSTEM_TASK:>
Add multiple nodes.
<END_TASK>
<USER_TASK:>
Description:
def add_nodes_from(self, nodes, **attr):
"""Add multiple nodes.
Parameters
----------
nodes : iterable container
A container of nodes (list, dict, set, etc.).
OR
A container of (node, attribute dict) tuples.
Node attributes are updated using the attribute dict.
attr : keyword arguments, optional (default= no attributes)
Update attributes for all nodes in nodes.
Node attributes specified in nodes as a tuple
take precedence over attributes specified generally.
See Also
--------
add_node
Examples
--------
>>> from discoursegraphs import DiscourseDocumentGraph
>>> d = DiscourseDocumentGraph()
>>> d.add_nodes_from([(1, {'layers':{'token'}, 'word':'hello'}), \
(2, {'layers':{'token'}, 'word':'world'})])
>>> d.nodes(data=True)
[(1, {'layers': {'token'}, 'word': 'hello'}),
(2, {'layers': {'token'}, 'word': 'world'})]
Use keywords to update specific node attributes for every node.
>>> d.add_nodes_from(d.nodes(data=True), weight=1.0)
>>> d.nodes(data=True)
[(1, {'layers': {'token'}, 'weight': 1.0, 'word': 'hello'}),
(2, {'layers': {'token'}, 'weight': 1.0, 'word': 'world'})]
Use (node, attrdict) tuples to update attributes for specific
nodes.
>>> d.add_nodes_from([(1, {'layers': {'tiger'}})], size=10)
>>> d.nodes(data=True)
[(1, {'layers': {'tiger', 'token'}, 'size': 10, 'weight': 1.0,
'word': 'hello'}),
(2, {'layers': {'token'}, 'weight': 1.0, 'word': 'world'})]
""" |
additional_attribs = attr # will be added to each node
for n in nodes:
try: # check, if n is a node_id or a (node_id, attrib dict) tuple
newnode = n not in self.succ # is node in the graph, yet?
except TypeError: # n is a (node_id, attribute dict) tuple
node_id, ndict = n
if 'layers' not in ndict:
ndict['layers'] = {self.ns}
layers = ndict['layers']
assert isinstance(layers, set), \
"'layers' must be specified as a set of strings."
assert all((isinstance(layer, str) for layer in layers)), \
"All elements of the 'layers' set must be strings."
if node_id not in self.succ: # node doesn't exist, yet
self.succ[node_id] = {}
self.pred[node_id] = {}
newdict = additional_attribs.copy()
newdict.update(ndict) # all given attribs incl. layers
self.node[node_id] = newdict
else: # node already exists
existing_layers = self.node[node_id]['layers']
all_layers = existing_layers.union(layers)
self.node[node_id].update(ndict)
self.node[node_id].update(additional_attribs)
self.node[node_id].update({'layers': all_layers})
continue # process next node
# newnode check didn't raise an exception
if newnode: # n is a node_id and it's not in the graph, yet
self.succ[n] = {}
self.pred[n] = {}
self.node[n] = attr.copy()
# since the node isn't represented as a
# (node_id, attribute dict) tuple, we don't know which layers
# it is part of. Therefore, we'll add the namespace of the
# graph as the node layer
self.node[n].update({'layers': set([self.ns])})
else: # n is a node_id and it's already in the graph
self.node[n].update(attr) |
<SYSTEM_TASK:>
add a layer to an existing node or edge
<END_TASK>
<USER_TASK:>
Description:
def add_layer(self, element, layer):
"""
add a layer to an existing node or edge
Parameters
----------
element : str, int, (str/int, str/int)
the ID of a node or edge (source node ID, target node ID)
layer : str
the layer that the element shall be added to
""" |
assert isinstance(layer, str), "Layers must be strings!"
if isinstance(element, tuple): # edge repr. by (source, target)
assert len(element) == 2
assert all(isinstance(node, (str, int)) for node in element)
source_id, target_id = element
# this class is based on a multi-digraph, so we'll have to iterate
# over all edges between the two nodes (even if there's just one)
edges = self.edge[source_id][target_id]
for edge in edges:
existing_layers = edges[edge]['layers']
existing_layers.add(layer)
edges[edge]['layers'] = existing_layers
if isinstance(element, (str, int)): # node
existing_layers = self.node[element]['layers']
existing_layers.add(layer)
self.node[element]['layers'] = existing_layers |
<SYSTEM_TASK:>
given a token node ID, returns the token unicode string.
<END_TASK>
<USER_TASK:>
Description:
def get_token(self, token_node_id, token_attrib='token'):
"""
given a token node ID, returns the token unicode string.
Parameters
----------
token_node_id : str
the ID of the token node
token_attrib : str
name of the node attribute that contains the token string as its
value (default: token).
Returns
-------
token : unicode
the token string
""" |
return self.node[token_node_id][self.ns+':'+token_attrib] |
<SYSTEM_TASK:>
Copy all the metadata from the root node of the other graph into this
<END_TASK>
<USER_TASK:>
Description:
def merge_rootnodes(self, other_docgraph):
"""
Copy all the metadata from the root node of the other graph into this
one. Then, move all edges belonging to the other root node to this
one. Finally, remove the root node of the other graph from this one.
""" |
# copy metadata from other graph, cf. #136
if 'metadata' in other_docgraph.node[other_docgraph.root]:
other_meta = other_docgraph.node[other_docgraph.root]['metadata']
self.node[self.root]['metadata'].update(other_meta)
assert not other_docgraph.in_edges(other_docgraph.root), \
"root node in graph '{}' must not have any ingoing edges".format(
other_docgraph.name)
for (root, target, attrs) in other_docgraph.out_edges(
other_docgraph.root, data=True):
self.add_edge(self.root, target, attr_dict=attrs)
self.remove_node(other_docgraph.root) |
<SYSTEM_TASK:>
convert a PAULA etree into an XML string.
<END_TASK>
<USER_TASK:>
Description:
def paula_etree_to_string(tree, dtd_filename):
"""convert a PAULA etree into an XML string.""" |
return etree.tostring(
tree, pretty_print=True, xml_declaration=True,
encoding="UTF-8", standalone='no',
doctype='<!DOCTYPE paula SYSTEM "{0}">'.format(dtd_filename)) |
<SYSTEM_TASK:>
creates an element tree representation of an empty PAULA XML file.
<END_TASK>
<USER_TASK:>
Description:
def gen_paula_etree(paula_id):
"""
creates an element tree representation of an empty PAULA XML file.
""" |
E = ElementMaker(nsmap=NSMAP)
tree = E('paula', version='1.1')
tree.append(E('header', paula_id=paula_id))
return E, tree |
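A hedged sketch combining the two PAULA helpers above; NSMAP is a module-level constant of the original package, so its exact contents are assumed here:

E, tree = gen_paula_etree('mydoc.text')
xml_str = paula_etree_to_string(tree, 'paula_text.dtd')
# xml_str is a standalone XML document containing the XML declaration, the
# '<!DOCTYPE paula SYSTEM "paula_text.dtd">' doctype and the <paula> tree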
<SYSTEM_TASK:>
converts a DiscourseDocumentGraph into a set of PAULA XML files
<END_TASK>
<USER_TASK:>
Description:
def write_paula(docgraph, output_root_dir, human_readable=False):
"""
converts a DiscourseDocumentGraph into a set of PAULA XML files
representing the same document.
Parameters
----------
docgraph : DiscourseDocumentGraph
the document graph to be converted
""" |
paula_document = PaulaDocument(docgraph, human_readable=human_readable)
error_msg = ("Please specify an output directory.\nPaula documents consist"
" of multiple files, so we can't just pipe them to STDOUT.")
assert isinstance(output_root_dir, str), error_msg
document_dir = os.path.join(output_root_dir, paula_document.name)
if not os.path.isdir(document_dir):
create_dir(document_dir)
for paula_id in paula_document.files:
with open(os.path.join(document_dir, paula_id+'.xml'), 'w') as outfile:
outfile.write(
paula_etree_to_string(paula_document.files[paula_id],
paula_document.file2dtd[paula_id])) |
<SYSTEM_TASK:>
ensure that all node IDs in the document graph are valid
<END_TASK>
<USER_TASK:>
Description:
def __make_xpointer_compatible(self):
"""
ensure that all node IDs in the document graph are valid
xpointer IDs. This will relabel all node IDs in place in the discourse
graph and change its ``.tokens`` list accordingly.
""" |
node_id_map = {node: ensure_xpointer_compatibility(node)
for node in self.dg.nodes_iter()}
old_token_ids = self.dg.tokens
# replace document graph with node relabeled version
self.dg = relabel_nodes(self.dg, node_id_map, copy=True)
self.dg.tokens = [node_id_map[tok] for tok in old_token_ids] |
<SYSTEM_TASK:>
creates a ``SaltLabel`` from an etree element representing a label
<END_TASK>
<USER_TASK:>
Description:
def from_etree(cls, etree_element):
"""
creates a ``SaltLabel`` from an etree element representing a label
element in a SaltXMI file.
A label element in SaltXMI looks like this::
<labels xsi:type="saltCore:SFeature" namespace="salt"
name="SNAME" value="ACED0005740007735370616E3139"
valueString="sSpan19"/>
Parameters
----------
etree_element : lxml.etree._Element
an etree element parsed from a SaltXMI document
""" |
return cls(name=etree_element.attrib['name'],
value=etree_element.attrib['valueString'],
xsi_type=get_xsi_type(etree_element),
namespace=etree_element.attrib.get('namespace'),
hexvalue=etree_element.attrib['value']) |
<SYSTEM_TASK:>
converts a document graph into a plain text file with brackets.
<END_TASK>
<USER_TASK:>
Description:
def write_brackets(docgraph, output_file, layer='mmax'):
"""
converts a document graph into a plain text file with brackets.
Parameters
----------
layer : str or None
The layer from which the pointing chains/relations
(i.e. coreference relations) should be extracted.
If no layer is selected, all pointing relations will be considered.
(This might lead to errors, e.g. when the document contains Tiger
syntax trees with secondary edges.)
""" |
bracketed_str = gen_bracketed_output(docgraph, layer=layer)
assert isinstance(output_file, (str, file))
if isinstance(output_file, str):
path_to_file = os.path.dirname(output_file)
if not os.path.isdir(path_to_file):
create_dir(path_to_file)
with codecs.open(output_file, 'w', 'utf-8') as outfile:
outfile.write(bracketed_str)
else: # output_file is a file object
output_file.write(bracketed_str) |
<SYSTEM_TASK:>
converts a NetworkX node into a Geoff string.
<END_TASK>
<USER_TASK:>
Description:
def node2geoff(node_name, properties, encoder):
"""converts a NetworkX node into a Geoff string.
Parameters
----------
node_name : str or int
the ID of a NetworkX node
properties : dict
a dictionary of node attributes
encoder : json.JSONEncoder
an instance of a JSON encoder (e.g. `json.JSONEncoder`)
Returns
-------
geoff : str
a Geoff string
""" |
if properties:
return '({0} {1})'.format(node_name,
encoder.encode(properties))
else:
return '({0})'.format(node_name) |
<SYSTEM_TASK:>
converts a NetworkX edge into a Geoff string.
<END_TASK>
<USER_TASK:>
Description:
def edge2geoff(from_node, to_node, properties, edge_relationship_name, encoder):
"""converts a NetworkX edge into a Geoff string.
Parameters
----------
from_node : str or int
the ID of a NetworkX source node
to_node : str or int
the ID of a NetworkX target node
properties : dict
a dictionary of edge attributes
edge_relationship_name : str
string that describes the relationship between the two nodes
encoder : json.JSONEncoder
an instance of a JSON encoder (e.g. `json.JSONEncoder`)
Returns
-------
geoff : str
a Geoff string
""" |
edge_string = None
if properties:
args = [from_node, edge_relationship_name,
encoder.encode(properties), to_node]
edge_string = '({0})-[:{1} {2}]->({3})'.format(*args)
else:
args = [from_node, edge_relationship_name, to_node]
edge_string = '({0})-[:{1}]->({2})'.format(*args)
return edge_string |
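A runnable sketch of the two Geoff serializers above:

import json

enc = json.JSONEncoder()
print(node2geoff('n1', {'label': 'token'}, enc))     # (n1 {"label": "token"})
print(edge2geoff('n1', 'n2', {}, 'dominates', enc))  # (n1)-[:dominates]->(n2)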
<SYSTEM_TASK:>
return the text of the given EDU subtree
<END_TASK>
<USER_TASK:>
Description:
def get_edu_text(text_subtree):
"""return the text of the given EDU subtree""" |
assert text_subtree.label() == SubtreeType.text
return u' '.join(word.decode('utf-8') for word in text_subtree.leaves()) |
<SYSTEM_TASK:>
replace EDU indices with the text of the EDUs
<END_TASK>
<USER_TASK:>
Description:
def _add_edus_to_tree(parented_tree, edus):
"""replace EDU indices with the text of the EDUs
in a parented tree.
Parameters
----------
parented_tree : nltk.ParentedTree
a parented tree that only contains EDU indices
as leaves
edus : list(list(unicode))
a list of EDUs, where each EDU is represented as
a list of tokens
""" |
for i, child in enumerate(parented_tree):
if isinstance(child, nltk.Tree):
_add_edus_to_tree(child, edus)
else:
edu_index = int(child)
edu_tokens = edus[edu_index]
parented_tree[i] = u" ".join(edu_tokens) |
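A runnable sketch, with EDU indices as leaves of a parented tree:

import nltk

tree = nltk.ParentedTree.fromstring('(N (S 0) (N 1))')
edus = [['Hello', 'world', '.'], ['Goodbye', '.']]
_add_edus_to_tree(tree, edus)
print(tree)
# (N (S Hello world .) (N Goodbye .))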
<SYSTEM_TASK:>
Execute a single command and set sleep times properly.
<END_TASK>
<USER_TASK:>
Description:
def execute(self, controller_id, command, *args, **kwargs):
"""
Execute a single command and set sleep times properly.
- controller_id = index of controller, zero-based
- command is normal LedController command as a string
- *args and **kwargs are passed to command
For example, .execute(0, "on", 1) sends "on" command to group 1 on controller 0 (first IP passed to constructor).
""" |
controller_instance = self.controllers[controller_id]
controller_instance.last_command_at = self.last_command_at
ret_val = getattr(controller_instance, command)(*args, **kwargs)
self.last_command_at = controller_instance.last_command_at
return ret_val |
<SYSTEM_TASK:>
Set bulb type for specified group.
<END_TASK>
<USER_TASK:>
Description:
def set_group_type(self, group, bulb_type):
""" Set bulb type for specified group.
Group must be int between 1 and 4.
Type must be "rgbw" or "white".
Alternatively, use constructor keywords group_1, group_2 etc. to set bulb types.
""" |
if bulb_type not in ("rgbw", "white"):
raise AttributeError("Bulb type must be either rgbw or white")
self.group[group] = bulb_type
self.has_white = "white" in self.group.values()
self.has_rgbw = "rgbw" in self.group.values() |
<SYSTEM_TASK:>
Convert percent to the bulb's internal range.
<END_TASK>
<USER_TASK:>
Description:
def get_brightness_level(cls, percent):
""" Convert percents to bulbs internal range.
percent should be integer from 0 to 100.
Return value is 2 (minimum brightness) - 27 (maximum brightness)
""" |
# Clamp to appropriate range.
percent = min(100, max(0, percent))
# Map 0-100 to 2-27
value = int(2 + ((float(percent) / 100) * 25))
return percent, value |
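A quick check of the mapping above (hedged: assuming the classmethod lives on the LedController class mentioned earlier in this file):

for pct in (0, 50, 100):
    print(LedController.get_brightness_level(pct))
# (0, 2), (50, 14), (100, 27)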
<SYSTEM_TASK:>
Set brightness.
<END_TASK>
<USER_TASK:>
Description:
def set_brightness(self, percent, group=None):
""" Set brightness.
Percent is int between 0 (minimum brightness) and 100 (maximum brightness), or
float between 0.0 (minimum brightness) and 1.0 (maximum brightness).
See also .nightmode().
If group (1-4) is not specified, brightness of all four groups will be adjusted.
""" |
# If input is float, assume it is percent value from 0 to 1.
if isinstance(percent, float):
if percent > 1:
percent = int(percent)
else:
percent = int(percent * 100)
percent, value = self.get_brightness_level(percent)
self.on(group)
self._send_command((b"\x4e", struct.pack("B", value)))
return percent |
<SYSTEM_TASK:>
Run batch of commands in sequence.
<END_TASK>
<USER_TASK:>
Description:
def batch_run(self, *commands):
""" Run batch of commands in sequence.
Input is positional arguments with (function pointer, *args) tuples.
This method is useful for executing commands to multiple groups with retries,
without having too long delays. For example,
- Set group 1 to red and brightness to 10%
- Set group 2 to red and brightness to 10%
- Set group 3 to white and brightness to 100%
- Turn off group 4
With three repeats, running these consecutively takes approximately 100ms * 13 commands * 3 times = 3.9 seconds.
With batch_run, execution takes the same time, but the first loop (each command is sent once to every group)
finishes within 1.3 seconds. After that, each command is repeated two more times. Most of the time, this ensures
slightly faster changes for each group.
Usage:
led.batch_run((led.set_color, "red", 1), (led.set_brightness, 10, 1), (led.set_color, "white", 3), ...)
""" |
original_retries = self.repeat_commands
self.repeat_commands = 1
for _ in range(original_retries):
for command in commands:
cmd = command[0]
args = command[1:]
cmd(*args)
self.repeat_commands = original_retries |
<SYSTEM_TASK:>
Send a message through the websocket client and wait for the
<END_TASK>
<USER_TASK:>
Description:
def send(self, msg_dict):
"""Send a message through the websocket client and wait for the
answer if the message being sent contains an id attribute.""" |
message = ejson.dumps(msg_dict)
super(DDPSocket, self).send(message)
self._debug_log('<<<{}'.format(message)) |
<SYSTEM_TASK:>
Recover from a network failure
<END_TASK>
<USER_TASK:>
Description:
def _recover_network_failure(self):
"""Recover from a network failure""" |
if self.auto_reconnect and not self._is_closing:
connected = False
while not connected:
log_msg = "* ATTEMPTING RECONNECT"
if self._retry_new_version:
log_msg = "* RETRYING DIFFERENT DDP VERSION"
self.ddpsocket._debug_log(log_msg)
time.sleep(self.auto_reconnect_timeout)
self._init_socket()
try:
self.connect()
connected = True
if self._retry_new_version:
self._retry_new_version = False
else:
self._is_reconnecting = True
except (socket.error, WebSocketException):
pass |
<SYSTEM_TASK:>
Send the connect message to the server.
<END_TASK>
<USER_TASK:>
Description:
def opened(self):
"""Send the connect message to the server.""" |
# give up if there are no more ddp versions to try
if self._ddp_version_index == len(DDP_VERSIONS):
self.ddpsocket._debug_log('* DDP VERSION MISMATCH')
self.emit('version_mismatch', DDP_VERSIONS)
return
# use server recommended version if we support it
if self._retry_new_version in DDP_VERSIONS:
self._ddp_version_index = [i for i, x in enumerate(DDP_VERSIONS)
if x == self._retry_new_version][0]
connect_msg = {
"msg": "connect",
"version": DDP_VERSIONS[self._ddp_version_index],
"support": DDP_VERSIONS
}
# if we've already got a session token then reconnect
if self._session:
connect_msg["session"] = self._session
self.send(connect_msg) |
<SYSTEM_TASK:>
Called when the connection is closed
<END_TASK>
<USER_TASK:>
Description:
def closed(self, code, reason=None):
"""Called when the connection is closed""" |
self.emit('socket_closed', code, reason)
self._recover_network_failure() |
<SYSTEM_TASK:>
Call a method on the server
<END_TASK>
<USER_TASK:>
Description:
def call(self, method, params, callback=None):
"""Call a method on the server
Arguments:
method - the remote server method
params - an array of commands to send to the method
Keyword Arguments:
callback - a callback function containing the return data""" |
cur_id = self._next_id()
if callback:
self._callbacks[cur_id] = callback
self.send({'msg': 'method', 'id': cur_id, 'method': method, 'params': params}) |
<SYSTEM_TASK:>
Convert the tree from DPLP's format into a conventional binary tree,
<END_TASK>
<USER_TASK:>
Description:
def dplptree2dgparentedtree(self):
"""Convert the tree from DPLP's format into a conventional binary tree,
which can be easily converted into output formats like RS3.
""" |
def transform(dplp_tree):
"""Transform a DPLP parse tree into a more conventional parse tree."""
if isinstance(dplp_tree, basestring) or not hasattr(dplp_tree, 'label'):
return dplp_tree
assert len(dplp_tree) == 2, "We can only handle binary trees."
match = DPLP_REL_RE.match(dplp_tree.label())
assert match, "Relation '{}' does not match regex '{}'".format(dplp_tree.label(), DPLP_REL_RE)
left_child_nuc, right_child_nuc, relname = match.groups()
dplp_tree._label = relname
for i, child_nuclearity in enumerate([left_child_nuc, right_child_nuc]):
child = dplp_tree[i]
dplp_tree[i] = Tree(child_nuclearity, [transform(child)])
return dplp_tree
tree = transform(self.parsetree)
return DGParentedTree.convert(tree) |
<SYSTEM_TASK:>
Given a TigerSentenceGraph, returns a sorted list of terminal node
<END_TASK>
<USER_TASK:>
Description:
def _get_terminals_and_nonterminals(sentence_graph):
"""
Given a TigerSentenceGraph, returns a sorted list of terminal node
IDs, as well as a sorted list of nonterminal node IDs.
Parameters
----------
sentence_graph : TigerSentenceGraph
a directed graph representing one syntax annotated sentence from
a TigerXML file
Returns
-------
terminals, nonterminals : list of str
a sorted list of terminal node IDs and a sorted list of
nonterminal node IDs
""" |
terminals = set()
nonterminals = set()
for node_id in sentence_graph.nodes_iter():
if sentence_graph.out_degree(node_id) > 0:
# all nonterminals (incl. root)
nonterminals.add(node_id)
else: # terminals
terminals.add(node_id)
return sorted(list(terminals), key=natural_sort_key), \
sorted(list(nonterminals), key=natural_sort_key) |
<SYSTEM_TASK:>
Takes a TigerSentenceGraph and returns a list of node IDs of
<END_TASK>
<USER_TASK:>
Description:
def get_unconnected_nodes(sentence_graph):
"""
Takes a TigerSentenceGraph and returns a list of node IDs of
unconnected nodes.
A node is unconnected if it doesn't have any incoming or outgoing edges.
A node is NOT considered unconnected if the graph only consists of
that particular node.
Parameters
----------
sentence_graph : TigerSentenceGraph
a directed graph representing one syntax annotated sentence from
a TigerXML file
Returns
-------
unconnected_node_ids : list of str
a list of node IDs of unconnected nodes
""" |
return [node for node in sentence_graph.nodes_iter()
if sentence_graph.degree(node) == 0 and
sentence_graph.number_of_nodes() > 1] |
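A self-contained sketch with a plain networkx DiGraph standing in for TigerSentenceGraph (nodes_iter() above is networkx 1.x API; modern networkx uses nodes()):

import networkx as nx

g = nx.DiGraph()
g.add_edge('s1_root', 's1_n1')  # connected nodes
g.add_node('s1_orphan')         # no incoming or outgoing edges

unconnected = [n for n in g.nodes()
               if g.degree(n) == 0 and g.number_of_nodes() > 1]
print(unconnected)  # ['s1_orphan']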
<SYSTEM_TASK:>
given a document graph of a TIGER syntax tree, return all
<END_TASK>
<USER_TASK:>
Description:
def get_subordinate_clauses(tiger_docgraph):
"""
given a document graph of a TIGER syntax tree, return all
node IDs of nodes representing subordinate clause constituents.
Parameters
----------
tiger_docgraph : DiscourseDocumentGraph or TigerDocumentGraph
document graph from which subordinate clauses will be extracted
Returns
-------
subord_clause_nodes : list(str)
list of node IDs of nodes directly dominating subordinate clauses
""" |
subord_clause_rels = \
dg.select_edges_by_attribute(
tiger_docgraph, attribute='tiger:label',
value=['MO', 'RC', 'SB'])
subord_clause_nodes = []
for src_id, target_id in subord_clause_rels:
src_cat = tiger_docgraph.node[src_id].get('tiger:cat')
if src_cat == 'S' and not dg.istoken(tiger_docgraph, target_id):
subord_clause_nodes.append(target_id)
return subord_clause_nodes |
<SYSTEM_TASK:>
add a token node to this document graph
<END_TASK>
<USER_TASK:>
Description:
def _add_token_to_document(self, token_string, token_attrs=None):
"""add a token node to this document graph""" |
token_feat = {self.ns+':token': token_string}
if token_attrs:
token_attrs.update(token_feat)
else:
token_attrs = token_feat
token_id = 'token_{}'.format(self.token_count)
self.add_node(token_id, layers={self.ns, self.ns+':token'},
attr_dict=token_attrs)
self.token_count += 1
self.tokens.append(token_id)
return token_id |
<SYSTEM_TASK:>
add a dominance relation to this docgraph
<END_TASK>
<USER_TASK:>
Description:
def _add_dominance_relation(self, source, target):
"""add a dominance relation to this docgraph""" |
# TODO: fix #39, so we don't need to add nodes by hand
self.add_node(target, layers={self.ns, self.ns+':unit'})
self.add_edge(source, target,
layers={self.ns, self.ns+':discourse'},
edge_type=EdgeTypes.dominance_relation) |
<SYSTEM_TASK:>
add a spanning relation to this docgraph
<END_TASK>
<USER_TASK:>
Description:
def _add_spanning_relation(self, source, target):
"""add a spanning relation to this docgraph""" |
self.add_edge(source, target, layers={self.ns, self.ns+':unit'},
edge_type=EdgeTypes.spanning_relation) |
<SYSTEM_TASK:>
Query data and result data must have keys whose values are strings.
<END_TASK>
<USER_TASK:>
Description:
def validate(data):
"""
Query data and result data must have keys whose values are strings.
""" |
if not isinstance(data, dict):
error('Data must be a dictionary.')
for value in data.values():
if not isinstance(value, basestring):
error('Values must be strings.') |
<SYSTEM_TASK:>
Query data is received on stdin as a JSON object.
<END_TASK>
<USER_TASK:>
Description:
def terraform_external_data(function):
"""
Query data is received on stdin as a JSON object.
Result data must be returned on stdout as a JSON object.
The wrapped function must expect its first positional argument to be a dictionary of the query data.
""" |
@wraps(function)
def wrapper(*args, **kwargs):
query = json.loads(sys.stdin.read())
validate(query)
try:
result = function(query, *args, **kwargs)
except Exception as e:
# Terraform wants one-line errors so we catch all exceptions and trim down to just the message (no trace).
error('{}: {}'.format(type(e).__name__, e))
validate(result)
sys.stdout.write(json.dumps(result))
return wrapper |
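A hedged end-to-end sketch of an external data source built with this decorator (the module path my_module and the query/result keys are illustrative assumptions; Terraform invokes the script and passes the query as JSON on stdin):

#!/usr/bin/env python
# lookup_ami.py -- hypothetical Terraform external data source
from my_module import terraform_external_data  # assumed import path

@terraform_external_data
def lookup_ami(query):
    # query arrives as e.g. {'region': 'us-east-1'}; all values are strings,
    # and the returned dict must likewise map strings to strings.
    return {'ami_id': 'ami-0123456789abcdef0'}

if __name__ == '__main__':
    lookup_ami()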
<SYSTEM_TASK:>
Ensure the given tree has a nucleus as its root.
<END_TASK>
<USER_TASK:>
Description:
def n_wrap(tree, debug=False, root_id=None):
"""Ensure the given tree has a nucleus as its root.
If the root of the tree is a nucleus, return it.
If the root of the tree is a satellite, replace the satellite
with a nucleus and return the tree.
If the root of the tree is a relation, place a nucleus on top
and return the tree.
""" |
root_label = tree.label()
expected_n_root = debug_root_label('N', debug=debug, root_id=tree.root_id)
expected_s_root = debug_root_label('S', debug=debug, root_id=tree.root_id)
if root_label == expected_n_root:
return tree
elif root_label == expected_s_root:
tree.set_label(expected_n_root)
return tree
else:
return t('N', [tree], debug=debug, root_id=root_id) |
<SYSTEM_TASK:>
Extracts relations from a DGParentedTree.
<END_TASK>
<USER_TASK:>
Description:
def extract_relations(dgtree, relations=None):
"""Extracts relations from a DGParentedTree.
Given a DGParentedTree, returns a (relation name, relation type) dict
of all the RST relations occurring in that tree.
""" |
if hasattr(dgtree, 'reltypes'):
# dgtree is an RSTTree or a DisTree that contains a DGParentedTree
return dgtree.reltypes
if relations is None:
relations = {}
if is_leaf(dgtree):
return relations
root_label = dgtree.label()
if root_label == '':
assert dgtree == DGParentedTree('', []), \
"The tree has no root label, but isn't empty: {}".format(dgtree)
return relations
elif root_label in NUCLEARITY_LABELS:
for child in dgtree:
relations.update(extract_relations(child, relations))
else: # dgtree is a 'relation' node
child_labels = [child.label() for child in dgtree]
assert all(label in NUCLEARITY_LABELS for label in child_labels)
if 'S' in child_labels:
relations[root_label] = 'rst'
else:
relations[root_label] = 'multinuc'
for child in dgtree:
relations.update(extract_relations(child, relations))
return relations |
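A hedged usage sketch (DGParentedTree and extract_relations are assumed in scope, since they are defined in the surrounding module; the exact import path may vary):

tree = DGParentedTree('elaboration', [
    DGParentedTree('N', ['EDU 1']),  # nucleus
    DGParentedTree('S', ['EDU 2']),  # satellite -> mononuclear relation
])
print(extract_relations(tree))  # {'elaboration': 'rst'}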
<SYSTEM_TASK:>
takes a DGParentedTree and puts a nucleus or satellite on top,
<END_TASK>
<USER_TASK:>
Description:
def elem_wrap(self, tree, debug=False, root_id=None):
"""takes a DGParentedTree and puts a nucleus or satellite on top,
depending on the nuclearity of the root element of the tree.
""" |
if root_id is None:
root_id = tree.root_id
elem = self.elem_dict[root_id]
if elem['nuclearity'] == 'nucleus':
return n_wrap(tree, debug=debug, root_id=root_id)
else:
return s_wrap(tree, debug=debug, root_id=root_id) |
<SYSTEM_TASK:>
print all elements of a counter in descending order
<END_TASK>
<USER_TASK:>
Description:
def print_sorted_counter(counter, tab=1):
"""print all elements of a counter in descending order""" |
for key, count in sorted(counter.items(), key=itemgetter(1), reverse=True):
print "{0}{1} - {2}".format('\t'*tab, key, count) |
<SYSTEM_TASK:>
print the most common elements of a counter
<END_TASK>
<USER_TASK:>
Description:
def print_most_common(counter, number=5, tab=1):
"""print the most common elements of a counter""" |
for key, count in counter.most_common(number):
print "{0}{1} - {2}".format('\t'*tab, key, count) |
<SYSTEM_TASK:>
Sum the total number of cycles over a list of tokens.
<END_TASK>
<USER_TASK:>
Description:
def _sum_cycles_from_tokens(self, tokens: List[str]) -> int:
"""Sum the total number of cycles over a list of tokens.""" |
return sum((int(self._nonnumber_pattern.sub('', t)) for t in tokens)) |
<SYSTEM_TASK:>
The number of cycles dedicated to template.
<END_TASK>
<USER_TASK:>
Description:
def template_cycles(self) -> int:
"""The number of cycles dedicated to template.""" |
return sum((int(re.sub(r'\D', '', op)) for op in self.template_tokens)) |
<SYSTEM_TASK:>
The number of cycles dedicated to skips.
<END_TASK>
<USER_TASK:>
Description:
def skip_cycles(self) -> int:
"""The number of cycles dedicated to skips.""" |
return sum((int(re.sub(r'\D', '', op)) for op in self.skip_tokens)) |
<SYSTEM_TASK:>
The number of cycles dedicated to UMI.
<END_TASK>
<USER_TASK:>
Description:
def umi_cycles(self) -> int:
"""The number of cycles dedicated to UMI.""" |
return sum((int(re.sub(r'\D', '', op)) for op in self.umi_tokens)) |
<SYSTEM_TASK:>
The total number of cycles in the structure.
<END_TASK>
<USER_TASK:>
Description:
def total_cycles(self) -> int:
"""The number of total number of cycles in the structure.""" |
return sum((int(re.sub(r'\D', '', op)) for op in self.tokens)) |
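A self-contained sketch of the cycle-counting pattern shared by the four properties above (token spellings such as '141T' follow Illumina read-structure conventions; the helper below is a simplification, not the class's real API):

import re
from typing import List

def sum_cycles(tokens: List[str]) -> int:
    """Strip every non-digit from each token and sum the counts."""
    return sum(int(re.sub(r'\D', '', t)) for t in tokens)

# '10M141T8B' -> 10 UMI cycles, 141 template cycles, 8 barcode cycles
print(sum_cycles(['10M', '141T', '8B']))  # 159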
<SYSTEM_TASK:>
Return a markdown summary of the samples on this sample sheet.
<END_TASK>
<USER_TASK:>
Description:
def experimental_design(self) -> Any:
"""Return a markdown summary of the samples on this sample sheet.
This property supports displaying rendered markdown only when running
within an IPython interpreter. If we are not running in an IPython
interpreter, then print out a nicely formatted ASCII table.
Returns:
Markdown, str: A visual table of IDs and names for all samples.
""" |
if not self.samples:
raise ValueError('No samples in sample sheet')
markdown = tabulate(
[[getattr(s, h, '') for h in DESIGN_HEADER] for s in self.samples],
headers=DESIGN_HEADER,
tablefmt='pipe',
)
return maybe_render_markdown(markdown) |
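A hedged sketch of the tabulate call used above (DESIGN_HEADER's real contents are defined elsewhere in the module; the two columns here are assumptions):

from tabulate import tabulate

DESIGN_HEADER = ['Sample_ID', 'Sample_Name']  # assumed subset
rows = [['S1', 'control'], ['S2', 'treated']]
# Prints a markdown-style pipe table with one row per sample.
print(tabulate(rows, headers=DESIGN_HEADER, tablefmt='pipe'))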
<SYSTEM_TASK:>
Return a summary of this sample sheet in a TTY compatible codec.
<END_TASK>
<USER_TASK:>
Description:
def _repr_tty_(self) -> str:
"""Return a summary of this sample sheet in a TTY compatible codec.""" |
header_description = ['Sample_ID', 'Description']
header_samples = [
'Sample_ID',
'Sample_Name',
'Library_ID',
'index',
'index2',
]
header = SingleTable([], 'Header')
setting = SingleTable([], 'Settings')
sample_main = SingleTable([header_samples], 'Identifiers')
sample_desc = SingleTable([header_description], 'Descriptions')
# All key:value pairs found in the [Header] section.
max_header_width = max(MIN_WIDTH, sample_desc.column_max_width(-1))
for key in self.Header.keys():
if 'Description' in key:
value = '\n'.join(
wrap(getattr(self.Header, key), max_header_width)
)
else:
value = getattr(self.Header, key)
header.table_data.append([key, value])
# All key:value pairs found in the [Settings] and [Reads] sections.
for key in self.Settings.keys():
setting.table_data.append((key, getattr(self.Settings, key) or ''))
setting.table_data.append(('Reads', ', '.join(map(str, self.Reads))))
# Descriptions are wrapped to the allowable space remaining.
description_width = max(MIN_WIDTH, sample_desc.column_max_width(-1))
for sample in self.samples:
# Add all key:value pairs for this sample
sample_main.table_data.append(
[getattr(sample, title) or '' for title in header_samples]
)
# Wrap and add the sample description
sample_desc.table_data.append(
(
sample.Sample_ID,
'\n'.join(
wrap(sample.Description or '', description_width)
),
)
)
# These tables do not have horizontal headers so remove the frame.
header.inner_heading_row_border = False
setting.inner_heading_row_border = False
table = '\n'.join(
[header.table, setting.table, sample_main.table, sample_desc.table]
)
return table |
<SYSTEM_TASK:>
Converts timezone instances to strings for db storage.
<END_TASK>
<USER_TASK:>
Description:
def get_prep_value(self, value):
"""Converts timezone instances to strings for db storage.""" |
# pylint: disable=newstyle
value = super(TimeZoneField, self).get_prep_value(value)
if isinstance(value, tzinfo):
return value.zone
return value |
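A quick self-contained sketch of the tzinfo-to-string collapse (pytz timezone objects expose a .zone attribute; plain strings pass through unchanged):

from datetime import tzinfo
import pytz

def prep(value):
    # Mirrors get_prep_value(): tzinfo instances collapse to their zone name.
    if isinstance(value, tzinfo):
        return value.zone
    return value

print(prep(pytz.timezone('Europe/Berlin')))  # 'Europe/Berlin'
print(prep('UTC'))                           # 'UTC'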
<SYSTEM_TASK:>
Checks that the `max_length` attribute covers all possible pytz
<END_TASK>
<USER_TASK:>
Description:
def _check_timezone_max_length_attribute(self): # pragma: no cover
"""
Checks that the `max_length` attribute covers all possible pytz
timezone lengths.
""" |
# Retrieve the maximum possible length for the time zone string
possible_max_length = max(map(len, pytz.all_timezones))
# Make sure that the max_length attribute will handle the longest time
# zone string
if self.max_length < possible_max_length: # pragma: no cover
return [
checks.Error(
msg=(
"'max_length' is too short to support all possible "
"pytz time zones."
),
hint=(
"pytz {version}'s longest time zone string has a "
"length of {value}, although it is recommended that "
"you leave room for longer time zone strings to be "
"added in the future.".format(
version=pytz.VERSION,
value=possible_max_length
)
),
obj=self,
)
]
# When no error, return an empty list
return [] |
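The maximum length being checked can be computed directly from pytz; a minimal sketch:

import pytz

longest = max(pytz.all_timezones, key=len)
print(longest, len(longest))
# e.g. 'America/Argentina/ComodRivadavia' 32 (the exact value depends on
# the installed pytz release)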
<SYSTEM_TASK:>
Checks to make sure that choices contains valid time zone values.
<END_TASK>
<USER_TASK:>
Description:
def _check_choices_attribute(self): # pragma: no cover
"""Checks to make sure that choices contains valid timezone choices.""" |
if self.choices:
warning_params = {
'msg': (
"'choices' contains an invalid time zone value '{value}' "
"which was not found as a supported time zone by pytz "
"{version}."
),
'hint': "Values must be found in pytz.all_timezones.",
'obj': self,
}
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key in map(lambda x: x[0], option_value):
if optgroup_key not in pytz.all_timezones:
# Make sure we don't raise this error on empty
# values
if optgroup_key not in self.empty_values:
# Update the error message by adding the value
warning_params.update({
'msg': warning_params['msg'].format(
value=optgroup_key,
version=pytz.VERSION
)
})
# Return the warning
return [
checks.Warning(**warning_params)
]
elif option_key not in pytz.all_timezones:
# Make sure we don't raise this error on empty
# values
if option_key not in self.empty_values:
# Update the error message by adding the value
warning_params.update({
'msg': warning_params['msg'].format(
value=option_key,
version=pytz.VERSION
)
})
# Return the warning
return [
checks.Warning(**warning_params)
]
# When no error, return an empty list
return [] |
<SYSTEM_TASK:>
Convert the value to the appropriate timezone.
<END_TASK>
<USER_TASK:>
Description:
def to_python(self, value):
"""Convert the value to the appropriate timezone.""" |
# pylint: disable=newstyle
value = super(LinkedTZDateTimeField, self).to_python(value)
if not value:
return value
return value.astimezone(self.timezone) |
<SYSTEM_TASK:>
Converts the value being saved based on `populate_from` and
<END_TASK>
<USER_TASK:>
Description:
def pre_save(self, model_instance, add):
"""
Converts the value being saved based on `populate_from` and
`time_override`
""" |
# pylint: disable=newstyle
# Retrieve the currently entered datetime
value = super(
LinkedTZDateTimeField,
self
).pre_save(
model_instance=model_instance,
add=add
)
# Convert the value to the correct time/timezone
value = self._convert_value(
value=value,
model_instance=model_instance,
add=add
)
setattr(model_instance, self.attname, value)
return value |
<SYSTEM_TASK:>
Add our custom keyword arguments for migrations.
<END_TASK>
<USER_TASK:>
Description:
def deconstruct(self): # pragma: no cover
"""Add our custom keyword arguments for migrations.""" |
# pylint: disable=newstyle
name, path, args, kwargs = super(
LinkedTZDateTimeField,
self
).deconstruct()
# Only include kwarg if it's not the default
if self.populate_from is not None:
# Since populate_from requires a model instance and Django does
# not allow lambda, we hope that we have been provided a
# function that can be parsed
kwargs['populate_from'] = self.populate_from
# Only include kwarg if it's not the default
if self.time_override is not None:
if hasattr(self.time_override, '__call__'):
# Call the callable datetime.time instance
kwargs['time_override'] = self.time_override()
else:
kwargs['time_override'] = self.time_override
return name, path, args, kwargs |
<SYSTEM_TASK:>
Retrieves the timezone or None from the `populate_from` attribute.
<END_TASK>
<USER_TASK:>
Description:
def _get_populate_from(self, model_instance):
"""
Retrieves the timezone or None from the `populate_from` attribute.
""" |
if hasattr(self.populate_from, '__call__'):
tz = self.populate_from(model_instance)
else:
from_attr = getattr(model_instance, self.populate_from)
tz = callable(from_attr) and from_attr() or from_attr
try:
tz = pytz.timezone(str(tz))
except pytz.UnknownTimeZoneError:
# It was a valiant effort. Resistance is futile.
raise
# If we have a timezone, set the instance's timezone attribute
self.timezone = tz
return tz |
<SYSTEM_TASK:>
Retrieves the datetime.time or None from the `time_override` attribute.
<END_TASK>
<USER_TASK:>
Description:
def _get_time_override(self):
"""
Retrieves the datetime.time or None from the `time_override` attribute.
""" |
if callable(self.time_override):
time_override = self.time_override()
else:
time_override = self.time_override
if not isinstance(time_override, datetime_time):
raise ValueError(
'Invalid type. Must be a datetime.time instance.'
)
return time_override |
<SYSTEM_TASK:>
Converts the value to the appropriate timezone and time as declared by
<END_TASK>
<USER_TASK:>
Description:
def _convert_value(self, value, model_instance, add):
"""
Converts the value to the appropriate timezone and time as declared by
the `time_override` and `populate_from` attributes.
""" |
if not value:
return value
# Retrieve the default timezone as the default
tz = get_default_timezone()
# If populate_from exists, override the default timezone
if self.populate_from is not None:
tz = self._get_populate_from(model_instance)
if is_naive(value):
value = make_aware(value=value, timezone=tz)
# Convert the value to a datetime object in the correct timezone. This
# ensures that we will have the correct date if we are performing a
# time override below.
value = value.astimezone(tz)
# Do not convert the time to the time override if auto_now or
# auto_now_add is set
if self.time_override is not None and not (
self.auto_now or (self.auto_now_add and add)
):
# Retrieve the time override
time_override = self._get_time_override()
# Convert the value to the date/time with the appropriate timezone
value = make_aware(
value=datetime.combine(
date=value.date(),
time=time_override
),
timezone=tz
)
return value |
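A hedged usage sketch of the field in a Django model (the import path timezone_utils.fields is an assumption; note that deconstruct() above implies populate_from should be a named function, since migrations cannot serialize lambdas):

from datetime import time
from django.db import models
from timezone_utils.fields import (  # assumed import path
    LinkedTZDateTimeField, TimeZoneField,
)

def get_event_timezone(instance):
    # Named function so deconstruct() can serialize it for migrations.
    return instance.timezone

class Event(models.Model):
    timezone = TimeZoneField()
    # Saved values are localized to the instance's timezone and the
    # time portion is overridden to 09:00 local time.
    starts_at = LinkedTZDateTimeField(
        populate_from=get_event_timezone,
        time_override=time(9, 0),
    )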