Dataset columns: text_prompt (string, 157 to 13.1k characters), code_prompt (string, 7 to 19.8k characters)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_filter_node(root, filter_, value): """Adds filter xml node to root."""
filter_el = ElementTree.SubElement(root, 'Filter')
filter_el.set('name', filter_.name)

# Set filter value depending on type.
if filter_.type == 'boolean':
    # Boolean case.
    if value is True or value.lower() in {'included', 'only'}:
        filter_el.set('excluded', '0')
    elif value is False or value.lower() == 'excluded':
        filter_el.set('excluded', '1')
    else:
        raise ValueError('Invalid value for boolean filter ({})'
                         .format(value))
elif isinstance(value, list) or isinstance(value, tuple):
    # List case.
    filter_el.set('value', ','.join(map(str, value)))
else:
    # Default case.
    filter_el.set('value', str(value))
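A minimal, hedged usage sketch of the function above. The SimpleFilter namedtuple is a made-up stand-in for the library's real filter class; only the 'name' and 'type' attributes used by _add_filter_node are assumed here.

# Hypothetical usage sketch; 'SimpleFilter' is not part of the library.
from collections import namedtuple
from xml.etree import ElementTree

SimpleFilter = namedtuple('SimpleFilter', ['name', 'type'])

root = ElementTree.Element('Query')
_add_filter_node(root, SimpleFilter('chromosome_name', 'text'), ['1', '2', 'X'])
_add_filter_node(root, SimpleFilter('with_hgnc', 'boolean'), 'only')
print(ElementTree.tostring(root))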
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_potential_markables(docgraph): """ returns a list of all NPs and PPs in the given docgraph. Parameters docgraph : DiscourseDocumentGraph a document graph that (at least) contains syntax trees (imported from Tiger XML files) Returns ------- potential_markables : list of str or int Node IDs of all nodes that represent an NP/PP syntactical category/phrase in the input document. If an NP is embedded in a PP, only the node ID of the PP is returned. """
potential_markables = []
for node_id, nattr in dg.select_nodes_by_layer(docgraph, 'tiger:syntax',
                                               data=True):
    if nattr['tiger:cat'] == 'NP':
        # if an NP is embedded into a PP, only print the PP
        pp_parent = False
        for source, target in docgraph.in_edges(node_id):
            parent_node = docgraph.node[source]
            if 'tiger:cat' in parent_node and parent_node['tiger:cat'] == 'PP':
                potential_markables.append(source)  # add parent PP phrase
                pp_parent = True
        if not pp_parent:
            potential_markables.append(node_id)  # add NP phrase
    elif nattr['tiger:cat'] == 'PP':
        potential_markables.append(node_id)  # add PP phrase
return potential_markables
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_common_paths_file(project_path): """ Parses a common_paths.xml file and returns a dictionary of paths, a dictionary of annotation level descriptions and the filename of the style file. Parameters project_path : str path to the root directory of the MMAX project Returns ------- paths : dict maps from MMAX file types (str, e.g. 'basedata' or 'markable') to the relative path (str) containing files of this type annotations : dict maps from MMAX annotation level names (str, e.g. 'sentence', 'primmark') to a dict of features. The features are: 'schemefile' (maps to a file), 'customization_file' (ditto) and 'file_extension' (maps to the file name ending used for all annotations files of this level) stylefile : str name of the (default) style file used in this MMAX project """
common_paths_file = os.path.join(project_path, 'common_paths.xml')
tree = etree.parse(common_paths_file)

paths = {}
path_vars = ['basedata', 'scheme', 'style', 'customization', 'markable']
for path_var in path_vars:
    specific_path = tree.find('//{}_path'.format(path_var)).text
    paths[path_var] = specific_path if specific_path else project_path
paths['project_path'] = project_path

annotations = {}
for level in tree.iterfind('//level'):
    annotations[level.attrib['name']] = {
        'schemefile': level.attrib['schemefile'],
        'customization_file': level.attrib['customization_file'],
        'file_extension': level.text[1:]}

stylesheet = tree.find('//stylesheet').text
return paths, annotations, stylesheet
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_sentences_and_token_nodes(self): """ Returns a list of sentence root node IDs and a list of sentences, where each list contains the token node IDs of that sentence. Both lists will be empty if sentences were not annotated in the original MMAX2 data. TODO: Refactor this! There's code overlap with self.add_annotation_layer(). Ideally, we would always import sentence annotations and filter them out in the exporters (e.g. Exmaralda, CoNLL), probably by modifying get_pointing_chains(). Returns ------- sentence_root_nodes : list of str a list of all sentence root node IDs, in the order they occur in the text token_nodes : list of list of str a list of lists. each list represents a sentence and contains token node IDs (in the order they occur in the text) """
token_nodes = [] # if sentence annotations were ignored during MMAXDocumentGraph # construction, we need to extract sentence/token node IDs manually if self.ignore_sentence_annotations: mp = self.mmax_project layer_dict = mp.annotations['sentence'] file_id = self.get_file_id(self.name) sentence_anno_file = os.path.join(mp.project_path, mp.paths['markable'], file_id+layer_dict['file_extension']) tree = etree.parse(sentence_anno_file) root = tree.getroot() sentence_root_nodes = [] for markable in root.iterchildren(): sentence_root_nodes.append(markable.attrib['id']) sentence_token_nodes = [] for token_id in spanstring2tokens(self, markable.attrib['span']): # ignore token IDs that aren't used in the *_words.xml file # NOTE: we only need this filter for broken files in the PCC corpus if token_id in self.tokens: sentence_token_nodes.append(token_id) self.add_node(markable.attrib['id'], layers={self.ns, self.ns+':sentence'}) token_nodes.append(sentence_token_nodes) else: sentence_root_nodes = list(select_nodes_by_layer(self, self.ns+':sentence')) for sent_node in sentence_root_nodes: sentence_token_nodes = [] for token_id in self.get_token_nodes_from_sentence(sent_node): # ignore token IDs that aren't used in the *_words.xml file # NOTE: we only need this filter for broken files in the PCC corpus if token_id in self.tokens: sentence_token_nodes.append(token_id) token_nodes.append(sentence_token_nodes) return sentence_root_nodes, token_nodes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_token_nodes_from_sentence(self, sentence_root_node): """returns a list of token node IDs belonging to the given sentence"""
return spanstring2tokens(self, self.node[sentence_root_node][self.ns+':span'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_token_layer(self, words_file, connected): """ parses a _words.xml file, adds every token to the document graph and adds an edge from the MMAX root node to it. Parameters connected : bool Make the graph connected, i.e. add an edge from root to each token. """
for word in etree.parse(words_file).iterfind('//word'): token_node_id = word.attrib['id'] self.tokens.append(token_node_id) token_str = ensure_unicode(word.text) self.add_node(token_node_id, layers={self.ns, self.ns+':token'}, attr_dict={self.ns+':token': token_str, 'label': token_str}) if connected: self.add_edge(self.root, token_node_id, layers={self.ns, self.ns+':token'})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_annotation_layer(self, annotation_file, layer_name): """ adds all markables from the given annotation layer to the discourse graph. """
assert os.path.isfile(annotation_file), \ "Annotation file doesn't exist: {}".format(annotation_file) tree = etree.parse(annotation_file) root = tree.getroot() default_layers = {self.ns, self.ns+':markable', self.ns+':'+layer_name} # avoids eml.org namespace handling for markable in root.iterchildren(): markable_node_id = markable.attrib['id'] markable_attribs = add_prefix(markable.attrib, self.ns+':') self.add_node(markable_node_id, layers=default_layers, attr_dict=markable_attribs, label=markable_node_id+':'+layer_name) for target_node_id in spanstring2tokens(self, markable.attrib['span']): # manually add to_node if it's not in the graph, yet # cf. issue #39 if target_node_id not in self: self.add_node(target_node_id, # adding 'mmax:layer_name' here could be # misleading (e.g. each token would be part # of the 'mmax:sentence' layer layers={self.ns, self.ns+':markable'}, label=target_node_id) self.add_edge(markable_node_id, target_node_id, layers=default_layers, edge_type=EdgeTypes.spanning_relation, label=self.ns+':'+layer_name) # this is a workaround for Chiarcos-style MMAX files if has_antecedent(markable): antecedent_pointer = markable.attrib['anaphor_antecedent'] # mmax2 supports weird double antecedents, # e.g. "markable_1000131;markable_1000132", cf. Issue #40 # # handling these double antecendents increases the number of # chains, cf. commit edc28abdc4fd36065e8bbf5900eeb4d1326db153 for antecedent in antecedent_pointer.split(';'): ante_split = antecedent.split(":") if len(ante_split) == 2: # mark group:markable_n or secmark:markable_n as such edge_label = '{}:antecedent'.format(ante_split[0]) else: edge_label = ':antecedent' # handles both 'markable_n' and 'layer:markable_n' antecedent_node_id = ante_split[-1] if len(ante_split) == 2: antecedent_layer = ante_split[0] default_layers.add('{0}:{1}'.format(self.ns, antecedent_layer)) # manually add antecedent node if it's not yet in the graph # cf. issue #39 if antecedent_node_id not in self: self.add_node(antecedent_node_id, layers=default_layers) self.add_edge(markable_node_id, antecedent_node_id, layers=default_layers, edge_type=EdgeTypes.pointing_relation, label=self.ns+edge_label)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_edu_text(text_subtree): """return the text of the given EDU subtree, with '_!'-delimiters removed."""
assert text_subtree.label() == 'text', "text_subtree: {}".format(text_subtree)
edu_str = u' '.join(word for word in text_subtree.leaves())
return re.sub(r'_!(.*?)_!', r'\g<1>', edu_str)
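A hedged illustration of the intended behaviour, using a hand-built nltk Tree whose 'text' label and '_!' delimiters mimic the .dis-style input the function assumes:

# Illustrative only: a hand-built subtree in the '_!'-delimited style.
from nltk.tree import Tree

subtree = Tree('text', [u'_!Although', u'they', u'accepted', u'the', u'offer_!'])
print(get_edu_text(subtree))  # -> u'Although they accepted the offer'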
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_node_id(nuc_or_sat, namespace=None): """return the node ID of the given nucleus or satellite"""
node_type = get_node_type(nuc_or_sat)
if node_type == 'leaf':
    leaf_id = nuc_or_sat[0].leaves()[0]
    if namespace is not None:
        return '{0}:{1}'.format(namespace, leaf_id)
    else:
        return str(leaf_id)

# else: node_type == 'span'
span_start = nuc_or_sat[0].leaves()[0]
span_end = nuc_or_sat[0].leaves()[1]
if namespace is not None:
    return '{0}:span:{1}-{2}'.format(namespace, span_start, span_end)
else:
    return 'span:{0}-{1}'.format(span_start, span_end)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def datasets(self): """List of datasets in this mart."""
if self._datasets is None:
    self._datasets = self._fetch_datasets()
return self._datasets
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_datasets(self): """Lists available datasets in a readable DataFrame format. Returns: pd.DataFrame: Frame listing available datasets. """
def _row_gen(attributes):
    for attr in attributes.values():
        yield (attr.name, attr.display_name)

return pd.DataFrame.from_records(
    _row_gen(self.datasets), columns=['name', 'display_name'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extract_relationtypes(rs3_xml_tree): """ extracts the allowed RST relation names and relation types from an RS3 XML file. Parameters rs3_xml_tree : lxml.etree._ElementTree lxml ElementTree representation of an RS3 XML file Returns ------- relations : dict of (str, str) Returns a dictionary with RST relation names as keys (str) and relation types (either 'rst' or 'multinuc') as values (str). """
return {rel.attrib['name']: rel.attrib['type']
        for rel in rs3_xml_tree.iter('rel')
        if 'type' in rel.attrib}
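A short, hedged sketch of how this might be called; 'example.rs3' is a placeholder path, not a file from the project.

# Sketch only; 'example.rs3' is a placeholder filename.
from lxml import etree

rs3_tree = etree.parse('example.rs3')
relations = extract_relationtypes(rs3_tree)
# e.g. {'contrast': 'rst', 'joint': 'multinuc', ...}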
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_node_id(edge, node_type): """ returns the source or target node id of an edge, depending on the node_type given. """
assert node_type in ('source', 'target')
_, node_id_str = edge.attrib[node_type].split('.')  # e.g. //@nodes.251
return int(node_id_str)
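An illustrative call with a minimal hand-built edge element (the attribute values follow the '//@nodes.N' pattern mentioned in the comment above):

# Illustrative only: a tiny hand-built edge element.
from lxml import etree

edge = etree.fromstring('<edges source="//@nodes.251" target="//@nodes.13"/>')
print(get_node_id(edge, 'source'))  # -> 251
print(get_node_id(edge, 'target'))  # -> 13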
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def traverse_dependencies_up(docgraph, node_id, node_attr=None): """ starting from the given node, traverse ingoing edges up to the root element of the sentence. return the given node attribute from all the nodes visited along the way. """
# there's only one, but we're in a multidigraph
source, target = docgraph.in_edges(node_id)[0]
traverse_attr = node_attr if node_attr else docgraph.lemma_attr

attrib_value = docgraph.node[source].get(traverse_attr)
if attrib_value:
    yield attrib_value

if istoken(docgraph, source) is True:
    for attrib_value in traverse_dependencies_up(docgraph, source,
                                                 traverse_attr):
        yield attrib_value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __add_dependency(self, word_instance, sent_id): """ adds an ingoing dependency relation from the projected head of a token to the token itself. """
# 'head_attr': (projected) head head = word_instance.__getattribute__(self.head_attr) deprel = word_instance.__getattribute__(self.deprel_attr) if head == '0': # word represents the sentence root source_id = sent_id else: source_id = '{0}_t{1}'.format(sent_id, head) # TODO: fix issue #39, so we don't have to add nodes explicitly if source_id not in self.node: self.add_node(source_id, layers={self.ns}) target_id = '{0}_t{1}'.format(sent_id, word_instance.word_id) # 'pdeprel': projected dependency relation try: self.add_edge(source_id, target_id, layers={self.ns, self.ns+':dependency'}, relation_type=deprel, label=deprel, edge_type=EdgeTypes.dominance_relation) except AssertionError: print "source: {0}, target: {1}".format(source_id, target_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __build_markable_token_mapper(self, coreference_layer=None, markable_layer=None): """ Creates mappings from tokens to the markable spans they belong to and the coreference chains these markables are part of. Returns ------- tok2markables : dict (str -> set of str) Maps from a token (node ID) to all the markables (node IDs) it is part of. markable2toks : dict (str -> list of str) Maps from a markable (node ID) to all the tokens (node IDs) that belong to it. markable2chains : dict (str -> list of int) Maps from a markable (node ID) to all the chains (chain ID) it belongs to. """
tok2markables = defaultdict(set) markable2toks = defaultdict(list) markable2chains = defaultdict(list) coreference_chains = get_pointing_chains(self.docgraph, layer=coreference_layer) for chain_id, chain in enumerate(coreference_chains): for markable_node_id in chain: markable2chains[markable_node_id].append(chain_id) # ID of the first singleton (if there are any) singleton_id = len(coreference_chains) # markable2toks/tok2markables shall contains all markables, not only # those which are part of a coreference chain for markable_node_id in select_nodes_by_layer(self.docgraph, markable_layer): span = get_span(self.docgraph, markable_node_id) markable2toks[markable_node_id] = span for token_node_id in span: tok2markables[token_node_id].add(markable_node_id) # singletons each represent their own chain (with only one element) if markable_node_id not in markable2chains: markable2chains[markable_node_id] = [singleton_id] singleton_id += 1 return tok2markables, markable2toks, markable2chains
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __gen_coref_str(self, token_id, markable_id, target_id): """ generates the string that represents the markables and coreference chains that a token is part of. Parameters token_id : str the node ID of the token markable_id : str the node ID of the markable span target_id : int the ID of the target (either a singleton markable or a coreference chain) Returns ------- coref_str : str a string representing the token's position in a markable span and its membership in one (or more) coreference chains """
span = self.markable2toks[markable_id] coref_str = str(target_id) if span.index(token_id) == 0: # token is the first element of a markable span coref_str = '(' + coref_str if span.index(token_id) == len(span)-1: # token is the last element of a markable span coref_str += ')' return coref_str
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extract_sentences(nodes, token_node_indices): """ given a list of ``SaltNode``\s, returns a list of lists, where each list contains the indices of the nodes belonging to that sentence. """
sents = [] tokens = [] for i, node in enumerate(nodes): if i in token_node_indices: if node.features['tiger.pos'] != '$.': tokens.append(i) else: # start a new sentence, if 'tiger.pos' is '$.' tokens.append(i) sents.append(tokens) tokens = [] return sents
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_gexf(docgraph, output_file): """ takes a document graph, converts it into GEXF format and writes it to a file. """
dg_copy = deepcopy(docgraph)
remove_root_metadata(dg_copy)
layerset2str(dg_copy)
attriblist2str(dg_copy)
nx_write_gexf(dg_copy, output_file)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_child_nodes(docgraph, parent_node_id, data=False): """Yield all nodes that the given node dominates or spans."""
return select_neighbors_by_edge_attribute( docgraph=docgraph, source=parent_node_id, attribute='edge_type', value=[EdgeTypes.dominance_relation], data=data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_parents(docgraph, child_node, strict=True): """Return a list of parent nodes that dominate this child. In a 'syntax tree' a node never has more than one parent node dominating it. To enforce this, set strict=True. Parameters docgraph : DiscourseDocumentGraph a document graph strict : bool If True, raise a ValueError if a child node is dominated by more than one parent node. Returns ------- parents : list a list of (parent) node IDs. """
parents = [] for src, _, edge_attrs in docgraph.in_edges(child_node, data=True): if edge_attrs['edge_type'] == EdgeTypes.dominance_relation: parents.append(src) if strict and len(parents) > 1: raise ValueError(("In a syntax tree, a node can't be " "dominated by more than one parent")) return parents
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sorted_bfs_edges(G, source=None): """Produce edges in a breadth-first-search starting at source. Neighbors appear in the order a linguist would expect in a syntax tree. The result will only contain edges that express a dominance or spanning relation, i.e. edges expressing pointing or precedence relations will be ignored. Parameters G : DiscourseDocumentGraph source : node Specify starting node for breadth-first search and return edges in the component reachable from source. Returns ------- edges: generator A generator of edges in the breadth-first-search. """
if source is None: source = G.root xpos = horizontal_positions(G, source) visited = set([source]) source_children = get_child_nodes(G, source) queue = deque([(source, iter(sorted(source_children, key=lambda x: xpos[x])))]) while queue: parent, children = queue[0] try: child = next(children) if child not in visited: yield parent, child visited.add(child) grandchildren = get_child_nodes(G, child) queue.append((child, iter(sorted(grandchildren, key=lambda x: xpos[x])))) except StopIteration: queue.popleft()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sorted_bfs_successors(G, source=None): """Return dictionary of successors in breadth-first-search from source. Parameters G : DiscourseDocumentGraph graph source : node Specify starting node for breadth-first search and return edges in the component reachable from source. Returns ------- successors: dict A dictionary with nodes as keys and list of succssors nodes as values. """
if source is None: source = G.root successors = defaultdict(list) for src, target in sorted_bfs_edges(G, source): successors[src].append(target) return dict(successors)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def node2bracket(docgraph, node_id, child_str=''): """convert a docgraph node into a PTB-style string."""
node_attrs = docgraph.node[node_id] if istoken(docgraph, node_id): pos_str = node_attrs.get(docgraph.ns+':pos', '') token_str = node_attrs[docgraph.ns+':token'] return u"({pos}{space1}{token}{space2}{child})".format( pos=pos_str, space1=bool(pos_str)*' ', token=token_str, space2=bool(child_str)*' ', child=child_str) #else: node is not a token label_str = node_attrs.get('label', '') return u"({label}{space}{child})".format( label=label_str, space=bool(label_str and child_str)*' ', child=child_str)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tree2bracket(docgraph, root=None, successors=None): """convert a docgraph into a PTB-style string. If root (a node ID) is given, only convert the subgraph that this node domintes/spans into a PTB-style string. """
if root is None: root = docgraph.root if successors is None: successors = sorted_bfs_successors(docgraph, root) if root in successors: embed_str = u" ".join(tree2bracket(docgraph, child, successors) for child in successors[root]) return node2bracket(docgraph, root, embed_str) return node2bracket(docgraph, root)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def word_wrap_tree(parented_tree, width=0): """line-wrap an NLTK ParentedTree for pretty-printing"""
if width != 0:
    for i, leaf_text in enumerate(parented_tree.leaves()):
        dedented_text = textwrap.dedent(leaf_text).strip()
        parented_tree[parented_tree.leaf_treeposition(i)] = textwrap.fill(
            dedented_text, width=width)
return parented_tree
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_position(self, rst_tree, node_id=None): """Get the linear position of an element of this DGParentedTree in an RSTTree. If ``node_id`` is given, this will return the position of the subtree with that node ID. Otherwise, the position of the root of this DGParentedTree in the given RSTTree is returned. """
if node_id is None:
    node_id = self.root_id

if node_id in rst_tree.edu_set:
    return rst_tree.edus.index(node_id)

return min(self.get_position(rst_tree, child_node_id)
           for child_node_id in rst_tree.child_dict[node_id])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, **params): """Performs get request to the biomart service. Args: **params (dict of str: any): Arbitrary keyword arguments, which are added as parameters to the get request to biomart. Returns: requests.models.Response: Response from biomart for the request. """
if self._use_cache:
    r = requests.get(self.url, params=params)
else:
    with requests_cache.disabled():
        r = requests.get(self.url, params=params)
r.raise_for_status()
return r
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fromstring(cls, ptb_string, namespace='ptb', precedence=False, ignore_traces=True): """create a PTBDocumentGraph from a string containing PTB parses."""
temp = tempfile.NamedTemporaryFile(delete=False) temp.write(ptb_string) temp.close() ptb_docgraph = cls(ptb_filepath=temp.name, namespace=namespace, precedence=precedence, ignore_traces=ignore_traces) os.unlink(temp.name) return ptb_docgraph
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_sentence(self, sentence, ignore_traces=True): """ add a sentence from the input document to the document graph. Parameters sentence : nltk.tree.Tree a sentence represented by a Tree instance """
self.sentences.append(self._node_id) # add edge from document root to sentence root self.add_edge(self.root, self._node_id, edge_type=dg.EdgeTypes.dominance_relation) self._parse_sentencetree(sentence, ignore_traces=ignore_traces) self._node_id += 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_sentencetree(self, tree, parent_node_id=None, ignore_traces=True): """parse a sentence Tree into this document graph"""
def get_nodelabel(node): if isinstance(node, nltk.tree.Tree): return node.label() elif isinstance(node, unicode): return node.encode('utf-8') else: raise ValueError("Unexpected node type: {0}, {1}".format(type(node), node)) root_node_id = self._node_id self.node[root_node_id]['label'] = get_nodelabel(tree) for subtree in tree: self._node_id += 1 node_label = get_nodelabel(subtree) # unescape the node label, if necessary node_label = PTB_BRACKET_UNESCAPE.get(node_label, node_label) # TODO: refactor this, so we don't need to query this all the time if ignore_traces and node_label == '-NONE-': # ignore tokens annotated for traces continue if isinstance(subtree, nltk.tree.Tree): if len(subtree) > 1: # subtree is a syntactic category node_attrs = {'label': node_label, self.ns+':cat': node_label} layers = {self.ns, self.ns+':syntax'} else: # subtree represents a token and its POS tag node_attrs = {'label': node_label} layers = {self.ns} edge_type = dg.EdgeTypes.dominance_relation self.add_node(self._node_id, layers=layers, attr_dict=node_attrs) self.add_edge(root_node_id, self._node_id, edge_type=edge_type) else: # isinstance(subtree, unicode); subtree is a token # we'll have to modify the parent node of a token, since # in NLTK Trees, even a leaf node (with its POS tag) is # represented as a Tree (an iterator over a single unicode # string), e.g. ``Tree('NNS', ['prices'])`` pos_tag = self.node[parent_node_id]['label'] token_attrs = { 'label': node_label, self.ns+':token': node_label, self.ns+':pos': pos_tag} self.node[parent_node_id].update(token_attrs) self.tokens.append(parent_node_id) if isinstance(subtree, nltk.tree.Tree): self._parse_sentencetree(subtree, parent_node_id=self._node_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_class_instance(element, element_id, doc_id): """ given an Salt XML element, returns a corresponding `SaltElement` class instance, i.e. a SaltXML `SToken` node will be converted into a `TokenNode`. Parameters element : lxml.etree._Element an `etree._Element` is the XML representation of a Salt element, e.g. a single 'nodes' or 'edges' element element_id : int the index of element (used to connect edges to nodes) doc_id : str the ID of the SaltXML document Returns ------- salt_element : SaltElement an instance of a `SaltElement` subclass instance, e.g. a `TokenNode`, `TextualRelation` or `SaltLayer` """
xsi_type = get_xsi_type(element)
element_class = XSI_TYPE_CLASSES[xsi_type]
return element_class.from_etree(element)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def abslistdir(directory): """ returns a list of absolute filepaths for all files found in the given directory. """
abs_dir = os.path.abspath(directory)
filenames = os.listdir(abs_dir)
return [os.path.join(abs_dir, filename) for filename in filenames]
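A quick self-check of the behaviour described in the docstring (any existing directory works):

# Example usage: every returned path is absolute.
import os

for filepath in abslistdir('.'):
    assert os.path.isabs(filepath)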
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _extract_elements(self, tree, element_type): """ extracts all element of type `element_type from the `_ElementTree` representation of a SaltXML document and adds them to the corresponding `SaltDocument` attributes, i.e. `self.nodes`, `self.edges` and `self.layers`. Parameters tree : lxml.etree._ElementTree an ElementTree that represents a complete SaltXML document element_type : str the tag name of a SaltXML element, e.g. `nodes` or `edges` """
# creates a new attribute, e.g. 'self.nodes' and assigns it an # empty list setattr(self, element_type, []) etree_elements = get_elements(tree, element_type) for i, etree_element in enumerate(etree_elements): # create an instance of an element class (e.g. TokenNode) salt_element = create_class_instance(etree_element, i, self.doc_id) # and add it to the corresponding element type list, # e.g. 'self.nodes' getattr(self, element_type).append(salt_element)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def print_sentence(self, sent_index): """ returns the string representation of a sentence. :param sent_index: the index of a sentence (from ``self.sentences``) :type sent_index: int :return: the sentence string :rtype: str """
tokens = [self.print_token(tok_idx)
          for tok_idx in self.sentences[sent_index]]
return ' '.join(tokens)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def print_token(self, token_node_index): """returns the string representation of a token."""
err_msg = "The given node is not a token node." assert isinstance(self.nodes[token_node_index], TokenNode), err_msg onset = self.nodes[token_node_index].onset offset = self.nodes[token_node_index].offset return self.text[onset:offset]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def detect_stream_mode(stream): ''' detect_stream_mode - Detect the mode on a given stream @param stream <object> - A stream object If "mode" is present, that will be used. @return <type> - "Bytes" type or "str" type ''' # If "Mode" is present, pull from that if hasattr(stream, 'mode'): if 'b' in stream.mode: return bytes elif 't' in stream.mode: return str # Read a zero-length string off the device if hasattr(stream, 'read'): zeroStr = stream.read(0) if type(zeroStr) is str: return str return bytes elif hasattr(stream, 'recv'): zeroStr = stream.recv(0) if type(zeroStr) is str: return str return bytes # Cannot figure it out, assume bytes. return bytes
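A hedged example of the mode detection above: regular file objects expose a 'mode' attribute, so the first branch decides bytes vs. str; a text file opened with plain 'r' falls through to the zero-length read.

# Example: deciding bytes vs. str for ordinary file objects.
with open('example.bin', 'wb') as f:
    f.write(b'data')

with open('example.bin', 'rb') as f:
    assert detect_stream_mode(f) is bytes

with open('example.bin', 'r') as f:
    assert detect_stream_mode(f) is str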
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def node2freqt(docgraph, node_id, child_str='', include_pos=False, escape_func=FREQT_ESCAPE_FUNC): """convert a docgraph node into a FREQT string."""
node_attrs = docgraph.node[node_id] if istoken(docgraph, node_id): token_str = escape_func(node_attrs[docgraph.ns+':token']) if include_pos: pos_str = escape_func(node_attrs.get(docgraph.ns+':pos', '')) return u"({pos}({token}){child})".format( pos=pos_str, token=token_str, child=child_str) else: return u"({token}{child})".format(token=token_str, child=child_str) else: # node is not a token label_str=escape_func(node_attrs.get('label', node_id)) return u"({label}{child})".format(label=label_str, child=child_str)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sentence2freqt(docgraph, root, successors=None, include_pos=False, escape_func=FREQT_ESCAPE_FUNC): """convert a sentence subgraph into a FREQT string."""
if successors is None: successors = sorted_bfs_successors(docgraph, root) if root in successors: # root node has children / subgraphs embed_str = u"".join(sentence2freqt(docgraph, child, successors, include_pos=include_pos, escape_func=escape_func) for child in successors[root]) return node2freqt( docgraph, root, embed_str, include_pos=include_pos, escape_func=escape_func) else: # root node has no children / subgraphs return node2freqt(docgraph, root, include_pos=include_pos, escape_func=escape_func)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def docgraph2freqt(docgraph, root=None, include_pos=False, escape_func=FREQT_ESCAPE_FUNC): """convert a docgraph into a FREQT string."""
if root is None: return u"\n".join( sentence2freqt(docgraph, sentence, include_pos=include_pos, escape_func=escape_func) for sentence in docgraph.sentences) else: return sentence2freqt(docgraph, root, include_pos=include_pos, escape_func=escape_func)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def report(self, stream): """ Output code coverage report. """
if not self.xcoverageToStdout: # This will create a false stream where output will be ignored stream = StringIO() super(XCoverage, self).report(stream) if not hasattr(self, 'coverInstance'): # nose coverage plugin 1.0 and earlier import coverage self.coverInstance = coverage._the_coverage modules = [module for name, module in sys.modules.items() if self.wantModuleCoverage(name, module)] log.debug("Coverage report will cover modules: %s", modules) morfs = [m.__file__ for m in modules if hasattr(m, '__file__')] self.coverInstance.xml_report(morfs, outfile=self.xcoverageFile)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_etree(cls, etree_element): """ creates a `SaltElement` from an `etree._Element` representing an element in a SaltXMI file. """
label_elements = get_subelements(etree_element, 'labels') labels = [SaltLabel.from_etree(elem) for elem in label_elements] return cls(name=get_element_name(etree_element), element_id=get_graph_element_id(etree_element), xsi_type=get_xsi_type(etree_element), labels=labels, xml=etree_element)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def grant_sudo_privileges(request, max_age=COOKIE_AGE): """ Assigns a random token to the user's session that allows them to have elevated permissions """
user = getattr(request, 'user', None) # If there's not a user on the request, just noop if user is None: return if not user.is_authenticated(): raise ValueError('User needs to be logged in to be elevated to sudo') # Token doesn't need to be unique, # just needs to be unpredictable and match the cookie and the session token = get_random_string() request.session[COOKIE_NAME] = token request._sudo = True request._sudo_token = token request._sudo_max_age = max_age return token
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def revoke_sudo_privileges(request): """ Revoke sudo privileges from a request explicitly """
request._sudo = False
if COOKIE_NAME in request.session:
    del request.session[COOKIE_NAME]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def has_sudo_privileges(request): """ Check if a request is allowed to perform sudo actions """
if getattr(request, '_sudo', None) is None: try: request._sudo = ( request.user.is_authenticated() and constant_time_compare( request.get_signed_cookie(COOKIE_NAME, salt=COOKIE_SALT, max_age=COOKIE_AGE), request.session[COOKIE_NAME] ) ) except (KeyError, BadSignature): request._sudo = False return request._sudo
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hildatree2dgparentedtree(self): """Convert the tree from HILDA's format into a conventional binary tree, which can be easily converted into output formats like RS3. """
def transform(hilda_tree): """Transform a HILDA parse tree into a more conventional parse tree. The input tree:: Contrast[S][N] _______________|______________ Although they they accepted did n't like it , the offer . is converted into:: Contrast ____________|___________ S N | | Although they they accepted did n't like it , the offer . """ if isinstance(hilda_tree, basestring) or not hasattr(hilda_tree, 'label'): return hilda_tree assert len(hilda_tree) == 2, "We can only handle binary trees." match = HILDA_REL_RE.match(hilda_tree.label()) assert match, "Relation '{}' does not match regex '{}'".format(hilda_tree.label(), HILDA_REL_RE) relname, left_child_nuc, right_child_nuc = match.groups() hilda_tree._label = relname for i, child_nuclearity in enumerate([left_child_nuc, right_child_nuc]): child = hilda_tree[i] hilda_tree[i] = Tree(child_nuclearity, [transform(child)]) return hilda_tree tree = transform(self.hildafile_tree) return DGParentedTree.convert(tree)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def marts(self): """List of available marts."""
if self._marts is None:
    self._marts = self._fetch_marts()
return self._marts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_marts(self): """Lists available marts in a readable DataFrame format. Returns: pd.DataFrame: Frame listing available marts. """
def _row_gen(attributes):
    for attr in attributes.values():
        yield (attr.name, attr.display_name)

return pd.DataFrame.from_records(
    _row_gen(self.marts), columns=['name', 'display_name'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def bgread(stream, blockSizeLimit=65535, pollTime=.03, closeStream=True): ''' bgread - Start a thread which will read from the given stream in a non-blocking fashion, and automatically populate data in the returned object. @param stream <object> - A stream on which to read. Socket, file, etc. @param blockSizeLimit <None/int> - Number of bytes. Default 65535. If None, the stream will be read from until there is no more available data (not closed, but you've read all that's been flushed to straem). This is okay for smaller datasets, but this number effectively controls the amount of CPU time spent in I/O on this stream VS everything else in your application. The default of 65535 bytes is a fair amount of data. @param pollTime <float> - Default .03 (30ms) After all available data has been read from the stream, wait this many seconds before checking again for more data. A low number here means a high priority, i.e. more cycles will be devoted to checking and collecting the background data. Since this is a non-blocking read, this value is the "block", which will return execution context to the remainder of the application. The default of 100ms should be fine in most cases. If it's really idle data collection, you may want to try a value of 1 second. @param closeStream <bool> - Default True. If True, the "close" method on the stream object will be called when the other side has closed and all data has been read. NOTES -- blockSizeLimit / pollTime is your effective max-throughput. Real throughput will be lower than this number, as the actual throughput is be defined by: T = (blockSizeLimit / pollTime) - DeviceReadTime(blockSizeLimit) Using the defaults of .03 and 65535 means you'll read up to 2 MB per second. Keep in mind that the more time spent in I/O means less time spent doing other tasks. @return - The return of this function is a BackgroundReadData object. This object contains an attribute "blocks" which is a list of the non-zero-length blocks that were read from the stream. The object also contains a calculated property, "data", which is a string/bytes (depending on stream mode) of all the data currently read. The property "isFinished" will be set to True when the stream has been closed. The property "error" will be set to any exception that occurs during reading which will terminate the thread. @see BackgroundReadData for more info. ''' try: pollTime = float(pollTime) except ValueError: raise ValueError('Provided poll time must be a float.') if not hasattr(stream, 'read') and not hasattr(stream, 'recv'): raise ValueError('Cannot read off provided stream, does not implement "read" or "recv"') if blockSizeLimit is not None: try: blockSizeLimit = int(blockSizeLimit) if blockSizeLimit <= 0: raise ValueError() except ValueError: raise ValueError('Provided block size limit must be "None" for no limit, or a positive integer.') streamMode = detect_stream_mode(stream) results = BackgroundReadData(streamMode) thread = threading.Thread(target=_do_bgread, args=(stream, blockSizeLimit, pollTime, closeStream, results)) thread.daemon = True # Automatically terminate this thread if program closes thread.start() return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _do_bgread(stream, blockSizeLimit, pollTime, closeStream, results): ''' _do_bgread - Worker functon for the background read thread. @param stream <object> - Stream to read until closed @param results <BackgroundReadData> ''' # Put the whole function in a try instead of just the read portion for performance reasons. try: while True: nextData = nonblock_read(stream, limit=blockSizeLimit) if nextData is None: break elif nextData: results.addBlock(nextData) time.sleep(pollTime) except Exception as e: results.error = e return if closeStream and hasattr(stream, 'close'): stream.close() results.isFinished = True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_theme(request): """ Redirect to a given url while setting the chosen theme in the session or cookie. The url and the theme identifier need to be specified in the request parameters. Since this view changes how the user will see the rest of the site, it must only be accessed as a POST request. If called as a GET request, it will redirect to the page in the request (the 'next' parameter) without changing any state. """
next = request.POST.get('next', request.GET.get('next')) if not is_safe_url(url=next, host=request.get_host()): next = request.META.get('HTTP_REFERER') if not is_safe_url(url=next, host=request.get_host()): next = '/' response = http.HttpResponseRedirect(next) if request.method == 'POST': theme = request.POST.get('theme', None) if theme: if hasattr(request, 'session'): request.session['DJANGO_BOOTSTRAP_UI_THEME'] = theme else: response.set_cookie('DJANGO_BOOTSTRAP_UI_THEME', theme) return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_ui(self, path='hgwebdir.config'): """ A funcion that will read python rc files and make an ui from read options :param path: path to mercurial config file """
#propagated from mercurial documentation sections = [ 'alias', 'auth', 'decode/encode', 'defaults', 'diff', 'email', 'extensions', 'format', 'merge-patterns', 'merge-tools', 'hooks', 'http_proxy', 'smtp', 'patch', 'paths', 'profiling', 'server', 'trusted', 'ui', 'web', ] repos = path baseui = ui.ui() cfg = config.config() cfg.read(repos) self.paths = cfg.items('paths') self.base_path = self.paths[0][1].replace('*', '') self.check_repo_dir(self.paths) self.set_statics(cfg) for section in sections: for k, v in cfg.items(section): baseui.setconfig(section, k, v) return baseui
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def status(self): """ Returns modified, added, removed, deleted files for current changeset """
return self.repository._repo.status(self._ctx.p1().node(), self._ctx.node())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _fix_path(self, path): """ Paths are stored without trailing slash so we need to get rid off it if needed. Also mercurial keeps filenodes as str so we need to decode from unicode to str """
if path.endswith('/'):
    path = path.rstrip('/')
return safe_str(path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_nodes(self, path): """ Returns combined ``DirNode`` and ``FileNode`` objects list representing state of changeset at the given ``path``. If node at the given ``path`` is not instance of ``DirNode``, ChangesetError would be raised. """
if self._get_kind(path) != NodeKind.DIR: raise ChangesetError("Directory does not exist for revision %s at " " '%s'" % (self.revision, path)) path = self._fix_path(path) filenodes = [FileNode(f, changeset=self) for f in self._file_paths if os.path.dirname(f) == path] dirs = path == '' and '' or [d for d in self._dir_paths if d and posixpath.dirname(d) == path] dirnodes = [DirNode(d, changeset=self) for d in dirs if os.path.dirname(d) == path] als = self.repository.alias for k, vals in self._extract_submodules().iteritems(): #vals = url,rev,type loc = vals[0] cs = vals[1] dirnodes.append(SubModuleNode(k, url=loc, changeset=cs, alias=als)) nodes = dirnodes + filenodes # cache nodes for node in nodes: self.nodes[node.path] = node nodes.sort() return nodes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_node(self, path): """ Returns ``Node`` object from the given ``path``. If there is no node at the given ``path``, ``ChangesetError`` would be raised. """
path = self._fix_path(path) if not path in self.nodes: if path in self._file_paths: node = FileNode(path, changeset=self) elif path in self._dir_paths or path in self._dir_paths: if path == '': node = RootNode(changeset=self) else: node = DirNode(path, changeset=self) else: raise NodeDoesNotExistError("There is no file nor directory " "at the given path: '%s' at revision %s" % (path, self.short_id)) # cache node self.nodes[path] = node return self.nodes[path]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def content(self): """ Returns lazily content of the FileNode. If possible, would try to decode content from UTF-8. """
content = self._get_content()
if bool(content and '\0' in content):
    return content
return safe_unicode(content)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lexer(self): """ Returns pygment's lexer class. Would try to guess lexer taking file's content, name and mimetype. """
try:
    lexer = lexers.guess_lexer_for_filename(self.name, self.content,
                                            stripnl=False)
except lexers.ClassNotFound:
    lexer = lexers.TextLexer(stripnl=False)
# returns first alias
return lexer
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def history(self): """ Returns a list of changeset for this file in which the file was changed """
if self.changeset is None: raise NodeError('Unable to get changeset for this FileNode') return self.changeset.get_file_history(self.path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def annotate(self): """ Returns a list of three element tuples with lineno,changeset and line """
if self.changeset is None: raise NodeError('Unable to get changeset for this FileNode') return self.changeset.get_file_annotate(self.path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def name(self): """ Returns name of the node so if its path then only last part is returned. """
org = safe_unicode(self.path.rstrip('/').split('/')[-1])
return u'%s @ %s' % (org, self.changeset.short_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def predict(self, X, nsamples=200, likelihood_args=()): """ Predict target values from Bayesian generalized linear regression. Parameters X : ndarray (N*,d) array query input dataset (N* samples, d dimensions). nsamples : int, optional Number of samples for sampling the expected target values from the predictive distribution. likelihood_args : sequence, optional sequence of arguments to pass to the likelihood function. These are non-learnable parameters. They can be scalars or arrays of length N. Returns ------- Ey : ndarray The expected value of y* for the query inputs, X* of shape (N*,). """
Ey, _ = self.predict_moments(X, nsamples, likelihood_args)
return Ey
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def predict_moments(self, X, nsamples=200, likelihood_args=()): r""" Predictive moments, in particular mean and variance, of a Bayesian GLM. This function uses Monte-Carlo sampling to evaluate the predictive mean and variance of a Bayesian GLM. The exact expressions evaluated are, .. math :: \mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y] &= \int \mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)] p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w}, \mathbb{V}[y^* | \mathbf{x^*}, \mathbf{X}, y] &= \int \left(\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)] - \mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y]\right)^2 p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w}, where :math:`\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]` is the the expected value of :math:`y^*` from the likelihood, and :math:`p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi)` is the posterior distribution over weights (from ``learn``). Here are few concrete examples of how we can use these values, - Gaussian likelihood: these are just the predicted mean and variance, see ``revrand.regression.predict`` - Bernoulli likelihood: The expected value is the probability, :math:`p(y^* = 1)`, i.e. the probability of class one. The variance may not be so useful. - Poisson likelihood: The expected value is similar conceptually to the Gaussian case, and is also a *continuous* value. The median (50% quantile) from ``predict_interval`` is a discrete value. Again, the variance in this instance may not be so useful. Parameters X : ndarray (N*,d) array query input dataset (N* samples, d dimensions). nsamples : int, optional Number of samples for sampling the expected moments from the predictive distribution. likelihood_args : sequence, optional sequence of arguments to pass to the likelihood function. These are non-learnable parameters. They can be scalars or arrays of length N. Returns ------- Ey : ndarray The expected value of y* for the query inputs, X* of shape (N*,). Vy : ndarray The expected variance of y* (excluding likelihood noise terms) for the query inputs, X* of shape (N*,). """
# Get latent function samples N = X.shape[0] ys = np.empty((N, nsamples)) fsamples = self._sample_func(X, nsamples) # Push samples though likelihood expected value Eyargs = tuple(chain(atleast_list(self.like_hypers_), likelihood_args)) for i, f in enumerate(fsamples): ys[:, i] = self.likelihood.Ey(f, *Eyargs) # Average transformed samples (MC integration) Ey = ys.mean(axis=1) Vy = ((ys - Ey[:, np.newaxis])**2).mean(axis=1) return Ey, Vy
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def predict_logpdf(self, X, y, nsamples=200, likelihood_args=()): r""" Predictive log-probability density function of a Bayesian GLM. Parameters X : ndarray (N*,d) array query input dataset (N* samples, D dimensions). y : float or ndarray The test observations of shape (N*,) to evaluate under, :math:`\log p(y^* |\mathbf{x}^*, \mathbf{X}, y)`. nsamples : int, optional Number of samples for sampling the log predictive distribution. likelihood_args : sequence, optional sequence of arguments to pass to the likelihood function. These are non-learnable parameters. They can be scalars or arrays of length N*. Returns ------- logp : ndarray The log probability of y* given X* of shape (N*,). logp_min : ndarray The minimum sampled values of the predicted log probability (same shape as p) logp_max : ndarray The maximum sampled values of the predicted log probability (same shape as p) """
X, y = check_X_y(X, y) # Get latent function samples N = X.shape[0] ps = np.empty((N, nsamples)) fsamples = self._sample_func(X, nsamples) # Push samples though likelihood pdf llargs = tuple(chain(atleast_list(self.like_hypers_), likelihood_args)) for i, f in enumerate(fsamples): ps[:, i] = self.likelihood.loglike(y, f, *llargs) # Average transformed samples (MC integration) logp = ps.mean(axis=1) logp_min = ps.min(axis=1) logp_max = ps.max(axis=1) return logp, logp_min, logp_max
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def predict_cdf(self, X, quantile, nsamples=200, likelihood_args=()): r""" Predictive cumulative density function of a Bayesian GLM. Parameters X : ndarray (N*,d) array query input dataset (N* samples, D dimensions). quantile : float The predictive probability, :math:`p(y^* \leq \text{quantile} | \mathbf{x}^*, \mathbf{X}, y)`. nsamples : int, optional Number of samples for sampling the predictive CDF. likelihood_args : sequence, optional sequence of arguments to pass to the likelihood function. These are non-learnable parameters. They can be scalars or arrays of length N*. nsamples : int, optional The number of samples to draw from the posterior in order to approximate the predictive mean and variance. Returns ------- p : ndarray The probability of y* <= quantile for the query inputs, X* of shape (N*,). p_min : ndarray The minimum sampled values of the predicted probability (same shape as p) p_max : ndarray The maximum sampled values of the predicted probability (same shape as p) """
# Get latent function samples N = X.shape[0] ps = np.empty((N, nsamples)) fsamples = self._sample_func(X, nsamples) # Push samples though likelihood cdf cdfarg = tuple(chain(atleast_list(self.like_hypers_), likelihood_args)) for i, f in enumerate(fsamples): ps[:, i] = self.likelihood.cdf(quantile, f, *cdfarg) # Average transformed samples (MC integration) p = ps.mean(axis=1) p_min = ps.min(axis=1) p_max = ps.max(axis=1) return p, p_min, p_max
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cmd_generate(args): """Generate images. Parameters args : `argparse.Namespace` Command arguments. """
check_output_format(args.output, args.count) markov = load(MarkovImage, args.state, args) if args.size is None: if markov.scanner.resize is None: print('Unknown output image size', file=stderr) exit(1) width, height = markov.scanner.resize else: width, height = args.size if args.level is None: scale = markov.scanner.min_size else: scale = reduce( lambda x, y: x * y, islice(markov.scanner.level_scale, 0, args.level - 1), 1 ) width, height = width // scale, height // scale markov.scanner.traversal[0].show_progress = args.progress for fname in outfiles(markov, args.output, args.count, args.progress): img = markov( width, height, state_size=args.state_size, levels=args.level ) save_image(img, fname)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cmd_filter(args): """Filter an image. Parameters args : `argparse.Namespace` Command arguments. """
check_output_format(args.output, args.count) img = Image.open(args.input) width, height = img.size if args.state is not None: markov = load(MarkovImage, args.state, args) else: args.state = () if args.type == JSON: storage = JsonStorage(settings=args.settings) else: storage = SqliteStorage(settings=args.settings) markov = MarkovImage.from_storage(storage) read([args.input], markov, args.progress, False) args.level = min(args.level, markov.levels - 1) - 1 if args.level < 0: args.level = -1 scale = markov.scanner.min_size width, height = width // scale, height // scale start = None else: scale = reduce( lambda x, y: x * y, islice(markov.scanner.level_scale, args.level, markov.levels), 1 ) width, height = width // scale, height // scale start = img.resize((width, height), markov.scanner.scale) for fname in outfiles(markov, args.output, args.count, args.progress, args.level + 1): img = markov( width, height, state_size=args.state_size, start_level=args.level, start_image=start ) save_image(img, fname)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lstrip_ws_and_chars(string, chars): """Remove leading whitespace and characters from a string. Parameters string : `str` String to strip. chars : `str` Characters to remove. Returns ------- `str` Stripped string. Examples -------- 'x. ' """
res = string.lstrip().lstrip(chars) while len(res) != len(string): string = res res = string.lstrip().lstrip(chars) return res
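A quick usage sketch for the helper above (the input string is made up, and it assumes `lstrip_ws_and_chars` as defined here is in scope):

# alternating whitespace and '.' prefixes are stripped until the string stabilises
print(lstrip_ws_and_chars(' \t.\n .x. ', '.'))  # -> 'x. '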
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def capitalize(string): """Capitalize a sentence. Parameters string : `str` String to capitalize. Returns ------- `str` Capitalized string. Examples -------- 'Word word word' """
if not string: return string if len(string) == 1: return string.upper() return string[0].upper() + string[1:].lower()
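A usage sketch, assuming the `capitalize` function above is in scope; note that everything after the first character is lower-cased:

print(capitalize('wORD woRD WORD'))  # -> 'Word word word'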
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def re_flags(flags, custom=ReFlags): """Parse regexp flag string. Parameters flags: `str` Flag string. custom: `IntEnum`, optional Custom flag enum (default: `ReFlags`; pass `None` to disable custom flags). Returns ------- (`int`, `int`) (flags for `re.compile`, custom flags) Raises ------ ValueError """
re_, custom_ = 0, 0 for flag in flags.upper(): try: re_ |= getattr(re, flag) except AttributeError: if custom is not None: try: custom_ |= getattr(custom, flag) except AttributeError: raise ValueError('Invalid custom flag "%s"' % flag) else: raise ValueError('Invalid regexp flag "%s"' % flag) return re_, custom_
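A small usage sketch; it assumes `re_flags` and the module's `ReFlags` enum are in scope, and only exercises standard `re` flag letters:

import re

flags, custom = re_flags('is')
print(flags == (re.I | re.S), custom == 0)  # -> True True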
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def re_flags_str(flags, custom_flags): """Convert regexp flags to string. Parameters flags : `int` Flags. custom_flags : `int` Custom flags. Returns ------- `str` Flag string. """
res = '' for flag in RE_FLAGS: if flags & getattr(re, flag): res += flag for flag in RE_CUSTOM_FLAGS: if custom_flags & getattr(ReFlags, flag): res += flag return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def re_sub(pattern, repl, string, count=0, flags=0, custom_flags=0): """Replace regular expression. Parameters pattern : `str` or `_sre.SRE_Pattern` Compiled regular expression. repl : `str` or `function` Replacement. string : `str` Input string. count: `int` Maximum number of pattern occurrences. flags : `int` Flags. custom_flags : `int` Custom flags. """
if custom_flags & ReFlags.OVERLAP: prev_string = None while string != prev_string: prev_string = string string = re.sub(pattern, repl, string, count, flags) return string return re.sub(pattern, repl, string, count, flags)
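The `OVERLAP` flag simply re-applies the substitution until the string stops changing, which matters when a replacement re-creates the pattern. A hedged sketch, assuming `re_sub` and `ReFlags` from this module are importable:

import re

print(re.sub('aa', 'a', 'aaaa'))                                # 'aa' (single pass)
print(re_sub('aa', 'a', 'aaaa', custom_flags=ReFlags.OVERLAP))  # 'a'  (repeated until stable)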
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convert(self, string): """Return a copy of string converted to case. Parameters string : `str` Returns ------- `str` Examples -------- 'str ing' 'STR ING' 'Str ing' 'sTr InG' """
if self == self.__class__.TITLE: return capitalize(string) if self == self.__class__.UPPER: return string.upper() if self == self.__class__.LOWER: return string.lower() return string
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def endless_permutations(N, random_state=None): """ Generate an endless sequence of random integers from permutations of the set [0, ..., N). If we call this N times, we will sweep through the entire set without replacement; on the (N+1)th call a new permutation will be created, etc. Parameters N: int the length of the set random_state: int or RandomState, optional random seed Yields ------ int: a random int from the set [0, ..., N) """
generator = check_random_state(random_state) while True: batch_inds = generator.permutation(N) for b in batch_inds: yield b
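A usage sketch, assuming `endless_permutations` above is defined: drawing more than N values simply starts a fresh permutation, which makes it handy for streaming mini-batch indices.

perm = endless_permutations(5, random_state=42)
print([next(perm) for _ in range(10)])  # two back-to-back permutations of {0, ..., 4}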
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_repo(path=None, alias=None, create=False): """ Returns a ``Repository`` object of the type linked with the given ``alias`` at the specified ``path``. If ``alias`` is not given, it will try to guess it using the ``get_scm`` method. """
if create: if not (path or alias): raise TypeError("If create is specified, we need path and scm type") return get_backend(alias)(path, create=True) if path is None: path = abspath(os.path.curdir) try: scm, path = get_scm(path, search_up=True) path = abspath(path) alias = scm except VCSError: raise VCSError("No scm found at %s" % path) if alias is None: alias = get_scm(path)[0] backend = get_backend(alias) repo = backend(path, create=create) return repo
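A purely illustrative usage sketch; the paths are hypothetical and the named backend must actually be installed for this to run:

repo = get_repo('/path/to/existing/checkout')                   # backend guessed via get_scm
new_repo = get_repo('/tmp/new-repo', alias='hg', create=True)   # explicit backend, new repository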
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_backend(alias): """ Returns ``Repository`` class identified by the given alias or raises VCSError if alias is not recognized or backend class cannot be imported. """
if alias not in settings.BACKENDS: raise VCSError("Given alias '%s' is not recognized! Allowed aliases:\n" "%s" % (alias, pformat(settings.BACKENDS.keys()))) backend_path = settings.BACKENDS[alias] klass = import_class(backend_path) return klass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_scms_for_path(path): """ Returns all SCMs found at the given path. If no SCM is recognized, an empty list is returned. :param path: path to the directory which should be checked. May be callable. :raises VCSError: if the given ``path`` is not a directory """
from vcs.backends import get_backend if hasattr(path, '__call__'): path = path() if not os.path.isdir(path): raise VCSError("Given path %r is not a directory" % path) result = [] for key in ALIASES: dirname = os.path.join(path, '.' + key) if os.path.isdir(dirname): result.append(key) continue # We still need to check if it's not bare repository as # bare repos don't have working directories try: get_backend(key)(path) result.append(key) continue except RepositoryError: # Wrong backend pass except VCSError: # No backend at all pass return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_repo_paths(path): """ Returns the path's subdirectories which seem to be repositories. """
repo_paths = [] # resolve each entry relative to ``path``, not the current working directory dirnames = (os.path.abspath(os.path.join(path, dirname)) for dirname in os.listdir(path)) for dirname in dirnames: try: get_scm(dirname) repo_paths.append(dirname) except VCSError: pass return repo_paths
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_command(cmd, *args): """ Runs command on the system with given ``args``. """
# args is a tuple of strings, so prepend cmd before joining command = ' '.join((cmd,) + args) p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() return p.returncode, stdout, stderr
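A usage sketch for the fixed helper above; the command is illustrative (POSIX), and stdout/stderr come back as bytes from `Popen`:

retcode, out, err = run_command('ls', '-l', '/tmp')
print(retcode, out[:40])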
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_highlighted_code(name, code, type='terminal'): """ If Pygments is available on the system, the returned output is syntax-highlighted. Otherwise the content is returned unchanged. """
import logging try: import pygments pygments except ImportError: return code from pygments import highlight from pygments.lexers import guess_lexer_for_filename, ClassNotFound from pygments.formatters import TerminalFormatter try: lexer = guess_lexer_for_filename(name, code) formatter = TerminalFormatter() content = highlight(code, lexer, formatter) except ClassNotFound: logging.debug("Couldn't guess Lexer, will not use pygments.") content = code return content
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_datetime(text): """ Parses given text and returns ``datetime.datetime`` instance or raises ``ValueError``. :param text: string of desired date/datetime or something more verbose, like *yesterday*, *2weeks 3days*, etc. """
text = text.strip().lower() INPUT_FORMATS = ( '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d', '%m/%d/%Y %H:%M:%S', '%m/%d/%Y %H:%M', '%m/%d/%Y', '%m/%d/%y %H:%M:%S', '%m/%d/%y %H:%M', '%m/%d/%y', ) for format in INPUT_FORMATS: try: return datetime.datetime(*time.strptime(text, format)[:6]) except ValueError: pass # Try descriptive texts if text == 'tomorrow': future = datetime.datetime.now() + datetime.timedelta(days=1) args = future.timetuple()[:3] + (23, 59, 59) return datetime.datetime(*args) elif text == 'today': return datetime.datetime(*datetime.datetime.today().timetuple()[:3]) elif text == 'now': return datetime.datetime.now() elif text == 'yesterday': past = datetime.datetime.now() - datetime.timedelta(days=1) return datetime.datetime(*past.timetuple()[:3]) else: days = 0 matched = re.match( r'^((?P<weeks>\d+) ?w(eeks?)?)? ?((?P<days>\d+) ?d(ays?)?)?$', text) if matched: groupdict = matched.groupdict() if groupdict['days']: days += int(matched.groupdict()['days']) if groupdict['weeks']: days += int(matched.groupdict()['weeks']) * 7 past = datetime.datetime.now() - datetime.timedelta(days=days) return datetime.datetime(*past.timetuple()[:3]) raise ValueError('Wrong date: "%s"' % text)
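A few illustrative calls, assuming `parse_datetime` above is in scope (results for the relative forms depend on the current date):

print(parse_datetime('2012-02-01 13:20:30'))  # 2012-02-01 13:20:30
print(parse_datetime('today'))                # midnight of the current day
print(parse_datetime('2weeks 3days'))         # midnight, 17 days ago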
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_dict_for_attrs(obj, attrs): """ Returns a dictionary containing the value of each of the given ``attrs`` of ``obj``. """
data = {} for attr in attrs: data[attr] = getattr(obj, attr) return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def loglike(self, y, f): r""" Bernoulli log likelihood. Parameters y: ndarray array of 0, 1 valued integers of targets f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) Returns ------- logp: ndarray the log likelihood of each y given each f under this likelihood. """
# way faster than calling bernoulli.logpmf y, f = np.broadcast_arrays(y, f) ll = y * f - softplus(f) return ll
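A quick numerical check of the identity used above, log p(y|f) = y*f - softplus(f), against `scipy.stats.bernoulli` with p = expit(f); a sketch, not part of the library:

import numpy as np
from scipy.stats import bernoulli
from scipy.special import expit

y = np.array([0, 1, 1])
f = np.array([-2.0, 0.5, 3.0])
direct = y * f - np.logaddexp(0, f)  # softplus(f) = log(1 + exp(f))
print(np.allclose(direct, bernoulli.logpmf(y, expit(f))))  # True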
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def loglike(self, y, f, n): r""" Binomial log likelihood. Parameters y: ndarray array of integer targets, the number of successes out of ``n`` trials f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) n: ndarray the total number of observations Returns ------- logp: ndarray the log likelihood of each y given each f under this likelihood. """
ll = binom.logpmf(y, n=n, p=expit(f)) return ll
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def df(self, y, f, n): r""" Derivative of Binomial log likelihood w.r.t.\ f. Parameters y: ndarray array of integer targets, the number of successes out of ``n`` trials f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) n: ndarray the total number of observations Returns ------- df: ndarray the derivative :math:`\partial \log p(y|f) / \partial f` """
y, f, n = np.broadcast_arrays(y, f, n) return y - expit(f) * n
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def loglike(self, y, f, var=None): r""" Gaussian log likelihood. Parameters y: ndarray array of continuous, real-valued targets f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) var: float, ndarray, optional The variance of the distribution, if not input, the initial value of variance is used. Returns ------- logp: ndarray the log likelihood of each y given each f under this likelihood. """
# way faster than calling norm.logpdf var = self._check_param(var) y, f = np.broadcast_arrays(y, f) ll = - 0.5 * (np.log(2 * np.pi * var) + (y - f)**2 / var) return ll
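The closed form above is just the Gaussian log-density written out; a quick check against `scipy.stats.norm` (a sketch, not part of the library):

import numpy as np
from scipy.stats import norm

y = np.array([0.1, -1.2])
f = np.array([0.0, -1.0])
var = 0.5
direct = -0.5 * (np.log(2 * np.pi * var) + (y - f) ** 2 / var)
print(np.allclose(direct, norm.logpdf(y, loc=f, scale=np.sqrt(var))))  # True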
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def df(self, y, f, var): r""" Derivative of Gaussian log likelihood w.r.t.\ f. Parameters y: ndarray array of continuous, real-valued targets f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) var: float, ndarray, optional The variance of the distribution, if not input, the initial value of variance is used. Returns ------- df: ndarray the derivative :math:`\partial \log p(y|f) / \partial f` """
var = self._check_param(var) y, f = np.broadcast_arrays(y, f) return (y - f) / var
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def loglike(self, y, f): r""" Poisson log likelihood. Parameters y: ndarray array of integer targets f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) Returns ------- logp: ndarray the log likelihood of each y given each f under this likelihood. """
y, f = np.broadcast_arrays(y, f) if self.tranfcn == 'exp': g = np.exp(f) logg = f else: g = softplus(f) logg = np.log(g) return y * logg - g - gammaln(y + 1)
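For the exponential link, the expression above is the Poisson log-pmf with rate exp(f); a quick check against `scipy.stats.poisson` (a sketch, not part of the library):

import numpy as np
from scipy.stats import poisson
from scipy.special import gammaln

y = np.array([0, 2, 5])
f = np.array([-1.0, 0.3, 1.5])
direct = y * f - np.exp(f) - gammaln(y + 1)
print(np.allclose(direct, poisson.logpmf(y, np.exp(f))))  # True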
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Ey(self, f): r""" Expected value of the Poisson likelihood. Parameters f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) Returns ------- Ey: ndarray expected value of y, :math:`\mathbb{E}[\mathbf{y}|\mathbf{f}]`. """
return np.exp(f) if self.tranfcn == 'exp' else softplus(f)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def df(self, y, f): r""" Derivative of Poisson log likelihood w.r.t.\ f. Parameters y: ndarray array of non-negative integer (count) targets f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) Returns ------- df: ndarray the derivative :math:`\partial \log p(y|f) / \partial f` """
y, f = np.broadcast_arrays(y, f) if self.tranfcn == 'exp': return y - np.exp(f) else: return expit(f) * (y / safesoftplus(f) - 1)
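A central-difference check of the exponential-link branch above (df = y - exp(f)); a sketch under that assumption, not part of the library:

import numpy as np

y, f, eps = 3.0, 0.7, 1e-6
loglike = lambda f_: y * f_ - np.exp(f_)  # log p(y|f) up to the constant log(y!)
numeric = (loglike(f + eps) - loglike(f - eps)) / (2 * eps)
print(np.allclose(numeric, y - np.exp(f)))  # True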
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset(self, state_size_changed=False): """Reset parser state. Parameters state_size_changed : `bool`, optional `True` if maximum state size changed (default: `False`). """
if state_size_changed: self.state = deque(repeat('', self.state_size), maxlen=self.state_size) else: self.state.extend(repeat('', self.state_size)) self.end = True
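The reset works because the deque has `maxlen=state_size`, so extending it with empty strings evicts the old tokens; a minimal standalone demonstration:

from collections import deque
from itertools import repeat

state = deque(['a', 'b', 'c'], maxlen=3)
state.extend(repeat('', 3))
print(list(state))  # ['', '', ''] -- previous tokens pushed out by the maxlen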
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convert(ctype, img, palette_img, dither=False): """Convert an image to palette type. Parameters ctype : `int` Conversion type. img : `PIL.Image` Image to convert. palette_img : `PIL.Image` Palette source image. dither : `bool`, optional Enable dithering (default: `False`). Raises ------ ValueError If palette_img has no palette. Returns ------- `PIL.Image` Converted image. """
if ctype == 0: img2 = img.convert(mode='P') img2.putpalette(palette_img.getpalette()) return img2 img.load() palette_img.load() if palette_img.palette is None: raise ValueError('invalid palette image') im = img.im.convert('P', int(dither), palette_img.im) return img._new(im)
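A hedged usage sketch of the converter above; the file names are hypothetical, and the palette source is first converted to mode 'P' so it actually carries a palette:

from PIL import Image

src = Image.open('photo.png').convert('RGB')   # hypothetical input image
pal = Image.open('palette.png').convert('P')   # hypothetical palette source
out = convert(1, src, pal, dither=True)        # non-zero ctype: PIL's internal P-mode conversion path
out.save('photo_indexed.png')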
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _unescape_value(value): """Unescape a value."""
def unescape(c): return { "\\\\": "\\", "\\\"": "\"", "\\n": "\n", "\\t": "\t", "\\b": "\b", }[c.group(0)] return re.sub(r"(\\.)", unescape, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_boolean(self, section, name, default=None): """Retrieve a configuration setting as boolean. :param section: Tuple with section name and optional subsection name :param name: Name of the setting, including section and possible subsection. :return: Contents of the setting as a boolean, or ``default`` if it is not set :raise ValueError: if the stored value is not a valid boolean string """
try: value = self.get(section, name) except KeyError: return default if value.lower() == "true": return True elif value.lower() == "false": return False raise ValueError("not a valid boolean string: %r" % value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_file(cls, f): """Read configuration from a file-like object."""
ret = cls() section = None setting = None for lineno, line in enumerate(f.readlines()): line = line.lstrip() if setting is None: if _strip_comments(line).strip() == "": continue if line[0] == "[": line = _strip_comments(line).rstrip() if line[-1] != "]": raise ValueError("expected trailing ]") key = line.strip() pts = key[1:-1].split(" ", 1) pts[0] = pts[0].lower() if len(pts) == 2: if pts[1][0] != "\"" or pts[1][-1] != "\"": raise ValueError( "Invalid subsection " + pts[1]) else: pts[1] = pts[1][1:-1] if not _check_section_name(pts[0]): raise ValueError("invalid section name %s" % pts[0]) section = (pts[0], pts[1]) else: if not _check_section_name(pts[0]): raise ValueError("invalid section name %s" % pts[0]) pts = pts[0].split(".", 1) if len(pts) == 2: section = (pts[0], pts[1]) else: section = (pts[0], ) ret._values[section] = {} else: if section is None: raise ValueError("setting %r without section" % line) try: setting, value = line.split("=", 1) except ValueError: setting = line value = "true" setting = setting.strip().lower() if not _check_variable_name(setting): raise ValueError("invalid variable name %s" % setting) if value.endswith("\\\n"): value = value[:-2] continuation = True else: continuation = False value = _parse_string(value) ret._values[section][setting] = value if not continuation: setting = None else: # continuation line if line.endswith("\\\n"): line = line[:-2] continuation = True else: continuation = False value = _parse_string(line) ret._values[section][setting] += value if not continuation: setting = None return ret
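A round-trip sketch for the parser above; it assumes the module's private helpers (`_parse_string`, `_strip_comments`, the name checks) and the plain `get` accessor behave as this code expects, and that section keys are tuples as in `get_boolean`:

from io import StringIO

cf = ConfigFile.from_file(StringIO("[core]\n\tbare = false\n"))
print(cf.get(("core",), "bare"))          # 'false'
print(cf.get_boolean(("core",), "bare"))  # False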
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_path(cls, path): """Read configuration from a file on disk."""
f = GitFile(path, 'rb') try: ret = cls.from_file(f) ret.path = path return ret finally: f.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_to_path(self, path=None): """Write configuration to a file on disk."""
if path is None: path = self.path f = GitFile(path, 'wb') try: self.write_to_file(f) finally: f.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_to_file(self, f): """Write configuration to a file-like object."""
for section, values in self._values.iteritems(): try: section_name, subsection_name = section except ValueError: (section_name, ) = section subsection_name = None if subsection_name is None: f.write("[%s]\n" % section_name) else: f.write("[%s \"%s\"]\n" % (section_name, subsection_name)) for key, value in values.iteritems(): f.write("%s = %s\n" % (key, _escape_value(value)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def default_backends(cls): """Retrieve the default configuration. This will look in the repository configuration (if for_path is specified), the users' home directory and the system configuration. """
paths = [] paths.append(os.path.expanduser("~/.gitconfig")) paths.append("/etc/gitconfig") backends = [] for path in paths: try: cf = ConfigFile.from_path(path) except (IOError, OSError), e: if e.errno != errno.ENOENT: raise else: continue backends.append(cf) return backends
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_regression(func, n_samples=100, n_features=1, bias=0.0, noise=0.0, random_state=None): """ Make dataset for a regression problem. Examples -------- (100, 1) (100,) array([[ 1.62], [-0.61], [-0.53], [-1.07], [ 0.87]]) array([ 0.76, 0.48, -0.23, -0.28, 0.83]) """
generator = check_random_state(random_state) X = generator.randn(n_samples, n_features) # unpack the columns of X y = func(*X.T) + bias if noise > 0.0: y += generator.normal(scale=noise, size=y.shape) return X, y
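A usage sketch matching the docstring's example shapes, with `np.sin` as the latent function (any callable over the unpacked columns of X works):

import numpy as np

X, y = make_regression(np.sin, n_samples=100, n_features=1,
                       noise=0.1, random_state=42)
print(X.shape, y.shape)  # (100, 1) (100,)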