Columns: text_prompt (string, 100–17.7k chars), code_prompt (string, 7–9.86k chars)
<SYSTEM_TASK:> Creates a rst.sty Latex string representation of a multi-nuclear RST relation. <END_TASK> <USER_TASK:> Description: def make_multinuc(relname, nucleii): """Creates a rst.sty Latex string representation of a multi-nuclear RST relation."""
    nuc_strings = []
    for nucleus in nucleii:
        nuc_strings.append(
            MULTINUC_ELEMENT_TEMPLATE.substitute(nucleus=nucleus))
    nucleii_string = "\n\t" + "\n\t".join(nuc_strings)
    return MULTINUC_TEMPLATE.substitute(relation=relname,
                                        nucleus_segments=nucleii_string)
<SYSTEM_TASK:> Creates a rst.sty Latex string representation of a multi-satellite RST subtree <END_TASK> <USER_TASK:> Description: def make_multisat(nucsat_tuples): """Creates a rst.sty Latex string representation of a multi-satellite RST subtree (i.e. a set of nucleus-satellite relations that share the same nucleus. """
    nucsat_tuples = [tup for tup in nucsat_tuples]  # unpack the iterable, so we can check its length
    assert len(nucsat_tuples) > 1, \
        "A multisat relation bundle must contain more than one relation"

    result = "\dirrel\n\t"
    first_relation, remaining_relations = nucsat_tuples[0], nucsat_tuples[1:]

    relname, nuc_types, elements = first_relation
    first_nucleus_pos = current_nucleus_pos = nuc_types.index('N')
    result_segments = []

    for i, nuc_type in enumerate(nuc_types):
        if nuc_type == 'N':
            result_segments.append(NUC_TEMPLATE.substitute(nucleus=elements[i]))
        else:
            result_segments.append(SAT_TEMPLATE.substitute(satellite=elements[i], relation=relname))

    for (relname, nuc_types, elements) in remaining_relations:
        for i, nuc_type in enumerate(nuc_types):
            if nuc_type == 'N':
                # all relations share the same nucleus, so we don't need to reprocess it.
                continue
            else:
                result_segment = SAT_TEMPLATE.substitute(satellite=elements[i], relation=relname)
                if i < first_nucleus_pos:  # satellite comes before the nucleus
                    result_segments.insert(current_nucleus_pos, result_segment)
                    current_nucleus_pos += 1
                else:
                    result_segments.append(result_segment)

    return result + '\n\t'.join(result_segments)
<SYSTEM_TASK:> Indents a string by the given amount of characters. <END_TASK> <USER_TASK:> Description: def indent(text, amount, ch=' '): """Indents a string by the given amount of characters."""
    padding = amount * ch
    return ''.join(padding + line for line in text.splitlines(True))
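A quick usage sketch of the helper above (assuming it is importable as shown):

    >>> indent('foo\nbar', 2)
    '  foo\n  bar'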
<SYSTEM_TASK:> returns a list of document IDs used in the PCC <END_TASK> <USER_TASK:> Description: def document_ids(self): """returns a list of document IDs used in the PCC"""
    matches = [PCC_DOCID_RE.match(os.path.basename(fname))
               for fname in self.tokenization]
    return sorted(match.groups()[0] for match in matches)
<SYSTEM_TASK:> given a document ID, returns a merged document graph containing all <END_TASK> <USER_TASK:> Description: def get_document(self, doc_id): """ given a document ID, returns a merged document graph containing all available annotation layers. """
    layer_graphs = []
    for layer_name in self.layers:
        layer_files, read_function = self.layers[layer_name]
        for layer_file in layer_files:
            if fnmatch.fnmatch(layer_file, '*{}.*'.format(doc_id)):
                layer_graphs.append(read_function(layer_file))

    if not layer_graphs:
        raise TypeError("There are no files with that document ID.")
    else:
        doc_graph = layer_graphs[0]
        for layer_graph in layer_graphs[1:]:
            doc_graph.merge_graphs(layer_graph)
    return doc_graph
<SYSTEM_TASK:> returns a list of all files with the given filename pattern in the <END_TASK> <USER_TASK:> Description: def get_files_by_layer(self, layer_name, file_pattern='*'): """ returns a list of all files with the given filename pattern in the given PCC annotation layer """
    layer_path = os.path.join(self.path, layer_name)
    return list(dg.find_files(layer_path, file_pattern))
<SYSTEM_TASK:> Render a string as Markdown only if in an IPython interpreter. <END_TASK> <USER_TASK:> Description: def maybe_render_markdown(string: str) -> Any: """Render a string as Markdown only if in an IPython interpreter."""
    if is_ipython_interpreter():  # pragma: no cover
        from IPython.display import Markdown  # type: ignore  # noqa: E501
        return Markdown(string)
    else:
        return string
<SYSTEM_TASK:> generic command line interface for importers. Will convert the file <END_TASK> <USER_TASK:> Description: def generic_converter_cli(docgraph_class, file_descriptor=''): """ generic command line interface for importers. Will convert the file specified on the command line into a dot representation of the corresponding DiscourseDocumentGraph and write the output to stdout or a file specified on the command line. Parameters ---------- docgraph_class : class a DiscourseDocumentGraph (or a class derived from it), not an instance of it! file_descriptor : str string describing the input format, e.g. 'TigerXML (syntax)' """
    parser = argparse.ArgumentParser()
    parser.add_argument('input_file',
                        help='{} file to be converted'.format(file_descriptor))
    parser.add_argument('output_file', nargs='?', default=sys.stdout)
    args = parser.parse_args(sys.argv[1:])

    assert os.path.isfile(args.input_file), \
        "'{}' isn't a file".format(args.input_file)

    docgraph = docgraph_class(args.input_file)
    write_dot(docgraph, args.output_file)
<SYSTEM_TASK:> Use sysinternals procdump to dump process memory on a specific process. If only the pid is specified, the default <END_TASK> <USER_TASK:> Description: def dump_process_memory(self, pid, working_dir="c:\\windows\\carbonblack\\", path_to_procdump=None): """Use sysinternals procdump to dump process memory on a specific process. If only the pid is specified, the default behavior is to use the version of ProcDump supplied with cbinterface's pip3 installer. :requires: SysInternals ProcDump v9.0 included with cbinterface==1.1.0 :arguments pid: Process id to dump memory for :arguments working_dir: Specify a directory on the windows sensor to work out of. Default: C:\\Windows\\CarbonBlack\\ :arguments path_to_procdump: Specify the path to a version of procdump you want to use. Default is included copy """
    self.go_live()
    print("~ dumping memory where pid={} for {}".format(pid, self.sensor.computer_name))

    # need to make sure procdump.exe is on the sensor
    procdump_host_path = None
    dir_output = self.lr_session.list_directory(working_dir)
    for dir_item in dir_output:
        if dir_item['filename'] == 'procdump.exe':
            logging.info("procdump.exe already on host.")
            procdump_host_path = working_dir + "procdump.exe"
            break
    else:
        logging.info("Dropping procdump.exe on host.")

    if not procdump_host_path:
        if not os.path.exists(path_to_procdump):
            HOME_DIR = os.path.abspath(os.path.join(os.path.realpath(__file__), '..', '..'))
            path_to_procdump = os.path.join(HOME_DIR, 'lr_tools', 'procdump.exe')
            if not os.path.exists(path_to_procdump):
                logging.warn("{} not found".format(path_to_procdump))
                return False
        print("~ dropping procdump.exe on host.")
        filedata = None
        with open(path_to_procdump, 'rb') as f:
            filedata = f.read()
        try:
            self.lr_session.create_directory(working_dir)
        except LiveResponseError:
            logging.debug("working directory already exists")
        self.lr_session.put_file(filedata, working_dir + "procdump.exe")
        procdump_host_path = working_dir + "procdump.exe"

    print("~ Executing procdump..")
    command_str = procdump_host_path + " -accepteula -ma " + str(pid)
    result = self.lr_session.create_process(command_str)
    time.sleep(1)
    print("+ procdump output:\n-------------------------")
    result = result.decode('utf-8')
    print(result + "\n-------------------------")

    # cut off the carriage return and line feed from filename
    dumpfile_name = result[result.rfind('\\')+1:result.rfind('.dmp')+4]
    while True:
        if 'procdump.exe' not in str(self.lr_session.list_processes()):
            break
        else:
            time.sleep(1)
    # download dumpfile to localdir
    self.getFile_with_timeout(working_dir + dumpfile_name)
<SYSTEM_TASK:> extracts the allowed RST relation names and relation types from <END_TASK> <USER_TASK:> Description: def extract_relationtypes(urml_xml_tree): """ extracts the allowed RST relation names and relation types from an URML XML file. Parameters ---------- urml_xml_tree : lxml.etree._ElementTree lxml ElementTree representation of an URML XML file Returns ------- relations : dict of (str, str) Returns a dictionary with RST relation names as keys (str) and relation types (either 'par' or 'hyp') as values (str). """
    return {rel.attrib['name']: rel.attrib['type']
            for rel in urml_xml_tree.iterfind('//header/reltypes/rel')
            if 'type' in rel.attrib}
<SYSTEM_TASK:> List of filters available for the dataset. <END_TASK> <USER_TASK:> Description: def filters(self): """List of filters available for the dataset."""
    if self._filters is None:
        self._filters, self._attributes = self._fetch_configuration()
    return self._filters
<SYSTEM_TASK:> List of default attributes for the dataset. <END_TASK> <USER_TASK:> Description: def default_attributes(self): """List of default attributes for the dataset."""
    if self._default_attributes is None:
        self._default_attributes = {
            name: attr
            for name, attr in self.attributes.items()
            if attr.default is True
        }
    return self._default_attributes
<SYSTEM_TASK:> Lists available attributes in a readable DataFrame format. <END_TASK> <USER_TASK:> Description: def list_attributes(self): """Lists available attributes in a readable DataFrame format. Returns: pd.DataFrame: Frame listing available attributes. """
    def _row_gen(attributes):
        for attr in attributes.values():
            yield (attr.name, attr.display_name, attr.description)

    return pd.DataFrame.from_records(
        _row_gen(self.attributes),
        columns=['name', 'display_name', 'description'])
<SYSTEM_TASK:> Lists available filters in a readable DataFrame format. <END_TASK> <USER_TASK:> Description: def list_filters(self): """Lists available filters in a readable DataFrame format. Returns: pd.DataFrame: Frame listing available filters. """
    def _row_gen(attributes):
        for attr in attributes.values():
            yield (attr.name, attr.type, attr.description)

    return pd.DataFrame.from_records(
        _row_gen(self.filters),
        columns=['name', 'type', 'description'])
<SYSTEM_TASK:> Queries the dataset to retrieve the contained data. <END_TASK> <USER_TASK:> Description: def query(self, attributes=None, filters=None, only_unique=True, use_attr_names=False, dtypes = None ): """Queries the dataset to retrieve the contained data. Args: attributes (list[str]): Names of attributes to fetch in query. Attribute names must correspond to valid attributes. See the attributes property for a list of valid attributes. filters (dict[str,any]): Dictionary of filters --> values to filter the dataset by. Filter names and values must correspond to valid filters and filter values. See the filters property for a list of valid filters. only_unique (bool): Whether to return only rows containing unique values (True) or to include duplicate rows (False). use_attr_names (bool): Whether to use the attribute names as column names in the result (True) or the attribute display names (False). dtypes (dict[str,any]): Dictionary of attributes --> data types to describe to pandas how the columns should be handled Returns: pandas.DataFrame: DataFrame containing the query results. """
    # Example query from Ensembl biomart:
    #
    # <?xml version="1.0" encoding="UTF-8"?>
    # <!DOCTYPE Query>
    # <Query virtualSchemaName = "default" formatter = "TSV" header = "0"
    #  uniqueRows = "0" count = "" datasetConfigVersion = "0.6" >
    #   <Dataset name = "hsapiens_gene_ensembl" interface = "default" >
    #       <Filter name = "chromosome_name" value = "1,2"/>
    #       <Filter name = "end" value = "10000000"/>
    #       <Filter name = "start" value = "1"/>
    #       <Attribute name = "ensembl_gene_id" />
    #       <Attribute name = "ensembl_transcript_id" />
    #   </Dataset>
    # </Query>

    # Setup query element.
    root = ElementTree.Element('Query')
    root.set('virtualSchemaName', self._virtual_schema)
    root.set('formatter', 'TSV')
    root.set('header', '1')
    root.set('uniqueRows', native_str(int(only_unique)))
    root.set('datasetConfigVersion', '0.6')

    # Add dataset element.
    dataset = ElementTree.SubElement(root, 'Dataset')
    dataset.set('name', self.name)
    dataset.set('interface', 'default')

    # Default to default attributes if none requested.
    if attributes is None:
        attributes = list(self.default_attributes.keys())

    # Add attribute elements.
    for name in attributes:
        try:
            attr = self.attributes[name]
            self._add_attr_node(dataset, attr)
        except KeyError:
            raise BiomartException(
                'Unknown attribute {}, check dataset attributes '
                'for a list of valid attributes.'.format(name))

    if filters is not None:
        # Add filter elements.
        for name, value in filters.items():
            try:
                filter_ = self.filters[name]
                self._add_filter_node(dataset, filter_, value)
            except KeyError:
                raise BiomartException(
                    'Unknown filter {}, check dataset filters '
                    'for a list of valid filters.'.format(name))

    # Fetch response.
    response = self.get(query=ElementTree.tostring(root))

    # Raise exception if an error occurred.
    if 'Query ERROR' in response.text:
        raise BiomartException(response.text)

    # Parse results into a DataFrame.
    try:
        result = pd.read_csv(StringIO(response.text), sep='\t', dtype=dtypes)
    # TypeError is raised if a data type is not understood by pandas
    except TypeError as err:
        raise ValueError("Non valid data type is used in dtypes")

    if use_attr_names:
        # Rename columns with attribute names instead of display names.
        column_map = {
            self.attributes[attr].display_name: attr
            for attr in attributes
        }
        result.rename(columns=column_map, inplace=True)

    return result
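A minimal usage sketch, assuming this method belongs to a pybiomart-style Dataset class that is constructed from a dataset name and a BioMart host (names below are illustrative, not verified against any particular release):

    # dataset = Dataset(name='hsapiens_gene_ensembl', host='http://www.ensembl.org')
    # df = dataset.query(attributes=['ensembl_gene_id', 'external_gene_name'],
    #                    filters={'chromosome_name': ['1', '2']})
    # df is a pandas DataFrame with one column per requested attribute.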
<SYSTEM_TASK:> returns a list of all NPs and PPs in the given docgraph. <END_TASK> <USER_TASK:> Description: def get_potential_markables(docgraph): """ returns a list of all NPs and PPs in the given docgraph. Parameters ---------- docgraph : DiscourseDocumentGraph a document graph that (at least) contains syntax trees (imported from Tiger XML files) Returns ------- potential_markables : list of str or int Node IDs of all nodes that represent an NP/PP syntactical category/phrase in the input document. If an NP is embedded in a PP, only the node ID of the PP is returned. """
    potential_markables = []
    for node_id, nattr in dg.select_nodes_by_layer(docgraph, 'tiger:syntax', data=True):
        if nattr['tiger:cat'] == 'NP':
            # if an NP is embedded into a PP, only print the PP
            pp_parent = False
            for source, target in docgraph.in_edges(node_id):
                parent_node = docgraph.node[source]
                if 'tiger:cat' in parent_node and parent_node['tiger:cat'] == 'PP':
                    potential_markables.append(source)  # add parent PP phrase
                    pp_parent = True
            if not pp_parent:
                potential_markables.append(node_id)  # add NP phrase
        elif nattr['tiger:cat'] == 'PP':
            potential_markables.append(node_id)  # add PP phrase
    return potential_markables
<SYSTEM_TASK:> Parses a common_paths.xml file and returns a dictionary of paths, <END_TASK> <USER_TASK:> Description: def _parse_common_paths_file(project_path): """ Parses a common_paths.xml file and returns a dictionary of paths, a dictionary of annotation level descriptions and the filename of the style file. Parameters ---------- project_path : str path to the root directory of the MMAX project Returns ------- paths : dict maps from MMAX file types (str, e.g. 'basedata' or 'markable') to the relative path (str) containing files of this type annotations : dict maps from MMAX annotation level names (str, e.g. 'sentence', 'primmark') to a dict of features. The features are: 'schemefile' (maps to a file), 'customization_file' (ditto) and 'file_extension' (maps to the file name ending used for all annotations files of this level) stylefile : str name of the (default) style file used in this MMAX project """
    common_paths_file = os.path.join(project_path, 'common_paths.xml')
    tree = etree.parse(common_paths_file)

    paths = {}
    path_vars = ['basedata', 'scheme', 'style', 'style', 'customization', 'markable']
    for path_var in path_vars:
        specific_path = tree.find('//{}_path'.format(path_var)).text
        paths[path_var] = specific_path if specific_path else project_path
    paths['project_path'] = project_path

    annotations = {}
    for level in tree.iterfind('//level'):
        annotations[level.attrib['name']] = {
            'schemefile': level.attrib['schemefile'],
            'customization_file': level.attrib['customization_file'],
            'file_extension': level.text[1:]}

    stylesheet = tree.find('//stylesheet').text
    return paths, annotations, stylesheet
<SYSTEM_TASK:> Returns a list of sentence root node IDs and a list of sentences, <END_TASK> <USER_TASK:> Description: def get_sentences_and_token_nodes(self): """ Returns a list of sentence root node IDs and a list of sentences, where each list contains the token node IDs of that sentence. Both lists will be empty if sentences were not annotated in the original MMAX2 data. TODO: Refactor this! There's code overlap with self.add_annotation_layer(). Ideally, we would always import sentence annotations and filter them out in the exporters (e.g. Exmaralda, CoNLL), probably by modifying get_pointing_chains(). Returns ------- sentence_root_nodes : list of str a list of all sentence root node IDs, in the order they occur in the text token_nodes : list of list of str a list of lists. each list represents a sentence and contains token node IDs (in the order they occur in the text) """
    token_nodes = []
    # if sentence annotations were ignored during MMAXDocumentGraph
    # construction, we need to extract sentence/token node IDs manually
    if self.ignore_sentence_annotations:
        mp = self.mmax_project
        layer_dict = mp.annotations['sentence']
        file_id = self.get_file_id(self.name)
        sentence_anno_file = os.path.join(mp.project_path, mp.paths['markable'],
                                          file_id+layer_dict['file_extension'])
        tree = etree.parse(sentence_anno_file)
        root = tree.getroot()
        sentence_root_nodes = []
        for markable in root.iterchildren():
            sentence_root_nodes.append(markable.attrib['id'])

            sentence_token_nodes = []
            for token_id in spanstring2tokens(self, markable.attrib['span']):
                # ignore token IDs that aren't used in the *_words.xml file
                # NOTE: we only need this filter for broken files in the PCC corpus
                if token_id in self.tokens:
                    sentence_token_nodes.append(token_id)
            self.add_node(markable.attrib['id'], layers={self.ns, self.ns+':sentence'})
            token_nodes.append(sentence_token_nodes)
    else:
        sentence_root_nodes = list(select_nodes_by_layer(self, self.ns+':sentence'))
        for sent_node in sentence_root_nodes:
            sentence_token_nodes = []
            for token_id in self.get_token_nodes_from_sentence(sent_node):
                # ignore token IDs that aren't used in the *_words.xml file
                # NOTE: we only need this filter for broken files in the PCC corpus
                if token_id in self.tokens:
                    sentence_token_nodes.append(token_id)
            token_nodes.append(sentence_token_nodes)

    return sentence_root_nodes, token_nodes
<SYSTEM_TASK:> returns a list of token node IDs belonging to the given sentence <END_TASK> <USER_TASK:> Description: def get_token_nodes_from_sentence(self, sentence_root_node): """returns a list of token node IDs belonging to the given sentence"""
    return spanstring2tokens(self, self.node[sentence_root_node][self.ns+':span'])
<SYSTEM_TASK:> parses a _words.xml file, adds every token to the document graph <END_TASK> <USER_TASK:> Description: def add_token_layer(self, words_file, connected): """ parses a _words.xml file, adds every token to the document graph and adds an edge from the MMAX root node to it. Parameters ---------- connected : bool Make the graph connected, i.e. add an edge from root to each token. """
    for word in etree.parse(words_file).iterfind('//word'):
        token_node_id = word.attrib['id']
        self.tokens.append(token_node_id)
        token_str = ensure_unicode(word.text)
        self.add_node(token_node_id,
                      layers={self.ns, self.ns+':token'},
                      attr_dict={self.ns+':token': token_str, 'label': token_str})
        if connected:
            self.add_edge(self.root, token_node_id,
                          layers={self.ns, self.ns+':token'})
<SYSTEM_TASK:> adds all markables from the given annotation layer to the discourse <END_TASK> <USER_TASK:> Description: def add_annotation_layer(self, annotation_file, layer_name): """ adds all markables from the given annotation layer to the discourse graph. """
    assert os.path.isfile(annotation_file), \
        "Annotation file doesn't exist: {}".format(annotation_file)
    tree = etree.parse(annotation_file)
    root = tree.getroot()

    default_layers = {self.ns, self.ns+':markable', self.ns+':'+layer_name}

    # avoids eml.org namespace handling
    for markable in root.iterchildren():
        markable_node_id = markable.attrib['id']
        markable_attribs = add_prefix(markable.attrib, self.ns+':')
        self.add_node(markable_node_id,
                      layers=default_layers,
                      attr_dict=markable_attribs,
                      label=markable_node_id+':'+layer_name)

        for target_node_id in spanstring2tokens(self, markable.attrib['span']):
            # manually add to_node if it's not in the graph, yet
            # cf. issue #39
            if target_node_id not in self:
                self.add_node(target_node_id,
                              # adding 'mmax:layer_name' here could be
                              # misleading (e.g. each token would be part
                              # of the 'mmax:sentence' layer
                              layers={self.ns, self.ns+':markable'},
                              label=target_node_id)

            self.add_edge(markable_node_id, target_node_id,
                          layers=default_layers,
                          edge_type=EdgeTypes.spanning_relation,
                          label=self.ns+':'+layer_name)

        # this is a workaround for Chiarcos-style MMAX files
        if has_antecedent(markable):
            antecedent_pointer = markable.attrib['anaphor_antecedent']
            # mmax2 supports weird double antecedents,
            # e.g. "markable_1000131;markable_1000132", cf. Issue #40
            #
            # handling these double antecendents increases the number of
            # chains, cf. commit edc28abdc4fd36065e8bbf5900eeb4d1326db153
            for antecedent in antecedent_pointer.split(';'):
                ante_split = antecedent.split(":")
                if len(ante_split) == 2:
                    # mark group:markable_n or secmark:markable_n as such
                    edge_label = '{}:antecedent'.format(ante_split[0])
                else:
                    edge_label = ':antecedent'

                # handles both 'markable_n' and 'layer:markable_n'
                antecedent_node_id = ante_split[-1]

                if len(ante_split) == 2:
                    antecedent_layer = ante_split[0]
                    default_layers.add('{0}:{1}'.format(self.ns, antecedent_layer))

                # manually add antecedent node if it's not yet in the graph
                # cf. issue #39
                if antecedent_node_id not in self:
                    self.add_node(antecedent_node_id, layers=default_layers)

                self.add_edge(markable_node_id, antecedent_node_id,
                              layers=default_layers,
                              edge_type=EdgeTypes.pointing_relation,
                              label=self.ns+edge_label)
<SYSTEM_TASK:> return the text of the given EDU subtree, with '_!'-delimiters removed. <END_TASK> <USER_TASK:> Description: def get_edu_text(text_subtree): """return the text of the given EDU subtree, with '_!'-delimiters removed."""
    assert text_subtree.label() == 'text', "text_subtree: {}".format(text_subtree)
    edu_str = u' '.join(word for word in text_subtree.leaves())
    return re.sub('_!(.*?)_!', '\g<1>', edu_str)
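A minimal sketch of how this behaves, assuming nltk is installed and the function above is importable (the example leaves are made up):

    from nltk.tree import ParentedTree
    subtree = ParentedTree('text', ['_!Although', 'they', 'did', "n't", 'like', 'it_!'])
    print(get_edu_text(subtree))  # -> Although they did n't like it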
<SYSTEM_TASK:> return the node ID of the given nucleus or satellite <END_TASK> <USER_TASK:> Description: def get_node_id(nuc_or_sat, namespace=None): """return the node ID of the given nucleus or satellite"""
    node_type = get_node_type(nuc_or_sat)
    if node_type == 'leaf':
        leaf_id = nuc_or_sat[0].leaves()[0]
        if namespace is not None:
            return '{0}:{1}'.format(namespace, leaf_id)
        else:
            return str(leaf_id)

    # else: node_type == 'span'
    span_start = nuc_or_sat[0].leaves()[0]
    span_end = nuc_or_sat[0].leaves()[1]
    if namespace is not None:
        return '{0}:span:{1}-{2}'.format(namespace, span_start, span_end)
    else:
        return 'span:{0}-{1}'.format(span_start, span_end)
<SYSTEM_TASK:> List of datasets in this mart. <END_TASK> <USER_TASK:> Description: def datasets(self): """List of datasets in this mart."""
    if self._datasets is None:
        self._datasets = self._fetch_datasets()
    return self._datasets
<SYSTEM_TASK:> extracts the allowed RST relation names and relation types from <END_TASK> <USER_TASK:> Description: def extract_relationtypes(rs3_xml_tree): """ extracts the allowed RST relation names and relation types from an RS3 XML file. Parameters ---------- rs3_xml_tree : lxml.etree._ElementTree lxml ElementTree representation of an RS3 XML file Returns ------- relations : dict of (str, str) Returns a dictionary with RST relation names as keys (str) and relation types (either 'rst' or 'multinuc') as values (str). """
    return {rel.attrib['name']: rel.attrib['type']
            for rel in rs3_xml_tree.iter('rel')
            if 'type' in rel.attrib}
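A minimal sketch with a hand-written RS3 snippet (assumes lxml is installed; the relation names are illustrative):

    from lxml import etree
    rs3 = ('<rst><header><relations>'
           '<rel name="contrast" type="multinuc"/>'
           '<rel name="elaboration" type="rst"/>'
           '</relations></header><body/></rst>')
    tree = etree.ElementTree(etree.fromstring(rs3))
    extract_relationtypes(tree)  # -> {'contrast': 'multinuc', 'elaboration': 'rst'}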
<SYSTEM_TASK:> returns the source or target node id of an edge, depending on the <END_TASK> <USER_TASK:> Description: def get_node_id(edge, node_type): """ returns the source or target node id of an edge, depending on the node_type given. """
    assert node_type in ('source', 'target')
    _, node_id_str = edge.attrib[node_type].split('.')  # e.g. //@nodes.251
    return int(node_id_str)
<SYSTEM_TASK:> starting from the given node, traverse ingoing edges up to the root element <END_TASK> <USER_TASK:> Description: def traverse_dependencies_up(docgraph, node_id, node_attr=None): """ starting from the given node, traverse ingoing edges up to the root element of the sentence. return the given node attribute from all the nodes visited along the way. """
    # there's only one, but we're in a multidigraph
    source, target = docgraph.in_edges(node_id)[0]
    traverse_attr = node_attr if node_attr else docgraph.lemma_attr

    attrib_value = docgraph.node[source].get(traverse_attr)
    if attrib_value:
        yield attrib_value

    if istoken(docgraph, source) is True:
        for attrib_value in traverse_dependencies_up(docgraph, source,
                                                     traverse_attr):
            yield attrib_value
<SYSTEM_TASK:> adds an ingoing dependency relation from the projected head of a token <END_TASK> <USER_TASK:> Description: def __add_dependency(self, word_instance, sent_id): """ adds an ingoing dependency relation from the projected head of a token to the token itself. """
    # 'head_attr': (projected) head
    head = word_instance.__getattribute__(self.head_attr)
    deprel = word_instance.__getattribute__(self.deprel_attr)
    if head == '0':
        # word represents the sentence root
        source_id = sent_id
    else:
        source_id = '{0}_t{1}'.format(sent_id, head)

    # TODO: fix issue #39, so we don't have to add nodes explicitly
    if source_id not in self.node:
        self.add_node(source_id, layers={self.ns})

    target_id = '{0}_t{1}'.format(sent_id, word_instance.word_id)
    # 'pdeprel': projected dependency relation
    try:
        self.add_edge(source_id, target_id,
                      layers={self.ns, self.ns+':dependency'},
                      relation_type=deprel,
                      label=deprel,
                      edge_type=EdgeTypes.dominance_relation)
    except AssertionError:
        print "source: {0}, target: {1}".format(source_id, target_id)
<SYSTEM_TASK:> Creates mappings from tokens to the markable spans they belong to <END_TASK> <USER_TASK:> Description: def __build_markable_token_mapper(self, coreference_layer=None, markable_layer=None): """ Creates mappings from tokens to the markable spans they belong to and the coreference chains these markables are part of. Returns ------- tok2markables : dict (str -> set of str) Maps from a token (node ID) to all the markables (node IDs) it is part of. markable2toks : dict (str -> list of str) Maps from a markable (node ID) to all the tokens (node IDs) that belong to it. markable2chains : dict (str -> list of int) Maps from a markable (node ID) to all the chains (chain ID) it belongs to. """
    tok2markables = defaultdict(set)
    markable2toks = defaultdict(list)
    markable2chains = defaultdict(list)

    coreference_chains = get_pointing_chains(self.docgraph,
                                             layer=coreference_layer)
    for chain_id, chain in enumerate(coreference_chains):
        for markable_node_id in chain:
            markable2chains[markable_node_id].append(chain_id)

    # ID of the first singleton (if there are any)
    singleton_id = len(coreference_chains)

    # markable2toks/tok2markables shall contains all markables, not only
    # those which are part of a coreference chain
    for markable_node_id in select_nodes_by_layer(self.docgraph,
                                                  markable_layer):
        span = get_span(self.docgraph, markable_node_id)
        markable2toks[markable_node_id] = span
        for token_node_id in span:
            tok2markables[token_node_id].add(markable_node_id)

        # singletons each represent their own chain (with only one element)
        if markable_node_id not in markable2chains:
            markable2chains[markable_node_id] = [singleton_id]
            singleton_id += 1

    return tok2markables, markable2toks, markable2chains
<SYSTEM_TASK:> generates the string that represents the markables and coreference <END_TASK> <USER_TASK:> Description: def __gen_coref_str(self, token_id, markable_id, target_id): """ generates the string that represents the markables and coreference chains that a token is part of. Parameters ---------- token_id : str the node ID of the token markable_id : str the node ID of the markable span target_id : int the ID of the target (either a singleton markable or a coreference chain) Returns ------- coref_str : str a string representing the token's position in a markable span and its membership in one (or more) coreference chains """
    span = self.markable2toks[markable_id]
    coref_str = str(target_id)

    if span.index(token_id) == 0:
        # token is the first element of a markable span
        coref_str = '(' + coref_str

    if span.index(token_id) == len(span)-1:
        # token is the last element of a markable span
        coref_str += ')'

    return coref_str
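For illustration (with a hypothetical chain ID 7): the first token of a multi-token markable yields '(7', a span-internal token yields '7', the last token yields '7)', and a single-token markable yields '(7)'. This follows directly from the two if-branches above and matches the bracketing used in CoNLL-style coreference columns.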
<SYSTEM_TASK:> given a list of ``SaltNode``\s, returns a list of lists, where each list <END_TASK> <USER_TASK:> Description: def extract_sentences(nodes, token_node_indices): """ given a list of ``SaltNode``\s, returns a list of lists, where each list contains the indices of the nodes belonging to that sentence. """
    sents = []
    tokens = []
    for i, node in enumerate(nodes):
        if i in token_node_indices:
            if node.features['tiger.pos'] != '$.':
                tokens.append(i)
            else:  # start a new sentence, if 'tiger.pos' is '$.'
                tokens.append(i)
                sents.append(tokens)
                tokens = []
    return sents
<SYSTEM_TASK:> takes a document graph, converts it into GEXF format and writes it to <END_TASK> <USER_TASK:> Description: def write_gexf(docgraph, output_file): """ takes a document graph, converts it into GEXF format and writes it to a file. """
    dg_copy = deepcopy(docgraph)
    remove_root_metadata(dg_copy)
    layerset2str(dg_copy)
    attriblist2str(dg_copy)
    nx_write_gexf(dg_copy, output_file)
<SYSTEM_TASK:> Yield all nodes that the given node dominates or spans. <END_TASK> <USER_TASK:> Description: def get_child_nodes(docgraph, parent_node_id, data=False): """Yield all nodes that the given node dominates or spans."""
    return select_neighbors_by_edge_attribute(
        docgraph=docgraph,
        source=parent_node_id,
        attribute='edge_type',
        value=[EdgeTypes.dominance_relation],
        data=data)
<SYSTEM_TASK:> Return a list of parent nodes that dominate this child. <END_TASK> <USER_TASK:> Description: def get_parents(docgraph, child_node, strict=True): """Return a list of parent nodes that dominate this child. In a 'syntax tree' a node never has more than one parent node dominating it. To enforce this, set strict=True. Parameters ---------- docgraph : DiscourseDocumentGraph a document graph strict : bool If True, raise a ValueError if a child node is dominated by more than one parent node. Returns ------- parents : list a list of (parent) node IDs. """
    parents = []
    for src, _, edge_attrs in docgraph.in_edges(child_node, data=True):
        if edge_attrs['edge_type'] == EdgeTypes.dominance_relation:
            parents.append(src)

    if strict and len(parents) > 1:
        raise ValueError(("In a syntax tree, a node can't be "
                          "dominated by more than one parent"))
    return parents
<SYSTEM_TASK:> Produce edges in a breadth-first-search starting at source. <END_TASK> <USER_TASK:> Description: def sorted_bfs_edges(G, source=None): """Produce edges in a breadth-first-search starting at source. Neighbors appear in the order a linguist would expect in a syntax tree. The result will only contain edges that express a dominance or spanning relation, i.e. edges expressing pointing or precedence relations will be ignored. Parameters ---------- G : DiscourseDocumentGraph source : node Specify starting node for breadth-first search and return edges in the component reachable from source. Returns ------- edges: generator A generator of edges in the breadth-first-search. """
    if source is None:
        source = G.root

    xpos = horizontal_positions(G, source)

    visited = set([source])
    source_children = get_child_nodes(G, source)
    queue = deque([(source, iter(sorted(source_children,
                                        key=lambda x: xpos[x])))])
    while queue:
        parent, children = queue[0]
        try:
            child = next(children)
            if child not in visited:
                yield parent, child
                visited.add(child)
                grandchildren = get_child_nodes(G, child)
                queue.append((child, iter(sorted(grandchildren,
                                                 key=lambda x: xpos[x]))))
        except StopIteration:
            queue.popleft()
<SYSTEM_TASK:> Return dictionary of successors in breadth-first-search from source. <END_TASK> <USER_TASK:> Description: def sorted_bfs_successors(G, source=None): """Return dictionary of successors in breadth-first-search from source. Parameters ---------- G : DiscourseDocumentGraph graph source : node Specify starting node for breadth-first search and return edges in the component reachable from source. Returns ------- successors: dict A dictionary with nodes as keys and lists of successor nodes as values. """
    if source is None:
        source = G.root

    successors = defaultdict(list)
    for src, target in sorted_bfs_edges(G, source):
        successors[src].append(target)
    return dict(successors)
<SYSTEM_TASK:> convert a docgraph into a PTB-style string. <END_TASK> <USER_TASK:> Description: def tree2bracket(docgraph, root=None, successors=None): """convert a docgraph into a PTB-style string. If root (a node ID) is given, only convert the subgraph that this node domintes/spans into a PTB-style string. """
    if root is None:
        root = docgraph.root
    if successors is None:
        successors = sorted_bfs_successors(docgraph, root)

    if root in successors:
        embed_str = u" ".join(tree2bracket(docgraph, child, successors)
                              for child in successors[root])
        return node2bracket(docgraph, root, embed_str)
    return node2bracket(docgraph, root)
<SYSTEM_TASK:> line-wrap an NLTK ParentedTree for pretty-printing <END_TASK> <USER_TASK:> Description: def word_wrap_tree(parented_tree, width=0): """line-wrap an NLTK ParentedTree for pretty-printing"""
    if width != 0:
        for i, leaf_text in enumerate(parented_tree.leaves()):
            dedented_text = textwrap.dedent(leaf_text).strip()
            parented_tree[parented_tree.leaf_treeposition(i)] = \
                textwrap.fill(dedented_text, width=width)
    return parented_tree
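A minimal usage sketch (assumes nltk is installed and the function above is importable; the sample leaf text is made up):

    from nltk.tree import ParentedTree
    tree = ParentedTree('S', ['this is a rather long EDU that should be wrapped'])
    wrapped = word_wrap_tree(tree, width=20)
    # each leaf is now a multi-line string folded to at most 20 characters per line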
<SYSTEM_TASK:> Get the linear position of an element of this DGParentedTree in an RSTTree. <END_TASK> <USER_TASK:> Description: def get_position(self, rst_tree, node_id=None): """Get the linear position of an element of this DGParentedTree in an RSTTree. If ``node_id`` is given, this will return the position of the subtree with that node ID. Otherwise, the position of the root of this DGParentedTree in the given RSTTree is returned. """
    if node_id is None:
        node_id = self.root_id

    if node_id in rst_tree.edu_set:
        return rst_tree.edus.index(node_id)

    return min(self.get_position(rst_tree, child_node_id)
               for child_node_id in rst_tree.child_dict[node_id])
<SYSTEM_TASK:> Performs get request to the biomart service. <END_TASK> <USER_TASK:> Description: def get(self, **params): """Performs get request to the biomart service. Args: **params (dict of str: any): Arbitrary keyword arguments, which are added as parameters to the get request to biomart. Returns: requests.models.Response: Response from biomart for the request. """
    if self._use_cache:
        r = requests.get(self.url, params=params)
    else:
        with requests_cache.disabled():
            r = requests.get(self.url, params=params)
    r.raise_for_status()
    return r
<SYSTEM_TASK:> create a PTBDocumentGraph from a string containing PTB parses. <END_TASK> <USER_TASK:> Description: def fromstring(cls, ptb_string, namespace='ptb', precedence=False, ignore_traces=True): """create a PTBDocumentGraph from a string containing PTB parses."""
    temp = tempfile.NamedTemporaryFile(delete=False)
    temp.write(ptb_string)
    temp.close()
    ptb_docgraph = cls(ptb_filepath=temp.name, namespace=namespace,
                       precedence=precedence, ignore_traces=ignore_traces)
    os.unlink(temp.name)
    return ptb_docgraph
<SYSTEM_TASK:> add a sentence from the input document to the document graph. <END_TASK> <USER_TASK:> Description: def _add_sentence(self, sentence, ignore_traces=True): """ add a sentence from the input document to the document graph. Parameters ---------- sentence : nltk.tree.Tree a sentence represented by a Tree instance """
    self.sentences.append(self._node_id)
    # add edge from document root to sentence root
    self.add_edge(self.root, self._node_id,
                  edge_type=dg.EdgeTypes.dominance_relation)
    self._parse_sentencetree(sentence, ignore_traces=ignore_traces)
    self._node_id += 1
<SYSTEM_TASK:> parse a sentence Tree into this document graph <END_TASK> <USER_TASK:> Description: def _parse_sentencetree(self, tree, parent_node_id=None, ignore_traces=True): """parse a sentence Tree into this document graph"""
    def get_nodelabel(node):
        if isinstance(node, nltk.tree.Tree):
            return node.label()
        elif isinstance(node, unicode):
            return node.encode('utf-8')
        else:
            raise ValueError("Unexpected node type: {0}, {1}".format(type(node), node))

    root_node_id = self._node_id
    self.node[root_node_id]['label'] = get_nodelabel(tree)

    for subtree in tree:
        self._node_id += 1
        node_label = get_nodelabel(subtree)
        # unescape the node label, if necessary
        node_label = PTB_BRACKET_UNESCAPE.get(node_label, node_label)
        # TODO: refactor this, so we don't need to query this all the time
        if ignore_traces and node_label == '-NONE-':
            # ignore tokens annotated for traces
            continue

        if isinstance(subtree, nltk.tree.Tree):
            if len(subtree) > 1:  # subtree is a syntactic category
                node_attrs = {'label': node_label,
                              self.ns+':cat': node_label}
                layers = {self.ns, self.ns+':syntax'}
            else:  # subtree represents a token and its POS tag
                node_attrs = {'label': node_label}
                layers = {self.ns}

            edge_type = dg.EdgeTypes.dominance_relation
            self.add_node(self._node_id, layers=layers,
                          attr_dict=node_attrs)
            self.add_edge(root_node_id, self._node_id, edge_type=edge_type)

        else:  # isinstance(subtree, unicode); subtree is a token
            # we'll have to modify the parent node of a token, since
            # in NLTK Trees, even a leaf node (with its POS tag) is
            # represented as a Tree (an iterator over a single unicode
            # string), e.g. ``Tree('NNS', ['prices'])``
            pos_tag = self.node[parent_node_id]['label']
            token_attrs = {
                'label': node_label, self.ns+':token': node_label,
                self.ns+':pos': pos_tag}
            self.node[parent_node_id].update(token_attrs)
            self.tokens.append(parent_node_id)

        if isinstance(subtree, nltk.tree.Tree):
            self._parse_sentencetree(subtree, parent_node_id=self._node_id)
<SYSTEM_TASK:> given a Salt XML element, returns a corresponding `SaltElement` class <END_TASK> <USER_TASK:> Description: def create_class_instance(element, element_id, doc_id): """ given a Salt XML element, returns a corresponding `SaltElement` class instance, i.e. a SaltXML `SToken` node will be converted into a `TokenNode`. Parameters ---------- element : lxml.etree._Element an `etree._Element` is the XML representation of a Salt element, e.g. a single 'nodes' or 'edges' element element_id : int the index of element (used to connect edges to nodes) doc_id : str the ID of the SaltXML document Returns ------- salt_element : SaltElement an instance of a `SaltElement` subclass instance, e.g. a `TokenNode`, `TextualRelation` or `SaltLayer` """
    xsi_type = get_xsi_type(element)
    element_class = XSI_TYPE_CLASSES[xsi_type]
    return element_class.from_etree(element)
<SYSTEM_TASK:> returns a list of absolute filepaths for all files found in the given <END_TASK> <USER_TASK:> Description: def abslistdir(directory): """ returns a list of absolute filepaths for all files found in the given directory. """
    abs_dir = os.path.abspath(directory)
    filenames = os.listdir(abs_dir)
    return [os.path.join(abs_dir, filename) for filename in filenames]
<SYSTEM_TASK:> extracts all elements of type `element_type` from the `_ElementTree` <END_TASK> <USER_TASK:> Description: def _extract_elements(self, tree, element_type): """ extracts all elements of type `element_type` from the `_ElementTree` representation of a SaltXML document and adds them to the corresponding `SaltDocument` attributes, i.e. `self.nodes`, `self.edges` and `self.layers`. Parameters ---------- tree : lxml.etree._ElementTree an ElementTree that represents a complete SaltXML document element_type : str the tag name of a SaltXML element, e.g. `nodes` or `edges` """
    # creates a new attribute, e.g. 'self.nodes' and assigns it an
    # empty list
    setattr(self, element_type, [])
    etree_elements = get_elements(tree, element_type)
    for i, etree_element in enumerate(etree_elements):
        # create an instance of an element class (e.g. TokenNode)
        salt_element = create_class_instance(etree_element, i, self.doc_id)
        # and add it to the corresponding element type list,
        # e.g. 'self.nodes'
        getattr(self, element_type).append(salt_element)
<SYSTEM_TASK:> returns the string representation of a sentence. <END_TASK> <USER_TASK:> Description: def print_sentence(self, sent_index): """ returns the string representation of a sentence. :param sent_index: the index of a sentence (from ``self.sentences``) :type sent_index: int :return: the sentence string :rtype: str """
    tokens = [self.print_token(tok_idx)
              for tok_idx in self.sentences[sent_index]]
    return ' '.join(tokens)
<SYSTEM_TASK:> returns the string representation of a token. <END_TASK> <USER_TASK:> Description: def print_token(self, token_node_index): """returns the string representation of a token."""
    err_msg = "The given node is not a token node."
    assert isinstance(self.nodes[token_node_index], TokenNode), err_msg
    onset = self.nodes[token_node_index].onset
    offset = self.nodes[token_node_index].offset
    return self.text[onset:offset]
<SYSTEM_TASK:> convert a docgraph node into a FREQT string. <END_TASK> <USER_TASK:> Description: def node2freqt(docgraph, node_id, child_str='', include_pos=False, escape_func=FREQT_ESCAPE_FUNC): """convert a docgraph node into a FREQT string."""
    node_attrs = docgraph.node[node_id]
    if istoken(docgraph, node_id):
        token_str = escape_func(node_attrs[docgraph.ns+':token'])
        if include_pos:
            pos_str = escape_func(node_attrs.get(docgraph.ns+':pos', ''))
            return u"({pos}({token}){child})".format(
                pos=pos_str, token=token_str, child=child_str)
        else:
            return u"({token}{child})".format(token=token_str, child=child_str)

    else:  # node is not a token
        label_str = escape_func(node_attrs.get('label', node_id))
        return u"({label}{child})".format(label=label_str, child=child_str)
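For illustration of the format strings above (token and label values are made up): with include_pos=True, a token node for "prices" tagged NNS is rendered as "(NNS(prices))"; without POS it is just "(prices)"; a phrase node labelled NP whose only child is that token becomes "(NP(NNS(prices)))", which is the bracketed tree encoding FREQT expects.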
<SYSTEM_TASK:> convert a sentence subgraph into a FREQT string. <END_TASK> <USER_TASK:> Description: def sentence2freqt(docgraph, root, successors=None, include_pos=False, escape_func=FREQT_ESCAPE_FUNC): """convert a sentence subgraph into a FREQT string."""
    if successors is None:
        successors = sorted_bfs_successors(docgraph, root)

    if root in successors:  # root node has children / subgraphs
        embed_str = u"".join(sentence2freqt(docgraph, child, successors,
                                            include_pos=include_pos,
                                            escape_func=escape_func)
                             for child in successors[root])
        return node2freqt(
            docgraph, root, embed_str, include_pos=include_pos,
            escape_func=escape_func)
    else:  # root node has no children / subgraphs
        return node2freqt(docgraph, root, include_pos=include_pos,
                          escape_func=escape_func)
<SYSTEM_TASK:> convert a docgraph into a FREQT string. <END_TASK> <USER_TASK:> Description: def docgraph2freqt(docgraph, root=None, include_pos=False, escape_func=FREQT_ESCAPE_FUNC): """convert a docgraph into a FREQT string."""
    if root is None:
        return u"\n".join(
            sentence2freqt(docgraph, sentence, include_pos=include_pos,
                           escape_func=escape_func)
            for sentence in docgraph.sentences)
    else:
        return sentence2freqt(docgraph, root, include_pos=include_pos,
                              escape_func=escape_func)
<SYSTEM_TASK:> creates a `SaltElement` from an `etree._Element` representing <END_TASK> <USER_TASK:> Description: def from_etree(cls, etree_element): """ creates a `SaltElement` from an `etree._Element` representing an element in a SaltXMI file. """
    label_elements = get_subelements(etree_element, 'labels')
    labels = [SaltLabel.from_etree(elem) for elem in label_elements]
    return cls(name=get_element_name(etree_element),
               element_id=get_graph_element_id(etree_element),
               xsi_type=get_xsi_type(etree_element),
               labels=labels,
               xml=etree_element)
<SYSTEM_TASK:> Assigns a random token to the user's session <END_TASK> <USER_TASK:> Description: def grant_sudo_privileges(request, max_age=COOKIE_AGE): """ Assigns a random token to the user's session that allows them to have elevated permissions """
    user = getattr(request, 'user', None)

    # If there's not a user on the request, just noop
    if user is None:
        return

    if not user.is_authenticated():
        raise ValueError('User needs to be logged in to be elevated to sudo')

    # Token doesn't need to be unique,
    # just needs to be unpredictable and match the cookie and the session
    token = get_random_string()
    request.session[COOKIE_NAME] = token
    request._sudo = True
    request._sudo_token = token
    request._sudo_max_age = max_age
    return token
<SYSTEM_TASK:> Revoke sudo privileges from a request explicitly <END_TASK> <USER_TASK:> Description: def revoke_sudo_privileges(request): """ Revoke sudo privileges from a request explicitly """
    request._sudo = False
    if COOKIE_NAME in request.session:
        del request.session[COOKIE_NAME]
<SYSTEM_TASK:> Check if a request is allowed to perform sudo actions <END_TASK> <USER_TASK:> Description: def has_sudo_privileges(request): """ Check if a request is allowed to perform sudo actions """
    if getattr(request, '_sudo', None) is None:
        try:
            request._sudo = (
                request.user.is_authenticated() and
                constant_time_compare(
                    request.get_signed_cookie(COOKIE_NAME, salt=COOKIE_SALT,
                                              max_age=COOKIE_AGE),
                    request.session[COOKIE_NAME]
                )
            )
        except (KeyError, BadSignature):
            request._sudo = False
    return request._sudo
<SYSTEM_TASK:> Convert the tree from HILDA's format into a conventional binary tree, <END_TASK> <USER_TASK:> Description: def hildatree2dgparentedtree(self): """Convert the tree from HILDA's format into a conventional binary tree, which can be easily converted into output formats like RS3. """
    def transform(hilda_tree):
        """Transform a HILDA parse tree into a more conventional parse tree.

        The input tree::

                         Contrast[S][N]
             _______________|______________
            Although they            they accepted
            did n't like it ,        the offer .

        is converted into::

                         Contrast
             ____________|___________
            S                        N
            |                        |
            Although they            they accepted
            did n't like it ,        the offer .
        """
        if isinstance(hilda_tree, basestring) or not hasattr(hilda_tree, 'label'):
            return hilda_tree

        assert len(hilda_tree) == 2, "We can only handle binary trees."

        match = HILDA_REL_RE.match(hilda_tree.label())
        assert match, "Relation '{}' does not match regex '{}'".format(
            hilda_tree.label(), HILDA_REL_RE)
        relname, left_child_nuc, right_child_nuc = match.groups()
        hilda_tree._label = relname

        for i, child_nuclearity in enumerate([left_child_nuc, right_child_nuc]):
            child = hilda_tree[i]
            hilda_tree[i] = Tree(child_nuclearity, [transform(child)])
        return hilda_tree

    tree = transform(self.hildafile_tree)
    return DGParentedTree.convert(tree)
<SYSTEM_TASK:> Redirect to a given url while setting the chosen theme in the session or cookie. The url and the theme identifier <END_TASK> <USER_TASK:> Description: def set_theme(request): """ Redirect to a given url while setting the chosen theme in the session or cookie. The url and the theme identifier need to be specified in the request parameters. Since this view changes how the user will see the rest of the site, it must only be accessed as a POST request. If called as a GET request, it will redirect to the page in the request (the 'next' parameter) without changing any state. """
    next = request.POST.get('next', request.GET.get('next'))
    if not is_safe_url(url=next, host=request.get_host()):
        next = request.META.get('HTTP_REFERER')
        if not is_safe_url(url=next, host=request.get_host()):
            next = '/'
    response = http.HttpResponseRedirect(next)

    if request.method == 'POST':
        theme = request.POST.get('theme', None)
        if theme:
            if hasattr(request, 'session'):
                request.session['DJANGO_BOOTSTRAP_UI_THEME'] = theme
            else:
                response.set_cookie('DJANGO_BOOTSTRAP_UI_THEME', theme)

    return response
<SYSTEM_TASK:> A function that will read python rc files and make a ui from the read options <END_TASK> <USER_TASK:> Description: def make_ui(self, path='hgwebdir.config'): """ A function that will read python rc files and make a ui from the read options :param path: path to mercurial config file """
    #propagated from mercurial documentation
    sections = [
        'alias', 'auth', 'decode/encode', 'defaults', 'diff', 'email',
        'extensions', 'format', 'merge-patterns', 'merge-tools', 'hooks',
        'http_proxy', 'smtp', 'patch', 'paths', 'profiling', 'server',
        'trusted', 'ui', 'web',
    ]

    repos = path
    baseui = ui.ui()
    cfg = config.config()
    cfg.read(repos)
    self.paths = cfg.items('paths')
    self.base_path = self.paths[0][1].replace('*', '')
    self.check_repo_dir(self.paths)
    self.set_statics(cfg)

    for section in sections:
        for k, v in cfg.items(section):
            baseui.setconfig(section, k, v)

    return baseui
<SYSTEM_TASK:> Returns modified, added, removed, deleted files for current changeset <END_TASK> <USER_TASK:> Description: def status(self): """ Returns modified, added, removed, deleted files for current changeset """
    return self.repository._repo.status(self._ctx.p1().node(),
                                        self._ctx.node())
<SYSTEM_TASK:> Paths are stored without trailing slash so we need to get rid off it if <END_TASK> <USER_TASK:> Description: def _fix_path(self, path): """ Paths are stored without trailing slash so we need to get rid off it if needed. Also mercurial keeps filenodes as str so we need to decode from unicode to str """
    if path.endswith('/'):
        path = path.rstrip('/')
    return safe_str(path)
<SYSTEM_TASK:> Returns combined ``DirNode`` and ``FileNode`` objects list representing <END_TASK> <USER_TASK:> Description: def get_nodes(self, path): """ Returns combined ``DirNode`` and ``FileNode`` objects list representing state of changeset at the given ``path``. If node at the given ``path`` is not instance of ``DirNode``, ChangesetError would be raised. """
    if self._get_kind(path) != NodeKind.DIR:
        raise ChangesetError("Directory does not exist for revision %s at "
                             " '%s'" % (self.revision, path))
    path = self._fix_path(path)

    filenodes = [FileNode(f, changeset=self) for f in self._file_paths
                 if os.path.dirname(f) == path]
    dirs = path == '' and '' or [d for d in self._dir_paths
                                 if d and posixpath.dirname(d) == path]
    dirnodes = [DirNode(d, changeset=self) for d in dirs
                if os.path.dirname(d) == path]

    als = self.repository.alias
    for k, vals in self._extract_submodules().iteritems():
        #vals = url,rev,type
        loc = vals[0]
        cs = vals[1]
        dirnodes.append(SubModuleNode(k, url=loc, changeset=cs,
                                      alias=als))
    nodes = dirnodes + filenodes
    # cache nodes
    for node in nodes:
        self.nodes[node.path] = node
    nodes.sort()

    return nodes
<SYSTEM_TASK:> Returns ``Node`` object from the given ``path``. If there is no node at <END_TASK> <USER_TASK:> Description: def get_node(self, path): """ Returns ``Node`` object from the given ``path``. If there is no node at the given ``path``, ``ChangesetError`` would be raised. """
    path = self._fix_path(path)
    if not path in self.nodes:
        if path in self._file_paths:
            node = FileNode(path, changeset=self)
        elif path in self._dir_paths or path in self._dir_paths:
            if path == '':
                node = RootNode(changeset=self)
            else:
                node = DirNode(path, changeset=self)
        else:
            raise NodeDoesNotExistError("There is no file nor directory "
                "at the given path: '%s' at revision %s"
                % (path, self.short_id))
        # cache node
        self.nodes[path] = node
    return self.nodes[path]
<SYSTEM_TASK:> Returns lazily content of the FileNode. If possible, would try to <END_TASK> <USER_TASK:> Description: def content(self): """ Returns lazily content of the FileNode. If possible, would try to decode content from UTF-8. """
    content = self._get_content()

    if bool(content and '\0' in content):
        return content
    return safe_unicode(content)
<SYSTEM_TASK:> Returns pygment's lexer class. Would try to guess lexer taking file's <END_TASK> <USER_TASK:> Description: def lexer(self): """ Returns pygment's lexer class. Would try to guess lexer taking file's content, name and mimetype. """
    try:
        lexer = lexers.guess_lexer_for_filename(self.name, self.content, stripnl=False)
    except lexers.ClassNotFound:
        lexer = lexers.TextLexer(stripnl=False)
    # returns first alias
    return lexer
<SYSTEM_TASK:> Returns a list of changeset for this file in which the file was changed <END_TASK> <USER_TASK:> Description: def history(self): """ Returns a list of changeset for this file in which the file was changed """
    if self.changeset is None:
        raise NodeError('Unable to get changeset for this FileNode')
    return self.changeset.get_file_history(self.path)
<SYSTEM_TASK:> Returns a list of three element tuples with lineno,changeset and line <END_TASK> <USER_TASK:> Description: def annotate(self): """ Returns a list of three element tuples with lineno,changeset and line """
    if self.changeset is None:
        raise NodeError('Unable to get changeset for this FileNode')
    return self.changeset.get_file_annotate(self.path)
<SYSTEM_TASK:> Returns the name of the node, so if it's a path <END_TASK> <USER_TASK:> Description: def name(self): """ Returns the name of the node, so if it's a path, only the last part is returned. """
    org = safe_unicode(self.path.rstrip('/').split('/')[-1])
    return u'%s @ %s' % (org, self.changeset.short_id)
<SYSTEM_TASK:> Predict target values from Bayesian generalized linear regression. <END_TASK> <USER_TASK:> Description: def predict(self, X, nsamples=200, likelihood_args=()): """ Predict target values from Bayesian generalized linear regression. Parameters ---------- X : ndarray (N*,d) array query input dataset (N* samples, d dimensions). nsamples : int, optional Number of samples for sampling the expected target values from the predictive distribution. likelihood_args : sequence, optional sequence of arguments to pass to the likelihood function. These are non-learnable parameters. They can be scalars or arrays of length N. Returns ------- Ey : ndarray The expected value of y* for the query inputs, X* of shape (N*,). """
    Ey, _ = self.predict_moments(X, nsamples, likelihood_args)
    return Ey
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def predict_moments(self, X, nsamples=200, likelihood_args=()): r""" Predictive moments, in particular mean and variance, of a Bayesian GLM. This function uses Monte-Carlo sampling to evaluate the predictive mean and variance of a Bayesian GLM. The exact expressions evaluated are, .. math :: \mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y] &= \int \mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)] p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w}, \mathbb{V}[y^* | \mathbf{x^*}, \mathbf{X}, y] &= \int \left(\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)] - \mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y]\right)^2 p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w}, where :math:`\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]` is the the expected value of :math:`y^*` from the likelihood, and :math:`p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi)` is the posterior distribution over weights (from ``learn``). Here are few concrete examples of how we can use these values, - Gaussian likelihood: these are just the predicted mean and variance, see ``revrand.regression.predict`` - Bernoulli likelihood: The expected value is the probability, :math:`p(y^* = 1)`, i.e. the probability of class one. The variance may not be so useful. - Poisson likelihood: The expected value is similar conceptually to the Gaussian case, and is also a *continuous* value. The median (50% quantile) from ``predict_interval`` is a discrete value. Again, the variance in this instance may not be so useful. Parameters ---------- X : ndarray (N*,d) array query input dataset (N* samples, d dimensions). nsamples : int, optional Number of samples for sampling the expected moments from the predictive distribution. likelihood_args : sequence, optional sequence of arguments to pass to the likelihood function. These are non-learnable parameters. They can be scalars or arrays of length N. Returns ------- Ey : ndarray The expected value of y* for the query inputs, X* of shape (N*,). Vy : ndarray The expected variance of y* (excluding likelihood noise terms) for the query inputs, X* of shape (N*,). """
    # Get latent function samples
    N = X.shape[0]
    ys = np.empty((N, nsamples))
    fsamples = self._sample_func(X, nsamples)

    # Push samples though likelihood expected value
    Eyargs = tuple(chain(atleast_list(self.like_hypers_), likelihood_args))
    for i, f in enumerate(fsamples):
        ys[:, i] = self.likelihood.Ey(f, *Eyargs)

    # Average transformed samples (MC integration)
    Ey = ys.mean(axis=1)
    Vy = ((ys - Ey[:, np.newaxis])**2).mean(axis=1)

    return Ey, Vy
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def predict_logpdf(self, X, y, nsamples=200, likelihood_args=()): r""" Predictive log-probability density function of a Bayesian GLM. Parameters ---------- X : ndarray (N*,d) array query input dataset (N* samples, D dimensions). y : float or ndarray The test observations of shape (N*,) to evaluate under, :math:`\log p(y^* |\mathbf{x}^*, \mathbf{X}, y)`. nsamples : int, optional Number of samples for sampling the log predictive distribution. likelihood_args : sequence, optional sequence of arguments to pass to the likelihood function. These are non-learnable parameters. They can be scalars or arrays of length N*. Returns ------- logp : ndarray The log probability of y* given X* of shape (N*,). logp_min : ndarray The minimum sampled values of the predicted log probability (same shape as p) logp_max : ndarray The maximum sampled values of the predicted log probability (same shape as p) """
X, y = check_X_y(X, y) # Get latent function samples N = X.shape[0] ps = np.empty((N, nsamples)) fsamples = self._sample_func(X, nsamples) # Push samples though likelihood pdf llargs = tuple(chain(atleast_list(self.like_hypers_), likelihood_args)) for i, f in enumerate(fsamples): ps[:, i] = self.likelihood.loglike(y, f, *llargs) # Average transformed samples (MC integration) logp = ps.mean(axis=1) logp_min = ps.min(axis=1) logp_max = ps.max(axis=1) return logp, logp_min, logp_max
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def predict_cdf(self, X, quantile, nsamples=200, likelihood_args=()): r""" Predictive cumulative density function of a Bayesian GLM. Parameters ---------- X : ndarray (N*,d) array query input dataset (N* samples, D dimensions). quantile : float The predictive probability, :math:`p(y^* \leq \text{quantile} | \mathbf{x}^*, \mathbf{X}, y)`. nsamples : int, optional Number of samples for sampling the predictive CDF. likelihood_args : sequence, optional sequence of arguments to pass to the likelihood function. These are non-learnable parameters. They can be scalars or arrays of length N*. nsamples : int, optional The number of samples to draw from the posterior in order to approximate the predictive mean and variance. Returns ------- p : ndarray The probability of y* <= quantile for the query inputs, X* of shape (N*,). p_min : ndarray The minimum sampled values of the predicted probability (same shape as p) p_max : ndarray The maximum sampled values of the predicted probability (same shape as p) """
# Get latent function samples N = X.shape[0] ps = np.empty((N, nsamples)) fsamples = self._sample_func(X, nsamples) # Push samples though likelihood cdf cdfarg = tuple(chain(atleast_list(self.like_hypers_), likelihood_args)) for i, f in enumerate(fsamples): ps[:, i] = self.likelihood.cdf(quantile, f, *cdfarg) # Average transformed samples (MC integration) p = ps.mean(axis=1) p_min = ps.min(axis=1) p_max = ps.max(axis=1) return p, p_min, p_max
<SYSTEM_TASK:> Remove leading whitespace and characters from a string. <END_TASK> <USER_TASK:> Description: def lstrip_ws_and_chars(string, chars): """Remove leading whitespace and characters from a string. Parameters ---------- string : `str` String to strip. chars : `str` Characters to remove. Returns ------- `str` Stripped string. Examples -------- >>> lstrip_ws_and_chars(' \\t.\\n , .x. ', '.,?!') 'x. ' """
res = string.lstrip().lstrip(chars) while len(res) != len(string): string = res res = string.lstrip().lstrip(chars) return res
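A few illustrative calls (assuming the function above is in scope) showing why the loop is needed: whitespace and the strip characters can alternate, so a single `lstrip` pass would stop too early.

print(lstrip_ws_and_chars(' \t.\n , .x. ', '.,?!'))   # 'x. '
print(lstrip_ws_and_chars('...  !!  word!', '.,?!'))  # 'word!'
print(lstrip_ws_and_chars('', '.,?!'))                # ''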
<SYSTEM_TASK:> Capitalize a sentence. <END_TASK> <USER_TASK:> Description: def capitalize(string): """Capitalize a sentence. Parameters ---------- string : `str` String to capitalize. Returns ------- `str` Capitalized string. Examples -------- >>> capitalize('worD WORD WoRd') 'Word word word' """
if not string: return string if len(string) == 1: return string.upper() return string[0].upper() + string[1:].lower()
<SYSTEM_TASK:> Replace regular expression. <END_TASK> <USER_TASK:> Description: def re_sub(pattern, repl, string, count=0, flags=0, custom_flags=0): """Replace regular expression. Parameters ---------- pattern : `str` or `_sre.SRE_Pattern` Compiled regular expression. repl : `str` or `function` Replacement. string : `str` Input string. count: `int` Maximum number of pattern occurrences. flags : `int` Flags. custom_flags : `int` Custom flags. """
if custom_flags & ReFlags.OVERLAP: prev_string = None while string != prev_string: prev_string = string string = re.sub(pattern, repl, string, count, flags) return string return re.sub(pattern, repl, string, count, flags)
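A small sketch of what the OVERLAP branch buys you, using plain `re` only (no `ReFlags` needed): a single pass replaces non-overlapping matches, while repeating the substitution until a fixed point lets matches that overlap their own replacements collapse fully.

import re

# Single pass: non-overlapping matches only.
print(re.sub(r'aa', 'a', 'aaaa'))        # 'aa'

# Repeated substitution until nothing changes, which is what the
# OVERLAP branch above implements.
s, prev = 'aaaa', None
while s != prev:
    prev, s = s, re.sub(r'aa', 'a', s)
print(s)                                 # 'a'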
<SYSTEM_TASK:> Generate an endless sequence of random integers from permutations of the <END_TASK> <USER_TASK:> Description: def endless_permutations(N, random_state=None): """ Generate an endless sequence of random integers from permutations of the set [0, ..., N). If we call this N times, we will sweep through the entire set without replacement, on the (N+1)th call a new permutation will be created, etc. Parameters ---------- N: int the length of the set random_state: int or RandomState, optional random seed Yields ------ int: a random int from the set [0, ..., N) """
generator = check_random_state(random_state) while True: batch_inds = generator.permutation(N) for b in batch_inds: yield b
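An illustrative use of the generator above (it assumes `endless_permutations` and its `check_random_state` dependency are importable): drawing N values gives one full sweep of the set without replacement, and the next N values come from a fresh permutation.

from itertools import islice

gen = endless_permutations(5, random_state=42)
first_sweep = list(islice(gen, 5))
second_sweep = list(islice(gen, 5))
print(sorted(first_sweep))    # [0, 1, 2, 3, 4] -- one full sweep, no repeats
print(sorted(second_sweep))   # [0, 1, 2, 3, 4] -- a fresh permutation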
<SYSTEM_TASK:> Returns ``Repository`` object of type linked with given ``alias`` at <END_TASK> <USER_TASK:> Description: def get_repo(path=None, alias=None, create=False): """ Returns ``Repository`` object of type linked with given ``alias`` at the specified ``path``. If ``alias`` is not given it will try to guess it using get_scm method """
if create: if not (path or alias): raise TypeError("If create is specified, we need path and scm type") return get_backend(alias)(path, create=True) if path is None: path = abspath(os.path.curdir) try: scm, path = get_scm(path, search_up=True) path = abspath(path) alias = scm except VCSError: raise VCSError("No scm found at %s" % path) if alias is None: alias = get_scm(path)[0] backend = get_backend(alias) repo = backend(path, create=create) return repo
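A hedged usage sketch of the function above. The paths are placeholders, and it assumes the relevant vcs backends (git/hg) are installed; the printed class name is only indicative.

# Guess the scm type from an existing working directory:
repo = get_repo('/srv/projects/my-lib')
print(type(repo).__name__)    # e.g. 'GitRepository' or 'MercurialRepository'

# Explicitly create a new repository of a given type:
new_repo = get_repo('/srv/projects/new-lib', alias='git', create=True)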
<SYSTEM_TASK:> Returns ``Repository`` class identified by the given alias or raises <END_TASK> <USER_TASK:> Description: def get_backend(alias): """ Returns ``Repository`` class identified by the given alias or raises VCSError if alias is not recognized or backend class cannot be imported. """
if alias not in settings.BACKENDS: raise VCSError("Given alias '%s' is not recognized! Allowed aliases:\n" "%s" % (alias, pformat(settings.BACKENDS.keys()))) backend_path = settings.BACKENDS[alias] klass = import_class(backend_path) return klass
<SYSTEM_TASK:> Returns all scm's found at the given path. If no scm is recognized <END_TASK> <USER_TASK:> Description: def get_scms_for_path(path): """ Returns all scm's found at the given path. If no scm is recognized - empty list is returned. :param path: path to directory which should be checked. May be callable. :raises VCSError: if given ``path`` is not a directory """
from vcs.backends import get_backend if hasattr(path, '__call__'): path = path() if not os.path.isdir(path): raise VCSError("Given path %r is not a directory" % path) result = [] for key in ALIASES: dirname = os.path.join(path, '.' + key) if os.path.isdir(dirname): result.append(key) continue # We still need to check if it's not bare repository as # bare repos don't have working directories try: get_backend(key)(path) result.append(key) continue except RepositoryError: # Wrong backend pass except VCSError: # No backend at all pass return result
<SYSTEM_TASK:>
Returns path's subdirectories which seem to be repositories.
<END_TASK>
<USER_TASK:>
Description:
def get_repo_paths(path):
    """
    Returns path's subdirectories which seem to be repositories.
    """
repo_paths = []
# Join the listed names onto ``path`` before resolving them; resolving the
# bare names would be relative to the current working directory instead.
dirnames = (os.path.abspath(os.path.join(path, dirname))
            for dirname in os.listdir(path))
for dirname in dirnames:
    try:
        get_scm(dirname)
        repo_paths.append(dirname)
    except VCSError:
        pass
return repo_paths
<SYSTEM_TASK:> Runs command on the system with given ``args``. <END_TASK> <USER_TASK:> Description: def run_command(cmd, *args): """ Runs command on the system with given ``args``. """
# ``args`` is a tuple, so it must be concatenated with ``cmd`` before joining;
# joining ``(cmd, args)`` directly would raise a TypeError.
command = ' '.join((cmd,) + args)
p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
return p.returncode, stdout, stderr
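An illustrative call against the corrected version above; the command and arguments are placeholders.

retcode, out, err = run_command('ls', '-l', '/tmp')
if retcode != 0:
    print('command failed: %s' % err)
else:
    print(out)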
<SYSTEM_TASK:> If pygments are available on the system <END_TASK> <USER_TASK:> Description: def get_highlighted_code(name, code, type='terminal'): """ If pygments are available on the system then returned output is colored. Otherwise unchanged content is returned. """
import logging try: import pygments pygments except ImportError: return code from pygments import highlight from pygments.lexers import guess_lexer_for_filename, ClassNotFound from pygments.formatters import TerminalFormatter try: lexer = guess_lexer_for_filename(name, code) formatter = TerminalFormatter() content = highlight(code, lexer, formatter) except ClassNotFound: logging.debug("Couldn't guess Lexer, will not use pygments.") content = code return content
<SYSTEM_TASK:> Parses given text and returns ``datetime.datetime`` instance or raises <END_TASK> <USER_TASK:> Description: def parse_datetime(text): """ Parses given text and returns ``datetime.datetime`` instance or raises ``ValueError``. :param text: string of desired date/datetime or something more verbose, like *yesterday*, *2weeks 3days*, etc. """
text = text.strip().lower() INPUT_FORMATS = ( '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d', '%m/%d/%Y %H:%M:%S', '%m/%d/%Y %H:%M', '%m/%d/%Y', '%m/%d/%y %H:%M:%S', '%m/%d/%y %H:%M', '%m/%d/%y', ) for format in INPUT_FORMATS: try: return datetime.datetime(*time.strptime(text, format)[:6]) except ValueError: pass # Try descriptive texts if text == 'tomorrow': future = datetime.datetime.now() + datetime.timedelta(days=1) args = future.timetuple()[:3] + (23, 59, 59) return datetime.datetime(*args) elif text == 'today': return datetime.datetime(*datetime.datetime.today().timetuple()[:3]) elif text == 'now': return datetime.datetime.now() elif text == 'yesterday': past = datetime.datetime.now() - datetime.timedelta(days=1) return datetime.datetime(*past.timetuple()[:3]) else: days = 0 matched = re.match( r'^((?P<weeks>\d+) ?w(eeks?)?)? ?((?P<days>\d+) ?d(ays?)?)?$', text) if matched: groupdict = matched.groupdict() if groupdict['days']: days += int(matched.groupdict()['days']) if groupdict['weeks']: days += int(matched.groupdict()['weeks']) * 7 past = datetime.datetime.now() - datetime.timedelta(days=days) return datetime.datetime(*past.timetuple()[:3]) raise ValueError('Wrong date: "%s"' % text)
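A few illustrative calls showing the behaviour implemented above (assuming the function and its `datetime`/`time`/`re` imports are in scope); the expected results in the comments follow directly from the code paths.

print(parse_datetime('2014-05-07 21:05:45'))  # datetime.datetime(2014, 5, 7, 21, 5, 45)
print(parse_datetime('yesterday'))            # midnight at the start of the previous day
print(parse_datetime('tomorrow'))             # 23:59:59 tomorrow
print(parse_datetime('2weeks 3days'))         # midnight 17 days ago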
<SYSTEM_TASK:> Returns dictionary for each attribute from given ``obj``. <END_TASK> <USER_TASK:> Description: def get_dict_for_attrs(obj, attrs): """ Returns dictionary for each attribute from given ``obj``. """
data = {} for attr in attrs: data[attr] = getattr(obj, attr) return data
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def loglike(self, y, f): r""" Bernoulli log likelihood. Parameters ---------- y: ndarray array of 0, 1 valued integers of targets f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) Returns ------- logp: ndarray the log likelihood of each y given each f under this likelihood. """
# way faster than calling bernoulli.logpmf y, f = np.broadcast_arrays(y, f) ll = y * f - softplus(f) return ll
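A quick numerical check of the identity used above, `y * f - softplus(f) == log Bernoulli(y | expit(f))`. The `softplus` helper is defined locally as a stand-in, since the library's own helper is assumed rather than imported.

import numpy as np
from scipy.stats import bernoulli
from scipy.special import expit


def softplus(x):
    # local stand-in for the library helper: log(1 + exp(x))
    return np.log1p(np.exp(x))


y = np.array([0, 1, 1, 0])
f = np.array([-2.0, -0.5, 1.0, 3.0])
direct = y * f - softplus(f)
reference = bernoulli.logpmf(y, expit(f))
print(np.allclose(direct, reference))  # True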
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def loglike(self, y, f, n): r""" Binomial log likelihood. Parameters ---------- y: ndarray array of 0, 1 valued integers of targets f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) n: ndarray the total number of observations Returns ------- logp: ndarray the log likelihood of each y given each f under this likelihood. """
ll = binom.logpmf(y, n=n, p=expit(f)) return ll
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def df(self, y, f, n): r""" Derivative of Binomial log likelihood w.r.t.\ f. Parameters ---------- y: ndarray array of 0, 1 valued integers of targets f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) n: ndarray the total number of observations Returns ------- df: ndarray the derivative :math:`\partial \log p(y|f) / \partial f` """
y, f, n = np.broadcast_arrays(y, f, n) return y - expit(f) * n
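A central-difference sanity check of the closed-form derivative above, `y - expit(f) * n`, against the Binomial log-pmf with success probability `expit(f)`; the test values are arbitrary.

import numpy as np
from scipy.stats import binom
from scipy.special import expit

y, n, f, eps = 3.0, 10.0, 0.7, 1e-6
analytic = y - expit(f) * n
numeric = (binom.logpmf(y, n=n, p=expit(f + eps))
           - binom.logpmf(y, n=n, p=expit(f - eps))) / (2 * eps)
print(np.isclose(analytic, numeric))  # True, up to finite-difference error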
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def loglike(self, y, f, var=None): r""" Gaussian log likelihood. Parameters ---------- y: ndarray array of 0, 1 valued integers of targets f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) var: float, ndarray, optional The variance of the distribution, if not input, the initial value of variance is used. Returns ------- logp: ndarray the log likelihood of each y given each f under this likelihood. """
# way faster than calling norm.logpdf var = self._check_param(var) y, f = np.broadcast_arrays(y, f) ll = - 0.5 * (np.log(2 * np.pi * var) + (y - f)**2 / var) return ll
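A quick check that the explicit formula above matches `scipy.stats.norm.logpdf` with mean `f` and variance `var`; the test values are arbitrary.

import numpy as np
from scipy.stats import norm

y = np.array([0.2, -1.0, 3.5])
f = np.array([0.0, 0.5, 3.0])
var = 1.5
direct = -0.5 * (np.log(2 * np.pi * var) + (y - f) ** 2 / var)
reference = norm.logpdf(y, loc=f, scale=np.sqrt(var))
print(np.allclose(direct, reference))  # True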
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def df(self, y, f, var): r""" Derivative of Gaussian log likelihood w.r.t.\ f. Parameters ---------- y: ndarray array of 0, 1 valued integers of targets f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) var: float, ndarray, optional The variance of the distribution, if not input, the initial value of variance is used. Returns ------- df: ndarray the derivative :math:`\partial \log p(y|f) / \partial f` """
var = self._check_param(var) y, f = np.broadcast_arrays(y, f) return (y - f) / var
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def loglike(self, y, f): r""" Poisson log likelihood. Parameters ---------- y: ndarray array of integer targets f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) Returns ------- logp: ndarray the log likelihood of each y given each f under this likelihood. """
y, f = np.broadcast_arrays(y, f) if self.tranfcn == 'exp': g = np.exp(f) logg = f else: g = softplus(f) logg = np.log(g) return y * logg - g - gammaln(y + 1)
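A check of the `'exp'` transfer-function branch above: with `g = exp(f)`, the expression `y * f - g - gammaln(y + 1)` is exactly the Poisson log-pmf with rate `g`. The test values are arbitrary.

import numpy as np
from scipy.stats import poisson
from scipy.special import gammaln

y = np.array([0, 2, 5])
f = np.array([-1.0, 0.3, 1.8])
g = np.exp(f)                           # the 'exp' transfer function branch
direct = y * f - g - gammaln(y + 1)
reference = poisson.logpmf(y, mu=g)
print(np.allclose(direct, reference))   # True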
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def Ey(self, f): r""" Expected value of the Poisson likelihood. Parameters ---------- f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) Returns ------- Ey: ndarray expected value of y, :math:`\mathbb{E}[\mathbf{y}|\mathbf{f}]`. """
return np.exp(f) if self.tranfcn == 'exp' else softplus(f)
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def df(self, y, f): r""" Derivative of Poisson log likelihood w.r.t.\ f. Parameters ---------- y: ndarray array of 0, 1 valued integers of targets f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) Returns ------- df: ndarray the derivative :math:`\partial \log p(y|f) / \partial f` """
y, f = np.broadcast_arrays(y, f) if self.tranfcn == 'exp': return y - np.exp(f) else: return expit(f) * (y / safesoftplus(f) - 1)
<SYSTEM_TASK:> Reset parser state. <END_TASK> <USER_TASK:> Description: def reset(self, state_size_changed=False): """Reset parser state. Parameters ---------- state_size_changed : `bool`, optional `True` if maximum state size changed (default: `False`). """
if state_size_changed: self.state = deque(repeat('', self.state_size), maxlen=self.state_size) else: self.state.extend(repeat('', self.state_size)) self.end = True
<SYSTEM_TASK:> Convert an image to palette type. <END_TASK> <USER_TASK:> Description: def convert(ctype, img, palette_img, dither=False): """Convert an image to palette type. Parameters ---------- ctype : `int` Conversion type. img : `PIL.Image` Image to convert. palette_img : `PIL.Image` Palette source image. dither : `bool`, optional Enable dithering (default: `False`). Raises ------ ValueError If palette_img has no palette. Returns ------- `PIL.Image` Converted image. """
if ctype == 0: img2 = img.convert(mode='P') img2.putpalette(palette_img.getpalette()) return img2 img.load() palette_img.load() if palette_img.palette is None: raise ValueError('invalid palette image') im = img.im.convert('P', int(dither), palette_img.im) return img._new(im)
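A hedged usage sketch of the function above with Pillow. The file names are placeholders; it assumes an RGB source image and a palette image that can be converted to mode 'P'.

from PIL import Image

# File names are placeholders.
img = Image.open('photo.png').convert('RGB')
palette_src = Image.open('palette.png').convert('P')

quantized = convert(1, img, palette_src, dither=True)  # ctype != 0: low-level quantize
naive = convert(0, img, palette_src)                   # ctype == 0: reinterpret indices
quantized.save('photo_quantized.png')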
<SYSTEM_TASK:>
Retrieve a configuration setting as boolean.
<END_TASK>
<USER_TASK:>
Description:
def get_boolean(self, section, name, default=None):
    """Retrieve a configuration setting as boolean.

    :param section: Tuple with section name and optional subsection name
    :param name: Name of the setting, including section and possible
        subsection.
    :return: Contents of the setting as a boolean, or ``default`` if the
        setting is missing
    :raise ValueError: if the value is not a valid boolean string
    """
try: value = self.get(section, name) except KeyError: return default if value.lower() == "true": return True elif value.lower() == "false": return False raise ValueError("not a valid boolean string: %r" % value)
<SYSTEM_TASK:> Read configuration from a file-like object. <END_TASK> <USER_TASK:> Description: def from_file(cls, f): """Read configuration from a file-like object."""
ret = cls() section = None setting = None for lineno, line in enumerate(f.readlines()): line = line.lstrip() if setting is None: if _strip_comments(line).strip() == "": continue if line[0] == "[": line = _strip_comments(line).rstrip() if line[-1] != "]": raise ValueError("expected trailing ]") key = line.strip() pts = key[1:-1].split(" ", 1) pts[0] = pts[0].lower() if len(pts) == 2: if pts[1][0] != "\"" or pts[1][-1] != "\"": raise ValueError( "Invalid subsection " + pts[1]) else: pts[1] = pts[1][1:-1] if not _check_section_name(pts[0]): raise ValueError("invalid section name %s" % pts[0]) section = (pts[0], pts[1]) else: if not _check_section_name(pts[0]): raise ValueError("invalid section name %s" % pts[0]) pts = pts[0].split(".", 1) if len(pts) == 2: section = (pts[0], pts[1]) else: section = (pts[0], ) ret._values[section] = {} else: if section is None: raise ValueError("setting %r without section" % line) try: setting, value = line.split("=", 1) except ValueError: setting = line value = "true" setting = setting.strip().lower() if not _check_variable_name(setting): raise ValueError("invalid variable name %s" % setting) if value.endswith("\\\n"): value = value[:-2] continuation = True else: continuation = False value = _parse_string(value) ret._values[section][setting] = value if not continuation: setting = None else: # continuation line if line.endswith("\\\n"): line = line[:-2] continuation = True else: continuation = False value = _parse_string(line) ret._values[section][setting] += value if not continuation: setting = None return ret
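A small parsing sketch exercising `from_file` and `get_boolean` from above. The surrounding library code is Python 2-era (note `iteritems` and the old `except` syntax), so this example uses Python 2's `StringIO`; the sample config text is made up.

from StringIO import StringIO   # Python 2, matching the code above

sample = (
    '[core]\n'
    '\tbare = false\n'
    '[remote "origin"]\n'
    '\turl = git://example.com/repo.git\n'
)
cf = ConfigFile.from_file(StringIO(sample))
print(cf.get(("core",), "bare"))               # 'false'
print(cf.get_boolean(("core",), "bare"))       # False
print(cf.get(("remote", "origin"), "url"))     # 'git://example.com/repo.git'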
<SYSTEM_TASK:> Read configuration from a file on disk. <END_TASK> <USER_TASK:> Description: def from_path(cls, path): """Read configuration from a file on disk."""
f = GitFile(path, 'rb') try: ret = cls.from_file(f) ret.path = path return ret finally: f.close()
<SYSTEM_TASK:> Write configuration to a file on disk. <END_TASK> <USER_TASK:> Description: def write_to_path(self, path=None): """Write configuration to a file on disk."""
if path is None: path = self.path f = GitFile(path, 'wb') try: self.write_to_file(f) finally: f.close()
<SYSTEM_TASK:> Write configuration to a file-like object. <END_TASK> <USER_TASK:> Description: def write_to_file(self, f): """Write configuration to a file-like object."""
for section, values in self._values.iteritems(): try: section_name, subsection_name = section except ValueError: (section_name, ) = section subsection_name = None if subsection_name is None: f.write("[%s]\n" % section_name) else: f.write("[%s \"%s\"]\n" % (section_name, subsection_name)) for key, value in values.iteritems(): f.write("%s = %s\n" % (key, _escape_value(value)))
<SYSTEM_TASK:> Retrieve the default configuration. <END_TASK> <USER_TASK:> Description: def default_backends(cls): """Retrieve the default configuration. This will look in the repository configuration (if for_path is specified), the users' home directory and the system configuration. """
paths = [] paths.append(os.path.expanduser("~/.gitconfig")) paths.append("/etc/gitconfig") backends = [] for path in paths: try: cf = ConfigFile.from_path(path) except (IOError, OSError), e: if e.errno != errno.ENOENT: raise else: continue backends.append(cf) return backends
<SYSTEM_TASK:> Generate a noisy polynomial for a regression problem <END_TASK> <USER_TASK:> Description: def make_polynomial(degree=3, n_samples=100, bias=0.0, noise=0.0, return_coefs=False, random_state=None): """ Generate a noisy polynomial for a regression problem Examples -------- >>> X, y, coefs = make_polynomial(degree=3, n_samples=200, noise=.5, ... return_coefs=True, random_state=1) """
generator = check_random_state(random_state) # TODO: Add arguments to support other priors coefs = generator.randn(degree + 1) pows = np.arange(degree + 1) poly = np.vectorize(lambda x: np.sum(coefs * x ** pows)) X, y = make_regression(poly, n_samples=n_samples, bias=bias, noise=noise, random_state=random_state) if return_coefs: return X, y, coefs return X, y