def get_relationship_bundle(manager, relationship_id=None, legacy=True): q = """ MATCH (start)-[r]->(end) WHERE ID(r) = {relationship_id} RETURN start, r, end """ with manager.session as s: record = s.run(q, {'relationship_id': int(relationship_id)}).single() if record is None: raise exceptions.RelationshipNotFound(manager, int(relationship_id)) if legacy: bundle = { 'type': record['r'].type, 'id': int(relationship_id), 'data': record['r'].properties, 'start': record['start'].properties['handle_id'], 'end': record['end'].properties['handle_id'], } else: bundle = { 'type': record['r'].type, 'id': int(relationship_id), 'data': record['r'].properties, 'start': record['start'], 'end': record['end'], } return bundle
:param manager: Neo4jDBSessionManager :param relationship_id: Internal Neo4j id :param legacy: Backwards compatibility :type relationship_id: int :type legacy: bool :rtype: dictionary
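A brief usage sketch for get_relationship_bundle; the manager instance and the relationship id below are placeholders, not values from the source.

# `manager` is assumed to be an existing Neo4jDBSessionManager; 42 is a made-up internal relationship id.
bundle = get_relationship_bundle(manager, relationship_id=42)
print(bundle['type'], bundle['start'], bundle['end'])  # relationship type plus start/end handle_ids (legacy=True)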
def delete_relationship(manager, relationship_id): q = """ MATCH ()-[r]->() WHERE ID(r) = {relationship_id} DELETE r """ with manager.session as s: s.run(q, {'relationship_id': int(relationship_id)}) return True
Deletes the relationship. :param manager: Neo4jDBSessionManager :param relationship_id: Internal Neo4j relationship id :return: bool
def get_node_meta_type(manager, handle_id): node = get_node(manager=manager, handle_id=handle_id, legacy=False) for label in node.labels: if label in META_TYPES: return label raise exceptions.NoMetaLabelFound(handle_id)
Returns the meta type of the supplied node as a string. :param manager: Neo4jDBSessionManager :param handle_id: Unique id :return: string
def get_nodes_by_value(manager, value, prop=None, node_type='Node'): if prop: q = """ MATCH (n:{label}) USING SCAN n:{label} WHERE n.{prop} = {{value}} RETURN distinct n """.format(label=node_type, prop=prop) with manager.session as s: for result in s.run(q, {'value': value}): yield result['n'] else: q = """ MATCH (n:{label}) RETURN n """.format(label=node_type) pattern = re.compile(u'{0}'.format(value), re.IGNORECASE) with manager.session as s: for result in s.run(q): for v in result['n'].properties.values(): if pattern.search(text_type(v)): yield result['n'] break
Traverses all nodes or nodes of specified label and compares the property/properties of the node with the supplied string. :param manager: Neo4jDBSessionManager :param value: Value to search for :param prop: Which property to look for value in :param node_type: :type value: str|list|bool|int :type prop: str :type node_type: str :return: dicts
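Because get_nodes_by_value is a generator, it has to be iterated; a usage sketch follows in which the labels and property names are assumptions for illustration.

# Exact match on a single property of nodes labelled Port (label and property are assumed).
for node in get_nodes_by_value(manager, value='ge-1/0/1', prop='name', node_type='Port'):
    print(node.properties.get('name'))

# Without `prop`, every property of every Router node is checked with a
# case-insensitive regex against the value.
matches = list(get_nodes_by_value(manager, value='uplink', node_type='Router'))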
def search_nodes_by_value(manager, value, prop=None, node_type='Node'): if prop: q = """ MATCH (n:{label}) WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*") RETURN distinct n """.format(label=node_type, prop=prop, value=value) else: q = """ MATCH (n:{label}) WITH n, keys(n) as props WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*")) RETURN distinct n """.format(label=node_type, value=value) with manager.session as s: for result in s.run(q): yield result['n']
Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node with the supplied string. :param manager: Neo4jDBSessionManager :param value: Value to search for :param prop: Which property to look for value in :param node_type: :type value: str :type prop: str :type node_type: str :return: dicts
def create_index(manager, prop, node_type='Node'): with manager.session as s: s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
:param manager: Neo4jDBSessionManager :param prop: Property to index :param node_type: Label to create index on :type manager: Neo4jDBSessionManager :type prop: str :type node_type: str
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True): q = """ MATCH (n:{label}) WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}}) RETURN n """.format(label=node_type, prop=prop, lookup_func=lookup_func) with manager.session as s: for result in s.run(q, {'value': value}): if legacy: yield result['n'].properties else: yield result['n']
:param manager: Neo4jDBSessionManager :param prop: Indexed property :param value: Indexed value :param node_type: Label used for index :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH :param legacy: Backwards compatibility :type manager: Neo4jDBSessionManager :type prop: str :type value: str :type node_type: str :type lookup_func: str :type legacy: bool :return: Dict or Node object :rtype: dict|Node
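A usage sketch for the indexed lookup; the Host label and the name property are assumptions, and `manager` is an existing Neo4jDBSessionManager.

# Case-insensitive prefix match against an indexed property; with the default
# legacy=True the generator yields plain property dicts.
for props in get_indexed_node(manager, prop='name', value='dns',
                              node_type='Host', lookup_func='STARTS WITH'):
    print(props['name'])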
def get_unique_node_by_name(manager, node_name, node_type): q = """ MATCH (n:Node { name: {name} }) WHERE {label} IN labels(n) RETURN n.handle_id as handle_id """ with manager.session as s: result = list(s.run(q, {'name': node_name, 'label': node_type})) if result: if len(result) == 1: return get_node_model(manager, result[0]['handle_id']) raise exceptions.MultipleNodesReturned(node_name, node_type) return None
Returns the node if the node is unique for name and type or None. :param manager: Neo4jDBSessionManager :param node_name: string :param node_type: str|unicode :return: norduniclient node model or None
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True): q = """ MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}}) CREATE (a)-[r:%s]->(b) RETURN r """ % rel_type with manager.session as s: if legacy: return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
:param manager: Context manager to handle transactions :param handle_id: Node handle id :param other_handle_id: Other node handle id :param rel_type: Relationship type :param legacy: Backwards compatibility :type manager: Neo4jDBSessionManager :type handle_id: str|unicode :type other_handle_id: str|unicode :type rel_type: str|unicode :type legacy: Boolean :rtype: int|neo4j.v1.types.Relationship
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type): other_meta_type = get_node_meta_type(manager, other_handle_id) if other_meta_type == 'Location' and rel_type == 'Has': return _create_relationship(manager, location_handle_id, other_handle_id, rel_type) raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
Makes a relationship between the two nodes and returns the relationship. If a relationship is not possible, a NoRelationshipPossible exception is raised.
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type): other_meta_type = get_node_meta_type(manager, other_handle_id) if rel_type == 'Depends_on': if other_meta_type == 'Logical' or other_meta_type == 'Physical': return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type) elif rel_type == 'Part_of': if other_meta_type == 'Physical': return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type) raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
Makes a relationship between the two nodes and returns the relationship. If a relationship is not possible, a NoRelationshipPossible exception is raised.
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type): other_meta_type = get_node_meta_type(manager, other_handle_id) if other_meta_type == 'Logical': if rel_type in ['Uses', 'Provides']: return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type) elif other_meta_type == 'Location' and rel_type == 'Responsible_for': return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type) elif other_meta_type == 'Physical': if rel_type in ['Owns', 'Provides']: return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type) raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
Makes a relationship between the two nodes and returns the relationship. If a relationship is not possible, a NoRelationshipPossible exception is raised.
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type): other_meta_type = get_node_meta_type(manager, other_handle_id) if other_meta_type == 'Physical': if rel_type == 'Has' or rel_type == 'Connected_to': return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type) elif other_meta_type == 'Location' and rel_type == 'Located_in': return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type) raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
Makes a relationship between the two nodes and returns the relationship. If a relationship is not possible, a NoRelationshipPossible exception is raised.
def create_relationship(manager, handle_id, other_handle_id, rel_type): meta_type = get_node_meta_type(manager, handle_id) if meta_type == 'Location': return create_location_relationship(manager, handle_id, other_handle_id, rel_type) elif meta_type == 'Logical': return create_logical_relationship(manager, handle_id, other_handle_id, rel_type) elif meta_type == 'Relation': return create_relation_relationship(manager, handle_id, other_handle_id, rel_type) elif meta_type == 'Physical': return create_physical_relationship(manager, handle_id, other_handle_id, rel_type) other_meta_type = get_node_meta_type(manager, other_handle_id) raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
Makes a relationship from node to other_node depending on which meta_type the nodes are. Returns the relationship or raises a NoRelationshipPossible exception.
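A sketch of how the dispatcher above is typically called; the handle ids are made up, and whether the call succeeds depends entirely on the meta types of the two nodes.

try:
    # Physical -[:Located_in]-> Location is one of the allowed combinations.
    rel = create_relationship(manager, handle_id='101', other_handle_id='202',
                              rel_type='Located_in')
except exceptions.NoRelationshipPossible as exc:
    print('relationship rejected:', exc)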
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True): if rel_type: q = """ MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}}) RETURN collect(r) as relationships """.format(rel_type=rel_type) else: q = """ MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}}) RETURN collect(r) as relationships """ with manager.session as s: if legacy: relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships'] return [relationship.id for relationship in relationships] return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
Takes a start and an end node with an optional relationship type. Returns the relationships between the nodes or an empty list.
def get_node_model(manager, handle_id=None, node=None): bundle = get_node_bundle(manager, handle_id, node) for label in bundle.get('labels'): try: classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '') return getattr(models, classname)(manager).load(bundle) except AttributeError: pass for label in bundle.get('labels'): try: classname = '{base}Model'.format(base=label).replace('_', '') return getattr(models, classname)(manager).load(bundle) except AttributeError: pass try: classname = '{base}Model'.format(base=bundle.get('meta_type')) return getattr(models, classname)(manager).load(bundle) except AttributeError: return models.BaseNodeModel(manager).load(bundle)
:param manager: Context manager to handle transactions :type manager: Neo4jDBSessionManager :param handle_id: Nodes handle id :type handle_id: str|unicode :param node: Node object :type node: neo4j.v1.types.Node :return: Node model :rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
def get_relationship_model(manager, relationship_id): bundle = get_relationship_bundle(manager, relationship_id) return models.BaseRelationshipModel(manager).load(bundle)
:param manager: Context manager to handle transactions :type manager: Neo4jDBSessionManager :param relationship_id: Internal Neo4j relationship id :type relationship_id: int :return: Relationship model :rtype: models.BaseRelationshipModel
def parse_code(self, url, html): soup = BeautifulSoup(html, 'html5lib', from_encoding='utf-8') # -- main text div = (soup .find('div', id='content_false') .find('div', attrs={'class': 'data'})) code = Code(self.id_code, date_pub=self.date_pub, url_code=cleanup_url(url)) # -- Code title/subtitle div_title = div.find('div', id='titreTexte') span_subtitle = div_title.find('span', attrs={'class': 'sousTitreTexte'}) if span_subtitle: code.title = div_title.text.replace(span_subtitle.text, '') code.subtitle = span_subtitle.text.strip() regex = r'Version consolidée au (\d{1,2}(?:er)?\s+[^\s]+\s+\d{4})' m = re.search(regex, code.subtitle) if m: code.date_pub = parse_date(m.group(1)) code.title = code.title.strip() # -- TOC code.children = [self.parse_code_ul(url, child) for child in div.find_all('ul', recursive=False)] return code
Parse the code details and TOC from the given HTML content :type url: str :param url: source URL of the page :type html: unicode :param html: Content of the HTML :return: the code
def parse_code_ul(self, url, ul): li_list = ul.find_all('li', recursive=False) li = li_list[0] span_title = li.find('span', attrs={'class': re.compile(r'TM\d+Code')}, recursive=False) section = Section(span_title.attrs['id'], span_title.text.strip()) div_italic = li.find('div', attrs={'class': 'italic'}, recursive=False) if div_italic: section.content = div_italic.text.strip() span_link = li.find('span', attrs={'class': 'codeLienArt'}, recursive=False) if span_link: a_link = span_link.find('a', recursive=False) if self.with_articles: service = self.section_service section.articles = service.articles(self.id_code, section.id_section, self.date_pub) else: section.articles = a_link.text.strip() section.url_section = cleanup_url( urljoin(url, a_link.attrs['href'])) section.children = [self.parse_code_ul(url, child) for child in li.find_all('ul', recursive=False)] return section
Fill the toc item
def add(self, interval, offset): start, stop = self.get_start_stop(interval) if len(self.starts) > 0: if start < self.starts[-1] or offset <= self.offsets[-1][1]: raise ValueError('intervals and offsets must be added in-order') self.offsets[-1][1] = offset self.offsets[-1][2] += 1 else: self.starts.append(start) self.stops.append(stop) self.offsets.append([offset, offset, 1])
The added interval must overlap or lie beyond the last stored interval, i.e. it must be added in sorted order. :param interval: interval to add :param offset: full virtual offset to add :return:
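A sketch of the in-order contract described above; the index instance and the (start, stop) interval format are assumptions, since the class itself is not shown in this excerpt.

index.add((0, 100), offset=0)     # first interval stored
index.add((50, 200), offset=64)   # fine: overlaps the last interval and the offset increases
index.add((10, 20), offset=32)    # ValueError: the offset does not increase, so not added in-order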
def get_sum(qs, field): sum_field = '%s__sum' % field qty = qs.aggregate(Sum(field))[sum_field] return qty if qty else 0
get sum for queryset. ``qs``: queryset ``field``: The field name to sum.
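A usage sketch for get_sum; the OrderLine model and its fields are hypothetical.

# Returns 0 for an empty queryset instead of None, because
# aggregate(Sum('qty')) produces {'qty__sum': None} when there are no rows.
total_qty = get_sum(OrderLine.objects.filter(status='open'), 'qty')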
def get_max(qs, field): max_field = '%s__max' % field num = qs.aggregate(Max(field))[max_field] return num if num else 0
get max for queryset. qs: queryset field: The field name to take the maximum of.
def do_filter(qs, qdata, quick_query_fields=[], int_quick_query_fields=[]): try: qs = qs.filter( __gen_quick_query_params( qdata.get('q_quick_search_kw'), quick_query_fields, int_quick_query_fields) ) q, kw_query_params = __gen_query_params(qdata) qs = qs.filter(q, **kw_query_params) except: import traceback traceback.print_exc() return qs
auto filter queryset by dict. qs: queryset need to filter. qdata: quick_query_fields: int_quick_query_fields:
def read_gcvs(filename): with open(filename, 'r') as fp: parser = GcvsParser(fp) for star in parser: yield star
Reads variable star data in `GCVS format`_. :param filename: path to GCVS data file (usually ``iii.dat``) .. _`GCVS format`: http://www.sai.msu.su/gcvs/gcvs/iii/html/
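A short usage sketch; iterating the generator parses the catalogue lazily, and 'iii.dat' is simply the file name mentioned in the docstring.

for star in read_gcvs('iii.dat'):
    print(star)  # one record per variable star, in whatever form GcvsParser yields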
def dict_to_body(star_dict): if ephem is None: # pragma: no cover raise NotImplementedError("Please install PyEphem in order to use dict_to_body.") body = ephem.FixedBody() body.name = star_dict['name'] body._ra = ephem.hours(str(star_dict['ra'])) body._dec = ephem.degrees(str(star_dict['dec'])) body._epoch = ephem.J2000 return body
Converts a dictionary of variable star data to a `Body` instance. Requires `PyEphem <http://rhodesmill.org/pyephem/>`_ to be installed.
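A sketch of using the returned ephem.FixedBody; the star record and the observer coordinates are made up, and PyEphem must be installed.

import ephem

star = {'name': 'R And', 'ra': '00:24:01.9', 'dec': '38:34:37'}
body = dict_to_body(star)

observer = ephem.Observer()
observer.lat, observer.lon = '52.2297', '21.0122'  # assumed observer position
body.compute(observer)
print(body.name, body.alt, body.az)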
def tempfile(self): "write the docx to a named tmpfile and return the tmpfile filename" tf = tempfile.NamedTemporaryFile() tfn = tf.name tf.close() os.remove(tf.name) shutil.copy(self.fn, tfn) return tfn
write the docx to a named tmpfile and return the tmpfile filename
def sheets(self): data = Dict() for src in [src for src in self.zipfile.namelist() if 'xl/worksheets/' in src]: name = os.path.splitext(os.path.basename(src))[0] xml = self.xml(src) data[name] = xml return data
return the sheets of data.
def workbook_data(self): document = XML( fn=os.path.splitext(self.fn)[0]+'.xml', root=Element.workbook()) shared_strings = [ str(t.text) for t in self.xml('xl/sharedStrings.xml') .root.xpath(".//xl:t", namespaces=self.NS)] for key in self.sheets.keys(): worksheet = self.sheets[key].transform(XT, shared_strings=shared_strings) document.root.append(worksheet.root) return document
return a readable XML form of the data.
def process(self, event): logger.info(f"{self}: put {event.src_path}") self.queue.put(os.path.basename(event.src_path))
Put and process tasks in queue.
def vi_pos_matching (line, index=0): '''find matching <([{}])>''' anchor = None target = None delta = 1 count = 0 try: while 1: if anchor is None: # first find anchor try: target, delta = _vi_dct_matching [line [index]] anchor = line [index] count = 1 except KeyError: index += 1 continue else: # Here the anchor has been found # Need to get corresponding target if index < 0: return -1 if line [index] == anchor: count += 1 elif line [index] == target: count -= 1 if count == 0: return index index += delta except IndexError: return -1
find matching <([{}])>
def key_percent (self, char): '''find matching <([{}])>''' self.motion = self.motion_matching self.delete_right = 1 self.state = _VI_MOTION self.apply ()
find matching <([{}])>
def store_user_documents(user_document_gen, client, mongo_database_name, mongo_collection_name): mongo_database = client[mongo_database_name] mongo_collection = mongo_database[mongo_collection_name] # Iterate over all users to be annotated and store the Twitter lists in mongo. for user_twitter_id, user_document_list in user_document_gen: document = user_document_list document["_id"] = int(user_twitter_id) mongo_collection.update({"_id": user_twitter_id}, document, upsert=True)
Stores Twitter list objects that a Twitter user is a member of in different mongo collections. Inputs: - user_document_gen: A python generator that yields a Twitter user id and an associated document list. - client: A pymongo MongoClient object. - mongo_database_name: The name of a Mongo database as a string. - mongo_collection_name: The name of the mongo collection as a string.
def read_user_documents_for_single_user_generator(user_twitter_id, mongo_database): collection_name = str(user_twitter_id) collection = mongo_database[collection_name] cursor = collection.find() for twitter_list in cursor: yield twitter_list
Reads Twitter list objects that a Twitter user is a member of from a dedicated mongo collection. Inputs: - user_twitter_id: A Twitter user id. - mongo_database: A mongo database. Yields: - twitter_list: A tuple containing: * A Twitter user id. * A python list containing Twitter lists in dictionary (json) format.
def read_user_documents_generator(user_twitter_id_list, client, mongo_database_name, mongo_collection_name): mongo_database = client[mongo_database_name] mongo_collection = mongo_database[mongo_collection_name] # cursor = mongo_collection.find({"_id": {"$in": [int(user_twitter_id) for user_twitter_id in user_twitter_id_list]}}) cursor = mongo_collection.find() user_twitter_id_list = [int(user_twitter_id) for user_twitter_id in user_twitter_id_list] user_twitter_id_list = set(user_twitter_id_list) for documents in cursor: if documents["_id"] in user_twitter_id_list: yield documents["_id"], documents
Reads Twitter list documents for a list of Twitter users from a mongo collection. Inputs: - user_twitter_id_list: A python list of Twitter user ids. - client: A pymongo MongoClient object. - mongo_database_name: The name of a Mongo database as a string. - mongo_collection_name: The name of the mongo collection as a string. Yields: - user_twitter_id: A Twitter user id. - twitter_list_gen: A python generator that yields Twitter lists in dictionary (json) format.
def get_collection_documents_generator(client, database_name, collection_name, spec, latest_n, sort_key): mongo_database = client[database_name] collection = mongo_database[collection_name] collection.create_index(sort_key) if latest_n is not None: skip_n = collection.count() - latest_n if collection.count() - latest_n < 0: skip_n = 0 cursor = collection.find(filter=spec).sort([(sort_key, ASCENDING), ]) cursor = cursor[skip_n:] else: cursor = collection.find(filter=spec).sort([(sort_key, ASCENDING), ]) for document in cursor: yield document
This is a python generator that yields tweets stored in a mongodb collection. Tweet "created_at" field is assumed to have been stored in the format supported by MongoDB. Inputs: - client: A pymongo MongoClient object. - database_name: The name of a Mongo database as a string. - collection_name: The name of the tweet collection as a string. - spec: A python dictionary that defines higher query arguments. - latest_n: The number of latest results we require from the mongo document collection. - sort_key: A field name according to which we will sort in ascending order. Yields: - document: A document in python dictionary (json) format.
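A usage sketch with assumed database and collection names; the spec filter is hypothetical.

from pymongo import MongoClient

client = MongoClient('mongodb://localhost:27017')
spec = {'lang': 'en'}  # hypothetical query filter
for tweet in get_collection_documents_generator(client, 'twitter', 'tweets',
                                                spec, latest_n=100,
                                                sort_key='created_at'):
    print(tweet['_id'])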
def extract_connected_components(graph, connectivity_type, node_to_id): # Get a networkx graph. nx_graph = nx.from_scipy_sparse_matrix(graph, create_using=nx.DiGraph()) # Calculate all connected components in graph. if connectivity_type == "weak": largest_connected_component_list = nxalgcom.weakly_connected_component_subgraphs(nx_graph) elif connectivity_type == "strong": largest_connected_component_list = nxalgcom.strongly_connected_component_subgraphs(nx_graph) else: print("Invalid connectivity type input.") raise RuntimeError # Handle empty graph. try: largest_connected_component = max(largest_connected_component_list, key=len) except ValueError: print("Error: Empty graph.") raise RuntimeError old_node_list = largest_connected_component.nodes() node_to_node = dict(zip(np.arange(len(old_node_list)), old_node_list)) largest_connected_component = nx.to_scipy_sparse_matrix(largest_connected_component, dtype=np.float64, format="csr") # Make node_to_id. new_node_to_id = {k: node_to_id[v] for k, v in node_to_node.items()} return largest_connected_component, new_node_to_id, old_node_list
Extract the largest connected component from a graph. Inputs: - graph: An adjacency matrix in scipy sparse matrix format. - connectivity_type: A string that can be either: "strong" or "weak". - node_to_id: A map from graph node id to Twitter id, in python dictionary format. Outputs: - largest_connected_component: An adjacency matrix in scipy sparse matrix format. - new_node_to_id: A map from graph node id to Twitter id, in python dictionary format. - old_node_list: List of nodes from the possibly disconnected original graph. Raises: - RuntimeError: If there the input graph is empty.
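A small sketch with a made-up three-node graph; note the function relies on the older networkx connected_component_subgraphs helpers and nx.from_scipy_sparse_matrix, so it assumes a networkx version that still provides them.

import numpy as np
import scipy.sparse as sp

adjacency = sp.csr_matrix(np.array([[0, 1, 0],
                                    [1, 0, 0],
                                    [0, 0, 0]], dtype=np.float64))
node_to_id = {0: 111, 1: 222, 2: 333}  # graph node -> Twitter id

component, new_node_to_id, old_nodes = extract_connected_components(adjacency, 'weak', node_to_id)
# component is the 2x2 CSR matrix for nodes 0 and 1; new_node_to_id == {0: 111, 1: 222}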
def sendEmail(self, subject, body, toAddress=False): if not toAddress: toAddress = self.toAddress toAddress = toAddress.split(';') message = MIMEText(body) message['Subject'] = subject message['From'] = self.fromAddress message['To'] = ','.join(toAddress) if not self.testing: s = SMTP(self.server, self.port) s.sendmail(self.fromAddress, toAddress, message.as_string()) s.quit() print('email sent') else: print('***Begin Test Email Message***') print(message) print('***End Test Email Message***')
sends an email using the [email protected] account
def _bind_key(self, key, func): if not callable(func): print u"Trying to bind non method to keystroke:%s,%s"%(key,func) raise ReadlineError(u"Trying to bind non method to keystroke:%s,%s,%s,%s"%(key,func,type(func),type(self._bind_key))) keyinfo = make_KeyPress_from_keydescr(key.lower()).tuple() log(u">>>%s -> %s<<<"%(keyinfo,func.__name__)) self.key_dispatch[keyinfo] = func
setup the mapping from key to call the function.
def _bind_exit_key(self, key): keyinfo = make_KeyPress_from_keydescr(key.lower()).tuple() self.exit_dispatch[keyinfo] = None
setup the mapping from key to call the function.
def _get_completions(self): completions = [] self.begidx = self.l_buffer.point self.endidx = self.l_buffer.point buf=self.l_buffer.line_buffer if self.completer: # get the string to complete while self.begidx > 0: self.begidx -= 1 if buf[self.begidx] in self.completer_delims: self.begidx += 1 break text = ensure_str(u''.join(buf[self.begidx:self.endidx])) log(u'complete text="%s"' % ensure_unicode(text)) i = 0 while 1: try: r = ensure_unicode(self.completer(text, i)) except IndexError: break i += 1 if r is None: break elif r and r not in completions: completions.append(r) else: pass log(u'text completions=<%s>' % map(ensure_unicode, completions)) if (self.complete_filesystem == "on") and not completions: # get the filename to complete while self.begidx > 0: self.begidx -= 1 if buf[self.begidx] in u' \t\n': self.begidx += 1 break text = ensure_str(u''.join(buf[self.begidx:self.endidx])) log(u'file complete text="%s"' % ensure_unicode(text)) completions = map(ensure_unicode, glob.glob(os.path.expanduser(text) + '*')) if self.mark_directories == u'on': mc = [] for f in completions: if os.path.isdir(f): mc.append(f + os.sep) else: mc.append(f) completions = mc log(u'fnames=<%s>' % map(ensure_unicode, completions)) return completions
Return a list of possible completions for the string ending at the point. Also set begidx and endidx in the process.
def complete(self, e): # (TAB) completions = self._get_completions() if completions: cprefix = commonprefix(completions) if len(cprefix) > 0: rep = [ c for c in cprefix ] point=self.l_buffer.point self.l_buffer[self.begidx:self.endidx] = rep self.l_buffer.point = point + len(rep) - (self.endidx - self.begidx) if len(completions) > 1: if self.show_all_if_ambiguous == u'on': self._display_completions(completions) else: self._bell() else: self._bell() self.finalize()
Attempt to perform completion on the text before point. The actual completion performed is application-specific. The default is filename completion.
def possible_completions(self, e): # (M-?) completions = self._get_completions() self._display_completions(completions) self.finalize()
List the possible completions of the text before point.
def insert_completions(self, e): # (M-*) completions = self._get_completions() b = self.begidx e = self.endidx for comp in completions: rep = [ c for c in comp ] rep.append(' ') self.l_buffer[b:e] = rep b += len(rep) e = b self.line_cursor = b self.finalize()
Insert all completions of the text before point that would have been generated by possible-completions.
def insert_text(self, string): self.l_buffer.insert_text(string, self.argument_reset) self.finalize()
Insert text into the command line.
def forward_char(self, e): # (C-f) self.l_buffer.forward_char(self.argument_reset) self.finalize()
Move forward a character.
def backward_char(self, e): # (C-b) self.l_buffer.backward_char(self.argument_reset) self.finalize()
Move back a character.
def forward_word(self, e): # (M-f) self.l_buffer.forward_word(self.argument_reset) self.finalize()
Move forward to the end of the next word. Words are composed of letters and digits.
def backward_word(self, e): # (M-b) self.l_buffer.backward_word(self.argument_reset) self.finalize()
Move back to the start of the current or previous word. Words are composed of letters and digits.
def forward_word_end(self, e): # () self.l_buffer.forward_word_end(self.argument_reset) self.finalize()
Move forward to the end of the next word. Words are composed of letters and digits.
def backward_word_end(self, e): # () self.l_buffer.backward_word_end(self.argument_reset) self.finalize()
Move forward to the end of the next word. Words are composed of letters and digits.
def forward_char_extend_selection(self, e): # self.l_buffer.forward_char_extend_selection(self.argument_reset) self.finalize()
Move forward a character.
def backward_char_extend_selection(self, e): # self.l_buffer.backward_char_extend_selection(self.argument_reset) self.finalize()
Move back a character.
def forward_word_extend_selection(self, e): # self.l_buffer.forward_word_extend_selection(self.argument_reset) self.finalize()
Move forward to the end of the next word. Words are composed of letters and digits.
def backward_word_extend_selection(self, e): # self.l_buffer.backward_word_extend_selection(self.argument_reset) self.finalize()
Move back to the start of the current or previous word. Words are composed of letters and digits.
def forward_word_end_extend_selection(self, e): # self.l_buffer.forward_word_end_extend_selection(self.argument_reset) self.finalize()
Move forward to the end of the next word. Words are composed of letters and digits.
def delete_char(self, e): # (C-d) self.l_buffer.delete_char(self.argument_reset) self.finalize()
Delete the character at point. If point is at the beginning of the line, there are no characters in the line, and the last character typed was not bound to delete-char, then return EOF.
def backward_delete_char(self, e): # (Rubout) self.l_buffer.backward_delete_char(self.argument_reset) self.finalize()
Delete the character behind the cursor. A numeric argument means to kill the characters instead of deleting them.
def backward_delete_word(self, e): # (Control-Rubout) self.l_buffer.backward_delete_word(self.argument_reset) self.finalize()
Delete the character behind the cursor. A numeric argument means to kill the characters instead of deleting them.
def forward_delete_word(self, e): # (Control-Delete) self.l_buffer.forward_delete_word(self.argument_reset) self.finalize()
Delete the character behind the cursor. A numeric argument means to kill the characters instead of deleting them.
def self_insert(self, e): # (a, b, A, 1, !, ...) if e.char and ord(e.char)!=0: #don't insert null character in buffer, can happen with dead keys. self.insert_text(e.char) self.finalize()
Insert yourself.
def paste(self,e): #(Control-v) if self.enable_win32_clipboard: txt=clipboard.get_clipboard_text_and_convert(False) txt=txt.split("\n")[0].strip("\r").strip("\n") log("paste: >%s<"%map(ord,txt)) self.insert_text(txt) self.finalize()
Paste windows clipboard. Assume single line strip other lines and end of line markers and trailing spaces
def ipython_paste(self,e): if self.enable_win32_clipboard: txt=clipboard.get_clipboard_text_and_convert( self.enable_ipython_paste_list_of_lists) if self.enable_ipython_paste_for_paths: if len(txt)<300 and ("\t" not in txt) and ("\n" not in txt): txt=txt.replace("\\","/").replace(" ",r"\ ") self.insert_text(txt) self.finalize()
Paste windows clipboard. If enable_ipython_paste_list_of_lists is True then try to convert tabseparated data to repr of list of lists or repr of array. If enable_ipython_paste_for_paths==True then change \\ to / and spaces to \space
def dump_functions(self, e): # () print txt="\n".join(self.rl_settings_to_string()) print txt self._print_prompt() self.finalize()
Print all of the functions and their key bindings to the Readline output stream. If a numeric argument is supplied, the output is formatted in such a way that it can be made part of an inputrc file. This command is unbound by default.
def fit(self, X, y=None): self.transmat, self.genmat, self.transcount, self.statetime = ctmc( X, self.numstates, self.transintv, self.toltime, self.debug) return self
Calls the ctmc.ctmc function Parameters ---------- X : list of lists (see ctmc function 'data') y : not used, present for API consistency.
def RV_1(self): return self.orbpop_long.RV * (self.orbpop_long.M2 / (self.orbpop_long.M1 + self.orbpop_long.M2))
Instantaneous RV of star 1 with respect to system center-of-mass
def RV_2(self): return -self.orbpop_long.RV * (self.orbpop_long.M1 / (self.orbpop_long.M1 + self.orbpop_long.M2)) +\ self.orbpop_short.RV_com1
Instantaneous RV of star 2 with respect to system center-of-mass
def RV_3(self): return -self.orbpop_long.RV * (self.orbpop_long.M1 / (self.orbpop_long.M1 + self.orbpop_long.M2)) +\ self.orbpop_short.RV_com2
Instantaneous RV of star 3 with respect to system center-of-mass
def dRV_2(self,dt): return -self.orbpop_long.dRV(dt) * (self.orbpop_long.M1/(self.orbpop_long.M1 + self.orbpop_long.M2)) +\ self.orbpop_short.dRV(dt,com=True)
Returns difference in RVs (separated by time dt) of star 2.
def dRV_3(self,dt): return -self.orbpop_long.dRV(dt) * (self.orbpop_long.M1/(self.orbpop_long.M1 + self.orbpop_long.M2)) -\ self.orbpop_short.dRV(dt) * (self.orbpop_short.M1/(self.orbpop_short.M1 + self.orbpop_short.M2))
Returns difference in RVs (separated by time dt) of star 3.
def save_hdf(self,filename,path=''): self.orbpop_long.save_hdf(filename,'{}/long'.format(path)) self.orbpop_short.save_hdf(filename,'{}/short'.format(path))
Save to .h5 file.
def Rsky(self): return np.sqrt(self.position.x**2 + self.position.y**2)
Projected sky separation of stars
def RV_com1(self): return self.RV * (self.M2 / (self.M1 + self.M2))
RVs of star 1 relative to center-of-mass
def RV_com2(self): return -self.RV * (self.M1 / (self.M1 + self.M2))
RVs of star 2 relative to center-of-mass
def save_hdf(self,filename,path=''): self.dataframe.to_hdf(filename,'{}/df'.format(path))
Saves all relevant data to .h5 file; so state can be restored.
def add_pii_permissions(self, group, view_only=None): pii_model_names = [m.split(".")[1] for m in self.pii_models] if view_only: permissions = Permission.objects.filter( (Q(codename__startswith="view") | Q(codename__startswith="display")), content_type__model__in=pii_model_names, ) else: permissions = Permission.objects.filter( content_type__model__in=pii_model_names ) for permission in permissions: group.permissions.add(permission) for model in self.pii_models: permissions = Permission.objects.filter( codename__startswith="view", content_type__app_label=model.split(".")[0], content_type__model=f"historical{model.split('.')[1]}", ) for permission in permissions: group.permissions.add(permission) for permission in Permission.objects.filter( content_type__app_label="edc_registration", codename__in=[ "add_registeredsubject", "delete_registeredsubject", "change_registeredsubject", ], ): group.permissions.remove(permission) permission = Permission.objects.get( content_type__app_label="edc_registration", codename="view_historicalregisteredsubject", ) group.permissions.add(permission)
Adds PII model permissions.
def get_receive_message(self, data): ''' http://channels.readthedocs.io/en/stable/asgi/www.html#receive ''' self.order += 1 message = { 'channel': 'websocket.receive', 'reply_channel': None, 'path': self.path, 'order': self.order, 'text': None, 'bytes': None, } if isinstance(data, str): message['text'] = data elif isinstance(data, bytes): message['bytes'] = data return message
http://channels.readthedocs.io/en/stable/asgi/www.html#receive
def get_disconnect_message(self, code: int): ''' http://channels.readthedocs.io/en/stable/asgi/www.html#disconnection ''' self.order += 1 return { 'channel': 'websocket.disconnect', 'reply_channel': None, 'path': self.path, 'order': self.order, 'code': code, }
http://channels.readthedocs.io/en/stable/asgi/www.html#disconnection
def get_attribute_cardinality(attribute): if attribute.kind == RESOURCE_ATTRIBUTE_KINDS.MEMBER: card = CARDINALITY_CONSTANTS.ONE elif attribute.kind == RESOURCE_ATTRIBUTE_KINDS.COLLECTION: card = CARDINALITY_CONSTANTS.MANY else: raise ValueError('Can not determine cardinality for non-terminal ' 'attributes.') return card
Returns the cardinality of the given resource attribute. :returns: One of the constants defined in :class:`everest.constants.CARDINALITY_CONSTANTS`. :raises ValueError: If the given attribute is not a relation attribute (i.e., if it is a terminal attribute).
def readSIFGraph(filename): p = sif_parser.Parser(filename) accu = TermSet() file = open(filename,'r') s = file.readline() while s!="": try: accu = p.parse(s) except EOFError: break s = file.readline() return accu
input: string, name of a file containing a Bioquali-like graph description output: asp.TermSet, with atoms matching the contents of the input file Parses a Bioquali-like graph description, and returns a TermSet object. Written using original Bioquali
def setup(path_config="~/.config/scalar/config.yaml", configuration_name=None): global config global client global token global room # config file path_config = Path(path_config).expanduser() log.debug("load config {path}".format(path = path_config)) if not path_config.exists(): log.error("no config {path} found".format(path = path_config)) sys.exit() else: with open(str(path_config), "r") as _file: config = yaml.load(_file) if not configuration_name: for configuration in list(config["configurations"].items()): if configuration[1]["default"]: config = configuration[1] else: config["configurations"][configuration_name] # connect to homeserver and room log.debug("Matrix username: " + config["username"]) log.debug("connect to homeserver " + config["homeserver"]) client = MatrixClient(config["homeserver"]) token = client.login_with_password(username = config["username"], password = config["passcode"]) log.debug("connect to room " + config["room_alias"]) room = client.join_room(config["room_alias"])
Load a configuration from a default or specified configuration file, accessing a default or specified configuration name.
def worker_wrapper(worker_instance, pid_path): def exit_handler(*args): """ Remove pid file on exit """ if len(args) > 0: print("Exit py signal {signal}".format(signal=args[0])) remove(pid_path) atexit.register(exit_handler) signal.signal(signal.SIGINT, exit_handler) signal.signal(signal.SIGTERM, exit_handler) worker_instance.work() # Remove pid file if the process can not catch signals exit_handler(2)
A wrapper to start RQ worker as a new process. :param worker_instance: RQ's worker instance :param pid_path: A file to check if the worker is running or not
def collection(self): if not self.include_collections: return None ctx = stack.top if ctx is not None: if not hasattr(ctx, 'redislite_collection'): ctx.redislite_collection = Collection(redis=self.connection) return ctx.redislite_collection
Return the redis-collection instance.
def queue(self): if not self.include_rq: return None ctx = stack.top if ctx is not None: if not hasattr(ctx, 'redislite_queue'): ctx.redislite_queue = {} for queue_name in self.queues: ctx.redislite_queue[queue_name] = \ Queue(queue_name, connection=self.connection) return ctx.redislite_queue
The queue property. Return rq.Queue instance.
def start_worker(self): if not self.include_rq: return None worker = Worker(queues=self.queues, connection=self.connection) worker_pid_path = current_app.config.get( "{}_WORKER_PID".format(self.config_prefix), 'rl_worker.pid' ) try: worker_pid_file = open(worker_pid_path, 'r') worker_pid = int(worker_pid_file.read()) print("Worker already started with PID=%d" % worker_pid) worker_pid_file.close() return worker_pid except (IOError, TypeError): self.worker_process = Process(target=worker_wrapper, kwargs={ 'worker_instance': worker, 'pid_path': worker_pid_path }) self.worker_process.start() worker_pid_file = open(worker_pid_path, 'w') worker_pid_file.write("%d" % self.worker_process.pid) worker_pid_file.close() print("Start a worker process with PID=%d" % self.worker_process.pid) return self.worker_process.pid
Trigger new process as a RQ worker.
def image_get_format(f): f.seek(0) try: img = Image.open(f) t = img.format.lower() except IOError: t = None return t
Return image format for file-object f. (jpeg, png, gif etc.) All formats: http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html Example: if image_get_format(request.FILES['image']) == 'jpeg': print 'Image is JPEG' if image_get_format(open('/tmp/image.png', 'rb')) == 'png': print 'File is PNG'
def is_image(f, types=('png', 'jpeg', 'gif'), set_content_type=True): assert isinstance(types, (list, tuple)) t = image_get_format(f) if t not in [t.lower() for t in types]: return False if set_content_type: set_uploaded_file_content_type_and_file_ext(f, t) return True
Return True if file f is image (types type) and set its correct content_type and filename extension. Example: if is_image(request.FILES['file']): print 'File is image' if is_image(open('/tmp/image.jpeg', 'rb')): print 'File is image'
def image_save_buffer_fix(maxblock=1048576): before = ImageFile.MAXBLOCK ImageFile.MAXBLOCK = maxblock try: yield finally: ImageFile.MAXBLOCK = before
Contextmanager that change MAXBLOCK in ImageFile.
def optimize_png_file(f, o=None): if isinstance(f, basestring): if o is None: o = f else: assert isinstance(o, basestring) try: subprocess.check_call(['pngquant', '--force', '--output', o, f]) except subprocess.CalledProcessError: return False return True if not hasattr(f, 'read'): raise RuntimeError if o is None: o = f else: if not hasattr(f, 'write'): raise RuntimeError f.seek(0) try: p = subprocess.Popen(['pngquant', '-'], stdin=subprocess.PIPE, stdout=subprocess.PIPE) r = p.communicate(f.read())[0] except IOError: r = None if r: truncate_file(o) o.write(r) return True return False
Use pngquant to optimize a PNG image. f - path to input image file or file-object. o - path to output image file or file-object to save the result to. NOTICE: f and o cannot be of different types
def response(data={}, status=200, message='OK'): response_data = { 'data': data, 'status': status, 'message': message, } content = json.dumps(response_data, ensure_ascii=False, cls=DjangoJSONEncoder) return HttpResponse(content, status=status, content_type='application/json')
Wraps the arguments in a dictionary and returns a HttpResponse object with the HTTP status set to ``status``. The body of the response is JSON data on the format:: { "status": 400, "message": "OK", "data": {"ids": [1, 2, 3]} } The content of ``data`` is serialized using the DjangoJSONEncoder class. Example:: import jason def my_view(request): return jason.response({'weight': 80}, 200, 'OK')
def view(allowed_methods, exceptions={}): def _(f): def __(request, *args, **kwargs): if request.method not in allowed_methods: return response({}, 405, 'Method Not Allowed') try: return response(*f(request, *args, **kwargs)) except Bail as e: return response(e.data, e.status, e.message) except Exception as e: if e.__class__ in exceptions: return response(*exceptions[e.__class__](e)) else: return response({}, 500, 'Internal Server Error') return __ return _
Decorates a Django function based view and wraps it's return in the :py:func:`jason.response` function. The view should return a list or tuple which is unpacked using the ``*``-operator into :py:func:`jason.response`. The view can raise a :py:class:`jason.Bail` Exception. ``allowed_methods`` lists which HTTP methods are allowed, e.g. ['GET', 'POST']. ``exceptions`` is a dictionary where the keys are ``Exception`` classes and values are callables. It defines responses for raised Exceptions other than the :py:class:`jason.Bail` Exception. The callable should return a tuple or list that can unpacked into :py:func:`jason.response`. Example:: import jason @jason.view(allowed_methods=['GET', 'POST'], exceptions={ WebFault: lambda e: ({}, 400, e.message, ) }) def my_view(request): return {'numbers': get_numbers()},
def permission_required(perm): def _(f): def __(request, *args, **kwargs): if request.user.has_perm(perm): return f(request, *args, **kwargs) else: return response({}, 401, 'Unauthorized') return __ return _
A json pendant to permission_required. Will return a 401 response if the user is not allowed. The body of the response will be the following json data:: { "status": 401, "message": "Unauthorized", "data": {} } Example:: import jason @jason.permission_required("my_perm") def my_view(request): ...
def confirm(self, batch_id=None, filename=None): if batch_id or filename: export_history = self.history_model.objects.using(self.using).filter( Q(batch_id=batch_id) | Q(filename=filename), sent=True, confirmation_code__isnull=True, ) else: export_history = self.history_model.objects.using(self.using).filter( sent=True, confirmation_code__isnull=True ) if export_history.count() == 0: raise ConfirmationError( "Nothing to do. No history of sent and unconfirmed files" ) else: confirmation_code = ConfirmationCode() export_history.update( confirmation_code=confirmation_code.identifier, confirmation_datetime=get_utcnow(), ) return confirmation_code.identifier
Flags the batch as confirmed by updating confirmation_datetime on the history model for this batch.
def clean_single_word(word, lemmatizing="wordnet"): if lemmatizing == "porter": porter = PorterStemmer() lemma = porter.stem(word) elif lemmatizing == "snowball": snowball = SnowballStemmer('english') lemma = snowball.stem(word) elif lemmatizing == "wordnet": wordnet = WordNetLemmatizer() lemma = wordnet.lemmatize(word) else: print("Invalid lemmatizer argument.") raise RuntimeError return lemma
Performs stemming or lemmatizing on a single word. If we are to search for a word in a clean bag-of-words, we need to search it after the same kind of preprocessing. Inputs: - word: A string containing the source word. - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet". Output: - lemma: The resulting clean lemma or stem.
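A quick illustration of how the lemmatizer choice changes the result (requires NLTK and its WordNet data).

print(clean_single_word('running', lemmatizing='porter'))    # 'run'
print(clean_single_word('running', lemmatizing='snowball'))  # 'run'
print(clean_single_word('running', lemmatizing='wordnet'))   # 'running' (noun POS is the WordNet default)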
def clean_corpus_serial(corpus, lemmatizing="wordnet"): list_of_bags_of_words = list() append_bag_of_words = list_of_bags_of_words.append lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int)) for document in corpus: word_list, lemma_to_keywordbag = clean_document(document=document, lemmatizing=lemmatizing) # TODO: Alter this. bag_of_words = combine_word_list(word_list) append_bag_of_words(bag_of_words) for lemma, keywordbag in lemma_to_keywordbag.items(): for keyword, multiplicity in keywordbag.items(): lemma_to_keywordbag_total[lemma][keyword] += multiplicity return list_of_bags_of_words, lemma_to_keywordbag_total
Extracts a bag-of-words from each document in a corpus serially. Inputs: - corpus: A python list of python strings. Each string is a document. - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet". Output: - list_of_bags_of_words: A list of python dictionaries representing bags-of-words. - lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords.
def extract_bag_of_words_from_corpus_parallel(corpus, lemmatizing="wordnet"): #################################################################################################################### # Map and reduce document cleaning. #################################################################################################################### # Build a pool of processes. pool = Pool(processes=get_threads_number()*2,) # Partition the tweets to chunks. partitioned_corpus = chunks(corpus, len(corpus) / get_threads_number()) # Map the cleaning of the tweet corpus to a pool of processes. list_of_bags_of_words, list_of_lemma_to_keywordset_maps = pool.map(partial(clean_corpus_serial, lemmatizing=lemmatizing), partitioned_corpus) # Reduce dictionaries to a single dictionary serially. bag_of_words = reduce_list_of_bags_of_words(list_of_bags_of_words) # Reduce lemma to keyword maps to a single dictionary. lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int)) for lemma_to_keywordbag in list_of_lemma_to_keywordset_maps: for lemma, keywordbag in lemma_to_keywordbag.items(): for keyword, multiplicity in keywordbag.items(): lemma_to_keywordbag_total[lemma][keyword] += multiplicity return bag_of_words, lemma_to_keywordbag_total
This extracts one bag-of-words from a list of strings. The documents are mapped to parallel processes. Inputs: - corpus: A list of strings. - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet". Output: - bag_of_words: This is a bag-of-words in python dictionary format. - lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords.
def separate_camel_case(word, first_cap_re, all_cap_re): s1 = first_cap_re.sub(r'\1 \2', word) separated_word = all_cap_re.sub(r'\1 \2', s1) return separated_word
What it says on the tin. Input: - word: A string that may be in camelCase. Output: - separated_word: The string with the camel-case words separated by spaces.
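The two compiled regexes are not defined in this excerpt; the pair below is a common choice for this kind of splitting and is only an assumption here.

import re

first_cap_re = re.compile(r'(.)([A-Z][a-z]+)')
all_cap_re = re.compile(r'([a-z0-9])([A-Z])')

print(separate_camel_case('camelCaseHTTPRequest', first_cap_re, all_cap_re))
# -> 'camel Case HTTP Request'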
def middleware(func): @wraps(func) def parse(*args, **kwargs): """ get middleware from route, execute middleware in order """ middleware = copy.deepcopy(kwargs['middleware']) kwargs.pop('middleware') if request.method == "OPTIONS": # return 200 json response for CORS return JsonResponse(200) if middleware is None: return func(*args, **kwargs) for mware in middleware: ware = mware() if ware.status is False: return ware.response return func(*args, **kwargs) return parse
Executes routes.py route middleware
def progress_bar_media(): if PROGRESSBARUPLOAD_INCLUDE_JQUERY: js = ["http://code.jquery.com/jquery-1.8.3.min.js",] else: js = [] js.append("js/progress_bar.js") m = Media(js=js) return m.render()
progress_bar_media simple tag: returns the rendered script tag(s) for the JavaScript used by progress_bar
def send(MESSAGE, SOCKET, MESSAGE_ID=None, CODE_FILE=None, CODE_LINE=None, CODE_FUNC=None, **kwargs): args = ['MESSAGE=' + MESSAGE] if MESSAGE_ID is not None: id = getattr(MESSAGE_ID, 'hex', MESSAGE_ID) args.append('MESSAGE_ID=' + id) if CODE_LINE == CODE_FILE == CODE_FUNC == None: CODE_FILE, CODE_LINE, CODE_FUNC = \ _traceback.extract_stack(limit=2)[0][:3] if CODE_FILE is not None: args.append('CODE_FILE=' + CODE_FILE) if CODE_LINE is not None: args.append('CODE_LINE={:d}'.format(CODE_LINE)) if CODE_FUNC is not None: args.append('CODE_FUNC=' + CODE_FUNC) args.extend(_make_line(key.upper(), val) for key, val in kwargs.items()) return sendv(SOCKET, *args)
r"""Send a message to the journal. >>> journal.send('Hello world') >>> journal.send('Hello, again, world', FIELD2='Greetings!') >>> journal.send('Binary message', BINARY=b'\xde\xad\xbe\xef') Value of the MESSAGE argument will be used for the MESSAGE= field. MESSAGE must be a string and will be sent as UTF-8 to the journal. MESSAGE_ID can be given to uniquely identify the type of message. It must be a string or a uuid.UUID object. CODE_LINE, CODE_FILE, and CODE_FUNC can be specified to identify the caller. Unless at least on of the three is given, values are extracted from the stack frame of the caller of send(). CODE_FILE and CODE_FUNC must be strings, CODE_LINE must be an integer. Additional fields for the journal entry can only be specified as keyword arguments. The payload can be either a string or bytes. A string will be sent as UTF-8, and bytes will be sent as-is to the journal. Other useful fields include PRIORITY, SYSLOG_FACILITY, SYSLOG_IDENTIFIER, SYSLOG_PID.
def stream(identifier, priority=LOG_DEBUG, level_prefix=False): fd = stream_fd(identifier, priority, level_prefix) return _os.fdopen(fd, 'w', 1)
r"""Return a file object wrapping a stream to journal. Log messages written to this file as simple newline sepearted text strings are written to the journal. The file will be line buffered, so messages are actually sent after a newline character is written. >>> stream = journal.stream('myapp') >>> stream <open file '<fdopen>', mode 'w' at 0x...> >>> stream.write('message...\n') will produce the following message in the journal:: PRIORITY=7 SYSLOG_IDENTIFIER=myapp MESSAGE=message... Using the interface with print might be more convinient: >>> from __future__ import print_function >>> print('message...', file=stream) priority is the syslog priority, one of `LOG_EMERG`, `LOG_ALERT`, `LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`, `LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`. level_prefix is a boolean. If true, kernel-style log priority level prefixes (such as '<1>') are interpreted. See sd-daemon(3) for more information.