Dataset columns:
    code        string, length 26 to 870k characters
    docstring   string, length 1 to 65.6k characters
    func_name   string, length 1 to 194 characters
    language    string, 1 distinct value
    repo        string, length 8 to 68 characters
    path        string, length 5 to 194 characters
    url         string, length 46 to 254 characters
    license     string, 4 distinct values
def __repr__(self):
    """
    Returns a string representation of the KnowledgeNode instance.

    Returns:
        str: String representation of the KnowledgeNode instance.
    """
    return f"KnowledgeNode(name={self.name}, content={self.content}, children={len(self.children)})"
Returns a string representation of the KnowledgeNode instance. Returns: str: String representation of the KnowledgeNode instance.
__repr__
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
def get_path_from_root(self, root: Optional["KnowledgeNode"] = None):
    """
    Get a list of names from the root to this node.

    Returns:
        List[str]: A list of node names from the root to this node.
    """
    path = []
    current_node = self
    while current_node:
        path.append(current_node.name)
        if root is not None and current_node.name == root.name:
            break
        current_node = current_node.parent
    return path[::-1]
Get a list of names from the root to this node. Returns: List[str]: A list of node names from the root to this node.
get_path_from_root
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
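A minimal usage sketch of get_path_from_root. It assumes add_child links the parent and returns the new child, as the calls in insert_node and find_node_by_path elsewhere in this file suggest:

# Hypothetical sketch; add_child behavior assumed from its uses in this file.
root = KnowledgeNode(name="root")
history = root.add_child("History")      # add_child(new_name, ...) as called in insert_node
origins = history.add_child("Origins")

print(origins.get_path_from_root())              # expected: ['root', 'History', 'Origins']
print(origins.get_path_from_root(root=history))  # expected: ['History', 'Origins']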
def get_all_descendents(self) -> List["KnowledgeNode"]:
    """
    Get a list of all descendant nodes.

    Returns:
        List[KnowledgeNode]: A list of all descendant nodes.
    """
    descendents = []

    def collect_descendents(node):
        for child in node.children:
            descendents.append(child)
            collect_descendents(child)

    collect_descendents(self)
    return descendents
Get a list of all descendant nodes. Returns: List[KnowledgeNode]: A list of all descendant nodes.
get_all_descendents
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
def get_all_predecessors(self) -> List["KnowledgeNode"]:
    """
    Get a list of all predecessor nodes (from current node to root).

    Returns:
        List[KnowledgeNode]: A list of all predecessor nodes.
    """
    predecessors = []
    current_node = self.parent
    while current_node is not None:
        predecessors.append(current_node)
        current_node = current_node.parent
    return predecessors
Get a list of all predecessor nodes (from current node to root). Returns: List[KnowledgeNode]: A list of all predecessor nodes.
get_all_predecessors
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
def to_dict(self):
    """
    Converts the KnowledgeNode instance to a dictionary representation.

    Returns:
        dict: The dictionary representation of the KnowledgeNode.
    """
    return {
        "name": self.name,
        "content": list(self.content),
        "children": [child.to_dict() for child in self.children],
        "parent": self.parent.name if self.parent else None,
        "synthesize_output": self.synthesize_output,
        "need_regenerate_synthesize_output": self.need_regenerate_synthesize_output,
    }
Converts the KnowledgeNode instance to a dictionary representation. Returns: dict: The dictionary representation of the KnowledgeNode.
to_dict
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
def from_dict(cls, data):
    """
    Constructs a KnowledgeNode instance from a dictionary representation.

    Args:
        data (dict): The dictionary representation of the KnowledgeNode.

    Returns:
        KnowledgeNode: The constructed KnowledgeNode instance.
    """

    def helper(cls, data, parent_node=None):
        if parent_node is not None:
            assert data["parent"] is not None and data["parent"] == parent_node.name
        node = cls(
            name=data["name"],
            content=data["content"],
            parent=parent_node,
            children=None,
            synthesize_output=data.get("synthesize_output", None),
            need_regenerate_synthesize_output=data.get(
                "need_regenerate_synthesize_output", True
            ),
        )
        for child_data in data["children"]:
            child_node = helper(cls, child_data, parent_node=node)
            node.children.append(child_node)
        return node

    return helper(cls, data)
Constructs a KnowledgeNode instance from a dictionary representation. Args: data (dict): The dictionary representation of the KnowledgeNode. Returns: KnowledgeNode: The constructed KnowledgeNode instance.
from_dict
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
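A sketch of the to_dict / from_dict round trip, assuming from_dict is exposed as a classmethod (the decorator is not captured in this row):

# Hypothetical round-trip sketch.
root = KnowledgeNode(name="root")
root.add_child("Background")

data = root.to_dict()                     # nested dicts; parent stored by name
restored = KnowledgeNode.from_dict(data)  # assumed @classmethod
assert [c.name for c in restored.children] == ["Background"]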
def __init__(
    self,
    topic: str,
    knowledge_base_lm: Union[dspy.dsp.LM, dspy.dsp.HFModel],
    node_expansion_trigger_count: int,
    encoder: Encoder,
):
    """
    Initializes a KnowledgeBase instance.

    Args:
        topic (str): The topic of the knowledge base.
        knowledge_base_lm (Union[dspy.dsp.LM, dspy.dsp.HFModel]): The language model used by the
            information insertion, node expansion, article generation, and summary modules.
        node_expansion_trigger_count (int): Number of information items in a node that triggers
            its expansion into subtopics.
        encoder (Encoder): The encoder used to embed information and the knowledge base structure.
    """
    from .collaborative_storm.modules.article_generation import (
        ArticleGenerationModule,
    )
    from .collaborative_storm.modules.information_insertion_module import (
        InsertInformationModule,
        ExpandNodeModule,
    )
    from .collaborative_storm.modules.knowledge_base_summary import (
        KnowledgeBaseSummaryModule,
    )

    self.topic: str = topic
    self.encoder: Encoder = encoder
    self.information_insert_module = InsertInformationModule(
        engine=knowledge_base_lm, encoder=self.encoder
    )
    self.expand_node_module = ExpandNodeModule(
        engine=knowledge_base_lm,
        information_insert_module=self.information_insert_module,
        node_expansion_trigger_count=node_expansion_trigger_count,
    )
    self.article_generation_module = ArticleGenerationModule(
        engine=knowledge_base_lm
    )
    self.gen_summary_module = KnowledgeBaseSummaryModule(engine=knowledge_base_lm)

    self.root: KnowledgeNode = KnowledgeNode(name="root")
    self.kb_embedding = {
        "hash": hash(""),
        "encoded_structure": np.array([[]]),
        "structure_string": "",
    }
    self.info_uuid_to_info_dict: Dict[int, Information] = {}
    self.info_hash_to_uuid_dict: Dict[int, int] = {}
    self._lock = threading.Lock()
Initializes a KnowledgeBase instance. Args: topic (str): The topic of the knowledge base. knowledge_base_lm: The language model used by the information insertion, node expansion, article generation, and summary modules. node_expansion_trigger_count (int): Number of information items in a node that triggers its expansion into subtopics. encoder (Encoder): The encoder used to embed information and the knowledge base structure.
__init__
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
def traverse_down(self, node):
    """
    Traverses the tree downward from the given node.

    Args:
        node (KnowledgeNode): The node to start the traversal from.

    Returns:
        list: A list of KnowledgeNode instances in the order they were visited.
    """
    nodes = []

    def _traverse(current_node):
        nodes.append(current_node)
        for child in current_node.get_children():
            _traverse(child)

    _traverse(node)
    return nodes
Traverses the tree downward from the given node. Args: node (KnowledgeNode): The node to start the traversal from. Returns: list: A list of KnowledgeNode instances in the order they were visited.
traverse_down
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
def traverse_up(self, node):
    """
    Traverses the tree upward from the given node.

    Args:
        node (KnowledgeNode): The node to start the traversal from.

    Returns:
        list: A list of KnowledgeNode instances in the order they were visited.
    """
    nodes = []
    while node is not None:
        nodes.append(node)
        node = node.get_parent()
    return nodes
Traverses the tree upward from the given node. Args: node (KnowledgeNode): The node to start the traversal from. Returns: list: A list of KnowledgeNode instances in the order they were visited.
traverse_up
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
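traverse_down is a pre-order depth-first walk and traverse_up climbs parent links. Since neither method touches self, they can be exercised unbound for illustration; the get_children/get_parent accessors are assumed to exist because the methods themselves call them:

# Hypothetical sketch; calls the methods unbound because `self` is unused.
root = KnowledgeNode(name="root")
a = root.add_child("A")
b = a.add_child("B")

print([n.name for n in KnowledgeBase.traverse_down(None, root)])  # expected: ['root', 'A', 'B']
print([n.name for n in KnowledgeBase.traverse_up(None, b)])       # expected: ['B', 'A', 'root']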
def insert_node(
    self,
    new_node_name,
    parent_node: Optional[KnowledgeNode] = None,
    duplicate_handling="skip",
):
    """
    Inserts a new node into the knowledge base under the specified parent node.

    Args:
        new_node_name (str): The name of the new node.
        parent_node (KnowledgeNode): The parent node. If None, the new node is inserted under the root.
        duplicate_handling (str): How to handle duplicate nodes. Options are "skip", "none", and "raise error".
    """
    if parent_node is None:
        parent_node = self.root
    return parent_node.add_child(new_node_name, duplicate_handling=duplicate_handling)
Inserts a new node into the knowledge base under the specified parent node. Args: new_node_name (str): The name of the new node. parent_node (KnowledgeNode): The parent node. If None, the new node is inserted under the root. duplicate_handling (str): How to handle duplicate nodes. Options are "skip", "none", and "raise error".
insert_node
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
def find_node(self, current_node, node_name):
    """
    Finds a node by name in the knowledge base.

    Args:
        current_node (KnowledgeNode): The node to start the search from.
        node_name (str): The name of the node to find.

    Returns:
        KnowledgeNode: The node with the specified name, or None if not found.
    """
    if current_node.name == node_name:
        return current_node
    for child in current_node.get_children():
        result = self.find_node(child, node_name)
        if result is not None:
            return result
    return None
Finds a node by name in the knowledge base. Args: current_node (KnowledgeNode): The node to start the search from. node_name (str): The name of the node to find. Returns: KnowledgeNode: The node with the specified name, or None if not found.
find_node
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
def insert_from_outline_string(self, outline_string, duplicate_handling="skip"):
    """
    Creates and inserts nodes into the knowledge base from a string outline.

    Args:
        outline_string (str): The outline string where each line starts with '#' denoting the level.
        duplicate_handling (str): How to handle duplicate nodes. Options are "skip", "none", and "raise error".
    """
    last_node_at_level = {}
    for line in outline_string.split("\n"):
        level = line.count("#")
        if level > 0:
            title = line.strip("# ").strip()
            if title.lower() in ["overview", "summary", "introduction"]:
                continue
            parent_node = None if level == 1 else last_node_at_level.get(level - 1)
            new_node = self.insert_node(
                new_node_name=title,
                parent_node=parent_node,
                duplicate_handling=duplicate_handling,
            )
            last_node_at_level[level] = new_node
            for deeper_level in list(last_node_at_level.keys()):
                if deeper_level > level:
                    del last_node_at_level[deeper_level]
Creates and inserts nodes into the knowledge base from a string outline. Args: outline_string (str): The outline string where each line starts with '#' denoting the level. duplicate_handling (str): How to handle duplicate nodes. Options are "skip", "none", and "raise error".
insert_from_outline_string
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
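A sketch of the outline format this method consumes. Heading depth is the number of '#' characters on the line, and "Overview", "Summary", and "Introduction" headings are skipped; kb stands for an already-constructed KnowledgeBase instance (hypothetical here):

# Hypothetical sketch; `kb` is an existing KnowledgeBase.
outline = """# History
## Origins
## Modern era
# Applications"""
kb.insert_from_outline_string(outline)
# Resulting top-level nodes: History (with children Origins and Modern era) and Applications.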
def find_node_contain_index(node, index):
    """
    Finds all nodes whose content contains the given citation index, searching downward from the given node.

    Args:
        node (KnowledgeNode): The node to start the search from.
        index (int): The citation index to look for.

    Returns:
        list: A list of KnowledgeNode instances whose content contains the index.
    """
    nodes = []

    def _traverse(current_node):
        if current_node is not None and index in current_node.content:
            nodes.append(current_node)
        for child in current_node.get_children():
            _traverse(child)

    _traverse(node)
    return nodes
Finds all nodes whose content contains the given citation index, searching downward from the given node. Args: node (KnowledgeNode): The node to start the search from. index (int): The citation index to look for. Returns: list: A list of KnowledgeNode instances whose content contains the index.
get_node_hierarchy_string.find_node_contain_index
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
def get_node_hierarchy_string(
    self,
    include_indent=False,
    include_full_path=False,
    include_hash_tag=True,
    include_node_content_count=False,
    cited_indices: Optional[List[int]] = None,
    root: Optional[KnowledgeNode] = None,
) -> str:
    def find_node_contain_index(node, index):
        """
        Finds all nodes whose content contains the given citation index, searching downward from the given node.

        Args:
            node (KnowledgeNode): The node to start the search from.
            index (int): The citation index to look for.

        Returns:
            list: A list of KnowledgeNode instances whose content contains the index.
        """
        nodes = []

        def _traverse(current_node):
            if current_node is not None and index in current_node.content:
                nodes.append(current_node)
            for child in current_node.get_children():
                _traverse(child)

        _traverse(node)
        return nodes

    paths_to_highlight = set()
    nodes_to_include = set()
    if cited_indices is not None:
        for index in cited_indices:
            for cur_node in find_node_contain_index(self.root, index):
                paths_to_highlight.add(" -> ".join(cur_node.get_path_from_root()))
                nodes_to_include.add(cur_node)
                nodes_to_include.update(cur_node.get_all_descendents())
                predecessors = cur_node.get_all_predecessors()
                for predecessor in predecessors:
                    nodes_to_include.update(predecessor.children)
                nodes_to_include.update(predecessors)

    def should_include_node(node):
        if cited_indices is None:
            return True
        return node in nodes_to_include

    def should_omit_child_nodes(node):
        if cited_indices is None:
            return False
        for child in node.children:
            if should_include_node(child):
                return False
        return True

    def helper(cur_root, level):
        to_return = []
        if cur_root is not None:
            should_include_current_node = should_include_node(cur_root)
            indent = "" if not include_indent else "\t" * (level - 1)
            full_path = " -> ".join(cur_root.get_path_from_root(root=root))
            node_info = cur_root.name if not include_full_path else full_path
            hash_tag = "#" * level + " " if include_hash_tag else ""
            content_count = (
                f" ({len(cur_root.content)})" if include_node_content_count else ""
            )
            special_note = (
                ""
                if cited_indices is None or full_path not in paths_to_highlight
                else " ⭐"
            )

            if should_include_current_node:
                to_return.append(
                    f"{indent}{hash_tag}{node_info}{content_count}{special_note}"
                )
                if should_omit_child_nodes(cur_root):
                    if len(cur_root.children) > 0:
                        # Compute the child indent directly instead of clobbering `indent`.
                        child_indent = "" if not include_indent else "\t" * level
                        to_return.append(f"{child_indent}...")
                else:
                    for child in cur_root.children:
                        to_return.extend(helper(child, level + 1))
        return to_return

    to_return = []
    if root is None and self.root is not None:
        for child in self.root.children:
            to_return.extend(helper(child, level=1))
    else:
        to_return.extend(helper(root, level=1))

    return "\n".join(to_return)
Renders the node hierarchy as a string, optionally with indentation, full paths, hash-tag headings, per-node content counts, and highlighting of nodes that contain cited indices.
get_node_hierarchy_string
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
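A sketch of how the rendering options combine, using the small outline tree from the earlier sketch; the exact strings depend on the flags (kb assumed as before):

# Hypothetical sketch of the rendered output.
print(kb.get_node_hierarchy_string(include_hash_tag=True, include_node_content_count=True))
# Expected shape:
# # History (0)
# ## Origins (0)
# ## Modern era (0)
# # Applications (0)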
def find_node_by_path(
    self,
    path: str,
    missing_node_handling="abort",
    root: Optional[KnowledgeNode] = None,
):
    """
    Returns the target node given a path string.

    Args:
        path (str): The path to the node, with node names connected by " -> ".
        missing_node_handling (str): How to handle missing nodes. Options are "abort", "create", and "raise error".

    Returns:
        KnowledgeNode: The target node.
    """
    node_names = path.split(" -> ")
    current_node = self.root if root is None else root

    for name in node_names[1:]:
        found_node = next(
            (child for child in current_node.children if child.name == name), None
        )
        if found_node is None:
            if missing_node_handling == "abort":
                return
            elif missing_node_handling == "create":
                new_node = current_node.add_child(child_node_name=name)
                current_node = new_node
            elif missing_node_handling == "raise error":
                structure = self.get_node_hierarchy_string(
                    include_indent=True,
                    include_full_path=False,
                    include_hash_tag=True,
                )
                raise Exception(
                    f"Insert information error. Unable to find node {{{name}}} under {{{current_node.name}}}\n{structure}"
                )
        else:
            current_node = found_node

    return current_node
Returns the target node given a path string. Args: path (str): The path to the node, with node names connected by " -> ". missing_node_handling (str): How to handle missing nodes. Options are "abort", "create", and "raise error". Returns: KnowledgeNode: The target node.
find_node_by_path
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
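A sketch of path lookup. Note that the first path segment names the starting root and is skipped by the loop, so paths should begin with "root" (or the name of the root argument):

# Hypothetical sketch; `kb` as above.
node = kb.find_node_by_path("root -> History -> Origins")  # None if missing ("abort")
node = kb.find_node_by_path(
    "root -> History -> Etymology", missing_node_handling="create"
)  # creates the missing "Etymology" node and returns it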
def insert_information(
    self,
    path: str,
    information: Information,
    missing_node_handling="abort",
    root: Optional[KnowledgeNode] = None,
):
    """
    Inserts information into the knowledge base at the specified path.

    Args:
        path (str): The placement path string, connected by " -> " linking the names of nodes.
        information (Information): The information to insert.
        missing_node_handling (str): How to handle missing nodes. Options are "abort", "create", and "raise error".

    Returns:
        int: The citation uuid assigned to the inserted information.
    """
    with self._lock:
        target_node: KnowledgeNode = self.find_node_by_path(
            path=path, missing_node_handling=missing_node_handling, root=root
        )
        information_hash = hash(information)
        if information.citation_uuid == -1:
            info_citation_uuid = self.info_hash_to_uuid_dict.get(
                information_hash, len(self.info_hash_to_uuid_dict) + 1
            )
            information.citation_uuid = info_citation_uuid
            self.info_hash_to_uuid_dict[information_hash] = info_citation_uuid
            self.info_uuid_to_info_dict[info_citation_uuid] = information
        if target_node is not None:
            self.info_uuid_to_info_dict[information.citation_uuid].meta[
                "placement"
            ] = " -> ".join(target_node.get_path_from_root())
            target_node.insert_information(information.citation_uuid)
        # The docstring promises the uuid of the inserted information, so return it.
        return information.citation_uuid
Inserts information into the knowledge base at the specified path. Args: path (str): The placement path string, connected by " -> " linking the names of nodes. information (Information): The information to insert. missing_node_handling (str): How to handle missing nodes. Options are "abort", "create", and "raise error". Returns: int: The citation uuid assigned to the inserted information.
insert_information
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
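A sketch of inserting an Information object (the class is defined later, in knowledge_storm/interface.py). The citation uuid is assigned on first insertion and deduplicated by the information's hash; capturing the returned uuid relies on the explicit return added above:

# Hypothetical sketch; Information fields as defined in interface.py.
info = Information(
    url="https://example.com/a",
    description="Example description",
    snippets=["Example snippet"],
    title="Example title",
)
uuid = kb.insert_information(
    path="root -> History", information=info, missing_node_handling="create"
)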
def trim_empty_leaf_nodes(self):
    """
    Trims all leaf nodes that do not have any content, repeating until every
    remaining leaf node has at least one piece of content.
    """

    def trim_node(node):
        if not node.children and not node.content:
            return True
        node.children = [child for child in node.children if not trim_node(child)]
        return not node.children and not node.content

    # Start the trimming process from the root
    while True:
        before_trim = len(self.get_all_leaf_nodes())
        trim_node(self.root)
        after_trim = len(self.get_all_leaf_nodes())
        if before_trim == after_trim:
            break
Trims all leaf nodes that do not have any content, repeating until every remaining leaf node has at least one piece of content.
trim_empty_leaf_nodes
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
def get_all_leaf_nodes(self):
    """
    Helper function to get all leaf nodes.

    Returns:
        List[KnowledgeNode]: A list of all leaf nodes in the knowledge base.
    """
    leaf_nodes = []

    def find_leaf_nodes(node):
        if not node.children:
            leaf_nodes.append(node)
        for child in node.children:
            find_leaf_nodes(child)

    find_leaf_nodes(self.root)
    return leaf_nodes
Helper function to get all leaf nodes. Returns: List[KnowledgeNode]: A list of all leaf nodes in the knowledge base.
get_all_leaf_nodes
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
def merge_single_child_nodes(self):
    """
    Merges content of a node with its single child and removes the child node.
    Iteratively does this from leaf nodes back to the root.
    """

    def merge_node(node):
        # Recursively merge children first
        for child in node.children:
            merge_node(child)

        # If the node has exactly one child, merge its content with the child and remove the child
        if len(node.children) == 1:
            single_child = node.children[0]
            node.content.update(single_child.content)
            node.children = single_child.children
            for grandchild in node.children:
                grandchild.parent = node

    merge_node(self.root)
Merges content of a node with its single child and removes the child node. Iteratively does this from leaf nodes back to the root.
merge_single_child_nodes
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
def reorganize(self):
    """
    Reorganizes the knowledge base through two main processes: top-down expansion and bottom-up cleaning.

    The reorganization process ensures that the knowledge base remains well-structured and relevant
    as new information is added. It consists of the following steps:
    1. Top-Down Expansion: Expands nodes that have accumulated significant amounts of information
       by creating subtopics, ensuring that each concept remains specific and manageable.
    2. Bottom-Up Cleaning: Cleans the knowledge base by removing empty leaf nodes (nodes with no
       supporting information) and merging nodes that have only a single child, simplifying the
       structure and maintaining clarity.
    """
    # pre-processing
    self.trim_empty_leaf_nodes()
    self.merge_single_child_nodes()
    # expand nodes
    self.expand_node_module(knowledge_base=self)
    # clean up
    self.trim_empty_leaf_nodes()
    self.merge_single_child_nodes()
    self.update_all_info_path()
Reorganizes the knowledge base through two main processes: top-down expansion and bottom-up cleaning. The reorganization process ensures that the knowledge base remains well-structured and relevant as new information is added. It consists of the following steps: 1. Top-Down Expansion: Expands nodes that have accumulated significant amounts of information by creating subtopics, ensuring that each concept remains specific and manageable. 2. Bottom-Up Cleaning: Cleans the knowledge base by removing empty leaf nodes (nodes with no supporting information) and merging nodes that have only a single child, simplifying the structure and maintaining clarity.
reorganize
python
stanford-oval/storm
knowledge_storm/dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/dataclass.py
MIT
def __init__(self, url, description, snippets, title, meta=None):
    """Initialize the Information object with detailed attributes.

    Args:
        url (str): The unique URL serving as the identifier for the information.
        description (str): Detailed description.
        snippets (list): List of brief excerpts or snippets.
        title (str): The title or headline of the information.
        meta (dict, optional): Additional metadata; defaults to an empty dict.
    """
    self.description = description
    self.snippets = snippets
    self.title = title
    self.url = url
    self.meta = meta if meta is not None else {}
    self.citation_uuid = -1
Initialize the Information object with detailed attributes. Args: url (str): The unique URL serving as the identifier for the information. description (str): Detailed description. snippets (list): List of brief excerpts or snippets. title (str): The title or headline of the information. meta (dict, optional): Additional metadata; defaults to an empty dict.
__init__
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
def _meta_str(self):
    """Generate a string representation of relevant meta information."""
    return f"Question: {self.meta.get('question', '')}, Query: {self.meta.get('query', '')}"
Generate a string representation of relevant meta information.
_meta_str
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
def _md5_hash(self, value):
    """Generate an MD5 hash for a given value."""
    if isinstance(value, (dict, list, tuple)):
        value = json.dumps(value, sort_keys=True)
    return hashlib.md5(str(value).encode("utf-8")).hexdigest()
Generate an MD5 hash for a given value.
_md5_hash
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
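The canonicalization step matters: containers are serialized with sort_keys=True so that logically equal dicts hash identically. A standalone sketch of the same logic:

import hashlib
import json

def md5_hash(value):
    # Mirror of Information._md5_hash: canonicalize containers before hashing.
    if isinstance(value, (dict, list, tuple)):
        value = json.dumps(value, sort_keys=True)
    return hashlib.md5(str(value).encode("utf-8")).hexdigest()

# Key order does not change the hash.
assert md5_hash({"b": 1, "a": 2}) == md5_hash({"a": 2, "b": 1})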
def from_dict(cls, info_dict):
    """Create an Information object from a dictionary.

    Usage: info = Information.from_dict(storm_info_dict)

    Args:
        info_dict (dict): A dictionary containing keys 'url', 'description',
            'snippets', and 'title' corresponding to the object's attributes.

    Returns:
        Information: An instance of Information.
    """
    info = cls(
        url=info_dict["url"],
        description=info_dict["description"],
        snippets=info_dict["snippets"],
        title=info_dict["title"],
        meta=info_dict.get("meta", None),
    )
    info.citation_uuid = int(info_dict.get("citation_uuid", -1))
    return info
Create an Information object from a dictionary. Usage: info = Information.from_dict(storm_info_dict) Args: info_dict (dict): A dictionary containing keys 'url', 'description', 'snippets', and 'title' corresponding to the object's attributes. Returns: Information: An instance of Information.
from_dict
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
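A sketch of the dictionary shape from_dict expects, with from_dict assumed to be a classmethod (as with KnowledgeNode.from_dict, the decorator is not captured in this row):

# Hypothetical sketch.
info = Information.from_dict(
    {
        "url": "https://example.com/a",
        "description": "Example description",
        "snippets": ["Example snippet"],
        "title": "Example title",
    }
)
assert info.citation_uuid == -1  # default when 'citation_uuid' is absent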
def __init__(self, section_name: str, content=None):
    """
    section_name: section heading in string format. E.g. Introduction, History, etc.
    content: content of the section. Up to you for design choice of the data structure.
    """
    self.section_name = section_name
    self.content = content
    self.children = []
    self.preference = None
section_name: section heading in string format. E.g. Introduction, History, etc. content: content of the section. Up to you for design choice of the data structure.
__init__
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
def find_section(
    self, node: ArticleSectionNode, name: str
) -> Optional[ArticleSectionNode]:
    """
    Return the node of the section given the section name.

    Args:
        node: the root node of the subtree to search.
        name: the section name to look for.

    Returns:
        Reference to the matching node, or None if no section name matches.
    """
    if node.section_name == name:
        return node
    for child in node.children:
        result = self.find_section(child, name)
        if result:
            return result
    return None
Return the node of the section given the section name. Args: node: the root node of the subtree to search. name: the section name to look for. Returns: reference to the matching node, or None if no section name matches.
find_section
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
def to_string(self) -> str:
    """
    Export Article object into string representation.
    """
Export Article object into string representation.
to_string
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
def get_outline_tree(self):
    """
    Generates a hierarchical tree structure representing the outline of the document.

    Returns:
        Dict[str, Dict]: A nested dictionary representing the hierarchical structure of the document's outline.
            Each key is a section name, and the value is another dictionary representing the child sections,
            recursively forming the tree structure of the document's outline. If a section has no subsections,
            its value is an empty dictionary.

    Example:
        Assuming a document with a structure like:
        - Introduction
            - Background
            - Objective
        - Methods
            - Data Collection
            - Analysis
        The method would return:
        {
            'Introduction': {
                'Background': {},
                'Objective': {}
            },
            'Methods': {
                'Data Collection': {},
                'Analysis': {}
            }
        }
    """

    def build_tree(node) -> Dict[str, Dict]:
        tree = {}
        for child in node.children:
            tree[child.section_name] = build_tree(child)
        return tree

    return build_tree(self.root)
Generates a hierarchical tree structure representing the outline of the document. Returns: Dict[str, Dict]: A nested dictionary representing the hierarchical structure of the document's outline. Each key is a section name, and the value is another dictionary representing the child sections, recursively forming the tree structure of the document's outline. If a section has no subsections, its value is an empty dictionary. Example: Assuming a document with a structure like: - Introduction - Background - Objective - Methods - Data Collection - Analysis The method would return: { 'Introduction': { 'Background': {}, 'Objective': {} }, 'Methods': { 'Data Collection': {}, 'Analysis': {} } }
get_outline_tree
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
def get_first_level_section_names(self) -> List[str]:
    """
    Get first level section names.
    """
    return [i.section_name for i in self.root.children]
Get first level section names
get_first_level_section_names
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
def from_string(cls, topic_name: str, article_text: str):
    """
    Create an instance of the Article object from a string.
    """
    pass
Create an instance of the Article object from a string
from_string
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
def __init__(self, retriever: Retriever):
    """
    Store args and finish initialization.
    """
    self.retriever = retriever
Store args and finish initialization.
__init__
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
def research(self, topic) -> InformationTable:
    """
    Curate information and knowledge for the given topic.

    Args:
        topic: topic of interest in natural language.

    Returns:
        collected_information: collected information in InformationTable type.
    """
    pass
Curate information and knowledge for the given topic Args: topic: topic of interest in natural language. Returns: collected_information: collected information in InformationTable type.
research
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
def generate_outline(
    self, topic: str, information_table: InformationTable, **kwargs
) -> Article:
    """
    Generate outline for the article. Required arguments include:
        topic: the topic of interest
        information_table: knowledge curation data generated from KnowledgeCurationModule

    More arguments could be
        1. draft outline
        2. user provided outline

    Returns:
        article_outline of type ArticleOutline
    """
    pass
Generate outline for the article. Required arguments include: topic: the topic of interest information_table: knowledge curation data generated from KnowledgeCurationModule More arguments could be 1. draft outline 2. user provided outline Returns: article_outline of type ArticleOutline
generate_outline
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
def generate_article(
    self,
    topic: str,
    information_table: InformationTable,
    article_with_outline: Article,
    **kwargs,
) -> Article:
    """
    Generate article. Required arguments include:
        topic: the topic of interest
        information_table: knowledge curation data generated from KnowledgeCurationModule
        article_with_outline: article with specified outline from OutlineGenerationModule
    """
    pass
Generate article. Required arguments include: topic: the topic of interest information_table: knowledge curation data generated from KnowledgeCurationModule article_with_outline: article with specified outline from OutlineGenerationModule
generate_article
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
def polish_article(self, topic: str, draft_article: Article, **kwargs) -> Article:
    """
    Polish article. Required arguments include:
        topic: the topic of interest
        draft_article: draft article from ArticleGenerationModule.
    """
    pass
Polish article. Required arguments include: topic: the topic of interest draft_article: draft article from ArticleGenerationModule.
polish_article
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
def log_execution_time(func):
    """Decorator to log the execution time of a function."""

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        start_time = time.time()
        result = func(self, *args, **kwargs)
        end_time = time.time()
        execution_time = end_time - start_time
        logger.info(f"{func.__name__} executed in {execution_time:.4f} seconds")
        self.time[func.__name__] = execution_time
        return result

    return wrapper
Decorator to log the execution time of a function.
log_execution_time
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
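A usage sketch: the decorator assumes the instance carries a `time` dict and that a module-level `logger` exists, and that log_execution_time itself is in scope (e.g., imported from knowledge_storm.interface):

import logging
import time

logger = logging.getLogger(__name__)

class Engine:  # hypothetical host class
    def __init__(self):
        self.time = {}  # the decorator records per-method durations here

    @log_execution_time
    def run_step(self):
        time.sleep(0.1)
        return "done"

engine = Engine()
engine.run_step()
print(engine.time["run_step"])  # roughly 0.1 seconds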
def log_execution_time_and_lm_rm_usage(self, func):
    """Decorator to log the execution time, language model usage, and retrieval model usage of a function."""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        execution_time = end_time - start_time
        self.time[func.__name__] = execution_time
        logger.info(f"{func.__name__} executed in {execution_time:.4f} seconds")
        self.lm_cost[func.__name__] = self.lm_configs.collect_and_reset_lm_usage()
        if hasattr(self, "retriever"):
            self.rm_cost[func.__name__] = (
                self.retriever.collect_and_reset_rm_usage()
            )
        return result

    return wrapper
Decorator to log the execution time, language model usage, and retrieval model usage of a function.
log_execution_time_and_lm_rm_usage
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
def apply_decorators(self):
    """Apply decorators to methods that need them."""
    methods_to_decorate = [
        method_name
        for method_name in dir(self)
        if callable(getattr(self, method_name)) and method_name.startswith("run_")
    ]
    for method_name in methods_to_decorate:
        original_method = getattr(self, method_name)
        decorated_method = self.log_execution_time_and_lm_rm_usage(original_method)
        setattr(self, method_name, decorated_method)
Apply decorators to methods that need them.
apply_decorators
python
stanford-oval/storm
knowledge_storm/interface.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/interface.py
MIT
def forward(
    self, query_or_queries: Union[str, List[str]], exclude_urls: List[str] = []
):
    """Search with You.com for self.k top passages for query or queries

    Args:
        query_or_queries (Union[str, List[str]]): The query or queries to search for.
        exclude_urls (List[str]): A list of urls to exclude from the search results.

    Returns:
        a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
    """
    queries = (
        [query_or_queries]
        if isinstance(query_or_queries, str)
        else query_or_queries
    )
    self.usage += len(queries)
    collected_results = []
    for query in queries:
        try:
            headers = {"X-API-Key": self.ydc_api_key}
            results = requests.get(
                "https://api.ydc-index.io/search",
                headers=headers,
                params={"query": query},  # let requests URL-encode the query
            ).json()

            authoritative_results = []
            # Guard against responses without a "hits" field before indexing into it.
            for r in results.get("hits", []):
                if self.is_valid_source(r["url"]) and r["url"] not in exclude_urls:
                    authoritative_results.append(r)
            collected_results.extend(authoritative_results[: self.k])
        except Exception as e:
            logging.error(f"Error occurs when searching query {query}: {e}")

    return collected_results
Search with You.com for self.k top passages for query or queries Args: query_or_queries (Union[str, List[str]]): The query or queries to search for. exclude_urls (List[str]): A list of urls to exclude from the search results. Returns: a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
forward
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
def __init__(
    self,
    bing_search_api_key=None,
    k=3,
    is_valid_source: Callable = None,
    min_char_count: int = 150,
    snippet_chunk_size: int = 1000,
    webpage_helper_max_threads=10,
    mkt="en-US",
    language="en",
    **kwargs,
):
    """
    Params:
        min_char_count: Minimum character count for the article to be considered valid.
        snippet_chunk_size: Maximum character count for each snippet.
        webpage_helper_max_threads: Maximum number of threads to use for webpage helper.
        mkt, language, **kwargs: Bing search API parameters.
        - Reference: https://learn.microsoft.com/en-us/bing/search-apis/bing-web-search/reference/query-parameters
    """
    super().__init__(k=k)
    if not bing_search_api_key and not os.environ.get("BING_SEARCH_API_KEY"):
        raise RuntimeError(
            "You must supply bing_search_api_key or set environment variable BING_SEARCH_API_KEY"
        )
    elif bing_search_api_key:
        self.bing_api_key = bing_search_api_key
    else:
        self.bing_api_key = os.environ["BING_SEARCH_API_KEY"]
    self.endpoint = "https://api.bing.microsoft.com/v7.0/search"
    self.params = {"mkt": mkt, "setLang": language, "count": k, **kwargs}
    self.webpage_helper = WebPageHelper(
        min_char_count=min_char_count,
        snippet_chunk_size=snippet_chunk_size,
        max_thread_num=webpage_helper_max_threads,
    )
    self.usage = 0

    # If not None, is_valid_source shall be a function that takes a URL and returns a boolean.
    if is_valid_source:
        self.is_valid_source = is_valid_source
    else:
        self.is_valid_source = lambda x: True
Params: min_char_count: Minimum character count for the article to be considered valid. snippet_chunk_size: Maximum character count for each snippet. webpage_helper_max_threads: Maximum number of threads to use for webpage helper. mkt, language, **kwargs: Bing search API parameters. - Reference: https://learn.microsoft.com/en-us/bing/search-apis/bing-web-search/reference/query-parameters
__init__
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
def forward(
    self, query_or_queries: Union[str, List[str]], exclude_urls: List[str] = []
):
    """Search with Bing for self.k top passages for query or queries

    Args:
        query_or_queries (Union[str, List[str]]): The query or queries to search for.
        exclude_urls (List[str]): A list of urls to exclude from the search results.

    Returns:
        a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
    """
    queries = (
        [query_or_queries]
        if isinstance(query_or_queries, str)
        else query_or_queries
    )
    self.usage += len(queries)

    url_to_results = {}

    headers = {"Ocp-Apim-Subscription-Key": self.bing_api_key}

    for query in queries:
        try:
            results = requests.get(
                self.endpoint, headers=headers, params={**self.params, "q": query}
            ).json()

            for d in results["webPages"]["value"]:
                if self.is_valid_source(d["url"]) and d["url"] not in exclude_urls:
                    url_to_results[d["url"]] = {
                        "url": d["url"],
                        "title": d["name"],
                        "description": d["snippet"],
                    }
        except Exception as e:
            logging.error(f"Error occurs when searching query {query}: {e}")

    valid_url_to_snippets = self.webpage_helper.urls_to_snippets(
        list(url_to_results.keys())
    )
    collected_results = []
    for url in valid_url_to_snippets:
        r = url_to_results[url]
        r["snippets"] = valid_url_to_snippets[url]["snippets"]
        collected_results.append(r)

    return collected_results
Search with Bing for self.k top passages for query or queries Args: query_or_queries (Union[str, List[str]]): The query or queries to search for. exclude_urls (List[str]): A list of urls to exclude from the search results. Returns: a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
forward
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
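A usage sketch, assuming the enclosing class is BingSearch in knowledge_storm/rm.py (the class name is not shown in this row) and that a valid API key is available:

# Hypothetical sketch; "YOUR_BING_KEY" is a placeholder.
rm = BingSearch(bing_search_api_key="YOUR_BING_KEY", k=3, mkt="en-US")
results = rm.forward("large language model agents", exclude_urls=[])
for r in results:
    print(r["title"], r["url"], len(r["snippets"]))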
def __init__(
    self,
    collection_name: str,
    embedding_model: str,
    device: str = "mps",
    k: int = 3,
):
    """
    Params:
        collection_name: Name of the Qdrant collection.
        embedding_model: Name of the Hugging Face embedding model.
        device: Device to run the embeddings model on, can be "mps", "cuda", "cpu".
        k: Number of top chunks to retrieve.
    """
    # Import here so the dependency stays optional; the docstring must come
    # before the import to remain attached to the function.
    from langchain_huggingface import HuggingFaceEmbeddings

    super().__init__(k=k)
    self.usage = 0

    # check if the collection is provided
    if not collection_name:
        raise ValueError("Please provide a collection name.")
    # check if the embedding model is provided
    if not embedding_model:
        raise ValueError("Please provide an embedding model.")

    model_kwargs = {"device": device}
    encode_kwargs = {"normalize_embeddings": True}
    self.model = HuggingFaceEmbeddings(
        model_name=embedding_model,
        model_kwargs=model_kwargs,
        encode_kwargs=encode_kwargs,
    )

    self.collection_name = collection_name
    self.client = None
    self.qdrant = None
Params: collection_name: Name of the Qdrant collection. embedding_model: Name of the Hugging Face embedding model. device: Device to run the embeddings model on, can be "mps", "cuda", "cpu". k: Number of top chunks to retrieve.
__init__
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
def _check_collection(self):
    """
    Check that the Qdrant collection exists and load it; raise an error if it does not.
    """
    from langchain_qdrant import Qdrant

    if self.client is None:
        raise ValueError("Qdrant client is not initialized.")
    if self.client.collection_exists(collection_name=f"{self.collection_name}"):
        print(
            f"Collection {self.collection_name} exists. Loading the collection..."
        )
        self.qdrant = Qdrant(
            client=self.client,
            collection_name=self.collection_name,
            embeddings=self.model,
        )
    else:
        raise ValueError(
            f"Collection {self.collection_name} does not exist. Please create the collection first."
        )
Check that the Qdrant collection exists and load it; raise an error if it does not.
_check_collection
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
def init_online_vector_db(self, url: str, api_key: str):
    """
    Initialize the Qdrant client that is connected to an online vector store with the given URL and API key.

    Args:
        url (str): URL of the Qdrant server.
        api_key (str): API key for the Qdrant server.
    """
    from qdrant_client import QdrantClient

    if api_key is None:
        if not os.getenv("QDRANT_API_KEY"):
            raise ValueError("Please provide an api key.")
        api_key = os.getenv("QDRANT_API_KEY")
    if url is None:
        raise ValueError("Please provide a url for the Qdrant server.")

    try:
        self.client = QdrantClient(url=url, api_key=api_key)
        self._check_collection()
    except Exception as e:
        raise ValueError(f"Error occurs when connecting to the server: {e}")
Initialize the Qdrant client that is connected to an online vector store with the given URL and API key. Args: url (str): URL of the Qdrant server. api_key (str): API key for the Qdrant server.
init_online_vector_db
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
def init_offline_vector_db(self, vector_store_path: str):
    """
    Initialize the Qdrant client that is connected to an offline vector store with the given vector store folder path.

    Args:
        vector_store_path (str): Path to the vector store.
    """
    from qdrant_client import QdrantClient

    if vector_store_path is None:
        raise ValueError("Please provide a folder path.")

    try:
        self.client = QdrantClient(path=vector_store_path)
        self._check_collection()
    except Exception as e:
        raise ValueError(f"Error occurs when loading the vector store: {e}")
Initialize the Qdrant client that is connected to an offline vector store with the given vector store folder path. Args: vector_store_path (str): Path to the vector store.
init_offline_vector_db
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
def get_vector_count(self):
    """
    Get the count of vectors in the collection.

    Returns:
        int: Number of vectors in the collection.
    """
    return self.qdrant.client.count(collection_name=self.collection_name)
Get the count of vectors in the collection. Returns: int: Number of vectors in the collection.
get_vector_count
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
def forward(self, query_or_queries: Union[str, List[str]], exclude_urls: List[str]):
    """
    Search in your data for self.k top passages for query or queries.

    Args:
        query_or_queries (Union[str, List[str]]): The query or queries to search for.
        exclude_urls (List[str]): Dummy parameter to match the interface. Does not have any effect.

    Returns:
        a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
    """
    queries = (
        [query_or_queries]
        if isinstance(query_or_queries, str)
        else query_or_queries
    )
    self.usage += len(queries)
    collected_results = []
    for query in queries:
        related_docs = self.qdrant.similarity_search_with_score(query, k=self.k)
        for doc, _score in related_docs:
            collected_results.append(
                {
                    "description": doc.metadata["description"],
                    "snippets": [doc.page_content],
                    "title": doc.metadata["title"],
                    "url": doc.metadata["url"],
                }
            )

    return collected_results
Search in your data for self.k top passages for query or queries. Args: query_or_queries (Union[str, List[str]]): The query or queries to search for. exclude_urls (List[str]): Dummy parameter to match the interface. Does not have any effect. Returns: a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
forward
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
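A usage sketch tying the pieces together, assuming the enclosing class is VectorRM and that a Qdrant collection whose payloads carry the expected metadata fields (title, url, description) already exists:

# Hypothetical sketch; collection name, model, and path are placeholders.
rm = VectorRM(collection_name="my_docs", embedding_model="BAAI/bge-m3", device="cpu", k=3)
rm.init_offline_vector_db(vector_store_path="./vector_store")
# or: rm.init_online_vector_db(url="https://my-qdrant:6333", api_key="YOUR_QDRANT_KEY")
for r in rm.forward("query about my documents", exclude_urls=[]):
    print(r["title"], r["url"])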
def __init__(
    self,
    serper_search_api_key=None,
    k=3,
    query_params=None,
    ENABLE_EXTRA_SNIPPET_EXTRACTION=False,
    min_char_count: int = 150,
    snippet_chunk_size: int = 1000,
    webpage_helper_max_threads=10,
):
    """
    Args:
        serper_search_api_key str: API key to run serper, can be found by creating an account on https://serper.dev/
        query_params (dict or list of dict): parameters in dictionary or list of dictionaries that has a max size of 100 that will be used to query.
            Commonly used fields are as follows (see more information in https://serper.dev/playground):
                q str: query that will be used with google search
                type str: type that will be used for browsing google. Types are search, images, video, maps, places, etc.
                gl str: Country that will be focused on for the search
                location str: Country where the search will originate from. All locations can be found here: https://api.serper.dev/locations.
                autocorrect bool: Enable autocorrect on the queries while searching, if query is misspelled, will be updated.
                results int: Max number of results per page.
                page int: Max number of pages per call.
                tbs str: date time range, automatically set to any time by default.
                qdr:h str: Date time range for the past hour.
                qdr:d str: Date time range for the past 24 hours.
                qdr:w str: Date time range for past week.
                qdr:m str: Date time range for past month.
                qdr:y str: Date time range for past year.
    """
    super().__init__(k=k)
    self.usage = 0
    self.ENABLE_EXTRA_SNIPPET_EXTRACTION = ENABLE_EXTRA_SNIPPET_EXTRACTION
    self.webpage_helper = WebPageHelper(
        min_char_count=min_char_count,
        snippet_chunk_size=snippet_chunk_size,
        max_thread_num=webpage_helper_max_threads,
    )

    if query_params is None:
        self.query_params = {"num": k, "autocorrect": True, "page": 1}
    else:
        self.query_params = query_params
        self.query_params.update({"num": k})

    # Resolve the API key from the argument or the environment, in that order.
    if not serper_search_api_key and not os.environ.get("SERPER_API_KEY"):
        raise RuntimeError(
            "You must supply a serper_search_api_key param or set environment variable SERPER_API_KEY"
        )
    self.serper_search_api_key = serper_search_api_key or os.environ["SERPER_API_KEY"]

    self.base_url = "https://google.serper.dev"
Args: serper_search_api_key str: API key to run serper, can be found by creating an account on https://serper.dev/ query_params (dict or list of dict): parameters in dictionary or list of dictionaries that has a max size of 100 that will be used to query. Commonly used fields are as follows (see more information in https://serper.dev/playground): q str: query that will be used with google search type str: type that will be used for browsing google. Types are search, images, video, maps, places, etc. gl str: Country that will be focused on for the search location str: Country where the search will originate from. All locations can be found here: https://api.serper.dev/locations. autocorrect bool: Enable autocorrect on the queries while searching, if query is misspelled, will be updated. results int: Max number of results per page. page int: Max number of pages per call. tbs str: date time range, automatically set to any time by default. qdr:h str: Date time range for the past hour. qdr:d str: Date time range for the past 24 hours. qdr:w str: Date time range for past week. qdr:m str: Date time range for past month. qdr:y str: Date time range for past year.
__init__
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
def forward(self, query_or_queries: Union[str, List[str]], exclude_urls: List[str]):
    """
    Calls the API and searches for the query passed in.

    Args:
        query_or_queries (Union[str, List[str]]): The query or queries to search for.
        exclude_urls (List[str]): Dummy parameter to match the interface. Does not have any effect.

    Returns:
        a list of dictionaries, each dictionary has keys of 'description', 'snippets' (list of strings), 'title', 'url'
    """
    queries = (
        [query_or_queries]
        if isinstance(query_or_queries, str)
        else query_or_queries
    )
    self.usage += len(queries)
    self.results = []
    for query in queries:
        if query == "Queries:":
            continue
        query_params = self.query_params

        # All available parameters can be found in the playground: https://serper.dev/playground
        # Sets the json value for query to be the query that is being parsed.
        query_params["q"] = query

        # Sets the type to be search, can be images, video, places, maps etc that Google provides.
        query_params["type"] = "search"

        self.result = self.serper_runner(query_params)
        self.results.append(self.result)

    # Array of dictionaries that will be used by Storm to create the jsons
    collected_results = []

    if self.ENABLE_EXTRA_SNIPPET_EXTRACTION:
        urls = []
        for result in self.results:
            organic_results = result.get("organic", [])
            for organic in organic_results:
                url = organic.get("link")
                if url:
                    urls.append(url)
        valid_url_to_snippets = self.webpage_helper.urls_to_snippets(urls)
    else:
        valid_url_to_snippets = {}

    for result in self.results:
        try:
            # An array of dictionaries that contains the snippets, title of the document and url that will be used.
            organic_results = result.get("organic")
            knowledge_graph = result.get("knowledgeGraph")
            for organic in organic_results:
                snippets = [organic.get("snippet")]
                if self.ENABLE_EXTRA_SNIPPET_EXTRACTION:
                    # Look up extra snippets by this result's own link rather than
                    # a stale loop variable from the URL-collection pass above.
                    snippets.extend(
                        valid_url_to_snippets.get(organic.get("link"), {}).get(
                            "snippets", []
                        )
                    )
                collected_results.append(
                    {
                        "snippets": snippets,
                        "title": organic.get("title"),
                        "url": organic.get("link"),
                        "description": (
                            knowledge_graph.get("description")
                            if knowledge_graph is not None
                            else ""
                        ),
                    }
                )
        except Exception:
            continue

    return collected_results
Calls the API and searches for the query passed in. Args: query_or_queries (Union[str, List[str]]): The query or queries to search for. exclude_urls (List[str]): Dummy parameter to match the interface. Does not have any effect. Returns: a list of dictionaries, each dictionary has keys of 'description', 'snippets' (list of strings), 'title', 'url'
forward
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
def forward(
    self, query_or_queries: Union[str, List[str]], exclude_urls: List[str] = []
):
    """Search with api.search.brave.com for self.k top passages for query or queries

    Args:
        query_or_queries (Union[str, List[str]]): The query or queries to search for.
        exclude_urls (List[str]): A list of urls to exclude from the search results.

    Returns:
        a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
    """
    queries = (
        [query_or_queries]
        if isinstance(query_or_queries, str)
        else query_or_queries
    )
    self.usage += len(queries)
    collected_results = []
    for query in queries:
        try:
            headers = {
                "Accept": "application/json",
                "Accept-Encoding": "gzip",
                "X-Subscription-Token": self.brave_search_api_key,
            }
            response = requests.get(
                "https://api.search.brave.com/res/v1/web/search",
                headers=headers,
                params={"result_filter": "web", "q": query},  # let requests URL-encode the query
            ).json()
            results = response.get("web", {}).get("results", [])

            for result in results:
                collected_results.append(
                    {
                        "snippets": result.get("extra_snippets", []),
                        "title": result.get("title"),
                        "url": result.get("url"),
                        "description": result.get("description"),
                    }
                )
        except Exception as e:
            logging.error(f"Error occurs when searching query {query}: {e}")

    return collected_results
Search with api.search.brave.com for self.k top passages for query or queries Args: query_or_queries (Union[str, List[str]]): The query or queries to search for. exclude_urls (List[str]): A list of urls to exclude from the search results. Returns: a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
forward
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
def __init__(
    self,
    searxng_api_url,
    searxng_api_key=None,
    k=3,
    is_valid_source: Callable = None,
):
    """Initialize the SearXNG search retriever.
    Please set up SearXNG according to https://docs.searxng.org/index.html.

    Args:
        searxng_api_url (str): The URL of the SearXNG API. Consult SearXNG documentation for details.
        searxng_api_key (str, optional): The API key for the SearXNG API. Defaults to None. Consult SearXNG documentation for details.
        k (int, optional): The number of top passages to retrieve. Defaults to 3.
        is_valid_source (Callable, optional): A function that takes a URL and returns a boolean indicating if the source is valid. Defaults to None.
    """
    super().__init__(k=k)
    if not searxng_api_url:
        raise RuntimeError("You must supply searxng_api_url")
    self.searxng_api_url = searxng_api_url
    self.searxng_api_key = searxng_api_key
    self.usage = 0

    if is_valid_source:
        self.is_valid_source = is_valid_source
    else:
        self.is_valid_source = lambda x: True
Initialize the SearXNG search retriever. Please set up SearXNG according to https://docs.searxng.org/index.html. Args: searxng_api_url (str): The URL of the SearXNG API. Consult SearXNG documentation for details. searxng_api_key (str, optional): The API key for the SearXNG API. Defaults to None. Consult SearXNG documentation for details. k (int, optional): The number of top passages to retrieve. Defaults to 3. is_valid_source (Callable, optional): A function that takes a URL and returns a boolean indicating if the source is valid. Defaults to None.
__init__
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
def forward(
    self, query_or_queries: Union[str, List[str]], exclude_urls: List[str] = []
):
    """Search with SearxNG for self.k top passages for query or queries

    Args:
        query_or_queries (Union[str, List[str]]): The query or queries to search for.
        exclude_urls (List[str]): A list of urls to exclude from the search results.

    Returns:
        a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
    """
    queries = (
        [query_or_queries]
        if isinstance(query_or_queries, str)
        else query_or_queries
    )
    self.usage += len(queries)
    collected_results = []
    headers = (
        {"Authorization": f"Bearer {self.searxng_api_key}"}
        if self.searxng_api_key
        else {}
    )

    for query in queries:
        try:
            params = {"q": query, "format": "json"}
            response = requests.get(
                self.searxng_api_url, headers=headers, params=params
            )
            results = response.json()

            for r in results["results"]:
                if self.is_valid_source(r["url"]) and r["url"] not in exclude_urls:
                    collected_results.append(
                        {
                            "description": r.get("content", ""),
                            "snippets": [r.get("content", "")],
                            "title": r.get("title", ""),
                            "url": r["url"],
                        }
                    )
        except Exception as e:
            logging.error(f"Error occurs when searching query {query}: {e}")

    return collected_results
Search with SearxNG for self.k top passages for query or queries Args: query_or_queries (Union[str, List[str]]): The query or queries to search for. exclude_urls (List[str]): A list of urls to exclude from the search results. Returns: a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
forward
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
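A minimal usage sketch for the SearXNG retriever above. The class name `SearXNG` and the import path are assumptions inferred from the record metadata (knowledge_storm/rm.py); the endpoint URL is a placeholder for a locally running SearXNG instance.

# Hypothetical usage; verify the class name in knowledge_storm/rm.py.
from knowledge_storm.rm import SearXNG

rm = SearXNG(searxng_api_url="http://localhost:8080/search", k=5)  # placeholder URL
results = rm.forward(["large language models", "retrieval augmented generation"])
for r in results:
    print(r["title"], r["url"])  # each dict also carries 'description' and 'snippets'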
def __init__(
    self,
    k: int = 3,
    is_valid_source: Callable = None,
    min_char_count: int = 150,
    snippet_chunk_size: int = 1000,
    webpage_helper_max_threads=10,
    safe_search: str = "On",
    region: str = "us-en",
):
    """
    Params:
        k: Number of top results to retrieve.
        is_valid_source: Optional function to filter valid sources.
        min_char_count: Minimum character count for the article to be considered valid.
        snippet_chunk_size: Maximum character count for each snippet.
        webpage_helper_max_threads: Maximum number of threads to use for webpage helper.
        safe_search: DuckDuckGo safe search setting.
        region: DuckDuckGo region setting.
    """
    super().__init__(k=k)
    try:
        from duckduckgo_search import DDGS
    except ImportError as err:
        raise ImportError(
            "Duckduckgo requires `pip install duckduckgo_search`."
        ) from err
    self.k = k
    self.webpage_helper = WebPageHelper(
        min_char_count=min_char_count,
        snippet_chunk_size=snippet_chunk_size,
        max_thread_num=webpage_helper_max_threads,
    )
    self.usage = 0
    # All params for search can be found here:
    # https://duckduckgo.com/duckduckgo-help-pages/settings/params/

    # Sets the backend to be api
    self.duck_duck_go_backend = "api"

    # Only gets safe search results
    self.duck_duck_go_safe_search = safe_search

    # Specifies the region that the search will use
    self.duck_duck_go_region = region

    # If not None, is_valid_source shall be a function that takes a URL and returns a boolean.
    if is_valid_source:
        self.is_valid_source = is_valid_source
    else:
        self.is_valid_source = lambda x: True

    # Import the duckduckgo search library found here: https://github.com/deedy5/duckduckgo_search
    self.ddgs = DDGS()

Params:
    k: Number of top results to retrieve.
    is_valid_source: Optional function to filter valid sources.
    min_char_count: Minimum character count for the article to be considered valid.
    snippet_chunk_size: Maximum character count for each snippet.
    webpage_helper_max_threads: Maximum number of threads to use for webpage helper.
    safe_search: DuckDuckGo safe search setting.
    region: DuckDuckGo region setting.
__init__
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
def forward(
    self, query_or_queries: Union[str, List[str]], exclude_urls: List[str] = []
):
    """Search with DuckDuckGoSearch for self.k top passages for query or queries

    Args:
        query_or_queries (Union[str, List[str]]): The query or queries to search for.
        exclude_urls (List[str]): A list of urls to exclude from the search results.

    Returns:
        a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
    """
    queries = (
        [query_or_queries]
        if isinstance(query_or_queries, str)
        else query_or_queries
    )
    self.usage += len(queries)

    collected_results = []

    for query in queries:
        # list of dicts that will be parsed to return
        results = self.request(query)

        for d in results:
            # assert d is dict
            if not isinstance(d, dict):
                print(f"Invalid result: {d}\n")
                continue

            try:
                # ensure keys are present
                url = d.get("href", None)
                title = d.get("title", None)
                description = d.get("description", title)
                snippets = [d.get("body", None)]

                # raise exception of missing key(s)
                if not all([url, title, description, snippets]):
                    raise ValueError(f"Missing key(s) in result: {d}")
                if self.is_valid_source(url) and url not in exclude_urls:
                    result = {
                        "url": url,
                        "title": title,
                        "description": description,
                        "snippets": snippets,
                    }
                    collected_results.append(result)
                else:
                    print(f"invalid source {url} or url in exclude_urls")
            except Exception as e:
                # Report the raw item `d`: `result` is only bound once an item
                # has passed validation, so it may be undefined in this branch.
                print(f"Error occurs when processing result {d}: {e}\n")

    return collected_results

Search with DuckDuckGoSearch for self.k top passages for query or queries

Args:
    query_or_queries (Union[str, List[str]]): The query or queries to search for.
    exclude_urls (List[str]): A list of urls to exclude from the search results.

Returns:
    a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
forward
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
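A usage sketch for the DuckDuckGo retriever above. The class name `DuckDuckGoSearchRM` is an assumption based on the repository layout; `duckduckgo_search` must be installed.

# Hypothetical usage; requires `pip install duckduckgo_search`.
from knowledge_storm.rm import DuckDuckGoSearchRM

rm = DuckDuckGoSearchRM(k=3, safe_search="On", region="us-en")
results = rm.forward("open-source metasearch engines", exclude_urls=[])
print(f"collected {len(results)} results")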
def __init__(
    self,
    tavily_search_api_key=None,
    k: int = 3,
    is_valid_source: Callable = None,
    min_char_count: int = 150,
    snippet_chunk_size: int = 1000,
    webpage_helper_max_threads=10,
    include_raw_content=False,
):
    """
    Params:
        tavily_search_api_key str: API key for tavily that can be retrieved from https://tavily.com/
        min_char_count: Minimum character count for the article to be considered valid.
        snippet_chunk_size: Maximum character count for each snippet.
        webpage_helper_max_threads: Maximum number of threads to use for webpage helper.
        include_raw_content bool: Boolean that is used to determine if the full text should be returned.
    """
    super().__init__(k=k)
    try:
        from tavily import TavilyClient
    except ImportError as err:
        raise ImportError("Tavily requires `pip install tavily-python`.") from err

    if not tavily_search_api_key and not os.environ.get("TAVILY_API_KEY"):
        raise RuntimeError(
            "You must supply tavily_search_api_key or set environment variable TAVILY_API_KEY"
        )
    elif tavily_search_api_key:
        self.tavily_search_api_key = tavily_search_api_key
    else:
        self.tavily_search_api_key = os.environ["TAVILY_API_KEY"]

    self.k = k
    self.webpage_helper = WebPageHelper(
        min_char_count=min_char_count,
        snippet_chunk_size=snippet_chunk_size,
        max_thread_num=webpage_helper_max_threads,
    )
    self.usage = 0

    # Creates client instance that will use search. Full search params are here:
    # https://docs.tavily.com/docs/python-sdk/tavily-search/examples
    self.tavily_client = TavilyClient(api_key=self.tavily_search_api_key)

    self.include_raw_content = include_raw_content

    # If not None, is_valid_source shall be a function that takes a URL and returns a boolean.
    if is_valid_source:
        self.is_valid_source = is_valid_source
    else:
        self.is_valid_source = lambda x: True

Params:
    tavily_search_api_key str: API key for tavily that can be retrieved from https://tavily.com/
    min_char_count: Minimum character count for the article to be considered valid.
    snippet_chunk_size: Maximum character count for each snippet.
    webpage_helper_max_threads: Maximum number of threads to use for webpage helper.
    include_raw_content bool: Boolean that is used to determine if the full text should be returned.
__init__
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
def forward(
    self, query_or_queries: Union[str, List[str]], exclude_urls: List[str] = []
):
    """Search with TavilySearch for self.k top passages for query or queries

    Args:
        query_or_queries (Union[str, List[str]]): The query or queries to search for.
        exclude_urls (List[str]): A list of urls to exclude from the search results.

    Returns:
        a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
    """
    queries = (
        [query_or_queries]
        if isinstance(query_or_queries, str)
        else query_or_queries
    )
    self.usage += len(queries)

    collected_results = []

    for query in queries:
        # Pass the search options through to the client. `max_results` and
        # `include_raw_content` are the documented Tavily search parameters.
        args = {
            "max_results": self.k,
            "include_raw_content": self.include_raw_content,
        }

        # list of dicts that will be parsed to return
        responseData = self.tavily_client.search(query, **args)
        results = responseData.get("results")
        for d in results:
            # assert d is dict
            if not isinstance(d, dict):
                print(f"Invalid result: {d}\n")
                continue

            try:
                # ensure keys are present
                url = d.get("url", None)
                title = d.get("title", None)
                description = d.get("content", None)
                snippets = []
                # Tavily returns the full page text under "raw_content" when
                # include_raw_content is set; fall back to the short "content".
                if d.get("raw_content"):
                    snippets.append(d.get("raw_content"))
                else:
                    snippets.append(d.get("content"))

                # raise exception of missing key(s)
                if not all([url, title, description, snippets]):
                    raise ValueError(f"Missing key(s) in result: {d}")
                if self.is_valid_source(url) and url not in exclude_urls:
                    result = {
                        "url": url,
                        "title": title,
                        "description": description,
                        "snippets": snippets,
                    }
                    collected_results.append(result)
                else:
                    print(f"invalid source {url} or url in exclude_urls")
            except Exception as e:
                # Report the raw item `d`; `result` may be unbound here.
                print(f"Error occurs when processing result {d}: {e}\n")

    return collected_results

Search with TavilySearch for self.k top passages for query or queries

Args:
    query_or_queries (Union[str, List[str]]): The query or queries to search for.
    exclude_urls (List[str]): A list of urls to exclude from the search results.

Returns:
    a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
forward
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
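A usage sketch for the Tavily retriever above, assuming the class is named `TavilySearchRM` and that TAVILY_API_KEY is set in the environment.

# Hypothetical usage; requires `pip install tavily-python` and a Tavily API key.
import os
from knowledge_storm.rm import TavilySearchRM

rm = TavilySearchRM(
    tavily_search_api_key=os.environ["TAVILY_API_KEY"],
    k=3,
    include_raw_content=True,  # populate snippets from full page text when available
)
for r in rm.forward("quantum error correction"):
    print(r["title"], "-", r["url"])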
def __init__(
    self,
    google_search_api_key=None,
    google_cse_id=None,
    k=3,
    is_valid_source: Callable = None,
    min_char_count: int = 150,
    snippet_chunk_size: int = 1000,
    webpage_helper_max_threads=10,
):
    """
    Params:
        google_search_api_key: Google API key. Check out https://developers.google.com/custom-search/v1/overview
            "API key" section
        google_cse_id: Custom search engine ID. Check out https://developers.google.com/custom-search/v1/overview
            "Search engine ID" section
        k: Number of top results to retrieve.
        is_valid_source: Optional function to filter valid sources.
        min_char_count: Minimum character count for the article to be considered valid.
        snippet_chunk_size: Maximum character count for each snippet.
        webpage_helper_max_threads: Maximum number of threads to use for webpage helper.
    """
    super().__init__(k=k)
    try:
        from googleapiclient.discovery import build
    except ImportError as err:
        raise ImportError(
            "GoogleSearch requires `pip install google-api-python-client`."
        ) from err
    if not google_search_api_key and not os.environ.get("GOOGLE_SEARCH_API_KEY"):
        raise RuntimeError(
            "You must supply google_search_api_key or set the GOOGLE_SEARCH_API_KEY environment variable"
        )
    if not google_cse_id and not os.environ.get("GOOGLE_CSE_ID"):
        raise RuntimeError(
            "You must supply google_cse_id or set the GOOGLE_CSE_ID environment variable"
        )

    self.google_search_api_key = (
        google_search_api_key or os.environ["GOOGLE_SEARCH_API_KEY"]
    )
    self.google_cse_id = google_cse_id or os.environ["GOOGLE_CSE_ID"]

    if is_valid_source:
        self.is_valid_source = is_valid_source
    else:
        self.is_valid_source = lambda x: True

    self.service = build(
        "customsearch", "v1", developerKey=self.google_search_api_key
    )
    self.webpage_helper = WebPageHelper(
        min_char_count=min_char_count,
        snippet_chunk_size=snippet_chunk_size,
        max_thread_num=webpage_helper_max_threads,
    )
    self.usage = 0

Params:
    google_search_api_key: Google API key. Check out https://developers.google.com/custom-search/v1/overview
        "API key" section
    google_cse_id: Custom search engine ID. Check out https://developers.google.com/custom-search/v1/overview
        "Search engine ID" section
    k: Number of top results to retrieve.
    is_valid_source: Optional function to filter valid sources.
    min_char_count: Minimum character count for the article to be considered valid.
    snippet_chunk_size: Maximum character count for each snippet.
    webpage_helper_max_threads: Maximum number of threads to use for webpage helper.
__init__
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
def forward(
    self, query_or_queries: Union[str, List[str]], exclude_urls: List[str] = []
):
    """Search using Google Custom Search API for self.k top results for query or queries.

    Args:
        query_or_queries (Union[str, List[str]]): The query or queries to search for.
        exclude_urls (List[str]): A list of URLs to exclude from the search results.

    Returns:
        A list of dicts, each dict has keys: 'title', 'url', 'snippet', 'description'.
    """
    queries = (
        [query_or_queries]
        if isinstance(query_or_queries, str)
        else query_or_queries
    )
    self.usage += len(queries)
    url_to_results = {}

    for query in queries:
        try:
            response = (
                self.service.cse()
                .list(
                    q=query,
                    cx=self.google_cse_id,
                    num=self.k,
                )
                .execute()
            )

            for item in response.get("items", []):
                if (
                    self.is_valid_source(item["link"])
                    and item["link"] not in exclude_urls
                ):
                    url_to_results[item["link"]] = {
                        "title": item["title"],
                        "url": item["link"],
                        # "snippet": item.get("snippet", ""),  # Google search snippet is very short.
                        "description": item.get("snippet", ""),
                    }

        except Exception as e:
            logging.error(f"Error occurred while searching query {query}: {e}")

    valid_url_to_snippets = self.webpage_helper.urls_to_snippets(
        list(url_to_results.keys())
    )
    collected_results = []
    for url in valid_url_to_snippets:
        r = url_to_results[url]
        r["snippets"] = valid_url_to_snippets[url]["snippets"]
        collected_results.append(r)

    return collected_results

Search using Google Custom Search API for self.k top results for query or queries.

Args:
    query_or_queries (Union[str, List[str]]): The query or queries to search for.
    exclude_urls (List[str]): A list of URLs to exclude from the search results.

Returns:
    A list of dicts, each dict has keys: 'title', 'url', 'snippet', 'description'.
forward
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
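A usage sketch for the Google Custom Search retriever above, assuming the class is named `GoogleSearch`. Both credentials come from the Programmable Search Engine setup linked in the docstring.

# Hypothetical usage; requires `pip install google-api-python-client`.
import os
from knowledge_storm.rm import GoogleSearch

rm = GoogleSearch(
    google_search_api_key=os.environ["GOOGLE_SEARCH_API_KEY"],
    google_cse_id=os.environ["GOOGLE_CSE_ID"],
    k=5,
)
results = rm.forward(["STORM automated wiki article generation"])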
def __init__(
    self,
    azure_ai_search_api_key=None,
    azure_ai_search_url=None,
    azure_ai_search_index_name=None,
    k=3,
    is_valid_source: Callable = None,
):
    """
    Params:
        azure_ai_search_api_key: Azure AI Search API key. Check out https://learn.microsoft.com/en-us/azure/search/search-security-api-keys?tabs=rest-use%2Cportal-find%2Cportal-query "API key" section
        azure_ai_search_url: Custom Azure AI Search Endpoint URL. Check out https://learn.microsoft.com/en-us/azure/search/search-create-service-portal#name-the-service
        azure_ai_search_index_name: Custom Azure AI Search Index Name. Check out https://learn.microsoft.com/en-us/azure/search/search-how-to-create-search-index?tabs=portal
        k: Number of top results to retrieve.
        is_valid_source: Optional function to filter valid sources.
    """
    super().__init__(k=k)
    try:
        from azure.core.credentials import AzureKeyCredential
        from azure.search.documents import SearchClient
    except ImportError as err:
        raise ImportError(
            "AzureAISearch requires `pip install azure-search-documents`."
        ) from err

    if not azure_ai_search_api_key and not os.environ.get(
        "AZURE_AI_SEARCH_API_KEY"
    ):
        raise RuntimeError(
            "You must supply azure_ai_search_api_key or set environment variable AZURE_AI_SEARCH_API_KEY"
        )
    elif azure_ai_search_api_key:
        self.azure_ai_search_api_key = azure_ai_search_api_key
    else:
        self.azure_ai_search_api_key = os.environ["AZURE_AI_SEARCH_API_KEY"]

    if not azure_ai_search_url and not os.environ.get("AZURE_AI_SEARCH_URL"):
        raise RuntimeError(
            "You must supply azure_ai_search_url or set environment variable AZURE_AI_SEARCH_URL"
        )
    elif azure_ai_search_url:
        self.azure_ai_search_url = azure_ai_search_url
    else:
        self.azure_ai_search_url = os.environ["AZURE_AI_SEARCH_URL"]

    if not azure_ai_search_index_name and not os.environ.get(
        "AZURE_AI_SEARCH_INDEX_NAME"
    ):
        raise RuntimeError(
            "You must supply azure_ai_search_index_name or set environment variable AZURE_AI_SEARCH_INDEX_NAME"
        )
    elif azure_ai_search_index_name:
        self.azure_ai_search_index_name = azure_ai_search_index_name
    else:
        self.azure_ai_search_index_name = os.environ["AZURE_AI_SEARCH_INDEX_NAME"]

    self.usage = 0

    # If not None, is_valid_source shall be a function that takes a URL and returns a boolean.
    if is_valid_source:
        self.is_valid_source = is_valid_source
    else:
        self.is_valid_source = lambda x: True

Params:
    azure_ai_search_api_key: Azure AI Search API key. Check out https://learn.microsoft.com/en-us/azure/search/search-security-api-keys?tabs=rest-use%2Cportal-find%2Cportal-query "API key" section
    azure_ai_search_url: Custom Azure AI Search Endpoint URL. Check out https://learn.microsoft.com/en-us/azure/search/search-create-service-portal#name-the-service
    azure_ai_search_index_name: Custom Azure AI Search Index Name. Check out https://learn.microsoft.com/en-us/azure/search/search-how-to-create-search-index?tabs=portal
    k: Number of top results to retrieve.
    is_valid_source: Optional function to filter valid sources.
__init__
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
def forward(
    self, query_or_queries: Union[str, List[str]], exclude_urls: List[str] = []
):
    """Search with Azure AI Search for self.k top passages for query or queries

    Args:
        query_or_queries (Union[str, List[str]]): The query or queries to search for.
        exclude_urls (List[str]): A list of urls to exclude from the search results.

    Returns:
        a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
    """
    try:
        from azure.core.credentials import AzureKeyCredential
        from azure.search.documents import SearchClient
    except ImportError as err:
        raise ImportError(
            "AzureAISearch requires `pip install azure-search-documents`."
        ) from err

    queries = (
        [query_or_queries]
        if isinstance(query_or_queries, str)
        else query_or_queries
    )
    self.usage += len(queries)
    collected_results = []
    client = SearchClient(
        self.azure_ai_search_url,
        self.azure_ai_search_index_name,
        AzureKeyCredential(self.azure_ai_search_api_key),
    )
    for query in queries:
        try:
            # https://learn.microsoft.com/en-us/python/api/azure-search-documents/azure.search.documents.searchclient?view=azure-python#azure-search-documents-searchclient-search
            # Retrieve self.k results per query, as the docstring promises.
            results = client.search(search_text=query, top=self.k)

            for result in results:
                document = {
                    "url": result["metadata_storage_path"],
                    "title": result["title"],
                    "description": "N/A",
                    "snippets": [result["chunk"]],
                }
                collected_results.append(document)
        except Exception as e:
            logging.error(f"Error occurs when searching query {query}: {e}")

    return collected_results

Search with Azure AI Search for self.k top passages for query or queries

Args:
    query_or_queries (Union[str, List[str]]): The query or queries to search for.
    exclude_urls (List[str]): A list of urls to exclude from the search results.

Returns:
    a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
forward
python
stanford-oval/storm
knowledge_storm/rm.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/rm.py
MIT
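A usage sketch for the Azure AI Search retriever above, assuming the class is named `AzureAISearch`. All three identifiers are placeholders for a provisioned search service.

# Hypothetical usage; requires `pip install azure-search-documents`.
from knowledge_storm.rm import AzureAISearch

rm = AzureAISearch(
    azure_ai_search_api_key="<admin-or-query-key>",                   # placeholder
    azure_ai_search_url="https://<service-name>.search.windows.net",  # placeholder
    azure_ai_search_index_name="<index-name>",                        # placeholder
    k=3,
)
results = rm.forward("internal knowledge base query")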
def init_openai_model(
    self,
    openai_api_key: str,
    azure_api_key: str,
    openai_type: Literal["openai", "azure"],
    api_base: Optional[str] = None,
    api_version: Optional[str] = None,
    temperature: Optional[float] = 1.0,
    top_p: Optional[float] = 0.9,
):
    """Legacy: Corresponding to the original setup in the NAACL'24 paper."""
    azure_kwargs = {
        "api_key": azure_api_key,
        "temperature": temperature,
        "top_p": top_p,
        "api_base": api_base,
        "api_version": api_version,
    }

    openai_kwargs = {
        "api_key": openai_api_key,
        "temperature": temperature,
        "top_p": top_p,
        "api_base": None,
    }
    if openai_type and openai_type == "openai":
        self.conv_simulator_lm = LitellmModel(
            model="gpt-4o-mini-2024-07-18", max_tokens=500, **openai_kwargs
        )
        self.question_asker_lm = LitellmModel(
            model="gpt-4o-mini-2024-07-18", max_tokens=500, **openai_kwargs
        )
        # 1/12/2024: Update gpt-4 to gpt-4-1106-preview. (Currently keep the original setup when using azure.)
        self.outline_gen_lm = LitellmModel(
            model="gpt-4-0125-preview", max_tokens=400, **openai_kwargs
        )
        self.article_gen_lm = LitellmModel(
            model="gpt-4o-2024-05-13", max_tokens=700, **openai_kwargs
        )
        self.article_polish_lm = LitellmModel(
            model="gpt-4o-2024-05-13", max_tokens=4000, **openai_kwargs
        )
    elif openai_type and openai_type == "azure":
        # Every model in the Azure branch uses the Azure credentials.
        self.conv_simulator_lm = LitellmModel(
            model="azure/gpt-4o-mini-2024-07-18",
            max_tokens=500,
            **azure_kwargs,
            model_type="chat",
        )
        self.question_asker_lm = LitellmModel(
            model="azure/gpt-4o-mini-2024-07-18",
            max_tokens=500,
            **azure_kwargs,
            model_type="chat",
        )
        self.outline_gen_lm = LitellmModel(
            model="azure/gpt-4o", max_tokens=400, **azure_kwargs, model_type="chat"
        )
        self.article_gen_lm = LitellmModel(
            model="azure/gpt-4o-mini-2024-07-18",
            max_tokens=700,
            **azure_kwargs,
            model_type="chat",
        )
        self.article_polish_lm = LitellmModel(
            model="azure/gpt-4o-mini-2024-07-18",
            max_tokens=4000,
            **azure_kwargs,
            model_type="chat",
        )
    else:
        logging.warning(
            "No valid OpenAI API provider is provided. Cannot use default LLM configurations."
        )
Legacy: Corresponding to the original setup in the NAACL'24 paper.
init_openai_model
python
stanford-oval/storm
knowledge_storm/storm_wiki/engine.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/engine.py
MIT
def post_run(self):
    """
    Post-run operations, including:
    1. Dumping the run configuration.
    2. Dumping the LLM call history.
    """
    config_log = self.lm_configs.log()
    FileIOHelper.dump_json(
        config_log, os.path.join(self.article_output_dir, "run_config.json")
    )

    llm_call_history = self.lm_configs.collect_and_reset_lm_history()
    with open(
        os.path.join(self.article_output_dir, "llm_call_history.jsonl"), "w"
    ) as f:
        for call in llm_call_history:
            if "kwargs" in call:
                call.pop(
                    "kwargs"
                )  # All kwargs are dumped together to run_config.json.
            f.write(json.dumps(call) + "\n")

Post-run operations, including:
1. Dumping the run configuration.
2. Dumping the LLM call history.
post_run
python
stanford-oval/storm
knowledge_storm/storm_wiki/engine.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/engine.py
MIT
def run(
    self,
    topic: str,
    ground_truth_url: str = "",
    do_research: bool = True,
    do_generate_outline: bool = True,
    do_generate_article: bool = True,
    do_polish_article: bool = True,
    remove_duplicate: bool = False,
    callback_handler: BaseCallbackHandler = BaseCallbackHandler(),
):
    """
    Run the STORM pipeline.

    Args:
        topic: The topic to research.
        ground_truth_url: A ground truth URL including a curated article about the topic. The URL will be excluded.
        do_research: If True, research the topic through information-seeking conversation;
            if False, expect conversation_log.json and raw_search_results.json to exist in the output directory.
        do_generate_outline: If True, generate an outline for the topic;
            if False, expect storm_gen_outline.txt to exist in the output directory.
        do_generate_article: If True, generate a curated article for the topic;
            if False, expect storm_gen_article.txt to exist in the output directory.
        do_polish_article: If True, polish the article by adding a summarization section and
            (optionally) removing duplicated content.
        remove_duplicate: If True, remove duplicated content.
        callback_handler: A callback handler to handle the intermediate results.
    """
    assert (
        do_research
        or do_generate_outline
        or do_generate_article
        or do_polish_article
    ), makeStringRed(
        "No action is specified. Please set at least one of --do-research, --do-generate-outline, --do-generate-article, --do-polish-article"
    )

    self.topic = topic
    self.article_dir_name = truncate_filename(
        topic.replace(" ", "_").replace("/", "_")
    )
    self.article_output_dir = os.path.join(
        self.args.output_dir, self.article_dir_name
    )
    os.makedirs(self.article_output_dir, exist_ok=True)

    # research module
    information_table: StormInformationTable = None
    if do_research:
        information_table = self.run_knowledge_curation_module(
            ground_truth_url=ground_truth_url, callback_handler=callback_handler
        )
    # outline generation module
    outline: StormArticle = None
    if do_generate_outline:
        # load information table if it's not initialized
        if information_table is None:
            information_table = self._load_information_table_from_local_fs(
                os.path.join(self.article_output_dir, "conversation_log.json")
            )
        outline = self.run_outline_generation_module(
            information_table=information_table, callback_handler=callback_handler
        )

    # article generation module
    draft_article: StormArticle = None
    if do_generate_article:
        if information_table is None:
            information_table = self._load_information_table_from_local_fs(
                os.path.join(self.article_output_dir, "conversation_log.json")
            )
        if outline is None:
            outline = self._load_outline_from_local_fs(
                topic=topic,
                outline_local_path=os.path.join(
                    self.article_output_dir, "storm_gen_outline.txt"
                ),
            )
        draft_article = self.run_article_generation_module(
            outline=outline,
            information_table=information_table,
            callback_handler=callback_handler,
        )

    # article polishing module
    if do_polish_article:
        if draft_article is None:
            draft_article_path = os.path.join(
                self.article_output_dir, "storm_gen_article.txt"
            )
            url_to_info_path = os.path.join(
                self.article_output_dir, "url_to_info.json"
            )
            draft_article = self._load_draft_article_from_local_fs(
                topic=topic,
                draft_article_path=draft_article_path,
                url_to_info_path=url_to_info_path,
            )
        self.run_article_polishing_module(
            draft_article=draft_article, remove_duplicate=remove_duplicate
        )

Run the STORM pipeline.

Args:
    topic: The topic to research.
    ground_truth_url: A ground truth URL including a curated article about the topic. The URL will be excluded.
    do_research: If True, research the topic through information-seeking conversation;
        if False, expect conversation_log.json and raw_search_results.json to exist in the output directory.
    do_generate_outline: If True, generate an outline for the topic;
        if False, expect storm_gen_outline.txt to exist in the output directory.
    do_generate_article: If True, generate a curated article for the topic;
        if False, expect storm_gen_article.txt to exist in the output directory.
    do_polish_article: If True, polish the article by adding a summarization section and
        (optionally) removing duplicated content.
    remove_duplicate: If True, remove duplicated content.
    callback_handler: A callback handler to handle the intermediate results.
run
python
stanford-oval/storm
knowledge_storm/storm_wiki/engine.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/engine.py
MIT
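The `run` method above is the single entry point that chains the four pipeline stages. Below is a sketch of driving it end to end, based on the package's documented usage; the retriever `rm` can be any of the classes from knowledge_storm/rm.py, and the LM configs must be populated before running.

# Sketch of the full pipeline; class names follow the knowledge_storm package exports.
from knowledge_storm import STORMWikiRunner, STORMWikiRunnerArguments, STORMWikiLMConfigs

lm_configs = STORMWikiLMConfigs()
# ... populate lm_configs (e.g., via init_openai_model as shown above) ...
args = STORMWikiRunnerArguments(output_dir="./results")
runner = STORMWikiRunner(args, lm_configs, rm)  # rm: any retriever instance
runner.run(
    topic="Tardigrades",
    do_research=True,
    do_generate_outline=True,
    do_generate_article=True,
    do_polish_article=True,
)
runner.post_run()  # dumps run_config.json and llm_call_history.jsonl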
def generate_outline(
    self,
    topic: str,
    information_table: StormInformationTable,
    old_outline: Optional[StormArticle] = None,
    callback_handler: BaseCallbackHandler = None,
    return_draft_outline=False,
) -> Union[StormArticle, Tuple[StormArticle, StormArticle]]:
    """
    Generates an outline for an article based on the specified topic and the information
    gathered during the knowledge curation stage. This method can optionally return both
    the final article outline and a draft outline if required.

    Args:
        topic (str): The topic of the article.
        information_table (StormInformationTable): The information table containing the collected information.
        old_outline (Optional[StormArticle]): An optional previous version of the article outline that can
            be used for reference or comparison. Defaults to None.
        callback_handler (BaseCallbackHandler): An optional callback handler that can be used to trigger
            custom callbacks at various stages of the outline generation process, such as when the
            information organization starts. Defaults to None.
        return_draft_outline (bool): A flag indicating whether the method should return both the final
            article outline and a draft version of the outline. If False, only the final article outline
            is returned. Defaults to False.

    Returns:
        Union[StormArticle, Tuple[StormArticle, StormArticle]]: Depending on the value of
            `return_draft_outline`, this method returns either a single `StormArticle` object containing
            the final outline or a tuple of two `StormArticle` objects, the first containing the final
            outline and the second containing the draft outline.
    """
    if callback_handler is not None:
        callback_handler.on_information_organization_start()

    concatenated_dialogue_turns = sum(
        [conv for (_, conv) in information_table.conversations], []
    )
    result = self.write_outline(
        topic=topic,
        dlg_history=concatenated_dialogue_turns,
        callback_handler=callback_handler,
    )
    article_with_outline_only = StormArticle.from_outline_str(
        topic=topic, outline_str=result.outline
    )
    article_with_draft_outline_only = StormArticle.from_outline_str(
        topic=topic, outline_str=result.old_outline
    )
    if not return_draft_outline:
        return article_with_outline_only
    return article_with_outline_only, article_with_draft_outline_only

Generates an outline for an article based on the specified topic and the information
gathered during the knowledge curation stage. This method can optionally return both
the final article outline and a draft outline if required.

Args:
    topic (str): The topic of the article.
    information_table (StormInformationTable): The information table containing the collected information.
    old_outline (Optional[StormArticle]): An optional previous version of the article outline that can
        be used for reference or comparison. Defaults to None.
    callback_handler (BaseCallbackHandler): An optional callback handler that can be used to trigger
        custom callbacks at various stages of the outline generation process, such as when the
        information organization starts. Defaults to None.
    return_draft_outline (bool): A flag indicating whether the method should return both the final
        article outline and a draft version of the outline. If False, only the final article outline
        is returned. Defaults to False.

Returns:
    Union[StormArticle, Tuple[StormArticle, StormArticle]]: Depending on the value of
        `return_draft_outline`, this method returns either a single `StormArticle` object containing
        the final outline or a tuple of two `StormArticle` objects, the first containing the final
        outline and the second containing the draft outline.
generate_outline
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/outline_generation.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/outline_generation.py
MIT
def forward(
    self,
    topic: str,
    persona: str,
    ground_truth_url: str,
    callback_handler: BaseCallbackHandler,
):
    """
    topic: The topic to research.
    persona: The persona of the Wikipedia writer.
    ground_truth_url: The ground_truth_url will be excluded from search to avoid ground truth leakage in evaluation.
    """
    dlg_history: List[DialogueTurn] = []
    for _ in range(self.max_turn):
        user_utterance = self.wiki_writer(
            topic=topic, persona=persona, dialogue_turns=dlg_history
        ).question
        if user_utterance == "":
            logging.error("Simulated Wikipedia writer utterance is empty.")
            break
        if user_utterance.startswith("Thank you so much for your help!"):
            break
        expert_output = self.topic_expert(
            topic=topic, question=user_utterance, ground_truth_url=ground_truth_url
        )

        dlg_turn = DialogueTurn(
            agent_utterance=expert_output.answer,
            user_utterance=user_utterance,
            search_queries=expert_output.queries,
            search_results=expert_output.searched_results,
        )

        dlg_history.append(dlg_turn)
        callback_handler.on_dialogue_turn_end(dlg_turn=dlg_turn)

    return dspy.Prediction(dlg_history=dlg_history)

topic: The topic to research.
persona: The persona of the Wikipedia writer.
ground_truth_url: The ground_truth_url will be excluded from search to avoid ground truth leakage in evaluation.
forward
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/knowledge_curation.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/knowledge_curation.py
MIT
def __init__(
    self,
    retriever: Retriever,
    persona_generator: Optional[StormPersonaGenerator],
    conv_simulator_lm: Union[dspy.dsp.LM, dspy.dsp.HFModel],
    question_asker_lm: Union[dspy.dsp.LM, dspy.dsp.HFModel],
    max_search_queries_per_turn: int,
    search_top_k: int,
    max_conv_turn: int,
    max_thread_num: int,
):
    """
    Store args and finish initialization.
    """
    self.retriever = retriever
    self.persona_generator = persona_generator
    self.conv_simulator_lm = conv_simulator_lm
    self.search_top_k = search_top_k
    self.max_thread_num = max_thread_num
    self.conv_simulator = ConvSimulator(
        topic_expert_engine=conv_simulator_lm,
        question_asker_engine=question_asker_lm,
        retriever=retriever,
        max_search_queries_per_turn=max_search_queries_per_turn,
        search_top_k=search_top_k,
        max_turn=max_conv_turn,
    )
Store args and finish initialization.
__init__
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/knowledge_curation.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/knowledge_curation.py
MIT
def _run_conversation(
    self,
    conv_simulator,
    topic,
    ground_truth_url,
    considered_personas,
    callback_handler: BaseCallbackHandler,
) -> List[Tuple[str, List[DialogueTurn]]]:
    """
    Executes multiple conversation simulations concurrently, each with a different persona,
    and collects their dialog histories. The dialog history of each conversation is cleaned
    up before being stored.

    Parameters:
        conv_simulator (callable): The function to simulate conversations. It must accept four
            parameters: `topic`, `ground_truth_url`, `persona`, and `callback_handler`, and return
            an object that has a `dlg_history` attribute.
        topic (str): The topic of conversation for the simulations.
        ground_truth_url (str): The URL to the ground truth data related to the conversation topic.
        considered_personas (list): A list of personas under which the conversation simulations will
            be conducted. Each persona is passed to `conv_simulator` individually.
        callback_handler (callable): A callback function that is passed to `conv_simulator`. It should
            handle any callbacks or events during the simulation.

    Returns:
        list of tuples: A list where each tuple contains a persona and its corresponding cleaned dialog
            history (`dlg_history`) from the conversation simulation.
    """

    conversations = []

    def run_conv(persona):
        return conv_simulator(
            topic=topic,
            ground_truth_url=ground_truth_url,
            persona=persona,
            callback_handler=callback_handler,
        )

    max_workers = min(self.max_thread_num, len(considered_personas))
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_persona = {
            executor.submit(run_conv, persona): persona
            for persona in considered_personas
        }

        if streamlit_connection:
            # Ensure the logging context is correct when connecting with Streamlit frontend.
            for t in executor._threads:
                add_script_run_ctx(t)

        for future in as_completed(future_to_persona):
            persona = future_to_persona[future]
            conv = future.result()
            conversations.append(
                (persona, ArticleTextProcessing.clean_up_citation(conv).dlg_history)
            )

    return conversations

Executes multiple conversation simulations concurrently, each with a different persona,
and collects their dialog histories. The dialog history of each conversation is cleaned
up before being stored.

Parameters:
    conv_simulator (callable): The function to simulate conversations. It must accept four
        parameters: `topic`, `ground_truth_url`, `persona`, and `callback_handler`, and return
        an object that has a `dlg_history` attribute.
    topic (str): The topic of conversation for the simulations.
    ground_truth_url (str): The URL to the ground truth data related to the conversation topic.
    considered_personas (list): A list of personas under which the conversation simulations will
        be conducted. Each persona is passed to `conv_simulator` individually.
    callback_handler (callable): A callback function that is passed to `conv_simulator`. It should
        handle any callbacks or events during the simulation.

Returns:
    list of tuples: A list where each tuple contains a persona and its corresponding cleaned dialog
        history (`dlg_history`) from the conversation simulation.
_run_conversation
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/knowledge_curation.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/knowledge_curation.py
MIT
def research(
    self,
    topic: str,
    ground_truth_url: str,
    callback_handler: BaseCallbackHandler,
    max_perspective: int = 0,
    disable_perspective: bool = True,
    return_conversation_log=False,
) -> Union[StormInformationTable, Tuple[StormInformationTable, Dict]]:
    """
    Curate information and knowledge for the given topic.

    Args:
        topic: topic of interest in natural language.
        ground_truth_url: URL to exclude from search to avoid ground truth leakage.
        callback_handler: handler notified at each stage of the research process.
        max_perspective: maximum number of personas to generate, in addition to the default one.
        disable_perspective: if True, run a single conversation without perspective-guided personas.
        return_conversation_log: if True, also return a dict logging the conversations.

    Returns:
        collected_information: collected information in InformationTable type.
    """
    # identify personas
    callback_handler.on_identify_perspective_start()
    considered_personas = []
    if disable_perspective:
        considered_personas = [""]
    else:
        considered_personas = self._get_considered_personas(
            topic=topic, max_num_persona=max_perspective
        )
    callback_handler.on_identify_perspective_end(perspectives=considered_personas)

    # run conversation
    callback_handler.on_information_gathering_start()
    conversations = self._run_conversation(
        conv_simulator=self.conv_simulator,
        topic=topic,
        ground_truth_url=ground_truth_url,
        considered_personas=considered_personas,
        callback_handler=callback_handler,
    )

    information_table = StormInformationTable(conversations)
    callback_handler.on_information_gathering_end()
    if return_conversation_log:
        return information_table, StormInformationTable.construct_log_dict(
            conversations
        )
    return information_table

Curate information and knowledge for the given topic.

Args:
    topic: topic of interest in natural language.
    ground_truth_url: URL to exclude from search to avoid ground truth leakage.
    callback_handler: handler notified at each stage of the research process.
    max_perspective: maximum number of personas to generate, in addition to the default one.
    disable_perspective: if True, run a single conversation without perspective-guided personas.
    return_conversation_log: if True, also return a dict logging the conversations.

Returns:
    collected_information: collected information in InformationTable type.
research
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/knowledge_curation.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/knowledge_curation.py
MIT
def get_wiki_page_title_and_toc(url):
    """Get the main title and table of contents from a URL of a Wikipedia page."""

    response = requests.get(url)
    soup = BeautifulSoup(response.content, "html.parser")

    # Get the main title from the first h1 tag
    main_title = soup.find("h1").text.replace("[edit]", "").strip().replace("\xa0", " ")

    toc = ""
    levels = []
    excluded_sections = {
        "Contents",
        "See also",
        "Notes",
        "References",
        "External links",
    }

    # Start processing from h2 to exclude the main title from TOC
    for header in soup.find_all(["h2", "h3", "h4", "h5", "h6"]):
        level = int(
            header.name[1]
        )  # Extract the numeric part of the header tag (e.g., '2' from 'h2')
        section_title = header.text.replace("[edit]", "").strip().replace("\xa0", " ")
        if section_title in excluded_sections:
            continue

        while levels and level <= levels[-1]:
            levels.pop()
        levels.append(level)

        indentation = " " * (len(levels) - 1)
        toc += f"{indentation}{section_title}\n"

    return main_title, toc.strip()

Get the main title and table of contents from a URL of a Wikipedia page.
get_wiki_page_title_and_toc
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/persona_generator.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/persona_generator.py
MIT
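A usage sketch for `get_wiki_page_title_and_toc`; the Wikipedia URL is illustrative.

# Fetch a page title and an indented table of contents.
title, toc = get_wiki_page_title_and_toc("https://en.wikipedia.org/wiki/Tardigrade")
print(title)  # main page title from the first <h1>
print(toc)    # section names, indented by nesting depth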
def generate_persona(self, topic: str, max_num_persona: int = 3) -> List[str]:
    """
    Generates a list of personas based on the provided topic, up to a maximum number specified.

    This method first creates personas using the underlying `create_writer_with_persona` instance
    and then prepends a default 'Basic fact writer' persona to the list before returning it.
    The number of personas returned is limited to `max_num_persona`, excluding the default persona.

    Args:
        topic (str): The topic for which personas are to be generated.
        max_num_persona (int): The maximum number of personas to generate, excluding the
            default 'Basic fact writer' persona.

    Returns:
        List[str]: A list of persona descriptions, including the default 'Basic fact writer' persona
            and up to `max_num_persona` additional personas generated based on the topic.
    """
    personas = self.create_writer_with_persona(topic=topic)
    default_persona = "Basic fact writer: Basic fact writer focusing on broadly covering the basic facts about the topic."
    considered_personas = [default_persona] + personas.personas[:max_num_persona]
    return considered_personas

Generates a list of personas based on the provided topic, up to a maximum number specified.

This method first creates personas using the underlying `create_writer_with_persona` instance
and then prepends a default 'Basic fact writer' persona to the list before returning it.
The number of personas returned is limited to `max_num_persona`, excluding the default persona.

Args:
    topic (str): The topic for which personas are to be generated.
    max_num_persona (int): The maximum number of personas to generate, excluding the
        default 'Basic fact writer' persona.

Returns:
    List[str]: A list of persona descriptions, including the default 'Basic fact writer' persona
        and up to `max_num_persona` additional personas generated based on the topic.
generate_persona
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/persona_generator.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/persona_generator.py
MIT
def polish_article(
    self, topic: str, draft_article: StormArticle, remove_duplicate: bool = False
) -> StormArticle:
    """
    Polish article.

    Args:
        topic (str): The topic of the article.
        draft_article (StormArticle): The draft article.
        remove_duplicate (bool): Whether to use one additional LM call to remove duplicates from the article.
    """
    article_text = draft_article.to_string()
    polish_result = self.polish_page(
        topic=topic, draft_page=article_text, polish_whole_page=remove_duplicate
    )
    lead_section = f"# summary\n{polish_result.lead_section}"
    polished_article = "\n\n".join([lead_section, polish_result.page])
    polished_article_dict = ArticleTextProcessing.parse_article_into_dict(
        polished_article
    )
    polished_article = copy.deepcopy(draft_article)
    polished_article.insert_or_create_section(article_dict=polished_article_dict)
    polished_article.post_processing()
    return polished_article

Polish article.

Args:
    topic (str): The topic of the article.
    draft_article (StormArticle): The draft article.
    remove_duplicate (bool): Whether to use one additional LM call to remove duplicates from the article.
polish_article
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/article_polish.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/article_polish.py
MIT
def on_identify_perspective_start(self, **kwargs):
    """Run when the perspective identification starts."""
    pass
Run when the perspective identification starts.
on_identify_perspective_start
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/callback.py
MIT
def on_identify_perspective_end(self, perspectives: list[str], **kwargs):
    """Run when the perspective identification finishes."""
    pass
Run when the perspective identification finishes.
on_identify_perspective_end
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/callback.py
MIT
def on_information_gathering_start(self, **kwargs):
    """Run when the information gathering starts."""
    pass
Run when the information gathering starts.
on_information_gathering_start
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/callback.py
MIT
def on_dialogue_turn_end(self, dlg_turn, **kwargs):
    """Run when a question asking and answering turn finishes."""
    pass
Run when a question asking and answering turn finishes.
on_dialogue_turn_end
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/callback.py
MIT
def on_information_gathering_end(self, **kwargs):
    """Run when the information gathering finishes."""
    pass
Run when the information gathering finishes.
on_information_gathering_end
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/callback.py
MIT
def on_information_organization_start(self, **kwargs):
    """Run when the information organization starts."""
    pass
Run when the information organization starts.
on_information_organization_start
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/callback.py
MIT
def on_direct_outline_generation_end(self, outline: str, **kwargs):
    """Run when the direct outline generation finishes."""
    pass
Run when the direct outline generation finishes.
on_direct_outline_generation_end
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/callback.py
MIT
def on_outline_refinement_end(self, outline: str, **kwargs):
    """Run when the outline refinement finishes."""
    pass
Run when the outline refinement finishes.
on_outline_refinement_end
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/callback.py
MIT
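The callback records above define no-op hooks on `BaseCallbackHandler`. A minimal custom handler overrides only the events of interest; the class name here is illustrative.

class PrintingCallbackHandler(BaseCallbackHandler):
    """Illustrative handler that logs pipeline progress to stdout."""

    def on_identify_perspective_end(self, perspectives: list[str], **kwargs):
        print(f"Identified {len(perspectives)} perspectives")

    def on_dialogue_turn_end(self, dlg_turn, **kwargs):
        print(f"Turn finished with {len(dlg_turn.search_results)} search results")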
def log(self):
    """
    Returns a json object that contains all information inside `self`
    """
    return OrderedDict(
        {
            "agent_utterance": self.agent_utterance,
            "user_utterance": self.user_utterance,
            "search_queries": self.search_queries,
            "search_results": [data.to_dict() for data in self.search_results],
        }
    )
Returns a json object that contains all information inside `self`
log
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/storm_dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/storm_dataclass.py
MIT
def find_section(
    self, node: ArticleSectionNode, name: str
) -> Optional[ArticleSectionNode]:
    """
    Return the node of the section given the section name.

    Args:
        node: the root node of the subtree to search.
        name: the section name to look for.

    Returns:
        reference to the node, or None if the section name has no match
    """
    if node.section_name == name:
        return node
    for child in node.children:
        result = self.find_section(child, name)
        if result:
            return result
    return None

Return the node of the section given the section name.

Args:
    node: the root node of the subtree to search.
    name: the section name to look for.

Returns:
    reference to the node, or None if the section name has no match
find_section
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/storm_dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/storm_dataclass.py
MIT
def _merge_new_info_to_references(
    self, new_info_list: List[Information], index_to_keep=None
) -> Dict[int, int]:
    """
    Merges new storm information into existing references and updates the citation index mapping.

    Args:
        new_info_list (List[Information]): A list of dictionaries representing new storm information.
        index_to_keep (List[int]): A list of indices of the new_info_list to keep. If None, keep all.

    Returns:
        Dict[int, int]: A dictionary mapping the index of each storm information piece in the input
            list to its unified citation index in the references.
    """
    citation_idx_mapping = {}
    for idx, storm_info in enumerate(new_info_list):
        if index_to_keep is not None and idx not in index_to_keep:
            continue
        url = storm_info.url
        if url not in self.reference["url_to_unified_index"]:
            self.reference["url_to_unified_index"][url] = (
                len(self.reference["url_to_unified_index"]) + 1
            )  # The citation index starts from 1.
            self.reference["url_to_info"][url] = storm_info
        else:
            existing_snippets = self.reference["url_to_info"][url].snippets
            existing_snippets.extend(storm_info.snippets)
            self.reference["url_to_info"][url].snippets = list(
                set(existing_snippets)
            )
        citation_idx_mapping[idx + 1] = self.reference["url_to_unified_index"][
            url
        ]  # The citation index starts from 1.
    return citation_idx_mapping

Merges new storm information into existing references and updates the citation index mapping.

Args:
    new_info_list (List[Information]): A list of dictionaries representing new storm information.
    index_to_keep (List[int]): A list of indices of the new_info_list to keep. If None, keep all.

Returns:
    Dict[int, int]: A dictionary mapping the index of each storm information piece in the input
        list to its unified citation index in the references.
_merge_new_info_to_references
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/storm_dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/storm_dataclass.py
MIT
def update_section(
    self,
    current_section_content: str,
    current_section_info_list: List[Information],
    parent_section_name: Optional[str] = None,
) -> Optional[ArticleSectionNode]:
    """
    Add new section to the article.

    Args:
        current_section_content: the new section content, including its heading, in string format.
        current_section_info_list: the list of Information cited by the new section; unused citations are trimmed.
        parent_section_name: under which parent section to add the new one. Default to root.

    Returns:
        the ArticleSectionNode for current section if successfully created / updated. Otherwise None.
    """
    if current_section_info_list is not None:
        references = set(
            [int(x) for x in re.findall(r"\[(\d+)\]", current_section_content)]
        )
        # for any reference number greater than max number of references, delete the reference
        if len(references) > 0:
            max_ref_num = max(references)
            if max_ref_num > len(current_section_info_list):
                for i in range(len(current_section_info_list), max_ref_num + 1):
                    current_section_content = current_section_content.replace(
                        f"[{i}]", ""
                    )
                    if i in references:
                        references.remove(i)
        # for any reference that is not used, trim it from current_section_info_list
        index_to_keep = [i - 1 for i in references]
        citation_mapping = self._merge_new_info_to_references(
            current_section_info_list, index_to_keep
        )
        current_section_content = ArticleTextProcessing.update_citation_index(
            current_section_content, citation_mapping
        )

    if parent_section_name is None:
        parent_section_name = self.root.section_name
    article_dict = ArticleTextProcessing.parse_article_into_dict(
        current_section_content
    )
    self.insert_or_create_section(
        article_dict=article_dict,
        parent_section_name=parent_section_name,
        trim_children=False,
    )

Add new section to the article.

Args:
    current_section_content: the new section content, including its heading, in string format.
    current_section_info_list: the list of Information cited by the new section; unused citations are trimmed.
    parent_section_name: under which parent section to add the new one. Default to root.

Returns:
    the ArticleSectionNode for current section if successfully created / updated. Otherwise None.
update_section
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/storm_dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/storm_dataclass.py
MIT
def get_outline_as_list(
    self,
    root_section_name: Optional[str] = None,
    add_hashtags: bool = False,
    include_root: bool = True,
) -> List[str]:
    """
    Get outline of the article as a list.

    Args:
        root_section_name: get all section names in pre-order traversal ordering in the subtree
            of root_section_name. For example:
                #root
                ##section1
                ###section1.1
                ###section1.2
                ##section2
            article.get_outline_as_list("section1") returns [section1, section1.1, section1.2]

    Returns:
        list of section and subsection names.
    """
    if root_section_name is None:
        section_node = self.root
    else:
        section_node = self.find_section(self.root, root_section_name)
        # Always include an explicitly requested subtree root. (Comparing the
        # node against self.root.section_name, a string, is always unequal.)
        include_root = include_root or section_node is not self.root
    if section_node is None:
        return []
    result = []

    def preorder_traverse(node, level):
        prefix = (
            "#" * level if add_hashtags else ""
        )  # Adjust level if excluding root
        result.append(
            f"{prefix} {node.section_name}".strip()
            if add_hashtags
            else node.section_name
        )
        for child in node.children:
            preorder_traverse(child, level + 1)

    # Adjust the initial level based on whether root is included and hashtags are added
    if include_root:
        preorder_traverse(section_node, level=1)
    else:
        for child in section_node.children:
            preorder_traverse(child, level=1)
    return result

Get outline of the article as a list.

Args:
    root_section_name: get all section names in pre-order traversal ordering in the subtree
        of root_section_name. For example:
            #root
            ##section1
            ###section1.1
            ###section1.2
            ##section2
        article.get_outline_as_list("section1") returns [section1, section1.1, section1.2]

Returns:
    list of section and subsection names.
get_outline_as_list
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/storm_dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/storm_dataclass.py
MIT
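A sketch of the pre-order traversal above, building the article from an outline string first (see `from_outline_str` below); the topic and section names are made up.

article = StormArticle.from_outline_str(
    topic="Taipei",
    outline_str="# Taipei\n## History\n### Early settlement\n## Geography",
)
# Subtree traversal rooted at a named section:
print(article.get_outline_as_list(root_section_name="History", add_hashtags=True))
# -> ['# History', '## Early settlement']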
def to_string(self) -> str:
    """
    Render the full article (headings and content) as a single string.

    Returns:
        the article as a string, with '#'-prefixed section headings.
    """
    result = []

    def preorder_traverse(node, level):
        prefix = "#" * level
        result.append(f"{prefix} {node.section_name}".strip())
        result.append(node.content)
        for child in node.children:
            preorder_traverse(child, level + 1)

    for child in self.root.children:
        preorder_traverse(child, level=1)
    result = [i.strip() for i in result if i is not None and i.strip()]
    return "\n\n".join(result)

Render the full article (headings and content) as a single string.

Returns:
    the article as a string, with '#'-prefixed section headings.
to_string
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/storm_dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/storm_dataclass.py
MIT
def get_first_level_section_names(self) -> List[str]:
    """
    Get first level section names
    """
    return [i.section_name for i in self.root.children]
Get first level section names
get_first_level_section_names
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/storm_dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/storm_dataclass.py
MIT
def from_outline_file(cls, topic: str, file_path: str):
    """
    Create StormArticle class instance from outline file.
    """
    outline_str = FileIOHelper.load_str(file_path)
    return StormArticle.from_outline_str(topic=topic, outline_str=outline_str)
Create StormArticle class instance from outline file.
from_outline_file
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/storm_dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/storm_dataclass.py
MIT
def from_outline_str(cls, topic: str, outline_str: str):
    """
    Create StormArticle class instance from outline only string.
    """
    lines = []
    try:
        lines = outline_str.split("\n")
        lines = [line.strip() for line in lines if line.strip()]
    except Exception:
        pass

    instance = cls(topic)
    if lines:
        # If the first heading repeats the topic, drop it and shift all levels up by one.
        adjust_level = lines[0].startswith("#") and lines[0].replace(
            "#", ""
        ).strip().lower() == topic.lower().replace("_", " ")
        if adjust_level:
            lines = lines[1:]
        node_stack = [(0, instance.root)]  # Stack to keep track of (level, node)

        for line in lines:
            level = line.count("#") - adjust_level
            section_name = line.replace("#", "").strip()

            if section_name == topic:
                continue

            new_node = ArticleSectionNode(section_name)
            while node_stack and level <= node_stack[-1][0]:
                node_stack.pop()
            node_stack[-1][1].add_child(new_node)
            node_stack.append((level, new_node))
    return instance
Create StormArticle class instance from outline only string.
from_outline_str
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/storm_dataclass.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/storm_dataclass.py
MIT
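A sketch of `from_outline_str` on a hashtag outline; because the first heading repeats the topic, it is dropped and the levels shift up by one.

outline_str = "# Taipei\n## History\n## Geography"
article = StormArticle.from_outline_str(topic="Taipei", outline_str=outline_str)
print(article.get_first_level_section_names())  # -> ['History', 'Geography']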
def generate_article( self, topic: str, information_table: StormInformationTable, article_with_outline: StormArticle, callback_handler: BaseCallbackHandler = None, ) -> StormArticle: """ Generate article for the topic based on the information table and article outline. Args: topic (str): The topic of the article. information_table (StormInformationTable): The information table containing the collected information. article_with_outline (StormArticle): The article with specified outline. callback_handler (BaseCallbackHandler): An optional callback handler that can be used to trigger custom callbacks at various stages of the article generation process. Defaults to None. """ information_table.prepare_table_for_retrieval() if article_with_outline is None: article_with_outline = StormArticle(topic_name=topic) sections_to_write = article_with_outline.get_first_level_section_names() section_output_dict_collection = [] if len(sections_to_write) == 0: logging.error( f"No outline for {topic}. Will directly search with the topic." ) section_output_dict = self.generate_section( topic=topic, section_name=topic, information_table=information_table, section_outline="", section_query=[topic], ) section_output_dict_collection = [section_output_dict] else: with concurrent.futures.ThreadPoolExecutor( max_workers=self.max_thread_num ) as executor: future_to_sec_title = {} for section_title in sections_to_write: # We don't want to write a separate introduction section. if section_title.lower().strip() == "introduction": continue # We don't want to write a separate conclusion section. if section_title.lower().strip().startswith( "conclusion" ) or section_title.lower().strip().startswith("summary"): continue section_query = article_with_outline.get_outline_as_list( root_section_name=section_title, add_hashtags=False ) queries_with_hashtags = article_with_outline.get_outline_as_list( root_section_name=section_title, add_hashtags=True ) section_outline = "\n".join(queries_with_hashtags) future_to_sec_title[ executor.submit( self.generate_section, topic, section_title, information_table, section_outline, section_query, ) ] = section_title for future in as_completed(future_to_sec_title): section_output_dict_collection.append(future.result()) article = copy.deepcopy(article_with_outline) for section_output_dict in section_output_dict_collection: article.update_section( parent_section_name=topic, current_section_content=section_output_dict["section_content"], current_section_info_list=section_output_dict["collected_info"], ) article.post_processing() return article
Generate article for the topic based on the information table and article outline.

Args:
    topic (str): The topic of the article.
    information_table (StormInformationTable): The information table containing the collected information.
    article_with_outline (StormArticle): The article with specified outline.
    callback_handler (BaseCallbackHandler): An optional callback handler that can be used to trigger
        custom callbacks at various stages of the article generation process. Defaults to None.

Returns:
    StormArticle: The article with each first-level section populated and post-processed.
generate_article
python
stanford-oval/storm
knowledge_storm/storm_wiki/modules/article_generation.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/storm_wiki/modules/article_generation.py
MIT
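The method above fans section writing out over a thread pool and collects results with as_completed, skipping standalone introduction and conclusion sections. A minimal, self-contained sketch of that fan-out pattern, with a stand-in generate_section in place of the real LM-backed writer:

import concurrent.futures
from concurrent.futures import as_completed

def generate_section(topic: str, section_title: str) -> dict:
    # Stand-in for the real LM-backed section writer.
    return {"section_title": section_title, "section_content": f"Draft of {section_title}..."}

sections = ["History", "Introduction", "Applications", "Conclusion"]
outputs = []
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
    future_to_title = {}
    for title in sections:
        # Mirror the skip logic above: no standalone intro/conclusion sections.
        lowered = title.lower().strip()
        if lowered == "introduction" or lowered.startswith(("conclusion", "summary")):
            continue
        future_to_title[executor.submit(generate_section, "Quantum Computing", title)] = title
    for future in as_completed(future_to_title):
        outputs.append(future.result())
print([o["section_title"] for o in outputs])  # completion order, not submission order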
def to_dict(self): """ Converts the CollaborativeStormLMConfigs instance to a dictionary representation. Returns: dict: The dictionary representation of the CollaborativeStormLMConfigs. """ config_dict = {} for attr_name in self.__dict__: config_dict[attr_name] = getattr(self, attr_name).kwargs return config_dict
Converts the CollaborativeStormLMConfigs instance to a dictionary representation. Returns: dict: The dictionary representation of the CollaborativeStormLMConfigs.
to_dict
python
stanford-oval/storm
knowledge_storm/collaborative_storm/engine.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/engine.py
MIT
def to_dict(self): """ Converts the RunnerArgument instance to a dictionary representation. Returns: dict: The dictionary representation of the RunnerArgument. """ return asdict(self)
Converts the RunnerArgument instance to a dictionary representation. Returns: dict: The dictionary representation of the RunnerArgument.
to_dict
python
stanford-oval/storm
knowledge_storm/collaborative_storm/engine.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/engine.py
MIT
def from_dict(cls, data): """ Constructs a RunnerArgument instance from a dictionary representation. Args: data (dict): The dictionary representation of the RunnerArgument. Returns: RunnerArgument: The constructed RunnerArgument instance. """ return cls(**data)
Constructs a RunnerArgument instance from a dictionary representation. Args: data (dict): The dictionary representation of the RunnerArgument. Returns: RunnerArgument: The constructed RunnerArgument instance.
from_dict
python
stanford-oval/storm
knowledge_storm/collaborative_storm/engine.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/engine.py
MIT
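The two dataclass helpers above are symmetric: asdict flattens the instance into a plain dict and cls(**data) rebuilds it. A self-contained round-trip sketch using a hypothetical stand-in dataclass (MiniRunnerArgument and its fields are illustrative, not the real RunnerArgument):

from dataclasses import dataclass, asdict

@dataclass
class MiniRunnerArgument:  # hypothetical stand-in for RunnerArgument
    topic: str
    retrieve_top_k: int = 10

    def to_dict(self):
        return asdict(self)

    @classmethod
    def from_dict(cls, data):
        return cls(**data)

arg = MiniRunnerArgument(topic="Quantum Computing")
restored = MiniRunnerArgument.from_dict(arg.to_dict())
assert restored == arg  # dataclasses compare field-by-field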
def warm_start(self):
        """
        Warm start the Co-STORM system by conducting a background information search to build a
        shared conceptual space with the user. This stage is a mini-STORM, spawning multiple LLM
        agents with different perspectives to hold a multi-round conversation. The knowledge base
        (i.e., mind map) is initialized with the collected information.

        It also generates a first draft of the report and uses it to produce an engaging and
        concise conversation that is presented to the user, bringing them up to speed on the
        system's knowledge of the topic.
        """
        with self.logging_wrapper.log_pipeline_stage(
            pipeline_stage="warm start stage"
        ):
            if not self.runner_argument.rag_only_baseline_mode:
                warm_start_module = WarmStartModule(
                    lm_config=self.lm_config,
                    runner_argument=self.runner_argument,
                    logging_wrapper=self.logging_wrapper,
                    rm=self.rm,
                    callback_handler=self.callback_handler,
                )

                (
                    warmstart_conv,
                    warmstart_revised_conv,
                    warmstart_experts,
                ) = warm_start_module.initiate_warm_start(
                    topic=self.runner_argument.topic,
                    knowledge_base=self.knowledge_base,
                )
                self.discourse_manager.experts = (
                    self.discourse_manager._parse_expert_names_to_agent(
                        warmstart_experts
                    )
                )
                self.discourse_manager.next_turn_moderator_override = True
                self.conversation_history = (
                    warmstart_revised_conv if warmstart_revised_conv else warmstart_conv
                )
                self.warmstart_conv_archive = warmstart_conv
                self.knowledge_base.reogranize()
            else:
                if self.knowledge_base is None:
                    self.knowledge_base = KnowledgeBase(
                        topic=self.runner_argument.topic,
                        knowledge_base_lm=self.lm_config.knowledge_base_lm,
                        node_expansion_trigger_count=self.runner_argument.node_expansion_trigger_count,
                        encoder=self.encoder,
                    )
                if self.conversation_history is None:
                    self.conversation_history = []
                conv_turn = (
                    self.discourse_manager.pure_rag_agent.generate_topic_background()
                )
                self.conversation_history.append(conv_turn)
                self.knowledge_base.update_from_conv_turn(
                    conv_turn=conv_turn,
                    allow_create_new_node=True,
                    insert_under_root=self.runner_argument.rag_only_baseline_mode,
                )
Warm start the Co-STORM system by conducting a background information search to build a shared conceptual space with the user. This stage is a mini-STORM, spawning multiple LLM agents with different perspectives to hold a multi-round conversation. The knowledge base (i.e., mind map) is initialized with the collected information. It also generates a first draft of the report and uses it to produce an engaging and concise conversation that is presented to the user, bringing them up to speed on the system's knowledge of the topic.
warm_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/engine.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/engine.py
MIT
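A hedged usage sketch for the method above. The constructor arguments follow the attributes warm_start reads (lm_config, runner_argument, logging_wrapper, rm); the exact CoStormRunner signature should be checked against the engine module, and lm_config, logging_wrapper, and retriever are assumed to be configured as in the repo examples.

from knowledge_storm.collaborative_storm.engine import (
    CollaborativeStormLMConfigs,
    RunnerArgument,
    CoStormRunner,
)

# Assumed setup: lm_config (CollaborativeStormLMConfigs), logging_wrapper, and
# retriever are configured elsewhere; only the call pattern is illustrated here.
runner = CoStormRunner(
    lm_config=lm_config,
    runner_argument=RunnerArgument(topic="Quantum Computing"),
    logging_wrapper=logging_wrapper,
    rm=retriever,
)
runner.warm_start()  # builds the shared mind map before the first user turn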
def generate_report(self) -> str:
        """
        Generate a report leveraging the organized information collected in the knowledge base
        (i.e., mind map). The article generation follows the paradigm in the STORM paper: mind map
        nodes are treated as section names, and the report is generated section by section.

        Returns:
            str: A string representing the report, with "#" and "##" indicating hierarchical
                sections and [1][2] indicating references.
        """
        with self.logging_wrapper.log_pipeline_stage(
            f"report generation after conv turn: {len(self.conversation_history)}"
        ):
            with self.logging_wrapper.log_event(
                "report generation stage: generate report"
            ):
                return self.knowledge_base.to_report()
Generate a report leveraging the organized information collected in the knowledge base (i.e., mind map). The article generation follows the paradigm in the STORM paper: mind map nodes are treated as section names, and the report is generated section by section.

Returns:
    str: A string representing the report, with "#" and "##" indicating hierarchical sections and [1][2] indicating references.
generate_report
python
stanford-oval/storm
knowledge_storm/collaborative_storm/engine.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/engine.py
MIT
def step(
        self,
        user_utterance: str = "",
        simulate_user: bool = False,
        simulate_user_intent: str = "",
    ) -> ConversationTurn:
        """
        Yields a single turn in the conversation flow.

        This method takes user input when the user chooses to inject an utterance, or generates the
        next system utterance based on the current conversation history and the defined discourse
        policies. It handles updating the conversation history, managing the expert list, and
        interacting with the knowledge base. Additionally, it logs each stage of the conversation
        for monitoring and debugging purposes.

        Args:
            user_utterance (str, optional): The input provided by the user. If provided, this
                utterance is added directly to the conversation history and the method returns
                without generating a system utterance.
            simulate_user (bool, optional): Designed for automatic experiments that use an LLM agent
                to simulate user actions. When set to `True`, the system generates user intents
                based on predefined simulation logic. Defaults to `False`.
            simulate_user_intent (str, optional): Designed for automatic experiments that use an LLM
                agent to simulate user actions. Specifies the intent to simulate for the user; used
                when `simulate_user` is `True` to guide the simulated user's responses.

        Returns:
            ConversationTurn: An object representing the latest turn in the conversation.

        Workflow:
        1. User Utterance Handling
            - If `user_utterance` is provided, it is appended to the `conversation_history`.

        2. System Utterance Generation
            - If no `user_utterance` is provided, the method proceeds to generate the next system utterance.
            - Determines the next turn policy by consulting the `discourse_manager` with the current conversation history.
            - Generates a new utterance using the agent defined in the turn policy, leveraging the `knowledge_base` and `conversation_history`.
            - If the turn policy indicates that the expert list should be updated, updates it based on the latest utterances.

        3. Knowledge Base Update
            - Inserts the new turn into the `knowledge_base`, optionally allowing the creation of new nodes or inserting under the root based on the `rag_only_baseline_mode` flag.
            - If the turn policy specifies, reorganizes the `knowledge_base` to maintain optimal structure and relevance.
""" last_conv_turn = self.conversation_history[-1] cur_turn_name = f"conv turn: {len(self.conversation_history) + 1}" with self.logging_wrapper.log_pipeline_stage( pipeline_stage=f"{cur_turn_name} stage" ): conv_turn = None if user_utterance: self.discourse_manager.next_turn_moderator_override = False conv_turn = ConversationTurn( role="Guest", raw_utterance=user_utterance, utterance_type="Original Question", ) self.conversation_history.append(conv_turn) else: with self.logging_wrapper.log_event( f"{cur_turn_name}: get turn policy" ): if self.callback_handler is not None: self.callback_handler.on_turn_policy_planning_start() turn_policy = self.discourse_manager.get_next_turn_policy( conversation_history=self.conversation_history, simulate_user=simulate_user, simulate_user_intent=simulate_user_intent, dry_run=False, ) with self.logging_wrapper.log_event( f"{cur_turn_name}: generate utterance" ): conv_turn = turn_policy.agent.generate_utterance( knowledge_base=self.knowledge_base, conversation_history=self.conversation_history, ) if turn_policy.should_update_experts_list: with self.logging_wrapper.log_event( f"{cur_turn_name}: update experts list" ): self.discourse_manager._update_expert_list_from_utterance( focus=last_conv_turn.raw_utterance, background_info=conv_turn.raw_utterance, ) if conv_turn is not None: self.conversation_history.append(conv_turn) with self.logging_wrapper.log_event( f"{cur_turn_name}: insert into knowledge base" ): if self.callback_handler is not None: self.callback_handler.on_mindmap_insert_start() self.knowledge_base.update_from_conv_turn( conv_turn=conv_turn, allow_create_new_node=True, insert_under_root=self.runner_argument.rag_only_baseline_mode, ) if self.callback_handler is not None: self.callback_handler.on_mindmap_insert_end() if turn_policy.should_reorganize_knowledge_base: with self.logging_wrapper.log_event( f"{cur_turn_name}: reorganize knowledge base" ): if self.callback_handler is not None: self.callback_handler.on_mindmap_reorg_start() self.knowledge_base.reogranize() return conv_turn
Yields a single turn in the conversation flow.

This method takes user input when the user chooses to inject an utterance, or generates the next system utterance based on the current conversation history and the defined discourse policies. It handles updating the conversation history, managing the expert list, and interacting with the knowledge base. Additionally, it logs each stage of the conversation for monitoring and debugging purposes.

Args:
    user_utterance (str, optional): The input provided by the user. If provided, this utterance is added directly to the conversation history and the method returns without generating a system utterance.
    simulate_user (bool, optional): Designed for automatic experiments that use an LLM agent to simulate user actions. When set to `True`, the system generates user intents based on predefined simulation logic. Defaults to `False`.
    simulate_user_intent (str, optional): Designed for automatic experiments that use an LLM agent to simulate user actions. Specifies the intent to simulate for the user; used when `simulate_user` is `True` to guide the simulated user's responses.

Returns:
    ConversationTurn: An object representing the latest turn in the conversation.

Workflow:
1. User Utterance Handling
    - If `user_utterance` is provided, it is appended to the `conversation_history`.
2. System Utterance Generation
    - If no `user_utterance` is provided, the method proceeds to generate the next system utterance.
    - Determines the next turn policy by consulting the `discourse_manager` with the current conversation history.
    - Generates a new utterance using the agent defined in the turn policy, leveraging the `knowledge_base` and `conversation_history`.
    - If the turn policy indicates that the expert list should be updated, updates it based on the latest utterances.
3. Knowledge Base Update
    - Inserts the new turn into the `knowledge_base`, optionally allowing the creation of new nodes or inserting under the root based on the `rag_only_baseline_mode` flag.
    - If the turn policy specifies, reorganizes the `knowledge_base` to maintain optimal structure and relevance.
step
python
stanford-oval/storm
knowledge_storm/collaborative_storm/engine.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/engine.py
MIT
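A hedged sketch of the turn loop built on step and generate_report above, continuing the runner from the earlier warm start sketch (same assumed setup). The role and raw_utterance attributes follow the ConversationTurn construction shown in the method body.

# Continues the `runner` from the warm start sketch above (assumed setup).
runner.step(user_utterance="How close are we to fault-tolerant quantum computing?")
for _ in range(2):
    turn = runner.step()  # let the system agents speak
    print(f"{turn.role}: {turn.raw_utterance}")

report = runner.generate_report()
with open("report.md", "w") as f:
    f.write(report)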
def initiate_warm_start(self, topic: str, knowledge_base: KnowledgeBase):
        """
        Initiates a warm start process for the given topic by generating a warm start conversation
        and inserting the resulting information into a knowledge base.

        Args:
            topic (str): The topic for which to initiate the warm start process.
            knowledge_base (KnowledgeBase): The knowledge base into which the collected information
                is inserted.

        Returns:
            Tuple:
                - A list of ConversationTurn instances representing the warm start conversation history.
                - The revised, engaging conversation produced from the knowledge base report, used to
                  present the collected background to the user.
                - The experts involved in the warm start conversation.
        """
        warm_start_conversation_history: List[ConversationTurn] = []
        warm_start_experts = None
        # get warm start conversations
        with self.logging_wrapper.log_event("warm start: perspective guided QA"):
            if self.callback_handler is not None:
                self.callback_handler.on_warmstart_update(
                    message="Start getting familiar with the topic by chatting with multiple LLM experts (Step 1 / 4)"
                )
            warm_start_result = self.warmstart_conv(topic=topic)
            warm_start_conversation_history = warm_start_result.conversation_history
            warm_start_experts = warm_start_result.experts

        # get warm start conv outline
        with self.logging_wrapper.log_event("warm start: outline generation"):
            if self.callback_handler is not None:
                self.callback_handler.on_warmstart_update(
                    "Organizing collected information (Step 2 / 4)"
                )
            warm_start_outline_output = self.warmstart_outline_gen_module(
                topic=topic, conv=warm_start_conversation_history
            )

        # init knowledge base
        with self.logging_wrapper.log_event("warm start: insert into knowledge base"):
            if self.callback_handler is not None:
                self.callback_handler.on_warmstart_update(
                    "Inserting collected information into knowledge base (Step 3 / 4)"
                )
            knowledge_base.insert_from_outline_string(
                outline_string=warm_start_outline_output.outline
            )
            # insert information to knowledge base
            for turn in warm_start_conversation_history:
                knowledge_base.update_from_conv_turn(
                    conv_turn=turn, allow_create_new_node=False
                )
        # knowledge base to report
        if self.callback_handler is not None:
            self.callback_handler.on_warmstart_update(
                "Synthesizing background information discussion utterances (Step 4 / 4)"
            )
        knowledge_base.to_report()

        # generate engaging conversations
        engaging_conversations = self.report_to_conversation(knowledge_base)
        return (
            warm_start_conversation_history,
            engaging_conversations,
            warm_start_experts,
        )
Initiates a warm start process for the given topic by generating a warm start conversation and inserting the resulting information into a knowledge base.

Args:
    topic (str): The topic for which to initiate the warm start process.
    knowledge_base (KnowledgeBase): The knowledge base into which the collected information is inserted.

Returns:
    Tuple:
        - A list of ConversationTurn instances representing the warm start conversation history.
        - The revised, engaging conversation produced from the knowledge base report, used to present the collected background to the user.
        - The experts involved in the warm start conversation.
initiate_warm_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/warmstart_hierarchical_chat.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/warmstart_hierarchical_chat.py
MIT
def extract_storm_info_snippet(info: Information, snippet_index: int) -> Information:
    """
    Constructs a new Information instance with only the specified snippet index.

    Args:
        info (Information): The original Information instance.
        snippet_index (int): The index of the snippet to retain.

    Returns:
        Information: A new Information instance with only the specified snippet.
    """
    if snippet_index < 0 or snippet_index >= len(info.snippets):
        raise ValueError("Snippet index out of range")

    new_snippets = [info.snippets[snippet_index]]
    new_storm_info = Information(
        info.url, info.description, new_snippets, info.title, info.meta
    )
    return new_storm_info
Constructs a new Information instance with only the specified snippet index.

Args:
    info (Information): The original Information instance.
    snippet_index (int): The index of the snippet to retain.

Returns:
    Information: A new Information instance with only the specified snippet.
extract_storm_info_snippet
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
MIT
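A minimal sketch exercising the function above. It assumes Information takes the positional arguments (url, description, snippets, title, meta), matching the constructor call inside the function; the import location and all values are placeholders.

from knowledge_storm.interface import Information  # assumed import location

info = Information(
    "https://example.com/qc",             # url (placeholder)
    "An overview page",                   # description
    ["first snippet", "second snippet"],  # snippets
    "Quantum computing overview",         # title
    {},                                   # meta
)
single = extract_storm_info_snippet(info, snippet_index=1)
assert single.snippets == ["second snippet"]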
def format_search_results( searched_results: List[Information], info_max_num_words: int = 1000, mode: str = "brief", ) -> Tuple[str, Dict[int, Information]]: """ Constructs a string from a list of search results with a specified word limit and returns a mapping of indices to Information. Args: searched_results (List[Information]): List of Information objects to process. info_max_num_words (int, optional): Maximum number of words allowed in the output string. Defaults to 1000. mode (str, optional): Mode of summarization. 'brief' takes only the first snippet of each Information. 'extensive' adds snippets iteratively until the word limit is reached. Defaults to 'brief'. Returns: Tuple[str, Dict[int, Information]]: - Formatted string with search results, constrained by the word limit. - Dictionary mapping indices to the corresponding Information objects. """ total_length = 0 extracted_snippet_queue = [] max_snippets = ( max(len(info.snippets) for info in searched_results) if searched_results else 0 ) max_snippets = 1 if mode == "brief" else max_snippets abort = False included_snippets = set() for i in range(max_snippets): for info in searched_results: if i < len(info.snippets) and not abort: cur_snippet = info.snippets[i] cur_snippet_len = len(info.snippets[i].split()) if total_length + cur_snippet_len > info_max_num_words: abort = True break if cur_snippet not in included_snippets: included_snippets.add(cur_snippet) info = extract_storm_info_snippet(info, snippet_index=i) extracted_snippet_queue.append(info) total_length += cur_snippet_len output = [] index_mapping = {} for idx, info in enumerate(extracted_snippet_queue): output.append(f"[{idx + 1}]: {info.snippets[0]}") index_mapping[idx + 1] = info assert -1 not in index_mapping return "\n".join(output), index_mapping
Constructs a string from a list of search results with a specified word limit and returns a mapping of indices to Information. Args: searched_results (List[Information]): List of Information objects to process. info_max_num_words (int, optional): Maximum number of words allowed in the output string. Defaults to 1000. mode (str, optional): Mode of summarization. 'brief' takes only the first snippet of each Information. 'extensive' adds snippets iteratively until the word limit is reached. Defaults to 'brief'. Returns: Tuple[str, Dict[int, Information]]: - Formatted string with search results, constrained by the word limit. - Dictionary mapping indices to the corresponding Information objects.
format_search_results
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
MIT
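A sketch of brief-mode formatting, reusing the placeholder Information objects from the previous sketch. Indices in the returned mapping are 1-based, matching the [1], [2] markers in the formatted text.

results = [
    Information("https://example.com/a", "Page A", ["snippet about qubits"], "A", {}),
    Information("https://example.com/b", "Page B", ["snippet about gates"], "B", {}),
]
text, index_mapping = format_search_results(results, mode="brief")
print(text)
# [1]: snippet about qubits
# [2]: snippet about gates
assert index_mapping[1].url == "https://example.com/a"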
def extract_cited_storm_info( response: str, index_to_storm_info: Dict[int, Information] ) -> Dict[int, Information]: """ Extracts a sub-dictionary of Information instances that are cited in the response. Args: response (str): The response string containing inline citations like [1], [2], etc. index_to_storm_info (Dict[int, Information]): A dictionary mapping indices to Information instances. Returns: Dict[int, Information]: A sub-dictionary with only the indices that appear in the response. """ cited_indices = set(map(int, re.findall(r"\[(\d+)\]", response))) cited_storm_info = { index: info for index, info in index_to_storm_info.items() if index in cited_indices } return cited_storm_info
Extracts a sub-dictionary of Information instances that are cited in the response. Args: response (str): The response string containing inline citations like [1], [2], etc. index_to_storm_info (Dict[int, Information]): A dictionary mapping indices to Information instances. Returns: Dict[int, Information]: A sub-dictionary with only the indices that appear in the response.
extract_cited_storm_info
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
MIT
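Continuing the sketch above: only indices cited inline in the response survive the filter, and duplicate citations are collapsed.

response = "Qubits are the basic unit [1]. Gate fidelity is improving [2], per [2]."
cited = extract_cited_storm_info(response, index_to_storm_info=index_mapping)
assert set(cited.keys()) == {1, 2}

# With a response that cites only [2]:
cited = extract_cited_storm_info("Gate fidelity is improving [2].", index_mapping)
assert set(cited.keys()) == {2}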
def trim_output_after_hint(response: str, hint: str) -> str:
    """
    Trims the output string to keep only the substring after the given hint (not including the hint).

    Args:
        response (str): The original output string.
        hint (str): The hint string after which the substring should be kept.

    Returns:
        str: The trimmed output string, or the original string stripped of surrounding newlines
            if the hint is not found.
    """
    if hint in response:
        start_index = response.find(hint) + len(hint)
        return response[start_index:].strip()
    return response.strip("\n")
Trims the output string to keep only the substring after the given hint (not including the hint).

Args:
    response (str): The original output string.
    hint (str): The hint string after which the substring should be kept.

Returns:
    str: The trimmed output string, or the original string stripped of surrounding newlines if the hint is not found.
trim_output_after_hint
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
MIT
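A quick demo of the hint-trimming behavior above, including the fallback when the hint is absent:

raw = "Reasoning: compare the options...\nAnswer: Option B"
print(trim_output_after_hint(raw, hint="Answer:"))                  # "Option B"
print(trim_output_after_hint("\nno hint here\n", hint="Answer:"))   # "no hint here"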