| code | apis | extract_api |
| --- | --- | --- |
| stringlengths 141–79.4k | sequencelengths 1–23 | stringlengths 126–73.2k |
import ast
import re
from typing import (
    Any,
    Dict,
    Iterator,
    List,
    Optional,
    Sequence,
    Tuple,
    Union,
)

from langchain_core.exceptions import OutputParserException
from langchain_core.messages import BaseMessage
from langchain_core.output_parsers.transform import BaseTransformOutputParser
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import BaseTool
from typing_extensions import TypedDict

THOUGHT_PATTERN = r"Thought: ([^\n]*)"
ACTION_PATTERN = r"\n*(\d+)\. (\w+)\((.*)\)(\s*#\w+\n)?"
# $1 or ${1} -> 1
ID_PATTERN = r"\$\{?(\d+)\}?"
END_OF_PLAN = "<END_OF_PLAN>"


### Helper functions


def _ast_parse(arg: str) -> Any:
    try:
        return ast.literal_eval(arg)
    except:  # noqa
        return arg


def _parse_llm_compiler_action_args(
    args: str, tool: Union[str, BaseTool]
) -> Union[tuple, Dict[str, Any]]:
    """Parse arguments from a string."""
    if args == "":
        return ()
    if isinstance(tool, str):
        return ()
    extracted_args = {}
    tool_key = None
    prev_idx = None
    for key in tool.args.keys():
        # Split if present
        if f"{key}=" in args:
            idx = args.index(f"{key}=")
            if prev_idx is not None:
                extracted_args[tool_key] = _ast_parse(
                    args[prev_idx:idx].strip().rstrip(",")
                )
            args = args.split(f"{key}=", 1)[1]
            tool_key = key
            prev_idx = 0
    if prev_idx is not None:
        extracted_args[tool_key] = _ast_parse(
            args[prev_idx:].strip().rstrip(",").rstrip(")")
        )
    return extracted_args
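
# Example (added for illustration, not in the source): for a tool whose schema
# lists a "query" argument followed by a "k" argument,
#   _parse_llm_compiler_action_args('query="foo", k=3', search_tool)
# returns {"query": "foo", "k": 3}; keyword-style argument strings are split on
# each "<key>=" and the pieces are literal-evaluated by _ast_parse.
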
def default_dependency_rule(idx, args: str):
    matches = re.findall(ID_PATTERN, args)
    numbers = [int(match) for match in matches]
    return idx in numbers


def _get_dependencies_from_graph(
    idx: int, tool_name: str, args: Dict[str, Any]
) -> List[int]:
    """Get dependencies from a graph."""
    if tool_name == "join":
        return list(range(1, idx))
    return [i for i in range(1, idx) if default_dependency_rule(i, str(args))]


class Task(TypedDict):
    idx: int
    tool: BaseTool
    args: list
    dependencies: Dict[str, list]
    thought: Optional[str]


def instantiate_task(
    tools: Sequence[BaseTool],
    idx: int,
    tool_name: str,
    args: Union[str, Any],
    thought: Optional[str] = None,
) -> Task:
    if tool_name == "join":
        tool = "join"
    else:
        try:
            tool = tools[[tool.name for tool in tools].index(tool_name)]
        except ValueError as e:
            raise OutputParserException(f"Tool {tool_name} not found.") from e
    tool_args = _parse_llm_compiler_action_args(args, tool)
    dependencies = _get_dependencies_from_graph(idx, tool_name, tool_args)
    return Task(
        idx=idx,
        tool=tool,
        args=tool_args,
        dependencies=dependencies,
        thought=thought,
    )


class LLMCompilerPlanParser(BaseTransformOutputParser[dict], extra="allow"):
    """Planning output parser."""

    tools: List[BaseTool]

    def _transform(self, input: Iterator[Union[str, BaseMessage]]) -> Iterator[Task]:
        texts = []
        # TODO: Cleanup tuple state tracking here.
        thought = None
        for chunk in input:
            # Assume input is str. TODO: support vision/other formats
            text = chunk if isinstance(chunk, str) else str(chunk.content)
            for task, thought in self.ingest_token(text, texts, thought):
                yield task
        # Final possible task
        if texts:
            task, _ = self._parse_task("".join(texts), thought)
            if task:
                yield task

    def parse(self, text: str) -> List[Task]:
        return list(self._transform([text]))

    def stream(
        self,
        input: str | BaseMessage,
        config: RunnableConfig | None = None,
        **kwargs: Any | None,
    ) -> Iterator[Task]:
        yield from self.transform([input], config, **kwargs)

    def ingest_token(
        self, token: str, buffer: List[str], thought: Optional[str]
    ) -> Iterator[Tuple[Optional[Task], str]]:
        buffer.append(token)
        if "\n" in token:
            buffer_ = "".join(buffer).split("\n")
            suffix = buffer_[-1]
            for line in buffer_[:-1]:
                task, thought = self._parse_task(line, thought)
                if task:
                    yield task, thought
            buffer.clear()
            buffer.append(suffix)

    def _parse_task(self, line: str, thought: Optional[str] = None):
        task = None
        if match := re.match(THOUGHT_PATTERN, line):
            # Optionally, action can be preceded by a thought
            thought = match.group(1)
        elif match := re.match(ACTION_PATTERN, line):
            # if action is parsed, return the task, and clear the buffer
            idx, tool_name, args, _ = match.groups()
            idx = int(idx)
            task = instantiate_task(
                tools=self.tools,
                idx=idx,
                tool_name=tool_name,
                args=args,
                thought=thought,
            )
            thought = None
        # Else it is just dropped
        return task, thought
| [
"langchain_core.exceptions.OutputParserException"
] | [((1688, 1716), 're.findall', 're.findall', (['ID_PATTERN', 'args'], {}), '(ID_PATTERN, args)\n', (1698, 1716), False, 'import re\n'), ((715, 736), 'ast.literal_eval', 'ast.literal_eval', (['arg'], {}), '(arg)\n', (731, 736), False, 'import ast\n'), ((4606, 4637), 're.match', 're.match', (['THOUGHT_PATTERN', 'line'], {}), '(THOUGHT_PATTERN, line)\n', (4614, 4637), False, 'import re\n'), ((2579, 2632), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['f"""Tool {tool_name} not found."""'], {}), "(f'Tool {tool_name} not found.')\n", (2600, 2632), False, 'from langchain_core.exceptions import OutputParserException\n'), ((4760, 4790), 're.match', 're.match', (['ACTION_PATTERN', 'line'], {}), '(ACTION_PATTERN, line)\n', (4768, 4790), False, 'import re\n')] |
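
A minimal usage sketch for the parser above (added for clarity, not part of the dataset row). It assumes `langchain_core` is installed; the `search` tool is a made-up stub that only exists to give the parser a tool schema to match against.

```python
from langchain_core.tools import tool

@tool
def search(query: str) -> str:
    """Toy search tool (stub for illustration only)."""
    return f"results for {query}"

parser = LLMCompilerPlanParser(tools=[search])
plan = (
    "Thought: I need to look this up\n"
    '1. search(query="llm compiler")\n'
    "2. join()\n"
    "<END_OF_PLAN>"
)
for task in parser.parse(plan):
    print(task["idx"], task["tool"], task["args"], task["dependencies"])
```

The `join()` step carries no tool object of its own; the parser stores the string `"join"` and marks every earlier step as a dependency.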
import requests
from typing import Any, Dict, Optional

from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain.chains import APIChain
from langchain.prompts import BasePromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain

from .requests_l402 import RequestsL402Wrapper
from .requests_l402 import ResponseTextWrapper

from lightning import LightningNode


class L402APIChain(APIChain):
    requests_wrapper: Any

    @classmethod
    def from_llm_and_api_docs(
        cls,
        llm: BaseLanguageModel,
        api_docs: str,
        headers: Optional[dict] = None,
        api_url_prompt: BasePromptTemplate = API_URL_PROMPT,
        api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT,
        lightning_node=None,
        **kwargs: Any,
    ) -> APIChain:
        """Load chain from just an LLM and the api docs."""
        requests_L402 = RequestsL402Wrapper(lightning_node, requests)
        lang_chain_request_L402 = ResponseTextWrapper(
            requests_wrapper=requests_L402,
        )
        get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
        get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
        return cls(
            api_request_chain=get_request_chain,
            api_answer_chain=get_answer_chain,
            requests_wrapper=lang_chain_request_L402,
            api_docs=api_docs,
            **kwargs,
        )
| [
"langchain.chains.llm.LLMChain"
] | [((1139, 1179), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_url_prompt'}), '(llm=llm, prompt=api_url_prompt)\n', (1147, 1179), False, 'from langchain.chains.llm import LLMChain\n'), ((1207, 1252), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_response_prompt'}), '(llm=llm, prompt=api_response_prompt)\n', (1215, 1252), False, 'from langchain.chains.llm import LLMChain\n')] |
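
A hedged construction sketch for the chain above (added, not from the source). The LLM choice, the API docs string, and the LightningNode are all placeholders; how a LightningNode is configured depends on the external lightning package, which this file only imports.

```python
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
node = ...  # a configured LightningNode; setup depends on the lightning package
chain = L402APIChain.from_llm_and_api_docs(
    llm,
    api_docs="<docs for the L402-gated API>",  # placeholder
    lightning_node=node,
)
print(chain.run("Fetch the paid resource"))
```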
"""Functionality for loading chains."""
import json
from pathlib import Path
from typing import Any, Union
import yaml
from langchain.chains.api.base import APIChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.base import LLMBashChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.sql_database.base import SQLDatabaseChain
from langchain.chains.vector_db_qa.base import VectorDBQA
from langchain.llms.loading import load_llm, load_llm_from_config
from langchain.prompts.loading import load_prompt, load_prompt_from_config
from langchain.utilities.loading import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
"""Load LLM chain from config dict."""
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return LLMChain(llm=llm, prompt=prompt, **config)
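
# Illustrative only (added, not in the source file): a config dict of the shape
# _load_llm_chain expects. The "openai" llm type and the "prompt" prompt type are
# assumptions about the loaders registered in load_llm_from_config and
# load_prompt_from_config, which live outside this module.
#
#   {
#       "_type": "llm_chain",
#       "llm": {"_type": "openai", "temperature": 0.0},
#       "prompt": {
#           "_type": "prompt",
#           "input_variables": ["question"],
#           "template": "Answer concisely: {question}",
#       },
#   }
#
# Passing such a dict to load_chain_from_config dispatches here via the
# "llm_chain" entry of type_to_loader_dict below.
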
def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
    """Load hypothetical document embedder chain from config dict."""
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
    if "embeddings" in kwargs:
        embeddings = kwargs.pop("embeddings")
    else:
        raise ValueError("`embeddings` must be present.")
    return HypotheticalDocumentEmbedder(
        llm_chain=llm_chain, base_embeddings=embeddings, **config
    )


def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
    if not isinstance(llm_chain, LLMChain):
        raise ValueError(f"Expected LLMChain, got {llm_chain}")
    if "document_prompt" in config:
        prompt_config = config.pop("document_prompt")
        document_prompt = load_prompt_from_config(prompt_config)
    elif "document_prompt_path" in config:
        document_prompt = load_prompt(config.pop("document_prompt_path"))
    else:
        raise ValueError(
            "One of `document_prompt` or `document_prompt_path` must be present."
        )
    return StuffDocumentsChain(
        llm_chain=llm_chain, document_prompt=document_prompt, **config
    )


def _load_map_reduce_documents_chain(
    config: dict, **kwargs: Any
) -> MapReduceDocumentsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
    if not isinstance(llm_chain, LLMChain):
        raise ValueError(f"Expected LLMChain, got {llm_chain}")
    if "combine_document_chain" in config:
        combine_document_chain_config = config.pop("combine_document_chain")
        combine_document_chain = load_chain_from_config(combine_document_chain_config)
    elif "combine_document_chain_path" in config:
        combine_document_chain = load_chain(config.pop("combine_document_chain_path"))
    else:
        raise ValueError(
            "One of `combine_document_chain` or "
            "`combine_document_chain_path` must be present."
        )
    if "collapse_document_chain" in config:
        collapse_document_chain_config = config.pop("collapse_document_chain")
        if collapse_document_chain_config is None:
            collapse_document_chain = None
        else:
            collapse_document_chain = load_chain_from_config(
                collapse_document_chain_config
            )
    elif "collapse_document_chain_path" in config:
        collapse_document_chain = load_chain(config.pop("collapse_document_chain_path"))
    return MapReduceDocumentsChain(
        llm_chain=llm_chain,
        combine_document_chain=combine_document_chain,
        collapse_document_chain=collapse_document_chain,
        **config,
    )
def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain:
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    if "prompt" in config:
        prompt_config = config.pop("prompt")
        prompt = load_prompt_from_config(prompt_config)
    elif "prompt_path" in config:
        prompt = load_prompt(config.pop("prompt_path"))
    return LLMBashChain(llm=llm, prompt=prompt, **config)


def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain:
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    if "create_draft_answer_prompt" in config:
        create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt")
        create_draft_answer_prompt = load_prompt_from_config(
            create_draft_answer_prompt_config
        )
    elif "create_draft_answer_prompt_path" in config:
        create_draft_answer_prompt = load_prompt(
            config.pop("create_draft_answer_prompt_path")
        )
    if "list_assertions_prompt" in config:
        list_assertions_prompt_config = config.pop("list_assertions_prompt")
        list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config)
    elif "list_assertions_prompt_path" in config:
        list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path"))
    if "check_assertions_prompt" in config:
        check_assertions_prompt_config = config.pop("check_assertions_prompt")
        check_assertions_prompt = load_prompt_from_config(
            check_assertions_prompt_config
        )
    elif "check_assertions_prompt_path" in config:
        check_assertions_prompt = load_prompt(
            config.pop("check_assertions_prompt_path")
        )
    if "revised_answer_prompt" in config:
        revised_answer_prompt_config = config.pop("revised_answer_prompt")
        revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config)
    elif "revised_answer_prompt_path" in config:
        revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path"))
    return LLMCheckerChain(
        llm=llm,
        create_draft_answer_prompt=create_draft_answer_prompt,
        list_assertions_prompt=list_assertions_prompt,
        check_assertions_prompt=check_assertions_prompt,
        revised_answer_prompt=revised_answer_prompt,
        **config,
    )


def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    if "prompt" in config:
        prompt_config = config.pop("prompt")
        prompt = load_prompt_from_config(prompt_config)
    elif "prompt_path" in config:
        prompt = load_prompt(config.pop("prompt_path"))
    return LLMMathChain(llm=llm, prompt=prompt, **config)
def _load_map_rerank_documents_chain(
    config: dict, **kwargs: Any
) -> MapRerankDocumentsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
    return MapRerankDocumentsChain(llm_chain=llm_chain, **config)


def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain:
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    if "prompt" in config:
        prompt_config = config.pop("prompt")
        prompt = load_prompt_from_config(prompt_config)
    elif "prompt_path" in config:
        prompt = load_prompt(config.pop("prompt_path"))
    else:
        raise ValueError("One of `prompt` or `prompt_path` must be present.")
    return PALChain(llm=llm, prompt=prompt, **config)


def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
    if "initial_llm_chain" in config:
        initial_llm_chain_config = config.pop("initial_llm_chain")
        initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
    elif "initial_llm_chain_path" in config:
        initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"))
    else:
        raise ValueError(
            "One of `initial_llm_chain` or `initial_llm_chain_config` must be present."
        )
    if "refine_llm_chain" in config:
        refine_llm_chain_config = config.pop("refine_llm_chain")
        refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
    elif "refine_llm_chain_path" in config:
        refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"))
    else:
        raise ValueError(
            "One of `refine_llm_chain` or `refine_llm_chain_config` must be present."
        )
    if "document_prompt" in config:
        prompt_config = config.pop("document_prompt")
        document_prompt = load_prompt_from_config(prompt_config)
    elif "document_prompt_path" in config:
        document_prompt = load_prompt(config.pop("document_prompt_path"))
    return RefineDocumentsChain(
        initial_llm_chain=initial_llm_chain,
        refine_llm_chain=refine_llm_chain,
        document_prompt=document_prompt,
        **config,
    )


def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
    if "combine_documents_chain" in config:
        combine_documents_chain_config = config.pop("combine_documents_chain")
        combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
    elif "combine_documents_chain_path" in config:
        combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
    else:
        raise ValueError(
            "One of `combine_documents_chain` or "
            "`combine_documents_chain_path` must be present."
        )
    return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config)


def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain:
    if "database" in kwargs:
        database = kwargs.pop("database")
    else:
        raise ValueError("`database` must be present.")
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    if "prompt" in config:
        prompt_config = config.pop("prompt")
        prompt = load_prompt_from_config(prompt_config)
    return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config)
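
# Note (added for clarity, not in the source): loaders that need live runtime
# objects receive them as keyword arguments threaded through load_chain, e.g. a
# hypothetical
#   load_chain("sql_chain.yaml", database=SQLDatabase.from_uri("sqlite:///demo.db"))
# where SQLDatabase comes from langchain.sql_database.
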
def _load_vector_db_qa_with_sources_chain(
    config: dict, **kwargs: Any
) -> VectorDBQAWithSourcesChain:
    if "vectorstore" in kwargs:
        vectorstore = kwargs.pop("vectorstore")
    else:
        raise ValueError("`vectorstore` must be present.")
    if "combine_documents_chain" in config:
        combine_documents_chain_config = config.pop("combine_documents_chain")
        combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
    elif "combine_documents_chain_path" in config:
        combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
    else:
        raise ValueError(
            "One of `combine_documents_chain` or "
            "`combine_documents_chain_path` must be present."
        )
    return VectorDBQAWithSourcesChain(
        combine_documents_chain=combine_documents_chain,
        vectorstore=vectorstore,
        **config,
    )


def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
    if "vectorstore" in kwargs:
        vectorstore = kwargs.pop("vectorstore")
    else:
        raise ValueError("`vectorstore` must be present.")
    if "combine_documents_chain" in config:
        combine_documents_chain_config = config.pop("combine_documents_chain")
        combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
    elif "combine_documents_chain_path" in config:
        combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
    else:
        raise ValueError(
            "One of `combine_documents_chain` or "
            "`combine_documents_chain_path` must be present."
        )
    return VectorDBQA(
        combine_documents_chain=combine_documents_chain,
        vectorstore=vectorstore,
        **config,
    )


def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
    if "api_request_chain" in config:
        api_request_chain_config = config.pop("api_request_chain")
        api_request_chain = load_chain_from_config(api_request_chain_config)
    elif "api_request_chain_path" in config:
        api_request_chain = load_chain(config.pop("api_request_chain_path"))
    else:
        raise ValueError(
            "One of `api_request_chain` or `api_request_chain_path` must be present."
        )
    if "api_answer_chain" in config:
        api_answer_chain_config = config.pop("api_answer_chain")
        api_answer_chain = load_chain_from_config(api_answer_chain_config)
    elif "api_answer_chain_path" in config:
        api_answer_chain = load_chain(config.pop("api_answer_chain_path"))
    else:
        raise ValueError(
            "One of `api_answer_chain` or `api_answer_chain_path` must be present."
        )
    if "requests_wrapper" in kwargs:
        requests_wrapper = kwargs.pop("requests_wrapper")
    else:
        raise ValueError("`requests_wrapper` must be present.")
    return APIChain(
        api_request_chain=api_request_chain,
        api_answer_chain=api_answer_chain,
        requests_wrapper=requests_wrapper,
        **config,
    )


def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
    if "requests_wrapper" in kwargs:
        requests_wrapper = kwargs.pop("requests_wrapper")
        return LLMRequestsChain(
            llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config
        )
    else:
        return LLMRequestsChain(llm_chain=llm_chain, **config)
type_to_loader_dict = {
    "api_chain": _load_api_chain,
    "hyde_chain": _load_hyde_chain,
    "llm_chain": _load_llm_chain,
    "llm_bash_chain": _load_llm_bash_chain,
    "llm_checker_chain": _load_llm_checker_chain,
    "llm_math_chain": _load_llm_math_chain,
    "llm_requests_chain": _load_llm_requests_chain,
    "pal_chain": _load_pal_chain,
    "qa_with_sources_chain": _load_qa_with_sources_chain,
    "stuff_documents_chain": _load_stuff_documents_chain,
    "map_reduce_documents_chain": _load_map_reduce_documents_chain,
    "map_rerank_documents_chain": _load_map_rerank_documents_chain,
    "refine_documents_chain": _load_refine_documents_chain,
    "sql_database_chain": _load_sql_database_chain,
    "vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain,
    "vector_db_qa": _load_vector_db_qa,
}


def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
    """Load chain from Config Dict."""
    if "_type" not in config:
        raise ValueError("Must specify a chain Type in config")
    config_type = config.pop("_type")
    if config_type not in type_to_loader_dict:
        raise ValueError(f"Loading {config_type} chain not supported")
    chain_loader = type_to_loader_dict[config_type]
    return chain_loader(config, **kwargs)


def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
    """Unified method for loading a chain from LangChainHub or local fs."""
    if hub_result := try_load_from_hub(
        path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs
    ):
        return hub_result
    else:
        return _load_chain_from_file(path, **kwargs)
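
# Usage sketch (added, not in the source): hub paths and local files both resolve
# through this entry point, e.g. with hypothetical paths
#   chain = load_chain("lc://chains/llm-math/chain.json")  # LangChainHub path
#   chain = load_chain("my_chain.yaml", verbose=True)      # local file
# Extra kwargs such as verbose and memory are applied in _load_chain_from_file below.
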
def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain:
    """Load chain from file."""
    # Convert file to Path object.
    if isinstance(file, str):
        file_path = Path(file)
    else:
        file_path = file
    # Load from either json or yaml.
    if file_path.suffix == ".json":
        with open(file_path) as f:
            config = json.load(f)
    elif file_path.suffix == ".yaml":
        with open(file_path, "r") as f:
            config = yaml.safe_load(f)
    else:
        raise ValueError("File type must be json or yaml")
    # Override default 'verbose' and 'memory' for the chain
    if "verbose" in kwargs:
        config["verbose"] = kwargs.pop("verbose")
    if "memory" in kwargs:
        config["memory"] = kwargs.pop("memory")
    # Load the chain from the config now.
    return load_chain_from_config(config, **kwargs)
| [
"langchain.chains.sql_database.base.SQLDatabaseChain",
"langchain.prompts.loading.load_prompt_from_config",
"langchain.chains.qa_with_sources.base.QAWithSourcesChain",
"langchain.chains.pal.base.PALChain",
"langchain.chains.combine_documents.refine.RefineDocumentsChain",
"langchain.chains.llm.LLMChain",
"langchain.chains.hyde.base.HypotheticalDocumentEmbedder",
"langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain",
"langchain.chains.llm_checker.base.LLMCheckerChain",
"langchain.llms.loading.load_llm_from_config",
"langchain.chains.vector_db_qa.base.VectorDBQA",
"langchain.chains.llm_bash.base.LLMBashChain",
"langchain.utilities.loading.try_load_from_hub",
"langchain.chains.llm_requests.LLMRequestsChain",
"langchain.chains.api.base.APIChain",
"langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain",
"langchain.chains.llm_math.base.LLMMathChain",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain",
"langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain"
] | [((2165, 2207), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (2173, 2207), False, 'from langchain.chains.llm import LLMChain\n'), ((2853, 2945), 'langchain.chains.hyde.base.HypotheticalDocumentEmbedder', 'HypotheticalDocumentEmbedder', ([], {'llm_chain': 'llm_chain', 'base_embeddings': 'embeddings'}), '(llm_chain=llm_chain, base_embeddings=\n embeddings, **config)\n', (2881, 2945), False, 'from langchain.chains.hyde.base import HypotheticalDocumentEmbedder\n'), ((3900, 3987), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_prompt': 'document_prompt'}), '(llm_chain=llm_chain, document_prompt=document_prompt,\n **config)\n', (3919, 3987), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((5552, 5711), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'combine_document_chain': 'combine_document_chain', 'collapse_document_chain': 'collapse_document_chain'}), '(llm_chain=llm_chain, combine_document_chain=\n combine_document_chain, collapse_document_chain=collapse_document_chain,\n **config)\n', (5575, 5711), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((6314, 6360), 'langchain.chains.llm_bash.base.LLMBashChain', 'LLMBashChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (6326, 6360), False, 'from langchain.chains.llm_bash.base import LLMBashChain\n'), ((8225, 8469), 'langchain.chains.llm_checker.base.LLMCheckerChain', 'LLMCheckerChain', ([], {'llm': 'llm', 'create_draft_answer_prompt': 'create_draft_answer_prompt', 'list_assertions_prompt': 'list_assertions_prompt', 'check_assertions_prompt': 'check_assertions_prompt', 'revised_answer_prompt': 'revised_answer_prompt'}), '(llm=llm, create_draft_answer_prompt=\n create_draft_answer_prompt, list_assertions_prompt=\n list_assertions_prompt, check_assertions_prompt=check_assertions_prompt,\n revised_answer_prompt=revised_answer_prompt, **config)\n', (8240, 8469), False, 'from langchain.chains.llm_checker.base import LLMCheckerChain\n'), ((9083, 9129), 'langchain.chains.llm_math.base.LLMMathChain', 'LLMMathChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (9095, 9129), False, 'from langchain.chains.llm_math.base import LLMMathChain\n'), ((9579, 9633), 'langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain', 'MapRerankDocumentsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (9602, 9633), False, 'from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain\n'), ((10285, 10327), 'langchain.chains.pal.base.PALChain', 'PALChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (10293, 10327), False, 'from langchain.chains.pal.base import PALChain\n'), ((11566, 11706), 'langchain.chains.combine_documents.refine.RefineDocumentsChain', 'RefineDocumentsChain', ([], {'initial_llm_chain': 'initial_llm_chain', 'refine_llm_chain': 'refine_llm_chain', 'document_prompt': 'document_prompt'}), '(initial_llm_chain=initial_llm_chain, refine_llm_chain=\n refine_llm_chain, document_prompt=document_prompt, **config)\n', (11586, 11706), False, 'from langchain.chains.combine_documents.refine import RefineDocumentsChain\n'), ((12349, 12426), 
'langchain.chains.qa_with_sources.base.QAWithSourcesChain', 'QAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain'}), '(combine_documents_chain=combine_documents_chain, **config)\n', (12367, 12426), False, 'from langchain.chains.qa_with_sources.base import QAWithSourcesChain\n'), ((13054, 13123), 'langchain.chains.sql_database.base.SQLDatabaseChain', 'SQLDatabaseChain', ([], {'database': 'database', 'llm': 'llm', 'prompt': 'prompt'}), '(database=database, llm=llm, prompt=prompt, **config)\n', (13070, 13123), False, 'from langchain.chains.sql_database.base import SQLDatabaseChain\n'), ((13905, 14019), 'langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain', 'VectorDBQAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain,\n vectorstore=vectorstore, **config)\n', (13931, 14019), False, 'from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain\n'), ((14787, 14886), 'langchain.chains.vector_db_qa.base.VectorDBQA', 'VectorDBQA', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain, vectorstore=\n vectorstore, **config)\n', (14797, 14886), False, 'from langchain.chains.vector_db_qa.base import VectorDBQA\n'), ((16019, 16149), 'langchain.chains.api.base.APIChain', 'APIChain', ([], {'api_request_chain': 'api_request_chain', 'api_answer_chain': 'api_answer_chain', 'requests_wrapper': 'requests_wrapper'}), '(api_request_chain=api_request_chain, api_answer_chain=\n api_answer_chain, requests_wrapper=requests_wrapper, **config)\n', (16027, 16149), False, 'from langchain.chains.api.base import APIChain\n'), ((1653, 1685), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (1673, 1685), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((1936, 1974), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (1959, 1974), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((3604, 3642), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (3627, 3642), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((5892, 5924), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (5912, 5924), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6174, 6212), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (6197, 6212), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((6517, 6549), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (6537, 6549), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6879, 6937), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['create_draft_answer_prompt_config'], {}), '(create_draft_answer_prompt_config)\n', (6902, 6937), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7285, 7339), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', 
(['list_assertions_prompt_config'], {}), '(list_assertions_prompt_config)\n', (7308, 7339), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7635, 7690), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['check_assertions_prompt_config'], {}), '(check_assertions_prompt_config)\n', (7658, 7690), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8025, 8078), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['revised_answer_prompt_config'], {}), '(revised_answer_prompt_config)\n', (8048, 8078), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8661, 8693), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (8681, 8693), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((8943, 8981), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (8966, 8981), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((9775, 9807), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (9795, 9807), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((10057, 10095), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (10080, 10095), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((11399, 11437), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (11422, 11437), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((12722, 12754), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (12742, 12754), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((13004, 13042), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (13027, 13042), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((16709, 16796), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain', 'requests_wrapper': 'requests_wrapper'}), '(llm_chain=llm_chain, requests_wrapper=requests_wrapper, **\n config)\n', (16725, 16796), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((16839, 16886), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (16855, 16886), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((18341, 18429), 'langchain.utilities.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_chain_from_file', '"""chains"""', "{'json', 'yaml'}"], {}), "(path, _load_chain_from_file, 'chains', {'json', 'yaml'},\n **kwargs)\n", (18358, 18429), False, 'from langchain.utilities.loading import try_load_from_hub\n'), ((18724, 18734), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (18728, 18734), False, 'from pathlib import Path\n'), ((18899, 18911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18908, 18911), False, 'import json\n'), ((19011, 19028), 'yaml.safe_load', 'yaml.safe_load', 
(['f'], {}), '(f)\n', (19025, 19028), False, 'import yaml\n')] |
"""Functionality for loading chains."""
import json
from pathlib import Path
from typing import Any, Union
import yaml
from langchain.chains.api.base import APIChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.base import LLMBashChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.sql_database.base import SQLDatabaseChain
from langchain.chains.vector_db_qa.base import VectorDBQA
from langchain.llms.loading import load_llm, load_llm_from_config
from langchain.prompts.loading import load_prompt, load_prompt_from_config
from langchain.utilities.loading import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
"""Load LLM chain from config dict."""
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return LLMChain(llm=llm, prompt=prompt, **config)
def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
"""Load hypothetical document embedder chain from config dict."""
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "embeddings" in kwargs:
embeddings = kwargs.pop("embeddings")
else:
raise ValueError("`embeddings` must be present.")
return HypotheticalDocumentEmbedder(
llm_chain=llm_chain, base_embeddings=embeddings, **config
)
def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
else:
raise ValueError(
"One of `document_prompt` or `document_prompt_path` must be present."
)
return StuffDocumentsChain(
llm_chain=llm_chain, document_prompt=document_prompt, **config
)
def _load_map_reduce_documents_chain(
config: dict, **kwargs: Any
) -> MapReduceDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "combine_document_chain" in config:
combine_document_chain_config = config.pop("combine_document_chain")
combine_document_chain = load_chain_from_config(combine_document_chain_config)
elif "combine_document_chain_path" in config:
combine_document_chain = load_chain(config.pop("combine_document_chain_path"))
else:
raise ValueError(
"One of `combine_document_chain` or "
"`combine_document_chain_path` must be present."
)
if "collapse_document_chain" in config:
collapse_document_chain_config = config.pop("collapse_document_chain")
if collapse_document_chain_config is None:
collapse_document_chain = None
else:
collapse_document_chain = load_chain_from_config(
collapse_document_chain_config
)
elif "collapse_document_chain_path" in config:
collapse_document_chain = load_chain(config.pop("collapse_document_chain_path"))
return MapReduceDocumentsChain(
llm_chain=llm_chain,
combine_document_chain=combine_document_chain,
collapse_document_chain=collapse_document_chain,
**config,
)
def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMBashChain(llm=llm, prompt=prompt, **config)
def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "create_draft_answer_prompt" in config:
create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt")
create_draft_answer_prompt = load_prompt_from_config(
create_draft_answer_prompt_config
)
elif "create_draft_answer_prompt_path" in config:
create_draft_answer_prompt = load_prompt(
config.pop("create_draft_answer_prompt_path")
)
if "list_assertions_prompt" in config:
list_assertions_prompt_config = config.pop("list_assertions_prompt")
list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config)
elif "list_assertions_prompt_path" in config:
list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path"))
if "check_assertions_prompt" in config:
check_assertions_prompt_config = config.pop("check_assertions_prompt")
check_assertions_prompt = load_prompt_from_config(
check_assertions_prompt_config
)
elif "check_assertions_prompt_path" in config:
check_assertions_prompt = load_prompt(
config.pop("check_assertions_prompt_path")
)
if "revised_answer_prompt" in config:
revised_answer_prompt_config = config.pop("revised_answer_prompt")
revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config)
elif "revised_answer_prompt_path" in config:
revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path"))
return LLMCheckerChain(
llm=llm,
create_draft_answer_prompt=create_draft_answer_prompt,
list_assertions_prompt=list_assertions_prompt,
check_assertions_prompt=check_assertions_prompt,
revised_answer_prompt=revised_answer_prompt,
**config,
)
def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMMathChain(llm=llm, prompt=prompt, **config)
def _load_map_rerank_documents_chain(
config: dict, **kwargs: Any
) -> MapRerankDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
return MapRerankDocumentsChain(llm_chain=llm_chain, **config)
def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return PALChain(llm=llm, prompt=prompt, **config)
def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
if "initial_llm_chain" in config:
initial_llm_chain_config = config.pop("initial_llm_chain")
initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
elif "initial_llm_chain_path" in config:
initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"))
else:
raise ValueError(
"One of `initial_llm_chain` or `initial_llm_chain_config` must be present."
)
if "refine_llm_chain" in config:
refine_llm_chain_config = config.pop("refine_llm_chain")
refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
elif "refine_llm_chain_path" in config:
refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"))
else:
raise ValueError(
"One of `refine_llm_chain` or `refine_llm_chain_config` must be present."
)
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
return RefineDocumentsChain(
initial_llm_chain=initial_llm_chain,
refine_llm_chain=refine_llm_chain,
document_prompt=document_prompt,
**config,
)
def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config)
def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain:
if "database" in kwargs:
database = kwargs.pop("database")
else:
raise ValueError("`database` must be present.")
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config)
def _load_vector_db_qa_with_sources_chain(
config: dict, **kwargs: Any
) -> VectorDBQAWithSourcesChain:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQAWithSourcesChain(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQA(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
if "api_request_chain" in config:
api_request_chain_config = config.pop("api_request_chain")
api_request_chain = load_chain_from_config(api_request_chain_config)
elif "api_request_chain_path" in config:
api_request_chain = load_chain(config.pop("api_request_chain_path"))
else:
raise ValueError(
"One of `api_request_chain` or `api_request_chain_path` must be present."
)
if "api_answer_chain" in config:
api_answer_chain_config = config.pop("api_answer_chain")
api_answer_chain = load_chain_from_config(api_answer_chain_config)
elif "api_answer_chain_path" in config:
api_answer_chain = load_chain(config.pop("api_answer_chain_path"))
else:
raise ValueError(
"One of `api_answer_chain` or `api_answer_chain_path` must be present."
)
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
else:
raise ValueError("`requests_wrapper` must be present.")
return APIChain(
api_request_chain=api_request_chain,
api_answer_chain=api_answer_chain,
requests_wrapper=requests_wrapper,
**config,
)
def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
return LLMRequestsChain(
llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config
)
else:
return LLMRequestsChain(llm_chain=llm_chain, **config)
type_to_loader_dict = {
"api_chain": _load_api_chain,
"hyde_chain": _load_hyde_chain,
"llm_chain": _load_llm_chain,
"llm_bash_chain": _load_llm_bash_chain,
"llm_checker_chain": _load_llm_checker_chain,
"llm_math_chain": _load_llm_math_chain,
"llm_requests_chain": _load_llm_requests_chain,
"pal_chain": _load_pal_chain,
"qa_with_sources_chain": _load_qa_with_sources_chain,
"stuff_documents_chain": _load_stuff_documents_chain,
"map_reduce_documents_chain": _load_map_reduce_documents_chain,
"map_rerank_documents_chain": _load_map_rerank_documents_chain,
"refine_documents_chain": _load_refine_documents_chain,
"sql_database_chain": _load_sql_database_chain,
"vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain,
"vector_db_qa": _load_vector_db_qa,
}
def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
"""Load chain from Config Dict."""
if "_type" not in config:
raise ValueError("Must specify a chain Type in config")
config_type = config.pop("_type")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} chain not supported")
chain_loader = type_to_loader_dict[config_type]
return chain_loader(config, **kwargs)
def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
"""Unified method for loading a chain from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs
):
return hub_result
else:
return _load_chain_from_file(path, **kwargs)
def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain:
"""Load chain from file."""
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == ".yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError("File type must be json or yaml")
# Override default 'verbose' and 'memory' for the chain
if "verbose" in kwargs:
config["verbose"] = kwargs.pop("verbose")
if "memory" in kwargs:
config["memory"] = kwargs.pop("memory")
# Load the chain from the config now.
return load_chain_from_config(config, **kwargs)
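# Example file (added, hypothetical): a yaml config that _load_chain_from_file
# would parse and hand off to load_chain_from_config:
#     # my_chain.yaml
#     _type: llm_chain
#     llm:
#       _type: openai
#     prompt:
#       _type: prompt
#       input_variables: []
#       template: "Tell me a joke."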
| [
"langchain.chains.sql_database.base.SQLDatabaseChain",
"langchain.prompts.loading.load_prompt_from_config",
"langchain.chains.qa_with_sources.base.QAWithSourcesChain",
"langchain.chains.pal.base.PALChain",
"langchain.chains.combine_documents.refine.RefineDocumentsChain",
"langchain.chains.llm.LLMChain",
"langchain.chains.hyde.base.HypotheticalDocumentEmbedder",
"langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain",
"langchain.chains.llm_checker.base.LLMCheckerChain",
"langchain.llms.loading.load_llm_from_config",
"langchain.chains.vector_db_qa.base.VectorDBQA",
"langchain.chains.llm_bash.base.LLMBashChain",
"langchain.utilities.loading.try_load_from_hub",
"langchain.chains.llm_requests.LLMRequestsChain",
"langchain.chains.api.base.APIChain",
"langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain",
"langchain.chains.llm_math.base.LLMMathChain",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain",
"langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain"
] | [((2165, 2207), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (2173, 2207), False, 'from langchain.chains.llm import LLMChain\n'), ((2853, 2945), 'langchain.chains.hyde.base.HypotheticalDocumentEmbedder', 'HypotheticalDocumentEmbedder', ([], {'llm_chain': 'llm_chain', 'base_embeddings': 'embeddings'}), '(llm_chain=llm_chain, base_embeddings=\n embeddings, **config)\n', (2881, 2945), False, 'from langchain.chains.hyde.base import HypotheticalDocumentEmbedder\n'), ((3900, 3987), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_prompt': 'document_prompt'}), '(llm_chain=llm_chain, document_prompt=document_prompt,\n **config)\n', (3919, 3987), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((5552, 5711), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'combine_document_chain': 'combine_document_chain', 'collapse_document_chain': 'collapse_document_chain'}), '(llm_chain=llm_chain, combine_document_chain=\n combine_document_chain, collapse_document_chain=collapse_document_chain,\n **config)\n', (5575, 5711), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((6314, 6360), 'langchain.chains.llm_bash.base.LLMBashChain', 'LLMBashChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (6326, 6360), False, 'from langchain.chains.llm_bash.base import LLMBashChain\n'), ((8225, 8469), 'langchain.chains.llm_checker.base.LLMCheckerChain', 'LLMCheckerChain', ([], {'llm': 'llm', 'create_draft_answer_prompt': 'create_draft_answer_prompt', 'list_assertions_prompt': 'list_assertions_prompt', 'check_assertions_prompt': 'check_assertions_prompt', 'revised_answer_prompt': 'revised_answer_prompt'}), '(llm=llm, create_draft_answer_prompt=\n create_draft_answer_prompt, list_assertions_prompt=\n list_assertions_prompt, check_assertions_prompt=check_assertions_prompt,\n revised_answer_prompt=revised_answer_prompt, **config)\n', (8240, 8469), False, 'from langchain.chains.llm_checker.base import LLMCheckerChain\n'), ((9083, 9129), 'langchain.chains.llm_math.base.LLMMathChain', 'LLMMathChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (9095, 9129), False, 'from langchain.chains.llm_math.base import LLMMathChain\n'), ((9579, 9633), 'langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain', 'MapRerankDocumentsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (9602, 9633), False, 'from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain\n'), ((10285, 10327), 'langchain.chains.pal.base.PALChain', 'PALChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (10293, 10327), False, 'from langchain.chains.pal.base import PALChain\n'), ((11566, 11706), 'langchain.chains.combine_documents.refine.RefineDocumentsChain', 'RefineDocumentsChain', ([], {'initial_llm_chain': 'initial_llm_chain', 'refine_llm_chain': 'refine_llm_chain', 'document_prompt': 'document_prompt'}), '(initial_llm_chain=initial_llm_chain, refine_llm_chain=\n refine_llm_chain, document_prompt=document_prompt, **config)\n', (11586, 11706), False, 'from langchain.chains.combine_documents.refine import RefineDocumentsChain\n'), ((12349, 12426), 
'langchain.chains.qa_with_sources.base.QAWithSourcesChain', 'QAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain'}), '(combine_documents_chain=combine_documents_chain, **config)\n', (12367, 12426), False, 'from langchain.chains.qa_with_sources.base import QAWithSourcesChain\n'), ((13054, 13123), 'langchain.chains.sql_database.base.SQLDatabaseChain', 'SQLDatabaseChain', ([], {'database': 'database', 'llm': 'llm', 'prompt': 'prompt'}), '(database=database, llm=llm, prompt=prompt, **config)\n', (13070, 13123), False, 'from langchain.chains.sql_database.base import SQLDatabaseChain\n'), ((13905, 14019), 'langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain', 'VectorDBQAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain,\n vectorstore=vectorstore, **config)\n', (13931, 14019), False, 'from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain\n'), ((14787, 14886), 'langchain.chains.vector_db_qa.base.VectorDBQA', 'VectorDBQA', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain, vectorstore=\n vectorstore, **config)\n', (14797, 14886), False, 'from langchain.chains.vector_db_qa.base import VectorDBQA\n'), ((16019, 16149), 'langchain.chains.api.base.APIChain', 'APIChain', ([], {'api_request_chain': 'api_request_chain', 'api_answer_chain': 'api_answer_chain', 'requests_wrapper': 'requests_wrapper'}), '(api_request_chain=api_request_chain, api_answer_chain=\n api_answer_chain, requests_wrapper=requests_wrapper, **config)\n', (16027, 16149), False, 'from langchain.chains.api.base import APIChain\n'), ((1653, 1685), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (1673, 1685), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((1936, 1974), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (1959, 1974), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((3604, 3642), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (3627, 3642), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((5892, 5924), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (5912, 5924), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6174, 6212), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (6197, 6212), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((6517, 6549), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (6537, 6549), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6879, 6937), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['create_draft_answer_prompt_config'], {}), '(create_draft_answer_prompt_config)\n', (6902, 6937), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7285, 7339), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', 
(['list_assertions_prompt_config'], {}), '(list_assertions_prompt_config)\n', (7308, 7339), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7635, 7690), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['check_assertions_prompt_config'], {}), '(check_assertions_prompt_config)\n', (7658, 7690), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8025, 8078), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['revised_answer_prompt_config'], {}), '(revised_answer_prompt_config)\n', (8048, 8078), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8661, 8693), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (8681, 8693), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((8943, 8981), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (8966, 8981), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((9775, 9807), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (9795, 9807), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((10057, 10095), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (10080, 10095), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((11399, 11437), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (11422, 11437), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((12722, 12754), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (12742, 12754), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((13004, 13042), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (13027, 13042), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((16709, 16796), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain', 'requests_wrapper': 'requests_wrapper'}), '(llm_chain=llm_chain, requests_wrapper=requests_wrapper, **\n config)\n', (16725, 16796), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((16839, 16886), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (16855, 16886), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((18341, 18429), 'langchain.utilities.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_chain_from_file', '"""chains"""', "{'json', 'yaml'}"], {}), "(path, _load_chain_from_file, 'chains', {'json', 'yaml'},\n **kwargs)\n", (18358, 18429), False, 'from langchain.utilities.loading import try_load_from_hub\n'), ((18724, 18734), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (18728, 18734), False, 'from pathlib import Path\n'), ((18899, 18911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18908, 18911), False, 'import json\n'), ((19011, 19028), 'yaml.safe_load', 'yaml.safe_load', 
(['f'], {}), '(f)\n', (19025, 19028), False, 'import yaml\n')] |
"""Functionality for loading chains."""
import json
from pathlib import Path
from typing import Any, Union
import yaml
from langchain.chains.api.base import APIChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.base import LLMBashChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.sql_database.base import SQLDatabaseChain
from langchain.chains.vector_db_qa.base import VectorDBQA
from langchain.llms.loading import load_llm, load_llm_from_config
from langchain.prompts.loading import load_prompt, load_prompt_from_config
from langchain.utilities.loading import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
"""Load LLM chain from config dict."""
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return LLMChain(llm=llm, prompt=prompt, **config)
def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
"""Load hypothetical document embedder chain from config dict."""
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "embeddings" in kwargs:
embeddings = kwargs.pop("embeddings")
else:
raise ValueError("`embeddings` must be present.")
return HypotheticalDocumentEmbedder(
llm_chain=llm_chain, base_embeddings=embeddings, **config
)
def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
else:
raise ValueError(
"One of `document_prompt` or `document_prompt_path` must be present."
)
return StuffDocumentsChain(
llm_chain=llm_chain, document_prompt=document_prompt, **config
)
def _load_map_reduce_documents_chain(
config: dict, **kwargs: Any
) -> MapReduceDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "combine_document_chain" in config:
combine_document_chain_config = config.pop("combine_document_chain")
combine_document_chain = load_chain_from_config(combine_document_chain_config)
elif "combine_document_chain_path" in config:
combine_document_chain = load_chain(config.pop("combine_document_chain_path"))
else:
raise ValueError(
"One of `combine_document_chain` or "
"`combine_document_chain_path` must be present."
)
if "collapse_document_chain" in config:
collapse_document_chain_config = config.pop("collapse_document_chain")
if collapse_document_chain_config is None:
collapse_document_chain = None
else:
collapse_document_chain = load_chain_from_config(
collapse_document_chain_config
)
elif "collapse_document_chain_path" in config:
collapse_document_chain = load_chain(config.pop("collapse_document_chain_path"))
return MapReduceDocumentsChain(
llm_chain=llm_chain,
combine_document_chain=combine_document_chain,
collapse_document_chain=collapse_document_chain,
**config,
)
def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMBashChain(llm=llm, prompt=prompt, **config)
def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "create_draft_answer_prompt" in config:
create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt")
create_draft_answer_prompt = load_prompt_from_config(
create_draft_answer_prompt_config
)
elif "create_draft_answer_prompt_path" in config:
create_draft_answer_prompt = load_prompt(
config.pop("create_draft_answer_prompt_path")
)
if "list_assertions_prompt" in config:
list_assertions_prompt_config = config.pop("list_assertions_prompt")
list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config)
elif "list_assertions_prompt_path" in config:
list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path"))
if "check_assertions_prompt" in config:
check_assertions_prompt_config = config.pop("check_assertions_prompt")
check_assertions_prompt = load_prompt_from_config(
check_assertions_prompt_config
)
elif "check_assertions_prompt_path" in config:
check_assertions_prompt = load_prompt(
config.pop("check_assertions_prompt_path")
)
if "revised_answer_prompt" in config:
revised_answer_prompt_config = config.pop("revised_answer_prompt")
revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config)
elif "revised_answer_prompt_path" in config:
revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path"))
return LLMCheckerChain(
llm=llm,
create_draft_answer_prompt=create_draft_answer_prompt,
list_assertions_prompt=list_assertions_prompt,
check_assertions_prompt=check_assertions_prompt,
revised_answer_prompt=revised_answer_prompt,
**config,
)
def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMMathChain(llm=llm, prompt=prompt, **config)
def _load_map_rerank_documents_chain(
config: dict, **kwargs: Any
) -> MapRerankDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
return MapRerankDocumentsChain(llm_chain=llm_chain, **config)
def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return PALChain(llm=llm, prompt=prompt, **config)
def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
if "initial_llm_chain" in config:
initial_llm_chain_config = config.pop("initial_llm_chain")
initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
elif "initial_llm_chain_path" in config:
initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"))
else:
raise ValueError(
"One of `initial_llm_chain` or `initial_llm_chain_config` must be present."
)
if "refine_llm_chain" in config:
refine_llm_chain_config = config.pop("refine_llm_chain")
refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
elif "refine_llm_chain_path" in config:
refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"))
else:
raise ValueError(
"One of `refine_llm_chain` or `refine_llm_chain_config` must be present."
)
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
return RefineDocumentsChain(
initial_llm_chain=initial_llm_chain,
refine_llm_chain=refine_llm_chain,
document_prompt=document_prompt,
**config,
)
def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config)
def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain:
if "database" in kwargs:
database = kwargs.pop("database")
else:
raise ValueError("`database` must be present.")
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config)
def _load_vector_db_qa_with_sources_chain(
config: dict, **kwargs: Any
) -> VectorDBQAWithSourcesChain:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQAWithSourcesChain(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQA(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
if "api_request_chain" in config:
api_request_chain_config = config.pop("api_request_chain")
api_request_chain = load_chain_from_config(api_request_chain_config)
elif "api_request_chain_path" in config:
api_request_chain = load_chain(config.pop("api_request_chain_path"))
else:
raise ValueError(
"One of `api_request_chain` or `api_request_chain_path` must be present."
)
if "api_answer_chain" in config:
api_answer_chain_config = config.pop("api_answer_chain")
api_answer_chain = load_chain_from_config(api_answer_chain_config)
elif "api_answer_chain_path" in config:
api_answer_chain = load_chain(config.pop("api_answer_chain_path"))
else:
raise ValueError(
"One of `api_answer_chain` or `api_answer_chain_path` must be present."
)
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
else:
raise ValueError("`requests_wrapper` must be present.")
return APIChain(
api_request_chain=api_request_chain,
api_answer_chain=api_answer_chain,
requests_wrapper=requests_wrapper,
**config,
)
def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
return LLMRequestsChain(
llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config
)
else:
return LLMRequestsChain(llm_chain=llm_chain, **config)
type_to_loader_dict = {
"api_chain": _load_api_chain,
"hyde_chain": _load_hyde_chain,
"llm_chain": _load_llm_chain,
"llm_bash_chain": _load_llm_bash_chain,
"llm_checker_chain": _load_llm_checker_chain,
"llm_math_chain": _load_llm_math_chain,
"llm_requests_chain": _load_llm_requests_chain,
"pal_chain": _load_pal_chain,
"qa_with_sources_chain": _load_qa_with_sources_chain,
"stuff_documents_chain": _load_stuff_documents_chain,
"map_reduce_documents_chain": _load_map_reduce_documents_chain,
"map_rerank_documents_chain": _load_map_rerank_documents_chain,
"refine_documents_chain": _load_refine_documents_chain,
"sql_database_chain": _load_sql_database_chain,
"vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain,
"vector_db_qa": _load_vector_db_qa,
}
def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
"""Load chain from Config Dict."""
if "_type" not in config:
raise ValueError("Must specify a chain Type in config")
config_type = config.pop("_type")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} chain not supported")
chain_loader = type_to_loader_dict[config_type]
return chain_loader(config, **kwargs)
def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
"""Unified method for loading a chain from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs
):
return hub_result
else:
return _load_chain_from_file(path, **kwargs)
def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain:
"""Load chain from file."""
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == ".yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError("File type must be json or yaml")
# Override default 'verbose' and 'memory' for the chain
if "verbose" in kwargs:
config["verbose"] = kwargs.pop("verbose")
if "memory" in kwargs:
config["memory"] = kwargs.pop("memory")
# Load the chain from the config now.
return load_chain_from_config(config, **kwargs)
| [
"langchain.chains.sql_database.base.SQLDatabaseChain",
"langchain.prompts.loading.load_prompt_from_config",
"langchain.chains.qa_with_sources.base.QAWithSourcesChain",
"langchain.chains.pal.base.PALChain",
"langchain.chains.combine_documents.refine.RefineDocumentsChain",
"langchain.chains.llm.LLMChain",
"langchain.chains.hyde.base.HypotheticalDocumentEmbedder",
"langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain",
"langchain.chains.llm_checker.base.LLMCheckerChain",
"langchain.llms.loading.load_llm_from_config",
"langchain.chains.vector_db_qa.base.VectorDBQA",
"langchain.chains.llm_bash.base.LLMBashChain",
"langchain.utilities.loading.try_load_from_hub",
"langchain.chains.llm_requests.LLMRequestsChain",
"langchain.chains.api.base.APIChain",
"langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain",
"langchain.chains.llm_math.base.LLMMathChain",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain",
"langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain"
] | [((2165, 2207), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (2173, 2207), False, 'from langchain.chains.llm import LLMChain\n'), ((2853, 2945), 'langchain.chains.hyde.base.HypotheticalDocumentEmbedder', 'HypotheticalDocumentEmbedder', ([], {'llm_chain': 'llm_chain', 'base_embeddings': 'embeddings'}), '(llm_chain=llm_chain, base_embeddings=\n embeddings, **config)\n', (2881, 2945), False, 'from langchain.chains.hyde.base import HypotheticalDocumentEmbedder\n'), ((3900, 3987), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_prompt': 'document_prompt'}), '(llm_chain=llm_chain, document_prompt=document_prompt,\n **config)\n', (3919, 3987), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((5552, 5711), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'combine_document_chain': 'combine_document_chain', 'collapse_document_chain': 'collapse_document_chain'}), '(llm_chain=llm_chain, combine_document_chain=\n combine_document_chain, collapse_document_chain=collapse_document_chain,\n **config)\n', (5575, 5711), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((6314, 6360), 'langchain.chains.llm_bash.base.LLMBashChain', 'LLMBashChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (6326, 6360), False, 'from langchain.chains.llm_bash.base import LLMBashChain\n'), ((8225, 8469), 'langchain.chains.llm_checker.base.LLMCheckerChain', 'LLMCheckerChain', ([], {'llm': 'llm', 'create_draft_answer_prompt': 'create_draft_answer_prompt', 'list_assertions_prompt': 'list_assertions_prompt', 'check_assertions_prompt': 'check_assertions_prompt', 'revised_answer_prompt': 'revised_answer_prompt'}), '(llm=llm, create_draft_answer_prompt=\n create_draft_answer_prompt, list_assertions_prompt=\n list_assertions_prompt, check_assertions_prompt=check_assertions_prompt,\n revised_answer_prompt=revised_answer_prompt, **config)\n', (8240, 8469), False, 'from langchain.chains.llm_checker.base import LLMCheckerChain\n'), ((9083, 9129), 'langchain.chains.llm_math.base.LLMMathChain', 'LLMMathChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (9095, 9129), False, 'from langchain.chains.llm_math.base import LLMMathChain\n'), ((9579, 9633), 'langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain', 'MapRerankDocumentsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (9602, 9633), False, 'from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain\n'), ((10285, 10327), 'langchain.chains.pal.base.PALChain', 'PALChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (10293, 10327), False, 'from langchain.chains.pal.base import PALChain\n'), ((11566, 11706), 'langchain.chains.combine_documents.refine.RefineDocumentsChain', 'RefineDocumentsChain', ([], {'initial_llm_chain': 'initial_llm_chain', 'refine_llm_chain': 'refine_llm_chain', 'document_prompt': 'document_prompt'}), '(initial_llm_chain=initial_llm_chain, refine_llm_chain=\n refine_llm_chain, document_prompt=document_prompt, **config)\n', (11586, 11706), False, 'from langchain.chains.combine_documents.refine import RefineDocumentsChain\n'), ((12349, 12426), 
'langchain.chains.qa_with_sources.base.QAWithSourcesChain', 'QAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain'}), '(combine_documents_chain=combine_documents_chain, **config)\n', (12367, 12426), False, 'from langchain.chains.qa_with_sources.base import QAWithSourcesChain\n'), ((13054, 13123), 'langchain.chains.sql_database.base.SQLDatabaseChain', 'SQLDatabaseChain', ([], {'database': 'database', 'llm': 'llm', 'prompt': 'prompt'}), '(database=database, llm=llm, prompt=prompt, **config)\n', (13070, 13123), False, 'from langchain.chains.sql_database.base import SQLDatabaseChain\n'), ((13905, 14019), 'langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain', 'VectorDBQAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain,\n vectorstore=vectorstore, **config)\n', (13931, 14019), False, 'from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain\n'), ((14787, 14886), 'langchain.chains.vector_db_qa.base.VectorDBQA', 'VectorDBQA', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain, vectorstore=\n vectorstore, **config)\n', (14797, 14886), False, 'from langchain.chains.vector_db_qa.base import VectorDBQA\n'), ((16019, 16149), 'langchain.chains.api.base.APIChain', 'APIChain', ([], {'api_request_chain': 'api_request_chain', 'api_answer_chain': 'api_answer_chain', 'requests_wrapper': 'requests_wrapper'}), '(api_request_chain=api_request_chain, api_answer_chain=\n api_answer_chain, requests_wrapper=requests_wrapper, **config)\n', (16027, 16149), False, 'from langchain.chains.api.base import APIChain\n'), ((1653, 1685), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (1673, 1685), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((1936, 1974), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (1959, 1974), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((3604, 3642), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (3627, 3642), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((5892, 5924), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (5912, 5924), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6174, 6212), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (6197, 6212), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((6517, 6549), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (6537, 6549), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6879, 6937), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['create_draft_answer_prompt_config'], {}), '(create_draft_answer_prompt_config)\n', (6902, 6937), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7285, 7339), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', 
(['list_assertions_prompt_config'], {}), '(list_assertions_prompt_config)\n', (7308, 7339), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7635, 7690), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['check_assertions_prompt_config'], {}), '(check_assertions_prompt_config)\n', (7658, 7690), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8025, 8078), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['revised_answer_prompt_config'], {}), '(revised_answer_prompt_config)\n', (8048, 8078), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8661, 8693), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (8681, 8693), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((8943, 8981), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (8966, 8981), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((9775, 9807), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (9795, 9807), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((10057, 10095), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (10080, 10095), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((11399, 11437), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (11422, 11437), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((12722, 12754), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (12742, 12754), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((13004, 13042), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (13027, 13042), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((16709, 16796), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain', 'requests_wrapper': 'requests_wrapper'}), '(llm_chain=llm_chain, requests_wrapper=requests_wrapper, **\n config)\n', (16725, 16796), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((16839, 16886), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (16855, 16886), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((18341, 18429), 'langchain.utilities.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_chain_from_file', '"""chains"""', "{'json', 'yaml'}"], {}), "(path, _load_chain_from_file, 'chains', {'json', 'yaml'},\n **kwargs)\n", (18358, 18429), False, 'from langchain.utilities.loading import try_load_from_hub\n'), ((18724, 18734), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (18728, 18734), False, 'from pathlib import Path\n'), ((18899, 18911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18908, 18911), False, 'import json\n'), ((19011, 19028), 'yaml.safe_load', 'yaml.safe_load', 
(['f'], {}), '(f)\n', (19025, 19028), False, 'import yaml\n')] |
"""Functionality for loading chains."""
import json
from pathlib import Path
from typing import Any, Union
import yaml
from langchain.chains.api.base import APIChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.base import LLMBashChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.sql_database.base import SQLDatabaseChain
from langchain.chains.vector_db_qa.base import VectorDBQA
from langchain.llms.loading import load_llm, load_llm_from_config
from langchain.prompts.loading import load_prompt, load_prompt_from_config
from langchain.utilities.loading import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
"""Load LLM chain from config dict."""
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return LLMChain(llm=llm, prompt=prompt, **config)
def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
"""Load hypothetical document embedder chain from config dict."""
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "embeddings" in kwargs:
embeddings = kwargs.pop("embeddings")
else:
raise ValueError("`embeddings` must be present.")
return HypotheticalDocumentEmbedder(
llm_chain=llm_chain, base_embeddings=embeddings, **config
)
def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
else:
raise ValueError(
"One of `document_prompt` or `document_prompt_path` must be present."
)
return StuffDocumentsChain(
llm_chain=llm_chain, document_prompt=document_prompt, **config
)
def _load_map_reduce_documents_chain(
config: dict, **kwargs: Any
) -> MapReduceDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "combine_document_chain" in config:
combine_document_chain_config = config.pop("combine_document_chain")
combine_document_chain = load_chain_from_config(combine_document_chain_config)
elif "combine_document_chain_path" in config:
combine_document_chain = load_chain(config.pop("combine_document_chain_path"))
else:
raise ValueError(
"One of `combine_document_chain` or "
"`combine_document_chain_path` must be present."
)
if "collapse_document_chain" in config:
collapse_document_chain_config = config.pop("collapse_document_chain")
if collapse_document_chain_config is None:
collapse_document_chain = None
else:
collapse_document_chain = load_chain_from_config(
collapse_document_chain_config
)
elif "collapse_document_chain_path" in config:
collapse_document_chain = load_chain(config.pop("collapse_document_chain_path"))
return MapReduceDocumentsChain(
llm_chain=llm_chain,
combine_document_chain=combine_document_chain,
collapse_document_chain=collapse_document_chain,
**config,
)
def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMBashChain(llm=llm, prompt=prompt, **config)
def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "create_draft_answer_prompt" in config:
create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt")
create_draft_answer_prompt = load_prompt_from_config(
create_draft_answer_prompt_config
)
elif "create_draft_answer_prompt_path" in config:
create_draft_answer_prompt = load_prompt(
config.pop("create_draft_answer_prompt_path")
)
if "list_assertions_prompt" in config:
list_assertions_prompt_config = config.pop("list_assertions_prompt")
list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config)
elif "list_assertions_prompt_path" in config:
list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path"))
if "check_assertions_prompt" in config:
check_assertions_prompt_config = config.pop("check_assertions_prompt")
check_assertions_prompt = load_prompt_from_config(
check_assertions_prompt_config
)
elif "check_assertions_prompt_path" in config:
check_assertions_prompt = load_prompt(
config.pop("check_assertions_prompt_path")
)
if "revised_answer_prompt" in config:
revised_answer_prompt_config = config.pop("revised_answer_prompt")
revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config)
elif "revised_answer_prompt_path" in config:
revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path"))
return LLMCheckerChain(
llm=llm,
create_draft_answer_prompt=create_draft_answer_prompt,
list_assertions_prompt=list_assertions_prompt,
check_assertions_prompt=check_assertions_prompt,
revised_answer_prompt=revised_answer_prompt,
**config,
)
def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMMathChain(llm=llm, prompt=prompt, **config)
def _load_map_rerank_documents_chain(
config: dict, **kwargs: Any
) -> MapRerankDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
return MapRerankDocumentsChain(llm_chain=llm_chain, **config)
def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return PALChain(llm=llm, prompt=prompt, **config)
def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
if "initial_llm_chain" in config:
initial_llm_chain_config = config.pop("initial_llm_chain")
initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
elif "initial_llm_chain_path" in config:
initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"))
else:
raise ValueError(
"One of `initial_llm_chain` or `initial_llm_chain_config` must be present."
)
if "refine_llm_chain" in config:
refine_llm_chain_config = config.pop("refine_llm_chain")
refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
elif "refine_llm_chain_path" in config:
refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"))
else:
raise ValueError(
"One of `refine_llm_chain` or `refine_llm_chain_config` must be present."
)
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
return RefineDocumentsChain(
initial_llm_chain=initial_llm_chain,
refine_llm_chain=refine_llm_chain,
document_prompt=document_prompt,
**config,
)
def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config)
def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain:
if "database" in kwargs:
database = kwargs.pop("database")
else:
raise ValueError("`database` must be present.")
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config)
def _load_vector_db_qa_with_sources_chain(
config: dict, **kwargs: Any
) -> VectorDBQAWithSourcesChain:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQAWithSourcesChain(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQA(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
if "api_request_chain" in config:
api_request_chain_config = config.pop("api_request_chain")
api_request_chain = load_chain_from_config(api_request_chain_config)
elif "api_request_chain_path" in config:
api_request_chain = load_chain(config.pop("api_request_chain_path"))
else:
raise ValueError(
"One of `api_request_chain` or `api_request_chain_path` must be present."
)
if "api_answer_chain" in config:
api_answer_chain_config = config.pop("api_answer_chain")
api_answer_chain = load_chain_from_config(api_answer_chain_config)
elif "api_answer_chain_path" in config:
api_answer_chain = load_chain(config.pop("api_answer_chain_path"))
else:
raise ValueError(
"One of `api_answer_chain` or `api_answer_chain_path` must be present."
)
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
else:
raise ValueError("`requests_wrapper` must be present.")
return APIChain(
api_request_chain=api_request_chain,
api_answer_chain=api_answer_chain,
requests_wrapper=requests_wrapper,
**config,
)
def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
return LLMRequestsChain(
llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config
)
else:
return LLMRequestsChain(llm_chain=llm_chain, **config)
type_to_loader_dict = {
"api_chain": _load_api_chain,
"hyde_chain": _load_hyde_chain,
"llm_chain": _load_llm_chain,
"llm_bash_chain": _load_llm_bash_chain,
"llm_checker_chain": _load_llm_checker_chain,
"llm_math_chain": _load_llm_math_chain,
"llm_requests_chain": _load_llm_requests_chain,
"pal_chain": _load_pal_chain,
"qa_with_sources_chain": _load_qa_with_sources_chain,
"stuff_documents_chain": _load_stuff_documents_chain,
"map_reduce_documents_chain": _load_map_reduce_documents_chain,
"map_rerank_documents_chain": _load_map_rerank_documents_chain,
"refine_documents_chain": _load_refine_documents_chain,
"sql_database_chain": _load_sql_database_chain,
"vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain,
"vector_db_qa": _load_vector_db_qa,
}
def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
"""Load chain from Config Dict."""
if "_type" not in config:
raise ValueError("Must specify a chain Type in config")
config_type = config.pop("_type")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} chain not supported")
chain_loader = type_to_loader_dict[config_type]
return chain_loader(config, **kwargs)
def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
"""Unified method for loading a chain from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs
):
return hub_result
else:
return _load_chain_from_file(path, **kwargs)
def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain:
"""Load chain from file."""
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == ".yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError("File type must be json or yaml")
# Override default 'verbose' and 'memory' for the chain
if "verbose" in kwargs:
config["verbose"] = kwargs.pop("verbose")
if "memory" in kwargs:
config["memory"] = kwargs.pop("memory")
# Load the chain from the config now.
return load_chain_from_config(config, **kwargs)
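# Hedged usage sketch (added for illustration; not part of the original loader
# module). `type_to_loader_dict` doubles as the registry of supported chain
# types, and `load_chain` accepts a path to a JSON/YAML file produced earlier
# by `chain.save(...)`. The file path below is a placeholder.
#
# print(sorted(type_to_loader_dict))  # e.g. ['api_chain', 'hyde_chain', ...]
# chain = load_chain("chains/my_llm_chain.yaml", verbose=True)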
| [
"langchain.chains.sql_database.base.SQLDatabaseChain",
"langchain.prompts.loading.load_prompt_from_config",
"langchain.chains.qa_with_sources.base.QAWithSourcesChain",
"langchain.chains.pal.base.PALChain",
"langchain.chains.combine_documents.refine.RefineDocumentsChain",
"langchain.chains.llm.LLMChain",
"langchain.chains.hyde.base.HypotheticalDocumentEmbedder",
"langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain",
"langchain.chains.llm_checker.base.LLMCheckerChain",
"langchain.llms.loading.load_llm_from_config",
"langchain.chains.vector_db_qa.base.VectorDBQA",
"langchain.chains.llm_bash.base.LLMBashChain",
"langchain.utilities.loading.try_load_from_hub",
"langchain.chains.llm_requests.LLMRequestsChain",
"langchain.chains.api.base.APIChain",
"langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain",
"langchain.chains.llm_math.base.LLMMathChain",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain",
"langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain"
] | [((2165, 2207), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (2173, 2207), False, 'from langchain.chains.llm import LLMChain\n'), ((2853, 2945), 'langchain.chains.hyde.base.HypotheticalDocumentEmbedder', 'HypotheticalDocumentEmbedder', ([], {'llm_chain': 'llm_chain', 'base_embeddings': 'embeddings'}), '(llm_chain=llm_chain, base_embeddings=\n embeddings, **config)\n', (2881, 2945), False, 'from langchain.chains.hyde.base import HypotheticalDocumentEmbedder\n'), ((3900, 3987), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_prompt': 'document_prompt'}), '(llm_chain=llm_chain, document_prompt=document_prompt,\n **config)\n', (3919, 3987), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((5552, 5711), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'combine_document_chain': 'combine_document_chain', 'collapse_document_chain': 'collapse_document_chain'}), '(llm_chain=llm_chain, combine_document_chain=\n combine_document_chain, collapse_document_chain=collapse_document_chain,\n **config)\n', (5575, 5711), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((6314, 6360), 'langchain.chains.llm_bash.base.LLMBashChain', 'LLMBashChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (6326, 6360), False, 'from langchain.chains.llm_bash.base import LLMBashChain\n'), ((8225, 8469), 'langchain.chains.llm_checker.base.LLMCheckerChain', 'LLMCheckerChain', ([], {'llm': 'llm', 'create_draft_answer_prompt': 'create_draft_answer_prompt', 'list_assertions_prompt': 'list_assertions_prompt', 'check_assertions_prompt': 'check_assertions_prompt', 'revised_answer_prompt': 'revised_answer_prompt'}), '(llm=llm, create_draft_answer_prompt=\n create_draft_answer_prompt, list_assertions_prompt=\n list_assertions_prompt, check_assertions_prompt=check_assertions_prompt,\n revised_answer_prompt=revised_answer_prompt, **config)\n', (8240, 8469), False, 'from langchain.chains.llm_checker.base import LLMCheckerChain\n'), ((9083, 9129), 'langchain.chains.llm_math.base.LLMMathChain', 'LLMMathChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (9095, 9129), False, 'from langchain.chains.llm_math.base import LLMMathChain\n'), ((9579, 9633), 'langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain', 'MapRerankDocumentsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (9602, 9633), False, 'from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain\n'), ((10285, 10327), 'langchain.chains.pal.base.PALChain', 'PALChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (10293, 10327), False, 'from langchain.chains.pal.base import PALChain\n'), ((11566, 11706), 'langchain.chains.combine_documents.refine.RefineDocumentsChain', 'RefineDocumentsChain', ([], {'initial_llm_chain': 'initial_llm_chain', 'refine_llm_chain': 'refine_llm_chain', 'document_prompt': 'document_prompt'}), '(initial_llm_chain=initial_llm_chain, refine_llm_chain=\n refine_llm_chain, document_prompt=document_prompt, **config)\n', (11586, 11706), False, 'from langchain.chains.combine_documents.refine import RefineDocumentsChain\n'), ((12349, 12426), 
'langchain.chains.qa_with_sources.base.QAWithSourcesChain', 'QAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain'}), '(combine_documents_chain=combine_documents_chain, **config)\n', (12367, 12426), False, 'from langchain.chains.qa_with_sources.base import QAWithSourcesChain\n'), ((13054, 13123), 'langchain.chains.sql_database.base.SQLDatabaseChain', 'SQLDatabaseChain', ([], {'database': 'database', 'llm': 'llm', 'prompt': 'prompt'}), '(database=database, llm=llm, prompt=prompt, **config)\n', (13070, 13123), False, 'from langchain.chains.sql_database.base import SQLDatabaseChain\n'), ((13905, 14019), 'langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain', 'VectorDBQAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain,\n vectorstore=vectorstore, **config)\n', (13931, 14019), False, 'from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain\n'), ((14787, 14886), 'langchain.chains.vector_db_qa.base.VectorDBQA', 'VectorDBQA', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain, vectorstore=\n vectorstore, **config)\n', (14797, 14886), False, 'from langchain.chains.vector_db_qa.base import VectorDBQA\n'), ((16019, 16149), 'langchain.chains.api.base.APIChain', 'APIChain', ([], {'api_request_chain': 'api_request_chain', 'api_answer_chain': 'api_answer_chain', 'requests_wrapper': 'requests_wrapper'}), '(api_request_chain=api_request_chain, api_answer_chain=\n api_answer_chain, requests_wrapper=requests_wrapper, **config)\n', (16027, 16149), False, 'from langchain.chains.api.base import APIChain\n'), ((1653, 1685), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (1673, 1685), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((1936, 1974), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (1959, 1974), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((3604, 3642), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (3627, 3642), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((5892, 5924), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (5912, 5924), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6174, 6212), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (6197, 6212), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((6517, 6549), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (6537, 6549), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6879, 6937), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['create_draft_answer_prompt_config'], {}), '(create_draft_answer_prompt_config)\n', (6902, 6937), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7285, 7339), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', 
(['list_assertions_prompt_config'], {}), '(list_assertions_prompt_config)\n', (7308, 7339), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7635, 7690), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['check_assertions_prompt_config'], {}), '(check_assertions_prompt_config)\n', (7658, 7690), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8025, 8078), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['revised_answer_prompt_config'], {}), '(revised_answer_prompt_config)\n', (8048, 8078), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8661, 8693), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (8681, 8693), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((8943, 8981), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (8966, 8981), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((9775, 9807), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (9795, 9807), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((10057, 10095), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (10080, 10095), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((11399, 11437), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (11422, 11437), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((12722, 12754), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (12742, 12754), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((13004, 13042), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (13027, 13042), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((16709, 16796), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain', 'requests_wrapper': 'requests_wrapper'}), '(llm_chain=llm_chain, requests_wrapper=requests_wrapper, **\n config)\n', (16725, 16796), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((16839, 16886), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (16855, 16886), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((18341, 18429), 'langchain.utilities.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_chain_from_file', '"""chains"""', "{'json', 'yaml'}"], {}), "(path, _load_chain_from_file, 'chains', {'json', 'yaml'},\n **kwargs)\n", (18358, 18429), False, 'from langchain.utilities.loading import try_load_from_hub\n'), ((18724, 18734), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (18728, 18734), False, 'from pathlib import Path\n'), ((18899, 18911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18908, 18911), False, 'import json\n'), ((19011, 19028), 'yaml.safe_load', 'yaml.safe_load', 
(['f'], {}), '(f)\n', (19025, 19028), False, 'import yaml\n')] |
"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
class HuggingFaceEndpoint(LLM):
"""Wrapper around HuggingFaceHub Inference Endpoints.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceEndpoint
endpoint_url = (
"https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
)
hf = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
huggingfacehub_api_token="my-api-key"
)
"""
endpoint_url: str = ""
"""Endpoint URL to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.hf_api import HfApi
try:
HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=huggingfacehub_api_token,
).whoami()
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
values["huggingfacehub_api_token"] = huggingfacehub_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
# payload samples
parameter_payload = {"inputs": prompt, "parameters": _model_kwargs}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.huggingfacehub_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(
self.endpoint_url, headers=headers, json=parameter_payload
)
except requests.exceptions.RequestException as e: # This is the correct syntax
raise ValueError(f"Error raised by inference endpoint: {e}")
generated_text = response.json()
if "error" in generated_text:
raise ValueError(
f"Error raised by inference API: {generated_text['error']}"
)
if self.task == "text-generation":
# Text generation return includes the starter text.
text = generated_text[0]["generated_text"][len(prompt) :]
elif self.task == "text2text-generation":
text = generated_text[0]["generated_text"]
elif self.task == "summarization":
text = generated_text[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
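# Hedged usage sketch (added for illustration): the endpoint URL and API token
# below are placeholders and must point at a real Inference Endpoint for the
# call to succeed.
#
# hf = HuggingFaceEndpoint(
#     endpoint_url="https://<your-endpoint>.endpoints.huggingface.cloud",
#     task="text-generation",
#     huggingfacehub_api_token="hf_...",
# )
# print(hf("Tell me a joke.", stop=["\n\n"]))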
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1661, 1677), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1675, 1677), False, 'from pydantic import Extra, root_validator\n'), ((1848, 1936), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""huggingfacehub_api_token"""', '"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "(values, 'huggingfacehub_api_token',\n 'HUGGINGFACEHUB_API_TOKEN')\n", (1868, 1936), False, 'from langchain.utils import get_from_dict_or_env\n'), ((4131, 4204), 'requests.post', 'requests.post', (['self.endpoint_url'], {'headers': 'headers', 'json': 'parameter_payload'}), '(self.endpoint_url, headers=headers, json=parameter_payload)\n', (4144, 4204), False, 'import requests\n'), ((5337, 5368), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (5356, 5368), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2055, 2127), 'huggingface_hub.hf_api.HfApi', 'HfApi', ([], {'endpoint': '"""https://huggingface.co"""', 'token': 'huggingfacehub_api_token'}), "(endpoint='https://huggingface.co', token=huggingfacehub_api_token)\n", (2060, 2127), False, 'from huggingface_hub.hf_api import HfApi\n')] |
"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
class HuggingFaceEndpoint(LLM):
"""Wrapper around HuggingFaceHub Inference Endpoints.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceEndpoint
endpoint_url = (
"https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
)
hf = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
huggingfacehub_api_token="my-api-key"
)
"""
endpoint_url: str = ""
"""Endpoint URL to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.hf_api import HfApi
try:
HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=huggingfacehub_api_token,
).whoami()
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
values["huggingfacehub_api_token"] = huggingfacehub_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
# payload samples
parameter_payload = {"inputs": prompt, "parameters": _model_kwargs}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.huggingfacehub_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(
self.endpoint_url, headers=headers, json=parameter_payload
)
except requests.exceptions.RequestException as e: # This is the correct syntax
raise ValueError(f"Error raised by inference endpoint: {e}")
generated_text = response.json()
if "error" in generated_text:
raise ValueError(
f"Error raised by inference API: {generated_text['error']}"
)
if self.task == "text-generation":
# Text generation return includes the starter text.
text = generated_text[0]["generated_text"][len(prompt) :]
elif self.task == "text2text-generation":
text = generated_text[0]["generated_text"]
elif self.task == "summarization":
text = generated_text[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1661, 1677), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1675, 1677), False, 'from pydantic import Extra, root_validator\n'), ((1848, 1936), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""huggingfacehub_api_token"""', '"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "(values, 'huggingfacehub_api_token',\n 'HUGGINGFACEHUB_API_TOKEN')\n", (1868, 1936), False, 'from langchain.utils import get_from_dict_or_env\n'), ((4131, 4204), 'requests.post', 'requests.post', (['self.endpoint_url'], {'headers': 'headers', 'json': 'parameter_payload'}), '(self.endpoint_url, headers=headers, json=parameter_payload)\n', (4144, 4204), False, 'import requests\n'), ((5337, 5368), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (5356, 5368), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2055, 2127), 'huggingface_hub.hf_api.HfApi', 'HfApi', ([], {'endpoint': '"""https://huggingface.co"""', 'token': 'huggingfacehub_api_token'}), "(endpoint='https://huggingface.co', token=huggingfacehub_api_token)\n", (2060, 2127), False, 'from huggingface_hub.hf_api import HfApi\n')] |
"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
class HuggingFaceEndpoint(LLM):
"""Wrapper around HuggingFaceHub Inference Endpoints.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceEndpoint
endpoint_url = (
"https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
)
hf = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
huggingfacehub_api_token="my-api-key"
)
"""
endpoint_url: str = ""
"""Endpoint URL to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.hf_api import HfApi
try:
HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=huggingfacehub_api_token,
).whoami()
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
values["huggingfacehub_api_token"] = huggingfacehub_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
# payload samples
parameter_payload = {"inputs": prompt, "parameters": _model_kwargs}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.huggingfacehub_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(
self.endpoint_url, headers=headers, json=parameter_payload
)
except requests.exceptions.RequestException as e: # This is the correct syntax
raise ValueError(f"Error raised by inference endpoint: {e}")
generated_text = response.json()
if "error" in generated_text:
raise ValueError(
f"Error raised by inference API: {generated_text['error']}"
)
if self.task == "text-generation":
# Text generation return includes the starter text.
text = generated_text[0]["generated_text"][len(prompt) :]
elif self.task == "text2text-generation":
text = generated_text[0]["generated_text"]
elif self.task == "summarization":
text = generated_text[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1661, 1677), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1675, 1677), False, 'from pydantic import Extra, root_validator\n'), ((1848, 1936), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""huggingfacehub_api_token"""', '"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "(values, 'huggingfacehub_api_token',\n 'HUGGINGFACEHUB_API_TOKEN')\n", (1868, 1936), False, 'from langchain.utils import get_from_dict_or_env\n'), ((4131, 4204), 'requests.post', 'requests.post', (['self.endpoint_url'], {'headers': 'headers', 'json': 'parameter_payload'}), '(self.endpoint_url, headers=headers, json=parameter_payload)\n', (4144, 4204), False, 'import requests\n'), ((5337, 5368), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (5356, 5368), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2055, 2127), 'huggingface_hub.hf_api.HfApi', 'HfApi', ([], {'endpoint': '"""https://huggingface.co"""', 'token': 'huggingfacehub_api_token'}), "(endpoint='https://huggingface.co', token=huggingfacehub_api_token)\n", (2060, 2127), False, 'from huggingface_hub.hf_api import HfApi\n')] |
"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
class HuggingFaceEndpoint(LLM):
"""Wrapper around HuggingFaceHub Inference Endpoints.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceEndpoint
endpoint_url = (
"https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
)
hf = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
huggingfacehub_api_token="my-api-key"
)
"""
endpoint_url: str = ""
"""Endpoint URL to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.hf_api import HfApi
try:
HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=huggingfacehub_api_token,
).whoami()
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
values["huggingfacehub_api_token"] = huggingfacehub_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
# payload samples
parameter_payload = {"inputs": prompt, "parameters": _model_kwargs}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.huggingfacehub_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(
self.endpoint_url, headers=headers, json=parameter_payload
)
except requests.exceptions.RequestException as e: # This is the correct syntax
raise ValueError(f"Error raised by inference endpoint: {e}")
generated_text = response.json()
if "error" in generated_text:
raise ValueError(
f"Error raised by inference API: {generated_text['error']}"
)
if self.task == "text-generation":
# Text generation return includes the starter text.
text = generated_text[0]["generated_text"][len(prompt) :]
elif self.task == "text2text-generation":
text = generated_text[0]["generated_text"]
elif self.task == "summarization":
text = generated_text[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1661, 1677), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1675, 1677), False, 'from pydantic import Extra, root_validator\n'), ((1848, 1936), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""huggingfacehub_api_token"""', '"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "(values, 'huggingfacehub_api_token',\n 'HUGGINGFACEHUB_API_TOKEN')\n", (1868, 1936), False, 'from langchain.utils import get_from_dict_or_env\n'), ((4131, 4204), 'requests.post', 'requests.post', (['self.endpoint_url'], {'headers': 'headers', 'json': 'parameter_payload'}), '(self.endpoint_url, headers=headers, json=parameter_payload)\n', (4144, 4204), False, 'import requests\n'), ((5337, 5368), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (5356, 5368), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2055, 2127), 'huggingface_hub.hf_api.HfApi', 'HfApi', ([], {'endpoint': '"""https://huggingface.co"""', 'token': 'huggingfacehub_api_token'}), "(endpoint='https://huggingface.co', token=huggingfacehub_api_token)\n", (2060, 2127), False, 'from huggingface_hub.hf_api import HfApi\n')] |
import os
from langchain.llms.bedrock import Bedrock
from langchain import PromptTemplate
def get_llm():
model_kwargs = {
"maxTokenCount": 1024,
"stopSequences": [],
"temperature": 0,
"topP": 0.9
}
llm = Bedrock(
# credentials_profile_name=os.environ.get("BWB_PROFILE_NAME"), #sets the profile name to use for AWS credentials (if not the default)
region_name=os.environ.get("BWB_REGION_NAME"), #sets the region name (if not the default)
endpoint_url=os.environ.get("BWB_ENDPOINT_URL"), #sets the endpoint URL (if necessary)
model_id="amazon.titan-tg1-large", #use the Anthropic Claude model
model_kwargs=model_kwargs) #configure the properties for Claude
return llm
def get_prompt(user_input, template):
prompt_template = PromptTemplate.from_template(template) #this will automatically identify the input variables for the template
prompt = prompt_template.format(user_input=user_input)
return prompt
def get_text_response(user_input, template): #text-to-text client function
llm = get_llm()
prompt = get_prompt(user_input, template)
return llm.predict(prompt) #return a response to the prompt
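# Hedged usage sketch (added; not part of the original helpers): the template
# text and question are illustrative, and the call assumes the BWB_* environment
# variables and AWS credentials used by get_llm() are already configured.
if __name__ == "__main__":
    example_template = "You are a helpful assistant.\n\nQuestion: {user_input}\n\nAnswer:"
    print(get_text_response("What is Amazon Bedrock?", example_template))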
| [
"langchain.PromptTemplate.from_template"
] | [((844, 882), 'langchain.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (872, 882), False, 'from langchain import PromptTemplate\n'), ((437, 470), 'os.environ.get', 'os.environ.get', (['"""BWB_REGION_NAME"""'], {}), "('BWB_REGION_NAME')\n", (451, 470), False, 'import os\n'), ((536, 570), 'os.environ.get', 'os.environ.get', (['"""BWB_ENDPOINT_URL"""'], {}), "('BWB_ENDPOINT_URL')\n", (550, 570), False, 'import os\n')] |
from langchain import PromptTemplate, LLMChain
from langchain.document_loaders import TextLoader
from langchain.embeddings import LlamaCppEmbeddings
from langchain.llms import GPT4All
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.callbacks.base import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores.faiss import FAISS
# SCRIPT INFO:
#
# This script allows you to create a vectorstore from a file and query it with a question (hard coded).
#
# It shows how you could send questions to a GPT4All custom knowledge base and receive answers.
#
# If you want a chat style interface using a similar custom knowledge base, you can use the custom_chatbot.py script provided.
# Setup
gpt4all_path = './models/gpt4all-converted.bin'
llama_path = './models/ggml-model-q4_0.bin'
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
loader = TextLoader('./docs/shortened_sotu.txt')
embeddings = LlamaCppEmbeddings(model_path=llama_path)
llm = GPT4All(model=gpt4all_path, callback_manager=callback_manager, verbose=True)
# Split text
def split_chunks(sources):
chunks = []
splitter = RecursiveCharacterTextSplitter(chunk_size=256, chunk_overlap=32)
for chunk in splitter.split_documents(sources):
chunks.append(chunk)
return chunks
def create_index(chunks):
texts = [doc.page_content for doc in chunks]
metadatas = [doc.metadata for doc in chunks]
search_index = FAISS.from_texts(texts, embeddings, metadatas=metadatas)
return search_index
def similarity_search(query, index):
matched_docs = index.similarity_search(query, k=4)
sources = []
for doc in matched_docs:
sources.append(
{
"page_content": doc.page_content,
"metadata": doc.metadata,
}
)
return matched_docs, sources
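# Hedged helper sketch (added; not in the original script): wraps the two-phase
# index workflow used below, building and saving the FAISS index on the first
# run and reloading it on later runs. The directory name matches the calls below.
def load_or_build_index(path="./full_sotu_index"):
    try:
        return FAISS.load_local(path, embeddings)
    except Exception:
        search_index = create_index(split_chunks(loader.load()))
        search_index.save_local(path)
        return search_index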
# Create Index
# docs = loader.load()
# chunks = split_chunks(docs)
# index = create_index(chunks)
# Save Index (use this to save the index for later use)
# Comment the line below after running once successfully (IMPORTANT)
# index.save_local("state_of_the_union_index")
# Load Index (use this to load the index from a file, eg on your second time running things and beyond)
# Uncomment the line below after running once successfully (IMPORTANT)
index = FAISS.load_local("./full_sotu_index", embeddings)
# Set your query here manually
question = "Summarize the comments about NATO and its purpose."
matched_docs, sources = similarity_search(question, index)
template = """
Please use the following context to answer questions.
Context: {context}
---
Question: {question}
Answer: Let's think step by step."""
context = "\n".join([doc.page_content for doc in matched_docs])
prompt = PromptTemplate(template=template, input_variables=["context", "question"]).partial(context=context)
llm_chain = LLMChain(prompt=prompt, llm=llm)
print(llm_chain.run(question)) | [
"langchain.llms.GPT4All",
"langchain.LLMChain",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler",
"langchain.document_loaders.TextLoader",
"langchain.vectorstores.faiss.FAISS.load_local",
"langchain.vectorstores.faiss.FAISS.from_texts",
"langchain.embeddings.LlamaCppEmbeddings",
"langchain.PromptTemplate"
] | [((968, 1007), 'langchain.document_loaders.TextLoader', 'TextLoader', (['"""./docs/shortened_sotu.txt"""'], {}), "('./docs/shortened_sotu.txt')\n", (978, 1007), False, 'from langchain.document_loaders import TextLoader\n'), ((1021, 1062), 'langchain.embeddings.LlamaCppEmbeddings', 'LlamaCppEmbeddings', ([], {'model_path': 'llama_path'}), '(model_path=llama_path)\n', (1039, 1062), False, 'from langchain.embeddings import LlamaCppEmbeddings\n'), ((1069, 1145), 'langchain.llms.GPT4All', 'GPT4All', ([], {'model': 'gpt4all_path', 'callback_manager': 'callback_manager', 'verbose': '(True)'}), '(model=gpt4all_path, callback_manager=callback_manager, verbose=True)\n', (1076, 1145), False, 'from langchain.llms import GPT4All\n'), ((2399, 2448), 'langchain.vectorstores.faiss.FAISS.load_local', 'FAISS.load_local', (['"""./full_sotu_index"""', 'embeddings'], {}), "('./full_sotu_index', embeddings)\n", (2415, 2448), False, 'from langchain.vectorstores.faiss import FAISS\n'), ((2941, 2973), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'llm'}), '(prompt=prompt, llm=llm)\n', (2949, 2973), False, 'from langchain import PromptTemplate, LLMChain\n'), ((1219, 1283), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(256)', 'chunk_overlap': '(32)'}), '(chunk_size=256, chunk_overlap=32)\n', (1249, 1283), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1529, 1585), 'langchain.vectorstores.faiss.FAISS.from_texts', 'FAISS.from_texts', (['texts', 'embeddings'], {'metadatas': 'metadatas'}), '(texts, embeddings, metadatas=metadatas)\n', (1545, 1585), False, 'from langchain.vectorstores.faiss import FAISS\n'), ((924, 956), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (954, 956), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((2829, 2903), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['context', 'question']"}), "(template=template, input_variables=['context', 'question'])\n", (2843, 2903), False, 'from langchain import PromptTemplate, LLMChain\n')] |
from langchain.chains.router import MultiPromptChain
from langchain.chat_models import ChatOpenAI
from dotenv import load_dotenv
import os
# A template for working with LangChain's MultiPromptChain.
# It lets the large language model choose which prompt best suits the question.
# Load env files
load_dotenv()
openai_api_key = os.environ.get('openai_api_key')
# Create the templates
marketing_template = """
You are a skilled marketing professional.
You have a deep understanding of market analysis, consumer behavior, branding, and digital marketing strategies.
You can provide insightful recommendations and creative solutions to address various marketing-related questions.
Here is a marketing-related question:
{input}"""
business_template = """
You are an experienced business expert.
You possess knowledge in areas such as business strategy, entrepreneurship, market research, and financial analysis.
You can provide practical insights and strategic advice to address various business-related questions.
Here is a business-related question:
{input}"""
# Create prompt info
prompt_infos = [
{
"name": "marketing",
"description": "Good for answering marketing questions",
"prompt_template": marketing_template
},
{
"name": "business",
"description": "Good for answering business-related questions",
"prompt_template": business_template
}
]
# Create the chain
llm = ChatOpenAI(openai_api_key=openai_api_key, model_name="gpt-3.5-turbo", temperature=0.3)
chain = MultiPromptChain.from_prompts(llm=llm, prompt_infos=prompt_infos, verbose=True)
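# Hedged illustration (added): the router sends each input to whichever
# destination prompt fits best, so marketing and business questions can share
# the same chain. The question below is illustrative.
print(chain.run("How should a new coffee brand position itself on social media?"))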
# Example usage
question = "What is the best way to finance a startup?"
response = chain.run(question) | [
"langchain.chains.router.MultiPromptChain.from_prompts",
"langchain.chat_models.ChatOpenAI"
] | [((310, 323), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (321, 323), False, 'from dotenv import load_dotenv\n'), ((341, 373), 'os.environ.get', 'os.environ.get', (['"""openai_api_key"""'], {}), "('openai_api_key')\n", (355, 373), False, 'import os\n'), ((1461, 1551), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'openai_api_key', 'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0.3)'}), "(openai_api_key=openai_api_key, model_name='gpt-3.5-turbo',\n temperature=0.3)\n", (1471, 1551), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1556, 1635), 'langchain.chains.router.MultiPromptChain.from_prompts', 'MultiPromptChain.from_prompts', ([], {'llm': 'llm', 'prompt_infos': 'prompt_infos', 'verbose': '(True)'}), '(llm=llm, prompt_infos=prompt_infos, verbose=True)\n', (1585, 1635), False, 'from langchain.chains.router import MultiPromptChain\n')] |
import os
import requests
from langchain.tools import tool
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from sec_api import QueryApi
from unstructured.partition.html import partition_html
class SECTools():
@tool("Search 10-Q form")
def search_10q(data):
"""
Useful to search information from the latest 10-Q form for a
given stock.
The input to this tool should be a pipe (|) separated text of
length two, representing the stock ticker you are interested and what
question you have from it.
For example, `AAPL|what was last quarter's revenue`.
"""
stock, ask = data.split("|")
queryApi = QueryApi(api_key=os.environ['SEC_API_API_KEY'])
query = {
"query": {
"query_string": {
"query": f"ticker:{stock} AND formType:\"10-Q\""
}
},
"from": "0",
"size": "1",
"sort": [{ "filedAt": { "order": "desc" }}]
}
    filings = queryApi.get_filings(query)['filings']
    if len(filings) == 0:
      return "Sorry, I couldn't find any filing for this stock; check if the ticker is correct."
    link = filings[0]['linkToFilingDetails']
answer = SECTools.__embedding_search(link, ask)
return answer
@tool("Search 10-K form")
def search_10k(data):
"""
Useful to search information from the latest 10-K form for a
given stock.
The input to this tool should be a pipe (|) separated text of
length two, representing the stock ticker you are interested, what
question you have from it.
For example, `AAPL|what was last year's revenue`.
"""
stock, ask = data.split("|")
queryApi = QueryApi(api_key=os.environ['SEC_API_API_KEY'])
query = {
"query": {
"query_string": {
"query": f"ticker:{stock} AND formType:\"10-K\""
}
},
"from": "0",
"size": "1",
"sort": [{ "filedAt": { "order": "desc" }}]
}
    filings = queryApi.get_filings(query)['filings']
    if len(filings) == 0:
      return "Sorry, I couldn't find any filing for this stock; check if the ticker is correct."
    link = filings[0]['linkToFilingDetails']
answer = SECTools.__embedding_search(link, ask)
return answer
def __embedding_search(url, ask):
text = SECTools.__download_form_html(url)
elements = partition_html(text=text)
content = "\n".join([str(el) for el in elements])
text_splitter = CharacterTextSplitter(
separator = "\n",
chunk_size = 1000,
chunk_overlap = 150,
length_function = len,
is_separator_regex = False,
)
docs = text_splitter.create_documents([content])
retriever = FAISS.from_documents(
docs, OpenAIEmbeddings()
).as_retriever()
answers = retriever.get_relevant_documents(ask, top_k=4)
answers = "\n\n".join([a.page_content for a in answers])
return answers
def __download_form_html(url):
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9,pt-BR;q=0.8,pt;q=0.7',
'Cache-Control': 'max-age=0',
'Dnt': '1',
'Sec-Ch-Ua': '"Not_A Brand";v="8", "Chromium";v="120"',
'Sec-Ch-Ua-Mobile': '?0',
'Sec-Ch-Ua-Platform': '"macOS"',
'Sec-Fetch-Dest': 'document',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-Site': 'none',
'Sec-Fetch-User': '?1',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
}
response = requests.get(url, headers=headers)
return response.text
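# Hedged usage sketch (added; illustrative only): requires SEC_API_API_KEY and
# an OpenAI API key for the embedding search. The ticker and questions are
# placeholders.
#
# print(SECTools.search_10q.run("AAPL|What was last quarter's revenue?"))
# print(SECTools.search_10k.run("AAPL|What are the main risk factors discussed?"))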
| [
"langchain.embeddings.OpenAIEmbeddings",
"langchain.text_splitter.CharacterTextSplitter",
"langchain.tools.tool"
] | [((327, 351), 'langchain.tools.tool', 'tool', (['"""Search 10-Q form"""'], {}), "('Search 10-Q form')\n", (331, 351), False, 'from langchain.tools import tool\n'), ((1325, 1349), 'langchain.tools.tool', 'tool', (['"""Search 10-K form"""'], {}), "('Search 10-K form')\n", (1329, 1349), False, 'from langchain.tools import tool\n'), ((748, 795), 'sec_api.QueryApi', 'QueryApi', ([], {'api_key': "os.environ['SEC_API_API_KEY']"}), "(api_key=os.environ['SEC_API_API_KEY'])\n", (756, 795), False, 'from sec_api import QueryApi\n'), ((1742, 1789), 'sec_api.QueryApi', 'QueryApi', ([], {'api_key': "os.environ['SEC_API_API_KEY']"}), "(api_key=os.environ['SEC_API_API_KEY'])\n", (1750, 1789), False, 'from sec_api import QueryApi\n'), ((2413, 2438), 'unstructured.partition.html.partition_html', 'partition_html', ([], {'text': 'text'}), '(text=text)\n', (2427, 2438), False, 'from unstructured.partition.html import partition_html\n'), ((2513, 2637), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(1000)', 'chunk_overlap': '(150)', 'length_function': 'len', 'is_separator_regex': '(False)'}), "(separator='\\n', chunk_size=1000, chunk_overlap=150,\n length_function=len, is_separator_regex=False)\n", (2534, 2637), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((3814, 3848), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (3826, 3848), False, 'import requests\n'), ((2795, 2813), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (2811, 2813), False, 'from langchain.embeddings import OpenAIEmbeddings\n')] |
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
_PROMPT_TEMPLATE = """You are GPT-3, and you can't do math.
You can do basic math, and your memorization abilities are impressive, but you can't do any complex calculations that a human could not do in their head. You also have an annoying tendency to just make up highly specific, but wrong, answers.
So we hooked you up to a Python 3 kernel, and now you can execute code. If anyone gives you a hard math problem, just use this format and we’ll take care of the rest:
Question: ${{Question with hard calculation.}}
```python
${{Code that prints what you need to know}}
```
```output
${{Output of your code}}
```
Answer: ${{Answer}}
Otherwise, use this simpler format:
Question: ${{Question without hard calculation}}
Answer: ${{Answer}}
Begin.
Question: What is 37593 * 67?
```python
print(37593 * 67)
```
```output
2518731
```
Answer: 2518731
Question: {question}
"""
PROMPT = PromptTemplate(input_variables=["question"], template=_PROMPT_TEMPLATE)
| [
"langchain.prompts.prompt.PromptTemplate"
] | [((957, 1028), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['question']", 'template': '_PROMPT_TEMPLATE'}), "(input_variables=['question'], template=_PROMPT_TEMPLATE)\n", (971, 1028), False, 'from langchain.prompts.prompt import PromptTemplate\n')] |
import streamlit as st
from langchain.prompts import PromptTemplate
chat_template = PromptTemplate(
input_variables=['transcript','summary','chat_history','user_message', 'sentiment_report'],
template='''
You are an AI chatbot intended to discuss about the user's audio transcription.
\nTRANSCRIPT: "{transcript}"
\nTRANSCIRPT SUMMARY: "{summary}"
\nTRANSCRIPT SENTIMENT REPORT: "{sentiment_report}"
\nCHAT HISTORY: {chat_history}
\nUSER MESSAGE: "{user_message}"
\nAI RESPONSE HERE:
'''
)
sentiment_prompt = PromptTemplate(
input_variables=['transcript','summary'],
template='''
Return a single word sentiment of either ['Positive','Negative' or 'Neutral'] from this transcript and summary.
After that single word sentiment, add a comma, then return a sentiment report, analyzing transcript sentiment.
\nTRANSCRIPT: {transcript}
\nTRANSCRIPT SUMMARY: {summary}
\nSENTIMENT LABEL HERE ('Positive','Negative', or 'Neutral') <comma-seperated> REPORT HERE:
'''
)
fact_check_prompt = '''
    Fact-check this transcript for factual or logical inaccuracies or inconsistencies
    \nWrite a report on the factuality / logic of the transcript
\nTRANSCRIPT: {}
\nTRANSCRIPT SUMMARY: {}
\nAI FACT CHECK RESPONSE HERE:
''' | [
"langchain.prompts.PromptTemplate"
] | [((88, 562), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['transcript', 'summary', 'chat_history', 'user_message', 'sentiment_report']", 'template': '"""\n You are an AI chatbot intended to discuss about the user\'s audio transcription.\n \nTRANSCRIPT: "{transcript}"\n \nTRANSCIRPT SUMMARY: "{summary}"\n \nTRANSCRIPT SENTIMENT REPORT: "{sentiment_report}"\n \nCHAT HISTORY: {chat_history}\n \nUSER MESSAGE: "{user_message}"\n \nAI RESPONSE HERE:\n """'}), '(input_variables=[\'transcript\', \'summary\', \'chat_history\',\n \'user_message\', \'sentiment_report\'], template=\n """\n You are an AI chatbot intended to discuss about the user\'s audio transcription.\n \nTRANSCRIPT: "{transcript}"\n \nTRANSCIRPT SUMMARY: "{summary}"\n \nTRANSCRIPT SENTIMENT REPORT: "{sentiment_report}"\n \nCHAT HISTORY: {chat_history}\n \nUSER MESSAGE: "{user_message}"\n \nAI RESPONSE HERE:\n """\n )\n', (102, 562), False, 'from langchain.prompts import PromptTemplate\n'), ((595, 1095), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['transcript', 'summary']", 'template': '"""\n Return a single word sentiment of either [\'Positive\',\'Negative\' or \'Neutral\'] from this transcript and summary.\n After that single word sentiment, add a comma, then return a sentiment report, analyzing transcript sentiment.\n \nTRANSCRIPT: {transcript}\n \nTRANSCRIPT SUMMARY: {summary}\n \nSENTIMENT LABEL HERE (\'Positive\',\'Negative\', or \'Neutral\') <comma-seperated> REPORT HERE:\n """'}), '(input_variables=[\'transcript\', \'summary\'], template=\n """\n Return a single word sentiment of either [\'Positive\',\'Negative\' or \'Neutral\'] from this transcript and summary.\n After that single word sentiment, add a comma, then return a sentiment report, analyzing transcript sentiment.\n \nTRANSCRIPT: {transcript}\n \nTRANSCRIPT SUMMARY: {summary}\n \nSENTIMENT LABEL HERE (\'Positive\',\'Negative\', or \'Neutral\') <comma-seperated> REPORT HERE:\n """\n )\n', (609, 1095), False, 'from langchain.prompts import PromptTemplate\n')] |
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from dotenv import load_dotenv
import os
from langchain.chains import SimpleSequentialChain
# Create a .env file in the root of your project and add your OpenAI API key to it
# Load env files
load_dotenv()
openai_api_key = os.environ.get('openai_api_key')
# This is an LLMChain to generate company names given a company description.
llm = ChatOpenAI(temperature=0.7, model_name="gpt-3.5-turbo")
# Create templates
template_name = """You are a company name generator. Based on a company description, it is your job to create a company name.
Company description: {company_description}
Company name:"""
prompt_template_name = PromptTemplate(input_variables=["company_description"], template=template_name)
# This is an LLMChain to generate company slogans given a company name and company description.
template_slogan = """You are a company slogan generator. Based on a company name, it is your job to create a company slogan.
Company name: {company_name}
Company slogan:"""
prompt_template_slogan = PromptTemplate(input_variables=["company_name"], template=template_slogan)
# Create chains
name_chain = LLMChain(llm=llm, prompt=prompt_template_name)
slogan_chain = LLMChain(llm=llm, prompt=prompt_template_slogan)
# This is the overall chain where we run these two chains in sequence.
overall_chain = SimpleSequentialChain(chains=[name_chain, slogan_chain], verbose=True)
slogan = overall_chain.run("We are a company that sells shoes.")
| [
"langchain.chains.LLMChain",
"langchain.chains.SimpleSequentialChain",
"langchain.prompts.PromptTemplate",
"langchain.chat_models.ChatOpenAI"
] | [((321, 334), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (332, 334), False, 'from dotenv import load_dotenv\n'), ((352, 384), 'os.environ.get', 'os.environ.get', (['"""openai_api_key"""'], {}), "('openai_api_key')\n", (366, 384), False, 'import os\n'), ((469, 524), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.7)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0.7, model_name='gpt-3.5-turbo')\n", (479, 524), False, 'from langchain.chat_models import ChatOpenAI\n'), ((757, 836), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['company_description']", 'template': 'template_name'}), "(input_variables=['company_description'], template=template_name)\n", (771, 836), False, 'from langchain.prompts import PromptTemplate\n'), ((1137, 1211), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['company_name']", 'template': 'template_slogan'}), "(input_variables=['company_name'], template=template_slogan)\n", (1151, 1211), False, 'from langchain.prompts import PromptTemplate\n'), ((1242, 1288), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template_name'}), '(llm=llm, prompt=prompt_template_name)\n', (1250, 1288), False, 'from langchain.chains import LLMChain\n'), ((1304, 1352), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template_slogan'}), '(llm=llm, prompt=prompt_template_slogan)\n', (1312, 1352), False, 'from langchain.chains import LLMChain\n'), ((1441, 1511), 'langchain.chains.SimpleSequentialChain', 'SimpleSequentialChain', ([], {'chains': '[name_chain, slogan_chain]', 'verbose': '(True)'}), '(chains=[name_chain, slogan_chain], verbose=True)\n', (1462, 1511), False, 'from langchain.chains import SimpleSequentialChain\n')] |
import os
import streamlit as st
from PyPDF2 import PdfReader, PdfWriter
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
def ChatPDF(text):
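    # Split the extracted PDF text into chunks, embed them into a FAISS index, and answer user questions with an OpenAI QA chain.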
# st.write(text)
#split into chunks
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size = 1000,
chunk_overlap = 200,
length_function=len
)
chunks = text_splitter.split_text(text)
# st.write(chunks)
# creating embeddings
OPENAI_API_KEY = st.text_input("OPENAI API KEY", type = "password")
if OPENAI_API_KEY:
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
# st.write("Embedding Created")
# st.write(embeddings)
knowledge_base = FAISS.from_texts(chunks, embeddings)
st.write("Knowledge Base created ")
#show user input
def ask_question(i=0):
user_question = st.text_input("Ask a question about your PDF?",key = i)
if user_question:
docs = knowledge_base.similarity_search(user_question)
# st.write(docs)
llm = OpenAI(openai_api_key=OPENAI_API_KEY)
chain = load_qa_chain(llm, chain_type="stuff")
with get_openai_callback() as cb:
response = chain.run(input_documents=docs, question=user_question)
print(cb)
st.write(response)
ask_question(i+1)
ask_question()
def main():
st.set_page_config(page_title="Ask ur PDF",
page_icon="📄")
hide_st_style = """
<style>
                    #MainMenu {visibility: hidden;}
                    footer {visibility: hidden;}
                    header {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)
# st.write(st.set_page_config)
st.header("Ask your PDF 🤔💭")
#uploading file
pdf = st.file_uploader("Upload your PDF ", type="pdf")
# extract the text
if pdf is not None:
option = st.selectbox("What you want to do with PDF📜", [
"Meta Data📂",
"Extract Raw Text📄",
"Extract Links🔗",
"Extract Images🖼️",
"Make PDF password protected🔐",
"PDF Annotation📝",
"ChatPDF💬"
])
pdf_reader = PdfReader(pdf)
text = ""
for page in pdf_reader.pages:
text += page.extract_text()
if option == "Meta Data📂":
st.write(pdf_reader.metadata)
elif option == "Make PDF password protected🔐":
pswd = st.text_input("Enter yourpass word", type="password")
if pswd:
with st.spinner("Encrypting..."):
pdf_writer = PdfWriter()
for page_num in range(len(pdf_reader.pages)):
pdf_writer.add_page(pdf_reader.pages[page_num])
pdf_writer.encrypt(pswd)
with open(f"{pdf.name.split('.')[0]}_encrypted.pdf", "wb") as f:
pdf_writer.write(f)
st.success("Encryption Successful!")
st.download_button(
label="Download Encrypted PDF",
data=open(f"{pdf.name.split('.')[0]}_encrypted.pdf", "rb").read(),
file_name=f"{pdf.name.split('.')[0]}_encrypted.pdf",
mime="application/octet-stream",
)
try:
os.remove(f"{pdf.name.split('.')[0]}_encrypted.pdf")
except: pass
elif option == "Extract Raw Text📄":
st.write(text)
elif option == "Extract Links🔗":
for page in pdf_reader.pages:
if "/Annots" in page:
for annot in page["/Annots"]:
subtype = annot.get_object()["/Subtype"]
if subtype == "/Link":
try:
st.write(annot.get_object()["/A"]["/URI"])
except: pass
elif option == "Extract Images🖼️":
for page in pdf_reader.pages:
try:
for img in page.images:
st.write(img.name)
st.image(img.data)
except: pass
elif option == "PDF Annotation📝":
for page in pdf_reader.pages:
if "/Annots" in page:
for annot in page["/Annots"]:
obj = annot.get_object()
st.write(obj)
st.write("***********")
annotation = {"subtype": obj["/Subtype"], "location": obj["/Rect"]}
st.write(annotation)
elif option == "ChatPDF💬":
ChatPDF(text)
if __name__ == "__main__":
main()
| [
"langchain.chains.question_answering.load_qa_chain",
"langchain.text_splitter.CharacterTextSplitter",
"langchain.llms.OpenAI",
"langchain.callbacks.get_openai_callback",
"langchain.vectorstores.FAISS.from_texts",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((481, 579), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(1000)', 'chunk_overlap': '(200)', 'length_function': 'len'}), "(separator='\\n', chunk_size=1000, chunk_overlap=200,\n length_function=len)\n", (502, 579), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((745, 793), 'streamlit.text_input', 'st.text_input', (['"""OPENAI API KEY"""'], {'type': '"""password"""'}), "('OPENAI API KEY', type='password')\n", (758, 793), True, 'import streamlit as st\n'), ((1783, 1841), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Ask ur PDF"""', 'page_icon': '"""📄"""'}), "(page_title='Ask ur PDF', page_icon='📄')\n", (1801, 1841), True, 'import streamlit as st\n'), ((2081, 2131), 'streamlit.markdown', 'st.markdown', (['hide_st_style'], {'unsafe_allow_html': '(True)'}), '(hide_st_style, unsafe_allow_html=True)\n', (2092, 2131), True, 'import streamlit as st\n'), ((2175, 2203), 'streamlit.header', 'st.header', (['"""Ask your PDF 🤔💭"""'], {}), "('Ask your PDF 🤔💭')\n", (2184, 2203), True, 'import streamlit as st\n'), ((2242, 2290), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload your PDF """'], {'type': '"""pdf"""'}), "('Upload your PDF ', type='pdf')\n", (2258, 2290), True, 'import streamlit as st\n'), ((842, 889), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (858, 889), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((989, 1025), 'langchain.vectorstores.FAISS.from_texts', 'FAISS.from_texts', (['chunks', 'embeddings'], {}), '(chunks, embeddings)\n', (1005, 1025), False, 'from langchain.vectorstores import FAISS\n'), ((1035, 1070), 'streamlit.write', 'st.write', (['"""Knowledge Base created """'], {}), "('Knowledge Base created ')\n", (1043, 1070), True, 'import streamlit as st\n'), ((2360, 2551), 'streamlit.selectbox', 'st.selectbox', (['"""What you want to do with PDF📜"""', "['Meta Data📂', 'Extract Raw Text📄', 'Extract Links🔗', 'Extract Images🖼️',\n 'Make PDF password protected🔐', 'PDF Annotation📝', 'ChatPDF💬']"], {}), "('What you want to do with PDF📜', ['Meta Data📂',\n 'Extract Raw Text📄', 'Extract Links🔗', 'Extract Images🖼️',\n 'Make PDF password protected🔐', 'PDF Annotation📝', 'ChatPDF💬'])\n", (2372, 2551), True, 'import streamlit as st\n'), ((2672, 2686), 'PyPDF2.PdfReader', 'PdfReader', (['pdf'], {}), '(pdf)\n', (2681, 2686), False, 'from PyPDF2 import PdfReader, PdfWriter\n'), ((1160, 1214), 'streamlit.text_input', 'st.text_input', (['"""Ask a question about your PDF?"""'], {'key': 'i'}), "('Ask a question about your PDF?', key=i)\n", (1173, 1214), True, 'import streamlit as st\n'), ((2835, 2864), 'streamlit.write', 'st.write', (['pdf_reader.metadata'], {}), '(pdf_reader.metadata)\n', (2843, 2864), True, 'import streamlit as st\n'), ((1378, 1415), 'langchain.llms.OpenAI', 'OpenAI', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (1384, 1415), False, 'from langchain.llms import OpenAI\n'), ((1441, 1479), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (1454, 1479), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((1667, 1685), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (1675, 1685), True, 'import streamlit as st\n'), ((2941, 2994), 'streamlit.text_input', 
'st.text_input', (['"""Enter yourpass word"""'], {'type': '"""password"""'}), "('Enter yourpass word', type='password')\n", (2954, 2994), True, 'import streamlit as st\n'), ((1502, 1523), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (1521, 1523), False, 'from langchain.callbacks import get_openai_callback\n'), ((4062, 4076), 'streamlit.write', 'st.write', (['text'], {}), '(text)\n', (4070, 4076), True, 'import streamlit as st\n'), ((3039, 3066), 'streamlit.spinner', 'st.spinner', (['"""Encrypting..."""'], {}), "('Encrypting...')\n", (3049, 3066), True, 'import streamlit as st\n'), ((3102, 3113), 'PyPDF2.PdfWriter', 'PdfWriter', ([], {}), '()\n', (3111, 3113), False, 'from PyPDF2 import PdfReader, PdfWriter\n'), ((3480, 3516), 'streamlit.success', 'st.success', (['"""Encryption Successful!"""'], {}), "('Encryption Successful!')\n", (3490, 3516), True, 'import streamlit as st\n'), ((4697, 4715), 'streamlit.write', 'st.write', (['img.name'], {}), '(img.name)\n', (4705, 4715), True, 'import streamlit as st\n'), ((4741, 4759), 'streamlit.image', 'st.image', (['img.data'], {}), '(img.data)\n', (4749, 4759), True, 'import streamlit as st\n'), ((5041, 5054), 'streamlit.write', 'st.write', (['obj'], {}), '(obj)\n', (5049, 5054), True, 'import streamlit as st\n'), ((5080, 5103), 'streamlit.write', 'st.write', (['"""***********"""'], {}), "('***********')\n", (5088, 5103), True, 'import streamlit as st\n'), ((5222, 5242), 'streamlit.write', 'st.write', (['annotation'], {}), '(annotation)\n', (5230, 5242), True, 'import streamlit as st\n')] |
"""Toolkit for the Wolfram Alpha API."""
from typing import List
from langchain.tools.base import BaseTool, BaseToolkit
from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
class WolframAlphaToolkit(BaseToolkit):
"""Tool that adds the capability to interact with Wolfram Alpha."""
wolfram_alpha_appid: str
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
wrapper = WolframAlphaAPIWrapper(wolfram_alpha_appid=self.wolfram_alpha_appid)
return [
WolframAlphaQueryRun(
api_wrapper=wrapper,
)
]
| [
"langchain.utilities.wolfram_alpha.WolframAlphaAPIWrapper",
"langchain.tools.wolfram_alpha.tool.WolframAlphaQueryRun"
] | [((509, 577), 'langchain.utilities.wolfram_alpha.WolframAlphaAPIWrapper', 'WolframAlphaAPIWrapper', ([], {'wolfram_alpha_appid': 'self.wolfram_alpha_appid'}), '(wolfram_alpha_appid=self.wolfram_alpha_appid)\n', (531, 577), False, 'from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper\n'), ((607, 648), 'langchain.tools.wolfram_alpha.tool.WolframAlphaQueryRun', 'WolframAlphaQueryRun', ([], {'api_wrapper': 'wrapper'}), '(api_wrapper=wrapper)\n', (627, 648), False, 'from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun\n')] |
"""Toolkit for the Wolfram Alpha API."""
from typing import List
from langchain.tools.base import BaseTool, BaseToolkit
from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
class WolframAlphaToolkit(BaseToolkit):
"""Tool that adds the capability to interact with Wolfram Alpha."""
wolfram_alpha_appid: str
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
wrapper = WolframAlphaAPIWrapper(wolfram_alpha_appid=self.wolfram_alpha_appid)
return [
WolframAlphaQueryRun(
api_wrapper=wrapper,
)
]
| [
"langchain.utilities.wolfram_alpha.WolframAlphaAPIWrapper",
"langchain.tools.wolfram_alpha.tool.WolframAlphaQueryRun"
] | [((509, 577), 'langchain.utilities.wolfram_alpha.WolframAlphaAPIWrapper', 'WolframAlphaAPIWrapper', ([], {'wolfram_alpha_appid': 'self.wolfram_alpha_appid'}), '(wolfram_alpha_appid=self.wolfram_alpha_appid)\n', (531, 577), False, 'from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper\n'), ((607, 648), 'langchain.tools.wolfram_alpha.tool.WolframAlphaQueryRun', 'WolframAlphaQueryRun', ([], {'api_wrapper': 'wrapper'}), '(api_wrapper=wrapper)\n', (627, 648), False, 'from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun\n')] |
"""The function tools tht are actually implemented"""
import json
import subprocess
from langchain.agents.load_tools import load_tools
from langchain.tools import BaseTool
from langchain.utilities.bash import BashProcess
from toolemu.tools.tool_interface import (
ArgException,
ArgParameter,
ArgReturn,
FunctionTool,
FunctionToolkit,
)
from toolemu.utils.my_typing import *
from .register import register_toolkit
__ALL__ = ["RealTerminal", "RealPythonInterpreter", "RealWikipedia", "RealHuman"]
class MyBashProcess(BashProcess):
def _run(self, command: str) -> Tuple[str, int]:
"""
Runs a command in a subprocess and returns
the output.
Args:
command: The command to run
""" # noqa: E501
try:
output = (
subprocess.run(
command,
shell=True,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
.stdout.decode()
.strip()
)
except subprocess.CalledProcessError as error:
if self.return_err_output:
return error.stdout.decode().strip(), error.returncode
return str(error).strip(), error.returncode
if self.strip_newlines:
output = output.strip()
return output, 0
#################### Terminal Interpreter ####################
class RealTerminalExecute(FunctionTool):
name = "TerminalExecute"
summary = "Execute a terminal command and return the output. This command should follow proper syntax and be supported by the terminal environment."
parameters: List[ArgParameter] = [
{
"name": "command",
"type": "string",
"description": "The command to execute in the terminal.",
"required": True,
}
]
returns: List[ArgReturn] = [
{
"name": "output",
"type": "string",
"description": "The output generated by the executed terminal command, including both standard output and standard error streams.",
},
{
"name": "exit_code",
"type": "integer",
"description": "The exit code returned by the executed command. A zero value indicates successful execution, while non-zero values indicate errors or exceptions.",
},
]
exceptions: List[ArgException] = [
{
"name": "InvalidRequestException",
"description": "The 'command' parameter contains an invalid or malformed command, which results in a failed execution attempt.",
}
]
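    # Shared bash process used to run the terminal commands; configured to return error output instead of raising.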
_tool: BaseTool = MyBashProcess(return_err_output=True)
def parse_return(self, tool_output: Dict[str, Any]) -> str:
return json.dumps({"output": tool_output[0], "exit_code": tool_output[1]})
def _runtool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]:
return self._tool._run(tool_input["command"])
def _aruntool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]:
return self._tool._arun(tool_input["command"])
@register_toolkit()
class RealTerminal(FunctionToolkit):
name_for_human = "Terminal command executor"
description_for_human = "Executes commands in a terminal."
name_for_model = "Terminal"
description_for_model = "Executes commands in a terminal on the user's local system. Use it to run valid terminal commands for tasks such as file management, system control, and more"
tool_classes = [RealTerminalExecute]
#################### Python Interpreter ####################
class RealPythonInterpreterExecute(FunctionTool):
name = "PythonInterpreterExecute"
summary = "Execute a Python script."
parameters: List[ArgParameter] = [
{
"name": "script",
"type": "string",
"description": "The python script to execute.",
"required": True,
}
]
returns: List[ArgReturn] = [
{
"name": "result",
"type": "string",
"description": "The printed output of the script.",
}
]
exceptions: List[ArgException] = []
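    # Delegate execution to LangChain's built-in Python REPL tool.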
_tool: BaseTool = load_tools(["python_repl"])[0]
def parse_return(self, tool_output: str) -> str:
return json.dumps({"result": tool_output})
def _runtool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]:
return self._tool._run(tool_input["script"])
def _aruntool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]:
return self._tool._arun(tool_input["script"])
@register_toolkit()
class RealPythonInterpreter(FunctionToolkit):
name_for_human = "Python interpreter"
description_for_human = "A Python shell."
name_for_model = "PythonInterpreter"
description_for_model = "A Python shell. Use it to execute python scripts. If you want to see the output of a value, you should print it out with `print(...)`."
tool_classes = [RealPythonInterpreterExecute]
#################### Wikipedia ####################
class RealWikipediaSearch(FunctionTool):
name = "WikipediaSearch"
summary = "Query the Wikipedia tool for a given query."
parameters: List[ArgParameter] = [
{
"name": "query",
"type": "string",
"description": "The query to search for.",
"required": True,
}
]
returns: List[ArgReturn] = [
{
"name": "result",
"type": "string",
"description": "The summary of the Wikipedia article.",
}
]
exceptions: List[ArgException] = []
_tool: BaseTool = load_tools(["wikipedia"])[0]
def parse_return(self, tool_output: str) -> str:
return json.dumps({"result": tool_output})
def _runtool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]:
return self._tool._run(tool_input["query"])
def _aruntool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]:
return self._tool._arun(tool_input["query"])
@register_toolkit()
class RealWikipedia(FunctionToolkit):
name_for_human = "Wikipedia search tool"
description_for_human = "Tool for searching through Wikipedia."
name_for_model = "Wikipedia"
description_for_model = "Tool for searching through Wikipedia. Use it whenever you need to provide accurate responses for general questions about people, places, companies, historical events, or other subjects."
tool_classes = [RealWikipediaSearch]
#################### Human ####################
class RealHumanAssistanceQuery(FunctionTool):
name = "HumanAssistanceQuery"
summary = "Ask the human a specific question"
parameters: List[ArgParameter] = [
{
"name": "question",
"type": "string",
"description": "The question to ask.",
"required": True,
}
]
returns: List[ArgReturn] = [
{
"name": "answer",
"type": "string",
"description": "The answer from the human.",
}
]
exceptions: List[ArgException] = []
def parse_return(self, tool_output: str) -> str:
return json.dumps({"answer": tool_output})
def _runtool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]:
print("\n" + tool_input["question"] + "\n")
return input(tool_input["question"])
def _aruntool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]:
        raise NotImplementedError("Human tool does not support async")
@register_toolkit()
class RealHuman(FunctionToolkit):
name_for_human = "Human assistance"
description_for_human = "Seek human assistance or guidance."
name_for_model = "HumanAssistance"
description_for_model = "Seek human assistance or guidance. Use it when expert human or user input is necessary, e.g., when you need some human knowledge, user permission, user-specific information."
tool_classes = [RealHumanAssistanceQuery]
| [
"langchain.agents.load_tools.load_tools"
] | [((2863, 2930), 'json.dumps', 'json.dumps', (["{'output': tool_output[0], 'exit_code': tool_output[1]}"], {}), "({'output': tool_output[0], 'exit_code': tool_output[1]})\n", (2873, 2930), False, 'import json\n'), ((4269, 4296), 'langchain.agents.load_tools.load_tools', 'load_tools', (["['python_repl']"], {}), "(['python_repl'])\n", (4279, 4296), False, 'from langchain.agents.load_tools import load_tools\n'), ((4369, 4404), 'json.dumps', 'json.dumps', (["{'result': tool_output}"], {}), "({'result': tool_output})\n", (4379, 4404), False, 'import json\n'), ((5711, 5736), 'langchain.agents.load_tools.load_tools', 'load_tools', (["['wikipedia']"], {}), "(['wikipedia'])\n", (5721, 5736), False, 'from langchain.agents.load_tools import load_tools\n'), ((5809, 5844), 'json.dumps', 'json.dumps', (["{'result': tool_output}"], {}), "({'result': tool_output})\n", (5819, 5844), False, 'import json\n'), ((7232, 7267), 'json.dumps', 'json.dumps', (["{'answer': tool_output}"], {}), "({'answer': tool_output})\n", (7242, 7267), False, 'import json\n'), ((824, 925), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)', 'check': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(command, shell=True, check=True, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n', (838, 925), False, 'import subprocess\n')] |
from typing import List, Optional, Any, Dict
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
from pydantic import Extra, root_validator
from sam.gpt.quora import PoeClient, PoeResponse
# token = "KaEMfvDPEXoS115jzAFRRg%3D%3D"
# prompt = "write a java function that prints the nth fibonacci number. provide example usage"
# streaming_response = False
# render_markdown = True
# chat_mode = False
class Poe(LLM):
client: PoeClient
model: Optional[str] = "gpt-3.5-turbo"
custom_model: bool = False
token: str
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
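        # Resolve the Poe session cookie from the provided values or the POE_COOKIE environment variable, then build the client.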
token = get_from_dict_or_env(
values, "token", "POE_COOKIE"
)
values["client"] = PoeClient(token)
return values
class Config:
extra = Extra.forbid
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
models = {
'sage': 'capybara',
'gpt-4': 'beaver',
'claude-v1.2': 'a2_2',
'claude-instant-v1.0': 'a2',
'gpt-3.5-turbo': 'chinchilla',
}
_model = models[self.model] if not self.custom_model else self.model
return {
"model": _model,
"token": self.token,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
return "poe"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
params = self._default_params
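        # Drain the streamed chunks; after the loop, 'chunk' holds the last chunk, whose 'text' field is used as the completion.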
        for chunk in self.client.send_message(params["model"], prompt):
pass
response = PoeResponse(
{
'id': chunk['messageId'],
'object': 'text_completion',
'created': chunk['creationTime'],
                'model': params["model"],
'choices': [
{
'text': chunk['text'],
'index': 0,
'logprobs': None,
'finish_reason': 'stop',
}
],
'usage': {
'prompt_tokens': len(prompt),
'completion_tokens': len(chunk['text']),
'total_tokens': len(prompt) + len(chunk['text']),
},
}
)
text = response.completion.choices[0].text
return text
| [
"langchain.utils.get_from_dict_or_env"
] | [((573, 589), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (587, 589), False, 'from pydantic import Extra, root_validator\n'), ((663, 714), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""token"""', '"""POE_COOKIE"""'], {}), "(values, 'token', 'POE_COOKIE')\n", (683, 714), False, 'from langchain.utils import get_from_dict_or_env\n'), ((765, 781), 'sam.gpt.quora.PoeClient', 'PoeClient', (['token'], {}), '(token)\n', (774, 781), False, 'from sam.gpt.quora import PoeClient, PoeResponse\n')] |
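A minimal usage sketch for the Poe wrapper defined above. The `sam.gpt.quora` package, the cookie value, and the chosen model name are assumptions for illustration and are not part of the original snippet; a real Poe session cookie is required for the call to succeed.

from sam.gpt.quora import PoeClient  # assumed installed, as in the snippet above

poe_cookie = "<your-poe-cookie>"  # hypothetical placeholder, not a real token
client = PoeClient(poe_cookie)
llm = Poe(client=client, token=poe_cookie, model="gpt-3.5-turbo")
# LLM subclasses are callable with a prompt string in this version of LangChain.
print(llm("Write a one-line summary of what a fibonacci function does."))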
from __future__ import annotations
from typing import List, Optional
from pydantic import ValidationError
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.experimental.autonomous_agents.autogpt.output_parser import (
AutoGPTOutputParser,
BaseAutoGPTOutputParser,
)
from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import (
FINISH_NAME,
)
from langchain.schema import (
AIMessage,
BaseMessage,
Document,
HumanMessage,
SystemMessage,
)
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain.vectorstores.base import VectorStoreRetriever
class AutoGPT:
"""Agent class for interacting with Auto-GPT."""
def __init__(
self,
ai_name: str,
memory: VectorStoreRetriever,
chain: LLMChain,
output_parser: BaseAutoGPTOutputParser,
tools: List[BaseTool],
feedback_tool: Optional[HumanInputRun] = None,
):
self.ai_name = ai_name
self.memory = memory
self.full_message_history: List[BaseMessage] = []
self.next_action_count = 0
self.chain = chain
self.output_parser = output_parser
self.tools = tools
self.feedback_tool = feedback_tool
@classmethod
def from_llm_and_tools(
cls,
ai_name: str,
ai_role: str,
memory: VectorStoreRetriever,
tools: List[BaseTool],
llm: BaseChatModel,
human_in_the_loop: bool = False,
output_parser: Optional[BaseAutoGPTOutputParser] = None,
) -> AutoGPT:
prompt = AutoGPTPrompt(
ai_name=ai_name,
ai_role=ai_role,
tools=tools,
input_variables=["memory", "messages", "goals", "user_input"],
token_counter=llm.get_num_tokens,
)
human_feedback_tool = HumanInputRun() if human_in_the_loop else None
chain = LLMChain(llm=llm, prompt=prompt)
return cls(
ai_name,
memory,
chain,
output_parser or AutoGPTOutputParser(),
tools,
feedback_tool=human_feedback_tool,
)
def run(self, goals: List[str]) -> str:
user_input = (
"Determine which next command to use, "
"and respond using the format specified above:"
)
# Interaction Loop
loop_count = 0
while True:
# Discontinue if continuous limit is reached
loop_count += 1
# Send message to AI, get response
assistant_reply = self.chain.run(
goals=goals,
messages=self.full_message_history,
memory=self.memory,
user_input=user_input,
)
# Print Assistant thoughts
print(assistant_reply)
self.full_message_history.append(HumanMessage(content=user_input))
self.full_message_history.append(AIMessage(content=assistant_reply))
# Get command name and arguments
action = self.output_parser.parse(assistant_reply)
tools = {t.name: t for t in self.tools}
if action.name == FINISH_NAME:
return action.args["response"]
if action.name in tools:
tool = tools[action.name]
try:
observation = tool.run(action.args)
except ValidationError as e:
observation = (
f"Validation Error in args: {str(e)}, args: {action.args}"
)
except Exception as e:
observation = (
f"Error: {str(e)}, {type(e).__name__}, args: {action.args}"
)
result = f"Command {tool.name} returned: {observation}"
elif action.name == "ERROR":
result = f"Error: {action.args}. "
else:
result = (
f"Unknown command '{action.name}'. "
f"Please refer to the 'COMMANDS' list for available "
f"commands and only respond in the specified JSON format."
)
memory_to_add = (
f"Assistant Reply: {assistant_reply} " f"\nResult: {result} "
)
if self.feedback_tool is not None:
feedback = f"\n{self.feedback_tool.run('Input: ')}"
if feedback in {"q", "stop"}:
print("EXITING")
return "EXITING"
memory_to_add += feedback
self.memory.add_documents([Document(page_content=memory_to_add)])
self.full_message_history.append(SystemMessage(content=result))
| [
"langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTOutputParser",
"langchain.tools.human.tool.HumanInputRun",
"langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt",
"langchain.schema.HumanMessage",
"langchain.chains.llm.LLMChain",
"langchain.schema.AIMessage",
"langchain.schema.Document",
"langchain.schema.SystemMessage"
] | [((1753, 1918), 'langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt', 'AutoGPTPrompt', ([], {'ai_name': 'ai_name', 'ai_role': 'ai_role', 'tools': 'tools', 'input_variables': "['memory', 'messages', 'goals', 'user_input']", 'token_counter': 'llm.get_num_tokens'}), "(ai_name=ai_name, ai_role=ai_role, tools=tools,\n input_variables=['memory', 'messages', 'goals', 'user_input'],\n token_counter=llm.get_num_tokens)\n", (1766, 1918), False, 'from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt\n'), ((2075, 2107), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (2083, 2107), False, 'from langchain.chains.llm import LLMChain\n'), ((2012, 2027), 'langchain.tools.human.tool.HumanInputRun', 'HumanInputRun', ([], {}), '()\n', (2025, 2027), False, 'from langchain.tools.human.tool import HumanInputRun\n'), ((2217, 2238), 'langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTOutputParser', 'AutoGPTOutputParser', ([], {}), '()\n', (2236, 2238), False, 'from langchain.experimental.autonomous_agents.autogpt.output_parser import AutoGPTOutputParser, BaseAutoGPTOutputParser\n'), ((3045, 3077), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'user_input'}), '(content=user_input)\n', (3057, 3077), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n'), ((3124, 3158), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'assistant_reply'}), '(content=assistant_reply)\n', (3133, 3158), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n'), ((4895, 4924), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'result'}), '(content=result)\n', (4908, 4924), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n'), ((4811, 4847), 'langchain.schema.Document', 'Document', ([], {'page_content': 'memory_to_add'}), '(page_content=memory_to_add)\n', (4819, 4847), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n')] |
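A brief, hedged sketch of wiring the AutoGPT class above together, following the pattern used in the LangChain documentation of this era. It assumes `faiss-cpu` is installed and `OPENAI_API_KEY` is set; the agent name, role, tool choice, and goal string are illustrative only.

import faiss
from langchain.chat_models import ChatOpenAI
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.tools.file_management.write import WriteFileTool
from langchain.vectorstores import FAISS

# In-memory FAISS store used as the agent's long-term memory.
embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)  # 1536 matches the OpenAI embedding dimensionality
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})

agent = AutoGPT.from_llm_and_tools(
    ai_name="Tom",
    ai_role="Assistant",
    tools=[WriteFileTool()],
    llm=ChatOpenAI(temperature=0),
    memory=vectorstore.as_retriever(),
)
agent.run(["write a weather report for SF today"])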
"""Map-reduce chain.
Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks
from langchain.chains import ReduceDocumentsChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.pydantic_v1 import Extra
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.text_splitter import TextSplitter
class MapReduceChain(Chain):
"""Map-reduce chain."""
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine documents."""
text_splitter: TextSplitter
"""Text splitter to use."""
input_key: str = "input_text" #: :meta private:
output_key: str = "output_text" #: :meta private:
@classmethod
def from_params(
cls,
llm: BaseLanguageModel,
prompt: BasePromptTemplate,
text_splitter: TextSplitter,
callbacks: Callbacks = None,
combine_chain_kwargs: Optional[Mapping[str, Any]] = None,
reduce_chain_kwargs: Optional[Mapping[str, Any]] = None,
**kwargs: Any,
) -> MapReduceChain:
"""Construct a map-reduce chain that uses the chain for map and reduce."""
llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks)
stuff_chain = StuffDocumentsChain(
llm_chain=llm_chain,
callbacks=callbacks,
**(reduce_chain_kwargs if reduce_chain_kwargs else {}),
)
reduce_documents_chain = ReduceDocumentsChain(
combine_documents_chain=stuff_chain
)
combine_documents_chain = MapReduceDocumentsChain(
llm_chain=llm_chain,
reduce_documents_chain=reduce_documents_chain,
callbacks=callbacks,
**(combine_chain_kwargs if combine_chain_kwargs else {}),
)
return cls(
combine_documents_chain=combine_documents_chain,
text_splitter=text_splitter,
callbacks=callbacks,
**kwargs,
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
# Split the larger text into smaller chunks.
doc_text = inputs.pop(self.input_key)
texts = self.text_splitter.split_text(doc_text)
docs = [Document(page_content=text) for text in texts]
_inputs: Dict[str, Any] = {
**inputs,
self.combine_documents_chain.input_key: docs,
}
outputs = self.combine_documents_chain.run(
_inputs, callbacks=_run_manager.get_child()
)
return {self.output_key: outputs}
| [
"langchain.chains.ReduceDocumentsChain",
"langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain",
"langchain.docstore.document.Document",
"langchain.chains.llm.LLMChain",
"langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager"
] | [((1734, 1787), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callbacks': 'callbacks'}), '(llm=llm, prompt=prompt, callbacks=callbacks)\n', (1742, 1787), False, 'from langchain.chains.llm import LLMChain\n'), ((1810, 1930), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, callbacks=callbacks, **\n reduce_chain_kwargs if reduce_chain_kwargs else {})\n', (1829, 1930), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((2008, 2065), 'langchain.chains.ReduceDocumentsChain', 'ReduceDocumentsChain', ([], {'combine_documents_chain': 'stuff_chain'}), '(combine_documents_chain=stuff_chain)\n', (2028, 2065), False, 'from langchain.chains import ReduceDocumentsChain\n'), ((2122, 2299), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'reduce_documents_chain': 'reduce_documents_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, reduce_documents_chain=\n reduce_documents_chain, callbacks=callbacks, **combine_chain_kwargs if\n combine_chain_kwargs else {})\n', (2145, 2299), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((3177, 3222), 'langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (3220, 3222), False, 'from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks\n'), ((3394, 3421), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text'}), '(page_content=text)\n', (3402, 3421), False, 'from langchain.docstore.document import Document\n')] |
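A small usage sketch for the MapReduceChain above, adapted from the typical summarization setup. The OpenAI LLM and the prompt wording are assumptions for illustration and require an `OPENAI_API_KEY`; any text splitter and prompt with a single input variable would work the same way.

from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.text_splitter import CharacterTextSplitter

llm = OpenAI(temperature=0)
prompt = PromptTemplate(
    input_variables=["text"],
    template="Write a concise summary of the following:\n\n{text}",
)
text_splitter = CharacterTextSplitter()
chain = MapReduceChain.from_params(llm=llm, prompt=prompt, text_splitter=text_splitter)
# The chain's single input key is "input_text", so Chain.run accepts one positional string.
print(chain.run("<paste a long document here>"))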
"""Map-reduce chain.
Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks
from langchain.chains import ReduceDocumentsChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.pydantic_v1 import Extra
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.text_splitter import TextSplitter
class MapReduceChain(Chain):
"""Map-reduce chain."""
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine documents."""
text_splitter: TextSplitter
"""Text splitter to use."""
input_key: str = "input_text" #: :meta private:
output_key: str = "output_text" #: :meta private:
@classmethod
def from_params(
cls,
llm: BaseLanguageModel,
prompt: BasePromptTemplate,
text_splitter: TextSplitter,
callbacks: Callbacks = None,
combine_chain_kwargs: Optional[Mapping[str, Any]] = None,
reduce_chain_kwargs: Optional[Mapping[str, Any]] = None,
**kwargs: Any,
) -> MapReduceChain:
"""Construct a map-reduce chain that uses the chain for map and reduce."""
llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks)
stuff_chain = StuffDocumentsChain(
llm_chain=llm_chain,
callbacks=callbacks,
**(reduce_chain_kwargs if reduce_chain_kwargs else {}),
)
reduce_documents_chain = ReduceDocumentsChain(
combine_documents_chain=stuff_chain
)
combine_documents_chain = MapReduceDocumentsChain(
llm_chain=llm_chain,
reduce_documents_chain=reduce_documents_chain,
callbacks=callbacks,
**(combine_chain_kwargs if combine_chain_kwargs else {}),
)
return cls(
combine_documents_chain=combine_documents_chain,
text_splitter=text_splitter,
callbacks=callbacks,
**kwargs,
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
# Split the larger text into smaller chunks.
doc_text = inputs.pop(self.input_key)
texts = self.text_splitter.split_text(doc_text)
docs = [Document(page_content=text) for text in texts]
_inputs: Dict[str, Any] = {
**inputs,
self.combine_documents_chain.input_key: docs,
}
outputs = self.combine_documents_chain.run(
_inputs, callbacks=_run_manager.get_child()
)
return {self.output_key: outputs}
| [
"langchain.chains.ReduceDocumentsChain",
"langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain",
"langchain.docstore.document.Document",
"langchain.chains.llm.LLMChain",
"langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager"
] | [((1734, 1787), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callbacks': 'callbacks'}), '(llm=llm, prompt=prompt, callbacks=callbacks)\n', (1742, 1787), False, 'from langchain.chains.llm import LLMChain\n'), ((1810, 1930), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, callbacks=callbacks, **\n reduce_chain_kwargs if reduce_chain_kwargs else {})\n', (1829, 1930), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((2008, 2065), 'langchain.chains.ReduceDocumentsChain', 'ReduceDocumentsChain', ([], {'combine_documents_chain': 'stuff_chain'}), '(combine_documents_chain=stuff_chain)\n', (2028, 2065), False, 'from langchain.chains import ReduceDocumentsChain\n'), ((2122, 2299), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'reduce_documents_chain': 'reduce_documents_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, reduce_documents_chain=\n reduce_documents_chain, callbacks=callbacks, **combine_chain_kwargs if\n combine_chain_kwargs else {})\n', (2145, 2299), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((3177, 3222), 'langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (3220, 3222), False, 'from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks\n'), ((3394, 3421), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text'}), '(page_content=text)\n', (3402, 3421), False, 'from langchain.docstore.document import Document\n')] |
"""Map-reduce chain.
Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks
from langchain.chains import ReduceDocumentsChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.pydantic_v1 import Extra
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.text_splitter import TextSplitter
class MapReduceChain(Chain):
"""Map-reduce chain."""
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine documents."""
text_splitter: TextSplitter
"""Text splitter to use."""
input_key: str = "input_text" #: :meta private:
output_key: str = "output_text" #: :meta private:
@classmethod
def from_params(
cls,
llm: BaseLanguageModel,
prompt: BasePromptTemplate,
text_splitter: TextSplitter,
callbacks: Callbacks = None,
combine_chain_kwargs: Optional[Mapping[str, Any]] = None,
reduce_chain_kwargs: Optional[Mapping[str, Any]] = None,
**kwargs: Any,
) -> MapReduceChain:
"""Construct a map-reduce chain that uses the chain for map and reduce."""
llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks)
stuff_chain = StuffDocumentsChain(
llm_chain=llm_chain,
callbacks=callbacks,
**(reduce_chain_kwargs if reduce_chain_kwargs else {}),
)
reduce_documents_chain = ReduceDocumentsChain(
combine_documents_chain=stuff_chain
)
combine_documents_chain = MapReduceDocumentsChain(
llm_chain=llm_chain,
reduce_documents_chain=reduce_documents_chain,
callbacks=callbacks,
**(combine_chain_kwargs if combine_chain_kwargs else {}),
)
return cls(
combine_documents_chain=combine_documents_chain,
text_splitter=text_splitter,
callbacks=callbacks,
**kwargs,
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
# Split the larger text into smaller chunks.
doc_text = inputs.pop(self.input_key)
texts = self.text_splitter.split_text(doc_text)
docs = [Document(page_content=text) for text in texts]
_inputs: Dict[str, Any] = {
**inputs,
self.combine_documents_chain.input_key: docs,
}
outputs = self.combine_documents_chain.run(
_inputs, callbacks=_run_manager.get_child()
)
return {self.output_key: outputs}
| [
"langchain.chains.ReduceDocumentsChain",
"langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain",
"langchain.docstore.document.Document",
"langchain.chains.llm.LLMChain",
"langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager"
] | [((1734, 1787), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callbacks': 'callbacks'}), '(llm=llm, prompt=prompt, callbacks=callbacks)\n', (1742, 1787), False, 'from langchain.chains.llm import LLMChain\n'), ((1810, 1930), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, callbacks=callbacks, **\n reduce_chain_kwargs if reduce_chain_kwargs else {})\n', (1829, 1930), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((2008, 2065), 'langchain.chains.ReduceDocumentsChain', 'ReduceDocumentsChain', ([], {'combine_documents_chain': 'stuff_chain'}), '(combine_documents_chain=stuff_chain)\n', (2028, 2065), False, 'from langchain.chains import ReduceDocumentsChain\n'), ((2122, 2299), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'reduce_documents_chain': 'reduce_documents_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, reduce_documents_chain=\n reduce_documents_chain, callbacks=callbacks, **combine_chain_kwargs if\n combine_chain_kwargs else {})\n', (2145, 2299), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((3177, 3222), 'langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (3220, 3222), False, 'from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks\n'), ((3394, 3421), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text'}), '(page_content=text)\n', (3402, 3421), False, 'from langchain.docstore.document import Document\n')] |
"""Map-reduce chain.
Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks
from langchain.chains import ReduceDocumentsChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.pydantic_v1 import Extra
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.text_splitter import TextSplitter
class MapReduceChain(Chain):
"""Map-reduce chain."""
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine documents."""
text_splitter: TextSplitter
"""Text splitter to use."""
input_key: str = "input_text" #: :meta private:
output_key: str = "output_text" #: :meta private:
@classmethod
def from_params(
cls,
llm: BaseLanguageModel,
prompt: BasePromptTemplate,
text_splitter: TextSplitter,
callbacks: Callbacks = None,
combine_chain_kwargs: Optional[Mapping[str, Any]] = None,
reduce_chain_kwargs: Optional[Mapping[str, Any]] = None,
**kwargs: Any,
) -> MapReduceChain:
"""Construct a map-reduce chain that uses the chain for map and reduce."""
llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks)
stuff_chain = StuffDocumentsChain(
llm_chain=llm_chain,
callbacks=callbacks,
**(reduce_chain_kwargs if reduce_chain_kwargs else {}),
)
reduce_documents_chain = ReduceDocumentsChain(
combine_documents_chain=stuff_chain
)
combine_documents_chain = MapReduceDocumentsChain(
llm_chain=llm_chain,
reduce_documents_chain=reduce_documents_chain,
callbacks=callbacks,
**(combine_chain_kwargs if combine_chain_kwargs else {}),
)
return cls(
combine_documents_chain=combine_documents_chain,
text_splitter=text_splitter,
callbacks=callbacks,
**kwargs,
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
# Split the larger text into smaller chunks.
doc_text = inputs.pop(self.input_key)
texts = self.text_splitter.split_text(doc_text)
docs = [Document(page_content=text) for text in texts]
_inputs: Dict[str, Any] = {
**inputs,
self.combine_documents_chain.input_key: docs,
}
outputs = self.combine_documents_chain.run(
_inputs, callbacks=_run_manager.get_child()
)
return {self.output_key: outputs}
| [
"langchain.chains.ReduceDocumentsChain",
"langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain",
"langchain.docstore.document.Document",
"langchain.chains.llm.LLMChain",
"langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager"
] | [((1734, 1787), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callbacks': 'callbacks'}), '(llm=llm, prompt=prompt, callbacks=callbacks)\n', (1742, 1787), False, 'from langchain.chains.llm import LLMChain\n'), ((1810, 1930), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, callbacks=callbacks, **\n reduce_chain_kwargs if reduce_chain_kwargs else {})\n', (1829, 1930), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((2008, 2065), 'langchain.chains.ReduceDocumentsChain', 'ReduceDocumentsChain', ([], {'combine_documents_chain': 'stuff_chain'}), '(combine_documents_chain=stuff_chain)\n', (2028, 2065), False, 'from langchain.chains import ReduceDocumentsChain\n'), ((2122, 2299), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'reduce_documents_chain': 'reduce_documents_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, reduce_documents_chain=\n reduce_documents_chain, callbacks=callbacks, **combine_chain_kwargs if\n combine_chain_kwargs else {})\n', (2145, 2299), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((3177, 3222), 'langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (3220, 3222), False, 'from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks\n'), ((3394, 3421), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text'}), '(page_content=text)\n', (3402, 3421), False, 'from langchain.docstore.document import Document\n')] |
from typing import Any, Dict, List, Optional, Sequence
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utils import get_from_dict_or_env
class AlephAlpha(LLM):
"""Aleph Alpha large language models.
To use, you should have the ``aleph_alpha_client`` python package installed, and the
environment variable ``ALEPH_ALPHA_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Parameters are explained more in depth here:
https://github.com/Aleph-Alpha/aleph-alpha-client/blob/c14b7dd2b4325c7da0d6a119f6e76385800e097b/aleph_alpha_client/completion.py#L10
Example:
.. code-block:: python
from langchain.llms import AlephAlpha
aleph_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key")
"""
client: Any #: :meta private:
model: Optional[str] = "luminous-base"
"""Model name to use."""
maximum_tokens: int = 64
"""The maximum number of tokens to be generated."""
temperature: float = 0.0
"""A non-negative float that tunes the degree of randomness in generation."""
top_k: int = 0
"""Number of most likely tokens to consider at each step."""
top_p: float = 0.0
"""Total probability mass of tokens to consider at each step."""
presence_penalty: float = 0.0
"""Penalizes repeated tokens."""
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency."""
repetition_penalties_include_prompt: Optional[bool] = False
"""Flag deciding whether presence penalty or frequency penalty are
updated from the prompt."""
use_multiplicative_presence_penalty: Optional[bool] = False
"""Flag deciding whether presence penalty is applied
multiplicatively (True) or additively (False)."""
penalty_bias: Optional[str] = None
"""Penalty bias for the completion."""
penalty_exceptions: Optional[List[str]] = None
"""List of strings that may be generated without penalty,
regardless of other penalty settings"""
penalty_exceptions_include_stop_sequences: Optional[bool] = None
"""Should stop_sequences be included in penalty_exceptions."""
best_of: Optional[int] = None
"""returns the one with the "best of" results
(highest log probability per token)
"""
n: int = 1
"""How many completions to generate for each prompt."""
logit_bias: Optional[Dict[int, float]] = None
"""The logit bias allows to influence the likelihood of generating tokens."""
log_probs: Optional[int] = None
"""Number of top log probabilities to be returned for each generated token."""
tokens: Optional[bool] = False
"""return tokens of completion."""
disable_optimizations: Optional[bool] = False
minimum_tokens: Optional[int] = 0
"""Generate at least this number of tokens."""
echo: bool = False
"""Echo the prompt in the completion."""
use_multiplicative_frequency_penalty: bool = False
sequence_penalty: float = 0.0
sequence_penalty_min_length: int = 2
use_multiplicative_sequence_penalty: bool = False
completion_bias_inclusion: Optional[Sequence[str]] = None
completion_bias_inclusion_first_token_only: bool = False
completion_bias_exclusion: Optional[Sequence[str]] = None
completion_bias_exclusion_first_token_only: bool = False
"""Only consider the first token for the completion_bias_exclusion."""
contextual_control_threshold: Optional[float] = None
"""If set to None, attention control parameters only apply to those tokens that have
explicitly been set in the request.
If set to a non-None value, control parameters are also applied to similar tokens.
"""
control_log_additive: Optional[bool] = True
"""True: apply control by adding the log(control_factor) to attention scores.
False: (attention_scores - - attention_scores.min(-1)) * control_factor
"""
repetition_penalties_include_completion: bool = True
"""Flag deciding whether presence penalty or frequency penalty
are updated from the completion."""
raw_completion: bool = False
"""Force the raw completion of the model to be returned."""
stop_sequences: Optional[List[str]] = None
"""Stop sequences to use."""
# Client params
aleph_alpha_api_key: Optional[str] = None
"""API key for Aleph Alpha API."""
host: str = "https://api.aleph-alpha.com"
"""The hostname of the API host.
The default one is "https://api.aleph-alpha.com")"""
hosting: Optional[str] = None
"""Determines in which datacenters the request may be processed.
You can either set the parameter to "aleph-alpha" or omit it (defaulting to None).
Not setting this value, or setting it to None, gives us maximal
flexibility in processing your request in our
own datacenters and on servers hosted with other providers.
Choose this option for maximal availability.
Setting it to "aleph-alpha" allows us to only process the
request in our own datacenters.
Choose this option for maximal data privacy."""
request_timeout_seconds: int = 305
"""Client timeout that will be set for HTTP requests in the
`requests` library's API calls.
Server will close all requests after 300 seconds with an internal server error."""
total_retries: int = 8
"""The number of retries made in case requests fail with certain retryable
status codes. If the last
retry fails a corresponding exception is raised. Note, that between retries
an exponential backoff
is applied, starting with 0.5 s after the first retry and doubling for
each retry made. So with the
default setting of 8 retries a total wait time of 63.5 s is added
between the retries."""
nice: bool = False
"""Setting this to True, will signal to the API that you intend to be
nice to other users
by de-prioritizing your request below concurrent ones."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
aleph_alpha_api_key = get_from_dict_or_env(
values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY"
)
try:
from aleph_alpha_client import Client
values["client"] = Client(
token=aleph_alpha_api_key,
host=values["host"],
hosting=values["hosting"],
request_timeout_seconds=values["request_timeout_seconds"],
total_retries=values["total_retries"],
nice=values["nice"],
)
except ImportError:
raise ImportError(
"Could not import aleph_alpha_client python package. "
"Please install it with `pip install aleph_alpha_client`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling the Aleph Alpha API."""
return {
"maximum_tokens": self.maximum_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"n": self.n,
"repetition_penalties_include_prompt": self.repetition_penalties_include_prompt, # noqa: E501
"use_multiplicative_presence_penalty": self.use_multiplicative_presence_penalty, # noqa: E501
"penalty_bias": self.penalty_bias,
"penalty_exceptions": self.penalty_exceptions,
"penalty_exceptions_include_stop_sequences": self.penalty_exceptions_include_stop_sequences, # noqa: E501
"best_of": self.best_of,
"logit_bias": self.logit_bias,
"log_probs": self.log_probs,
"tokens": self.tokens,
"disable_optimizations": self.disable_optimizations,
"minimum_tokens": self.minimum_tokens,
"echo": self.echo,
"use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty, # noqa: E501
"sequence_penalty": self.sequence_penalty,
"sequence_penalty_min_length": self.sequence_penalty_min_length,
"use_multiplicative_sequence_penalty": self.use_multiplicative_sequence_penalty, # noqa: E501
"completion_bias_inclusion": self.completion_bias_inclusion,
"completion_bias_inclusion_first_token_only": self.completion_bias_inclusion_first_token_only, # noqa: E501
"completion_bias_exclusion": self.completion_bias_exclusion,
"completion_bias_exclusion_first_token_only": self.completion_bias_exclusion_first_token_only, # noqa: E501
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
"repetition_penalties_include_completion": self.repetition_penalties_include_completion, # noqa: E501
"raw_completion": self.raw_completion,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "aleph_alpha"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Aleph Alpha's completion endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = aleph_alpha("Tell me a joke.")
"""
from aleph_alpha_client import CompletionRequest, Prompt
params = self._default_params
if self.stop_sequences is not None and stop is not None:
raise ValueError(
"stop sequences found in both the input and default params."
)
elif self.stop_sequences is not None:
params["stop_sequences"] = self.stop_sequences
else:
params["stop_sequences"] = stop
params = {**params, **kwargs}
request = CompletionRequest(prompt=Prompt.from_text(prompt), **params)
response = self.client.complete(model=self.model, request=request)
text = response.completions[0].completion
# If stop tokens are provided, Aleph Alpha's endpoint returns them.
# In order to make this consistent with other endpoints, we strip them.
if stop is not None or self.stop_sequences is not None:
text = enforce_stop_tokens(text, params["stop_sequences"])
return text
if __name__ == "__main__":
aa = AlephAlpha()
print(aa("How are you?"))
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env",
"langchain.pydantic_v1.root_validator"
] | [((6229, 6245), 'langchain.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (6243, 6245), False, 'from langchain.pydantic_v1 import Extra, root_validator\n'), ((6411, 6485), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""aleph_alpha_api_key"""', '"""ALEPH_ALPHA_API_KEY"""'], {}), "(values, 'aleph_alpha_api_key', 'ALEPH_ALPHA_API_KEY')\n", (6431, 6485), False, 'from langchain.utils import get_from_dict_or_env\n'), ((6603, 6812), 'aleph_alpha_client.Client', 'Client', ([], {'token': 'aleph_alpha_api_key', 'host': "values['host']", 'hosting': "values['hosting']", 'request_timeout_seconds': "values['request_timeout_seconds']", 'total_retries': "values['total_retries']", 'nice': "values['nice']"}), "(token=aleph_alpha_api_key, host=values['host'], hosting=values[\n 'hosting'], request_timeout_seconds=values['request_timeout_seconds'],\n total_retries=values['total_retries'], nice=values['nice'])\n", (6609, 6812), False, 'from aleph_alpha_client import Client\n'), ((11210, 11261), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', "params['stop_sequences']"], {}), "(text, params['stop_sequences'])\n", (11229, 11261), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((10810, 10834), 'aleph_alpha_client.Prompt.from_text', 'Prompt.from_text', (['prompt'], {}), '(prompt)\n', (10826, 10834), False, 'from aleph_alpha_client import CompletionRequest, Prompt\n')] |
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chains import VectorDBQA
from langchain.document_loaders import TextLoader
from typing import List
from langchain.schema import Document
import os
os.environ['OPENAI_API_KEY'] = "your-api-key"
class Genie:
def __init__(self, file_path: str):
self.file_path = file_path
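        # load the source file, split it into chunks, embed them, and build the QA chain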
self.loader = TextLoader(self.file_path)
self.documents = self.loader.load()
self.texts = self.text_split(self.documents)
self.vectordb = self.embeddings(self.texts)
self.genie = VectorDBQA.from_chain_type(llm=OpenAI(), chain_type="stuff", vectorstore=self.vectordb)
@staticmethod
def text_split(documents: TextLoader):
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
return texts
@staticmethod
def embeddings(texts: List[Document]):
embeddings = OpenAIEmbeddings()
vectordb = Chroma.from_documents(texts, embeddings)
return vectordb
def ask(self, query: str):
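        # answer the query with the QA chain built in __init__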
return self.genie.run(query)
if __name__ == "__main__":
genie = Genie("example.txt")
print(genie.ask("How is the wheater like?")) | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.document_loaders.TextLoader",
"langchain.llms.OpenAI",
"langchain.vectorstores.Chroma.from_documents",
"langchain.embeddings.OpenAIEmbeddings"
] | [((515, 541), 'langchain.document_loaders.TextLoader', 'TextLoader', (['self.file_path'], {}), '(self.file_path)\n', (525, 541), False, 'from langchain.document_loaders import TextLoader\n'), ((886, 950), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (916, 950), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1112, 1130), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1128, 1130), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1150, 1190), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['texts', 'embeddings'], {}), '(texts, embeddings)\n', (1171, 1190), False, 'from langchain.vectorstores import Chroma\n'), ((743, 751), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (749, 751), False, 'from langchain.llms import OpenAI\n')] |
import logging
from pathlib import Path
from typing import List, Optional, Tuple
from dotenv import load_dotenv
load_dotenv()
from queue import Empty, Queue
from threading import Thread
import gradio as gr
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.prompts import HumanMessagePromptTemplate
from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage
from callback import QueueCallback
MODELS_NAMES = ["gpt-3.5-turbo", "gpt-4"]
DEFAULT_TEMPERATURE = 0.7
ChatHistory = List[str]
logging.basicConfig(
format="[%(asctime)s %(levelname)s]: %(message)s", level=logging.INFO
)
# load up our system prompt
default_system_prompt = Path("prompts/system.prompt").read_text()
# for the human, we will just inject the text
human_message_prompt_template = HumanMessagePromptTemplate.from_template("{text}")
def on_message_button_click(
chat: Optional[ChatOpenAI],
message: str,
chatbot_messages: ChatHistory,
messages: List[BaseMessage],
) -> Tuple[ChatOpenAI, str, ChatHistory, List[BaseMessage]]:
if chat is None:
# in the queue we will store our streamed tokens
queue = Queue()
# let's create our default chat
chat = ChatOpenAI(
model_name=MODELS_NAMES[0],
temperature=DEFAULT_TEMPERATURE,
streaming=True,
callbacks=([QueueCallback(queue)]),
)
else:
# hacky way to get the queue back
queue = chat.callbacks[0].queue
job_done = object()
logging.info(f"Asking question to GPT, messages={messages}")
# let's add the messages to our stuff
messages.append(HumanMessage(content=message))
chatbot_messages.append((message, ""))
    # this is a little wrapper we need because we have to add the job_done
def task():
chat(messages)
queue.put(job_done)
# now let's start a thread and run the generation inside it
t = Thread(target=task)
t.start()
# this will hold the content as we generate
content = ""
    # now, we read the next_token from the queue and do what has to be done
while True:
try:
next_token = queue.get(True, timeout=1)
if next_token is job_done:
break
content += next_token
chatbot_messages[-1] = (message, content)
yield chat, "", chatbot_messages, messages
except Empty:
continue
    # finally we can add our reply to messages
messages.append(AIMessage(content=content))
logging.debug(f"reply = {content}")
logging.info(f"Done!")
return chat, "", chatbot_messages, messages
def system_prompt_handler(value: str) -> str:
return value
def on_clear_button_click(system_prompt: str) -> Tuple[str, List, List]:
return "", [], [SystemMessage(content=system_prompt)]
def on_apply_settings_button_click(
system_prompt: str, model_name: str, temperature: float
):
logging.info(
f"Applying settings: model_name={model_name}, temperature={temperature}"
)
chat = ChatOpenAI(
model_name=model_name,
temperature=temperature,
streaming=True,
callbacks=[QueueCallback(Queue())],
)
# don't forget to nuke our queue
chat.callbacks[0].queue.empty()
return chat, *on_clear_button_click(system_prompt)
# some css why not, "borrowed" from https://huggingface.co/spaces/ysharma/Gradio-demo-streaming/blob/main/app.py
with gr.Blocks(
css="""#col_container {width: 700px; margin-left: auto; margin-right: auto;}
#chatbot {height: 400px; overflow: auto;}"""
) as demo:
system_prompt = gr.State(default_system_prompt)
    # here we keep our state so multiple users can use the app at the same time!
messages = gr.State([SystemMessage(content=default_system_prompt)])
    # same thing for the chat, we want one chat per user so callbacks are unique I guess
chat = gr.State(None)
with gr.Column(elem_id="col_container"):
gr.Markdown("# Welcome to GradioGPT! 🌟🚀")
gr.Markdown(
"An easy to use template. It comes with state and settings managment"
)
with gr.Column():
system_prompt_area = gr.TextArea(
default_system_prompt, lines=4, label="system prompt", interactive=True
)
# we store the value into the state to avoid re rendering of the area
system_prompt_area.input(
system_prompt_handler,
inputs=[system_prompt_area],
outputs=[system_prompt],
)
system_prompt_button = gr.Button("Set")
chatbot = gr.Chatbot()
with gr.Column():
message = gr.Textbox(label="chat input")
message.submit(
on_message_button_click,
[chat, message, chatbot, messages],
[chat, message, chatbot, messages],
queue=True,
)
message_button = gr.Button("Submit", variant="primary")
message_button.click(
on_message_button_click,
[chat, message, chatbot, messages],
[chat, message, chatbot, messages],
)
with gr.Row():
with gr.Column():
clear_button = gr.Button("Clear")
clear_button.click(
on_clear_button_click,
[system_prompt],
[message, chatbot, messages],
queue=False,
)
with gr.Accordion("Settings", open=False):
model_name = gr.Dropdown(
choices=MODELS_NAMES, value=MODELS_NAMES[0], label="model"
)
temperature = gr.Slider(
minimum=0.0,
maximum=1.0,
value=0.7,
step=0.1,
label="temperature",
interactive=True,
)
apply_settings_button = gr.Button("Apply")
apply_settings_button.click(
on_apply_settings_button_click,
[system_prompt, model_name, temperature],
[chat, message, chatbot, messages],
)
system_prompt_button.click(
on_apply_settings_button_click,
[system_prompt, model_name, temperature],
[chat, message, chatbot, messages],
)
demo.queue()
demo.launch()
| [
"langchain.schema.AIMessage",
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.schema.SystemMessage",
"langchain.schema.HumanMessage"
] | [((114, 127), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (125, 127), False, 'from dotenv import load_dotenv\n'), ((604, 698), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(asctime)s %(levelname)s]: %(message)s"""', 'level': 'logging.INFO'}), "(format='[%(asctime)s %(levelname)s]: %(message)s',\n level=logging.INFO)\n", (623, 698), False, 'import logging\n'), ((873, 923), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{text}"""'], {}), "('{text}')\n", (913, 923), False, 'from langchain.prompts import HumanMessagePromptTemplate\n'), ((1596, 1656), 'logging.info', 'logging.info', (['f"""Asking question to GPT, messages={messages}"""'], {}), "(f'Asking question to GPT, messages={messages}')\n", (1608, 1656), False, 'import logging\n'), ((2004, 2023), 'threading.Thread', 'Thread', ([], {'target': 'task'}), '(target=task)\n', (2010, 2023), False, 'from threading import Thread\n'), ((2606, 2641), 'logging.debug', 'logging.debug', (['f"""reply = {content}"""'], {}), "(f'reply = {content}')\n", (2619, 2641), False, 'import logging\n'), ((2646, 2668), 'logging.info', 'logging.info', (['f"""Done!"""'], {}), "(f'Done!')\n", (2658, 2668), False, 'import logging\n'), ((3020, 3111), 'logging.info', 'logging.info', (['f"""Applying settings: model_name={model_name}, temperature={temperature}"""'], {}), "(\n f'Applying settings: model_name={model_name}, temperature={temperature}')\n", (3032, 3111), False, 'import logging\n'), ((3530, 3688), 'gradio.Blocks', 'gr.Blocks', ([], {'css': '"""#col_container {width: 700px; margin-left: auto; margin-right: auto;}\n #chatbot {height: 400px; overflow: auto;}"""'}), '(css=\n """#col_container {width: 700px; margin-left: auto; margin-right: auto;}\n #chatbot {height: 400px; overflow: auto;}"""\n )\n', (3539, 3688), True, 'import gradio as gr\n'), ((3714, 3745), 'gradio.State', 'gr.State', (['default_system_prompt'], {}), '(default_system_prompt)\n', (3722, 3745), True, 'import gradio as gr\n'), ((3997, 4011), 'gradio.State', 'gr.State', (['None'], {}), '(None)\n', (4005, 4011), True, 'import gradio as gr\n'), ((753, 782), 'pathlib.Path', 'Path', (['"""prompts/system.prompt"""'], {}), "('prompts/system.prompt')\n", (757, 782), False, 'from pathlib import Path\n'), ((1228, 1235), 'queue.Queue', 'Queue', ([], {}), '()\n', (1233, 1235), False, 'from queue import Empty, Queue\n'), ((1719, 1748), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'message'}), '(content=message)\n', (1731, 1748), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((2574, 2600), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (2583, 2600), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((4022, 4056), 'gradio.Column', 'gr.Column', ([], {'elem_id': '"""col_container"""'}), "(elem_id='col_container')\n", (4031, 4056), True, 'import gradio as gr\n'), ((4066, 4107), 'gradio.Markdown', 'gr.Markdown', (['"""# Welcome to GradioGPT! 🌟🚀"""'], {}), "('# Welcome to GradioGPT! 🌟🚀')\n", (4077, 4107), True, 'import gradio as gr\n'), ((4116, 4203), 'gradio.Markdown', 'gr.Markdown', (['"""An easy to use template. It comes with state and settings managment"""'], {}), "(\n 'An easy to use template. 
It comes with state and settings managment')\n", (4127, 4203), True, 'import gradio as gr\n'), ((4725, 4737), 'gradio.Chatbot', 'gr.Chatbot', ([], {}), '()\n', (4735, 4737), True, 'import gradio as gr\n'), ((2877, 2913), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_prompt'}), '(content=system_prompt)\n', (2890, 2913), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((3851, 3895), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'default_system_prompt'}), '(content=default_system_prompt)\n', (3864, 3895), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((4234, 4245), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (4243, 4245), True, 'import gradio as gr\n'), ((4280, 4368), 'gradio.TextArea', 'gr.TextArea', (['default_system_prompt'], {'lines': '(4)', 'label': '"""system prompt"""', 'interactive': '(True)'}), "(default_system_prompt, lines=4, label='system prompt',\n interactive=True)\n", (4291, 4368), True, 'import gradio as gr\n'), ((4689, 4705), 'gradio.Button', 'gr.Button', (['"""Set"""'], {}), "('Set')\n", (4698, 4705), True, 'import gradio as gr\n'), ((4751, 4762), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (4760, 4762), True, 'import gradio as gr\n'), ((4786, 4816), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""chat input"""'}), "(label='chat input')\n", (4796, 4816), True, 'import gradio as gr\n'), ((5061, 5099), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {'variant': '"""primary"""'}), "('Submit', variant='primary')\n", (5070, 5099), True, 'import gradio as gr\n'), ((5306, 5314), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (5312, 5314), True, 'import gradio as gr\n'), ((5333, 5344), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (5342, 5344), True, 'import gradio as gr\n'), ((5377, 5395), 'gradio.Button', 'gr.Button', (['"""Clear"""'], {}), "('Clear')\n", (5386, 5395), True, 'import gradio as gr\n'), ((5630, 5666), 'gradio.Accordion', 'gr.Accordion', (['"""Settings"""'], {'open': '(False)'}), "('Settings', open=False)\n", (5642, 5666), True, 'import gradio as gr\n'), ((5697, 5768), 'gradio.Dropdown', 'gr.Dropdown', ([], {'choices': 'MODELS_NAMES', 'value': 'MODELS_NAMES[0]', 'label': '"""model"""'}), "(choices=MODELS_NAMES, value=MODELS_NAMES[0], label='model')\n", (5708, 5768), True, 'import gradio as gr\n'), ((5837, 5937), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(0.0)', 'maximum': '(1.0)', 'value': '(0.7)', 'step': '(0.1)', 'label': '"""temperature"""', 'interactive': '(True)'}), "(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label=\n 'temperature', interactive=True)\n", (5846, 5937), True, 'import gradio as gr\n'), ((6112, 6130), 'gradio.Button', 'gr.Button', (['"""Apply"""'], {}), "('Apply')\n", (6121, 6130), True, 'import gradio as gr\n'), ((1440, 1460), 'callback.QueueCallback', 'QueueCallback', (['queue'], {}), '(queue)\n', (1453, 1460), False, 'from callback import QueueCallback\n'), ((3265, 3272), 'queue.Queue', 'Queue', ([], {}), '()\n', (3270, 3272), False, 'from queue import Empty, Queue\n')] |
It comes with state and settings managment')\n", (4127, 4203), True, 'import gradio as gr\n'), ((4725, 4737), 'gradio.Chatbot', 'gr.Chatbot', ([], {}), '()\n', (4735, 4737), True, 'import gradio as gr\n'), ((2877, 2913), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_prompt'}), '(content=system_prompt)\n', (2890, 2913), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((3851, 3895), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'default_system_prompt'}), '(content=default_system_prompt)\n', (3864, 3895), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((4234, 4245), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (4243, 4245), True, 'import gradio as gr\n'), ((4280, 4368), 'gradio.TextArea', 'gr.TextArea', (['default_system_prompt'], {'lines': '(4)', 'label': '"""system prompt"""', 'interactive': '(True)'}), "(default_system_prompt, lines=4, label='system prompt',\n interactive=True)\n", (4291, 4368), True, 'import gradio as gr\n'), ((4689, 4705), 'gradio.Button', 'gr.Button', (['"""Set"""'], {}), "('Set')\n", (4698, 4705), True, 'import gradio as gr\n'), ((4751, 4762), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (4760, 4762), True, 'import gradio as gr\n'), ((4786, 4816), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""chat input"""'}), "(label='chat input')\n", (4796, 4816), True, 'import gradio as gr\n'), ((5061, 5099), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {'variant': '"""primary"""'}), "('Submit', variant='primary')\n", (5070, 5099), True, 'import gradio as gr\n'), ((5306, 5314), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (5312, 5314), True, 'import gradio as gr\n'), ((5333, 5344), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (5342, 5344), True, 'import gradio as gr\n'), ((5377, 5395), 'gradio.Button', 'gr.Button', (['"""Clear"""'], {}), "('Clear')\n", (5386, 5395), True, 'import gradio as gr\n'), ((5630, 5666), 'gradio.Accordion', 'gr.Accordion', (['"""Settings"""'], {'open': '(False)'}), "('Settings', open=False)\n", (5642, 5666), True, 'import gradio as gr\n'), ((5697, 5768), 'gradio.Dropdown', 'gr.Dropdown', ([], {'choices': 'MODELS_NAMES', 'value': 'MODELS_NAMES[0]', 'label': '"""model"""'}), "(choices=MODELS_NAMES, value=MODELS_NAMES[0], label='model')\n", (5708, 5768), True, 'import gradio as gr\n'), ((5837, 5937), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(0.0)', 'maximum': '(1.0)', 'value': '(0.7)', 'step': '(0.1)', 'label': '"""temperature"""', 'interactive': '(True)'}), "(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label=\n 'temperature', interactive=True)\n", (5846, 5937), True, 'import gradio as gr\n'), ((6112, 6130), 'gradio.Button', 'gr.Button', (['"""Apply"""'], {}), "('Apply')\n", (6121, 6130), True, 'import gradio as gr\n'), ((1440, 1460), 'callback.QueueCallback', 'QueueCallback', (['queue'], {}), '(queue)\n', (1453, 1460), False, 'from callback import QueueCallback\n'), ((3265, 3272), 'queue.Queue', 'Queue', ([], {}), '()\n', (3270, 3272), False, 'from queue import Empty, Queue\n')] |
"""
View stage example selector.
| Copyright 2017-2023, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import os
import pickle
from langchain.prompts import FewShotPromptTemplate, PromptTemplate
import numpy as np
import pandas as pd
from scipy.spatial.distance import cosine
# pylint: disable=relative-beyond-top-level
from .utils import get_embedding_function, get_cache, hash_query
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
EXAMPLES_DIR = os.path.join(ROOT_DIR, "examples")
EXAMPLE_EMBEDDINGS_PATH = os.path.join(
EXAMPLES_DIR, "viewstage_embeddings.pkl"
)
VIEW_STAGE_EXAMPLES_PATH = os.path.join(EXAMPLES_DIR, "viewstage_examples.csv")
VIEW_STAGE_EXAMPLE_PROMPT = PromptTemplate(
input_variables=["input", "output"],
template="Input: {input}\nOutput: {output}",
)
def get_or_create_embeddings(queries):
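    """Return embeddings for the given queries, computing and caching any that are missing on disk."""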
if os.path.isfile(EXAMPLE_EMBEDDINGS_PATH):
with open(EXAMPLE_EMBEDDINGS_PATH, "rb") as f:
example_embeddings = pickle.load(f)
else:
example_embeddings = {}
query_hashes = []
new_hashes = []
new_queries = []
for query in queries:
key = hash_query(query)
query_hashes.append(key)
if key not in example_embeddings:
new_hashes.append(key)
new_queries.append(query)
if new_queries:
print("Generating %d embeddings..." % len(new_queries))
model = get_embedding_function()
new_embeddings = model(new_queries)
for key, embedding in zip(new_hashes, new_embeddings):
example_embeddings[key] = embedding
if new_queries:
print("Saving embeddings to disk...")
with open(EXAMPLE_EMBEDDINGS_PATH, "wb") as f:
pickle.dump(example_embeddings, f)
return example_embeddings
def has_geo_field(sample_collection):
types = list(sample_collection.get_field_schema(flat=True).values())
types = [type(t) for t in types]
return any(["Geo" in t.__name__ for t in types])
def get_label_type(sample_collection, field_name):
sample = sample_collection.first()
field = sample.get_field(field_name)
field_type = str(type(field).__name__).lower()
field_type = field_type[:-1] if field_type.endswith("s") else field_type
return field_type
def _replace_run_keys(prompt, runs):
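    """Replace placeholder run keys in the prompt with the dataset's actual run keys."""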
if "text_similarity" in runs:
prompt = prompt.replace("TEXT_SIM_KEY", runs["text_similarity"]["key"])
if "image_similarity" in runs:
prompt = prompt.replace(
"IMAGE_SIM_KEY", runs["image_similarity"]["key"]
)
if "evaluation" in runs:
prompt = prompt.replace("EVAL_KEY", runs["evaluation"]["key"])
if "uniqueness" in runs:
prompt = prompt.replace(
"UNIQUENESS_FIELD", runs["uniqueness"]["uniqueness_field"]
)
return prompt
def _count_empty_class_names(label_field):
return [list(class_name.values())[0] for class_name in label_field].count(
[]
)
def _reduce_label_fields(label_fields):
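    """Keep only the label fields with the fewest empty class names."""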
label_field_keys = list(label_fields.keys())
if len(label_field_keys) == 0:
return None, None
elif len(label_field_keys) > 0:
empty_counts = [
_count_empty_class_names(label_fields[key])
for key in label_field_keys
]
min_empty_count = min(empty_counts)
valid_keys = [
key
for key, count in zip(label_field_keys, empty_counts)
if count == min_empty_count
]
return {key: label_fields[key] for key in valid_keys}, min_empty_count
def _parse_runs_and_labels(runs, label_fields):
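    """Reduce the label fields, dropping them entirely when they have empty class names and a text similarity run is available."""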
reduced_label_fields, count = _reduce_label_fields(label_fields.copy())
reduced_runs = runs.copy()
if count is not None and count > 0 and "text_similarity" in reduced_runs:
reduced_label_fields = None
return reduced_runs, reduced_label_fields
def _get_evaluation_type(sample_collection, eval_key):
eval_cls = sample_collection.get_evaluation_info(eval_key).config.cls
if "openimages" in eval_cls:
return "detection"
elif "coco" in eval_cls:
return "detection"
elif "activitynet" in eval_cls:
return "detection"
elif "classification" in eval_cls:
return "classification"
return None
def _load_examples():
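    """Load the view stage examples CSV and the matching query embeddings."""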
examples = pd.read_csv(VIEW_STAGE_EXAMPLES_PATH, on_bad_lines="skip")
examples["meta"] = examples["metadata"]
examples["contains_match"] = examples["stages"].str.contains("match\(")
examples["contains_filter_labels"] = examples["stages"].str.contains(
"filter_labels\("
)
examples["mfl"] = (
examples["contains_match"] | examples["contains_filter_labels"]
)
examples["hash"] = examples["query"].apply(lambda x: hash_query(x))
queries = examples["query"].tolist()
embeddings = get_or_create_embeddings(queries)
embeddings = {
key: np.array(embeddings[key]) for key in examples["hash"].tolist()
}
return examples, embeddings
def get_examples():
cache = get_cache()
keys = ("viewstage_examples", "viewstage_embeddings")
if keys[0] not in cache or keys[1] not in cache:
cache[keys[0]], cache[keys[1]] = _load_examples()
return cache[keys[0]], cache[keys[1]]
def _get_filtered_examples(sample_collection, runs, label_fields):
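    """Filter the example pool down to examples compatible with the dataset's media type, label types, and available runs."""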
examples, embeddings = get_examples()
media_type = sample_collection.media_type
_filter = examples["media_type"].isin([media_type, "all"])
red_runs, red_label_fields = _parse_runs_and_labels(runs, label_fields)
geo = has_geo_field(sample_collection)
text_sim = "text_similarity" in red_runs
image_sim = "image_similarity" in red_runs
meta = "metadata" in red_runs
eval = "evaluation" in red_runs
if red_label_fields or eval:
if red_label_fields:
label_field_types = list(
set(
[
get_label_type(sample_collection, field)
for field in red_label_fields
]
)
)
else:
label_field_types = []
if eval:
eval_key = red_runs["evaluation"]["key"]
eval_types = [_get_evaluation_type(sample_collection, eval_key)]
else:
eval_types = []
label_types = list(set(label_field_types + eval_types + ["all"]))
_filter = _filter & examples["label_type"].isin(label_types)
## contains match() or filter_labels() in stages
mfl_cond = red_label_fields and not text_sim
conds = [geo, text_sim, image_sim, meta, eval, mfl_cond]
strs = ["geo", "text_sim", "image_sim", "meta", "eval", "mfl"]
for cond, cond_str in zip(conds, strs):
if not cond:
_filter = _filter & (examples[cond_str] == False)
filtered_examples = examples[_filter]
filtered_queries, filtered_stages, hashes = (
filtered_examples["query"].tolist(),
filtered_examples["stages"].tolist(),
filtered_examples["hash"].tolist(),
)
filtered_embeddings = [embeddings[key] for key in hashes]
return filtered_queries, filtered_stages, filtered_embeddings
def get_similar_examples(sample_collection, query, runs, label_fields):
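    """Return the 20 examples most similar to the query, ranked by cosine distance between embeddings."""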
ex_queries, ex_stages, ex_embeddings = _get_filtered_examples(
sample_collection, runs, label_fields
)
model = get_embedding_function()
query_embedding = np.array(model([query]))
if len(query_embedding.shape) == 2:
query_embedding = query_embedding[0]
dists = np.array([cosine(query_embedding, emb) for emb in ex_embeddings])
sorted_ix = np.argsort(dists).astype(int)
k = 20
similar_queries = [ex_queries[ix] for ix in sorted_ix[:k]]
similar_stages = [ex_stages[ix] for ix in sorted_ix[:k]]
return [
{"input": sq, "output": ss}
for sq, ss in zip(similar_queries, similar_stages)
]
def generate_view_stage_examples_prompt_template(
sample_collection, query, runs, label_fields
):
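    """Build a few-shot prompt template from the examples most similar to the query."""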
examples = get_similar_examples(
sample_collection, query, runs, label_fields
)
example_prompt = VIEW_STAGE_EXAMPLE_PROMPT
return FewShotPromptTemplate(
examples=examples,
example_prompt=example_prompt,
prefix="Generate code to produce the FiftyOne view stages for the following prompts:\n",
suffix="Input: {text}\nOutput:",
input_variables=["text"],
)
def generate_view_stage_examples_prompt(
sample_collection, query, runs, label_fields
):
similar_examples_prompt_template = (
generate_view_stage_examples_prompt_template(
sample_collection, query, runs, label_fields
)
)
prompt = similar_examples_prompt_template.format(text=query)
return _replace_run_keys(prompt, runs)
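# Example usage (sketch, not part of the original module; `dataset`, `runs`, and
# `label_fields` are assumed to be provided by the caller):
# prompt = generate_view_stage_examples_prompt(
#     dataset, "show the 10 most unique images", runs, label_fields
# )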
| [
"langchain.prompts.FewShotPromptTemplate",
"langchain.prompts.PromptTemplate"
] | [((489, 523), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""examples"""'], {}), "(ROOT_DIR, 'examples')\n", (501, 523), False, 'import os\n'), ((551, 605), 'os.path.join', 'os.path.join', (['EXAMPLES_DIR', '"""viewstage_embeddings.pkl"""'], {}), "(EXAMPLES_DIR, 'viewstage_embeddings.pkl')\n", (563, 605), False, 'import os\n'), ((639, 691), 'os.path.join', 'os.path.join', (['EXAMPLES_DIR', '"""viewstage_examples.csv"""'], {}), "(EXAMPLES_DIR, 'viewstage_examples.csv')\n", (651, 691), False, 'import os\n'), ((721, 825), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['input', 'output']", 'template': '"""Input: {input}\nOutput: {output}"""'}), '(input_variables=[\'input\', \'output\'], template=\n """Input: {input}\nOutput: {output}""")\n', (735, 825), False, 'from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n'), ((877, 916), 'os.path.isfile', 'os.path.isfile', (['EXAMPLE_EMBEDDINGS_PATH'], {}), '(EXAMPLE_EMBEDDINGS_PATH)\n', (891, 916), False, 'import os\n'), ((4348, 4406), 'pandas.read_csv', 'pd.read_csv', (['VIEW_STAGE_EXAMPLES_PATH'], {'on_bad_lines': '"""skip"""'}), "(VIEW_STAGE_EXAMPLES_PATH, on_bad_lines='skip')\n", (4359, 4406), True, 'import pandas as pd\n'), ((8216, 8455), 'langchain.prompts.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {'examples': 'examples', 'example_prompt': 'example_prompt', 'prefix': '"""Generate code to produce the FiftyOne view stages for the following prompts:\n"""', 'suffix': '"""Input: {text}\nOutput:"""', 'input_variables': "['text']"}), '(examples=examples, example_prompt=example_prompt,\n prefix=\n """Generate code to produce the FiftyOne view stages for the following prompts:\n"""\n , suffix="""Input: {text}\nOutput:""", input_variables=[\'text\'])\n', (8237, 8455), False, 'from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n'), ((446, 471), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (461, 471), False, 'import os\n'), ((4933, 4958), 'numpy.array', 'np.array', (['embeddings[key]'], {}), '(embeddings[key])\n', (4941, 4958), True, 'import numpy as np\n'), ((1006, 1020), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1017, 1020), False, 'import pickle\n'), ((1751, 1785), 'pickle.dump', 'pickle.dump', (['example_embeddings', 'f'], {}), '(example_embeddings, f)\n', (1762, 1785), False, 'import pickle\n'), ((7604, 7632), 'scipy.spatial.distance.cosine', 'cosine', (['query_embedding', 'emb'], {}), '(query_embedding, emb)\n', (7610, 7632), False, 'from scipy.spatial.distance import cosine\n'), ((7677, 7694), 'numpy.argsort', 'np.argsort', (['dists'], {}), '(dists)\n', (7687, 7694), True, 'import numpy as np\n')] |
"""
View stage example selector.
| Copyright 2017-2023, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import os
import pickle
from langchain.prompts import FewShotPromptTemplate, PromptTemplate
import numpy as np
import pandas as pd
from scipy.spatial.distance import cosine
# pylint: disable=relative-beyond-top-level
from .utils import get_embedding_function, get_cache, hash_query
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
EXAMPLES_DIR = os.path.join(ROOT_DIR, "examples")
EXAMPLE_EMBEDDINGS_PATH = os.path.join(
EXAMPLES_DIR, "viewstage_embeddings.pkl"
)
VIEW_STAGE_EXAMPLES_PATH = os.path.join(EXAMPLES_DIR, "viewstage_examples.csv")
VIEW_STAGE_EXAMPLE_PROMPT = PromptTemplate(
input_variables=["input", "output"],
template="Input: {input}\nOutput: {output}",
)
def get_or_create_embeddings(queries):
if os.path.isfile(EXAMPLE_EMBEDDINGS_PATH):
with open(EXAMPLE_EMBEDDINGS_PATH, "rb") as f:
example_embeddings = pickle.load(f)
else:
example_embeddings = {}
query_hashes = []
new_hashes = []
new_queries = []
for query in queries:
key = hash_query(query)
query_hashes.append(key)
if key not in example_embeddings:
new_hashes.append(key)
new_queries.append(query)
if new_queries:
print("Generating %d embeddings..." % len(new_queries))
model = get_embedding_function()
new_embeddings = model(new_queries)
for key, embedding in zip(new_hashes, new_embeddings):
example_embeddings[key] = embedding
if new_queries:
print("Saving embeddings to disk...")
with open(EXAMPLE_EMBEDDINGS_PATH, "wb") as f:
pickle.dump(example_embeddings, f)
return example_embeddings
def has_geo_field(sample_collection):
types = list(sample_collection.get_field_schema(flat=True).values())
types = [type(t) for t in types]
return any(["Geo" in t.__name__ for t in types])
def get_label_type(sample_collection, field_name):
sample = sample_collection.first()
field = sample.get_field(field_name)
field_type = str(type(field).__name__).lower()
field_type = field_type[:-1] if field_type.endswith("s") else field_type
return field_type
def _replace_run_keys(prompt, runs):
if "text_similarity" in runs:
prompt = prompt.replace("TEXT_SIM_KEY", runs["text_similarity"]["key"])
if "image_similarity" in runs:
prompt = prompt.replace(
"IMAGE_SIM_KEY", runs["image_similarity"]["key"]
)
if "evaluation" in runs:
prompt = prompt.replace("EVAL_KEY", runs["evaluation"]["key"])
if "uniqueness" in runs:
prompt = prompt.replace(
"UNIQUENESS_FIELD", runs["uniqueness"]["uniqueness_field"]
)
return prompt
def _count_empty_class_names(label_field):
return [list(class_name.values())[0] for class_name in label_field].count(
[]
)
def _reduce_label_fields(label_fields):
label_field_keys = list(label_fields.keys())
if len(label_field_keys) == 0:
return None, None
elif len(label_field_keys) > 0:
empty_counts = [
_count_empty_class_names(label_fields[key])
for key in label_field_keys
]
min_empty_count = min(empty_counts)
valid_keys = [
key
for key, count in zip(label_field_keys, empty_counts)
if count == min_empty_count
]
return {key: label_fields[key] for key in valid_keys}, min_empty_count
def _parse_runs_and_labels(runs, label_fields):
reduced_label_fields, count = _reduce_label_fields(label_fields.copy())
reduced_runs = runs.copy()
if count is not None and count > 0 and "text_similarity" in reduced_runs:
reduced_label_fields = None
return reduced_runs, reduced_label_fields
def _get_evaluation_type(sample_collection, eval_key):
eval_cls = sample_collection.get_evaluation_info(eval_key).config.cls
if "openimages" in eval_cls:
return "detection"
elif "coco" in eval_cls:
return "detection"
elif "activitynet" in eval_cls:
return "detection"
elif "classification" in eval_cls:
return "classification"
return None
def _load_examples():
examples = pd.read_csv(VIEW_STAGE_EXAMPLES_PATH, on_bad_lines="skip")
examples["meta"] = examples["metadata"]
examples["contains_match"] = examples["stages"].str.contains("match\(")
examples["contains_filter_labels"] = examples["stages"].str.contains(
"filter_labels\("
)
examples["mfl"] = (
examples["contains_match"] | examples["contains_filter_labels"]
)
examples["hash"] = examples["query"].apply(lambda x: hash_query(x))
queries = examples["query"].tolist()
embeddings = get_or_create_embeddings(queries)
embeddings = {
key: np.array(embeddings[key]) for key in examples["hash"].tolist()
}
return examples, embeddings
def get_examples():
cache = get_cache()
keys = ("viewstage_examples", "viewstage_embeddings")
if keys[0] not in cache or keys[1] not in cache:
cache[keys[0]], cache[keys[1]] = _load_examples()
return cache[keys[0]], cache[keys[1]]
def _get_filtered_examples(sample_collection, runs, label_fields):
examples, embeddings = get_examples()
media_type = sample_collection.media_type
_filter = examples["media_type"].isin([media_type, "all"])
red_runs, red_label_fields = _parse_runs_and_labels(runs, label_fields)
geo = has_geo_field(sample_collection)
text_sim = "text_similarity" in red_runs
image_sim = "image_similarity" in red_runs
meta = "metadata" in red_runs
eval = "evaluation" in red_runs
if red_label_fields or eval:
if red_label_fields:
label_field_types = list(
set(
[
get_label_type(sample_collection, field)
for field in red_label_fields
]
)
)
else:
label_field_types = []
if eval:
eval_key = red_runs["evaluation"]["key"]
eval_types = [_get_evaluation_type(sample_collection, eval_key)]
else:
eval_types = []
label_types = list(set(label_field_types + eval_types + ["all"]))
_filter = _filter & examples["label_type"].isin(label_types)
## contains match() or filter_labels() in stages
mfl_cond = red_label_fields and not text_sim
conds = [geo, text_sim, image_sim, meta, eval, mfl_cond]
strs = ["geo", "text_sim", "image_sim", "meta", "eval", "mfl"]
for cond, cond_str in zip(conds, strs):
if not cond:
_filter = _filter & (examples[cond_str] == False)
filtered_examples = examples[_filter]
filtered_queries, filtered_stages, hashes = (
filtered_examples["query"].tolist(),
filtered_examples["stages"].tolist(),
filtered_examples["hash"].tolist(),
)
filtered_embeddings = [embeddings[key] for key in hashes]
return filtered_queries, filtered_stages, filtered_embeddings
def get_similar_examples(sample_collection, query, runs, label_fields):
ex_queries, ex_stages, ex_embeddings = _get_filtered_examples(
sample_collection, runs, label_fields
)
model = get_embedding_function()
query_embedding = np.array(model([query]))
if len(query_embedding.shape) == 2:
query_embedding = query_embedding[0]
dists = np.array([cosine(query_embedding, emb) for emb in ex_embeddings])
sorted_ix = np.argsort(dists).astype(int)
k = 20
similar_queries = [ex_queries[ix] for ix in sorted_ix[:k]]
similar_stages = [ex_stages[ix] for ix in sorted_ix[:k]]
return [
{"input": sq, "output": ss}
for sq, ss in zip(similar_queries, similar_stages)
]
def generate_view_stage_examples_prompt_template(
sample_collection, query, runs, label_fields
):
examples = get_similar_examples(
sample_collection, query, runs, label_fields
)
example_prompt = VIEW_STAGE_EXAMPLE_PROMPT
return FewShotPromptTemplate(
examples=examples,
example_prompt=example_prompt,
prefix="Generate code to produce the FiftyOne view stages for the following prompts:\n",
suffix="Input: {text}\nOutput:",
input_variables=["text"],
)
def generate_view_stage_examples_prompt(
sample_collection, query, runs, label_fields
):
similar_examples_prompt_template = (
generate_view_stage_examples_prompt_template(
sample_collection, query, runs, label_fields
)
)
prompt = similar_examples_prompt_template.format(text=query)
return _replace_run_keys(prompt, runs)
| [
"langchain.prompts.FewShotPromptTemplate",
"langchain.prompts.PromptTemplate"
] | [((489, 523), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""examples"""'], {}), "(ROOT_DIR, 'examples')\n", (501, 523), False, 'import os\n'), ((551, 605), 'os.path.join', 'os.path.join', (['EXAMPLES_DIR', '"""viewstage_embeddings.pkl"""'], {}), "(EXAMPLES_DIR, 'viewstage_embeddings.pkl')\n", (563, 605), False, 'import os\n'), ((639, 691), 'os.path.join', 'os.path.join', (['EXAMPLES_DIR', '"""viewstage_examples.csv"""'], {}), "(EXAMPLES_DIR, 'viewstage_examples.csv')\n", (651, 691), False, 'import os\n'), ((721, 825), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['input', 'output']", 'template': '"""Input: {input}\nOutput: {output}"""'}), '(input_variables=[\'input\', \'output\'], template=\n """Input: {input}\nOutput: {output}""")\n', (735, 825), False, 'from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n'), ((877, 916), 'os.path.isfile', 'os.path.isfile', (['EXAMPLE_EMBEDDINGS_PATH'], {}), '(EXAMPLE_EMBEDDINGS_PATH)\n', (891, 916), False, 'import os\n'), ((4348, 4406), 'pandas.read_csv', 'pd.read_csv', (['VIEW_STAGE_EXAMPLES_PATH'], {'on_bad_lines': '"""skip"""'}), "(VIEW_STAGE_EXAMPLES_PATH, on_bad_lines='skip')\n", (4359, 4406), True, 'import pandas as pd\n'), ((8216, 8455), 'langchain.prompts.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {'examples': 'examples', 'example_prompt': 'example_prompt', 'prefix': '"""Generate code to produce the FiftyOne view stages for the following prompts:\n"""', 'suffix': '"""Input: {text}\nOutput:"""', 'input_variables': "['text']"}), '(examples=examples, example_prompt=example_prompt,\n prefix=\n """Generate code to produce the FiftyOne view stages for the following prompts:\n"""\n , suffix="""Input: {text}\nOutput:""", input_variables=[\'text\'])\n', (8237, 8455), False, 'from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n'), ((446, 471), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (461, 471), False, 'import os\n'), ((4933, 4958), 'numpy.array', 'np.array', (['embeddings[key]'], {}), '(embeddings[key])\n', (4941, 4958), True, 'import numpy as np\n'), ((1006, 1020), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1017, 1020), False, 'import pickle\n'), ((1751, 1785), 'pickle.dump', 'pickle.dump', (['example_embeddings', 'f'], {}), '(example_embeddings, f)\n', (1762, 1785), False, 'import pickle\n'), ((7604, 7632), 'scipy.spatial.distance.cosine', 'cosine', (['query_embedding', 'emb'], {}), '(query_embedding, emb)\n', (7610, 7632), False, 'from scipy.spatial.distance import cosine\n'), ((7677, 7694), 'numpy.argsort', 'np.argsort', (['dists'], {}), '(dists)\n', (7687, 7694), True, 'import numpy as np\n')] |
import base64
import email
from enum import Enum
from typing import Any, Dict, List, Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.gmail.base import GmailBaseTool
from langchain.tools.gmail.utils import clean_email_body
class Resource(str, Enum):
"""Enumerator of Resources to search."""
THREADS = "threads"
MESSAGES = "messages"
class SearchArgsSchema(BaseModel):
"""Input for SearchGmailTool."""
# From https://support.google.com/mail/answer/7190?hl=en
query: str = Field(
...,
description="The Gmail query. Example filters include from:sender,"
" to:recipient, subject:subject, -filtered_term,"
" in:folder, is:important|read|starred, after:year/mo/date, "
"before:year/mo/date, label:label_name"
' "exact phrase".'
" Search newer/older than using d (day), m (month), and y (year): "
"newer_than:2d, older_than:1y."
" Attachments with extension example: filename:pdf. Multiple term"
" matching example: from:amy OR from:david.",
)
resource: Resource = Field(
default=Resource.MESSAGES,
description="Whether to search for threads or messages.",
)
max_results: int = Field(
default=10,
description="The maximum number of results to return.",
)
class GmailSearch(GmailBaseTool):
"""Tool that searches for messages or threads in Gmail."""
name: str = "search_gmail"
description: str = (
"Use this tool to search for email messages or threads."
" The input must be a valid Gmail query."
" The output is a JSON list of the requested resource."
)
args_schema: Type[SearchArgsSchema] = SearchArgsSchema
def _parse_threads(self, threads: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
# Add the thread message snippets to the thread results
results = []
for thread in threads:
thread_id = thread["id"]
thread_data = (
self.api_resource.users()
.threads()
.get(userId="me", id=thread_id)
.execute()
)
messages = thread_data["messages"]
thread["messages"] = []
for message in messages:
snippet = message["snippet"]
thread["messages"].append({"snippet": snippet, "id": message["id"]})
results.append(thread)
return results
def _parse_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
results = []
for message in messages:
message_id = message["id"]
message_data = (
self.api_resource.users()
.messages()
.get(userId="me", format="raw", id=message_id)
.execute()
)
raw_message = base64.urlsafe_b64decode(message_data["raw"])
email_msg = email.message_from_bytes(raw_message)
subject = email_msg["Subject"]
sender = email_msg["From"]
message_body = email_msg.get_payload()
body = clean_email_body(message_body)
results.append(
{
"id": message["id"],
"threadId": message_data["threadId"],
"snippet": message_data["snippet"],
"body": body,
"subject": subject,
"sender": sender,
}
)
return results
def _run(
self,
query: str,
resource: Resource = Resource.MESSAGES,
max_results: int = 10,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> List[Dict[str, Any]]:
"""Run the tool."""
results = (
self.api_resource.users()
.messages()
.list(userId="me", q=query, maxResults=max_results)
.execute()
.get(resource.value, [])
)
if resource == Resource.THREADS:
return self._parse_threads(results)
elif resource == Resource.MESSAGES:
return self._parse_messages(results)
else:
raise NotImplementedError(f"Resource of type {resource} not implemented.")
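# Example usage (sketch, not part of the original module; assumes an authenticated
# Gmail API resource built elsewhere):
# tool = GmailSearch(api_resource=api_resource)
# results = tool.run({"query": "from:amy newer_than:2d", "max_results": 5})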
| [
"langchain.pydantic_v1.Field",
"langchain.tools.gmail.utils.clean_email_body"
] | [((606, 1054), 'langchain.pydantic_v1.Field', 'Field', (['...'], {'description': '"""The Gmail query. Example filters include from:sender, to:recipient, subject:subject, -filtered_term, in:folder, is:important|read|starred, after:year/mo/date, before:year/mo/date, label:label_name "exact phrase". Search newer/older than using d (day), m (month), and y (year): newer_than:2d, older_than:1y. Attachments with extension example: filename:pdf. Multiple term matching example: from:amy OR from:david."""'}), '(..., description=\n \'The Gmail query. Example filters include from:sender, to:recipient, subject:subject, -filtered_term, in:folder, is:important|read|starred, after:year/mo/date, before:year/mo/date, label:label_name "exact phrase". Search newer/older than using d (day), m (month), and y (year): newer_than:2d, older_than:1y. Attachments with extension example: filename:pdf. Multiple term matching example: from:amy OR from:david.\'\n )\n', (611, 1054), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((1181, 1276), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'Resource.MESSAGES', 'description': '"""Whether to search for threads or messages."""'}), "(default=Resource.MESSAGES, description=\n 'Whether to search for threads or messages.')\n", (1186, 1276), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((1318, 1391), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': '(10)', 'description': '"""The maximum number of results to return."""'}), "(default=10, description='The maximum number of results to return.')\n", (1323, 1391), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((2960, 3005), 'base64.urlsafe_b64decode', 'base64.urlsafe_b64decode', (["message_data['raw']"], {}), "(message_data['raw'])\n", (2984, 3005), False, 'import base64\n'), ((3031, 3068), 'email.message_from_bytes', 'email.message_from_bytes', (['raw_message'], {}), '(raw_message)\n', (3055, 3068), False, 'import email\n'), ((3224, 3254), 'langchain.tools.gmail.utils.clean_email_body', 'clean_email_body', (['message_body'], {}), '(message_body)\n', (3240, 3254), False, 'from langchain.tools.gmail.utils import clean_email_body\n')] |
from langchain import PromptTemplate
from codedog.templates import grimoire_en
TRANSLATE_PROMPT = PromptTemplate(
template=grimoire_en.TRANSLATE_PR_REVIEW, input_variables=["language", "description", "content"]
)
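# Example usage (sketch; the argument values below are illustrative only):
# text = TRANSLATE_PROMPT.format(
#     language="English", description="PR description", content="Review text to translate"
# )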
| [
"langchain.PromptTemplate"
] | [((100, 217), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'grimoire_en.TRANSLATE_PR_REVIEW', 'input_variables': "['language', 'description', 'content']"}), "(template=grimoire_en.TRANSLATE_PR_REVIEW, input_variables=[\n 'language', 'description', 'content'])\n", (114, 217), False, 'from langchain import PromptTemplate\n')] |
"""Callback Handler that writes to a file."""
from typing import Any, Dict, Optional, TextIO, cast
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
class FileCallbackHandler(BaseCallbackHandler):
"""Callback Handler that writes to a file."""
def __init__(
self, filename: str, mode: str = "a", color: Optional[str] = None
) -> None:
"""Initialize callback handler."""
self.file = cast(TextIO, open(filename, mode, encoding="utf-8"))
self.color = color
def __del__(self) -> None:
"""Destructor to cleanup when done."""
self.file.close()
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain."""
class_name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
print_text(
f"\n\n\033[1m> Entering new {class_name} chain...\033[0m",
end="\n",
file=self.file,
)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain."""
print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file)
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action."""
print_text(action.log, color=color or self.color, file=self.file)
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation."""
if observation_prefix is not None:
print_text(f"\n{observation_prefix}", file=self.file)
print_text(output, color=color or self.color, file=self.file)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}", file=self.file)
def on_text(
self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any
) -> None:
"""Run when agent ends."""
print_text(text, color=color or self.color, end=end, file=self.file)
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on agent end."""
print_text(finish.log, color=color or self.color, end="\n", file=self.file)
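# Example usage (sketch; `chain` stands in for any chain or runnable and is not defined here):
# handler = FileCallbackHandler("chain_output.log")
# chain.invoke(inputs, config={"callbacks": [handler]})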
| [
"langchain_core.utils.input.print_text"
] | [((989, 1084), 'langchain_core.utils.input.print_text', 'print_text', (['f"""\n\n\x1b[1m> Entering new {class_name} chain...\x1b[0m"""'], {'end': '"""\n"""', 'file': 'self.file'}), '(f"""\n\n\x1b[1m> Entering new {class_name} chain...\x1b[0m""", end=\'\\n\',\n file=self.file)\n', (999, 1084), False, 'from langchain_core.utils.input import print_text\n'), ((1267, 1337), 'langchain_core.utils.input.print_text', 'print_text', (['"""\n\x1b[1m> Finished chain.\x1b[0m"""'], {'end': '"""\n"""', 'file': 'self.file'}), '("""\n\x1b[1m> Finished chain.\x1b[0m""", end=\'\\n\', file=self.file)\n', (1277, 1337), False, 'from langchain_core.utils.input import print_text\n'), ((1502, 1567), 'langchain_core.utils.input.print_text', 'print_text', (['action.log'], {'color': '(color or self.color)', 'file': 'self.file'}), '(action.log, color=color or self.color, file=self.file)\n', (1512, 1567), False, 'from langchain_core.utils.input import print_text\n'), ((1971, 2032), 'langchain_core.utils.input.print_text', 'print_text', (['output'], {'color': '(color or self.color)', 'file': 'self.file'}), '(output, color=color or self.color, file=self.file)\n', (1981, 2032), False, 'from langchain_core.utils.input import print_text\n'), ((2285, 2353), 'langchain_core.utils.input.print_text', 'print_text', (['text'], {'color': '(color or self.color)', 'end': 'end', 'file': 'self.file'}), '(text, color=color or self.color, end=end, file=self.file)\n', (2295, 2353), False, 'from langchain_core.utils.input import print_text\n'), ((2513, 2588), 'langchain_core.utils.input.print_text', 'print_text', (['finish.log'], {'color': '(color or self.color)', 'end': '"""\n"""', 'file': 'self.file'}), "(finish.log, color=color or self.color, end='\\n', file=self.file)\n", (2523, 2588), False, 'from langchain_core.utils.input import print_text\n'), ((1909, 1965), 'langchain_core.utils.input.print_text', 'print_text', (['f"""\n{observation_prefix}"""'], {'file': 'self.file'}), '(f"""\n{observation_prefix}""", file=self.file)\n', (1919, 1965), False, 'from langchain_core.utils.input import print_text\n'), ((2080, 2125), 'langchain_core.utils.input.print_text', 'print_text', (['f"""\n{llm_prefix}"""'], {'file': 'self.file'}), "(f'\\n{llm_prefix}', file=self.file)\n", (2090, 2125), False, 'from langchain_core.utils.input import print_text\n')] |
"""Callback Handler that writes to a file."""
from typing import Any, Dict, Optional, TextIO, cast
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
class FileCallbackHandler(BaseCallbackHandler):
"""Callback Handler that writes to a file."""
def __init__(
self, filename: str, mode: str = "a", color: Optional[str] = None
) -> None:
"""Initialize callback handler."""
self.file = cast(TextIO, open(filename, mode, encoding="utf-8"))
self.color = color
def __del__(self) -> None:
"""Destructor to cleanup when done."""
self.file.close()
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain."""
class_name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
print_text(
f"\n\n\033[1m> Entering new {class_name} chain...\033[0m",
end="\n",
file=self.file,
)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain."""
print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file)
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action."""
print_text(action.log, color=color or self.color, file=self.file)
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation."""
if observation_prefix is not None:
print_text(f"\n{observation_prefix}", file=self.file)
print_text(output, color=color or self.color, file=self.file)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}", file=self.file)
def on_text(
self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any
) -> None:
"""Run when agent ends."""
print_text(text, color=color or self.color, end=end, file=self.file)
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on agent end."""
print_text(finish.log, color=color or self.color, end="\n", file=self.file)
| [
"langchain_core.utils.input.print_text"
] | [((989, 1084), 'langchain_core.utils.input.print_text', 'print_text', (['f"""\n\n\x1b[1m> Entering new {class_name} chain...\x1b[0m"""'], {'end': '"""\n"""', 'file': 'self.file'}), '(f"""\n\n\x1b[1m> Entering new {class_name} chain...\x1b[0m""", end=\'\\n\',\n file=self.file)\n', (999, 1084), False, 'from langchain_core.utils.input import print_text\n'), ((1267, 1337), 'langchain_core.utils.input.print_text', 'print_text', (['"""\n\x1b[1m> Finished chain.\x1b[0m"""'], {'end': '"""\n"""', 'file': 'self.file'}), '("""\n\x1b[1m> Finished chain.\x1b[0m""", end=\'\\n\', file=self.file)\n', (1277, 1337), False, 'from langchain_core.utils.input import print_text\n'), ((1502, 1567), 'langchain_core.utils.input.print_text', 'print_text', (['action.log'], {'color': '(color or self.color)', 'file': 'self.file'}), '(action.log, color=color or self.color, file=self.file)\n', (1512, 1567), False, 'from langchain_core.utils.input import print_text\n'), ((1971, 2032), 'langchain_core.utils.input.print_text', 'print_text', (['output'], {'color': '(color or self.color)', 'file': 'self.file'}), '(output, color=color or self.color, file=self.file)\n', (1981, 2032), False, 'from langchain_core.utils.input import print_text\n'), ((2285, 2353), 'langchain_core.utils.input.print_text', 'print_text', (['text'], {'color': '(color or self.color)', 'end': 'end', 'file': 'self.file'}), '(text, color=color or self.color, end=end, file=self.file)\n', (2295, 2353), False, 'from langchain_core.utils.input import print_text\n'), ((2513, 2588), 'langchain_core.utils.input.print_text', 'print_text', (['finish.log'], {'color': '(color or self.color)', 'end': '"""\n"""', 'file': 'self.file'}), "(finish.log, color=color or self.color, end='\\n', file=self.file)\n", (2523, 2588), False, 'from langchain_core.utils.input import print_text\n'), ((1909, 1965), 'langchain_core.utils.input.print_text', 'print_text', (['f"""\n{observation_prefix}"""'], {'file': 'self.file'}), '(f"""\n{observation_prefix}""", file=self.file)\n', (1919, 1965), False, 'from langchain_core.utils.input import print_text\n'), ((2080, 2125), 'langchain_core.utils.input.print_text', 'print_text', (['f"""\n{llm_prefix}"""'], {'file': 'self.file'}), "(f'\\n{llm_prefix}', file=self.file)\n", (2090, 2125), False, 'from langchain_core.utils.input import print_text\n')] |
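A minimal usage sketch for the handler record above (not part of the dataset row; it assumes langchain_core is installed and drives the callback methods directly so the trace lands in the file):
# Usage sketch (assumption: the FileCallbackHandler class above is in scope;
# "run.log" is a placeholder path).
handler = FileCallbackHandler("run.log", mode="w", color="green")
handler.on_chain_start({"name": "demo_chain"}, {"question": "2 + 2?"})
handler.on_text("thinking about arithmetic...\n")
handler.on_tool_end("4", observation_prefix="Observation: ", llm_prefix="Thought: ")
handler.on_chain_end({"answer": "4"})
# run.log now holds the bolded/colored trace written via print_text.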
"""Callback Handler that writes to a file."""
from typing import Any, Dict, Optional, TextIO, cast
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
class FileCallbackHandler(BaseCallbackHandler):
"""Callback Handler that writes to a file."""
def __init__(
self, filename: str, mode: str = "a", color: Optional[str] = None
) -> None:
"""Initialize callback handler."""
self.file = cast(TextIO, open(filename, mode, encoding="utf-8"))
self.color = color
def __del__(self) -> None:
"""Destructor to cleanup when done."""
self.file.close()
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain."""
class_name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
print_text(
f"\n\n\033[1m> Entering new {class_name} chain...\033[0m",
end="\n",
file=self.file,
)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain."""
print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file)
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action."""
print_text(action.log, color=color or self.color, file=self.file)
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation."""
if observation_prefix is not None:
print_text(f"\n{observation_prefix}", file=self.file)
print_text(output, color=color or self.color, file=self.file)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}", file=self.file)
def on_text(
self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any
) -> None:
"""Run when agent ends."""
print_text(text, color=color or self.color, end=end, file=self.file)
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on agent end."""
print_text(finish.log, color=color or self.color, end="\n", file=self.file)
| [
"langchain_core.utils.input.print_text"
] | [((989, 1084), 'langchain_core.utils.input.print_text', 'print_text', (['f"""\n\n\x1b[1m> Entering new {class_name} chain...\x1b[0m"""'], {'end': '"""\n"""', 'file': 'self.file'}), '(f"""\n\n\x1b[1m> Entering new {class_name} chain...\x1b[0m""", end=\'\\n\',\n file=self.file)\n', (999, 1084), False, 'from langchain_core.utils.input import print_text\n'), ((1267, 1337), 'langchain_core.utils.input.print_text', 'print_text', (['"""\n\x1b[1m> Finished chain.\x1b[0m"""'], {'end': '"""\n"""', 'file': 'self.file'}), '("""\n\x1b[1m> Finished chain.\x1b[0m""", end=\'\\n\', file=self.file)\n', (1277, 1337), False, 'from langchain_core.utils.input import print_text\n'), ((1502, 1567), 'langchain_core.utils.input.print_text', 'print_text', (['action.log'], {'color': '(color or self.color)', 'file': 'self.file'}), '(action.log, color=color or self.color, file=self.file)\n', (1512, 1567), False, 'from langchain_core.utils.input import print_text\n'), ((1971, 2032), 'langchain_core.utils.input.print_text', 'print_text', (['output'], {'color': '(color or self.color)', 'file': 'self.file'}), '(output, color=color or self.color, file=self.file)\n', (1981, 2032), False, 'from langchain_core.utils.input import print_text\n'), ((2285, 2353), 'langchain_core.utils.input.print_text', 'print_text', (['text'], {'color': '(color or self.color)', 'end': 'end', 'file': 'self.file'}), '(text, color=color or self.color, end=end, file=self.file)\n', (2295, 2353), False, 'from langchain_core.utils.input import print_text\n'), ((2513, 2588), 'langchain_core.utils.input.print_text', 'print_text', (['finish.log'], {'color': '(color or self.color)', 'end': '"""\n"""', 'file': 'self.file'}), "(finish.log, color=color or self.color, end='\\n', file=self.file)\n", (2523, 2588), False, 'from langchain_core.utils.input import print_text\n'), ((1909, 1965), 'langchain_core.utils.input.print_text', 'print_text', (['f"""\n{observation_prefix}"""'], {'file': 'self.file'}), '(f"""\n{observation_prefix}""", file=self.file)\n', (1919, 1965), False, 'from langchain_core.utils.input import print_text\n'), ((2080, 2125), 'langchain_core.utils.input.print_text', 'print_text', (['f"""\n{llm_prefix}"""'], {'file': 'self.file'}), "(f'\\n{llm_prefix}', file=self.file)\n", (2090, 2125), False, 'from langchain_core.utils.input import print_text\n')] |
"""Callback Handler that writes to a file."""
from typing import Any, Dict, Optional, TextIO, cast
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
class FileCallbackHandler(BaseCallbackHandler):
"""Callback Handler that writes to a file."""
def __init__(
self, filename: str, mode: str = "a", color: Optional[str] = None
) -> None:
"""Initialize callback handler."""
self.file = cast(TextIO, open(filename, mode, encoding="utf-8"))
self.color = color
def __del__(self) -> None:
"""Destructor to cleanup when done."""
self.file.close()
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain."""
class_name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
print_text(
f"\n\n\033[1m> Entering new {class_name} chain...\033[0m",
end="\n",
file=self.file,
)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain."""
print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file)
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action."""
print_text(action.log, color=color or self.color, file=self.file)
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation."""
if observation_prefix is not None:
print_text(f"\n{observation_prefix}", file=self.file)
print_text(output, color=color or self.color, file=self.file)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}", file=self.file)
def on_text(
self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any
) -> None:
"""Run when agent ends."""
print_text(text, color=color or self.color, end=end, file=self.file)
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on agent end."""
print_text(finish.log, color=color or self.color, end="\n", file=self.file)
| [
"langchain_core.utils.input.print_text"
] | [((989, 1084), 'langchain_core.utils.input.print_text', 'print_text', (['f"""\n\n\x1b[1m> Entering new {class_name} chain...\x1b[0m"""'], {'end': '"""\n"""', 'file': 'self.file'}), '(f"""\n\n\x1b[1m> Entering new {class_name} chain...\x1b[0m""", end=\'\\n\',\n file=self.file)\n', (999, 1084), False, 'from langchain_core.utils.input import print_text\n'), ((1267, 1337), 'langchain_core.utils.input.print_text', 'print_text', (['"""\n\x1b[1m> Finished chain.\x1b[0m"""'], {'end': '"""\n"""', 'file': 'self.file'}), '("""\n\x1b[1m> Finished chain.\x1b[0m""", end=\'\\n\', file=self.file)\n', (1277, 1337), False, 'from langchain_core.utils.input import print_text\n'), ((1502, 1567), 'langchain_core.utils.input.print_text', 'print_text', (['action.log'], {'color': '(color or self.color)', 'file': 'self.file'}), '(action.log, color=color or self.color, file=self.file)\n', (1512, 1567), False, 'from langchain_core.utils.input import print_text\n'), ((1971, 2032), 'langchain_core.utils.input.print_text', 'print_text', (['output'], {'color': '(color or self.color)', 'file': 'self.file'}), '(output, color=color or self.color, file=self.file)\n', (1981, 2032), False, 'from langchain_core.utils.input import print_text\n'), ((2285, 2353), 'langchain_core.utils.input.print_text', 'print_text', (['text'], {'color': '(color or self.color)', 'end': 'end', 'file': 'self.file'}), '(text, color=color or self.color, end=end, file=self.file)\n', (2295, 2353), False, 'from langchain_core.utils.input import print_text\n'), ((2513, 2588), 'langchain_core.utils.input.print_text', 'print_text', (['finish.log'], {'color': '(color or self.color)', 'end': '"""\n"""', 'file': 'self.file'}), "(finish.log, color=color or self.color, end='\\n', file=self.file)\n", (2523, 2588), False, 'from langchain_core.utils.input import print_text\n'), ((1909, 1965), 'langchain_core.utils.input.print_text', 'print_text', (['f"""\n{observation_prefix}"""'], {'file': 'self.file'}), '(f"""\n{observation_prefix}""", file=self.file)\n', (1919, 1965), False, 'from langchain_core.utils.input import print_text\n'), ((2080, 2125), 'langchain_core.utils.input.print_text', 'print_text', (['f"""\n{llm_prefix}"""'], {'file': 'self.file'}), "(f'\\n{llm_prefix}', file=self.file)\n", (2090, 2125), False, 'from langchain_core.utils.input import print_text\n')] |
import base64
import json
from langchain_community.chat_models import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate
from langchain_core.pydantic_v1 import Field
from langserve import CustomUserType
from .prompts import (
AI_REPONSE_DICT,
FULL_PROMPT,
USER_EXAMPLE_DICT,
create_prompt,
)
from .utils import parse_llm_output
llm = ChatOpenAI(temperature=0, model="gpt-4")
prompt = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(FULL_PROMPT),
("human", "{user_example}"),
("ai", "{ai_response}"),
("human", "{input}"),
],
)
# ATTENTION: Inherit from CustomUserType instead of BaseModel otherwise
# the server will decode it into a dict instead of a pydantic model.
class FileProcessingRequest(CustomUserType):
"""Request including a base64 encoded file."""
# The extra field is used to specify a widget for the playground UI.
file: str = Field(..., extra={"widget": {"type": "base64file"}})
num_plates: int = None
num_rows: int = 8
num_cols: int = 12
def _load_file(request: FileProcessingRequest):
return base64.b64decode(request.file.encode("utf-8")).decode("utf-8")
def _load_prompt(request: FileProcessingRequest):
return create_prompt(
num_plates=request.num_plates,
num_rows=request.num_rows,
num_cols=request.num_cols,
)
def _get_col_range_str(request: FileProcessingRequest):
if request.num_cols:
return f"from 1 to {request.num_cols}"
else:
return ""
def _get_json_format(request: FileProcessingRequest):
return json.dumps(
[
{
"row_start": 12,
"row_end": 12 + request.num_rows - 1,
"col_start": 1,
"col_end": 1 + request.num_cols - 1,
"contents": "Entity ID",
}
]
)
chain = (
{
# Should add validation to ensure numeric indices
"input": _load_file,
"hint": _load_prompt,
"col_range_str": _get_col_range_str,
"json_format": _get_json_format,
"user_example": lambda x: USER_EXAMPLE_DICT[x.num_rows * x.num_cols],
"ai_response": lambda x: AI_REPONSE_DICT[x.num_rows * x.num_cols],
}
| prompt
| llm
| StrOutputParser()
| parse_llm_output
).with_types(input_type=FileProcessingRequest)
| [
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.SystemMessagePromptTemplate.from_template",
"langchain_core.output_parsers.StrOutputParser",
"langchain_community.chat_models.ChatOpenAI"
] | [((454, 494), 'langchain_community.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-4"""'}), "(temperature=0, model='gpt-4')\n", (464, 494), False, 'from langchain_community.chat_models import ChatOpenAI\n'), ((1047, 1099), 'langchain_core.pydantic_v1.Field', 'Field', (['...'], {'extra': "{'widget': {'type': 'base64file'}}"}), "(..., extra={'widget': {'type': 'base64file'}})\n", (1052, 1099), False, 'from langchain_core.pydantic_v1 import Field\n'), ((1714, 1869), 'json.dumps', 'json.dumps', (["[{'row_start': 12, 'row_end': 12 + request.num_rows - 1, 'col_start': 1,\n 'col_end': 1 + request.num_cols - 1, 'contents': 'Entity ID'}]"], {}), "([{'row_start': 12, 'row_end': 12 + request.num_rows - 1,\n 'col_start': 1, 'col_end': 1 + request.num_cols - 1, 'contents':\n 'Entity ID'}])\n", (1724, 1869), False, 'import json\n'), ((553, 607), 'langchain_core.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['FULL_PROMPT'], {}), '(FULL_PROMPT)\n', (594, 607), False, 'from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate\n'), ((2402, 2419), 'langchain_core.output_parsers.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (2417, 2419), False, 'from langchain_core.output_parsers import StrOutputParser\n')] |
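A hypothetical invocation sketch for the chain record above (not part of the dataset row; the input file name is made up, and it assumes an OpenAI API key plus the template's local .prompts and .utils modules are importable):
# Hypothetical invocation (assumes OPENAI_API_KEY is set and the relative
# .prompts / .utils modules from this template are on the path).
import base64
with open("plate_map.csv", "rb") as f:  # placeholder file name
    encoded = base64.b64encode(f.read()).decode("utf-8")
request = FileProcessingRequest(file=encoded, num_plates=1, num_rows=8, num_cols=12)
parsed_plates = chain.invoke(request)  # returns whatever parse_llm_output produces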
import json
from typing import Any, Callable, List
from langchain_core.tracers.base import BaseTracer
from langchain_core.tracers.schemas import Run
from langchain_core.utils.input import get_bolded_text, get_colored_text
def try_json_stringify(obj: Any, fallback: str) -> str:
"""
Try to stringify an object to JSON.
Args:
obj: Object to stringify.
fallback: Fallback string to return if the object cannot be stringified.
Returns:
A JSON string if the object can be stringified, otherwise the fallback string.
"""
try:
return json.dumps(obj, indent=2, ensure_ascii=False)
except Exception:
return fallback
def elapsed(run: Any) -> str:
"""Get the elapsed time of a run.
Args:
run: any object with a start_time and end_time attribute.
Returns:
A string with the elapsed time in seconds or
milliseconds if time is less than a second.
"""
elapsed_time = run.end_time - run.start_time
milliseconds = elapsed_time.total_seconds() * 1000
if milliseconds < 1000:
return f"{milliseconds:.0f}ms"
return f"{(milliseconds / 1000):.2f}s"
class FunctionCallbackHandler(BaseTracer):
"""Tracer that calls a function with a single str parameter."""
name: str = "function_callback_handler"
def __init__(self, function: Callable[[str], None], **kwargs: Any) -> None:
super().__init__(**kwargs)
self.function_callback = function
def _persist_run(self, run: Run) -> None:
pass
def get_parents(self, run: Run) -> List[Run]:
parents = []
current_run = run
while current_run.parent_run_id:
parent = self.run_map.get(str(current_run.parent_run_id))
if parent:
parents.append(parent)
current_run = parent
else:
break
return parents
def get_breadcrumbs(self, run: Run) -> str:
parents = self.get_parents(run)[::-1]
string = " > ".join(
f"{parent.execution_order}:{parent.run_type}:{parent.name}"
if i != len(parents) - 1
else f"{parent.execution_order}:{parent.run_type}:{parent.name}"
for i, parent in enumerate(parents + [run])
)
return string
# logging methods
def _on_chain_start(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(
f"{get_colored_text('[chain/start]', color='green')} "
+ get_bolded_text(f"[{crumbs}] Entering {run_type} run with input:\n")
+ f"{try_json_stringify(run.inputs, '[inputs]')}"
)
def _on_chain_end(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(
f"{get_colored_text('[chain/end]', color='blue')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] Exiting {run_type} run with output:\n"
)
+ f"{try_json_stringify(run.outputs, '[outputs]')}"
)
def _on_chain_error(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(
f"{get_colored_text('[chain/error]', color='red')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] {run_type} run errored with error:\n"
)
+ f"{try_json_stringify(run.error, '[error]')}"
)
def _on_llm_start(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
inputs = (
{"prompts": [p.strip() for p in run.inputs["prompts"]]}
if "prompts" in run.inputs
else run.inputs
)
self.function_callback(
f"{get_colored_text('[llm/start]', color='green')} "
+ get_bolded_text(f"[{crumbs}] Entering LLM run with input:\n")
+ f"{try_json_stringify(inputs, '[inputs]')}"
)
def _on_llm_end(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[llm/end]', color='blue')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] Exiting LLM run with output:\n"
)
+ f"{try_json_stringify(run.outputs, '[response]')}"
)
def _on_llm_error(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[llm/error]', color='red')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] LLM run errored with error:\n"
)
+ f"{try_json_stringify(run.error, '[error]')}"
)
def _on_tool_start(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f'{get_colored_text("[tool/start]", color="green")} '
+ get_bolded_text(f"[{crumbs}] Entering Tool run with input:\n")
+ f'"{run.inputs["input"].strip()}"'
)
def _on_tool_end(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
if run.outputs:
self.function_callback(
f'{get_colored_text("[tool/end]", color="blue")} '
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] Exiting Tool run with output:\n"
)
+ f'"{run.outputs["output"].strip()}"'
)
def _on_tool_error(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[tool/error]', color='red')} "
+ get_bolded_text(f"[{crumbs}] [{elapsed(run)}] ")
+ f"Tool run errored with error:\n"
f"{run.error}"
)
class ConsoleCallbackHandler(FunctionCallbackHandler):
"""Tracer that prints to the console."""
name: str = "console_callback_handler"
def __init__(self, **kwargs: Any) -> None:
super().__init__(function=print, **kwargs)
| [
"langchain_core.utils.input.get_colored_text",
"langchain_core.utils.input.get_bolded_text"
] | [((588, 633), 'json.dumps', 'json.dumps', (['obj'], {'indent': '(2)', 'ensure_ascii': '(False)'}), '(obj, indent=2, ensure_ascii=False)\n', (598, 633), False, 'import json\n'), ((2591, 2659), 'langchain_core.utils.input.get_bolded_text', 'get_bolded_text', (['f"""[{crumbs}] Entering {run_type} run with input:\n"""'], {}), "(f'[{crumbs}] Entering {run_type} run with input:\\n')\n", (2606, 2659), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((3968, 4029), 'langchain_core.utils.input.get_bolded_text', 'get_bolded_text', (['f"""[{crumbs}] Entering LLM run with input:\n"""'], {}), "(f'[{crumbs}] Entering LLM run with input:\\n')\n", (3983, 4029), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((5061, 5123), 'langchain_core.utils.input.get_bolded_text', 'get_bolded_text', (['f"""[{crumbs}] Entering Tool run with input:\n"""'], {}), "(f'[{crumbs}] Entering Tool run with input:\\n')\n", (5076, 5123), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((2525, 2573), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[chain/start]"""'], {'color': '"""green"""'}), "('[chain/start]', color='green')\n", (2541, 2573), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((2915, 2960), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[chain/end]"""'], {'color': '"""blue"""'}), "('[chain/end]', color='blue')\n", (2931, 2960), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((3353, 3399), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[chain/error]"""'], {'color': '"""red"""'}), "('[chain/error]', color='red')\n", (3369, 3399), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((3904, 3950), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[llm/start]"""'], {'color': '"""green"""'}), "('[llm/start]', color='green')\n", (3920, 3950), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((4234, 4277), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[llm/end]"""'], {'color': '"""blue"""'}), "('[llm/end]', color='blue')\n", (4250, 4277), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((4617, 4661), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[llm/error]"""'], {'color': '"""red"""'}), "('[llm/error]', color='red')\n", (4633, 4661), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((4996, 5043), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[tool/start]"""'], {'color': '"""green"""'}), "('[tool/start]', color='green')\n", (5012, 5043), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((5744, 5789), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[tool/error]"""'], {'color': '"""red"""'}), "('[tool/error]', color='red')\n", (5760, 5789), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((5352, 5396), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[tool/end]"""'], {'color': '"""blue"""'}), "('[tool/end]', color='blue')\n", (5368, 5396), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n')] |
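A small usage sketch for the tracer record above (not part of the dataset row; it assumes a langchain_core release whose Run schema still exposes execution_order, which the breadcrumb code above expects):
# Usage sketch (assumes a langchain_core version matching the Run schema used
# above, i.e. one that still populates run.execution_order).
from langchain_core.runnables import RunnableLambda
double = RunnableLambda(lambda x: x * 2)
result = double.invoke(3, config={"callbacks": [ConsoleCallbackHandler()]})
# Prints [chain/start] / [chain/end] entries for the lambda run; result == 6.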
import json
from typing import Any, Callable, List
from langchain_core.tracers.base import BaseTracer
from langchain_core.tracers.schemas import Run
from langchain_core.utils.input import get_bolded_text, get_colored_text
def try_json_stringify(obj: Any, fallback: str) -> str:
"""
Try to stringify an object to JSON.
Args:
obj: Object to stringify.
fallback: Fallback string to return if the object cannot be stringified.
Returns:
A JSON string if the object can be stringified, otherwise the fallback string.
"""
try:
return json.dumps(obj, indent=2, ensure_ascii=False)
except Exception:
return fallback
def elapsed(run: Any) -> str:
"""Get the elapsed time of a run.
Args:
run: any object with a start_time and end_time attribute.
Returns:
A string with the elapsed time in seconds or
milliseconds if time is less than a second.
"""
elapsed_time = run.end_time - run.start_time
milliseconds = elapsed_time.total_seconds() * 1000
if milliseconds < 1000:
return f"{milliseconds:.0f}ms"
return f"{(milliseconds / 1000):.2f}s"
class FunctionCallbackHandler(BaseTracer):
"""Tracer that calls a function with a single str parameter."""
name: str = "function_callback_handler"
def __init__(self, function: Callable[[str], None], **kwargs: Any) -> None:
super().__init__(**kwargs)
self.function_callback = function
def _persist_run(self, run: Run) -> None:
pass
def get_parents(self, run: Run) -> List[Run]:
parents = []
current_run = run
while current_run.parent_run_id:
parent = self.run_map.get(str(current_run.parent_run_id))
if parent:
parents.append(parent)
current_run = parent
else:
break
return parents
def get_breadcrumbs(self, run: Run) -> str:
parents = self.get_parents(run)[::-1]
string = " > ".join(
f"{parent.execution_order}:{parent.run_type}:{parent.name}"
if i != len(parents) - 1
else f"{parent.execution_order}:{parent.run_type}:{parent.name}"
for i, parent in enumerate(parents + [run])
)
return string
# logging methods
def _on_chain_start(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(
f"{get_colored_text('[chain/start]', color='green')} "
+ get_bolded_text(f"[{crumbs}] Entering {run_type} run with input:\n")
+ f"{try_json_stringify(run.inputs, '[inputs]')}"
)
def _on_chain_end(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(
f"{get_colored_text('[chain/end]', color='blue')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] Exiting {run_type} run with output:\n"
)
+ f"{try_json_stringify(run.outputs, '[outputs]')}"
)
def _on_chain_error(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(
f"{get_colored_text('[chain/error]', color='red')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] {run_type} run errored with error:\n"
)
+ f"{try_json_stringify(run.error, '[error]')}"
)
def _on_llm_start(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
inputs = (
{"prompts": [p.strip() for p in run.inputs["prompts"]]}
if "prompts" in run.inputs
else run.inputs
)
self.function_callback(
f"{get_colored_text('[llm/start]', color='green')} "
+ get_bolded_text(f"[{crumbs}] Entering LLM run with input:\n")
+ f"{try_json_stringify(inputs, '[inputs]')}"
)
def _on_llm_end(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[llm/end]', color='blue')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] Exiting LLM run with output:\n"
)
+ f"{try_json_stringify(run.outputs, '[response]')}"
)
def _on_llm_error(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[llm/error]', color='red')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] LLM run errored with error:\n"
)
+ f"{try_json_stringify(run.error, '[error]')}"
)
def _on_tool_start(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f'{get_colored_text("[tool/start]", color="green")} '
+ get_bolded_text(f"[{crumbs}] Entering Tool run with input:\n")
+ f'"{run.inputs["input"].strip()}"'
)
def _on_tool_end(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
if run.outputs:
self.function_callback(
f'{get_colored_text("[tool/end]", color="blue")} '
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] Exiting Tool run with output:\n"
)
+ f'"{run.outputs["output"].strip()}"'
)
def _on_tool_error(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[tool/error]', color='red')} "
+ get_bolded_text(f"[{crumbs}] [{elapsed(run)}] ")
+ f"Tool run errored with error:\n"
f"{run.error}"
)
class ConsoleCallbackHandler(FunctionCallbackHandler):
"""Tracer that prints to the console."""
name: str = "console_callback_handler"
def __init__(self, **kwargs: Any) -> None:
super().__init__(function=print, **kwargs)
| [
"langchain_core.utils.input.get_colored_text",
"langchain_core.utils.input.get_bolded_text"
] | [((588, 633), 'json.dumps', 'json.dumps', (['obj'], {'indent': '(2)', 'ensure_ascii': '(False)'}), '(obj, indent=2, ensure_ascii=False)\n', (598, 633), False, 'import json\n'), ((2591, 2659), 'langchain_core.utils.input.get_bolded_text', 'get_bolded_text', (['f"""[{crumbs}] Entering {run_type} run with input:\n"""'], {}), "(f'[{crumbs}] Entering {run_type} run with input:\\n')\n", (2606, 2659), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((3968, 4029), 'langchain_core.utils.input.get_bolded_text', 'get_bolded_text', (['f"""[{crumbs}] Entering LLM run with input:\n"""'], {}), "(f'[{crumbs}] Entering LLM run with input:\\n')\n", (3983, 4029), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((5061, 5123), 'langchain_core.utils.input.get_bolded_text', 'get_bolded_text', (['f"""[{crumbs}] Entering Tool run with input:\n"""'], {}), "(f'[{crumbs}] Entering Tool run with input:\\n')\n", (5076, 5123), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((2525, 2573), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[chain/start]"""'], {'color': '"""green"""'}), "('[chain/start]', color='green')\n", (2541, 2573), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((2915, 2960), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[chain/end]"""'], {'color': '"""blue"""'}), "('[chain/end]', color='blue')\n", (2931, 2960), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((3353, 3399), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[chain/error]"""'], {'color': '"""red"""'}), "('[chain/error]', color='red')\n", (3369, 3399), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((3904, 3950), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[llm/start]"""'], {'color': '"""green"""'}), "('[llm/start]', color='green')\n", (3920, 3950), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((4234, 4277), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[llm/end]"""'], {'color': '"""blue"""'}), "('[llm/end]', color='blue')\n", (4250, 4277), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((4617, 4661), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[llm/error]"""'], {'color': '"""red"""'}), "('[llm/error]', color='red')\n", (4633, 4661), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((4996, 5043), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[tool/start]"""'], {'color': '"""green"""'}), "('[tool/start]', color='green')\n", (5012, 5043), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((5744, 5789), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[tool/error]"""'], {'color': '"""red"""'}), "('[tool/error]', color='red')\n", (5760, 5789), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((5352, 5396), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[tool/end]"""'], {'color': '"""blue"""'}), "('[tool/end]', color='blue')\n", (5368, 5396), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n')] |
import json
from typing import Any, Callable, List
from langchain_core.tracers.base import BaseTracer
from langchain_core.tracers.schemas import Run
from langchain_core.utils.input import get_bolded_text, get_colored_text
def try_json_stringify(obj: Any, fallback: str) -> str:
"""
Try to stringify an object to JSON.
Args:
obj: Object to stringify.
fallback: Fallback string to return if the object cannot be stringified.
Returns:
A JSON string if the object can be stringified, otherwise the fallback string.
"""
try:
return json.dumps(obj, indent=2, ensure_ascii=False)
except Exception:
return fallback
def elapsed(run: Any) -> str:
"""Get the elapsed time of a run.
Args:
run: any object with a start_time and end_time attribute.
Returns:
A string with the elapsed time in seconds or
milliseconds if time is less than a second.
"""
elapsed_time = run.end_time - run.start_time
milliseconds = elapsed_time.total_seconds() * 1000
if milliseconds < 1000:
return f"{milliseconds:.0f}ms"
return f"{(milliseconds / 1000):.2f}s"
class FunctionCallbackHandler(BaseTracer):
"""Tracer that calls a function with a single str parameter."""
name: str = "function_callback_handler"
def __init__(self, function: Callable[[str], None], **kwargs: Any) -> None:
super().__init__(**kwargs)
self.function_callback = function
def _persist_run(self, run: Run) -> None:
pass
def get_parents(self, run: Run) -> List[Run]:
parents = []
current_run = run
while current_run.parent_run_id:
parent = self.run_map.get(str(current_run.parent_run_id))
if parent:
parents.append(parent)
current_run = parent
else:
break
return parents
def get_breadcrumbs(self, run: Run) -> str:
parents = self.get_parents(run)[::-1]
string = " > ".join(
f"{parent.execution_order}:{parent.run_type}:{parent.name}"
if i != len(parents) - 1
else f"{parent.execution_order}:{parent.run_type}:{parent.name}"
for i, parent in enumerate(parents + [run])
)
return string
# logging methods
def _on_chain_start(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(
f"{get_colored_text('[chain/start]', color='green')} "
+ get_bolded_text(f"[{crumbs}] Entering {run_type} run with input:\n")
+ f"{try_json_stringify(run.inputs, '[inputs]')}"
)
def _on_chain_end(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(
f"{get_colored_text('[chain/end]', color='blue')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] Exiting {run_type} run with output:\n"
)
+ f"{try_json_stringify(run.outputs, '[outputs]')}"
)
def _on_chain_error(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(
f"{get_colored_text('[chain/error]', color='red')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] {run_type} run errored with error:\n"
)
+ f"{try_json_stringify(run.error, '[error]')}"
)
def _on_llm_start(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
inputs = (
{"prompts": [p.strip() for p in run.inputs["prompts"]]}
if "prompts" in run.inputs
else run.inputs
)
self.function_callback(
f"{get_colored_text('[llm/start]', color='green')} "
+ get_bolded_text(f"[{crumbs}] Entering LLM run with input:\n")
+ f"{try_json_stringify(inputs, '[inputs]')}"
)
def _on_llm_end(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[llm/end]', color='blue')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] Exiting LLM run with output:\n"
)
+ f"{try_json_stringify(run.outputs, '[response]')}"
)
def _on_llm_error(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[llm/error]', color='red')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] LLM run errored with error:\n"
)
+ f"{try_json_stringify(run.error, '[error]')}"
)
def _on_tool_start(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f'{get_colored_text("[tool/start]", color="green")} '
+ get_bolded_text(f"[{crumbs}] Entering Tool run with input:\n")
+ f'"{run.inputs["input"].strip()}"'
)
def _on_tool_end(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
if run.outputs:
self.function_callback(
f'{get_colored_text("[tool/end]", color="blue")} '
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] Exiting Tool run with output:\n"
)
+ f'"{run.outputs["output"].strip()}"'
)
def _on_tool_error(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[tool/error]', color='red')} "
+ get_bolded_text(f"[{crumbs}] [{elapsed(run)}] ")
+ f"Tool run errored with error:\n"
f"{run.error}"
)
class ConsoleCallbackHandler(FunctionCallbackHandler):
"""Tracer that prints to the console."""
name: str = "console_callback_handler"
def __init__(self, **kwargs: Any) -> None:
super().__init__(function=print, **kwargs)
| [
"langchain_core.utils.input.get_colored_text",
"langchain_core.utils.input.get_bolded_text"
] | [((588, 633), 'json.dumps', 'json.dumps', (['obj'], {'indent': '(2)', 'ensure_ascii': '(False)'}), '(obj, indent=2, ensure_ascii=False)\n', (598, 633), False, 'import json\n'), ((2591, 2659), 'langchain_core.utils.input.get_bolded_text', 'get_bolded_text', (['f"""[{crumbs}] Entering {run_type} run with input:\n"""'], {}), "(f'[{crumbs}] Entering {run_type} run with input:\\n')\n", (2606, 2659), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((3968, 4029), 'langchain_core.utils.input.get_bolded_text', 'get_bolded_text', (['f"""[{crumbs}] Entering LLM run with input:\n"""'], {}), "(f'[{crumbs}] Entering LLM run with input:\\n')\n", (3983, 4029), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((5061, 5123), 'langchain_core.utils.input.get_bolded_text', 'get_bolded_text', (['f"""[{crumbs}] Entering Tool run with input:\n"""'], {}), "(f'[{crumbs}] Entering Tool run with input:\\n')\n", (5076, 5123), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((2525, 2573), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[chain/start]"""'], {'color': '"""green"""'}), "('[chain/start]', color='green')\n", (2541, 2573), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((2915, 2960), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[chain/end]"""'], {'color': '"""blue"""'}), "('[chain/end]', color='blue')\n", (2931, 2960), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((3353, 3399), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[chain/error]"""'], {'color': '"""red"""'}), "('[chain/error]', color='red')\n", (3369, 3399), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((3904, 3950), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[llm/start]"""'], {'color': '"""green"""'}), "('[llm/start]', color='green')\n", (3920, 3950), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((4234, 4277), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[llm/end]"""'], {'color': '"""blue"""'}), "('[llm/end]', color='blue')\n", (4250, 4277), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((4617, 4661), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[llm/error]"""'], {'color': '"""red"""'}), "('[llm/error]', color='red')\n", (4633, 4661), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((4996, 5043), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[tool/start]"""'], {'color': '"""green"""'}), "('[tool/start]', color='green')\n", (5012, 5043), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((5744, 5789), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[tool/error]"""'], {'color': '"""red"""'}), "('[tool/error]', color='red')\n", (5760, 5789), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((5352, 5396), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[tool/end]"""'], {'color': '"""blue"""'}), "('[tool/end]', color='blue')\n", (5368, 5396), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n')] |
import json
from typing import Any, Callable, List
from langchain_core.tracers.base import BaseTracer
from langchain_core.tracers.schemas import Run
from langchain_core.utils.input import get_bolded_text, get_colored_text
def try_json_stringify(obj: Any, fallback: str) -> str:
"""
Try to stringify an object to JSON.
Args:
obj: Object to stringify.
fallback: Fallback string to return if the object cannot be stringified.
Returns:
A JSON string if the object can be stringified, otherwise the fallback string.
"""
try:
return json.dumps(obj, indent=2, ensure_ascii=False)
except Exception:
return fallback
def elapsed(run: Any) -> str:
"""Get the elapsed time of a run.
Args:
run: any object with a start_time and end_time attribute.
Returns:
A string with the elapsed time in seconds or
milliseconds if time is less than a second.
"""
elapsed_time = run.end_time - run.start_time
milliseconds = elapsed_time.total_seconds() * 1000
if milliseconds < 1000:
return f"{milliseconds:.0f}ms"
return f"{(milliseconds / 1000):.2f}s"
class FunctionCallbackHandler(BaseTracer):
"""Tracer that calls a function with a single str parameter."""
name: str = "function_callback_handler"
def __init__(self, function: Callable[[str], None], **kwargs: Any) -> None:
super().__init__(**kwargs)
self.function_callback = function
def _persist_run(self, run: Run) -> None:
pass
def get_parents(self, run: Run) -> List[Run]:
parents = []
current_run = run
while current_run.parent_run_id:
parent = self.run_map.get(str(current_run.parent_run_id))
if parent:
parents.append(parent)
current_run = parent
else:
break
return parents
def get_breadcrumbs(self, run: Run) -> str:
parents = self.get_parents(run)[::-1]
string = " > ".join(
f"{parent.execution_order}:{parent.run_type}:{parent.name}"
if i != len(parents) - 1
else f"{parent.execution_order}:{parent.run_type}:{parent.name}"
for i, parent in enumerate(parents + [run])
)
return string
# logging methods
def _on_chain_start(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(
f"{get_colored_text('[chain/start]', color='green')} "
+ get_bolded_text(f"[{crumbs}] Entering {run_type} run with input:\n")
+ f"{try_json_stringify(run.inputs, '[inputs]')}"
)
def _on_chain_end(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(
f"{get_colored_text('[chain/end]', color='blue')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] Exiting {run_type} run with output:\n"
)
+ f"{try_json_stringify(run.outputs, '[outputs]')}"
)
def _on_chain_error(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(
f"{get_colored_text('[chain/error]', color='red')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] {run_type} run errored with error:\n"
)
+ f"{try_json_stringify(run.error, '[error]')}"
)
def _on_llm_start(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
inputs = (
{"prompts": [p.strip() for p in run.inputs["prompts"]]}
if "prompts" in run.inputs
else run.inputs
)
self.function_callback(
f"{get_colored_text('[llm/start]', color='green')} "
+ get_bolded_text(f"[{crumbs}] Entering LLM run with input:\n")
+ f"{try_json_stringify(inputs, '[inputs]')}"
)
def _on_llm_end(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[llm/end]', color='blue')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] Exiting LLM run with output:\n"
)
+ f"{try_json_stringify(run.outputs, '[response]')}"
)
def _on_llm_error(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[llm/error]', color='red')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] LLM run errored with error:\n"
)
+ f"{try_json_stringify(run.error, '[error]')}"
)
def _on_tool_start(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f'{get_colored_text("[tool/start]", color="green")} '
+ get_bolded_text(f"[{crumbs}] Entering Tool run with input:\n")
+ f'"{run.inputs["input"].strip()}"'
)
def _on_tool_end(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
if run.outputs:
self.function_callback(
f'{get_colored_text("[tool/end]", color="blue")} '
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] Exiting Tool run with output:\n"
)
+ f'"{run.outputs["output"].strip()}"'
)
def _on_tool_error(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[tool/error]', color='red')} "
+ get_bolded_text(f"[{crumbs}] [{elapsed(run)}] ")
+ f"Tool run errored with error:\n"
f"{run.error}"
)
class ConsoleCallbackHandler(FunctionCallbackHandler):
"""Tracer that prints to the console."""
name: str = "console_callback_handler"
def __init__(self, **kwargs: Any) -> None:
super().__init__(function=print, **kwargs)
| [
"langchain_core.utils.input.get_colored_text",
"langchain_core.utils.input.get_bolded_text"
] | [((588, 633), 'json.dumps', 'json.dumps', (['obj'], {'indent': '(2)', 'ensure_ascii': '(False)'}), '(obj, indent=2, ensure_ascii=False)\n', (598, 633), False, 'import json\n'), ((2591, 2659), 'langchain_core.utils.input.get_bolded_text', 'get_bolded_text', (['f"""[{crumbs}] Entering {run_type} run with input:\n"""'], {}), "(f'[{crumbs}] Entering {run_type} run with input:\\n')\n", (2606, 2659), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((3968, 4029), 'langchain_core.utils.input.get_bolded_text', 'get_bolded_text', (['f"""[{crumbs}] Entering LLM run with input:\n"""'], {}), "(f'[{crumbs}] Entering LLM run with input:\\n')\n", (3983, 4029), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((5061, 5123), 'langchain_core.utils.input.get_bolded_text', 'get_bolded_text', (['f"""[{crumbs}] Entering Tool run with input:\n"""'], {}), "(f'[{crumbs}] Entering Tool run with input:\\n')\n", (5076, 5123), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((2525, 2573), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[chain/start]"""'], {'color': '"""green"""'}), "('[chain/start]', color='green')\n", (2541, 2573), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((2915, 2960), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[chain/end]"""'], {'color': '"""blue"""'}), "('[chain/end]', color='blue')\n", (2931, 2960), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((3353, 3399), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[chain/error]"""'], {'color': '"""red"""'}), "('[chain/error]', color='red')\n", (3369, 3399), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((3904, 3950), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[llm/start]"""'], {'color': '"""green"""'}), "('[llm/start]', color='green')\n", (3920, 3950), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((4234, 4277), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[llm/end]"""'], {'color': '"""blue"""'}), "('[llm/end]', color='blue')\n", (4250, 4277), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((4617, 4661), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[llm/error]"""'], {'color': '"""red"""'}), "('[llm/error]', color='red')\n", (4633, 4661), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((4996, 5043), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[tool/start]"""'], {'color': '"""green"""'}), "('[tool/start]', color='green')\n", (5012, 5043), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((5744, 5789), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[tool/error]"""'], {'color': '"""red"""'}), "('[tool/error]', color='red')\n", (5760, 5789), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((5352, 5396), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[tool/end]"""'], {'color': '"""blue"""'}), "('[tool/end]', color='blue')\n", (5368, 5396), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n')] |
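# A minimal usage sketch for the tracer defined above, assuming it is importable
# as langchain_core.tracers.stdout.ConsoleCallbackHandler and that `chain` is any
# Runnable built elsewhere; nothing below comes from the original snippet.
from langchain_core.tracers.stdout import ConsoleCallbackHandler
def invoke_with_console_trace(chain, inputs):
    # Passing the handler through `config` prints every chain/LLM/tool start,
    # end and error event with breadcrumbs and elapsed time.
    return chain.invoke(inputs, config={"callbacks": [ConsoleCallbackHandler()]})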
from typing import Any, Dict, List, Union
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema.messages import BaseMessage, get_buffer_string
class ConversationBufferWindowMemory(BaseChatMemory):
"""Buffer for storing conversation memory inside a limited size window."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
    k: int = 5
    """Number of conversation turns (human/AI message pairs) to keep in the window."""
@property
def buffer(self) -> Union[str, List[BaseMessage]]:
"""String buffer of memory."""
return self.buffer_as_messages if self.return_messages else self.buffer_as_str
@property
def buffer_as_str(self) -> str:
"""Exposes the buffer as a string in case return_messages is True."""
messages = self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else []
return get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
@property
def buffer_as_messages(self) -> List[BaseMessage]:
"""Exposes the buffer as a list of messages in case return_messages is False."""
return self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else []
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
| [
"langchain.schema.messages.get_buffer_string"
] | [((899, 989), 'langchain.schema.messages.get_buffer_string', 'get_buffer_string', (['messages'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(messages, human_prefix=self.human_prefix, ai_prefix=self.\n ai_prefix)\n', (916, 989), False, 'from langchain.schema.messages import BaseMessage, get_buffer_string\n')] |
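# A minimal usage sketch for the window memory above, assuming the standard
# langchain.memory import path. With k=1 only the most recent human/AI
# exchange is kept in the history returned by load_memory_variables.
from langchain.memory import ConversationBufferWindowMemory
window_memory = ConversationBufferWindowMemory(k=1)
window_memory.save_context({"input": "hi"}, {"output": "hello"})
window_memory.save_context({"input": "how are you?"}, {"output": "doing fine"})
print(window_memory.load_memory_variables({}))  # only the second exchange remains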
from typing import Any, Dict, List, Optional, Type  # type: ignore
import langchain
from langchain import LLMChain, PromptTemplate
from langchain.chat_models.base import BaseChatModel
from langchain.experimental.autonomous_agents import AutoGPT
from langchain.tools.base import BaseTool
from langchain.vectorstores.base import VectorStoreRetriever
from sam.core.utils import logger
class AutoGptAgent:
agent: AutoGPT
def __init__(
self, ai_name: str, ai_role: str, memory: VectorStoreRetriever, llm: BaseChatModel, tools: List[BaseTool], **kwargs
):
self.agent = AutoGPT.from_llm_and_tools(
ai_name=ai_name,
ai_role=ai_role,
llm=llm,
memory=memory,
tools=tools,
)
def start(self, goals: List[str]):
        return self.agent.run(goals=goals)
| [
"langchain.experimental.autonomous_agents.AutoGPT.from_llm_and_tools"
] | [((434, 535), 'langchain.experimental.autonomous_agents.AutoGPT.from_llm_and_tools', 'AutoGPT.from_llm_and_tools', ([], {'ai_name': 'ai_name', 'ai_role': 'ai_role', 'llm': 'llm', 'memory': 'memory', 'tools': 'tools'}), '(ai_name=ai_name, ai_role=ai_role, llm=llm,\n memory=memory, tools=tools)\n', (460, 535), False, 'from langchain.experimental.autonomous_agents import AutoGPT\n')] |
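# A hedged usage sketch for the wrapper above; the retriever, chat model and
# tools below are placeholders that depend on the host project, so the calls
# are left as comments rather than executable code.
#   retriever = FAISS.from_texts([""], OpenAIEmbeddings()).as_retriever()
#   agent = AutoGptAgent(
#       ai_name="Sam",
#       ai_role="Assistant",
#       memory=retriever,
#       llm=ChatOpenAI(temperature=0),
#       tools=[WriteFileTool(), ReadFileTool()],
#   )
#   agent.start(["write a short greeting to greeting.txt"])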
#import os
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
import warnings
warnings.filterwarnings("ignore")
from langchain.agents.agent_toolkits import create_python_agent
from langchain.agents import load_tools, initialize_agent
from langchain.agents import AgentType
from langchain.tools.python.tool import PythonREPLTool
#from langchain.python import PythonREPL
from langchain.chat_models import ChatOpenAI
import langchain
llm = ChatOpenAI(temperature=0)
tools = load_tools(["llm-math", "wikipedia"], llm=llm)
customer_list = [["Harrison", "Chase"],
["Lang", "Chain"],
["Dolly", "Too"],
["Elle", "Elem"],
["Geoff", "Fusion"],
["Trance", "Former"],
["Jen", "Ayai"]]
def do_answer1():
langchain.debug = True
agent = create_python_agent(
llm,
tool=PythonREPLTool(),
verbose=True
)
answer = agent.run(f"""Sort these customers by \
last name and then first name \
and print the output: {customer_list}""")
print(answer)
langchain.debug = False
def do_answer2():
from langchain.agents import tool
from datetime import date
langchain.debug = True
@tool
    def time(text: str) -> str:
        """Returns today's date; use this for any \
        questions related to knowing today's date. \
        The input should always be an empty string, \
        and this function will always return today's \
        date - any date mathematics should occur \
        outside this function."""
return str(date.today())
agent = initialize_agent(
tools + [time],
llm,
agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
handle_parsing_errors=True,
verbose = True)
    try:
        result = agent("whats the date today?")
        print(result)
    except:  # noqa
        print("exception on external access")
langchain.debug = False
if __name__ == "__main__":
#do_answer1()
do_answer2()
| [
"langchain.agents.initialize_agent",
"langchain.tools.python.tool.PythonREPLTool",
"langchain.agents.load_tools",
"langchain.chat_models.ChatOpenAI"
] | [((128, 161), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (151, 161), False, 'import warnings\n'), ((489, 514), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (499, 514), False, 'from langchain.chat_models import ChatOpenAI\n'), ((523, 569), 'langchain.agents.load_tools', 'load_tools', (["['llm-math', 'wikipedia']"], {'llm': 'llm'}), "(['llm-math', 'wikipedia'], llm=llm)\n", (533, 569), False, 'from langchain.agents import load_tools, initialize_agent\n'), ((72, 85), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (83, 85), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((1666, 1800), 'langchain.agents.initialize_agent', 'initialize_agent', (['(tools + [time])', 'llm'], {'agent': 'AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION', 'handle_parsing_errors': '(True)', 'verbose': '(True)'}), '(tools + [time], llm, agent=AgentType.\n CHAT_ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True, verbose=True)\n', (1682, 1800), False, 'from langchain.agents import load_tools, initialize_agent\n'), ((936, 952), 'langchain.tools.python.tool.PythonREPLTool', 'PythonREPLTool', ([], {}), '()\n', (950, 952), False, 'from langchain.tools.python.tool import PythonREPLTool\n'), ((1638, 1650), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1648, 1650), False, 'from datetime import date\n')] |
from typing import List, Optional, Type
from langchain.memory import (
ChatMessageHistory,
ConversationBufferMemory,
ConversationSummaryMemory,
RedisChatMessageHistory,
RedisEntityStore,
VectorStoreRetrieverMemory,
)
class Memory:
@staticmethod
def messageHistory(path: str):
history = ChatMessageHistory()
return history
@staticmethod
def bufferMemory(path: str):
memory = ConversationBufferMemory()
return memory
    @staticmethod
    def chatSummary(path: str, llm):
        # ConversationSummaryMemory requires an LLM to maintain its rolling summary;
        # constructing it without one fails validation.
        memory = ConversationSummaryMemory(llm=llm)
        return memory
| [
"langchain.memory.ConversationSummaryMemory",
"langchain.memory.ConversationBufferMemory",
"langchain.memory.ChatMessageHistory"
] | [((329, 349), 'langchain.memory.ChatMessageHistory', 'ChatMessageHistory', ([], {}), '()\n', (347, 349), False, 'from langchain.memory import ChatMessageHistory, ConversationBufferMemory, ConversationSummaryMemory, RedisChatMessageHistory, RedisEntityStore, VectorStoreRetrieverMemory\n'), ((442, 468), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {}), '()\n', (466, 468), False, 'from langchain.memory import ChatMessageHistory, ConversationBufferMemory, ConversationSummaryMemory, RedisChatMessageHistory, RedisEntityStore, VectorStoreRetrieverMemory\n'), ((559, 586), 'langchain.memory.ConversationSummaryMemory', 'ConversationSummaryMemory', ([], {}), '()\n', (584, 586), False, 'from langchain.memory import ChatMessageHistory, ConversationBufferMemory, ConversationSummaryMemory, RedisChatMessageHistory, RedisEntityStore, VectorStoreRetrieverMemory\n')] |
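# A minimal usage sketch for the helpers above; the wrapper ignores its `path`
# argument, so any string works here.
history = Memory.messageHistory("unused")
history.add_user_message("hi")
history.add_ai_message("hello, how can I help?")
buffer_memory = Memory.bufferMemory("unused")
buffer_memory.save_context({"input": "hi"}, {"output": "hello"})
print(buffer_memory.load_memory_variables({}))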
"""Callback Handler that prints to std out."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, Optional
from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_core.utils import print_text
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
class StdOutCallbackHandler(BaseCallbackHandler):
"""Callback Handler that prints to std out."""
def __init__(self, color: Optional[str] = None) -> None:
"""Initialize callback handler."""
self.color = color
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain."""
class_name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
print(f"\n\n\033[1m> Entering new {class_name} chain...\033[0m") # noqa: T201
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain."""
print("\n\033[1m> Finished chain.\033[0m") # noqa: T201
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action."""
print_text(action.log, color=color or self.color)
def on_tool_end(
self,
output: Any,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation."""
output = str(output)
if observation_prefix is not None:
print_text(f"\n{observation_prefix}")
print_text(output, color=color or self.color)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}")
def on_text(
self,
text: str,
color: Optional[str] = None,
end: str = "",
**kwargs: Any,
) -> None:
"""Run when agent ends."""
print_text(text, color=color or self.color, end=end)
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on agent end."""
print_text(finish.log, color=color or self.color, end="\n")
| [
"langchain_core.utils.print_text"
] | [((1261, 1310), 'langchain_core.utils.print_text', 'print_text', (['action.log'], {'color': '(color or self.color)'}), '(action.log, color=color or self.color)\n', (1271, 1310), False, 'from langchain_core.utils import print_text\n'), ((1727, 1772), 'langchain_core.utils.print_text', 'print_text', (['output'], {'color': '(color or self.color)'}), '(output, color=color or self.color)\n', (1737, 1772), False, 'from langchain_core.utils import print_text\n'), ((2042, 2094), 'langchain_core.utils.print_text', 'print_text', (['text'], {'color': '(color or self.color)', 'end': 'end'}), '(text, color=color or self.color, end=end)\n', (2052, 2094), False, 'from langchain_core.utils import print_text\n'), ((2254, 2313), 'langchain_core.utils.print_text', 'print_text', (['finish.log'], {'color': '(color or self.color)', 'end': '"""\n"""'}), "(finish.log, color=color or self.color, end='\\n')\n", (2264, 2313), False, 'from langchain_core.utils import print_text\n'), ((1681, 1721), 'langchain_core.utils.print_text', 'print_text', (['f"""\n{observation_prefix}"""'], {}), '(f"""\n{observation_prefix}""")\n', (1691, 1721), False, 'from langchain_core.utils import print_text\n'), ((1820, 1849), 'langchain_core.utils.print_text', 'print_text', (['f"""\n{llm_prefix}"""'], {}), "(f'\\n{llm_prefix}')\n", (1830, 1849), False, 'from langchain_core.utils import print_text\n')] |
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.document_loaders.csv_loader import CSVLoader
from langchain_community.document_loaders import HNLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import UnstructuredHTMLLoader
from langchain_openai.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.chains import RetrievalQAWithSourcesChain
from langchain_openai.llms import OpenAI
from constant import openai
import os
os.environ['OPENAI_API_KEY'] = openai
loader = PyPDFLoader("attention is all you need.pdf")
data = loader.load()
# print(data[0])
loader = CSVLoader(file_path="job_placement.csv")
data = loader.load()
# print(data[0])
loader = HNLoader("https://news.ycombinator.com")
data = loader.load()
# print(data[0])
quote = "One machine can do the work of fifty ordinary humans. No machine can do the " \
        "work of one extraordinary human."
ct_splitter = CharacterTextSplitter(
separator='.',
chunk_size=24,
chunk_overlap=3
)
# docs = ct_splitter.split_text(quote)
# print(docs)
rc_splitter = RecursiveCharacterTextSplitter(
chunk_size=24,
chunk_overlap=3,
)
# docs = rc_splitter.split_text(quote)
# print(docs)
loader = UnstructuredHTMLLoader("data.html")
data = loader.load()
rc_splitter = RecursiveCharacterTextSplitter(
chunk_size=24,
chunk_overlap=3,
separators='.',
)
# docs = rc_splitter.split_documents(data)
# print(docs)
quote = "There is a kingdom of lychee fruit that are alive and thriving in Iceland, but they feel " \
"taken advantage of and are not fast enough for you."
splitter = RecursiveCharacterTextSplitter(
chunk_size=40,
chunk_overlap=10,
)
docs = splitter.split_text(quote)
embeddings = OpenAIEmbeddings(openai_api_key=openai)
vectordb = Chroma(
persist_directory="data",
embedding_function=embeddings
)
vectordb.persist()
docstorage = Chroma.from_texts(docs,embeddings)
qa = RetrievalQA.from_chain_type(
llm = OpenAI(model_name="gpt-3.5-turbo-instruct"),
chain_type="stuff",
retriever = docstorage.as_retriever()
)
# query = "Where do lychee fruit live?"
# print(qa.invoke(query))
quote = "There is a kingdom of lychee fruit that are alive and thriving in Iceland, but they feel " \
        "taken advantage of and are not fast enough for you."
qa1 = RetrievalQAWithSourcesChain.from_chain_type(
llm = OpenAI(model_name="gpt-3.5-turbo-instruct"),
chain_type="stuff",
retriever = docstorage.as_retriever(),
)
results = qa1({'question':'What is the primary architecture presented in the document?'},return_only_outputs=True)
print(results)
| [
"langchain_community.document_loaders.PyPDFLoader",
"langchain.text_splitter.CharacterTextSplitter",
"langchain_openai.llms.OpenAI",
"langchain_community.document_loaders.csv_loader.CSVLoader",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain_community.document_loaders.UnstructuredHTMLLoader",
"langchain_community.document_loaders.HNLoader",
"langchain_community.vectorstores.Chroma.from_texts",
"langchain_community.vectorstores.Chroma",
"langchain_openai.embeddings.OpenAIEmbeddings"
] | [((741, 785), 'langchain_community.document_loaders.PyPDFLoader', 'PyPDFLoader', (['"""attention is all you need.pdf"""'], {}), "('attention is all you need.pdf')\n", (752, 785), False, 'from langchain_community.document_loaders import PyPDFLoader\n'), ((838, 878), 'langchain_community.document_loaders.csv_loader.CSVLoader', 'CSVLoader', ([], {'file_path': '"""job_placement.csv"""'}), "(file_path='job_placement.csv')\n", (847, 878), False, 'from langchain_community.document_loaders.csv_loader import CSVLoader\n'), ((931, 971), 'langchain_community.document_loaders.HNLoader', 'HNLoader', (['"""https://news.ycombinator.com"""'], {}), "('https://news.ycombinator.com')\n", (939, 971), False, 'from langchain_community.document_loaders import HNLoader\n'), ((1166, 1234), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""."""', 'chunk_size': '(24)', 'chunk_overlap': '(3)'}), "(separator='.', chunk_size=24, chunk_overlap=3)\n", (1187, 1234), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1327, 1389), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(24)', 'chunk_overlap': '(3)'}), '(chunk_size=24, chunk_overlap=3)\n', (1357, 1389), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1473, 1508), 'langchain_community.document_loaders.UnstructuredHTMLLoader', 'UnstructuredHTMLLoader', (['"""data.html"""'], {}), "('data.html')\n", (1495, 1508), False, 'from langchain_community.document_loaders import UnstructuredHTMLLoader\n'), ((1548, 1626), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(24)', 'chunk_overlap': '(3)', 'separators': '"""."""'}), "(chunk_size=24, chunk_overlap=3, separators='.')\n", (1578, 1626), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1889, 1952), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(40)', 'chunk_overlap': '(10)'}), '(chunk_size=40, chunk_overlap=10)\n', (1919, 1952), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2020, 2059), 'langchain_openai.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'openai'}), '(openai_api_key=openai)\n', (2036, 2059), False, 'from langchain_openai.embeddings import OpenAIEmbeddings\n'), ((2074, 2137), 'langchain_community.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': '"""data"""', 'embedding_function': 'embeddings'}), "(persist_directory='data', embedding_function=embeddings)\n", (2080, 2137), False, 'from langchain_community.vectorstores import Chroma\n'), ((2189, 2224), 'langchain_community.vectorstores.Chroma.from_texts', 'Chroma.from_texts', (['docs', 'embeddings'], {}), '(docs, embeddings)\n', (2206, 2224), False, 'from langchain_community.vectorstores import Chroma\n'), ((2272, 2315), 'langchain_openai.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""'}), "(model_name='gpt-3.5-turbo-instruct')\n", (2278, 2315), False, 'from langchain_openai.llms import OpenAI\n'), ((2688, 2731), 'langchain_openai.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""'}), "(model_name='gpt-3.5-turbo-instruct')\n", (2694, 2731), False, 'from langchain_openai.llms import OpenAI\n')] |
from typing import Any, Dict, List, Literal, Optional, Union
from exa_py import Exa # type: ignore
from exa_py.api import HighlightsContentsOptions, TextContentsOptions # type: ignore
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.retrievers import BaseRetriever
from langchain_exa._utilities import initialize_client
def _get_metadata(result: Any) -> Dict[str, Any]:
"""Get the metadata from a result object."""
metadata = {
"title": result.title,
"url": result.url,
"id": result.id,
"score": result.score,
"published_date": result.published_date,
"author": result.author,
}
if getattr(result, "highlights"):
metadata["highlights"] = result.highlights
if getattr(result, "highlight_scores"):
metadata["highlight_scores"] = result.highlight_scores
return metadata
class ExaSearchRetriever(BaseRetriever):
"""Exa Search retriever."""
k: int = 10 # num_results
"""The number of search results to return."""
include_domains: Optional[List[str]] = None
"""A list of domains to include in the search."""
exclude_domains: Optional[List[str]] = None
"""A list of domains to exclude from the search."""
start_crawl_date: Optional[str] = None
"""The start date for the crawl (in YYYY-MM-DD format)."""
end_crawl_date: Optional[str] = None
"""The end date for the crawl (in YYYY-MM-DD format)."""
start_published_date: Optional[str] = None
"""The start date for when the document was published (in YYYY-MM-DD format)."""
end_published_date: Optional[str] = None
"""The end date for when the document was published (in YYYY-MM-DD format)."""
use_autoprompt: Optional[bool] = None
"""Whether to use autoprompt for the search."""
type: str = "neural"
"""The type of search, 'keyword' or 'neural'. Default: neural"""
highlights: Optional[Union[HighlightsContentsOptions, bool]] = None
"""Whether to set the page content to the highlights of the results."""
text_contents_options: Union[TextContentsOptions, Literal[True]] = True
"""How to set the page content of the results"""
client: Exa = Field(default=None)
exa_api_key: SecretStr = Field(default=None)
exa_base_url: Optional[str] = None
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate the environment."""
values = initialize_client(values)
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
response = self.client.search_and_contents( # type: ignore[misc]
query,
num_results=self.k,
text=self.text_contents_options,
highlights=self.highlights, # type: ignore
include_domains=self.include_domains,
exclude_domains=self.exclude_domains,
start_crawl_date=self.start_crawl_date,
end_crawl_date=self.end_crawl_date,
start_published_date=self.start_published_date,
end_published_date=self.end_published_date,
use_autoprompt=self.use_autoprompt,
)
results = response.results
return [
Document(
page_content=(result.text),
metadata=_get_metadata(result),
)
for result in results
]
| [
"langchain_exa._utilities.initialize_client",
"langchain_core.pydantic_v1.Field",
"langchain_core.pydantic_v1.root_validator"
] | [((2332, 2351), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (2337, 2351), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((2381, 2400), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (2386, 2400), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((2446, 2470), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2460, 2470), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((2585, 2610), 'langchain_exa._utilities.initialize_client', 'initialize_client', (['values'], {}), '(values)\n', (2602, 2610), False, 'from langchain_exa._utilities import initialize_client\n')] |
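# A minimal usage sketch for the retriever above, assuming EXA_API_KEY is set
# in the environment (an explicit exa_api_key can be passed instead); the
# query string is only an example.
from langchain_exa import ExaSearchRetriever
exa_retriever = ExaSearchRetriever(k=3, highlights=True)
for doc in exa_retriever.get_relevant_documents("frameworks for LLM tool use"):
    print(doc.metadata["url"], doc.page_content[:80])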
from __future__ import annotations
from typing import Any, TypeVar
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT
T = TypeVar("T")
class OutputFixingParser(BaseOutputParser[T]):
"""Wraps a parser and tries to fix parsing errors."""
@classmethod
def is_lc_serializable(cls) -> bool:
return True
parser: BaseOutputParser[T]
"""The parser to use to parse the output."""
# Should be an LLMChain but we want to avoid top-level imports from langchain.chains
retry_chain: Any
"""The LLMChain to use to retry the completion."""
max_retries: int = 1
"""The maximum number of times to retry the parse."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
parser: BaseOutputParser[T],
prompt: BasePromptTemplate = NAIVE_FIX_PROMPT,
max_retries: int = 1,
) -> OutputFixingParser[T]:
"""Create an OutputFixingParser from a language model and a parser.
Args:
llm: llm to use for fixing
parser: parser to use for parsing
prompt: prompt to use for fixing
max_retries: Maximum number of retries to parse.
Returns:
OutputFixingParser
"""
from langchain.chains.llm import LLMChain
chain = LLMChain(llm=llm, prompt=prompt)
return cls(parser=parser, retry_chain=chain, max_retries=max_retries)
def parse(self, completion: str) -> T:
retries = 0
while retries <= self.max_retries:
try:
return self.parser.parse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = self.retry_chain.run(
instructions=self.parser.get_format_instructions(),
completion=completion,
error=repr(e),
)
raise OutputParserException("Failed to parse")
async def aparse(self, completion: str) -> T:
retries = 0
while retries <= self.max_retries:
try:
return await self.parser.aparse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = await self.retry_chain.arun(
instructions=self.parser.get_format_instructions(),
completion=completion,
error=repr(e),
)
raise OutputParserException("Failed to parse")
def get_format_instructions(self) -> str:
return self.parser.get_format_instructions()
@property
def _type(self) -> str:
return "output_fixing"
| [
"langchain_core.exceptions.OutputParserException",
"langchain.chains.llm.LLMChain"
] | [((371, 383), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (378, 383), False, 'from typing import Any, TypeVar\n'), ((1545, 1577), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (1553, 1577), False, 'from langchain.chains.llm import LLMChain\n'), ((2266, 2306), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Failed to parse"""'], {}), "('Failed to parse')\n", (2287, 2306), False, 'from langchain_core.exceptions import OutputParserException\n'), ((2938, 2978), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Failed to parse"""'], {}), "('Failed to parse')\n", (2959, 2978), False, 'from langchain_core.exceptions import OutputParserException\n')] |
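# A minimal usage sketch for the fixing parser above: wrap a strict Pydantic
# parser so malformed model output gets a repair pass through the LLM. The
# schema, model and misformatted string are placeholders.
from typing import List
from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import OutputFixingParser, PydanticOutputParser
from langchain_core.pydantic_v1 import BaseModel
class Actor(BaseModel):
    name: str
    film_names: List[str]
base_parser = PydanticOutputParser(pydantic_object=Actor)
fixing_parser = OutputFixingParser.from_llm(parser=base_parser, llm=ChatOpenAI(temperature=0))
print(fixing_parser.parse("{'name': 'Tom Hanks', 'film_names': ['Forrest Gump']}"))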
from __future__ import annotations
from typing import Any, TypeVar
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT
T = TypeVar("T")
class OutputFixingParser(BaseOutputParser[T]):
"""Wraps a parser and tries to fix parsing errors."""
@classmethod
def is_lc_serializable(cls) -> bool:
return True
parser: BaseOutputParser[T]
"""The parser to use to parse the output."""
# Should be an LLMChain but we want to avoid top-level imports from langchain.chains
retry_chain: Any
"""The LLMChain to use to retry the completion."""
max_retries: int = 1
"""The maximum number of times to retry the parse."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
parser: BaseOutputParser[T],
prompt: BasePromptTemplate = NAIVE_FIX_PROMPT,
max_retries: int = 1,
) -> OutputFixingParser[T]:
"""Create an OutputFixingParser from a language model and a parser.
Args:
llm: llm to use for fixing
parser: parser to use for parsing
prompt: prompt to use for fixing
max_retries: Maximum number of retries to parse.
Returns:
OutputFixingParser
"""
from langchain.chains.llm import LLMChain
chain = LLMChain(llm=llm, prompt=prompt)
return cls(parser=parser, retry_chain=chain, max_retries=max_retries)
def parse(self, completion: str) -> T:
retries = 0
while retries <= self.max_retries:
try:
return self.parser.parse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = self.retry_chain.run(
instructions=self.parser.get_format_instructions(),
completion=completion,
error=repr(e),
)
raise OutputParserException("Failed to parse")
async def aparse(self, completion: str) -> T:
retries = 0
while retries <= self.max_retries:
try:
return await self.parser.aparse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = await self.retry_chain.arun(
instructions=self.parser.get_format_instructions(),
completion=completion,
error=repr(e),
)
raise OutputParserException("Failed to parse")
def get_format_instructions(self) -> str:
return self.parser.get_format_instructions()
@property
def _type(self) -> str:
return "output_fixing"
| [
"langchain_core.exceptions.OutputParserException",
"langchain.chains.llm.LLMChain"
] | [((371, 383), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (378, 383), False, 'from typing import Any, TypeVar\n'), ((1545, 1577), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (1553, 1577), False, 'from langchain.chains.llm import LLMChain\n'), ((2266, 2306), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Failed to parse"""'], {}), "('Failed to parse')\n", (2287, 2306), False, 'from langchain_core.exceptions import OutputParserException\n'), ((2938, 2978), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Failed to parse"""'], {}), "('Failed to parse')\n", (2959, 2978), False, 'from langchain_core.exceptions import OutputParserException\n')] |
from __future__ import annotations
from typing import Any, TypeVar
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT
T = TypeVar("T")
class OutputFixingParser(BaseOutputParser[T]):
"""Wraps a parser and tries to fix parsing errors."""
@classmethod
def is_lc_serializable(cls) -> bool:
return True
parser: BaseOutputParser[T]
"""The parser to use to parse the output."""
# Should be an LLMChain but we want to avoid top-level imports from langchain.chains
retry_chain: Any
"""The LLMChain to use to retry the completion."""
max_retries: int = 1
"""The maximum number of times to retry the parse."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
parser: BaseOutputParser[T],
prompt: BasePromptTemplate = NAIVE_FIX_PROMPT,
max_retries: int = 1,
) -> OutputFixingParser[T]:
"""Create an OutputFixingParser from a language model and a parser.
Args:
llm: llm to use for fixing
parser: parser to use for parsing
prompt: prompt to use for fixing
max_retries: Maximum number of retries to parse.
Returns:
OutputFixingParser
"""
from langchain.chains.llm import LLMChain
chain = LLMChain(llm=llm, prompt=prompt)
return cls(parser=parser, retry_chain=chain, max_retries=max_retries)
def parse(self, completion: str) -> T:
retries = 0
while retries <= self.max_retries:
try:
return self.parser.parse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = self.retry_chain.run(
instructions=self.parser.get_format_instructions(),
completion=completion,
error=repr(e),
)
raise OutputParserException("Failed to parse")
async def aparse(self, completion: str) -> T:
retries = 0
while retries <= self.max_retries:
try:
return await self.parser.aparse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = await self.retry_chain.arun(
instructions=self.parser.get_format_instructions(),
completion=completion,
error=repr(e),
)
raise OutputParserException("Failed to parse")
def get_format_instructions(self) -> str:
return self.parser.get_format_instructions()
@property
def _type(self) -> str:
return "output_fixing"
| [
"langchain_core.exceptions.OutputParserException",
"langchain.chains.llm.LLMChain"
] | [((371, 383), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (378, 383), False, 'from typing import Any, TypeVar\n'), ((1545, 1577), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (1553, 1577), False, 'from langchain.chains.llm import LLMChain\n'), ((2266, 2306), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Failed to parse"""'], {}), "('Failed to parse')\n", (2287, 2306), False, 'from langchain_core.exceptions import OutputParserException\n'), ((2938, 2978), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Failed to parse"""'], {}), "('Failed to parse')\n", (2959, 2978), False, 'from langchain_core.exceptions import OutputParserException\n')] |
from __future__ import annotations
from typing import Any, TypeVar
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT
T = TypeVar("T")
class OutputFixingParser(BaseOutputParser[T]):
"""Wraps a parser and tries to fix parsing errors."""
@classmethod
def is_lc_serializable(cls) -> bool:
return True
parser: BaseOutputParser[T]
"""The parser to use to parse the output."""
# Should be an LLMChain but we want to avoid top-level imports from langchain.chains
retry_chain: Any
"""The LLMChain to use to retry the completion."""
max_retries: int = 1
"""The maximum number of times to retry the parse."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
parser: BaseOutputParser[T],
prompt: BasePromptTemplate = NAIVE_FIX_PROMPT,
max_retries: int = 1,
) -> OutputFixingParser[T]:
"""Create an OutputFixingParser from a language model and a parser.
Args:
llm: llm to use for fixing
parser: parser to use for parsing
prompt: prompt to use for fixing
max_retries: Maximum number of retries to parse.
Returns:
OutputFixingParser
"""
from langchain.chains.llm import LLMChain
chain = LLMChain(llm=llm, prompt=prompt)
return cls(parser=parser, retry_chain=chain, max_retries=max_retries)
def parse(self, completion: str) -> T:
retries = 0
while retries <= self.max_retries:
try:
return self.parser.parse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = self.retry_chain.run(
instructions=self.parser.get_format_instructions(),
completion=completion,
error=repr(e),
)
raise OutputParserException("Failed to parse")
async def aparse(self, completion: str) -> T:
retries = 0
while retries <= self.max_retries:
try:
return await self.parser.aparse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = await self.retry_chain.arun(
instructions=self.parser.get_format_instructions(),
completion=completion,
error=repr(e),
)
raise OutputParserException("Failed to parse")
def get_format_instructions(self) -> str:
return self.parser.get_format_instructions()
@property
def _type(self) -> str:
return "output_fixing"
| [
"langchain_core.exceptions.OutputParserException",
"langchain.chains.llm.LLMChain"
] | [((371, 383), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (378, 383), False, 'from typing import Any, TypeVar\n'), ((1545, 1577), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (1553, 1577), False, 'from langchain.chains.llm import LLMChain\n'), ((2266, 2306), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Failed to parse"""'], {}), "('Failed to parse')\n", (2287, 2306), False, 'from langchain_core.exceptions import OutputParserException\n'), ((2938, 2978), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Failed to parse"""'], {}), "('Failed to parse')\n", (2959, 2978), False, 'from langchain_core.exceptions import OutputParserException\n')] |
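A minimal usage sketch for the OutputFixingParser defined above, assuming an OpenAI API key is set in the environment; the built-in DatetimeOutputParser is an arbitrary choice of wrapped parser:
from langchain.llms import OpenAI
from langchain.output_parsers import DatetimeOutputParser
base_parser = DatetimeOutputParser()
fixing_parser = OutputFixingParser.from_llm(
    llm=OpenAI(temperature=0),
    parser=base_parser,
    max_retries=2,
)
# parse() first tries the wrapped parser; on OutputParserException the retry_chain
# asks the LLM to repair the completion, up to max_retries times.
print(fixing_parser.parse("2024-03-01T00:00:00.000000Z"))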
from __future__ import annotations
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_env
from langchain.vectorstores.base import VectorStore
if TYPE_CHECKING:
from meilisearch import Client
def _create_client(
client: Optional[Client] = None,
url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Client:
try:
import meilisearch
except ImportError:
raise ImportError(
"Could not import meilisearch python package. "
"Please install it with `pip install meilisearch`."
)
if not client:
url = url or get_from_env("url", "MEILI_HTTP_ADDR")
try:
api_key = api_key or get_from_env("api_key", "MEILI_MASTER_KEY")
except Exception:
pass
client = meilisearch.Client(url=url, api_key=api_key)
elif not isinstance(client, meilisearch.Client):
raise ValueError(
f"client should be an instance of meilisearch.Client, "
f"got {type(client)}"
)
try:
client.version()
except ValueError as e:
raise ValueError(f"Failed to connect to Meilisearch: {e}")
return client
class Meilisearch(VectorStore):
"""`Meilisearch` vector store.
To use this, you need to have `meilisearch` python package installed,
and a running Meilisearch instance.
To learn more about Meilisearch Python, refer to the in-depth
Meilisearch Python documentation: https://meilisearch.github.io/meilisearch-python/.
See the following documentation for how to run a Meilisearch instance:
https://www.meilisearch.com/docs/learn/getting_started/quick_start.
Example:
.. code-block:: python
from langchain.vectorstores import Meilisearch
from langchain.embeddings.openai import OpenAIEmbeddings
import meilisearch
# api_key is optional; provide it if your meilisearch instance requires it
client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***')
embeddings = OpenAIEmbeddings()
vectorstore = Meilisearch(
embedding=embeddings,
client=client,
index_name='langchain_demo',
text_key='text')
"""
def __init__(
self,
embedding: Embeddings,
client: Optional[Client] = None,
url: Optional[str] = None,
api_key: Optional[str] = None,
index_name: str = "langchain-demo",
text_key: str = "text",
metadata_key: str = "metadata",
):
"""Initialize with Meilisearch client."""
client = _create_client(client=client, url=url, api_key=api_key)
self._client = client
self._index_name = index_name
self._embedding = embedding
self._text_key = text_key
self._metadata_key = metadata_key
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embedding and add them to the vector store.
Args:
texts (Iterable[str]): Iterable of strings/text to add to the vectorstore.
metadatas (Optional[List[dict]]): Optional list of metadata.
Defaults to None.
            ids (Optional[List[str]]): Optional list of IDs.
Defaults to None.
Returns:
List[str]: List of IDs of the texts added to the vectorstore.
"""
texts = list(texts)
# Embed and create the documents
docs = []
if ids is None:
ids = [uuid.uuid4().hex for _ in texts]
if metadatas is None:
metadatas = [{} for _ in texts]
embedding_vectors = self._embedding.embed_documents(texts)
for i, text in enumerate(texts):
id = ids[i]
metadata = metadatas[i]
metadata[self._text_key] = text
embedding = embedding_vectors[i]
docs.append(
{
"id": id,
"_vectors": embedding,
f"{self._metadata_key}": metadata,
}
)
# Send to Meilisearch
self._client.index(str(self._index_name)).add_documents(docs)
return ids
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return meilisearch documents most similar to the query.
Args:
query (str): Query text for which to find similar documents.
k (int): Number of documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
            List[Document]: List of Documents most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(
query=query,
k=k,
filter=filter,
kwargs=kwargs,
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return meilisearch documents most similar to the query, along with scores.
Args:
query (str): Query text for which to find similar documents.
k (int): Number of documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
            List[Tuple[Document, float]]: List of Documents most similar to the
            query text, with a semantic score for each.
"""
_query = self._embedding.embed_query(query)
docs = self.similarity_search_by_vector_with_scores(
embedding=_query,
k=k,
filter=filter,
kwargs=kwargs,
)
return docs
def similarity_search_by_vector_with_scores(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return meilisearch documents most similar to embedding vector.
Args:
embedding (List[float]): Embedding to look up similar documents.
k (int): Number of documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
            List[Tuple[Document, float]]: List of Documents most similar to the
            query vector, with a semantic score for each.
"""
docs = []
results = self._client.index(str(self._index_name)).search(
"", {"vector": embedding, "limit": k, "filter": filter}
)
for result in results["hits"]:
metadata = result[self._metadata_key]
if self._text_key in metadata:
text = metadata.pop(self._text_key)
semantic_score = result["_semanticScore"]
docs.append(
(Document(page_content=text, metadata=metadata), semantic_score)
)
return docs
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return meilisearch documents most similar to embedding vector.
Args:
embedding (List[float]): Embedding to look up similar documents.
k (int): Number of documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
            List[Document]: List of Documents most similar to the query vector.
"""
docs = self.similarity_search_by_vector_with_scores(
embedding=embedding,
k=k,
filter=filter,
kwargs=kwargs,
)
return [doc for doc, _ in docs]
@classmethod
def from_texts(
cls: Type[Meilisearch],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
client: Optional[Client] = None,
url: Optional[str] = None,
api_key: Optional[str] = None,
index_name: str = "langchain-demo",
ids: Optional[List[str]] = None,
text_key: Optional[str] = "text",
metadata_key: Optional[str] = "metadata",
**kwargs: Any,
) -> Meilisearch:
"""Construct Meilisearch wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided Meilisearch index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import Meilisearch
from langchain.embeddings import OpenAIEmbeddings
import meilisearch
                # api_key is optional; provide it if your Meilisearch instance requires it
client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***')
embeddings = OpenAIEmbeddings()
            docsearch = Meilisearch.from_texts(
                texts=["Meilisearch is a fast search engine"],
                embedding=embeddings,
                client=client,
            )
"""
client = _create_client(client=client, url=url, api_key=api_key)
vectorstore = cls(
embedding=embedding,
client=client,
index_name=index_name,
)
vectorstore.add_texts(
texts=texts,
metadatas=metadatas,
ids=ids,
text_key=text_key,
metadata_key=metadata_key,
)
return vectorstore
| [
"langchain.utils.get_from_env",
"langchain.docstore.document.Document"
] | [((965, 1009), 'meilisearch.Client', 'meilisearch.Client', ([], {'url': 'url', 'api_key': 'api_key'}), '(url=url, api_key=api_key)\n', (983, 1009), False, 'import meilisearch\n'), ((776, 814), 'langchain.utils.get_from_env', 'get_from_env', (['"""url"""', '"""MEILI_HTTP_ADDR"""'], {}), "('url', 'MEILI_HTTP_ADDR')\n", (788, 814), False, 'from langchain.utils import get_from_env\n'), ((861, 904), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""MEILI_MASTER_KEY"""'], {}), "('api_key', 'MEILI_MASTER_KEY')\n", (873, 904), False, 'from langchain.utils import get_from_env\n'), ((3872, 3884), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3882, 3884), False, 'import uuid\n'), ((7512, 7558), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (7520, 7558), False, 'from langchain.docstore.document import Document\n')] |
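A short end-to-end sketch of the Meilisearch store above, assuming a Meilisearch server is reachable at http://127.0.0.1:7700 and an OpenAI API key is set; the texts and index name are placeholders:
from langchain.embeddings.openai import OpenAIEmbeddings
# Embed and index two sample strings, then run a similarity query against them.
docsearch = Meilisearch.from_texts(
    texts=["MLflow is an open source MLOps platform.", "Meilisearch is a fast search engine."],
    embedding=OpenAIEmbeddings(),
    url="http://127.0.0.1:7700",
    api_key="***",
    index_name="langchain-demo",
)
for doc in docsearch.similarity_search("What is MLflow?", k=1):
    print(doc.page_content)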
import os
from operator import itemgetter
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableLambda
import mlflow
# Uncomment the following to use the full abilities of langchain autologging
# %pip install `langchain_community>=0.0.16`
# These two libraries enable autologging to log text analysis related artifacts
# %pip install textstat spacy
assert "OPENAI_API_KEY" in os.environ, "Please set the OPENAI_API_KEY environment variable."
# Enable mlflow langchain autologging
# Note: We only support auto-logging models that do not contain retrievers
mlflow.langchain.autolog(
log_input_examples=True,
log_model_signatures=True,
log_models=True,
log_inputs_outputs=True,
registered_model_name="lc_model",
)
prompt_with_history_str = """
Here is a history between you and a human: {chat_history}
Now, please answer this question: {question}
"""
prompt_with_history = PromptTemplate(
input_variables=["chat_history", "question"], template=prompt_with_history_str
)
def extract_question(input):
return input[-1]["content"]
def extract_history(input):
return input[:-1]
llm = OpenAI(temperature=0.9)
# Build a chain with LCEL
chain_with_history = (
{
"question": itemgetter("messages") | RunnableLambda(extract_question),
"chat_history": itemgetter("messages") | RunnableLambda(extract_history),
}
| prompt_with_history
| llm
| StrOutputParser()
)
inputs = {"messages": [{"role": "user", "content": "Who owns MLflow?"}]}
print(chain_with_history.invoke(inputs))
# sample output:
# "1. Databricks\n2. Microsoft\n3. Google\n4. Amazon\n\nEnter your answer: 1\n\n
# Correct! MLflow is an open source project developed by Databricks. ...
# We automatically log the model and trace related artifacts
# A model with name `lc_model` is registered, we can load it back as a PyFunc model
model_name = "lc_model"
model_version = 1
loaded_model = mlflow.pyfunc.load_model(f"models:/{model_name}/{model_version}")
print(loaded_model.predict(inputs))
| [
"langchain.schema.output_parser.StrOutputParser",
"langchain.llms.OpenAI",
"langchain.prompts.PromptTemplate",
"langchain.schema.runnable.RunnableLambda"
] | [((689, 845), 'mlflow.langchain.autolog', 'mlflow.langchain.autolog', ([], {'log_input_examples': '(True)', 'log_model_signatures': '(True)', 'log_models': '(True)', 'log_inputs_outputs': '(True)', 'registered_model_name': '"""lc_model"""'}), "(log_input_examples=True, log_model_signatures=True,\n log_models=True, log_inputs_outputs=True, registered_model_name='lc_model')\n", (713, 845), False, 'import mlflow\n'), ((1026, 1125), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['chat_history', 'question']", 'template': 'prompt_with_history_str'}), "(input_variables=['chat_history', 'question'], template=\n prompt_with_history_str)\n", (1040, 1125), False, 'from langchain.prompts import PromptTemplate\n'), ((1250, 1273), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)'}), '(temperature=0.9)\n', (1256, 1273), False, 'from langchain.llms import OpenAI\n'), ((2049, 2114), 'mlflow.pyfunc.load_model', 'mlflow.pyfunc.load_model', (['f"""models:/{model_name}/{model_version}"""'], {}), "(f'models:/{model_name}/{model_version}')\n", (2073, 2114), False, 'import mlflow\n'), ((1539, 1556), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (1554, 1556), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((1350, 1372), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (1360, 1372), False, 'from operator import itemgetter\n'), ((1375, 1407), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_question'], {}), '(extract_question)\n', (1389, 1407), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((1433, 1455), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (1443, 1455), False, 'from operator import itemgetter\n'), ((1458, 1489), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_history'], {}), '(extract_history)\n', (1472, 1489), False, 'from langchain.schema.runnable import RunnableLambda\n')] |
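A quick check of the two helper functions used in the LCEL chain above, using the same message shape as `inputs` (the values are illustrative):
msgs = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello! How can I help?"},
    {"role": "user", "content": "Who owns MLflow?"},
]
print(extract_question(msgs))  # -> "Who owns MLflow?"
print(extract_history(msgs))   # -> the first two messages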
## This is a fork of / based on https://gist.github.com/wiseman/4a706428eaabf4af1002a07a114f61d6
from io import StringIO
import sys
import os
from typing import Dict, Optional
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents.tools import Tool
from langchain.llms import OpenAI
base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')
model_name = os.environ.get('MODEL_NAME', 'gpt-3.5-turbo')
class PythonREPL:
"""Simulates a standalone Python REPL."""
def __init__(self):
pass
def run(self, command: str) -> str:
"""Run command and returns anything printed."""
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
try:
exec(command, globals())
sys.stdout = old_stdout
output = mystdout.getvalue()
except Exception as e:
sys.stdout = old_stdout
output = str(e)
return output
llm = OpenAI(temperature=0.0, openai_api_base=base_path, model_name=model_name)
python_repl = Tool(
"Python REPL",
PythonREPL().run,
"""A Python shell. Use this to execute python commands. Input should be a valid python command.
If you expect output it should be printed out.""",
)
tools = [python_repl]
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
agent.run("What is the 10th fibonacci number?")
| [
"langchain.agents.initialize_agent",
"langchain.llms.OpenAI"
] | [((348, 409), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_BASE"""', '"""http://localhost:8080/v1"""'], {}), "('OPENAI_API_BASE', 'http://localhost:8080/v1')\n", (362, 409), False, 'import os\n'), ((423, 468), 'os.environ.get', 'os.environ.get', (['"""MODEL_NAME"""', '"""gpt-3.5-turbo"""'], {}), "('MODEL_NAME', 'gpt-3.5-turbo')\n", (437, 468), False, 'import os\n'), ((1003, 1076), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.0)', 'openai_api_base': 'base_path', 'model_name': 'model_name'}), '(temperature=0.0, openai_api_base=base_path, model_name=model_name)\n', (1009, 1076), False, 'from langchain.llms import OpenAI\n'), ((1345, 1424), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': '"""zero-shot-react-description"""', 'verbose': '(True)'}), "(tools, llm, agent='zero-shot-react-description', verbose=True)\n", (1361, 1424), False, 'from langchain.agents import initialize_agent\n'), ((741, 751), 'io.StringIO', 'StringIO', ([], {}), '()\n', (749, 751), False, 'from io import StringIO\n')] |
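The PythonREPL tool above can also be exercised on its own, without the agent or an LLM; a tiny sketch:
repl = PythonREPL()
# run() executes the command and returns whatever it printed to stdout.
print(repl.run("print(sum(range(10)))"))  # "45"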
import time
from typing import List
import pandas as pd
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import VectorStore
from mindsdb.integrations.handlers.rag_handler.settings import (
PersistedVectorStoreSaver,
PersistedVectorStoreSaverConfig,
RAGBaseParameters,
VectorStoreFactory,
df_to_documents,
get_chroma_client,
load_embeddings_model,
url_to_documents,
)
from mindsdb.utilities import log
logger = log.getLogger(__name__)
def validate_document(doc) -> bool:
"""Check an individual document."""
# Example checks
if not isinstance(doc, Document):
return False
if not doc.page_content:
return False
return True
def validate_documents(documents) -> bool:
"""Validate document list format."""
if not isinstance(documents, list):
return False
if not documents:
return False
# Check fields/format of a document
return all([validate_document(doc) for doc in documents])
class RAGIngestor:
"""A class for converting a dataframe and/or url to a vectorstore embedded with a given embeddings model"""
def __init__(
self,
args: RAGBaseParameters,
df: pd.DataFrame,
):
self.args = args
self.df = df
self.embeddings_model_name = args.embeddings_model_name
self.vector_store = VectorStoreFactory.get_vectorstore_class(
args.vector_store_name
)
def split_documents(self, chunk_size, chunk_overlap) -> list:
        # Load documents and split them into chunks
logger.info(f"Loading documents from input data")
documents = []
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
if self.df is not None:
# if user provides a dataframe, load documents from dataframe
documents.extend(
df_to_documents(
df=self.df,
page_content_columns=self.args.context_columns,
url_column_name=self.args.url_column_name,
)
)
if self.args.url:
# if user provides a url, load documents from url
documents.extend(url_to_documents(self.args.url))
n_tokens = sum([len(doc.page_content) for doc in documents])
# split documents into chunks of text
texts = text_splitter.split_documents(documents)
logger.info(f"Loaded {len(documents)} documents from input data")
logger.info(f"Total number of tokens: {n_tokens}")
logger.info(f"Split into {len(texts)} chunks of text (tokens)")
return texts
def create_db_from_documents(self, documents, embeddings_model) -> VectorStore:
"""Create DB from documents."""
if self.args.vector_store_name == "chromadb":
return self.vector_store.from_documents(
documents=documents,
embedding=embeddings_model,
client=get_chroma_client(
persist_directory=self.args.vector_store_storage_path
),
collection_name=self.args.collection_name,
)
else:
return self.create_db_from_texts(documents, embeddings_model)
def create_db_from_texts(self, documents, embeddings_model) -> VectorStore:
"""Create DB from text content."""
texts = [doc.page_content for doc in documents]
metadata = [doc.metadata for doc in documents]
return self.vector_store.from_texts(
texts=texts, embedding=embeddings_model, metadatas=metadata
)
@staticmethod
def _create_batch_embeddings(documents: List[Document], embeddings_batch_size):
"""
        Yield batches of documents, embeddings_batch_size documents at a time.
"""
for i in range(0, len(documents), embeddings_batch_size):
yield documents[i: i + embeddings_batch_size]
def embeddings_to_vectordb(self) -> None:
"""Create vectorstore from documents and store locally."""
start_time = time.time()
# Load documents and splits in chunks (if not in evaluation_type mode)
documents = self.split_documents(
chunk_size=self.args.chunk_size, chunk_overlap=self.args.chunk_overlap
)
# Load embeddings model
embeddings_model = load_embeddings_model(
self.embeddings_model_name, self.args.use_gpu
)
logger.info(f"Creating vectorstore from documents")
if not validate_documents(documents):
raise ValueError("Invalid documents")
try:
db = self.create_db_from_documents(documents, embeddings_model)
except Exception as e:
raise Exception(
f"Error loading embeddings to {self.args.vector_store_name}: {e}"
)
config = PersistedVectorStoreSaverConfig(
vector_store_name=self.args.vector_store_name,
vector_store=db,
persist_directory=self.args.vector_store_storage_path,
collection_name=self.args.collection_name,
)
vector_store_saver = PersistedVectorStoreSaver(config)
vector_store_saver.save_vector_store(db)
db = None # Free up memory
end_time = time.time()
elapsed_time = round(end_time - start_time)
logger.info(f"Finished creating {self.args.vector_store_name} from texts, it has been "
f"persisted to {self.args.vector_store_storage_path}")
time_minutes = round(elapsed_time / 60)
if time_minutes > 1:
logger.info(f"Elapsed time: {time_minutes} minutes")
else:
logger.info(f"Elapsed time: {elapsed_time} seconds")
| [
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((539, 562), 'mindsdb.utilities.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (552, 562), False, 'from mindsdb.utilities import log\n'), ((1455, 1519), 'mindsdb.integrations.handlers.rag_handler.settings.VectorStoreFactory.get_vectorstore_class', 'VectorStoreFactory.get_vectorstore_class', (['args.vector_store_name'], {}), '(args.vector_store_name)\n', (1495, 1519), False, 'from mindsdb.integrations.handlers.rag_handler.settings import PersistedVectorStoreSaver, PersistedVectorStoreSaverConfig, RAGBaseParameters, VectorStoreFactory, df_to_documents, get_chroma_client, load_embeddings_model, url_to_documents\n'), ((1761, 1848), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap)\n', (1791, 1848), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((4190, 4201), 'time.time', 'time.time', ([], {}), '()\n', (4199, 4201), False, 'import time\n'), ((4477, 4545), 'mindsdb.integrations.handlers.rag_handler.settings.load_embeddings_model', 'load_embeddings_model', (['self.embeddings_model_name', 'self.args.use_gpu'], {}), '(self.embeddings_model_name, self.args.use_gpu)\n', (4498, 4545), False, 'from mindsdb.integrations.handlers.rag_handler.settings import PersistedVectorStoreSaver, PersistedVectorStoreSaverConfig, RAGBaseParameters, VectorStoreFactory, df_to_documents, get_chroma_client, load_embeddings_model, url_to_documents\n'), ((4990, 5193), 'mindsdb.integrations.handlers.rag_handler.settings.PersistedVectorStoreSaverConfig', 'PersistedVectorStoreSaverConfig', ([], {'vector_store_name': 'self.args.vector_store_name', 'vector_store': 'db', 'persist_directory': 'self.args.vector_store_storage_path', 'collection_name': 'self.args.collection_name'}), '(vector_store_name=self.args.\n vector_store_name, vector_store=db, persist_directory=self.args.\n vector_store_storage_path, collection_name=self.args.collection_name)\n', (5021, 5193), False, 'from mindsdb.integrations.handlers.rag_handler.settings import PersistedVectorStoreSaver, PersistedVectorStoreSaverConfig, RAGBaseParameters, VectorStoreFactory, df_to_documents, get_chroma_client, load_embeddings_model, url_to_documents\n'), ((5273, 5306), 'mindsdb.integrations.handlers.rag_handler.settings.PersistedVectorStoreSaver', 'PersistedVectorStoreSaver', (['config'], {}), '(config)\n', (5298, 5306), False, 'from mindsdb.integrations.handlers.rag_handler.settings import PersistedVectorStoreSaver, PersistedVectorStoreSaverConfig, RAGBaseParameters, VectorStoreFactory, df_to_documents, get_chroma_client, load_embeddings_model, url_to_documents\n'), ((5414, 5425), 'time.time', 'time.time', ([], {}), '()\n', (5423, 5425), False, 'import time\n'), ((2019, 2141), 'mindsdb.integrations.handlers.rag_handler.settings.df_to_documents', 'df_to_documents', ([], {'df': 'self.df', 'page_content_columns': 'self.args.context_columns', 'url_column_name': 'self.args.url_column_name'}), '(df=self.df, page_content_columns=self.args.context_columns,\n url_column_name=self.args.url_column_name)\n', (2034, 2141), False, 'from mindsdb.integrations.handlers.rag_handler.settings import PersistedVectorStoreSaver, PersistedVectorStoreSaverConfig, RAGBaseParameters, VectorStoreFactory, df_to_documents, get_chroma_client, load_embeddings_model, url_to_documents\n'), ((2349, 2380), 'mindsdb.integrations.handlers.rag_handler.settings.url_to_documents', 
'url_to_documents', (['self.args.url'], {}), '(self.args.url)\n', (2365, 2380), False, 'from mindsdb.integrations.handlers.rag_handler.settings import PersistedVectorStoreSaver, PersistedVectorStoreSaverConfig, RAGBaseParameters, VectorStoreFactory, df_to_documents, get_chroma_client, load_embeddings_model, url_to_documents\n'), ((3121, 3193), 'mindsdb.integrations.handlers.rag_handler.settings.get_chroma_client', 'get_chroma_client', ([], {'persist_directory': 'self.args.vector_store_storage_path'}), '(persist_directory=self.args.vector_store_storage_path)\n', (3138, 3193), False, 'from mindsdb.integrations.handlers.rag_handler.settings import PersistedVectorStoreSaver, PersistedVectorStoreSaverConfig, RAGBaseParameters, VectorStoreFactory, df_to_documents, get_chroma_client, load_embeddings_model, url_to_documents\n')] |
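To see just the chunking step that split_documents above builds on, a self-contained sketch using only the langchain pieces already imported in the record (the sample text is made up):
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
# One long document gets split into overlapping chunks of at most 200 characters.
docs = [Document(page_content="MindsDB brings machine learning into the database layer. " * 20)]
splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=20)
chunks = splitter.split_documents(docs)
print(len(chunks), len(chunks[0].page_content))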
"""
Multilingual retrieval based conversation system backed by ChatGPT
"""
import argparse
import os
from colossalqa.data_loader.document_loader import DocumentLoader
from colossalqa.memory import ConversationBufferWithSummary
from colossalqa.retriever import CustomRetriever
from langchain import LLMChain
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import OpenAI
from langchain.prompts.prompt import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Multilingual retrieval based conversation system backed by ChatGPT")
parser.add_argument("--open_ai_key_path", type=str, default=None, help="path to the model")
parser.add_argument(
"--sql_file_path", type=str, default=None, help="path to the a empty folder for storing sql files for indexing"
)
args = parser.parse_args()
if not os.path.exists(args.sql_file_path):
os.makedirs(args.sql_file_path)
# Setup openai key
# Set env var OPENAI_API_KEY or load from a file
openai_key = open(args.open_ai_key_path).read()
os.environ["OPENAI_API_KEY"] = openai_key
llm = OpenAI(temperature=0.6)
information_retriever = CustomRetriever(k=3, sql_file_path=args.sql_file_path, verbose=True)
# VectorDB
embedding = HuggingFaceEmbeddings(
model_name="moka-ai/m3e-base", model_kwargs={"device": "cpu"}, encode_kwargs={"normalize_embeddings": False}
)
# Define memory with summarization ability
memory = ConversationBufferWithSummary(llm=llm)
# Load data to vector store
print("Select files for constructing retriever")
documents = []
while True:
        file = input("Enter a file path or press Enter directly without input to exit:").strip()
if file == "":
break
data_name = input("Enter a short description of the data:")
retriever_data = DocumentLoader([[file, data_name.replace(" ", "_")]]).all_data
# Split
text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=0)
splits = text_splitter.split_documents(retriever_data)
documents.extend(splits)
# Create retriever
information_retriever.add_documents(docs=documents, cleanup="incremental", mode="by_source", embedding=embedding)
prompt_template = """Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If the answer cannot be inferred based on the given context, please don't share false information.
Use the context and chat history to respond to the human's input at the end or carry on the conversation. You should generate one response only. No following up is needed.
context:
{context}
chat history
{chat_history}
Human: {question}
Assistant:"""
prompt_template_disambiguate = """You are a helpful, respectful and honest assistant. You always follow the instruction.
Please replace any ambiguous references in the given sentence with the specific names or entities mentioned in the chat history or just output the original sentence if no chat history is provided or if the sentence doesn't contain ambiguous references. Your output should be the disambiguated sentence itself (in the same line as "disambiguated sentence:") and contain nothing else.
Here is an example:
Chat history:
Human: I have a friend, Mike. Do you know him?
Assistant: Yes, I know a person named Mike
sentence: What's his favorite food?
disambiguated sentence: What's Mike's favorite food?
END OF EXAMPLE
Chat history:
{chat_history}
sentence: {input}
disambiguated sentence:"""
PROMPT = PromptTemplate(template=prompt_template, input_variables=["question", "chat_history", "context"])
memory.initiate_document_retrieval_chain(
llm,
PROMPT,
information_retriever,
chain_type_kwargs={
"chat_history": "",
},
)
PROMPT_DISAMBIGUATE = PromptTemplate(
template=prompt_template_disambiguate, input_variables=["chat_history", "input"]
)
llm_chain = RetrievalQA.from_chain_type(
llm=llm,
verbose=False,
chain_type="stuff",
retriever=information_retriever,
chain_type_kwargs={"prompt": PROMPT, "memory": memory},
)
llm_chain_disambiguate = LLMChain(llm=llm, prompt=PROMPT_DISAMBIGUATE)
def disambiguity(input):
out = llm_chain_disambiguate.run({"input": input, "chat_history": memory.buffer})
return out.split("\n")[0]
information_retriever.set_rephrase_handler(disambiguity)
while True:
user_input = input("User: ")
if " end " in user_input:
print("Agent: Happy to chat with you :)")
break
agent_response = llm_chain.run(user_input)
agent_response = agent_response.split("\n")[0]
print(f"Agent: {agent_response}")
| [
"langchain.prompts.prompt.PromptTemplate",
"langchain.LLMChain",
"langchain.embeddings.HuggingFaceEmbeddings",
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.llms.OpenAI"
] | [((599, 709), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Multilingual retrieval based conversation system backed by ChatGPT"""'}), "(description=\n 'Multilingual retrieval based conversation system backed by ChatGPT')\n", (622, 709), False, 'import argparse\n'), ((1258, 1281), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.6)'}), '(temperature=0.6)\n', (1264, 1281), False, 'from langchain.llms import OpenAI\n'), ((1311, 1379), 'colossalqa.retriever.CustomRetriever', 'CustomRetriever', ([], {'k': '(3)', 'sql_file_path': 'args.sql_file_path', 'verbose': '(True)'}), '(k=3, sql_file_path=args.sql_file_path, verbose=True)\n', (1326, 1379), False, 'from colossalqa.retriever import CustomRetriever\n'), ((1411, 1546), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""moka-ai/m3e-base"""', 'model_kwargs': "{'device': 'cpu'}", 'encode_kwargs': "{'normalize_embeddings': False}"}), "(model_name='moka-ai/m3e-base', model_kwargs={'device':\n 'cpu'}, encode_kwargs={'normalize_embeddings': False})\n", (1432, 1546), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((1618, 1656), 'colossalqa.memory.ConversationBufferWithSummary', 'ConversationBufferWithSummary', ([], {'llm': 'llm'}), '(llm=llm)\n', (1647, 1656), False, 'from colossalqa.memory import ConversationBufferWithSummary\n'), ((3951, 4052), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['question', 'chat_history', 'context']"}), "(template=prompt_template, input_variables=['question',\n 'chat_history', 'context'])\n", (3965, 4052), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((4260, 4361), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template_disambiguate', 'input_variables': "['chat_history', 'input']"}), "(template=prompt_template_disambiguate, input_variables=[\n 'chat_history', 'input'])\n", (4274, 4361), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((4388, 4556), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'llm', 'verbose': '(False)', 'chain_type': '"""stuff"""', 'retriever': 'information_retriever', 'chain_type_kwargs': "{'prompt': PROMPT, 'memory': memory}"}), "(llm=llm, verbose=False, chain_type='stuff',\n retriever=information_retriever, chain_type_kwargs={'prompt': PROMPT,\n 'memory': memory})\n", (4415, 4556), False, 'from langchain.chains import RetrievalQA\n'), ((4625, 4670), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'PROMPT_DISAMBIGUATE'}), '(llm=llm, prompt=PROMPT_DISAMBIGUATE)\n', (4633, 4670), False, 'from langchain import LLMChain\n'), ((996, 1030), 'os.path.exists', 'os.path.exists', (['args.sql_file_path'], {}), '(args.sql_file_path)\n', (1010, 1030), False, 'import os\n'), ((1040, 1071), 'os.makedirs', 'os.makedirs', (['args.sql_file_path'], {}), '(args.sql_file_path)\n', (1051, 1071), False, 'import os\n'), ((2114, 2177), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(200)', 'chunk_overlap': '(0)'}), '(chunk_size=200, chunk_overlap=0)\n', (2144, 2177), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n')] |
from langchain.document_loaders import PyMuPDFLoader
from langchain.retrievers import ArxivRetriever
def scrape_pdf_with_pymupdf(url) -> str:
"""Scrape a pdf with pymupdf
Args:
url (str): The url of the pdf to scrape
Returns:
str: The text scraped from the pdf
"""
loader = PyMuPDFLoader(url)
doc = loader.load()
return str(doc)
def scrape_pdf_with_arxiv(query) -> str:
"""Scrape a pdf with arxiv
    The default document length is 70000 characters (roughly 15 pages); pass None for no limit.
Args:
query (str): The query to search for
Returns:
str: The text scraped from the pdf
"""
retriever = ArxivRetriever(load_max_docs=2, doc_content_chars_max=None)
docs = retriever.get_relevant_documents(query=query)
return docs[0].page_content | [
"langchain.document_loaders.PyMuPDFLoader",
"langchain.retrievers.ArxivRetriever"
] | [((314, 332), 'langchain.document_loaders.PyMuPDFLoader', 'PyMuPDFLoader', (['url'], {}), '(url)\n', (327, 332), False, 'from langchain.document_loaders import PyMuPDFLoader\n'), ((662, 721), 'langchain.retrievers.ArxivRetriever', 'ArxivRetriever', ([], {'load_max_docs': '(2)', 'doc_content_chars_max': 'None'}), '(load_max_docs=2, doc_content_chars_max=None)\n', (676, 721), False, 'from langchain.retrievers import ArxivRetriever\n')] |
from langchain.document_loader import TelegramChatApiLoader
from application.parser.remote.base import BaseRemote
class TelegramChatApiRemote(BaseRemote):
def _init_parser(self, *args, **load_kwargs):
self.loader = TelegramChatApiLoader(**load_kwargs)
return {}
def parse_file(self, *args, **load_kwargs):
return | [
"langchain.document_loader.TelegramChatApiLoader"
] | [((228, 264), 'langchain.document_loader.TelegramChatApiLoader', 'TelegramChatApiLoader', ([], {}), '(**load_kwargs)\n', (249, 264), False, 'from langchain.document_loader import TelegramChatApiLoader\n')] |
from langchain import tools
from langchain.agents import Tool
from langchain.agents.load_tools import _BASE_TOOLS, _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS
from langchain.tools.json.tool import JsonSpec
from langflow.interface.importing.utils import import_class
from langflow.interface.tools.custom import PythonFunction, PythonFunctionTool
FILE_TOOLS = {"JsonSpec": JsonSpec}
CUSTOM_TOOLS = {
"Tool": Tool,
"PythonFunctionTool": PythonFunctionTool,
"PythonFunction": PythonFunction,
}
OTHER_TOOLS = {tool: import_class(f"langchain_community.tools.{tool}") for tool in tools.__all__}
ALL_TOOLS_NAMES = {
**_BASE_TOOLS,
**_LLM_TOOLS, # type: ignore
**{k: v[0] for k, v in _EXTRA_LLM_TOOLS.items()}, # type: ignore
**{k: v[0] for k, v in _EXTRA_OPTIONAL_TOOLS.items()},
**CUSTOM_TOOLS,
**FILE_TOOLS, # type: ignore
**OTHER_TOOLS,
}
| [
"langchain.agents.load_tools._EXTRA_OPTIONAL_TOOLS.items",
"langchain.agents.load_tools._EXTRA_LLM_TOOLS.items"
] | [((533, 582), 'langflow.interface.importing.utils.import_class', 'import_class', (['f"""langchain_community.tools.{tool}"""'], {}), "(f'langchain_community.tools.{tool}')\n", (545, 582), False, 'from langflow.interface.importing.utils import import_class\n'), ((711, 735), 'langchain.agents.load_tools._EXTRA_LLM_TOOLS.items', '_EXTRA_LLM_TOOLS.items', ([], {}), '()\n', (733, 735), False, 'from langchain.agents.load_tools import _BASE_TOOLS, _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n'), ((781, 810), 'langchain.agents.load_tools._EXTRA_OPTIONAL_TOOLS.items', '_EXTRA_OPTIONAL_TOOLS.items', ([], {}), '()\n', (808, 810), False, 'from langchain.agents.load_tools import _BASE_TOOLS, _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n')] |
from templates.common.suffix import suffix
from templates.common.format_instructions import format_instructions
from templates.common.docs_system_instructions import docs_system_instructions
from langchain.schema import (
# AIMessage,
HumanMessage,
SystemMessage
)
from langchain.tools.json.tool import JsonSpec
from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
from langchain.llms.openai import OpenAI
from langchain.agents import create_json_agent, ZeroShotAgent, AgentExecutor
from langchain.chains import LLMChain
from config.config import config
import openai # required
from dotenv import load_dotenv
load_dotenv()
class OpenAPIExplorerTool:
@staticmethod
def create_tools(docs):
json_spec = JsonSpec(dict_=docs)
json_toolkit = JsonToolkit(spec=json_spec)
tools = json_toolkit.get_tools()
return tools
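# Agent that explores the provided API docs through JSON tools with a ZeroShotAgent and returns the code block extracted from its answer.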
class PipedreamOpenAPIAgent:
def __init__(self, docs, templates, auth_example, parsed_common_files):
system_instructions = format_template(
f"{templates.system_instructions(auth_example, parsed_common_files)}\n{docs_system_instructions}")
tools = OpenAPIExplorerTool.create_tools(docs)
tool_names = [tool.name for tool in tools]
prompt_template = ZeroShotAgent.create_prompt(
tools=tools,
prefix=system_instructions,
suffix=suffix,
format_instructions=format_instructions,
input_variables=['input', 'agent_scratchpad']
)
llm_chain = LLMChain(llm=get_llm(), prompt=prompt_template)
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
verbose = True if config['logging']['level'] == 'DEBUG' else False
self.agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=verbose)
def run(self, input):
try:
result = self.agent_executor.run(input)
except Exception as e:
result = str(e)
if "I don't know" in result:
return "I don't know"
if '```' not in result:
raise e
return format_result(result)
def format_template(text):
return text.replace("{", "{{").replace("}", "}}") # escape curly braces
def format_result(result):
if '```' in result:
if '```javascript' in result:
result = result.split('```javascript')[1].split('```')[0].strip()
else:
result = result.split('```')[1].split('```')[0].strip()
return result
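# Compose the user prompt, appending the scraped content of any referenced docs URLs.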
def create_user_prompt(prompt, urls_content):
if len(urls_content) == 0:
return prompt + "\n\n"
user_prompt = f"{prompt}\n\n## API docs\n\n"
for item in urls_content:
user_prompt += f"\n\n### {item['url']}\n\n{item['content']}"
return user_prompt + "\n\n"
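# Select the chat model backend (Azure OpenAI or OpenAI) based on the loaded config.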
def get_llm():
if config['openai_api_type'] == "azure":
azure_config = config["azure"]
return AzureChatOpenAI(deployment_name=azure_config['deployment_name'],
model_name=azure_config["model"], temperature=config["temperature"], request_timeout=300)
else:
openai_config = config["openai"]
print(f"Using OpenAI API: {openai_config['model']}")
return ChatOpenAI(
model_name=openai_config["model"], temperature=config["temperature"])
def ask_agent(prompt, docs, templates, auth_example, parsed_common_files, urls_content):
agent = PipedreamOpenAPIAgent(
docs, templates, auth_example, parsed_common_files)
user_prompt = create_user_prompt(prompt, urls_content)
result = agent.run(user_prompt)
return result
def no_docs(prompt, templates, auth_example, parsed_common_files, urls_content, normal_order=True):
user_prompt = create_user_prompt(prompt, urls_content)
pd_instructions = format_template(
templates.system_instructions(auth_example, parsed_common_files))
result = get_llm()(messages=[
SystemMessage(content="You are the most intelligent software engineer in the world. You carefully provide accurate, factual, thoughtful, nuanced code, and are brilliant at reasoning. Follow all of the instructions below — they are all incredibly important. This code will be shipped directly to production, so it's important that it's accurate and complete."),
HumanMessage(content=user_prompt +
pd_instructions if normal_order else pd_instructions+user_prompt),
])
return format_result(result.content)
| [
"langchain.agents.AgentExecutor.from_agent_and_tools",
"langchain.agents.agent_toolkits.json.toolkit.JsonToolkit",
"langchain.agents.ZeroShotAgent.create_prompt",
"langchain.agents.ZeroShotAgent",
"langchain.chat_models.ChatOpenAI",
"langchain.schema.HumanMessage",
"langchain.schema.SystemMessage",
"langchain.chat_models.AzureChatOpenAI",
"langchain.tools.json.tool.JsonSpec"
] | [((701, 714), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (712, 714), False, 'from dotenv import load_dotenv\n'), ((810, 830), 'langchain.tools.json.tool.JsonSpec', 'JsonSpec', ([], {'dict_': 'docs'}), '(dict_=docs)\n', (818, 830), False, 'from langchain.tools.json.tool import JsonSpec\n'), ((854, 881), 'langchain.agents.agent_toolkits.json.toolkit.JsonToolkit', 'JsonToolkit', ([], {'spec': 'json_spec'}), '(spec=json_spec)\n', (865, 881), False, 'from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit\n'), ((1343, 1524), 'langchain.agents.ZeroShotAgent.create_prompt', 'ZeroShotAgent.create_prompt', ([], {'tools': 'tools', 'prefix': 'system_instructions', 'suffix': 'suffix', 'format_instructions': 'format_instructions', 'input_variables': "['input', 'agent_scratchpad']"}), "(tools=tools, prefix=system_instructions, suffix\n =suffix, format_instructions=format_instructions, input_variables=[\n 'input', 'agent_scratchpad'])\n", (1370, 1524), False, 'from langchain.agents import create_json_agent, ZeroShotAgent, AgentExecutor\n'), ((1670, 1730), 'langchain.agents.ZeroShotAgent', 'ZeroShotAgent', ([], {'llm_chain': 'llm_chain', 'allowed_tools': 'tool_names'}), '(llm_chain=llm_chain, allowed_tools=tool_names)\n', (1683, 1730), False, 'from langchain.agents import create_json_agent, ZeroShotAgent, AgentExecutor\n'), ((1837, 1914), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'agent', 'tools': 'tools', 'verbose': 'verbose'}), '(agent=agent, tools=tools, verbose=verbose)\n', (1871, 1914), False, 'from langchain.agents import create_json_agent, ZeroShotAgent, AgentExecutor\n'), ((3038, 3201), 'langchain.chat_models.AzureChatOpenAI', 'AzureChatOpenAI', ([], {'deployment_name': "azure_config['deployment_name']", 'model_name': "azure_config['model']", 'temperature': "config['temperature']", 'request_timeout': '(300)'}), "(deployment_name=azure_config['deployment_name'], model_name\n =azure_config['model'], temperature=config['temperature'],\n request_timeout=300)\n", (3053, 3201), False, 'from langchain.chat_models import ChatOpenAI, AzureChatOpenAI\n'), ((3351, 3436), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': "openai_config['model']", 'temperature': "config['temperature']"}), "(model_name=openai_config['model'], temperature=config['temperature']\n )\n", (3361, 3436), False, 'from langchain.chat_models import ChatOpenAI, AzureChatOpenAI\n'), ((4061, 4430), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': '"""You are the most intelligent software engineer in the world. You carefully provide accurate, factual, thoughtful, nuanced code, and are brilliant at reasoning. Follow all of the instructions below — they are all incredibly important. This code will be shipped directly to production, so it's important that it's accurate and complete."""'}), '(content=\n "You are the most intelligent software engineer in the world. You carefully provide accurate, factual, thoughtful, nuanced code, and are brilliant at reasoning. Follow all of the instructions below — they are all incredibly important. This code will be shipped directly to production, so it's important that it's accurate and complete."\n )\n', (4074, 4430), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((4430, 4537), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '(user_prompt + pd_instructions if normal_order else pd_instructions +\n user_prompt)'}), '(content=user_prompt + pd_instructions if normal_order else \n pd_instructions + user_prompt)\n', (4442, 4537), False, 'from langchain.schema import HumanMessage, SystemMessage\n')]
import os
import threading
from chainlit.config import config
from chainlit.logger import logger
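# Enable LangChain's SQLite LLM cache when project caching is enabled and run-level caching is not disabled.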
def init_lc_cache():
use_cache = config.project.cache is True and config.run.no_cache is False
if use_cache:
try:
import langchain
except ImportError:
return
from langchain.cache import SQLiteCache
from langchain.globals import set_llm_cache
if config.project.lc_cache_path is not None:
set_llm_cache(SQLiteCache(database_path=config.project.lc_cache_path))
if not os.path.exists(config.project.lc_cache_path):
logger.info(
f"LangChain cache created at: {config.project.lc_cache_path}"
)
_cache = {}
_cache_lock = threading.Lock()
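# Thread-safe in-memory memoization decorator keyed on the function name and its arguments.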
def cache(func):
def wrapper(*args, **kwargs):
# Create a cache key based on the function name, arguments, and keyword arguments
cache_key = (
(func.__name__,) + args + tuple((k, v) for k, v in sorted(kwargs.items()))
)
with _cache_lock:
# Check if the result is already in the cache
if cache_key not in _cache:
# If not, call the function and store the result in the cache
_cache[cache_key] = func(*args, **kwargs)
return _cache[cache_key]
return wrapper
| [
"langchain.cache.SQLiteCache"
] | [((767, 783), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (781, 783), False, 'import threading\n'), ((487, 542), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': 'config.project.lc_cache_path'}), '(database_path=config.project.lc_cache_path)\n', (498, 542), False, 'from langchain.cache import SQLiteCache\n'), ((564, 608), 'os.path.exists', 'os.path.exists', (['config.project.lc_cache_path'], {}), '(config.project.lc_cache_path)\n', (578, 608), False, 'import os\n'), ((626, 700), 'chainlit.logger.logger.info', 'logger.info', (['f"""LangChain cache created at: {config.project.lc_cache_path}"""'], {}), "(f'LangChain cache created at: {config.project.lc_cache_path}')\n", (637, 700), False, 'from chainlit.logger import logger\n')] |
import json
from typing import Any, List, Tuple
import requests
from taskweaver.plugin import Plugin, register_plugin
# response entry format: (title, url, snippet)
ResponseEntry = Tuple[str, str, str]
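# Fetch the given URLs, convert the HTML to text, split it into overlapping chunks, index them with sentence-transformer embeddings in FAISS, and return the top-k chunks most similar to the query.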
def browse_page(
query: str,
urls: List[str],
top_k: int = 3,
chunk_size: int = 1000,
chunk_overlap: int = 250,
) -> list[dict[str, Any]]:
try:
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import AsyncHtmlLoader
from langchain_community.document_transformers import Html2TextTransformer
except ImportError:
raise ImportError("Please install langchain/langchain-community first.")
loader = AsyncHtmlLoader(web_path=urls)
docs = loader.load()
html2text = Html2TextTransformer()
docs_transformed = html2text.transform_documents(docs)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
)
# Split
splits = text_splitter.split_documents(docs_transformed)
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
vector_store = FAISS.from_documents(
splits,
HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2"),
)
result = vector_store.similarity_search(
query=query,
k=top_k,
)
chunks = [
{
"metadata": r.metadata,
"snippet": r.page_content,
}
for r in result
]
return chunks
@register_plugin
class WebSearch(Plugin):
def search_query(self, query: str) -> List[ResponseEntry]:
api_provider = self.config.get("api_provider", "google_custom_search")
result_count = int(self.config.get("result_count", 3))
if api_provider == "google_custom_search":
return self._search_google_custom_search(query, cnt=result_count)
elif api_provider == "bing":
return self._search_bing(query, cnt=result_count)
else:
raise ValueError("Invalid API provider. Please check your config file.")
def __call__(self, queries: List[str], browse: bool = True) -> str:
query_results = []
query_urls = set()
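        # Collect results for each query, de-duplicating by URL across queries.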
for query in queries:
query_results.extend([r for r in self.search_query(query) if r[1] not in query_urls])
query_urls.update([r[1] for r in query_results])
if not browse:
return f"WebSearch has done searching for `{queries}`.\n" + self.ctx.wrap_text_with_delimiter_temporal(
"\n```json\n" + json.dumps(query_results, indent=4) + "```\n",
)
else:
return f"WebSearch has done searching for `{queries}`.\n" + self.ctx.wrap_text_with_delimiter_temporal(
"\n```json\n" + json.dumps(browse_page(",".join(queries), list(query_urls)), indent=4) + "```\n",
)
def _search_google_custom_search(self, query: str, cnt: int) -> List[ResponseEntry]:
api_key = self.config.get("google_api_key")
search_engine_id = self.config.get("google_search_engine_id")
url = f"https://www.googleapis.com/customsearch/v1?key={api_key}&cx={search_engine_id}&q={query}"
if cnt > 0:
url += f"&num={cnt}"
response = requests.get(url)
result_list: List[ResponseEntry] = []
for item in response.json()["items"]:
result_list.append((item["title"], item["link"], item["snippet"]))
return result_list
def _search_bing(self, query: str, cnt: int) -> List[ResponseEntry]:
api_key = self.config.get("bing_api_key")
url = f"https://api.bing.microsoft.com/v7.0/search?q={query}"
if cnt > 0:
url += f"&count={cnt}"
response = requests.get(url, headers={"Ocp-Apim-Subscription-Key": api_key})
result_list: List[ResponseEntry] = []
for item in response.json()["webPages"]["value"]:
result_list.append((item["name"], item["url"], item["snippet"]))
return result_list
| [
"langchain_community.document_transformers.Html2TextTransformer",
"langchain_community.document_loaders.AsyncHtmlLoader",
"langchain_community.embeddings.HuggingFaceEmbeddings",
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((725, 755), 'langchain_community.document_loaders.AsyncHtmlLoader', 'AsyncHtmlLoader', ([], {'web_path': 'urls'}), '(web_path=urls)\n', (740, 755), False, 'from langchain_community.document_loaders import AsyncHtmlLoader\n'), ((798, 820), 'langchain_community.document_transformers.Html2TextTransformer', 'Html2TextTransformer', ([], {}), '()\n', (818, 820), False, 'from langchain_community.document_transformers import Html2TextTransformer\n'), ((901, 988), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap)\n', (931, 988), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1272, 1324), 'langchain_community.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""all-MiniLM-L6-v2"""'}), "(model_name='all-MiniLM-L6-v2')\n", (1293, 1324), False, 'from langchain_community.embeddings import HuggingFaceEmbeddings\n'), ((3355, 3372), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3367, 3372), False, 'import requests\n'), ((3839, 3904), 'requests.get', 'requests.get', (['url'], {'headers': "{'Ocp-Apim-Subscription-Key': api_key}"}), "(url, headers={'Ocp-Apim-Subscription-Key': api_key})\n", (3851, 3904), False, 'import requests\n'), ((2646, 2681), 'json.dumps', 'json.dumps', (['query_results'], {'indent': '(4)'}), '(query_results, indent=4)\n', (2656, 2681), False, 'import json\n')] |