| code (stringlengths 141–79.4k) | apis (sequencelengths 1–23) | extract_api (stringlengths 126–73.2k) |
| --- | --- | --- |

"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional
import numpy as np
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Extra
from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
class HypotheticalDocumentEmbedder(Chain, Embeddings):
"""Generate hypothetical document for query, and then embed that.
Based on https://arxiv.org/abs/2212.10496
"""
base_embeddings: Embeddings
llm_chain: LLMChain
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Input keys for Hyde's LLM chain."""
return self.llm_chain.input_keys
@property
def output_keys(self) -> List[str]:
"""Output keys for Hyde's LLM chain."""
return self.llm_chain.output_keys
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call the base embeddings."""
return self.base_embeddings.embed_documents(texts)
def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]:
"""Combine embeddings into final embeddings."""
return list(np.array(embeddings).mean(axis=0))
def embed_query(self, text: str) -> List[float]:
"""Generate a hypothetical document and embedded it."""
var_name = self.llm_chain.input_keys[0]
result = self.llm_chain.generate([{var_name: text}])
documents = [generation.text for generation in result.generations[0]]
embeddings = self.embed_documents(documents)
return self.combine_embeddings(embeddings)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""Call the internal llm chain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
return self.llm_chain(inputs, callbacks=_run_manager.get_child())
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
base_embeddings: Embeddings,
prompt_key: Optional[str] = None,
custom_prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> HypotheticalDocumentEmbedder:
"""Load and use LLMChain with either a specific prompt key or custom prompt."""
if custom_prompt is not None:
prompt = custom_prompt
elif prompt_key is not None and prompt_key in PROMPT_MAP:
prompt = PROMPT_MAP[prompt_key]
else:
raise ValueError(
f"Must specify prompt_key if custom_prompt not provided. Should be one "
f"of {list(PROMPT_MAP.keys())}."
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
@property
def _chain_type(self) -> str:
return "hyde_chain"
| [
"langchain.chains.hyde.prompts.PROMPT_MAP.keys",
"langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager",
"langchain.chains.llm.LLMChain"
] | [((3148, 3180), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (3156, 3180), False, 'from langchain.chains.llm import LLMChain\n'), ((2258, 2303), 'langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (2301, 2303), False, 'from langchain_core.callbacks import CallbackManagerForChainRun\n'), ((1580, 1600), 'numpy.array', 'np.array', (['embeddings'], {}), '(embeddings)\n', (1588, 1600), True, 'import numpy as np\n'), ((3091, 3108), 'langchain.chains.hyde.prompts.PROMPT_MAP.keys', 'PROMPT_MAP.keys', ([], {}), '()\n', (3106, 3108), False, 'from langchain.chains.hyde.prompts import PROMPT_MAP\n')] |
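
A minimal usage sketch for the HyDE embedder above — not part of the extracted file; it assumes an OpenAI-backed LLM and embeddings, and that "web_search" is one of the keys in PROMPT_MAP:
# Hypothetical usage sketch; the model choices and the "web_search" prompt key are assumptions.
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.llms import OpenAI
hyde_embedder = HypotheticalDocumentEmbedder.from_llm(
    llm=OpenAI(temperature=0),
    base_embeddings=OpenAIEmbeddings(),
    prompt_key="web_search",
)
# Generates a hypothetical answer document, embeds it, and returns the mean vector.
query_vector = hyde_embedder.embed_query("What are the health benefits of green tea?")
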
"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional
import numpy as np
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Extra
from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
class HypotheticalDocumentEmbedder(Chain, Embeddings):
"""Generate hypothetical document for query, and then embed that.
Based on https://arxiv.org/abs/2212.10496
"""
base_embeddings: Embeddings
llm_chain: LLMChain
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Input keys for Hyde's LLM chain."""
return self.llm_chain.input_keys
@property
def output_keys(self) -> List[str]:
"""Output keys for Hyde's LLM chain."""
return self.llm_chain.output_keys
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call the base embeddings."""
return self.base_embeddings.embed_documents(texts)
def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]:
"""Combine embeddings into final embeddings."""
return list(np.array(embeddings).mean(axis=0))
def embed_query(self, text: str) -> List[float]:
"""Generate a hypothetical document and embedded it."""
var_name = self.llm_chain.input_keys[0]
result = self.llm_chain.generate([{var_name: text}])
documents = [generation.text for generation in result.generations[0]]
embeddings = self.embed_documents(documents)
return self.combine_embeddings(embeddings)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""Call the internal llm chain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
return self.llm_chain(inputs, callbacks=_run_manager.get_child())
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
base_embeddings: Embeddings,
prompt_key: Optional[str] = None,
custom_prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> HypotheticalDocumentEmbedder:
"""Load and use LLMChain with either a specific prompt key or custom prompt."""
if custom_prompt is not None:
prompt = custom_prompt
elif prompt_key is not None and prompt_key in PROMPT_MAP:
prompt = PROMPT_MAP[prompt_key]
else:
raise ValueError(
f"Must specify prompt_key if custom_prompt not provided. Should be one "
f"of {list(PROMPT_MAP.keys())}."
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
@property
def _chain_type(self) -> str:
return "hyde_chain"
| [
"langchain.chains.hyde.prompts.PROMPT_MAP.keys",
"langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager",
"langchain.chains.llm.LLMChain"
] | [((3148, 3180), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (3156, 3180), False, 'from langchain.chains.llm import LLMChain\n'), ((2258, 2303), 'langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (2301, 2303), False, 'from langchain_core.callbacks import CallbackManagerForChainRun\n'), ((1580, 1600), 'numpy.array', 'np.array', (['embeddings'], {}), '(embeddings)\n', (1588, 1600), True, 'import numpy as np\n'), ((3091, 3108), 'langchain.chains.hyde.prompts.PROMPT_MAP.keys', 'PROMPT_MAP.keys', ([], {}), '()\n', (3106, 3108), False, 'from langchain.chains.hyde.prompts import PROMPT_MAP\n')] |
"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional
import numpy as np
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Extra
from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
class HypotheticalDocumentEmbedder(Chain, Embeddings):
"""Generate hypothetical document for query, and then embed that.
Based on https://arxiv.org/abs/2212.10496
"""
base_embeddings: Embeddings
llm_chain: LLMChain
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Input keys for Hyde's LLM chain."""
return self.llm_chain.input_keys
@property
def output_keys(self) -> List[str]:
"""Output keys for Hyde's LLM chain."""
return self.llm_chain.output_keys
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call the base embeddings."""
return self.base_embeddings.embed_documents(texts)
def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]:
"""Combine embeddings into final embeddings."""
return list(np.array(embeddings).mean(axis=0))
def embed_query(self, text: str) -> List[float]:
"""Generate a hypothetical document and embedded it."""
var_name = self.llm_chain.input_keys[0]
result = self.llm_chain.generate([{var_name: text}])
documents = [generation.text for generation in result.generations[0]]
embeddings = self.embed_documents(documents)
return self.combine_embeddings(embeddings)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""Call the internal llm chain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
return self.llm_chain(inputs, callbacks=_run_manager.get_child())
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
base_embeddings: Embeddings,
prompt_key: Optional[str] = None,
custom_prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> HypotheticalDocumentEmbedder:
"""Load and use LLMChain with either a specific prompt key or custom prompt."""
if custom_prompt is not None:
prompt = custom_prompt
elif prompt_key is not None and prompt_key in PROMPT_MAP:
prompt = PROMPT_MAP[prompt_key]
else:
raise ValueError(
f"Must specify prompt_key if custom_prompt not provided. Should be one "
f"of {list(PROMPT_MAP.keys())}."
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
@property
def _chain_type(self) -> str:
return "hyde_chain"
| [
"langchain.chains.hyde.prompts.PROMPT_MAP.keys",
"langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager",
"langchain.chains.llm.LLMChain"
] | [((3148, 3180), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (3156, 3180), False, 'from langchain.chains.llm import LLMChain\n'), ((2258, 2303), 'langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (2301, 2303), False, 'from langchain_core.callbacks import CallbackManagerForChainRun\n'), ((1580, 1600), 'numpy.array', 'np.array', (['embeddings'], {}), '(embeddings)\n', (1588, 1600), True, 'import numpy as np\n'), ((3091, 3108), 'langchain.chains.hyde.prompts.PROMPT_MAP.keys', 'PROMPT_MAP.keys', ([], {}), '()\n', (3106, 3108), False, 'from langchain.chains.hyde.prompts import PROMPT_MAP\n')] |
"""Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf."""
from __future__ import annotations
from typing import Any, Callable, List, NamedTuple, Optional, Sequence
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain_core.tools import BaseTool
from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.agents.tools import Tool
from langchain.agents.utils import validate_tools_single_input
from langchain.chains import LLMChain
from langchain.tools.render import render_text_description
class ChainConfig(NamedTuple):
"""Configuration for chain to use in MRKL system.
Args:
action_name: Name of the action.
action: Action function to call.
action_description: Description of the action.
"""
action_name: str
action: Callable
action_description: str
@deprecated("0.1.0", alternative="create_react_agent", removal="0.2.0")
class ZeroShotAgent(Agent):
"""Agent for the MRKL chain."""
output_parser: AgentOutputParser = Field(default_factory=MRKLOutputParser)
@classmethod
def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
return MRKLOutputParser()
@property
def _agent_type(self) -> str:
"""Return Identifier of agent type."""
return AgentType.ZERO_SHOT_REACT_DESCRIPTION
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
return "Thought:"
@classmethod
def create_prompt(
cls,
tools: Sequence[BaseTool],
prefix: str = PREFIX,
suffix: str = SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
) -> PromptTemplate:
"""Create prompt in the style of the zero shot agent.
Args:
tools: List of tools the agent will have access to, used to format the
prompt.
prefix: String to put before the list of tools.
suffix: String to put after the list of tools.
input_variables: List of input variables the final prompt will expect.
Returns:
A PromptTemplate with the template assembled from the pieces here.
"""
tool_strings = render_text_description(list(tools))
tool_names = ", ".join([tool.name for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names)
template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
if input_variables:
return PromptTemplate(template=template, input_variables=input_variables)
return PromptTemplate.from_template(template)
@classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callback_manager: Optional[BaseCallbackManager] = None,
output_parser: Optional[AgentOutputParser] = None,
prefix: str = PREFIX,
suffix: str = SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
prompt = cls.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
_output_parser = output_parser or cls._get_default_output_parser()
return cls(
llm_chain=llm_chain,
allowed_tools=tool_names,
output_parser=_output_parser,
**kwargs,
)
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
validate_tools_single_input(cls.__name__, tools)
if len(tools) == 0:
raise ValueError(
f"Got no tools for {cls.__name__}. At least one tool must be provided."
)
for tool in tools:
if tool.description is None:
raise ValueError(
f"Got a tool {tool.name} without a description. For this agent, "
f"a description must always be provided."
)
super()._validate_tools(tools)
@deprecated("0.1.0", removal="0.2.0")
class MRKLChain(AgentExecutor):
"""[Deprecated] Chain that implements the MRKL system."""
@classmethod
def from_chains(
cls, llm: BaseLanguageModel, chains: List[ChainConfig], **kwargs: Any
) -> AgentExecutor:
"""User friendly way to initialize the MRKL chain.
This is intended to be an easy way to get up and running with the
MRKL chain.
Args:
llm: The LLM to use as the agent LLM.
chains: The chains the MRKL system has access to.
**kwargs: parameters to be passed to initialization.
Returns:
An initialized MRKL chain.
"""
tools = [
Tool(
name=c.action_name,
func=c.action,
description=c.action_description,
)
for c in chains
]
agent = ZeroShotAgent.from_llm_and_tools(llm, tools)
return cls(agent=agent, tools=tools, **kwargs)
| [
"langchain.agents.mrkl.output_parser.MRKLOutputParser",
"langchain.agents.utils.validate_tools_single_input",
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.PromptTemplate",
"langchain_core._api.deprecated",
"langchain.chains.LLMChain",
"langchain.agents.tools.Tool",
"langchain_core.prompts.PromptTemplate.from_template"
] | [((1278, 1348), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_react_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_react_agent', removal='0.2.0')\n", (1288, 1348), False, 'from langchain_core._api import deprecated\n'), ((5068, 5104), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (5078, 5104), False, 'from langchain_core._api import deprecated\n'), ((1453, 1492), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'MRKLOutputParser'}), '(default_factory=MRKLOutputParser)\n', (1458, 1492), False, 'from langchain_core.pydantic_v1 import Field\n'), ((1603, 1621), 'langchain.agents.mrkl.output_parser.MRKLOutputParser', 'MRKLOutputParser', ([], {}), '()\n', (1619, 1621), False, 'from langchain.agents.mrkl.output_parser import MRKLOutputParser\n'), ((3228, 3266), 'langchain_core.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (3256, 3266), False, 'from langchain_core.prompts import PromptTemplate\n'), ((4052, 4119), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callback_manager': 'callback_manager'}), '(llm=llm, prompt=prompt, callback_manager=callback_manager)\n', (4060, 4119), False, 'from langchain.chains import LLMChain\n'), ((4549, 4597), 'langchain.agents.utils.validate_tools_single_input', 'validate_tools_single_input', (['cls.__name__', 'tools'], {}), '(cls.__name__, tools)\n', (4576, 4597), False, 'from langchain.agents.utils import validate_tools_single_input\n'), ((3146, 3212), 'langchain_core.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': 'input_variables'}), '(template=template, input_variables=input_variables)\n', (3160, 3212), False, 'from langchain_core.prompts import PromptTemplate\n'), ((5785, 5858), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': 'c.action_name', 'func': 'c.action', 'description': 'c.action_description'}), '(name=c.action_name, func=c.action, description=c.action_description)\n', (5789, 5858), False, 'from langchain.agents.tools import Tool\n')] |
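
A short usage sketch for the (deprecated) ZeroShotAgent above — not part of the extracted file; the tool and LLM below are illustrative stand-ins:
# Illustrative only; any LangChain-compatible LLM and single-input tool would work here.
from langchain_community.llms import OpenAI
from langchain.agents.agent import AgentExecutor
from langchain.agents.tools import Tool
def word_count(text: str) -> str:
    # Toy single-input tool: count the words in the input string.
    return str(len(text.split()))
tools = [Tool(name="WordCount", func=word_count, description="Counts words in a text.")]
agent = ZeroShotAgent.from_llm_and_tools(OpenAI(temperature=0), tools)
executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
executor.run("How many words are in 'to be or not to be'?")
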
"""Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf."""
from __future__ import annotations
from typing import Any, Callable, List, NamedTuple, Optional, Sequence
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain_core.tools import BaseTool
from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.agents.tools import Tool
from langchain.agents.utils import validate_tools_single_input
from langchain.chains import LLMChain
from langchain.tools.render import render_text_description
class ChainConfig(NamedTuple):
"""Configuration for chain to use in MRKL system.
Args:
action_name: Name of the action.
action: Action function to call.
action_description: Description of the action.
"""
action_name: str
action: Callable
action_description: str
@deprecated("0.1.0", alternative="create_react_agent", removal="0.2.0")
class ZeroShotAgent(Agent):
"""Agent for the MRKL chain."""
output_parser: AgentOutputParser = Field(default_factory=MRKLOutputParser)
@classmethod
def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
return MRKLOutputParser()
@property
def _agent_type(self) -> str:
"""Return Identifier of agent type."""
return AgentType.ZERO_SHOT_REACT_DESCRIPTION
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
return "Thought:"
@classmethod
def create_prompt(
cls,
tools: Sequence[BaseTool],
prefix: str = PREFIX,
suffix: str = SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
) -> PromptTemplate:
"""Create prompt in the style of the zero shot agent.
Args:
tools: List of tools the agent will have access to, used to format the
prompt.
prefix: String to put before the list of tools.
suffix: String to put after the list of tools.
input_variables: List of input variables the final prompt will expect.
Returns:
A PromptTemplate with the template assembled from the pieces here.
"""
tool_strings = render_text_description(list(tools))
tool_names = ", ".join([tool.name for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names)
template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
if input_variables:
return PromptTemplate(template=template, input_variables=input_variables)
return PromptTemplate.from_template(template)
@classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callback_manager: Optional[BaseCallbackManager] = None,
output_parser: Optional[AgentOutputParser] = None,
prefix: str = PREFIX,
suffix: str = SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
prompt = cls.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
_output_parser = output_parser or cls._get_default_output_parser()
return cls(
llm_chain=llm_chain,
allowed_tools=tool_names,
output_parser=_output_parser,
**kwargs,
)
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
validate_tools_single_input(cls.__name__, tools)
if len(tools) == 0:
raise ValueError(
f"Got no tools for {cls.__name__}. At least one tool must be provided."
)
for tool in tools:
if tool.description is None:
raise ValueError(
f"Got a tool {tool.name} without a description. For this agent, "
f"a description must always be provided."
)
super()._validate_tools(tools)
@deprecated("0.1.0", removal="0.2.0")
class MRKLChain(AgentExecutor):
"""[Deprecated] Chain that implements the MRKL system."""
@classmethod
def from_chains(
cls, llm: BaseLanguageModel, chains: List[ChainConfig], **kwargs: Any
) -> AgentExecutor:
"""User friendly way to initialize the MRKL chain.
This is intended to be an easy way to get up and running with the
MRKL chain.
Args:
llm: The LLM to use as the agent LLM.
chains: The chains the MRKL system has access to.
**kwargs: parameters to be passed to initialization.
Returns:
An initialized MRKL chain.
"""
tools = [
Tool(
name=c.action_name,
func=c.action,
description=c.action_description,
)
for c in chains
]
agent = ZeroShotAgent.from_llm_and_tools(llm, tools)
return cls(agent=agent, tools=tools, **kwargs)
| [
"langchain.agents.mrkl.output_parser.MRKLOutputParser",
"langchain.agents.utils.validate_tools_single_input",
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.PromptTemplate",
"langchain_core._api.deprecated",
"langchain.chains.LLMChain",
"langchain.agents.tools.Tool",
"langchain_core.prompts.PromptTemplate.from_template"
] | [((1278, 1348), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_react_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_react_agent', removal='0.2.0')\n", (1288, 1348), False, 'from langchain_core._api import deprecated\n'), ((5068, 5104), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (5078, 5104), False, 'from langchain_core._api import deprecated\n'), ((1453, 1492), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'MRKLOutputParser'}), '(default_factory=MRKLOutputParser)\n', (1458, 1492), False, 'from langchain_core.pydantic_v1 import Field\n'), ((1603, 1621), 'langchain.agents.mrkl.output_parser.MRKLOutputParser', 'MRKLOutputParser', ([], {}), '()\n', (1619, 1621), False, 'from langchain.agents.mrkl.output_parser import MRKLOutputParser\n'), ((3228, 3266), 'langchain_core.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (3256, 3266), False, 'from langchain_core.prompts import PromptTemplate\n'), ((4052, 4119), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callback_manager': 'callback_manager'}), '(llm=llm, prompt=prompt, callback_manager=callback_manager)\n', (4060, 4119), False, 'from langchain.chains import LLMChain\n'), ((4549, 4597), 'langchain.agents.utils.validate_tools_single_input', 'validate_tools_single_input', (['cls.__name__', 'tools'], {}), '(cls.__name__, tools)\n', (4576, 4597), False, 'from langchain.agents.utils import validate_tools_single_input\n'), ((3146, 3212), 'langchain_core.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': 'input_variables'}), '(template=template, input_variables=input_variables)\n', (3160, 3212), False, 'from langchain_core.prompts import PromptTemplate\n'), ((5785, 5858), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': 'c.action_name', 'func': 'c.action', 'description': 'c.action_description'}), '(name=c.action_name, func=c.action, description=c.action_description)\n', (5789, 5858), False, 'from langchain.agents.tools import Tool\n')] |
"""Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf."""
from __future__ import annotations
from typing import Any, Callable, List, NamedTuple, Optional, Sequence
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain_core.tools import BaseTool
from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.agents.tools import Tool
from langchain.agents.utils import validate_tools_single_input
from langchain.chains import LLMChain
from langchain.tools.render import render_text_description
class ChainConfig(NamedTuple):
"""Configuration for chain to use in MRKL system.
Args:
action_name: Name of the action.
action: Action function to call.
action_description: Description of the action.
"""
action_name: str
action: Callable
action_description: str
@deprecated("0.1.0", alternative="create_react_agent", removal="0.2.0")
class ZeroShotAgent(Agent):
"""Agent for the MRKL chain."""
output_parser: AgentOutputParser = Field(default_factory=MRKLOutputParser)
@classmethod
def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
return MRKLOutputParser()
@property
def _agent_type(self) -> str:
"""Return Identifier of agent type."""
return AgentType.ZERO_SHOT_REACT_DESCRIPTION
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
return "Thought:"
@classmethod
def create_prompt(
cls,
tools: Sequence[BaseTool],
prefix: str = PREFIX,
suffix: str = SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
) -> PromptTemplate:
"""Create prompt in the style of the zero shot agent.
Args:
tools: List of tools the agent will have access to, used to format the
prompt.
prefix: String to put before the list of tools.
suffix: String to put after the list of tools.
input_variables: List of input variables the final prompt will expect.
Returns:
A PromptTemplate with the template assembled from the pieces here.
"""
tool_strings = render_text_description(list(tools))
tool_names = ", ".join([tool.name for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names)
template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
if input_variables:
return PromptTemplate(template=template, input_variables=input_variables)
return PromptTemplate.from_template(template)
@classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callback_manager: Optional[BaseCallbackManager] = None,
output_parser: Optional[AgentOutputParser] = None,
prefix: str = PREFIX,
suffix: str = SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
prompt = cls.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
_output_parser = output_parser or cls._get_default_output_parser()
return cls(
llm_chain=llm_chain,
allowed_tools=tool_names,
output_parser=_output_parser,
**kwargs,
)
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
validate_tools_single_input(cls.__name__, tools)
if len(tools) == 0:
raise ValueError(
f"Got no tools for {cls.__name__}. At least one tool must be provided."
)
for tool in tools:
if tool.description is None:
raise ValueError(
f"Got a tool {tool.name} without a description. For this agent, "
f"a description must always be provided."
)
super()._validate_tools(tools)
@deprecated("0.1.0", removal="0.2.0")
class MRKLChain(AgentExecutor):
"""[Deprecated] Chain that implements the MRKL system."""
@classmethod
def from_chains(
cls, llm: BaseLanguageModel, chains: List[ChainConfig], **kwargs: Any
) -> AgentExecutor:
"""User friendly way to initialize the MRKL chain.
This is intended to be an easy way to get up and running with the
MRKL chain.
Args:
llm: The LLM to use as the agent LLM.
chains: The chains the MRKL system has access to.
**kwargs: parameters to be passed to initialization.
Returns:
An initialized MRKL chain.
"""
tools = [
Tool(
name=c.action_name,
func=c.action,
description=c.action_description,
)
for c in chains
]
agent = ZeroShotAgent.from_llm_and_tools(llm, tools)
return cls(agent=agent, tools=tools, **kwargs)
| [
"langchain.agents.mrkl.output_parser.MRKLOutputParser",
"langchain.agents.utils.validate_tools_single_input",
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.PromptTemplate",
"langchain_core._api.deprecated",
"langchain.chains.LLMChain",
"langchain.agents.tools.Tool",
"langchain_core.prompts.PromptTemplate.from_template"
] | [((1278, 1348), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_react_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_react_agent', removal='0.2.0')\n", (1288, 1348), False, 'from langchain_core._api import deprecated\n'), ((5068, 5104), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (5078, 5104), False, 'from langchain_core._api import deprecated\n'), ((1453, 1492), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'MRKLOutputParser'}), '(default_factory=MRKLOutputParser)\n', (1458, 1492), False, 'from langchain_core.pydantic_v1 import Field\n'), ((1603, 1621), 'langchain.agents.mrkl.output_parser.MRKLOutputParser', 'MRKLOutputParser', ([], {}), '()\n', (1619, 1621), False, 'from langchain.agents.mrkl.output_parser import MRKLOutputParser\n'), ((3228, 3266), 'langchain_core.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (3256, 3266), False, 'from langchain_core.prompts import PromptTemplate\n'), ((4052, 4119), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callback_manager': 'callback_manager'}), '(llm=llm, prompt=prompt, callback_manager=callback_manager)\n', (4060, 4119), False, 'from langchain.chains import LLMChain\n'), ((4549, 4597), 'langchain.agents.utils.validate_tools_single_input', 'validate_tools_single_input', (['cls.__name__', 'tools'], {}), '(cls.__name__, tools)\n', (4576, 4597), False, 'from langchain.agents.utils import validate_tools_single_input\n'), ((3146, 3212), 'langchain_core.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': 'input_variables'}), '(template=template, input_variables=input_variables)\n', (3160, 3212), False, 'from langchain_core.prompts import PromptTemplate\n'), ((5785, 5858), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': 'c.action_name', 'func': 'c.action', 'description': 'c.action_description'}), '(name=c.action_name, func=c.action, description=c.action_description)\n', (5789, 5858), False, 'from langchain.agents.tools import Tool\n')] |
"""Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf."""
from __future__ import annotations
from typing import Any, Callable, List, NamedTuple, Optional, Sequence
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain_core.tools import BaseTool
from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.agents.tools import Tool
from langchain.agents.utils import validate_tools_single_input
from langchain.chains import LLMChain
from langchain.tools.render import render_text_description
class ChainConfig(NamedTuple):
"""Configuration for chain to use in MRKL system.
Args:
action_name: Name of the action.
action: Action function to call.
action_description: Description of the action.
"""
action_name: str
action: Callable
action_description: str
@deprecated("0.1.0", alternative="create_react_agent", removal="0.2.0")
class ZeroShotAgent(Agent):
"""Agent for the MRKL chain."""
output_parser: AgentOutputParser = Field(default_factory=MRKLOutputParser)
@classmethod
def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
return MRKLOutputParser()
@property
def _agent_type(self) -> str:
"""Return Identifier of agent type."""
return AgentType.ZERO_SHOT_REACT_DESCRIPTION
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
return "Thought:"
@classmethod
def create_prompt(
cls,
tools: Sequence[BaseTool],
prefix: str = PREFIX,
suffix: str = SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
) -> PromptTemplate:
"""Create prompt in the style of the zero shot agent.
Args:
tools: List of tools the agent will have access to, used to format the
prompt.
prefix: String to put before the list of tools.
suffix: String to put after the list of tools.
input_variables: List of input variables the final prompt will expect.
Returns:
A PromptTemplate with the template assembled from the pieces here.
"""
tool_strings = render_text_description(list(tools))
tool_names = ", ".join([tool.name for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names)
template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
if input_variables:
return PromptTemplate(template=template, input_variables=input_variables)
return PromptTemplate.from_template(template)
@classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callback_manager: Optional[BaseCallbackManager] = None,
output_parser: Optional[AgentOutputParser] = None,
prefix: str = PREFIX,
suffix: str = SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
prompt = cls.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
_output_parser = output_parser or cls._get_default_output_parser()
return cls(
llm_chain=llm_chain,
allowed_tools=tool_names,
output_parser=_output_parser,
**kwargs,
)
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
validate_tools_single_input(cls.__name__, tools)
if len(tools) == 0:
raise ValueError(
f"Got no tools for {cls.__name__}. At least one tool must be provided."
)
for tool in tools:
if tool.description is None:
raise ValueError(
f"Got a tool {tool.name} without a description. For this agent, "
f"a description must always be provided."
)
super()._validate_tools(tools)
@deprecated("0.1.0", removal="0.2.0")
class MRKLChain(AgentExecutor):
"""[Deprecated] Chain that implements the MRKL system."""
@classmethod
def from_chains(
cls, llm: BaseLanguageModel, chains: List[ChainConfig], **kwargs: Any
) -> AgentExecutor:
"""User friendly way to initialize the MRKL chain.
This is intended to be an easy way to get up and running with the
MRKL chain.
Args:
llm: The LLM to use as the agent LLM.
chains: The chains the MRKL system has access to.
**kwargs: parameters to be passed to initialization.
Returns:
An initialized MRKL chain.
"""
tools = [
Tool(
name=c.action_name,
func=c.action,
description=c.action_description,
)
for c in chains
]
agent = ZeroShotAgent.from_llm_and_tools(llm, tools)
return cls(agent=agent, tools=tools, **kwargs)
| [
"langchain.agents.mrkl.output_parser.MRKLOutputParser",
"langchain.agents.utils.validate_tools_single_input",
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.PromptTemplate",
"langchain_core._api.deprecated",
"langchain.chains.LLMChain",
"langchain.agents.tools.Tool",
"langchain_core.prompts.PromptTemplate.from_template"
] | [((1278, 1348), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_react_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_react_agent', removal='0.2.0')\n", (1288, 1348), False, 'from langchain_core._api import deprecated\n'), ((5068, 5104), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (5078, 5104), False, 'from langchain_core._api import deprecated\n'), ((1453, 1492), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'MRKLOutputParser'}), '(default_factory=MRKLOutputParser)\n', (1458, 1492), False, 'from langchain_core.pydantic_v1 import Field\n'), ((1603, 1621), 'langchain.agents.mrkl.output_parser.MRKLOutputParser', 'MRKLOutputParser', ([], {}), '()\n', (1619, 1621), False, 'from langchain.agents.mrkl.output_parser import MRKLOutputParser\n'), ((3228, 3266), 'langchain_core.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (3256, 3266), False, 'from langchain_core.prompts import PromptTemplate\n'), ((4052, 4119), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callback_manager': 'callback_manager'}), '(llm=llm, prompt=prompt, callback_manager=callback_manager)\n', (4060, 4119), False, 'from langchain.chains import LLMChain\n'), ((4549, 4597), 'langchain.agents.utils.validate_tools_single_input', 'validate_tools_single_input', (['cls.__name__', 'tools'], {}), '(cls.__name__, tools)\n', (4576, 4597), False, 'from langchain.agents.utils import validate_tools_single_input\n'), ((3146, 3212), 'langchain_core.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': 'input_variables'}), '(template=template, input_variables=input_variables)\n', (3160, 3212), False, 'from langchain_core.prompts import PromptTemplate\n'), ((5785, 5858), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': 'c.action_name', 'func': 'c.action', 'description': 'c.action_description'}), '(name=c.action_name, func=c.action, description=c.action_description)\n', (5789, 5858), False, 'from langchain.agents.tools import Tool\n')] |
import base64
import io
import os
import uuid
from io import BytesIO
from pathlib import Path
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import LocalFileStore
from langchain_community.chat_models import ChatOllama
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_core.messages import HumanMessage
from PIL import Image
def image_summarize(img_base64, prompt):
"""
Make image summary
:param img_base64: Base64 encoded string for image
    :param prompt: Text prompt for summarization
    :return: Image summary
"""
chat = ChatOllama(model="bakllava", temperature=0)
msg = chat.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": prompt},
{
"type": "image_url",
"image_url": f"data:image/jpeg;base64,{img_base64}",
},
]
)
]
)
return msg.content
def generate_img_summaries(img_base64_list):
"""
Generate summaries for images
:param img_base64_list: Base64 encoded images
:return: List of image summaries and processed images
"""
# Store image summaries
image_summaries = []
processed_images = []
# Prompt
prompt = """Give a detailed summary of the image."""
# Apply summarization to images
for i, base64_image in enumerate(img_base64_list):
try:
image_summaries.append(image_summarize(base64_image, prompt))
processed_images.append(base64_image)
except Exception as e:
print(f"Error with image {i+1}: {e}") # noqa: T201
return image_summaries, processed_images
def get_images(img_path):
"""
Extract images.
:param img_path: A string representing the path to the images.
"""
# Get image URIs
pil_images = [
Image.open(os.path.join(img_path, image_name))
for image_name in os.listdir(img_path)
if image_name.endswith(".jpg")
]
return pil_images
def resize_base64_image(base64_string, size=(128, 128)):
"""
Resize an image encoded as a Base64 string
:param base64_string: Base64 string
:param size: Image size
:return: Re-sized Base64 string
"""
# Decode the Base64 string
img_data = base64.b64decode(base64_string)
img = Image.open(io.BytesIO(img_data))
# Resize the image
resized_img = img.resize(size, Image.LANCZOS)
# Save the resized image to a bytes buffer
buffered = io.BytesIO()
resized_img.save(buffered, format=img.format)
# Encode the resized image to Base64
return base64.b64encode(buffered.getvalue()).decode("utf-8")
def convert_to_base64(pil_image):
"""
Convert PIL images to Base64 encoded strings
:param pil_image: PIL image
    :return: Base64 encoded string
"""
buffered = BytesIO()
pil_image.save(buffered, format="JPEG") # You can change the format if needed
img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
# img_str = resize_base64_image(img_str, size=(831,623))
return img_str
def create_multi_vector_retriever(vectorstore, image_summaries, images):
"""
Create retriever that indexes summaries, but returns raw images or texts
    :param vectorstore: Vectorstore to store embedded image summaries
:param image_summaries: Image summaries
:param images: Base64 encoded images
:return: Retriever
"""
# Initialize the storage layer for images
store = LocalFileStore(
str(Path(__file__).parent / "multi_vector_retriever_metadata")
)
id_key = "doc_id"
# Create the multi-vector retriever
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
byte_store=store,
id_key=id_key,
)
# Helper function to add documents to the vectorstore and docstore
def add_documents(retriever, doc_summaries, doc_contents):
doc_ids = [str(uuid.uuid4()) for _ in doc_contents]
summary_docs = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(doc_summaries)
]
retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, doc_contents)))
add_documents(retriever, image_summaries, images)
return retriever
# Load images
doc_path = Path(__file__).parent / "docs/"
rel_doc_path = doc_path.relative_to(Path.cwd())
print("Read images") # noqa: T201
pil_images = get_images(rel_doc_path)
# Convert to b64
images_base_64 = [convert_to_base64(i) for i in pil_images]
# Image summaries
print("Generate image summaries") # noqa: T201
image_summaries, images_base_64_processed = generate_img_summaries(images_base_64)
# The vectorstore to use to index the images summaries
vectorstore_mvr = Chroma(
collection_name="image_summaries",
persist_directory=str(Path(__file__).parent / "chroma_db_multi_modal"),
embedding_function=OllamaEmbeddings(model="llama2:7b"),
)
# Create documents
images_base_64_processed_documents = [
Document(page_content=i) for i in images_base_64_processed
]
# Create retriever
retriever_multi_vector_img = create_multi_vector_retriever(
vectorstore_mvr,
image_summaries,
images_base_64_processed_documents,
)
| [
"langchain_core.documents.Document",
"langchain_community.embeddings.OllamaEmbeddings",
"langchain_community.chat_models.ChatOllama",
"langchain_core.messages.HumanMessage",
"langchain.retrievers.multi_vector.MultiVectorRetriever"
] | [((731, 774), 'langchain_community.chat_models.ChatOllama', 'ChatOllama', ([], {'model': '"""bakllava"""', 'temperature': '(0)'}), "(model='bakllava', temperature=0)\n", (741, 774), False, 'from langchain_community.chat_models import ChatOllama\n'), ((2494, 2525), 'base64.b64decode', 'base64.b64decode', (['base64_string'], {}), '(base64_string)\n', (2510, 2525), False, 'import base64\n'), ((2706, 2718), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2716, 2718), False, 'import io\n'), ((3062, 3071), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3069, 3071), False, 'from io import BytesIO\n'), ((3881, 3959), 'langchain.retrievers.multi_vector.MultiVectorRetriever', 'MultiVectorRetriever', ([], {'vectorstore': 'vectorstore', 'byte_store': 'store', 'id_key': 'id_key'}), '(vectorstore=vectorstore, byte_store=store, id_key=id_key)\n', (3901, 3959), False, 'from langchain.retrievers.multi_vector import MultiVectorRetriever\n'), ((4634, 4644), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (4642, 4644), False, 'from pathlib import Path\n'), ((5269, 5293), 'langchain_core.documents.Document', 'Document', ([], {'page_content': 'i'}), '(page_content=i)\n', (5277, 5293), False, 'from langchain_core.documents import Document\n'), ((2547, 2567), 'io.BytesIO', 'io.BytesIO', (['img_data'], {}), '(img_data)\n', (2557, 2567), False, 'import io\n'), ((4566, 4580), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (4570, 4580), False, 'from pathlib import Path\n'), ((5167, 5202), 'langchain_community.embeddings.OllamaEmbeddings', 'OllamaEmbeddings', ([], {'model': '"""llama2:7b"""'}), "(model='llama2:7b')\n", (5183, 5202), False, 'from langchain_community.embeddings import OllamaEmbeddings\n'), ((821, 957), 'langchain_core.messages.HumanMessage', 'HumanMessage', ([], {'content': "[{'type': 'text', 'text': prompt}, {'type': 'image_url', 'image_url':\n f'data:image/jpeg;base64,{img_base64}'}]"}), "(content=[{'type': 'text', 'text': prompt}, {'type':\n 'image_url', 'image_url': f'data:image/jpeg;base64,{img_base64}'}])\n", (833, 957), False, 'from langchain_core.messages import HumanMessage\n'), ((2071, 2105), 'os.path.join', 'os.path.join', (['img_path', 'image_name'], {}), '(img_path, image_name)\n', (2083, 2105), False, 'import os\n'), ((2133, 2153), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (2143, 2153), False, 'import os\n'), ((4223, 4278), 'langchain_core.documents.Document', 'Document', ([], {'page_content': 's', 'metadata': '{id_key: doc_ids[i]}'}), '(page_content=s, metadata={id_key: doc_ids[i]})\n', (4231, 4278), False, 'from langchain_core.documents import Document\n'), ((4149, 4161), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4159, 4161), False, 'import uuid\n'), ((3737, 3751), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3741, 3751), False, 'from pathlib import Path\n'), ((5094, 5108), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5098, 5108), False, 'from pathlib import Path\n')] |
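
A brief query sketch against the retriever built above — not part of the extracted file; the question text is made up:
# Hypothetical query; the retriever matches on the embedded summaries but returns the
# raw image Documents stored in the docstore.
docs = retriever_multi_vector_img.get_relevant_documents("a photo taken outdoors")
for doc in docs:
    print(doc.page_content[:80])  # first characters of the base64-encoded image
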
import streamlit as st
import datetime
import os
import psycopg2
from dotenv import load_dotenv
from langchain.prompts import PromptTemplate
from langchain.docstore.document import Document
def log(message):
current_time = datetime.datetime.now()
milliseconds = current_time.microsecond // 1000
timestamp = current_time.strftime(
"[%Y-%m-%d %H:%M:%S.{:03d}] ".format(milliseconds)
)
st.text(timestamp + message)
def check_input(question: str):
    if question == "":
        raise Exception("Please enter a question.")
_postgres_prompt = """\
You are a PostgreSQL expert. Given an input question, create a syntactically correct PostgreSQL query to run and return it as the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per PostgreSQL.
Never query for all columns from a table. You must query only the columns that are needed to answer the question.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Create meaningful aliases for the columns. For example, if the column name is products_sold.count, you should alias it as total_sold_products.
Note that the columns with (member_type: measure) are numeric columns and the ones with (member_type: dimension) are string columns.
You should include at least one column with (member_type: measure) in your query.
There are two types of queries supported against cube tables: aggregated and non-aggregated. Aggregated are those with GROUP BY statement, and non-aggregated are those without. Cube queries issued to your database will always be aggregated, and it doesn't matter if you provide GROUP BY in a query or not.
Whenever you use a non-aggregated query you need to provide only column names in SQL:
SELECT status, count FROM orders
The same aggregated query should always aggregate measure columns using a corresponding aggregating function or special MEASURE() function:
SELECT status, SUM(count) FROM orders GROUP BY 1
SELECT status, MEASURE(count) FROM orders GROUP BY 1
If you can't construct the query, answer `{no_answer_text}`
Only use the following table: {table_info}
Only look among the following columns and pick the relevant ones:
{columns_info}
Question: {input_question}
"""
PROMPT_POSTFIX = """\
Return the answer as a JSON object with the following format:
{
"query": "",
"filters": [{"column": \"\", "operator": \"\", "value": "\"\"}]
}
"""
CUBE_SQL_API_PROMPT = PromptTemplate(
input_variables=[
"input_question",
"table_info",
"columns_info",
"top_k",
"no_answer_text",
],
template=_postgres_prompt,
)
_NO_ANSWER_TEXT = "I can't answer this question."
def call_sql_api(sql_query: str):
load_dotenv()
CONN_STR = os.environ["DATABASE_URL"]
    # Initialize the Cube SQL API connection
connection = psycopg2.connect(CONN_STR)
cursor = connection.cursor()
cursor.execute(sql_query)
columns = [desc[0] for desc in cursor.description]
rows = cursor.fetchall()
cursor.close()
connection.close()
return columns, rows
def create_docs_from_values(columns_values, table_name, column_name):
value_docs = []
for column_value in columns_values:
print(column_value)
metadata = dict(
table_name=table_name,
column_name=column_name,
)
page_content = column_value
value_docs.append(Document(page_content=page_content, metadata=metadata))
return value_docs
| [
"langchain.docstore.document.Document",
"langchain.prompts.PromptTemplate"
] | [((2668, 2806), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['input_question', 'table_info', 'columns_info', 'top_k', 'no_answer_text']", 'template': '_postgres_prompt'}), "(input_variables=['input_question', 'table_info',\n 'columns_info', 'top_k', 'no_answer_text'], template=_postgres_prompt)\n", (2682, 2806), False, 'from langchain.prompts import PromptTemplate\n'), ((230, 253), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (251, 253), False, 'import datetime\n'), ((414, 442), 'streamlit.text', 'st.text', (['(timestamp + message)'], {}), '(timestamp + message)\n', (421, 442), True, 'import streamlit as st\n'), ((2952, 2965), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (2963, 2965), False, 'from dotenv import load_dotenv\n'), ((3070, 3096), 'psycopg2.connect', 'psycopg2.connect', (['CONN_STR'], {}), '(CONN_STR)\n', (3086, 3096), False, 'import psycopg2\n'), ((3650, 3704), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'page_content', 'metadata': 'metadata'}), '(page_content=page_content, metadata=metadata)\n', (3658, 3704), False, 'from langchain.docstore.document import Document\n')] |
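
A small sketch of how the prompt template and SQL helper above could be wired together — not part of the extracted file; the table and column metadata, and the sample SQL, are invented:
# Hypothetical example; table/column details and the SQL string are invented.
prompt_text = (
    CUBE_SQL_API_PROMPT.format(
        input_question="How many orders were completed last month?",
        table_info="orders",
        columns_info="orders.count (member_type: measure), orders.status (member_type: dimension)",
        top_k=5,
        no_answer_text=_NO_ANSWER_TEXT,
    )
    + PROMPT_POSTFIX
)
# Once an LLM has turned prompt_text into SQL, the query can be executed with:
# columns, rows = call_sql_api("SELECT status, MEASURE(count) FROM orders GROUP BY 1 LIMIT 5")
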
import os
import pandas as pd
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import mlflow
assert (
"OPENAI_API_KEY" in os.environ
), "Please set the OPENAI_API_KEY environment variable to run this example."
def build_and_evalute_model_with_prompt(prompt_template):
mlflow.start_run()
mlflow.log_param("prompt_template", prompt_template)
# Create a news summarization model using prompt engineering with LangChain. Log the model
# to MLflow Tracking
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(input_variables=["article"], template=prompt_template)
chain = LLMChain(llm=llm, prompt=prompt)
logged_model = mlflow.langchain.log_model(chain, artifact_path="model")
# Evaluate the model on a small sample dataset
sample_data = pd.read_csv("summarization_example_data.csv")
mlflow.evaluate(
model=logged_model.model_uri,
model_type="text-summarization",
data=sample_data,
targets="highlights",
)
mlflow.end_run()
prompt_template_1 = (
"Write a summary of the following article that is between triple backticks: ```{article}```"
)
print(f"Bulding and evaluating model with prompt: '{prompt_template_1}'")
build_and_evalute_model_with_prompt(prompt_template_1)
prompt_template_2 = (
"Write a summary of the following article that is between triple backticks. Be concise. Make"
" sure the summary includes important nouns and dates and keywords in the original text."
" Just return the summary. Do not include any text other than the summary: ```{article}```"
)
print(f"Building and evaluating model with prompt: '{prompt_template_2}'")
build_and_evalute_model_with_prompt(prompt_template_2)
# Load the evaluation results
results: pd.DataFrame = mlflow.load_table(
"eval_results_table.json", extra_columns=["run_id", "params.prompt_template"]
)
results_grouped_by_article = results.sort_values(by="id")
print("Evaluation results:")
print(results_grouped_by_article[["run_id", "params.prompt_template", "article", "outputs"]])
# Score the best model on a new article
new_article = """
Adnan Januzaj swapped the lush turf of Old Trafford for the green baize at Sheffield when he
turned up at the snooker World Championships on Wednesday. The Manchester United winger, who has
endured a frustrating season under Louis van Gaal, had turned out for the Under 21 side at Fulham
on Tuesday night amid reports he could be farmed out on loan next season. But Januzaj may want to
consider trying his hand at another sport after displaying his silky skillls on a mini pool table.
Adnan Januzaj (left) cheered on\xa0Shaun Murphy (right) at the World Championship in Sheffield.
Januzaj shows off his potting skills on a mini pool table at the Crucible on Wednesday.
The 20-year-old Belgium international was at the Crucible to cheer on his friend Shaun Murphy in
his quarter-final against Anthony McGill. The 2005 winner moved a step closer to an elusive second
title in Sheffield with a 13-8 victory, sealed with a 67 break. Three centuries in the match, and
the way he accelerated away from 6-6, showed Murphy is a man to fear, and next for him will be
Neil Robertson or Barry Hawkins. Januzaj turned out for Under 21s in the 4-1 victory at Fulham on
Tuesday night.
"""
print(
f"Scoring the model with prompt '{prompt_template_2}' on the article '{new_article[:70] + '...'}'"
)
best_model = mlflow.pyfunc.load_model(f"runs:/{mlflow.last_active_run().info.run_id}/model")
summary = best_model.predict({"article": new_article})
print(f"Summary: {summary}")
| [
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate",
"langchain.llms.OpenAI"
] | [((1832, 1932), 'mlflow.load_table', 'mlflow.load_table', (['"""eval_results_table.json"""'], {'extra_columns': "['run_id', 'params.prompt_template']"}), "('eval_results_table.json', extra_columns=['run_id',\n 'params.prompt_template'])\n", (1849, 1932), False, 'import mlflow\n'), ((349, 367), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (365, 367), False, 'import mlflow\n'), ((372, 424), 'mlflow.log_param', 'mlflow.log_param', (['"""prompt_template"""', 'prompt_template'], {}), "('prompt_template', prompt_template)\n", (388, 424), False, 'import mlflow\n'), ((555, 578), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)'}), '(temperature=0.9)\n', (561, 578), False, 'from langchain.llms import OpenAI\n'), ((592, 661), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['article']", 'template': 'prompt_template'}), "(input_variables=['article'], template=prompt_template)\n", (606, 661), False, 'from langchain.prompts import PromptTemplate\n'), ((674, 706), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (682, 706), False, 'from langchain.chains import LLMChain\n'), ((726, 782), 'mlflow.langchain.log_model', 'mlflow.langchain.log_model', (['chain'], {'artifact_path': '"""model"""'}), "(chain, artifact_path='model')\n", (752, 782), False, 'import mlflow\n'), ((853, 898), 'pandas.read_csv', 'pd.read_csv', (['"""summarization_example_data.csv"""'], {}), "('summarization_example_data.csv')\n", (864, 898), True, 'import pandas as pd\n'), ((903, 1026), 'mlflow.evaluate', 'mlflow.evaluate', ([], {'model': 'logged_model.model_uri', 'model_type': '"""text-summarization"""', 'data': 'sample_data', 'targets': '"""highlights"""'}), "(model=logged_model.model_uri, model_type=\n 'text-summarization', data=sample_data, targets='highlights')\n", (918, 1026), False, 'import mlflow\n'), ((1065, 1081), 'mlflow.end_run', 'mlflow.end_run', ([], {}), '()\n', (1079, 1081), False, 'import mlflow\n'), ((3510, 3534), 'mlflow.last_active_run', 'mlflow.last_active_run', ([], {}), '()\n', (3532, 3534), False, 'import mlflow\n')] |
import hashlib
try:
from langchain_community.document_loaders import UnstructuredXMLLoader
except ImportError:
raise ImportError(
'XML file requires extra dependencies. Install with `pip install --upgrade "embedchain[dataloaders]"`'
) from None
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string
@register_deserializable
class XmlLoader(BaseLoader):
def load_data(self, xml_url):
"""Load data from a XML file."""
loader = UnstructuredXMLLoader(xml_url)
data = loader.load()
content = data[0].page_content
content = clean_string(content)
metadata = data[0].metadata
metadata["url"] = metadata["source"]
del metadata["source"]
output = [{"content": content, "meta_data": metadata}]
doc_id = hashlib.sha256((content + xml_url).encode()).hexdigest()
return {
"doc_id": doc_id,
"data": output,
}
| [
"langchain_community.document_loaders.UnstructuredXMLLoader"
] | [((588, 618), 'langchain_community.document_loaders.UnstructuredXMLLoader', 'UnstructuredXMLLoader', (['xml_url'], {}), '(xml_url)\n', (609, 618), False, 'from langchain_community.document_loaders import UnstructuredXMLLoader\n'), ((705, 726), 'embedchain.utils.misc.clean_string', 'clean_string', (['content'], {}), '(content)\n', (717, 726), False, 'from embedchain.utils.misc import clean_string\n')] |
import os
import voyager.utils as U
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import HumanMessage, SystemMessage
from langchain.vectorstores import Chroma
from voyager.prompts import load_prompt
from voyager.control_primitives import load_control_primitives
class SkillManager:
def __init__(
self,
model_name="gpt-3.5-turbo",
temperature=0,
retrieval_top_k=5,
request_timout=120,
ckpt_dir="ckpt",
resume=False,
):
self.llm = ChatOpenAI(
model_name=model_name,
temperature=temperature,
request_timeout=request_timout,
)
U.f_mkdir(f"{ckpt_dir}/skill/code")
U.f_mkdir(f"{ckpt_dir}/skill/description")
U.f_mkdir(f"{ckpt_dir}/skill/vectordb")
# programs for env execution
self.control_primitives = load_control_primitives()
if resume:
print(f"\033[33mLoading Skill Manager from {ckpt_dir}/skill\033[0m")
self.skills = U.load_json(f"{ckpt_dir}/skill/skills.json")
else:
self.skills = {}
self.retrieval_top_k = retrieval_top_k
self.ckpt_dir = ckpt_dir
self.vectordb = Chroma(
collection_name="skill_vectordb",
embedding_function=OpenAIEmbeddings(),
persist_directory=f"{ckpt_dir}/skill/vectordb",
)
assert self.vectordb._collection.count() == len(self.skills), (
f"Skill Manager's vectordb is not synced with skills.json.\n"
f"There are {self.vectordb._collection.count()} skills in vectordb but {len(self.skills)} skills in skills.json.\n"
f"Did you set resume=False when initializing the manager?\n"
f"You may need to manually delete the vectordb directory for running from scratch."
)
@property
def programs(self):
programs = ""
for skill_name, entry in self.skills.items():
programs += f"{entry['code']}\n\n"
for primitives in self.control_primitives:
programs += f"{primitives}\n\n"
return programs
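    # Register a newly generated skill: embed its LLM-written description in the vector DB and
    # dump the code, description, and skills.json to the checkpoint directory.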
def add_new_skill(self, info):
if info["task"].startswith("Deposit useless items into the chest at"):
# No need to reuse the deposit skill
return
program_name = info["program_name"]
program_code = info["program_code"]
skill_description = self.generate_skill_description(program_name, program_code)
print(
f"\033[33mSkill Manager generated description for {program_name}:\n{skill_description}\033[0m"
)
if program_name in self.skills:
print(f"\033[33mSkill {program_name} already exists. Rewriting!\033[0m")
self.vectordb._collection.delete(ids=[program_name])
i = 2
while f"{program_name}V{i}.js" in os.listdir(f"{self.ckpt_dir}/skill/code"):
i += 1
dumped_program_name = f"{program_name}V{i}"
else:
dumped_program_name = program_name
self.vectordb.add_texts(
texts=[skill_description],
ids=[program_name],
metadatas=[{"name": program_name}],
)
self.skills[program_name] = {
"code": program_code,
"description": skill_description,
}
assert self.vectordb._collection.count() == len(
self.skills
), "vectordb is not synced with skills.json"
U.dump_text(
program_code, f"{self.ckpt_dir}/skill/code/{dumped_program_name}.js"
)
U.dump_text(
skill_description,
f"{self.ckpt_dir}/skill/description/{dumped_program_name}.txt",
)
U.dump_json(self.skills, f"{self.ckpt_dir}/skill/skills.json")
self.vectordb.persist()
def generate_skill_description(self, program_name, program_code):
messages = [
SystemMessage(content=load_prompt("skill")),
HumanMessage(
content=program_code
+ "\n\n"
+ f"The main function is `{program_name}`."
),
]
skill_description = f" // { self.llm(messages).content}"
return f"async function {program_name}(bot) {{\n{skill_description}\n}}"
def retrieve_skills(self, query):
k = min(self.vectordb._collection.count(), self.retrieval_top_k)
if k == 0:
return []
print(f"\033[33mSkill Manager retrieving for {k} skills\033[0m")
docs_and_scores = self.vectordb.similarity_search_with_score(query, k=k)
print(
f"\033[33mSkill Manager retrieved skills: "
f"{', '.join([doc.metadata['name'] for doc, _ in docs_and_scores])}\033[0m"
)
skills = []
for doc, _ in docs_and_scores:
skills.append(self.skills[doc.metadata["name"]]["code"])
return skills
| [
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.schema.HumanMessage",
"langchain.chat_models.ChatOpenAI"
] | [((583, 678), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model_name', 'temperature': 'temperature', 'request_timeout': 'request_timout'}), '(model_name=model_name, temperature=temperature, request_timeout=\n request_timout)\n', (593, 678), False, 'from langchain.chat_models import ChatOpenAI\n'), ((729, 764), 'voyager.utils.f_mkdir', 'U.f_mkdir', (['f"""{ckpt_dir}/skill/code"""'], {}), "(f'{ckpt_dir}/skill/code')\n", (738, 764), True, 'import voyager.utils as U\n'), ((773, 815), 'voyager.utils.f_mkdir', 'U.f_mkdir', (['f"""{ckpt_dir}/skill/description"""'], {}), "(f'{ckpt_dir}/skill/description')\n", (782, 815), True, 'import voyager.utils as U\n'), ((824, 863), 'voyager.utils.f_mkdir', 'U.f_mkdir', (['f"""{ckpt_dir}/skill/vectordb"""'], {}), "(f'{ckpt_dir}/skill/vectordb')\n", (833, 863), True, 'import voyager.utils as U\n'), ((935, 960), 'voyager.control_primitives.load_control_primitives', 'load_control_primitives', ([], {}), '()\n', (958, 960), False, 'from voyager.control_primitives import load_control_primitives\n'), ((3548, 3633), 'voyager.utils.dump_text', 'U.dump_text', (['program_code', 'f"""{self.ckpt_dir}/skill/code/{dumped_program_name}.js"""'], {}), "(program_code,\n f'{self.ckpt_dir}/skill/code/{dumped_program_name}.js')\n", (3559, 3633), True, 'import voyager.utils as U\n'), ((3660, 3758), 'voyager.utils.dump_text', 'U.dump_text', (['skill_description', 'f"""{self.ckpt_dir}/skill/description/{dumped_program_name}.txt"""'], {}), "(skill_description,\n f'{self.ckpt_dir}/skill/description/{dumped_program_name}.txt')\n", (3671, 3758), True, 'import voyager.utils as U\n'), ((3798, 3860), 'voyager.utils.dump_json', 'U.dump_json', (['self.skills', 'f"""{self.ckpt_dir}/skill/skills.json"""'], {}), "(self.skills, f'{self.ckpt_dir}/skill/skills.json')\n", (3809, 3860), True, 'import voyager.utils as U\n'), ((1087, 1131), 'voyager.utils.load_json', 'U.load_json', (['f"""{ckpt_dir}/skill/skills.json"""'], {}), "(f'{ckpt_dir}/skill/skills.json')\n", (1098, 1131), True, 'import voyager.utils as U\n'), ((4054, 4145), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': "(program_code + '\\n\\n' + f'The main function is `{program_name}`.')"}), "(content=program_code + '\\n\\n' +\n f'The main function is `{program_name}`.')\n", (4066, 4145), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((1364, 1382), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1380, 1382), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2933, 2974), 'os.listdir', 'os.listdir', (['f"""{self.ckpt_dir}/skill/code"""'], {}), "(f'{self.ckpt_dir}/skill/code')\n", (2943, 2974), False, 'import os\n'), ((4019, 4039), 'voyager.prompts.load_prompt', 'load_prompt', (['"""skill"""'], {}), "('skill')\n", (4030, 4039), False, 'from voyager.prompts import load_prompt\n')] |
from langflow import CustomComponent
from langchain.agents import AgentExecutor, create_json_agent
from langflow.field_typing import (
BaseLanguageModel,
)
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
class JsonAgentComponent(CustomComponent):
display_name = "JsonAgent"
description = "Construct a json agent from an LLM and tools."
def build_config(self):
return {
"llm": {"display_name": "LLM"},
"toolkit": {"display_name": "Toolkit"},
}
def build(
self,
llm: BaseLanguageModel,
toolkit: JsonToolkit,
) -> AgentExecutor:
return create_json_agent(llm=llm, toolkit=toolkit)
| [
"langchain.agents.create_json_agent"
] | [((657, 700), 'langchain.agents.create_json_agent', 'create_json_agent', ([], {'llm': 'llm', 'toolkit': 'toolkit'}), '(llm=llm, toolkit=toolkit)\n', (674, 700), False, 'from langchain.agents import AgentExecutor, create_json_agent\n')] |
from typing import Annotated, List, Optional
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException, Query, Request
from fastapi.responses import StreamingResponse
from langchain.embeddings.ollama import OllamaEmbeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from logger import get_logger
from middlewares.auth import AuthBearer, get_current_user
from models.settings import BrainSettings, get_supabase_client
from models.user_usage import UserUsage
from modules.brain.service.brain_service import BrainService
from modules.chat.controller.chat.brainful_chat import BrainfulChat
from modules.chat.dto.chats import ChatItem, ChatQuestion
from modules.chat.dto.inputs import (
ChatUpdatableProperties,
CreateChatProperties,
QuestionAndAnswer,
)
from modules.chat.entity.chat import Chat
from modules.chat.service.chat_service import ChatService
from modules.notification.service.notification_service import NotificationService
from modules.user.entity.user_identity import UserIdentity
from packages.utils.telemetry import send_telemetry
from vectorstore.supabase import CustomSupabaseVectorStore
logger = get_logger(__name__)
chat_router = APIRouter()
notification_service = NotificationService()
brain_service = BrainService()
chat_service = ChatService()
def init_vector_store(user_id: UUID) -> CustomSupabaseVectorStore:
"""
Initialize the vector store
"""
brain_settings = BrainSettings()
supabase_client = get_supabase_client()
embeddings = None
if brain_settings.ollama_api_base_url:
embeddings = OllamaEmbeddings(
base_url=brain_settings.ollama_api_base_url
) # pyright: ignore reportPrivateUsage=none
else:
embeddings = OpenAIEmbeddings()
vector_store = CustomSupabaseVectorStore(
supabase_client, embeddings, table_name="vectors", user_id=user_id
)
return vector_store
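# Validate the user's access to the brain, pick the brain that best matches the question,
# and return a streaming answer generator for it.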
def get_answer_generator(
chat_id: UUID,
chat_question: ChatQuestion,
brain_id: UUID,
current_user: UserIdentity,
):
chat_instance = BrainfulChat()
chat_instance.validate_authorization(user_id=current_user.id, brain_id=brain_id)
user_usage = UserUsage(
id=current_user.id,
email=current_user.email,
)
vector_store = init_vector_store(user_id=current_user.id)
# Get History
history = chat_service.get_chat_history(chat_id)
# Generic
brain, metadata_brain = brain_service.find_brain_from_question(
brain_id, chat_question.question, current_user, chat_id, history, vector_store
)
send_telemetry("question_asked", {"model_name": brain.model})
gpt_answer_generator = chat_instance.get_answer_generator(
brain=brain,
chat_id=str(chat_id),
model=brain.model,
temperature=0.1,
streaming=True,
prompt_id=chat_question.prompt_id,
user_id=current_user.id,
user_email=current_user.email,
)
return gpt_answer_generator
@chat_router.get("/chat/healthz", tags=["Health"])
async def healthz():
return {"status": "ok"}
# get all chats
@chat_router.get("/chat", dependencies=[Depends(AuthBearer())], tags=["Chat"])
async def get_chats(current_user: UserIdentity = Depends(get_current_user)):
"""
Retrieve all chats for the current user.
- `current_user`: The current authenticated user.
- Returns a list of all chats for the user.
This endpoint retrieves all the chats associated with the current authenticated user. It returns a list of chat objects
containing the chat ID and chat name for each chat.
"""
chats = chat_service.get_user_chats(str(current_user.id))
return {"chats": chats}
# delete one chat
@chat_router.delete(
"/chat/{chat_id}", dependencies=[Depends(AuthBearer())], tags=["Chat"]
)
async def delete_chat(chat_id: UUID):
"""
Delete a specific chat by chat ID.
"""
notification_service.remove_chat_notifications(chat_id)
chat_service.delete_chat_from_db(chat_id)
return {"message": f"{chat_id} has been deleted."}
# update existing chat metadata
@chat_router.put(
"/chat/{chat_id}/metadata", dependencies=[Depends(AuthBearer())], tags=["Chat"]
)
async def update_chat_metadata_handler(
chat_data: ChatUpdatableProperties,
chat_id: UUID,
current_user: UserIdentity = Depends(get_current_user),
):
"""
Update chat attributes
"""
chat = chat_service.get_chat_by_id(
chat_id # pyright: ignore reportPrivateUsage=none
)
if str(current_user.id) != chat.user_id:
raise HTTPException(
status_code=403, # pyright: ignore reportPrivateUsage=none
detail="You should be the owner of the chat to update it.", # pyright: ignore reportPrivateUsage=none
)
return chat_service.update_chat(chat_id=chat_id, chat_data=chat_data)
# create new chat
@chat_router.post("/chat", dependencies=[Depends(AuthBearer())], tags=["Chat"])
async def create_chat_handler(
chat_data: CreateChatProperties,
current_user: UserIdentity = Depends(get_current_user),
):
"""
Create a new chat with initial chat messages.
"""
return chat_service.create_chat(user_id=current_user.id, chat_data=chat_data)
# add new question to chat
@chat_router.post(
"/chat/{chat_id}/question",
dependencies=[
Depends(
AuthBearer(),
),
],
tags=["Chat"],
)
async def create_question_handler(
request: Request,
chat_question: ChatQuestion,
chat_id: UUID,
brain_id: Annotated[UUID | None, Query()] = None,
current_user: UserIdentity = Depends(get_current_user),
):
try:
logger.info(
f"Creating question for chat {chat_id} with brain {brain_id} of type {type(brain_id)}"
)
gpt_answer_generator = get_answer_generator(
chat_id, chat_question, brain_id, current_user
)
chat_answer = gpt_answer_generator.generate_answer(
chat_id, chat_question, save_answer=True
)
return chat_answer
except HTTPException as e:
raise e
# stream new question response from chat
@chat_router.post(
"/chat/{chat_id}/question/stream",
dependencies=[
Depends(
AuthBearer(),
),
],
tags=["Chat"],
)
async def create_stream_question_handler(
request: Request,
chat_question: ChatQuestion,
chat_id: UUID,
brain_id: Annotated[UUID | None, Query()] = None,
current_user: UserIdentity = Depends(get_current_user),
) -> StreamingResponse:
chat_instance = BrainfulChat()
chat_instance.validate_authorization(user_id=current_user.id, brain_id=brain_id)
user_usage = UserUsage(
id=current_user.id,
email=current_user.email,
)
logger.info(
f"Creating question for chat {chat_id} with brain {brain_id} of type {type(brain_id)}"
)
gpt_answer_generator = get_answer_generator(
chat_id, chat_question, brain_id, current_user
)
try:
return StreamingResponse(
gpt_answer_generator.generate_stream(
chat_id, chat_question, save_answer=True
),
media_type="text/event-stream",
)
except HTTPException as e:
raise e
# get chat history
@chat_router.get(
"/chat/{chat_id}/history", dependencies=[Depends(AuthBearer())], tags=["Chat"]
)
async def get_chat_history_handler(
chat_id: UUID,
) -> List[ChatItem]:
# TODO: RBAC with current_user
return chat_service.get_chat_history_with_notifications(chat_id)
@chat_router.post(
"/chat/{chat_id}/question/answer",
dependencies=[Depends(AuthBearer())],
tags=["Chat"],
)
async def add_question_and_answer_handler(
chat_id: UUID,
question_and_answer: QuestionAndAnswer,
) -> Optional[Chat]:
"""
    Add a new question and answer to the chat.
"""
return chat_service.add_question_and_answer(chat_id, question_and_answer)
| [
"langchain.embeddings.ollama.OllamaEmbeddings",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((1158, 1178), 'logger.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (1168, 1178), False, 'from logger import get_logger\n'), ((1194, 1205), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (1203, 1205), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((1230, 1251), 'modules.notification.service.notification_service.NotificationService', 'NotificationService', ([], {}), '()\n', (1249, 1251), False, 'from modules.notification.service.notification_service import NotificationService\n'), ((1268, 1282), 'modules.brain.service.brain_service.BrainService', 'BrainService', ([], {}), '()\n', (1280, 1282), False, 'from modules.brain.service.brain_service import BrainService\n'), ((1298, 1311), 'modules.chat.service.chat_service.ChatService', 'ChatService', ([], {}), '()\n', (1309, 1311), False, 'from modules.chat.service.chat_service import ChatService\n'), ((1450, 1465), 'models.settings.BrainSettings', 'BrainSettings', ([], {}), '()\n', (1463, 1465), False, 'from models.settings import BrainSettings, get_supabase_client\n'), ((1488, 1509), 'models.settings.get_supabase_client', 'get_supabase_client', ([], {}), '()\n', (1507, 1509), False, 'from models.settings import BrainSettings, get_supabase_client\n'), ((1792, 1889), 'vectorstore.supabase.CustomSupabaseVectorStore', 'CustomSupabaseVectorStore', (['supabase_client', 'embeddings'], {'table_name': '"""vectors"""', 'user_id': 'user_id'}), "(supabase_client, embeddings, table_name='vectors',\n user_id=user_id)\n", (1817, 1889), False, 'from vectorstore.supabase import CustomSupabaseVectorStore\n'), ((2080, 2094), 'modules.chat.controller.chat.brainful_chat.BrainfulChat', 'BrainfulChat', ([], {}), '()\n', (2092, 2094), False, 'from modules.chat.controller.chat.brainful_chat import BrainfulChat\n'), ((2198, 2253), 'models.user_usage.UserUsage', 'UserUsage', ([], {'id': 'current_user.id', 'email': 'current_user.email'}), '(id=current_user.id, email=current_user.email)\n', (2207, 2253), False, 'from models.user_usage import UserUsage\n'), ((2593, 2654), 'packages.utils.telemetry.send_telemetry', 'send_telemetry', (['"""question_asked"""', "{'model_name': brain.model}"], {}), "('question_asked', {'model_name': brain.model})\n", (2607, 2654), False, 'from packages.utils.telemetry import send_telemetry\n'), ((3248, 3273), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (3255, 3273), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((4355, 4380), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (4362, 4380), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((5080, 5105), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (5087, 5105), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((5636, 5661), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (5643, 5661), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((6531, 6556), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (6538, 6556), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((6603, 6617), 'modules.chat.controller.chat.brainful_chat.BrainfulChat', 'BrainfulChat', ([], {}), '()\n', (6615, 6617), False, 'from modules.chat.controller.chat.brainful_chat import BrainfulChat\n'), ((6721, 6776), 
'models.user_usage.UserUsage', 'UserUsage', ([], {'id': 'current_user.id', 'email': 'current_user.email'}), '(id=current_user.id, email=current_user.email)\n', (6730, 6776), False, 'from models.user_usage import UserUsage\n'), ((1596, 1657), 'langchain.embeddings.ollama.OllamaEmbeddings', 'OllamaEmbeddings', ([], {'base_url': 'brain_settings.ollama_api_base_url'}), '(base_url=brain_settings.ollama_api_base_url)\n', (1612, 1657), False, 'from langchain.embeddings.ollama import OllamaEmbeddings\n'), ((1754, 1772), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1770, 1772), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((4593, 4688), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(403)', 'detail': '"""You should be the owner of the chat to update it."""'}), "(status_code=403, detail=\n 'You should be the owner of the chat to update it.')\n", (4606, 4688), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((3168, 3180), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (3178, 3180), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((3797, 3809), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (3807, 3809), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((4191, 4203), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (4201, 4203), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((4948, 4960), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (4958, 4960), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((5586, 5593), 'fastapi.Query', 'Query', ([], {}), '()\n', (5591, 5593), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((5387, 5399), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (5397, 5399), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((6481, 6488), 'fastapi.Query', 'Query', ([], {}), '()\n', (6486, 6488), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((6275, 6287), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (6285, 6287), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((7390, 7402), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (7400, 7402), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((7688, 7700), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (7698, 7700), False, 'from middlewares.auth import AuthBearer, get_current_user\n')] |
from typing import Any
from langchain.utilities import ArxivAPIWrapper
from pydantic import BaseModel, Field
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool.builtin_tool import BuiltinTool
class ArxivSearchInput(BaseModel):
query: str = Field(..., description="Search query.")
class ArxivSearchTool(BuiltinTool):
"""
A tool for searching articles on Arxiv.
"""
def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage | list[ToolInvokeMessage]:
"""
Invokes the Arxiv search tool with the given user ID and tool parameters.
Args:
user_id (str): The ID of the user invoking the tool.
tool_parameters (dict[str, Any]): The parameters for the tool, including the 'query' parameter.
Returns:
ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, which can be a single message or a list of messages.
"""
query = tool_parameters.get('query', '')
if not query:
return self.create_text_message('Please input query')
arxiv = ArxivAPIWrapper()
response = arxiv.run(query)
return self.create_text_message(self.summary(user_id=user_id, content=response))
| [
"langchain.utilities.ArxivAPIWrapper"
] | [((282, 321), 'pydantic.Field', 'Field', (['...'], {'description': '"""Search query."""'}), "(..., description='Search query.')\n", (287, 321), False, 'from pydantic import BaseModel, Field\n'), ((1160, 1177), 'langchain.utilities.ArxivAPIWrapper', 'ArxivAPIWrapper', ([], {}), '()\n', (1175, 1177), False, 'from langchain.utilities import ArxivAPIWrapper\n')] |
import json
import os
import pickle
from taskweaver.plugin import Plugin, register_plugin
@register_plugin
class DocumentRetriever(Plugin):
vectorstore = None
def _init(self):
try:
import tiktoken
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
except ImportError:
raise ImportError("Please install langchain-community first.")
self.embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
self.vectorstore = FAISS.load_local(
folder_path=self.config.get("index_folder"),
embeddings=self.embeddings,
)
with open(
os.path.join(
self.config.get("index_folder"),
"chunk_id_to_index.pkl",
),
"rb",
) as f:
self.chunk_id_to_index = pickle.load(f)
self.enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
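    # Retrieve the top-`size` chunks for the query, then expand each hit with neighboring
    # chunks up to roughly `target_length` tokens.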
def __call__(self, query: str, size: int = 5, target_length: int = 256):
if self.vectorstore is None:
self._init()
result = self.vectorstore.similarity_search(
query=query,
k=size,
)
expanded_chunks = self.do_expand(result, target_length)
return f"DocumentRetriever has done searching for `{query}`.\n" + self.ctx.wrap_text_with_delimiter_temporal(
"\n```json\n" + json.dumps(expanded_chunks, indent=4) + "```\n",
)
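    # Walk left and right from each retrieved chunk to adjacent chunk ids in the same source
    # document, appending neighbors until the `target_length` token budget is spent.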
def do_expand(self, result, target_length):
expanded_chunks = []
# do expansion
for r in result:
source = r.metadata["source"]
chunk_id = r.metadata["chunk_id"]
content = r.page_content
expanded_result = content
left_chunk_id, right_chunk_id = chunk_id - 1, chunk_id + 1
left_valid, right_valid = True, True
chunk_ids = [chunk_id]
while True:
current_length = len(self.enc.encode(expanded_result))
if f"{source}_{left_chunk_id}" in self.chunk_id_to_index:
chunk_ids.append(left_chunk_id)
left_chunk_index = self.vectorstore.index_to_docstore_id[
self.chunk_id_to_index[f"{source}_{left_chunk_id}"]
]
left_chunk = self.vectorstore.docstore.search(left_chunk_index)
encoded_left_chunk = self.enc.encode(left_chunk.page_content)
if len(encoded_left_chunk) + current_length < target_length:
expanded_result = left_chunk.page_content + expanded_result
left_chunk_id -= 1
current_length += len(encoded_left_chunk)
else:
expanded_result += self.enc.decode(
encoded_left_chunk[-(target_length - current_length) :],
)
current_length = target_length
break
else:
left_valid = False
if f"{source}_{right_chunk_id}" in self.chunk_id_to_index:
chunk_ids.append(right_chunk_id)
right_chunk_index = self.vectorstore.index_to_docstore_id[
self.chunk_id_to_index[f"{source}_{right_chunk_id}"]
]
right_chunk = self.vectorstore.docstore.search(right_chunk_index)
encoded_right_chunk = self.enc.encode(right_chunk.page_content)
if len(encoded_right_chunk) + current_length < target_length:
expanded_result += right_chunk.page_content
right_chunk_id += 1
current_length += len(encoded_right_chunk)
else:
expanded_result += self.enc.decode(
encoded_right_chunk[: target_length - current_length],
)
current_length = target_length
break
else:
right_valid = False
if not left_valid and not right_valid:
break
expanded_chunks.append(
{
"chunk": expanded_result,
"metadata": r.metadata,
# "length": current_length,
# "chunk_ids": chunk_ids
},
)
return expanded_chunks
| [
"langchain_community.embeddings.HuggingFaceEmbeddings"
] | [((499, 551), 'langchain_community.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""all-MiniLM-L6-v2"""'}), "(model_name='all-MiniLM-L6-v2')\n", (520, 551), False, 'from langchain_community.embeddings import HuggingFaceEmbeddings\n'), ((960, 1004), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-3.5-turbo"""'], {}), "('gpt-3.5-turbo')\n", (987, 1004), False, 'import tiktoken\n'), ((925, 939), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (936, 939), False, 'import pickle\n'), ((1466, 1503), 'json.dumps', 'json.dumps', (['expanded_chunks'], {'indent': '(4)'}), '(expanded_chunks, indent=4)\n', (1476, 1503), False, 'import json\n')] |
import os
from fedml.serving import FedMLPredictor
from fedml.serving import FedMLInferenceRunner
from langchain import PromptTemplate, LLMChain
from langchain.llms import HuggingFacePipeline
import torch
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
TextGenerationPipeline,
)
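# Wrap a small instruction-following model (EleutherAI/pythia-70m) in a LangChain LLMChain
# and serve it through FedML's inference runner.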
class Chatbot(FedMLPredictor): # Inherit FedMLClientPredictor
def __init__(self):
super().__init__()
PROMPT_FOR_GENERATION_FORMAT = f""""Below is an instruction that describes a task. Write a response that appropriately completes the request."
### Instruction:
{{instruction}}
### Response:
"""
prompt = PromptTemplate(
input_variables=["instruction"],
template=PROMPT_FOR_GENERATION_FORMAT
)
config = AutoConfig.from_pretrained("EleutherAI/pythia-70m")
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/pythia-70m",
torch_dtype=torch.float32, # float 16 not supported on CPU
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-70m", device_map="auto")
hf_pipeline = HuggingFacePipeline(
pipeline=TextGenerationPipeline(
model=model,
tokenizer=tokenizer,
return_full_text=True,
task="text-generation",
do_sample=True,
max_new_tokens=256,
top_p=0.92,
top_k=0
)
)
self.chatbot = LLMChain(llm=hf_pipeline, prompt=prompt, verbose=True)
def predict(self, request:dict):
input_dict = request
question: str = input_dict.get("text", "").strip()
if len(question) == 0:
response_text = "<received empty input; no response generated.>"
else:
response_text = self.chatbot.predict(instruction=question)
return {"generated_text": str(response_text)}
if __name__ == "__main__":
print("Program starts...")
# Parse arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=50051, help="port number")
args = parser.parse_args()
print(f"args.batch_size: {args.batch_size}")
# Parse environment variables
local_rank = int(os.environ.get("LOCAL_RANK", 100))
print(f"local rank: {local_rank}")
chatbot = Chatbot()
fedml_inference_runner = FedMLInferenceRunner(chatbot)
fedml_inference_runner.run() | [
"langchain.LLMChain",
"langchain.PromptTemplate"
] | [((2184, 2209), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2207, 2209), False, 'import argparse\n'), ((2559, 2588), 'fedml.serving.FedMLInferenceRunner', 'FedMLInferenceRunner', (['chatbot'], {}), '(chatbot)\n', (2579, 2588), False, 'from fedml.serving import FedMLInferenceRunner\n'), ((706, 797), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['instruction']", 'template': 'PROMPT_FOR_GENERATION_FORMAT'}), "(input_variables=['instruction'], template=\n PROMPT_FOR_GENERATION_FORMAT)\n", (720, 797), False, 'from langchain import PromptTemplate, LLMChain\n'), ((845, 896), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['"""EleutherAI/pythia-70m"""'], {}), "('EleutherAI/pythia-70m')\n", (871, 896), False, 'from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, TextGenerationPipeline\n'), ((913, 1049), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['"""EleutherAI/pythia-70m"""'], {'torch_dtype': 'torch.float32', 'trust_remote_code': '(True)', 'device_map': '"""auto"""'}), "('EleutherAI/pythia-70m', torch_dtype=\n torch.float32, trust_remote_code=True, device_map='auto')\n", (949, 1049), False, 'from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, TextGenerationPipeline\n'), ((1160, 1233), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""EleutherAI/pythia-70m"""'], {'device_map': '"""auto"""'}), "('EleutherAI/pythia-70m', device_map='auto')\n", (1189, 1233), False, 'from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, TextGenerationPipeline\n'), ((1635, 1689), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'hf_pipeline', 'prompt': 'prompt', 'verbose': '(True)'}), '(llm=hf_pipeline, prompt=prompt, verbose=True)\n', (1643, 1689), False, 'from langchain import PromptTemplate, LLMChain\n'), ((2431, 2464), 'os.environ.get', 'os.environ.get', (['"""LOCAL_RANK"""', '(100)'], {}), "('LOCAL_RANK', 100)\n", (2445, 2464), False, 'import os\n'), ((1299, 1469), 'transformers.TextGenerationPipeline', 'TextGenerationPipeline', ([], {'model': 'model', 'tokenizer': 'tokenizer', 'return_full_text': '(True)', 'task': '"""text-generation"""', 'do_sample': '(True)', 'max_new_tokens': '(256)', 'top_p': '(0.92)', 'top_k': '(0)'}), "(model=model, tokenizer=tokenizer, return_full_text=\n True, task='text-generation', do_sample=True, max_new_tokens=256, top_p\n =0.92, top_k=0)\n", (1321, 1469), False, 'from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, TextGenerationPipeline\n')] |
from langchain.utilities import BashProcess
from langchain.agents import load_tools
def get_built_in_tools(tools: list[str]):
bash = BashProcess()
load_tools(["human"])
return [bash]
| [
"langchain.utilities.BashProcess",
"langchain.agents.load_tools"
] | [((139, 152), 'langchain.utilities.BashProcess', 'BashProcess', ([], {}), '()\n', (150, 152), False, 'from langchain.utilities import BashProcess\n'), ((158, 179), 'langchain.agents.load_tools', 'load_tools', (["['human']"], {}), "(['human'])\n", (168, 179), False, 'from langchain.agents import load_tools\n')] |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This would makes sure Python is aware there is more than one sub-package within bigdl,
# physically located elsewhere.
# Otherwise there would be module not found error in non-pip's setting as Python would
# only search the first bigdl package and end up finding only one sub-package.
# This file is adapted from
# https://github.com/hwchase17/langchain/blob/master/langchain/llms/huggingface_pipeline.py
# The MIT License
# Copyright (c) Harrison Chase
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import importlib.util
import logging
from typing import Any, List, Mapping, Optional
from pydantic import Extra
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
DEFAULT_MODEL_ID = "gpt2"
DEFAULT_TASK = "text-generation"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
class TransformersPipelineLLM(LLM):
"""Wrapper around the BigDL-LLM Transformer-INT4 model in Transformer.pipeline()
Example:
.. code-block:: python
from bigdl.llm.langchain.llms import TransformersPipelineLLM
llm = TransformersPipelineLLM.from_model_id(model_id="decapoda-research/llama-7b-hf")
"""
pipeline: Any #: :meta private:
model_id: str = DEFAULT_MODEL_ID
"""Model name or model path to use."""
model_kwargs: Optional[dict] = None
"""Key word arguments passed to the model."""
pipeline_kwargs: Optional[dict] = None
"""Key word arguments passed to the pipeline."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@classmethod
def from_model_id(
cls,
model_id: str,
task: str,
model_kwargs: Optional[dict] = None,
pipeline_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> LLM:
"""Construct the pipeline object from model_id and task."""
try:
from bigdl.llm.transformers import (
AutoModel,
AutoModelForCausalLM,
# AutoModelForSeq2SeqLM,
)
from transformers import AutoTokenizer, LlamaTokenizer
from transformers import pipeline as hf_pipeline
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"Please install it with `pip install transformers`."
)
_model_kwargs = model_kwargs or {}
        # TODO: may refactor this code in the future
try:
tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
except:
tokenizer = LlamaTokenizer.from_pretrained(model_id, **_model_kwargs)
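        # Load the model with BigDL-LLM's INT4 optimization (load_in_4bit=True).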
try:
if task == "text-generation":
model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True, **_model_kwargs)
elif task in ("text2text-generation", "summarization"):
# TODO: support this when related PR merged
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, load_in_4bit=True, **_model_kwargs)
else:
raise ValueError(
f"Got invalid task {task}, "
f"currently only {VALID_TASKS} are supported"
)
except ImportError as e:
raise ValueError(
f"Could not load the {task} model due to missing dependencies."
) from e
if "trust_remote_code" in _model_kwargs:
_model_kwargs = {
k: v for k, v in _model_kwargs.items() if k != "trust_remote_code"
}
_pipeline_kwargs = pipeline_kwargs or {}
pipeline = hf_pipeline(
task=task,
model=model,
tokenizer=tokenizer,
device='cpu', # only cpu now
model_kwargs=_model_kwargs,
**_pipeline_kwargs,
)
if pipeline.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
return cls(
pipeline=pipeline,
model_id=model_id,
model_kwargs=_model_kwargs,
pipeline_kwargs=_pipeline_kwargs,
**kwargs,
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_id": self.model_id,
"model_kwargs": self.model_kwargs,
"pipeline_kwargs": self.pipeline_kwargs,
}
@property
def _llm_type(self) -> str:
return "BigDL-llm"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
response = self.pipeline(prompt)
if self.pipeline.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif self.pipeline.task == "text2text-generation":
text = response[0]["generated_text"]
elif self.pipeline.task == "summarization":
text = response[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
| [
"langchain.llms.utils.enforce_stop_tokens"
] | [((5354, 5476), 'transformers.pipeline', 'hf_pipeline', ([], {'task': 'task', 'model': 'model', 'tokenizer': 'tokenizer', 'device': '"""cpu"""', 'model_kwargs': '_model_kwargs'}), "(task=task, model=model, tokenizer=tokenizer, device='cpu',\n model_kwargs=_model_kwargs, **_pipeline_kwargs)\n", (5365, 5476), True, 'from transformers import pipeline as hf_pipeline\n'), ((4206, 4262), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (4235, 4262), False, 'from transformers import AutoTokenizer, LlamaTokenizer\n'), ((7329, 7360), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (7348, 7360), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((4303, 4360), 'transformers.LlamaTokenizer.from_pretrained', 'LlamaTokenizer.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (4333, 4360), False, 'from transformers import AutoTokenizer, LlamaTokenizer\n'), ((4441, 4528), 'bigdl.llm.transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_id'], {'load_in_4bit': '(True)'}), '(model_id, load_in_4bit=True, **\n _model_kwargs)\n', (4477, 4528), False, 'from bigdl.llm.transformers import AutoModel, AutoModelForCausalLM\n')] |
"""
This module provides an implementation for generating question data from documents.
Supported types of document sources include:
- plain text
- unstructured files: Text, PDF, PowerPoint, HTML, Images,
Excel spreadsheets, Word documents, Markdown, etc.
- documents from Google Drive (provide file id).
Currently support only one document a time.
"""
import ast
import asyncio
import csv
import os
import pickle
import re
from typing import Any, Dict, Iterator, List
from langchain.document_loaders import GoogleDriveLoader, UnstructuredFileLoader
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from tqdm import tqdm
from yival.common import utils
from yival.common.model_utils import llm_completion
from yival.data_generators.base_data_generator import BaseDataGenerator
from yival.schemas.common_structures import InputData
from yival.schemas.data_generator_configs import DocumentDataGeneratorConfig
from yival.schemas.model_configs import Request
PROMPT_TEMPLATE = """
Context information is below.
---------------------
{CONTEXT}
---------------------
Please do not introduce priori knowledge,
only consider the content of the previous context information,
generate 5 questions based on the following query.
Answer ONLY a python list containing all the questions generated.
Keep your output crisp, with only a '[]' bracketed list.
{QUERY}
"""
class DocumentDataGenerator(BaseDataGenerator):
config: DocumentDataGeneratorConfig
default_config: DocumentDataGeneratorConfig = DocumentDataGeneratorConfig(
prompt=PROMPT_TEMPLATE,
document="",
source="text",
num_questions_per_chunk=5,
text_question_template=None,
document_chunk_size=512,
number_of_examples=1,
question_gen_query=f"You are a Teacher/Professor. Your task is to setup \
5 questions for an upcoming quiz/examination. The questions \
should be diverse in nature across the document. Restrict \
the questions to the context information provided."
)
def __init__(self, config: DocumentDataGeneratorConfig):
super().__init__(config)
self.config = config
def load_document(self, source: str, document: str):
if source == 'text':
doc = Document(page_content=document)
return doc
elif source == 'file':
file_loader = UnstructuredFileLoader(document)
docs = file_loader.load()
doc = docs[0]
return doc
elif source == 'drive':
drive_loader = GoogleDriveLoader(file_ids=[document])
docs = drive_loader.load()
doc = docs[0]
return doc
else:
return None
def get_doc_context(self, doc: Document, chunk_size: int) -> List[str]:
# Split Document into chunks
splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size)
splits = splitter.split_documents([doc])
# Generate contexts from splits
contexts = [str(split.page_content) for split in splits]
return contexts
def prepare_messages(self) -> List[Dict[str, Any]]:
"""Prepare the messages for GPT API based on configurations."""
if not self.config.prompt:
self.config.prompt = PROMPT_TEMPLATE
document = self.load_document(self.config.source, self.config.document)
if document:
contexts = self.get_doc_context(document, self.config.chunk_size)
else:
raise TypeError
contents = []
for context in contexts:
content = "Context information is below.\n---------------------\n\n" + context + "\n"
content = content + "---------------------\nPlease do not introduce priori knowledge,\n"
content = content + "only consider the content of the previous context information,\n generate "
content = content + str(
self.config.num_questions_per_chunk
) + " questions based on the following query."
content = content + "Answer ONLY a python list containing all the questions generated.\n"
content = content + "Context information is below.\n---------------------\n\n"
content = content + "Keep your output crisp, with only a '[]' bracketed list.\n"
content = content + self.config.question_gen_query + "\n"
if self.config.text_question_template:
content = content + "Please generate the questions according to the following template:\n" + self.config.text_question_template + "\n"
contents.append(content)
return [{"role": "user", "content": content} for content in contents]
def process_output(
self, output_content: str, all_data: List[InputData],
chunk: List[InputData]
):
"""Process the output from GPT API and update data lists."""
output_ls = eval(output_content)
input_data_instance = InputData(
example_id=super().generate_example_id(output_content),
content={"data": output_ls}
)
all_data.append(input_data_instance)
chunk.append(input_data_instance)
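    # Generate question sets in parallel batches until number_of_examples is reached, optionally
    # caching them to pickle/CSV, and yield the examples in chunk-sized batches.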
def generate_examples(self) -> Iterator[List[InputData]]:
all_data: List[InputData] = []
# Loading data from existing path if exists
if self.config.output_path and os.path.exists(self.config.output_path):
with open(self.config.output_path, 'rb') as file:
all_data = pickle.load(file)
for i in range(0, len(all_data), self.config.chunk_size):
yield all_data[i:i + self.config.chunk_size]
return
chunk: List[InputData] = []
while len(all_data) < self.config.number_of_examples:
messages = self.prepare_messages()
message_batches = [
messages
] * (self.config.number_of_examples - len(all_data))
with tqdm(
total=self.config.number_of_examples,
desc="Generating Examples",
unit="example"
) as pbar:
responses = asyncio.run(
utils.parallel_completions(
message_batches,
self.config.model_name,
self.config.max_token,
pbar=pbar
)
)
for r in responses:
self.process_output(
r["choices"][0]["message"]["content"], all_data, chunk
)
if chunk and len(chunk) >= self.config.chunk_size:
yield chunk
chunk = []
if self.config.output_path:
with open(self.config.output_path, 'wb') as file:
pickle.dump(all_data, file)
print(
f"Data succesfully generated and saved to {self.config.output_path}"
)
if self.config.output_csv_path:
with open(self.config.output_csv_path, 'w', newline='') as csvfile:
rows = [
BaseDataGenerator.input_data_to_csv_row(data)
for data in all_data
]
header = rows[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=header)
writer.writeheader()
for row in rows:
writer.writerow(row)
print(
f"Data succesfully generated and saved to {self.config.output_csv_path}"
)
if chunk:
yield chunk
BaseDataGenerator.register_data_generator(
"document_data_generator", DocumentDataGenerator,
DocumentDataGeneratorConfig
)
def main():
import time
start_time = time.time()
generator = DocumentDataGenerator(DocumentDataGenerator.default_config)
res = generator.generate_examples()
for d in res:
print(d)
end_time = time.time()
elapsed_time = end_time - start_time
print(f"Execution time: {elapsed_time:.2f} seconds")
if __name__ == "__main__":
main()
| [
"langchain.document_loaders.GoogleDriveLoader",
"langchain.schema.Document",
"langchain.document_loaders.UnstructuredFileLoader",
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((7802, 7926), 'yival.data_generators.base_data_generator.BaseDataGenerator.register_data_generator', 'BaseDataGenerator.register_data_generator', (['"""document_data_generator"""', 'DocumentDataGenerator', 'DocumentDataGeneratorConfig'], {}), "('document_data_generator',\n DocumentDataGenerator, DocumentDataGeneratorConfig)\n", (7843, 7926), False, 'from yival.data_generators.base_data_generator import BaseDataGenerator\n'), ((1615, 2131), 'yival.schemas.data_generator_configs.DocumentDataGeneratorConfig', 'DocumentDataGeneratorConfig', ([], {'prompt': 'PROMPT_TEMPLATE', 'document': '""""""', 'source': '"""text"""', 'num_questions_per_chunk': '(5)', 'text_question_template': 'None', 'document_chunk_size': '(512)', 'number_of_examples': '(1)', 'question_gen_query': 'f"""You are a Teacher/Professor. Your task is to setup 5 questions for an upcoming quiz/examination. The questions should be diverse in nature across the document. Restrict the questions to the context information provided."""'}), "(prompt=PROMPT_TEMPLATE, document='', source=\n 'text', num_questions_per_chunk=5, text_question_template=None,\n document_chunk_size=512, number_of_examples=1, question_gen_query=\n f'You are a Teacher/Professor. Your task is to setup 5 questions for an upcoming quiz/examination. The questions should be diverse in nature across the document. Restrict the questions to the context information provided.'\n )\n", (1642, 2131), False, 'from yival.schemas.data_generator_configs import DocumentDataGeneratorConfig\n'), ((7980, 7991), 'time.time', 'time.time', ([], {}), '()\n', (7989, 7991), False, 'import time\n'), ((8159, 8170), 'time.time', 'time.time', ([], {}), '()\n', (8168, 8170), False, 'import time\n'), ((3007, 3060), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size'}), '(chunk_size=chunk_size)\n', (3037, 3060), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2418, 2449), 'langchain.schema.Document', 'Document', ([], {'page_content': 'document'}), '(page_content=document)\n', (2426, 2449), False, 'from langchain.schema import Document\n'), ((5534, 5573), 'os.path.exists', 'os.path.exists', (['self.config.output_path'], {}), '(self.config.output_path)\n', (5548, 5573), False, 'import os\n'), ((2530, 2562), 'langchain.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['document'], {}), '(document)\n', (2552, 2562), False, 'from langchain.document_loaders import GoogleDriveLoader, UnstructuredFileLoader\n'), ((5664, 5681), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (5675, 5681), False, 'import pickle\n'), ((6126, 6217), 'tqdm.tqdm', 'tqdm', ([], {'total': 'self.config.number_of_examples', 'desc': '"""Generating Examples"""', 'unit': '"""example"""'}), "(total=self.config.number_of_examples, desc='Generating Examples', unit\n ='example')\n", (6130, 6217), False, 'from tqdm import tqdm\n'), ((6977, 7004), 'pickle.dump', 'pickle.dump', (['all_data', 'file'], {}), '(all_data, file)\n', (6988, 7004), False, 'import pickle\n'), ((7470, 7512), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'fieldnames': 'header'}), '(csvfile, fieldnames=header)\n', (7484, 7512), False, 'import csv\n'), ((2709, 2747), 'langchain.document_loaders.GoogleDriveLoader', 'GoogleDriveLoader', ([], {'file_ids': '[document]'}), '(file_ids=[document])\n', (2726, 2747), False, 'from langchain.document_loaders import GoogleDriveLoader, UnstructuredFileLoader\n'), ((6345, 6451), 
'yival.common.utils.parallel_completions', 'utils.parallel_completions', (['message_batches', 'self.config.model_name', 'self.config.max_token'], {'pbar': 'pbar'}), '(message_batches, self.config.model_name, self.\n config.max_token, pbar=pbar)\n', (6371, 6451), False, 'from yival.common import utils\n'), ((7300, 7345), 'yival.data_generators.base_data_generator.BaseDataGenerator.input_data_to_csv_row', 'BaseDataGenerator.input_data_to_csv_row', (['data'], {}), '(data)\n', (7339, 7345), False, 'from yival.data_generators.base_data_generator import BaseDataGenerator\n')] |
from typing import AsyncGenerator, Optional, Tuple
from langchain import ConversationChain
import logging
from pydantic.v1 import SecretStr
from vocode.streaming.agent.base_agent import RespondAgent
from vocode.streaming.agent.utils import get_sentence_from_buffer
from langchain.schema import ChatMessage, AIMessage, HumanMessage
from langchain_community.chat_models import ChatAnthropic
from vocode import getenv
from vocode.streaming.models.agent import ChatAnthropicAgentConfig
from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
HumanMessagePromptTemplate,
)
from langchain.memory import ConversationBufferMemory
SENTENCE_ENDINGS = [".", "!", "?"]
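# Vocode agent backed by Anthropic Claude: single-shot responses go through LangChain's
# ConversationChain, while streamed responses call the Anthropic SDK directly and are
# emitted sentence by sentence.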
class ChatAnthropicAgent(RespondAgent[ChatAnthropicAgentConfig]):
def __init__(
self,
agent_config: ChatAnthropicAgentConfig,
logger: Optional[logging.Logger] = None,
anthropic_api_key: Optional[SecretStr] = None,
):
super().__init__(agent_config=agent_config, logger=logger)
import anthropic
# Convert anthropic_api_key to SecretStr if it's not None and not already a SecretStr
if anthropic_api_key is not None and not isinstance(
anthropic_api_key, SecretStr
):
anthropic_api_key = SecretStr(anthropic_api_key)
else:
# Retrieve anthropic_api_key from environment variable and convert to SecretStr
env_key = getenv("ANTHROPIC_API_KEY")
if env_key:
anthropic_api_key = SecretStr(env_key)
if not anthropic_api_key:
raise ValueError(
"ANTHROPIC_API_KEY must be set in environment or passed in as a SecretStr"
)
self.prompt = ChatPromptTemplate.from_messages(
[
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{input}"),
]
)
self.llm = ChatAnthropic(
model_name=agent_config.model_name,
anthropic_api_key=anthropic_api_key,
)
# streaming not well supported by langchain, so we will connect directly
self.anthropic_client = (
anthropic.AsyncAnthropic(api_key=str(anthropic_api_key))
if agent_config.generate_responses
else None
)
self.memory = ConversationBufferMemory(return_messages=True)
self.memory.chat_memory.messages.append(
HumanMessage(content=self.agent_config.prompt_preamble)
)
if agent_config.initial_message:
self.memory.chat_memory.messages.append(
AIMessage(content=agent_config.initial_message.text)
)
self.conversation = ConversationChain(
memory=self.memory, prompt=self.prompt, llm=self.llm
)
async def respond(
self,
human_input,
conversation_id: str,
is_interrupt: bool = False,
) -> Tuple[str, bool]:
text = await self.conversation.apredict(input=human_input)
self.logger.debug(f"LLM response: {text}")
return text, False
async def generate_response(
self,
human_input,
conversation_id: str,
is_interrupt: bool = False,
) -> AsyncGenerator[Tuple[str, bool], None]:
self.memory.chat_memory.messages.append(HumanMessage(content=human_input))
bot_memory_message = AIMessage(content="")
self.memory.chat_memory.messages.append(bot_memory_message)
prompt = self.llm._convert_messages_to_prompt(self.memory.chat_memory.messages)
if self.anthropic_client:
streamed_response = await self.anthropic_client.completions.create(
prompt=prompt,
max_tokens_to_sample=self.agent_config.max_tokens_to_sample,
model=self.agent_config.model_name,
stream=True,
)
buffer = ""
async for completion in streamed_response:
buffer += completion.completion
sentence, remainder = get_sentence_from_buffer(buffer)
if sentence:
bot_memory_message.content = bot_memory_message.content + sentence
buffer = remainder
yield sentence, True
continue
def update_last_bot_message_on_cut_off(self, message: str):
for memory_message in self.memory.chat_memory.messages[::-1]:
if (
isinstance(memory_message, ChatMessage)
and memory_message.role == "assistant"
) or isinstance(memory_message, AIMessage):
memory_message.content = message
return
| [
"langchain_community.chat_models.ChatAnthropic",
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.memory.ConversationBufferMemory",
"langchain.prompts.MessagesPlaceholder",
"langchain.schema.HumanMessage",
"langchain.schema.AIMessage",
"langchain.ConversationChain"
] | [((2147, 2238), 'langchain_community.chat_models.ChatAnthropic', 'ChatAnthropic', ([], {'model_name': 'agent_config.model_name', 'anthropic_api_key': 'anthropic_api_key'}), '(model_name=agent_config.model_name, anthropic_api_key=\n anthropic_api_key)\n', (2160, 2238), False, 'from langchain_community.chat_models import ChatAnthropic\n'), ((2556, 2602), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'return_messages': '(True)'}), '(return_messages=True)\n', (2580, 2602), False, 'from langchain.memory import ConversationBufferMemory\n'), ((2936, 3007), 'langchain.ConversationChain', 'ConversationChain', ([], {'memory': 'self.memory', 'prompt': 'self.prompt', 'llm': 'self.llm'}), '(memory=self.memory, prompt=self.prompt, llm=self.llm)\n', (2953, 3007), False, 'from langchain import ConversationChain\n'), ((3624, 3645), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': '""""""'}), "(content='')\n", (3633, 3645), False, 'from langchain.schema import ChatMessage, AIMessage, HumanMessage\n'), ((1468, 1496), 'pydantic.v1.SecretStr', 'SecretStr', (['anthropic_api_key'], {}), '(anthropic_api_key)\n', (1477, 1496), False, 'from pydantic.v1 import SecretStr\n'), ((1625, 1652), 'vocode.getenv', 'getenv', (['"""ANTHROPIC_API_KEY"""'], {}), "('ANTHROPIC_API_KEY')\n", (1631, 1652), False, 'from vocode import getenv\n'), ((2664, 2719), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'self.agent_config.prompt_preamble'}), '(content=self.agent_config.prompt_preamble)\n', (2676, 2719), False, 'from langchain.schema import ChatMessage, AIMessage, HumanMessage\n'), ((3559, 3592), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'human_input'}), '(content=human_input)\n', (3571, 3592), False, 'from langchain.schema import ChatMessage, AIMessage, HumanMessage\n'), ((1713, 1731), 'pydantic.v1.SecretStr', 'SecretStr', (['env_key'], {}), '(env_key)\n', (1722, 1731), False, 'from pydantic.v1 import SecretStr\n'), ((1988, 2032), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""history"""'}), "(variable_name='history')\n", (2007, 2032), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate\n'), ((2050, 2101), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{input}"""'], {}), "('{input}')\n", (2090, 2101), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate\n'), ((2840, 2892), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'agent_config.initial_message.text'}), '(content=agent_config.initial_message.text)\n', (2849, 2892), False, 'from langchain.schema import ChatMessage, AIMessage, HumanMessage\n'), ((4286, 4318), 'vocode.streaming.agent.utils.get_sentence_from_buffer', 'get_sentence_from_buffer', (['buffer'], {}), '(buffer)\n', (4310, 4318), False, 'from vocode.streaming.agent.utils import get_sentence_from_buffer\n')] |
from typing import Any, Dict
from langchain.base_language import BaseLanguageModel
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain.chains import ConversationChain
from real_agents.adapters.executors.base import BaseExecutor
from real_agents.adapters.memory import ConversationBufferMemory
class ChatExecutor(BaseExecutor):
"""Chat Executor."""
_DEFAULT_TEMPLATE = "The following is a friendly conversation between a human and an AI. \
The AI is talkative and provides lots of specific details from its context. \
If the AI does not know the answer to a question, it truthfully says it does not know."
output_key: str = "result"
def __init__(self) -> None:
"""Initialize the executor"""
self.memory = ConversationBufferMemory(return_messages=True)
def run(
self,
user_intent: str,
llm: BaseLanguageModel,
verbose: bool = True,
) -> Dict[str, Any]:
"""Run the executor.
Args:
user_intent: User intent to execute.
grounding_source: Grounding source to execute the program on.
llm: Language model to use.
verbose: Whether to print the logging.
Returns:
Result of string.
"""
prompt = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(self._DEFAULT_TEMPLATE),
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{input}"),
]
)
method = ConversationChain(
llm=llm,
prompt=prompt,
verbose=verbose,
memory=self.memory,
)
result = method.predict(input=user_intent)
output = {self.output_key: result}
return output
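# Hypothetical usage sketch (my_chat_llm stands in for any BaseLanguageModel; it is
# not defined in this module):
#
#   executor = ChatExecutor()
#   output = executor.run("Explain what a vector database is", llm=my_chat_llm)
#   print(output["result"])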
| [
"langchain.chains.ConversationChain",
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.prompts.SystemMessagePromptTemplate.from_template",
"langchain.prompts.MessagesPlaceholder"
] | [((894, 940), 'real_agents.adapters.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'return_messages': '(True)'}), '(return_messages=True)\n', (918, 940), False, 'from real_agents.adapters.memory import ConversationBufferMemory\n'), ((1746, 1824), 'langchain.chains.ConversationChain', 'ConversationChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'verbose': 'verbose', 'memory': 'self.memory'}), '(llm=llm, prompt=prompt, verbose=verbose, memory=self.memory)\n', (1763, 1824), False, 'from langchain.chains import ConversationChain\n'), ((1502, 1567), 'langchain.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['self._DEFAULT_TEMPLATE'], {}), '(self._DEFAULT_TEMPLATE)\n', (1543, 1567), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate\n'), ((1586, 1630), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""history"""'}), "(variable_name='history')\n", (1605, 1630), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate\n'), ((1649, 1700), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{input}"""'], {}), "('{input}')\n", (1689, 1700), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate\n')] |
import os
from dotenv import load_dotenv, find_dotenv
from langchain import HuggingFaceHub
from langchain import PromptTemplate, LLMChain, OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import YoutubeLoader
import textwrap
# --------------------------------------------------------------
# Load the HuggingFaceHub API token from the .env file
# --------------------------------------------------------------
load_dotenv(find_dotenv())
HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]
# --------------------------------------------------------------
# Load the LLM model from the HuggingFaceHub
# --------------------------------------------------------------
repo_id = "tiiuae/falcon-7b-instruct" # See https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads for some other options
falcon_llm = HuggingFaceHub(
repo_id=repo_id, model_kwargs={"temperature": 0.1, "max_new_tokens": 500}
)
# --------------------------------------------------------------
# Create a PromptTemplate and LLMChain
# --------------------------------------------------------------
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=falcon_llm)
# --------------------------------------------------------------
# Run the LLMChain
# --------------------------------------------------------------
question = "How do I make a sandwich?"
response = llm_chain.run(question)
wrapped_text = textwrap.fill(
response, width=100, break_long_words=False, replace_whitespace=False
)
print(wrapped_text)
# --------------------------------------------------------------
# Load a video transcript from YouTube
# --------------------------------------------------------------
video_url = "https://www.youtube.com/watch?v=riXpu1tHzl0"
loader = YoutubeLoader.from_youtube_url(video_url)
transcript = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=3000)
docs = text_splitter.split_documents(transcript)
# --------------------------------------------------------------
# Summarization with LangChain
# --------------------------------------------------------------
# Add map_prompt and combine_prompt to the chain for custom summarization
chain = load_summarize_chain(falcon_llm, chain_type="map_reduce", verbose=True)
print(chain.llm_chain.prompt.template)
print(chain.combine_document_chain.llm_chain.prompt.template)
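# A minimal sketch of the custom prompts mentioned above; the template wording is
# illustrative, only the map_prompt/combine_prompt keyword arguments come from the
# load_summarize_chain API ("text" is the default document variable name).
#
# map_prompt = PromptTemplate(
#     template="Write a concise summary of this part of the transcript:\n{text}\nSUMMARY:",
#     input_variables=["text"],
# )
# combine_prompt = PromptTemplate(
#     template="Combine the partial summaries below into a single summary:\n{text}",
#     input_variables=["text"],
# )
# chain = load_summarize_chain(
#     falcon_llm,
#     chain_type="map_reduce",
#     map_prompt=map_prompt,
#     combine_prompt=combine_prompt,
#     verbose=True,
# )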
# --------------------------------------------------------------
# Test the Falcon model with text summarization
# --------------------------------------------------------------
output_summary = chain.run(docs)
wrapped_text = textwrap.fill(
output_summary, width=100, break_long_words=False, replace_whitespace=False
)
print(wrapped_text)
# --------------------------------------------------------------
# Load an OpenAI model for comparison
# --------------------------------------------------------------
openai_llm = OpenAI(
model_name="text-davinci-003", temperature=0.1, max_tokens=500
) # max token length is 4097
chain = load_summarize_chain(openai_llm, chain_type="map_reduce", verbose=True)
output_summary = chain.run(docs)
wrapped_text = textwrap.fill(
output_summary, width=100, break_long_words=False, replace_whitespace=False
)
print(wrapped_text)
| [
"langchain.chains.summarize.load_summarize_chain",
"langchain.LLMChain",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.OpenAI",
"langchain.document_loaders.YoutubeLoader.from_youtube_url",
"langchain.HuggingFaceHub",
"langchain.PromptTemplate"
] | [((955, 1048), 'langchain.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': 'repo_id', 'model_kwargs': "{'temperature': 0.1, 'max_new_tokens': 500}"}), "(repo_id=repo_id, model_kwargs={'temperature': 0.1,\n 'max_new_tokens': 500})\n", (969, 1048), False, 'from langchain import HuggingFaceHub\n'), ((1305, 1368), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['question']"}), "(template=template, input_variables=['question'])\n", (1319, 1368), False, 'from langchain import PromptTemplate, LLMChain, OpenAI\n'), ((1381, 1420), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'falcon_llm'}), '(prompt=prompt, llm=falcon_llm)\n', (1389, 1420), False, 'from langchain import PromptTemplate, LLMChain, OpenAI\n'), ((1662, 1750), 'textwrap.fill', 'textwrap.fill', (['response'], {'width': '(100)', 'break_long_words': '(False)', 'replace_whitespace': '(False)'}), '(response, width=100, break_long_words=False,\n replace_whitespace=False)\n', (1675, 1750), False, 'import textwrap\n'), ((2012, 2053), 'langchain.document_loaders.YoutubeLoader.from_youtube_url', 'YoutubeLoader.from_youtube_url', (['video_url'], {}), '(video_url)\n', (2042, 2053), False, 'from langchain.document_loaders import YoutubeLoader\n'), ((2098, 2145), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(3000)'}), '(chunk_size=3000)\n', (2128, 2145), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2440, 2511), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['falcon_llm'], {'chain_type': '"""map_reduce"""', 'verbose': '(True)'}), "(falcon_llm, chain_type='map_reduce', verbose=True)\n", (2460, 2511), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((2841, 2935), 'textwrap.fill', 'textwrap.fill', (['output_summary'], {'width': '(100)', 'break_long_words': '(False)', 'replace_whitespace': '(False)'}), '(output_summary, width=100, break_long_words=False,\n replace_whitespace=False)\n', (2854, 2935), False, 'import textwrap\n'), ((3142, 3212), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""text-davinci-003"""', 'temperature': '(0.1)', 'max_tokens': '(500)'}), "(model_name='text-davinci-003', temperature=0.1, max_tokens=500)\n", (3148, 3212), False, 'from langchain import PromptTemplate, LLMChain, OpenAI\n'), ((3255, 3326), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['openai_llm'], {'chain_type': '"""map_reduce"""', 'verbose': '(True)'}), "(openai_llm, chain_type='map_reduce', verbose=True)\n", (3275, 3326), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((3375, 3469), 'textwrap.fill', 'textwrap.fill', (['output_summary'], {'width': '(100)', 'break_long_words': '(False)', 'replace_whitespace': '(False)'}), '(output_summary, width=100, break_long_words=False,\n replace_whitespace=False)\n', (3388, 3469), False, 'import textwrap\n'), ((541, 554), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (552, 554), False, 'from dotenv import load_dotenv, find_dotenv\n')] |
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import json
import logging
from dataclasses import dataclass
from typing import Any, Dict, List, Mapping, Optional, Tuple
import dpath.util
from airbyte_cdk.destinations.vector_db_based.config import ProcessingConfigModel, SeparatorSplitterConfigModel, TextSplitterConfigModel
from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier
from airbyte_cdk.models import AirbyteRecordMessage, ConfiguredAirbyteCatalog, ConfiguredAirbyteStream, DestinationSyncMode
from airbyte_cdk.utils.traced_exception import AirbyteTracedException, FailureType
from langchain.document_loaders.base import Document
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter
from langchain.utils import stringify_dict
METADATA_STREAM_FIELD = "_ab_stream"
METADATA_RECORD_ID_FIELD = "_ab_record_id"
CDC_DELETED_FIELD = "_ab_cdc_deleted_at"
@dataclass
class Chunk:
page_content: Optional[str]
metadata: Dict[str, Any]
record: AirbyteRecordMessage
embedding: Optional[List[float]] = None
headers_to_split_on = ["(?:^|\n)# ", "(?:^|\n)## ", "(?:^|\n)### ", "(?:^|\n)#### ", "(?:^|\n)##### ", "(?:^|\n)###### "]
class DocumentProcessor:
"""
DocumentProcessor is a helper class that generates documents from Airbyte records.
It is used to generate documents from records before writing them to the destination:
* The text fields are extracted from the record and concatenated to a single string.
* The metadata fields are extracted from the record and added to the document metadata.
* The document is split into chunks of a given size using a langchain text splitter.
The Writer class uses the DocumentProcessor class to internally generate documents from records - in most cases you don't need to use it directly,
except if you want to implement a custom writer.
The config parameters specified by the ProcessingConfigModel has to be made part of the connector spec to allow the user to configure the document processor.
Calling DocumentProcessor.check_config(config) will validate the config and return an error message if the config is invalid.
"""
streams: Mapping[str, ConfiguredAirbyteStream]
@staticmethod
def check_config(config: ProcessingConfigModel) -> Optional[str]:
if config.text_splitter is not None and config.text_splitter.mode == "separator":
for s in config.text_splitter.separators:
try:
separator = json.loads(s)
if not isinstance(separator, str):
return f"Invalid separator: {s}. Separator needs to be a valid JSON string using double quotes."
except json.decoder.JSONDecodeError:
return f"Invalid separator: {s}. Separator needs to be a valid JSON string using double quotes."
return None
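    # Hypothetical usage sketch: validating a processing config before constructing
    # the processor. The field values below are illustrative, not defaults.
    #
    #   config = ProcessingConfigModel(
    #       chunk_size=1000, chunk_overlap=0, text_fields=["title", "body"], metadata_fields=["id"]
    #   )
    #   error = DocumentProcessor.check_config(config)
    #   if error is not None:
    #       raise ValueError(error)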
def _get_text_splitter(
self, chunk_size: int, chunk_overlap: int, splitter_config: Optional[TextSplitterConfigModel]
) -> RecursiveCharacterTextSplitter:
if splitter_config is None:
splitter_config = SeparatorSplitterConfigModel(mode="separator")
if splitter_config.mode == "separator":
return RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
separators=[json.loads(s) for s in splitter_config.separators],
keep_separator=splitter_config.keep_separator,
disallowed_special=(),
)
if splitter_config.mode == "markdown":
return RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
separators=headers_to_split_on[: splitter_config.split_level],
is_separator_regex=True,
keep_separator=True,
disallowed_special=(),
)
if splitter_config.mode == "code":
return RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
separators=RecursiveCharacterTextSplitter.get_separators_for_language(Language(splitter_config.language)),
disallowed_special=(),
)
def __init__(self, config: ProcessingConfigModel, catalog: ConfiguredAirbyteCatalog):
self.streams = {create_stream_identifier(stream.stream): stream for stream in catalog.streams}
self.splitter = self._get_text_splitter(config.chunk_size, config.chunk_overlap, config.text_splitter)
self.text_fields = config.text_fields
self.metadata_fields = config.metadata_fields
self.field_name_mappings = config.field_name_mappings
self.logger = logging.getLogger("airbyte.document_processor")
def process(self, record: AirbyteRecordMessage) -> Tuple[List[Chunk], Optional[str]]:
"""
Generate documents from records.
:param records: List of AirbyteRecordMessages
:return: Tuple of (List of document chunks, record id to delete if a stream is in dedup mode to avoid stale documents in the vector store)
"""
if CDC_DELETED_FIELD in record.data and record.data[CDC_DELETED_FIELD]:
return [], self._extract_primary_key(record)
doc = self._generate_document(record)
if doc is None:
text_fields = ", ".join(self.text_fields) if self.text_fields else "all fields"
raise AirbyteTracedException(
internal_message="No text fields found in record",
message=f"Record {str(record.data)[:250]}... does not contain any of the configured text fields: {text_fields}. Please check your processing configuration, there has to be at least one text field set in each record.",
failure_type=FailureType.config_error,
)
chunks = [
Chunk(page_content=chunk_document.page_content, metadata=chunk_document.metadata, record=record)
for chunk_document in self._split_document(doc)
]
id_to_delete = doc.metadata[METADATA_RECORD_ID_FIELD] if METADATA_RECORD_ID_FIELD in doc.metadata else None
return chunks, id_to_delete
def _generate_document(self, record: AirbyteRecordMessage) -> Optional[Document]:
relevant_fields = self._extract_relevant_fields(record, self.text_fields)
if len(relevant_fields) == 0:
return None
text = stringify_dict(relevant_fields)
metadata = self._extract_metadata(record)
return Document(page_content=text, metadata=metadata)
def _extract_relevant_fields(self, record: AirbyteRecordMessage, fields: Optional[List[str]]) -> Dict[str, Any]:
relevant_fields = {}
if fields and len(fields) > 0:
for field in fields:
values = dpath.util.values(record.data, field, separator=".")
if values and len(values) > 0:
relevant_fields[field] = values if len(values) > 1 else values[0]
else:
relevant_fields = record.data
return self._remap_field_names(relevant_fields)
def _extract_metadata(self, record: AirbyteRecordMessage) -> Dict[str, Any]:
metadata = self._extract_relevant_fields(record, self.metadata_fields)
metadata[METADATA_STREAM_FIELD] = create_stream_identifier(record)
primary_key = self._extract_primary_key(record)
if primary_key:
metadata[METADATA_RECORD_ID_FIELD] = primary_key
return metadata
def _extract_primary_key(self, record: AirbyteRecordMessage) -> Optional[str]:
stream_identifier = create_stream_identifier(record)
current_stream: ConfiguredAirbyteStream = self.streams[stream_identifier]
# if the sync mode is deduping, use the primary key to upsert existing records instead of appending new ones
if not current_stream.primary_key or current_stream.destination_sync_mode != DestinationSyncMode.append_dedup:
return None
primary_key = []
for key in current_stream.primary_key:
try:
primary_key.append(str(dpath.util.get(record.data, key)))
except KeyError:
primary_key.append("__not_found__")
stringified_primary_key = "_".join(primary_key)
return f"{stream_identifier}_{stringified_primary_key}"
def _split_document(self, doc: Document) -> List[Document]:
chunks: List[Document] = self.splitter.split_documents([doc])
return chunks
def _remap_field_names(self, fields: Dict[str, Any]) -> Dict[str, Any]:
if not self.field_name_mappings:
return fields
new_fields = fields.copy()
for mapping in self.field_name_mappings:
if mapping.from_field in new_fields:
new_fields[mapping.to_field] = new_fields.pop(mapping.from_field)
return new_fields
| [
"langchain.document_loaders.base.Document",
"langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder",
"langchain.text_splitter.Language",
"langchain.utils.stringify_dict"
] | [((4888, 4935), 'logging.getLogger', 'logging.getLogger', (['"""airbyte.document_processor"""'], {}), "('airbyte.document_processor')\n", (4905, 4935), False, 'import logging\n'), ((6600, 6631), 'langchain.utils.stringify_dict', 'stringify_dict', (['relevant_fields'], {}), '(relevant_fields)\n', (6614, 6631), False, 'from langchain.utils import stringify_dict\n'), ((6697, 6743), 'langchain.document_loaders.base.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (6705, 6743), False, 'from langchain.document_loaders.base import Document\n'), ((7489, 7521), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['record'], {}), '(record)\n', (7513, 7521), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((7799, 7831), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['record'], {}), '(record)\n', (7823, 7831), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((3160, 3206), 'airbyte_cdk.destinations.vector_db_based.config.SeparatorSplitterConfigModel', 'SeparatorSplitterConfigModel', ([], {'mode': '"""separator"""'}), "(mode='separator')\n", (3188, 3206), False, 'from airbyte_cdk.destinations.vector_db_based.config import ProcessingConfigModel, SeparatorSplitterConfigModel, TextSplitterConfigModel\n'), ((3674, 3923), 'langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder', 'RecursiveCharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'separators': 'headers_to_split_on[:splitter_config.split_level]', 'is_separator_regex': '(True)', 'keep_separator': '(True)', 'disallowed_special': '()'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap, separators=headers_to_split_on[:\n splitter_config.split_level], is_separator_regex=True, keep_separator=\n True, disallowed_special=())\n', (3726, 3923), False, 'from langchain.text_splitter import Language, RecursiveCharacterTextSplitter\n'), ((4513, 4552), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['stream.stream'], {}), '(stream.stream)\n', (4537, 4552), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((2542, 2555), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (2552, 2555), False, 'import json\n'), ((3440, 3453), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (3450, 3453), False, 'import json\n'), ((4308, 4342), 'langchain.text_splitter.Language', 'Language', (['splitter_config.language'], {}), '(splitter_config.language)\n', (4316, 4342), False, 'from langchain.text_splitter import Language, RecursiveCharacterTextSplitter\n')] |
from dotenv import load_dotenv
from langchain import OpenAI
from langchain.document_loaders.csv_loader import CSVLoader
load_dotenv()
filepath = "academy/academy.csv"
loader = CSVLoader(filepath)
data = loader.load()
print(data)
llm = OpenAI(temperature=0)
from langchain.agents import create_csv_agent
agent = create_csv_agent(llm, filepath, verbose=True)
agent.run("What percentage of the respondents are students versus professionals?")
agent.run("List the top 3 devices that the respondents use to submit their responses")
agent.run("Consider iOS and Android as mobile devices. What is the percentage of respondents that discovered us through social media submitting this from a mobile device?")
| [
"langchain.document_loaders.csv_loader.CSVLoader",
"langchain.agents.create_csv_agent",
"langchain.OpenAI"
] | [((122, 135), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (133, 135), False, 'from dotenv import load_dotenv\n'), ((179, 198), 'langchain.document_loaders.csv_loader.CSVLoader', 'CSVLoader', (['filepath'], {}), '(filepath)\n', (188, 198), False, 'from langchain.document_loaders.csv_loader import CSVLoader\n'), ((239, 260), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (245, 260), False, 'from langchain import OpenAI\n'), ((316, 361), 'langchain.agents.create_csv_agent', 'create_csv_agent', (['llm', 'filepath'], {'verbose': '(True)'}), '(llm, filepath, verbose=True)\n', (332, 361), False, 'from langchain.agents import create_csv_agent\n')] |
from waifu.llm.Brain import Brain
from waifu.llm.VectorDB import VectorDB
from waifu.llm.SentenceTransformer import STEmbedding
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from typing import Any, List, Mapping, Optional
from langchain.schema import BaseMessage
import openai
class GPT(Brain):
def __init__(self, api_key: str,
name: str,
stream: bool=False,
callback=None,
model: str='gpt-3.5-turbo',
proxy: str=''):
self.llm = ChatOpenAI(openai_api_key=api_key,
model_name=model,
streaming=stream,
callbacks=[callback],
temperature=0.85)
self.llm_nonstream = ChatOpenAI(openai_api_key=api_key, model_name=model)
self.embedding = OpenAIEmbeddings(openai_api_key=api_key)
# self.embedding = STEmbedding()
self.vectordb = VectorDB(self.embedding, f'./memory/{name}.csv')
if proxy != '':
openai.proxy = proxy
def think(self, messages: List[BaseMessage]):
return self.llm(messages).content
def think_nonstream(self, messages: List[BaseMessage]):
return self.llm_nonstream(messages).content
def store_memory(self, text: str | list):
        '''Store the given text (or list of texts) as memory embeddings.'''
self.vectordb.store(text)
def extract_memory(self, text: str, top_n: int = 10):
        '''Retrieve the top_n memories most relevant to the given text.'''
return self.vectordb.query(text, top_n) | [
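# Hypothetical usage sketch (the API key and name are placeholders):
#
#   brain = GPT(api_key="sk-...", name="alice")
#   brain.store_memory("The user likes green tea.")
#   memories = brain.extract_memory("What does the user like to drink?", top_n=3)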
"langchain.embeddings.OpenAIEmbeddings",
"langchain.chat_models.ChatOpenAI"
] | [((576, 690), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'api_key', 'model_name': 'model', 'streaming': 'stream', 'callbacks': '[callback]', 'temperature': '(0.85)'}), '(openai_api_key=api_key, model_name=model, streaming=stream,\n callbacks=[callback], temperature=0.85)\n', (586, 690), False, 'from langchain.chat_models import ChatOpenAI\n'), ((812, 864), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'api_key', 'model_name': 'model'}), '(openai_api_key=api_key, model_name=model)\n', (822, 864), False, 'from langchain.chat_models import ChatOpenAI\n'), ((890, 930), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'api_key'}), '(openai_api_key=api_key)\n', (906, 930), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((996, 1044), 'waifu.llm.VectorDB.VectorDB', 'VectorDB', (['self.embedding', 'f"""./memory/{name}.csv"""'], {}), "(self.embedding, f'./memory/{name}.csv')\n", (1004, 1044), False, 'from waifu.llm.VectorDB import VectorDB\n')] |
import re
from typing import Union
from langchain.schema import AgentAction, AgentFinish, OutputParserException
from src.agents.agent import AgentOutputParser
class ReActOutputParser(AgentOutputParser):
"""Output parser for the ReAct agent."""
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
action_prefix = "Action: "
if not text.strip().split("\n")[-1].startswith(action_prefix):
raise OutputParserException(f"Could not parse LLM Output: {text}")
action_block = text.strip().split("\n")[-1]
action_str = action_block[len(action_prefix) :]
# Parse out the action and the directive.
re_matches = re.search(r"(.*?)\[(.*?)\]", action_str)
if re_matches is None:
raise OutputParserException(
f"Could not parse action directive: {action_str}"
)
action, action_input = re_matches.group(1), re_matches.group(2)
if action == "Finish":
return AgentFinish({"output": action_input}, text)
else:
return AgentAction(action, action_input, text)
@property
def _type(self) -> str:
return "react"
| [
"langchain.schema.AgentFinish",
"langchain.schema.AgentAction",
"langchain.schema.OutputParserException"
] | [((685, 726), 're.search', 're.search', (['"""(.*?)\\\\[(.*?)\\\\]"""', 'action_str'], {}), "('(.*?)\\\\[(.*?)\\\\]', action_str)\n", (694, 726), False, 'import re\n'), ((444, 504), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM Output: {text}"""'], {}), "(f'Could not parse LLM Output: {text}')\n", (465, 504), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((775, 847), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse action directive: {action_str}"""'], {}), "(f'Could not parse action directive: {action_str}')\n", (796, 847), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((1000, 1043), 'langchain.schema.AgentFinish', 'AgentFinish', (["{'output': action_input}", 'text'], {}), "({'output': action_input}, text)\n", (1011, 1043), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((1077, 1116), 'langchain.schema.AgentAction', 'AgentAction', (['action', 'action_input', 'text'], {}), '(action, action_input, text)\n', (1088, 1116), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n')] |
import re
from langchain.agents import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException
from typing import Union
from cat.mad_hatter.mad_hatter import MadHatter
from cat.log import log
class ChooseProcedureOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
# Check if agent should finish
if "Final Answer:" in llm_output:
return AgentFinish(
# Return values is generally always a dictionary with a single `output` key
# It is not recommended to try anything else at the moment :)
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
# Parse out the action and action input
regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
match = re.search(regex, llm_output, re.DOTALL)
if not match:
raise OutputParserException(f"Could not parse LLM output: `{llm_output}`")
# Extract action
action = match.group(1).strip()
action_input = match.group(2)
if action == "none_of_the_others":
return AgentFinish(
# Return values is generally always a dictionary with a single `output` key
# It is not recommended to try anything else at the moment :)
return_values={"output": None},
log=llm_output,
)
mh = MadHatter()
for Form in mh.forms:
if Form.name == action:
return AgentFinish(
return_values={
"output": None,
"form": action
},
log=llm_output,
)
# Return the action and action input
return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output) | [
"langchain.schema.AgentFinish",
"langchain.schema.OutputParserException"
] | [((936, 975), 're.search', 're.search', (['regex', 'llm_output', 're.DOTALL'], {}), '(regex, llm_output, re.DOTALL)\n', (945, 975), False, 'import re\n'), ((1551, 1562), 'cat.mad_hatter.mad_hatter.MadHatter', 'MadHatter', ([], {}), '()\n', (1560, 1562), False, 'from cat.mad_hatter.mad_hatter import MadHatter\n'), ((1016, 1084), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{llm_output}`"""'], {}), "(f'Could not parse LLM output: `{llm_output}`')\n", (1037, 1084), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((1260, 1319), 'langchain.schema.AgentFinish', 'AgentFinish', ([], {'return_values': "{'output': None}", 'log': 'llm_output'}), "(return_values={'output': None}, log=llm_output)\n", (1271, 1319), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((1653, 1728), 'langchain.schema.AgentFinish', 'AgentFinish', ([], {'return_values': "{'output': None, 'form': action}", 'log': 'llm_output'}), "(return_values={'output': None, 'form': action}, log=llm_output)\n", (1664, 1728), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n')] |
import logging
from typing import Any, List, Dict, Optional
from langchain.callbacks.manager import AsyncCallbackManagerForChainRun
from ix.chains.callbacks import IxHandler
from langchain import LLMChain as LangchainLLMChain
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain_community.chat_models import ChatOpenAI
from langchain.prompts.chat import (
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
)
from langchain.tools import Tool, format_tool_to_openai_function
from ix.chains.functions import FunctionSchema
from ix.task_log.models import TaskLogMessage
logger = logging.getLogger(__name__)
TEMPLATE_CLASSES = {
"system": SystemMessagePromptTemplate,
"user": HumanMessagePromptTemplate,
"assistant": AIMessagePromptTemplate,
}
class LLMChain(LangchainLLMChain):
"""
Extension of LLMChain to provide additional functionality:
- OpenAI functions may be connected as functions.
- input_keys excludes memory variables so that memory may be directly attached.
"""
# List of OpenAI functions to include in requests.
functions: List[FunctionSchema | Tool | BaseToolkit] = None
function_call: str = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.load_functions()
def load_functions(self) -> None:
"""Load functions for OpenAI if llm is OpenAI"""
if not isinstance(self.llm, ChatOpenAI):
logger.error(f"llm is not ChatOpenAI, it is {type(self.llm)}")
return
if not self.functions:
return
if not isinstance(self.llm_kwargs, dict):
self.llm_kwargs = {}
if self.function_call:
self.llm_kwargs["function_call"] = {"name": self.function_call}
# convert Langchain BaseTool and BaseToolkit to OpenAI functions. FunctionSchema
# are already OpenAI functions, we don't need to convert them.
converted_functions = []
for function in self.functions:
if isinstance(function, Tool):
converted_functions.append(format_tool_to_openai_function(function))
elif isinstance(function, BaseToolkit):
converted_functions.extend(
format_tool_to_openai_function(tool_func)
for tool_func in function.get_tools()
)
else:
converted_functions.append(function)
self.llm_kwargs["functions"] = converted_functions
@property
def input_keys(self) -> List[str]:
"""
Overridden to filter out memory variables from input_variables.
This is to be compatible with Sequence, which will raise a validation
error since it does not detect the variable is from memory.
"""
as_set = set(self.prompt.input_variables)
if self.memory:
as_set -= set(self.memory.memory_variables)
return list(as_set)
class LLMReply(LLMChain):
"""
Wrapper around LLMChain that records output as an ASSISTANT message.
This simplifies making simple agents that just reply to messages.
"""
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
response = await super()._acall(inputs=inputs, run_manager=run_manager)
ix_handler = IxHandler.from_manager(run_manager)
await ix_handler.send_agent_msg(response["text"])
return response
def run(self, *args, **kwargs) -> Any:
response = super().run(*args, **kwargs)
TaskLogMessage.objects.create(
task_id=self.callbacks.task.id,
role="ASSISTANT",
parent=self.callbacks.think_msg,
content={
"type": "ASSISTANT",
"text": response,
# "agent": str(self.callback_manager.task.agent.id),
"agent": self.callbacks.agent.alias,
},
)
return response
| [
"langchain.tools.format_tool_to_openai_function"
] | [((646, 673), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (663, 673), False, 'import logging\n'), ((3458, 3493), 'ix.chains.callbacks.IxHandler.from_manager', 'IxHandler.from_manager', (['run_manager'], {}), '(run_manager)\n', (3480, 3493), False, 'from ix.chains.callbacks import IxHandler\n'), ((3676, 3883), 'ix.task_log.models.TaskLogMessage.objects.create', 'TaskLogMessage.objects.create', ([], {'task_id': 'self.callbacks.task.id', 'role': '"""ASSISTANT"""', 'parent': 'self.callbacks.think_msg', 'content': "{'type': 'ASSISTANT', 'text': response, 'agent': self.callbacks.agent.alias}"}), "(task_id=self.callbacks.task.id, role=\n 'ASSISTANT', parent=self.callbacks.think_msg, content={'type':\n 'ASSISTANT', 'text': response, 'agent': self.callbacks.agent.alias})\n", (3705, 3883), False, 'from ix.task_log.models import TaskLogMessage\n'), ((2144, 2184), 'langchain.tools.format_tool_to_openai_function', 'format_tool_to_openai_function', (['function'], {}), '(function)\n', (2174, 2184), False, 'from langchain.tools import Tool, format_tool_to_openai_function\n'), ((2302, 2343), 'langchain.tools.format_tool_to_openai_function', 'format_tool_to_openai_function', (['tool_func'], {}), '(tool_func)\n', (2332, 2343), False, 'from langchain.tools import Tool, format_tool_to_openai_function\n')] |
from typing import List, Union
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from pydantic import BaseModel, Extra, validator
from mindsdb.integrations.handlers.rag_handler.settings import (
DEFAULT_EMBEDDINGS_MODEL,
RAGBaseParameters,
)
EVAL_COLUMN_NAMES = (
"question",
"answers",
"context",
)
SUPPORTED_EVALUATION_TYPES = ("retrieval", "e2e")
GENERATION_METRICS = ("rouge", "meteor", "cosine_similarity", "accuracy")
RETRIEVAL_METRICS = ("cosine_similarity", "accuracy")
# todo make a separate class for evaluation parameters
class WriterLLMParameters(BaseModel):
"""Model parameters for the Writer LLM API interface"""
writer_api_key: str
writer_org_id: str = None
base_url: str = None
model_id: str = "palmyra-x"
callbacks: List[StreamingStdOutCallbackHandler] = [StreamingStdOutCallbackHandler()]
max_tokens: int = 1024
temperature: float = 0.0
top_p: float = 1
stop: List[str] = []
best_of: int = 5
verbose: bool = False
class Config:
extra = Extra.forbid
arbitrary_types_allowed = True
class WriterHandlerParameters(RAGBaseParameters):
"""Model parameters for create model"""
llm_params: WriterLLMParameters
generation_evaluation_metrics: List[str] = list(GENERATION_METRICS)
retrieval_evaluation_metrics: List[str] = list(RETRIEVAL_METRICS)
evaluation_type: str = "e2e"
n_rows_evaluation: int = None # if None, evaluate on all rows
retriever_match_threshold: float = 0.7
generator_match_threshold: float = 0.8
evaluate_dataset: Union[List[dict], str] = "squad_v2_val_100_sample"
class Config:
extra = Extra.forbid
arbitrary_types_allowed = True
use_enum_values = True
@validator("generation_evaluation_metrics", allow_reuse=True)
def generation_evaluation_metrics_must_be_supported(cls, v):
for metric in v:
if metric not in GENERATION_METRICS:
raise ValueError(
f"generation_evaluation_metrics must be one of {', '.join(str(v) for v in GENERATION_METRICS)}, got {metric}"
)
return v
@validator("retrieval_evaluation_metrics", allow_reuse=True)
def retrieval_evaluation_metrics_must_be_supported(cls, v):
for metric in v:
            if metric not in RETRIEVAL_METRICS:
raise ValueError(
f"retrieval_evaluation_metrics must be one of {', '.join(str(v) for v in RETRIEVAL_METRICS)}, got {metric}"
)
return v
@validator("evaluation_type", allow_reuse=True)
def evaluation_type_must_be_supported(cls, v):
if v not in SUPPORTED_EVALUATION_TYPES:
raise ValueError(
f"evaluation_type must be one of `retrieval` or `e2e`, got {v}"
)
return v
| [
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler"
] | [((1785, 1845), 'pydantic.validator', 'validator', (['"""generation_evaluation_metrics"""'], {'allow_reuse': '(True)'}), "('generation_evaluation_metrics', allow_reuse=True)\n", (1794, 1845), False, 'from pydantic import BaseModel, Extra, validator\n'), ((2190, 2249), 'pydantic.validator', 'validator', (['"""retrieval_evaluation_metrics"""'], {'allow_reuse': '(True)'}), "('retrieval_evaluation_metrics', allow_reuse=True)\n", (2199, 2249), False, 'from pydantic import BaseModel, Extra, validator\n'), ((2591, 2637), 'pydantic.validator', 'validator', (['"""evaluation_type"""'], {'allow_reuse': '(True)'}), "('evaluation_type', allow_reuse=True)\n", (2600, 2637), False, 'from pydantic import BaseModel, Extra, validator\n'), ((857, 889), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (887, 889), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n')] |
from typing import List, Optional, Mapping, Any
from functools import partial
from langchain.llms.base import LLM
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from transformers import AutoModel, AutoTokenizer
from peft import get_peft_model, LoraConfig, TaskType
import os
import torch
class ChatGLM3(LLM):
model_path: str
max_length: int = 8192
temperature: float = 0.1
top_p: float = 0.7
history: List = []
streaming: bool = True
model: object = None
tokenizer: object = None
"""
def __init__(self, model_path: str, max_length: int = 8192, temperature: float = 0.1, top_p: float = 0.7, history: List = None, streaming: bool = True):
self.model_path = model_path
self.max_length = max_length
self.temperature = temperature
self.top_p = top_p
self.history = [] if history is None else history
self.streaming = streaming
self.model = None
self.tokenizer = None
"""
@property
def _llm_type(self) -> str:
return "chatglm3-6B"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
add_history: bool = False
) -> str:
if self.model is None or self.tokenizer is None:
raise RuntimeError("Must call `load_model()` to load model and tokenizer!")
if self.streaming:
text_callback = partial(StreamingStdOutCallbackHandler().on_llm_new_token, verbose=True)
resp = self.generate_resp(prompt, text_callback, add_history=add_history)
else:
            resp = self.generate_resp(prompt, add_history=add_history)
return resp
def generate_resp(self, prompt, text_callback=None, add_history=True):
resp = ""
index = 0
if text_callback:
for i, (resp, _) in enumerate(self.model.stream_chat(
self.tokenizer,
prompt,
self.history,
max_length=self.max_length,
top_p=self.top_p,
temperature=self.temperature
)):
if add_history:
if i == 0:
self.history += [[prompt, resp]]
else:
self.history[-1] = [prompt, resp]
text_callback(resp[index:])
index = len(resp)
else:
resp, _ = self.model.chat(
self.tokenizer,
prompt,
self.history,
max_length=self.max_length,
top_p=self.top_p,
temperature=self.temperature
)
if add_history:
self.history += [[prompt, resp]]
return resp
def load_model(self):
if self.model is not None or self.tokenizer is not None:
return
self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, trust_remote_code=True)
self.model = AutoModel.from_pretrained(self.model_path, trust_remote_code=True).half().cuda().eval()
def load_model_from_checkpoint(self, checkpoint=None):
if self.model is not None or self.tokenizer is not None:
return
self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, trust_remote_code=True)
self.model = AutoModel.from_pretrained(self.model_path, trust_remote_code=True).half()
peft_config = LoraConfig(
task_type=TaskType.CAUSAL_LM,
inference_mode=False,
r=8,
target_modules=['query_key_value'],
lora_alpha=32,
lora_dropout=0.1,
)
self.model = get_peft_model(self.model, peft_config).to("cuda")
if checkpoint=="text_classification":
model_dir = "./output/checkpoint-3000/"
peft_path = "{}/chatglm-lora.pt".format(model_dir)
if os.path.exists(peft_path):
self.model.load_state_dict(torch.load(peft_path), strict=False)
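# Hypothetical usage sketch (the model path is illustrative):
#
#   llm = ChatGLM3(model_path="/models/chatglm3-6b", streaming=False)
#   llm.load_model()
#   print(llm("Briefly introduce yourself."))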
| [
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler"
] | [((3052, 3122), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.model_path'], {'trust_remote_code': '(True)'}), '(self.model_path, trust_remote_code=True)\n', (3081, 3122), False, 'from transformers import AutoModel, AutoTokenizer\n'), ((3401, 3471), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.model_path'], {'trust_remote_code': '(True)'}), '(self.model_path, trust_remote_code=True)\n', (3430, 3471), False, 'from transformers import AutoModel, AutoTokenizer\n'), ((3589, 3729), 'peft.LoraConfig', 'LoraConfig', ([], {'task_type': 'TaskType.CAUSAL_LM', 'inference_mode': '(False)', 'r': '(8)', 'target_modules': "['query_key_value']", 'lora_alpha': '(32)', 'lora_dropout': '(0.1)'}), "(task_type=TaskType.CAUSAL_LM, inference_mode=False, r=8,\n target_modules=['query_key_value'], lora_alpha=32, lora_dropout=0.1)\n", (3599, 3729), False, 'from peft import get_peft_model, LoraConfig, TaskType\n'), ((4081, 4106), 'os.path.exists', 'os.path.exists', (['peft_path'], {}), '(peft_path)\n', (4095, 4106), False, 'import os\n'), ((3493, 3559), 'transformers.AutoModel.from_pretrained', 'AutoModel.from_pretrained', (['self.model_path'], {'trust_remote_code': '(True)'}), '(self.model_path, trust_remote_code=True)\n', (3518, 3559), False, 'from transformers import AutoModel, AutoTokenizer\n'), ((3854, 3893), 'peft.get_peft_model', 'get_peft_model', (['self.model', 'peft_config'], {}), '(self.model, peft_config)\n', (3868, 3893), False, 'from peft import get_peft_model, LoraConfig, TaskType\n'), ((1564, 1596), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (1594, 1596), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((4152, 4173), 'torch.load', 'torch.load', (['peft_path'], {}), '(peft_path)\n', (4162, 4173), False, 'import torch\n'), ((3144, 3210), 'transformers.AutoModel.from_pretrained', 'AutoModel.from_pretrained', (['self.model_path'], {'trust_remote_code': '(True)'}), '(self.model_path, trust_remote_code=True)\n', (3169, 3210), False, 'from transformers import AutoModel, AutoTokenizer\n')] |
# Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
from datasets import load_dataset
import json
import unicodedata
def remove_control_characters(s):
return "".join(ch for ch in s if unicodedata.category(ch)[0]!="C")
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(
chunk_size = 1536, # chars, not llm tokens
chunk_overlap = 0,
length_function = len,
is_separator_regex = False,
)
documents = load_dataset('Shitao/MLDR', "corpus-en", split='corpus')
feed_file = "/tmp/vespa_feed_file_en.json"
with open(feed_file, "w") as f:
for doc in documents:
id = doc["docid"]
text = doc['text']
chunks = text_splitter.create_documents([text])
text_chunks = [chunk.page_content for chunk in chunks]
text_chunks = [remove_control_characters(chunk) for chunk in text_chunks]
vespa_feed_doc = {
"put": "id:%s:doc::%s" % ("en", id),
"fields": {
"text": text_chunks
}
}
f.write(json.dumps(vespa_feed_doc))
f.write("\n")
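# The resulting JSONL file can then be fed to a deployed Vespa application, e.g. with
# the Vespa CLI: `vespa feed /tmp/vespa_feed_file_en.json` (assuming the target app
# defines the `doc` schema used in the document ids above).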
| [
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((362, 477), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1536)', 'chunk_overlap': '(0)', 'length_function': 'len', 'is_separator_regex': '(False)'}), '(chunk_size=1536, chunk_overlap=0,\n length_function=len, is_separator_regex=False)\n', (392, 477), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((539, 595), 'datasets.load_dataset', 'load_dataset', (['"""Shitao/MLDR"""', '"""corpus-en"""'], {'split': '"""corpus"""'}), "('Shitao/MLDR', 'corpus-en', split='corpus')\n", (551, 595), False, 'from datasets import load_dataset\n'), ((1148, 1174), 'json.dumps', 'json.dumps', (['vespa_feed_doc'], {}), '(vespa_feed_doc)\n', (1158, 1174), False, 'import json\n'), ((244, 268), 'unicodedata.category', 'unicodedata.category', (['ch'], {}), '(ch)\n', (264, 268), False, 'import unicodedata\n')] |
from time import sleep
import copy
import redis
import json
import pickle
import traceback
from flask import Response, request, stream_with_context
from typing import Dict, Union
import os
from langchain.schema import HumanMessage, SystemMessage
from backend.api.language_model import get_llm
from backend.main import app, message_id_register, message_pool, logger
from backend.utils.streaming import single_round_chat_with_agent_streaming
from backend.schemas import OVERLOAD, NEED_CONTINUE_MODEL
from backend.schemas import DEFAULT_USER_ID
from real_agents.adapters.llm import BaseLanguageModel
from real_agents.adapters.agent_helpers import AgentExecutor, Tool
from real_agents.adapters.callbacks.agent_streaming import \
AgentStreamingStdOutCallbackHandler
from real_agents.adapters.models import ChatOpenAI
from real_agents.adapters.memory import ConversationReActBufferMemory
from real_agents.adapters.data_model import DataModel, JsonDataModel
from real_agents.adapters.interactive_executor import initialize_webot_agent
from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor
r = redis.Redis(host=os.getenv("REDIS_SERVER"), port=6379, db=0) # adjust host/port/db as needed
# here webot and webot_status are stored in redis since the two global variables cannot be modified and accessed normally across processes
# fixme: currently webot is stored without message_id or chat_id info, so it can only be used for one chat at a time
# fixme: currently webot_status is stored with chat_id info; if the status is not reset after a message ends abnormally (e.g. the message is interrupted), it will be wrongly reused for the next chat
def get_webot_from_redis(user_id: str, chat_id: str, ) -> WebBrowsingExecutor:
data = r.get(f'webot_{user_id}_{chat_id}')
if data is not None:
webot = pickle.loads(data)
else:
        # initialize a webot with None instruction if webot does not exist
webot = WebBrowsingExecutor(None)
save_webot_to_redis(user_id, chat_id, webot)
return webot
def save_webot_to_redis(user_id: str, chat_id: str, webot: WebBrowsingExecutor, ):
r.set(f'webot_{user_id}_{chat_id}', pickle.dumps(webot))
def get_webot_status_from_redis(user_id: str, chat_id: str):
webot_status_json = r.get(f'webot_status_{user_id}_{chat_id}')
if webot_status_json is not None:
webot_status = json.loads(webot_status_json)
return webot_status
else:
return {}
def save_webot_status_to_redis(user_id: str, chat_id: str, webot_status: Dict):
r.set(f'webot_status_{user_id}_{chat_id}', json.dumps(webot_status))
def reset_webot(user_id: str, chat_id: str):
webot = WebBrowsingExecutor(None)
save_webot_to_redis(user_id, chat_id, webot)
def reset_webot_status(user_id: str, chat_id: str):
webot_status = {"webot_status": "idle", "url": None}
save_webot_status_to_redis(user_id, chat_id, webot_status)
# this function has been deprecated
def get_plan(instruction: str, start_url: str, chat_llm: ChatOpenAI):
# fixme: Move this into a separate chain or executors to decompose the LLMs
system_message = f"""
You are a planner to assist another browser automation assistant.
Here is the instruction for the other assistant:
```
You MUST take one of the following actions. NEVER EVER EVER make up actions that do not exist:
1. click(element): Clicks on an element
2. setValue(element, value: string): Focuses on and sets the value of an input element
3. finish(): Indicates the task is finished
4. fail(): Indicates that you are unable to complete the task
You will be be given a task to perform and the current state of the DOM. You will also be given previous actions that you have taken. You may retry a failed action up to one time.
This is an example of an action:
<Thought>I should click the add to cart button</Thought>
<Action>click(223)</Action>
You MUST always include the <Thought> and <Action> open/close tags or else your response will be marked as invalid.
Rules you MUST follow:
1. You must only take one step at a time. You cannot take multiple actions in a single response.
2. You should not consider the action to present the result to the user. You only need to do available actions. If info in current page is enough for the user to solve the problem, you should finish.
```
Now your responsibility is to give a step-by-step plan according to user's instruction. This plan will be given to the assistant as a reference when it is performing tasks.
""".strip()
human_message = f"""
The user requests the following task:
{instruction}
Now you are at {start_url}
Provide a plan to do this (you can use pseudo description as below to describe the item).
Here is an example case:
request: Go to google calendar to schedule a meeting
current url: "https://google.com"
example plan:
1. setValue(searchBar, "google calendar")
2. click(search)
3. click(the item with title of google calendar)
    4.1 if user has logged in
do nothing
    4.2 if user hasn't logged in
do login
5. click(create event button)
6. setValue(event title input bar, "meeting")
7. click(save event button)
8. finish()
""".strip()
messages = [SystemMessage(content=system_message),
HumanMessage(content=human_message)]
response = chat_llm(messages).content
return response
def create_webot_interaction_executor(
llm: BaseLanguageModel,
llm_name: str,
user_id: str,
chat_id: str
) -> AgentExecutor:
"""Creates an agent executor for interaction.
Args:
llm: A llm model.
llm_name: A string llm name.
user_id: A string of user id.
chat_id: A string chat id.
Returns:
An agent executor.
"""
# Initialize memory
memory = ConversationReActBufferMemory(memory_key="chat_history",
return_messages=True, max_token_limit=10000)
class RunWebot:
def __init__(self, webot: WebotExecutor, llm: BaseLanguageModel, user_id: str,
chat_id: str):
self.llm = llm
self.webot = webot
self.user_id = user_id
self.chat_id = chat_id
def run(self, term: str) -> Union[str, Dict, DataModel]:
try:
user_id = self.user_id
chat_id = self.chat_id
reset_webot(user_id=user_id, chat_id=chat_id)
reset_webot_status(user_id=user_id, chat_id=chat_id)
raw_observation = self.webot.run(user_intent=term, llm=self.llm)
instruction, start_url = raw_observation["instruction"], \
raw_observation["start_url"]
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
webot.instruction = instruction
# webot.plan = get_plan(instruction, start_url)
webot.plan = ""
save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot)
webot_status = {
"webot_status": "running",
"url": start_url
}
save_webot_status_to_redis(user_id=user_id, chat_id=chat_id,
webot_status=webot_status)
while True:
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
if webot.finish or webot.interrupt or webot.error or webot.fail:
break
else:
sleep(0.5)
save_webot_status_to_redis(user_id=user_id, chat_id=chat_id,
webot_status={"webot_status": "idle",
"url": None})
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
webot.instruction = None
save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot)
if webot.finish:
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
action_history = webot.action_history
last_page = webot.pages_viewed[-1]
observation = JsonDataModel.from_raw_data(
{
"success": True,
"result": json.dumps({"action_history": action_history,
"last_page": last_page}, indent=4),
"intermediate_steps": json.dumps(
{"instruction": instruction, "start_url": start_url},
indent=4)
}
)
return observation
if webot.fail:
observation = JsonDataModel.from_raw_data(
{
"success": True,
"result": "The webot failed to execute the instruction.",
"intermediate_steps": json.dumps(
{"instruction": instruction, "start_url": start_url},
indent=4)
}
)
return observation
if webot.interrupt:
observation = JsonDataModel.from_raw_data(
{
"success": False,
"result": "The web browsing is interrupted by user.",
"intermediate_steps": json.dumps(
{"instruction": instruction, "start_url": start_url},
indent=4)
}
)
return observation
if webot.error:
observation = JsonDataModel.from_raw_data(
{
"success": False,
"result": "Error occurs during web browsing.",
"intermediate_steps": json.dumps(
{"instruction": instruction, "start_url": start_url},
indent=4)
}
)
return observation
except Exception as e:
print(traceback.format_exc())
observation = JsonDataModel.from_raw_data(
{
"success": False,
"result": f"Failed in web browsing with the input: {term}, please try again later.",
"intermediate_steps": json.dumps({"error": str(e)})
}
)
return observation
webot = WebotExecutor.from_webot()
llm = copy.deepcopy(llm)
run_webot = RunWebot(webot, llm, chat_id=chat_id, user_id=user_id)
tools = [Tool(name=webot.name, func=run_webot.run, description=webot.description)]
continue_model = llm_name if llm_name in NEED_CONTINUE_MODEL else None
interaction_executor = initialize_webot_agent(
tools, llm, continue_model, memory=memory, verbose=True
)
return interaction_executor
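# Illustrative sketch (hypothetical model name and ids): building the interaction executor
# outside the Flask route and invoking it directly, assuming the returned AgentExecutor
# exposes the usual LangChain-style run() interface.
def _example_interaction_executor():
    llm = get_llm("gpt-3.5-turbo", temperature=0.4)
    executor = create_webot_interaction_executor(
        llm=llm, llm_name="gpt-3.5-turbo", user_id="user-123", chat_id="chat-456"
    )
    return executor.run("Open Hacker News and find today's top story")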
@app.route("/api/chat_xlang_webot", methods=["POST"])
def chat_xlang_webot() -> Dict:
"""Returns the chat response of web agent."""
try:
# Get request parameters
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
user_intent = request_json["user_intent"]
parent_message_id = request_json["parent_message_id"]
llm_name = request_json["llm_name"]
temperature = request_json.get("temperature", 0.4)
stop_words = ["[RESPONSE_BEGIN]", "TOOL RESPONSE"]
kwargs = {
"temperature": temperature,
"stop": stop_words,
}
# Get language model
llm = get_llm(llm_name, **kwargs)
logger.bind(user_id=user_id, chat_id=chat_id, api="/chat",
msg_head="Request json").debug(request_json)
human_message_id = message_id_register.add_variable(user_intent)
ai_message_id = message_id_register.add_variable("")
stream_handler = AgentStreamingStdOutCallbackHandler()
# Build executor and run chat
# reset webot and status
reset_webot(user_id=user_id, chat_id=chat_id)
reset_webot_status(user_id=user_id, chat_id=chat_id)
interaction_executor = create_webot_interaction_executor(
llm=llm,
llm_name=llm_name,
chat_id=chat_id,
user_id=user_id
)
activated_message_list = message_pool.get_activated_message_list(user_id,
chat_id,
list(),
parent_message_id)
message_pool.load_agent_memory_from_list(interaction_executor.memory,
activated_message_list)
return stream_with_context(
Response(
single_round_chat_with_agent_streaming(
interaction_executor=interaction_executor,
user_intent=user_intent,
human_message_id=human_message_id,
ai_message_id=ai_message_id,
user_id=user_id,
chat_id=chat_id,
message_list=activated_message_list,
parent_message_id=parent_message_id,
stream_handler=stream_handler,
llm_name=llm_name,
app_type="webot",
),
content_type="application/json",
)
)
except Exception as e:
import traceback
traceback.print_exc()
return Response(response=None,
status=f"{OVERLOAD} backend is currently overloaded")
| [
"langchain.schema.SystemMessage",
"langchain.schema.HumanMessage"
] | [((11305, 11357), 'backend.main.app.route', 'app.route', (['"""/api/chat_xlang_webot"""'], {'methods': "['POST']"}), "('/api/chat_xlang_webot', methods=['POST'])\n", (11314, 11357), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((2664, 2689), 'real_agents.web_agent.WebBrowsingExecutor', 'WebBrowsingExecutor', (['None'], {}), '(None)\n', (2683, 2689), False, 'from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor\n'), ((5769, 5875), 'real_agents.adapters.memory.ConversationReActBufferMemory', 'ConversationReActBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)', 'max_token_limit': '(10000)'}), "(memory_key='chat_history', return_messages=\n True, max_token_limit=10000)\n", (5798, 5875), False, 'from real_agents.adapters.memory import ConversationReActBufferMemory\n'), ((10859, 10885), 'real_agents.web_agent.WebotExecutor.from_webot', 'WebotExecutor.from_webot', ([], {}), '()\n', (10883, 10885), False, 'from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor\n'), ((10896, 10914), 'copy.deepcopy', 'copy.deepcopy', (['llm'], {}), '(llm)\n', (10909, 10914), False, 'import copy\n'), ((11176, 11255), 'real_agents.adapters.interactive_executor.initialize_webot_agent', 'initialize_webot_agent', (['tools', 'llm', 'continue_model'], {'memory': 'memory', 'verbose': '(True)'}), '(tools, llm, continue_model, memory=memory, verbose=True)\n', (11198, 11255), False, 'from real_agents.adapters.interactive_executor import initialize_webot_agent\n'), ((1125, 1150), 'os.getenv', 'os.getenv', (['"""REDIS_SERVER"""'], {}), "('REDIS_SERVER')\n", (1134, 1150), False, 'import os\n'), ((1810, 1828), 'pickle.loads', 'pickle.loads', (['data'], {}), '(data)\n', (1822, 1828), False, 'import pickle\n'), ((1931, 1956), 'real_agents.web_agent.WebBrowsingExecutor', 'WebBrowsingExecutor', (['None'], {}), '(None)\n', (1950, 1956), False, 'from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor\n'), ((2152, 2171), 'pickle.dumps', 'pickle.dumps', (['webot'], {}), '(webot)\n', (2164, 2171), False, 'import pickle\n'), ((2364, 2393), 'json.loads', 'json.loads', (['webot_status_json'], {}), '(webot_status_json)\n', (2374, 2393), False, 'import json\n'), ((2579, 2603), 'json.dumps', 'json.dumps', (['webot_status'], {}), '(webot_status)\n', (2589, 2603), False, 'import json\n'), ((5172, 5209), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_message'}), '(content=system_message)\n', (5185, 5209), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((5227, 5262), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'human_message'}), '(content=human_message)\n', (5239, 5262), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((10999, 11071), 'real_agents.adapters.agent_helpers.Tool', 'Tool', ([], {'name': 'webot.name', 'func': 'run_webot.run', 'description': 'webot.description'}), '(name=webot.name, func=run_webot.run, description=webot.description)\n', (11003, 11071), False, 'from real_agents.adapters.agent_helpers import AgentExecutor, Tool\n'), ((11505, 11523), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (11521, 11523), False, 'from flask import Response, request, stream_with_context\n'), ((12048, 12075), 'backend.api.language_model.get_llm', 'get_llm', (['llm_name'], {}), '(llm_name, **kwargs)\n', (12055, 12075), False, 'from backend.api.language_model import get_llm\n'), ((12237, 12282), 
'backend.main.message_id_register.add_variable', 'message_id_register.add_variable', (['user_intent'], {}), '(user_intent)\n', (12269, 12282), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((12307, 12343), 'backend.main.message_id_register.add_variable', 'message_id_register.add_variable', (['""""""'], {}), "('')\n", (12339, 12343), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((12370, 12407), 'real_agents.adapters.callbacks.agent_streaming.AgentStreamingStdOutCallbackHandler', 'AgentStreamingStdOutCallbackHandler', ([], {}), '()\n', (12405, 12407), False, 'from real_agents.adapters.callbacks.agent_streaming import AgentStreamingStdOutCallbackHandler\n'), ((13127, 13224), 'backend.main.message_pool.load_agent_memory_from_list', 'message_pool.load_agent_memory_from_list', (['interaction_executor.memory', 'activated_message_list'], {}), '(interaction_executor.memory,\n activated_message_list)\n', (13167, 13224), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((14066, 14087), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (14085, 14087), False, 'import traceback\n'), ((14103, 14180), 'flask.Response', 'Response', ([], {'response': 'None', 'status': 'f"""{OVERLOAD} backend is currently overloaded"""'}), "(response=None, status=f'{OVERLOAD} backend is currently overloaded')\n", (14111, 14180), False, 'from flask import Response, request, stream_with_context\n'), ((12085, 12173), 'backend.main.logger.bind', 'logger.bind', ([], {'user_id': 'user_id', 'chat_id': 'chat_id', 'api': '"""/chat"""', 'msg_head': '"""Request json"""'}), "(user_id=user_id, chat_id=chat_id, api='/chat', msg_head=\n 'Request json')\n", (12096, 12173), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((13344, 13714), 'backend.utils.streaming.single_round_chat_with_agent_streaming', 'single_round_chat_with_agent_streaming', ([], {'interaction_executor': 'interaction_executor', 'user_intent': 'user_intent', 'human_message_id': 'human_message_id', 'ai_message_id': 'ai_message_id', 'user_id': 'user_id', 'chat_id': 'chat_id', 'message_list': 'activated_message_list', 'parent_message_id': 'parent_message_id', 'stream_handler': 'stream_handler', 'llm_name': 'llm_name', 'app_type': '"""webot"""'}), "(interaction_executor=\n interaction_executor, user_intent=user_intent, human_message_id=\n human_message_id, ai_message_id=ai_message_id, user_id=user_id, chat_id\n =chat_id, message_list=activated_message_list, parent_message_id=\n parent_message_id, stream_handler=stream_handler, llm_name=llm_name,\n app_type='webot')\n", (13382, 13714), False, 'from backend.utils.streaming import single_round_chat_with_agent_streaming\n'), ((7547, 7557), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (7552, 7557), False, 'from time import sleep\n'), ((10439, 10461), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10459, 10461), False, 'import traceback\n'), ((8392, 8477), 'json.dumps', 'json.dumps', (["{'action_history': action_history, 'last_page': last_page}"], {'indent': '(4)'}), "({'action_history': action_history, 'last_page': last_page}, indent=4\n )\n", (8402, 8477), False, 'import json\n'), ((8574, 8648), 'json.dumps', 'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (8584, 8648), False, 'import json\n'), ((9103, 9177), 'json.dumps', 
'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (9113, 9177), False, 'import json\n'), ((9634, 9708), 'json.dumps', 'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (9644, 9708), False, 'import json\n'), ((10154, 10228), 'json.dumps', 'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (10164, 10228), False, 'import json\n')] |
from langchain_community.chat_models import ChatOllama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
llm = ChatOllama(model="mixtral:8x7b-instruct-v0.1-fp16")
llm.base_url = "http://localhost:11434"
prompt = ChatPromptTemplate.from_template("Tell me a short joke about {topic}")
chain = prompt | llm | StrOutputParser()
print(chain.invoke({"topic": "Space travel"}))
| [
"langchain_core.prompts.ChatPromptTemplate.from_template",
"langchain_community.chat_models.ChatOllama",
"langchain_core.output_parsers.StrOutputParser"
] | [((174, 225), 'langchain_community.chat_models.ChatOllama', 'ChatOllama', ([], {'model': '"""mixtral:8x7b-instruct-v0.1-fp16"""'}), "(model='mixtral:8x7b-instruct-v0.1-fp16')\n", (184, 225), False, 'from langchain_community.chat_models import ChatOllama\n'), ((275, 345), 'langchain_core.prompts.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['"""Tell me a short joke about {topic}"""'], {}), "('Tell me a short joke about {topic}')\n", (307, 345), False, 'from langchain_core.prompts import ChatPromptTemplate\n'), ((369, 386), 'langchain_core.output_parsers.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (384, 386), False, 'from langchain_core.output_parsers import StrOutputParser\n')] |
import os
import json
from langchain.schema import messages_from_dict, messages_to_dict
from langchain.memory import (
ConversationBufferMemory,
ChatMessageHistory,
)
class YeagerAIContext:
"""Context for the @yeager.ai agent."""
def __init__(self, username: str, session_id: str, session_path: str):
self.username = username
self.session_id = session_id
self.session_path = session_path
self.session_message_history = ChatMessageHistory()
self.chat_buffer_memory = ConversationBufferMemory(
memory_key="chat_history", input_key="input"
)
def load_session_message_history(self):
try:
with open(os.path.join(self.session_path, "session_history.txt"), "r") as f:
dicts = json.loads(f.read())
self.session_message_history.messages = messages_from_dict(dicts)
except FileNotFoundError:
os.makedirs(self.session_path, exist_ok=True)
with open(os.path.join(self.session_path, "session_history.txt"), "w") as f:
f.close()
def save_session_message_history(self):
dicts = messages_to_dict(self.session_message_history.messages)
with open(os.path.join(self.session_path, "session_history.txt"), "w") as f:
f.write(json.dumps(dicts))
f.close()
def create_shadow_clones(self):
self.load_session_message_history()
self.chat_buffer_memory.chat_memory = self.session_message_history
def dispell_shadow_clones(self):
self.session_message_history = self.chat_buffer_memory.chat_memory
self.save_session_message_history()
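# Illustrative sketch (hypothetical session path): the intended lifecycle — load persisted
# history into the chat buffer before a run, then write the updated history back afterwards.
def _example_session_lifecycle():
    ctx = YeagerAIContext(
        username="alice", session_id="demo", session_path="/tmp/yeager_sessions/demo"
    )
    ctx.create_shadow_clones()    # pull session_history.txt into chat_buffer_memory
    # ... run the agent with ctx.chat_buffer_memory as its memory ...
    ctx.dispell_shadow_clones()   # persist the updated history back to disk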
| [
"langchain.schema.messages_from_dict",
"langchain.memory.ConversationBufferMemory",
"langchain.memory.ChatMessageHistory",
"langchain.schema.messages_to_dict"
] | [((472, 492), 'langchain.memory.ChatMessageHistory', 'ChatMessageHistory', ([], {}), '()\n', (490, 492), False, 'from langchain.memory import ConversationBufferMemory, ChatMessageHistory\n'), ((527, 597), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'input_key': '"""input"""'}), "(memory_key='chat_history', input_key='input')\n", (551, 597), False, 'from langchain.memory import ConversationBufferMemory, ChatMessageHistory\n'), ((1162, 1217), 'langchain.schema.messages_to_dict', 'messages_to_dict', (['self.session_message_history.messages'], {}), '(self.session_message_history.messages)\n', (1178, 1217), False, 'from langchain.schema import messages_from_dict, messages_to_dict\n'), ((868, 893), 'langchain.schema.messages_from_dict', 'messages_from_dict', (['dicts'], {}), '(dicts)\n', (886, 893), False, 'from langchain.schema import messages_from_dict, messages_to_dict\n'), ((940, 985), 'os.makedirs', 'os.makedirs', (['self.session_path'], {'exist_ok': '(True)'}), '(self.session_path, exist_ok=True)\n', (951, 985), False, 'import os\n'), ((1236, 1290), 'os.path.join', 'os.path.join', (['self.session_path', '"""session_history.txt"""'], {}), "(self.session_path, 'session_history.txt')\n", (1248, 1290), False, 'import os\n'), ((1323, 1340), 'json.dumps', 'json.dumps', (['dicts'], {}), '(dicts)\n', (1333, 1340), False, 'import json\n'), ((700, 754), 'os.path.join', 'os.path.join', (['self.session_path', '"""session_history.txt"""'], {}), "(self.session_path, 'session_history.txt')\n", (712, 754), False, 'import os\n'), ((1008, 1062), 'os.path.join', 'os.path.join', (['self.session_path', '"""session_history.txt"""'], {}), "(self.session_path, 'session_history.txt')\n", (1020, 1062), False, 'import os\n')] |
import argparse
import os
import subprocess
import time
import gradio as gr
from huggingface_hub import snapshot_download
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import (
Docx2txtLoader,
PyPDFLoader,
TextLoader,
YoutubeLoader,
)
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from openai import OpenAI
from chat_with_mlx import __version__
from chat_with_mlx.models.utils import model_info
from chat_with_mlx.rag.utils import get_prompt
os.environ["TOKENIZERS_PARALLELISM"] = "False"
SUPPORTED_LANG = [
"default",
"English",
"Spanish",
"Chinese",
"Vietnamese",
"Japanese",
"Korean",
"Indian",
"Turkish",
"German",
"French",
"Italian",
]
openai_api_base = "http://127.0.0.1:8080/v1"
model_dicts, yml_path, cfg_list, mlx_config = model_info()
model_list = list(cfg_list.keys())
client = OpenAI(api_key="EMPTY", base_url=openai_api_base)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)
emb = HuggingFaceEmbeddings(
model_name="nomic-ai/nomic-embed-text-v1.5",
model_kwargs={"trust_remote_code": True},
)
vectorstore = None
def load_model(model_name, lang):
global process, rag_prompt, rag_his_prompt, sys_prompt, default_lang
default_lang = "default"
prompts, sys_prompt = get_prompt(f"{yml_path[cfg_list[model_name]]}", lang)
rag_prompt, rag_his_prompt = prompts[0], prompts[1]
model_name_list = cfg_list[model_name].split("/")
directory_path = os.path.dirname(os.path.abspath(__file__))
local_model_dir = os.path.join(
directory_path, "models", "download", model_name_list[1]
)
if not os.path.exists(local_model_dir):
snapshot_download(repo_id=mlx_config[model_name], local_dir=local_model_dir)
command = ["python3", "-m", "mlx_lm.server", "--model", local_model_dir]
try:
process = subprocess.Popen(
command, stdin=subprocess.PIPE, stderr=subprocess.PIPE, text=True
)
process.stdin.write("y\n")
process.stdin.flush()
return {model_status: "Model Loaded"}
except Exception as e:
return {model_status: f"Exception occurred: {str(e)}"}
def kill_process():
global process
process.terminate()
time.sleep(2)
    if process.poll() is None:  # The process is still running after terminate()
process.kill() # Force kill if still running
print("Model Killed")
return {model_status: "Model Unloaded"}
def check_file_type(file_path):
# Check for document file extensions
if (
file_path.endswith(".pdf")
or file_path.endswith(".txt")
or file_path.endswith(".doc")
or file_path.endswith(".docx")
):
return True
# Check for YouTube link formats
elif (
file_path.startswith("https://www.youtube.com/")
or file_path.startswith("https://youtube.com/")
or file_path.startswith("https://youtu.be/")
):
return True
else:
return False
def upload(files):
supported = check_file_type(files)
if supported:
return {url: files, index_status: "Not Done"}
else:
return {url: "File type not supported", index_status: "Not Done"}
def indexing(mode, url):
global vectorstore
try:
if mode == "Files (docx, pdf, txt)":
if url.endswith(".pdf"):
loader = PyPDFLoader(url)
elif url.endswith(".docx"):
loader = Docx2txtLoader(url)
elif url.endswith(".txt"):
loader = TextLoader(url)
splits = loader.load_and_split(text_splitter)
elif mode == "YouTube (url)":
loader = YoutubeLoader.from_youtube_url(
url, add_video_info=False, language=["en", "vi"]
)
splits = loader.load_and_split(text_splitter)
vectorstore = Chroma.from_documents(documents=splits, embedding=emb)
return {index_status: "Indexing Done"}
except Exception as e:
# Print the error message or return it as part of the response
print(f"Error: {e}") # This will print the error to the console or log
return {"index_status": "Indexing Error", "error_message": str(e)}
def kill_index():
global vectorstore
vectorstore = None
return {index_status: "Indexing Undone"}
def build_rag_context(docs):
context = ""
for doc in docs:
context += doc.page_content + "\n"
return context
def chatbot(query, history, temp, max_tokens, freq_penalty, k_docs):
global chat_history, sys_prompt
if "vectorstore" in globals() and vectorstore is not None:
if len(history) == 0:
chat_history = []
if sys_prompt is not None:
chat_history.append({"role": "system", "content": sys_prompt})
docs = vectorstore.similarity_search(query, k=k_docs)
else:
history_str = ""
for i, message in enumerate(history):
history_str += f"User: {message[0]}\n"
history_str += f"AI: {message[1]}\n"
if sys_prompt is not None:
chat_history.append({"role": "system", "content": sys_prompt})
chat_history.append({"role": "user", "content": history_str})
            docs = vectorstore.similarity_search(history_str, k=k_docs)
context = build_rag_context(docs)
if len(history) == 0:
prompt = rag_prompt.format(context=context, question=query)
else:
prompt = rag_his_prompt.format(
chat_history=history_str, context=context, question=query
)
messages = [{"role": "user", "content": prompt}]
else:
if len(history) == 0:
chat_history = []
if sys_prompt is not None:
chat_history.append({"role": "system", "content": sys_prompt})
else:
chat_history = []
if sys_prompt is not None:
chat_history.append({"role": "system", "content": sys_prompt})
for i, message in enumerate(history):
chat_history.append({"role": "user", "content": message[0]})
chat_history.append({"role": "assistant", "content": message[1]})
chat_history.append({"role": "user", "content": query})
messages = chat_history
# Uncomment for debugging
# print(messages)
response = client.chat.completions.create(
model="gpt",
messages=messages,
temperature=temp,
frequency_penalty=freq_penalty,
max_tokens=max_tokens,
stream=True,
)
stop = ["<|im_end|>", "<|endoftext|>"]
partial_message = ""
for chunk in response:
if len(chunk.choices) != 0:
if chunk.choices[0].delta.content not in stop:
partial_message = partial_message + chunk.choices[0].delta.content
else:
partial_message = partial_message + ""
yield partial_message
with gr.Blocks(fill_height=True, theme=gr.themes.Soft()) as demo:
model_name = gr.Dropdown(
label="Model",
info="Select your model",
choices=sorted(model_list),
interactive=True,
render=False,
)
temp_slider = gr.State(0.2)
max_gen_token = gr.State(512)
freq_penalty = gr.State(1.05)
retrieve_docs = gr.State(3)
language = gr.State("default")
gr.ChatInterface(
chatbot=gr.Chatbot(height=600, render=False),
fn=chatbot, # Function to call on user input
title="Chat with MLX🍎", # Title of the web page
description="Chat with your data using Apple MLX Backend", # Description
additional_inputs=[temp_slider, max_gen_token, freq_penalty, retrieve_docs],
)
with gr.Accordion("Advanced Setting", open=False):
with gr.Row():
with gr.Column(scale=2):
temp_slider = gr.Slider(
label="Temperature",
value=0.2,
minimum=0.0,
maximum=1.0,
step=0.05,
interactive=True,
)
max_gen_token = gr.Slider(
label="Max Tokens",
value=512,
minimum=512,
maximum=4096,
step=256,
interactive=True,
)
with gr.Column(scale=2):
freq_penalty = gr.Slider(
label="Frequency Penalty",
value=1.05,
minimum=-2,
maximum=2,
step=0.05,
interactive=True,
)
retrieve_docs = gr.Slider(
label="No. Retrieval Docs",
value=3,
minimum=1,
maximum=10,
step=1,
interactive=True,
)
with gr.Row():
with gr.Column(scale=2):
model_name.render()
language = gr.Dropdown(
label="Language",
choices=sorted(SUPPORTED_LANG),
value="default",
interactive=True,
)
btn1 = gr.Button("Load Model", variant="primary")
btn3 = gr.Button("Unload Model", variant="stop")
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=9):
mode = gr.Dropdown(
label="Dataset",
info="Choose your dataset type",
choices=["Files (docx, pdf, txt)", "YouTube (url)"],
scale=5,
)
url = gr.Textbox(
label="URL",
info="Enter your filepath (URL for Youtube)",
interactive=True,
)
upload_button = gr.UploadButton(
label="Upload File", variant="primary"
)
# data = gr.Textbox(visible=lambda mode: mode == 'YouTube')
with gr.Column(scale=1):
model_status = gr.Textbox("Model Not Loaded", label="Model Status")
index_status = gr.Textbox("Not Index", label="Index Status")
btn1.click(
load_model,
inputs=[model_name, language],
outputs=[model_status],
)
btn3.click(kill_process, outputs=[model_status])
upload_button.upload(
upload, inputs=upload_button, outputs=[url, index_status]
)
index_button = gr.Button("Start Indexing", variant="primary")
index_button.click(
indexing, inputs=[mode, url], outputs=[index_status]
)
stop_index_button = gr.Button("Stop Indexing")
stop_index_button.click(kill_index, outputs=[index_status])
def app(port, share):
print(f"Starting MLX Chat on port {port}")
print(f"Sharing: {share}")
demo.launch(inbrowser=True, share=share, server_port=port)
def main():
parser = argparse.ArgumentParser(
description="Chat with MLX \n"
"Native RAG on MacOS and Apple Silicon with MLX 🧑💻"
)
parser.add_argument(
"--version", action="version", version=f"Chat with MLX {__version__}"
)
parser.add_argument(
"--port",
type=int,
default=7860,
help="Port number to run the app",
)
parser.add_argument(
"--share",
default=False,
help="Enable sharing the app",
)
args = parser.parse_args()
app(port=args.port, share=args.share)
| [
"langchain_community.document_loaders.PyPDFLoader",
"langchain_community.document_loaders.Docx2txtLoader",
"langchain_community.document_loaders.YoutubeLoader.from_youtube_url",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain_community.vectorstores.Chroma.from_documents",
"langchain_community.document_loaders.TextLoader",
"langchain_community.embeddings.HuggingFaceEmbeddings"
] | [((934, 946), 'chat_with_mlx.models.utils.model_info', 'model_info', ([], {}), '()\n', (944, 946), False, 'from chat_with_mlx.models.utils import model_info\n'), ((991, 1040), 'openai.OpenAI', 'OpenAI', ([], {'api_key': '"""EMPTY"""', 'base_url': 'openai_api_base'}), "(api_key='EMPTY', base_url=openai_api_base)\n", (997, 1040), False, 'from openai import OpenAI\n'), ((1057, 1121), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(512)', 'chunk_overlap': '(50)'}), '(chunk_size=512, chunk_overlap=50)\n', (1087, 1121), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1128, 1240), 'langchain_community.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""nomic-ai/nomic-embed-text-v1.5"""', 'model_kwargs': "{'trust_remote_code': True}"}), "(model_name='nomic-ai/nomic-embed-text-v1.5',\n model_kwargs={'trust_remote_code': True})\n", (1149, 1240), False, 'from langchain_community.embeddings import HuggingFaceEmbeddings\n'), ((1431, 1484), 'chat_with_mlx.rag.utils.get_prompt', 'get_prompt', (['f"""{yml_path[cfg_list[model_name]]}"""', 'lang'], {}), "(f'{yml_path[cfg_list[model_name]]}', lang)\n", (1441, 1484), False, 'from chat_with_mlx.rag.utils import get_prompt\n'), ((1681, 1751), 'os.path.join', 'os.path.join', (['directory_path', '"""models"""', '"""download"""', 'model_name_list[1]'], {}), "(directory_path, 'models', 'download', model_name_list[1])\n", (1693, 1751), False, 'import os\n'), ((2378, 2391), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2388, 2391), False, 'import time\n'), ((7394, 7407), 'gradio.State', 'gr.State', (['(0.2)'], {}), '(0.2)\n', (7402, 7407), True, 'import gradio as gr\n'), ((7428, 7441), 'gradio.State', 'gr.State', (['(512)'], {}), '(512)\n', (7436, 7441), True, 'import gradio as gr\n'), ((7461, 7475), 'gradio.State', 'gr.State', (['(1.05)'], {}), '(1.05)\n', (7469, 7475), True, 'import gradio as gr\n'), ((7496, 7507), 'gradio.State', 'gr.State', (['(3)'], {}), '(3)\n', (7504, 7507), True, 'import gradio as gr\n'), ((7523, 7542), 'gradio.State', 'gr.State', (['"""default"""'], {}), "('default')\n", (7531, 7542), True, 'import gradio as gr\n'), ((11507, 11620), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Chat with MLX \nNative RAG on MacOS and Apple Silicon with MLX 🧑\u200d💻"""'}), '(description=\n """Chat with MLX \nNative RAG on MacOS and Apple Silicon with MLX 🧑\u200d💻""")\n', (11530, 11620), False, 'import argparse\n'), ((1632, 1657), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1647, 1657), False, 'import os\n'), ((1778, 1809), 'os.path.exists', 'os.path.exists', (['local_model_dir'], {}), '(local_model_dir)\n', (1792, 1809), False, 'import os\n'), ((1819, 1895), 'huggingface_hub.snapshot_download', 'snapshot_download', ([], {'repo_id': 'mlx_config[model_name]', 'local_dir': 'local_model_dir'}), '(repo_id=mlx_config[model_name], local_dir=local_model_dir)\n', (1836, 1895), False, 'from huggingface_hub import snapshot_download\n'), ((2002, 2089), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdin': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'text': '(True)'}), '(command, stdin=subprocess.PIPE, stderr=subprocess.PIPE,\n text=True)\n', (2018, 2089), False, 'import subprocess\n'), ((4005, 4059), 'langchain_community.vectorstores.Chroma.from_documents', 'Chroma.from_documents', ([], {'documents': 'splits', 'embedding': 'emb'}), '(documents=splits, 
embedding=emb)\n', (4026, 4059), False, 'from langchain_community.vectorstores import Chroma\n'), ((7912, 7956), 'gradio.Accordion', 'gr.Accordion', (['"""Advanced Setting"""'], {'open': '(False)'}), "('Advanced Setting', open=False)\n", (7924, 7956), True, 'import gradio as gr\n'), ((9136, 9144), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (9142, 9144), True, 'import gradio as gr\n'), ((7172, 7188), 'gradio.themes.Soft', 'gr.themes.Soft', ([], {}), '()\n', (7186, 7188), True, 'import gradio as gr\n'), ((7581, 7617), 'gradio.Chatbot', 'gr.Chatbot', ([], {'height': '(600)', 'render': '(False)'}), '(height=600, render=False)\n', (7591, 7617), True, 'import gradio as gr\n'), ((7971, 7979), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (7977, 7979), True, 'import gradio as gr\n'), ((9159, 9177), 'gradio.Column', 'gr.Column', ([], {'scale': '(2)'}), '(scale=2)\n', (9168, 9177), True, 'import gradio as gr\n'), ((9429, 9471), 'gradio.Button', 'gr.Button', (['"""Load Model"""'], {'variant': '"""primary"""'}), "('Load Model', variant='primary')\n", (9438, 9471), True, 'import gradio as gr\n'), ((9491, 9532), 'gradio.Button', 'gr.Button', (['"""Unload Model"""'], {'variant': '"""stop"""'}), "('Unload Model', variant='stop')\n", (9500, 9532), True, 'import gradio as gr\n'), ((9546, 9564), 'gradio.Column', 'gr.Column', ([], {'scale': '(4)'}), '(scale=4)\n', (9555, 9564), True, 'import gradio as gr\n'), ((3514, 3530), 'langchain_community.document_loaders.PyPDFLoader', 'PyPDFLoader', (['url'], {}), '(url)\n', (3525, 3530), False, 'from langchain_community.document_loaders import Docx2txtLoader, PyPDFLoader, TextLoader, YoutubeLoader\n'), ((3813, 3898), 'langchain_community.document_loaders.YoutubeLoader.from_youtube_url', 'YoutubeLoader.from_youtube_url', (['url'], {'add_video_info': '(False)', 'language': "['en', 'vi']"}), "(url, add_video_info=False, language=['en', 'vi']\n )\n", (3843, 3898), False, 'from langchain_community.document_loaders import Docx2txtLoader, PyPDFLoader, TextLoader, YoutubeLoader\n'), ((7998, 8016), 'gradio.Column', 'gr.Column', ([], {'scale': '(2)'}), '(scale=2)\n', (8007, 8016), True, 'import gradio as gr\n'), ((8048, 8149), 'gradio.Slider', 'gr.Slider', ([], {'label': '"""Temperature"""', 'value': '(0.2)', 'minimum': '(0.0)', 'maximum': '(1.0)', 'step': '(0.05)', 'interactive': '(True)'}), "(label='Temperature', value=0.2, minimum=0.0, maximum=1.0, step=\n 0.05, interactive=True)\n", (8057, 8149), True, 'import gradio as gr\n'), ((8316, 8416), 'gradio.Slider', 'gr.Slider', ([], {'label': '"""Max Tokens"""', 'value': '(512)', 'minimum': '(512)', 'maximum': '(4096)', 'step': '(256)', 'interactive': '(True)'}), "(label='Max Tokens', value=512, minimum=512, maximum=4096, step=\n 256, interactive=True)\n", (8325, 8416), True, 'import gradio as gr\n'), ((8568, 8586), 'gradio.Column', 'gr.Column', ([], {'scale': '(2)'}), '(scale=2)\n', (8577, 8586), True, 'import gradio as gr\n'), ((8619, 8723), 'gradio.Slider', 'gr.Slider', ([], {'label': '"""Frequency Penalty"""', 'value': '(1.05)', 'minimum': '(-2)', 'maximum': '(2)', 'step': '(0.05)', 'interactive': '(True)'}), "(label='Frequency Penalty', value=1.05, minimum=-2, maximum=2,\n step=0.05, interactive=True)\n", (8628, 8723), True, 'import gradio as gr\n'), ((8891, 8991), 'gradio.Slider', 'gr.Slider', ([], {'label': '"""No. Retrieval Docs"""', 'value': '(3)', 'minimum': '(1)', 'maximum': '(10)', 'step': '(1)', 'interactive': '(True)'}), "(label='No. 
Retrieval Docs', value=3, minimum=1, maximum=10, step=\n 1, interactive=True)\n", (8900, 8991), True, 'import gradio as gr\n'), ((9583, 9591), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (9589, 9591), True, 'import gradio as gr\n'), ((3596, 3615), 'langchain_community.document_loaders.Docx2txtLoader', 'Docx2txtLoader', (['url'], {}), '(url)\n', (3610, 3615), False, 'from langchain_community.document_loaders import Docx2txtLoader, PyPDFLoader, TextLoader, YoutubeLoader\n'), ((9614, 9632), 'gradio.Column', 'gr.Column', ([], {'scale': '(9)'}), '(scale=9)\n', (9623, 9632), True, 'import gradio as gr\n'), ((9661, 9789), 'gradio.Dropdown', 'gr.Dropdown', ([], {'label': '"""Dataset"""', 'info': '"""Choose your dataset type"""', 'choices': "['Files (docx, pdf, txt)', 'YouTube (url)']", 'scale': '(5)'}), "(label='Dataset', info='Choose your dataset type', choices=[\n 'Files (docx, pdf, txt)', 'YouTube (url)'], scale=5)\n", (9672, 9789), True, 'import gradio as gr\n'), ((9930, 10021), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""URL"""', 'info': '"""Enter your filepath (URL for Youtube)"""', 'interactive': '(True)'}), "(label='URL', info='Enter your filepath (URL for Youtube)',\n interactive=True)\n", (9940, 10021), True, 'import gradio as gr\n'), ((10149, 10204), 'gradio.UploadButton', 'gr.UploadButton', ([], {'label': '"""Upload File"""', 'variant': '"""primary"""'}), "(label='Upload File', variant='primary')\n", (10164, 10204), True, 'import gradio as gr\n'), ((10349, 10367), 'gradio.Column', 'gr.Column', ([], {'scale': '(1)'}), '(scale=1)\n', (10358, 10367), True, 'import gradio as gr\n'), ((10404, 10456), 'gradio.Textbox', 'gr.Textbox', (['"""Model Not Loaded"""'], {'label': '"""Model Status"""'}), "('Model Not Loaded', label='Model Status')\n", (10414, 10456), True, 'import gradio as gr\n'), ((10492, 10537), 'gradio.Textbox', 'gr.Textbox', (['"""Not Index"""'], {'label': '"""Index Status"""'}), "('Not Index', label='Index Status')\n", (10502, 10537), True, 'import gradio as gr\n'), ((10982, 11028), 'gradio.Button', 'gr.Button', (['"""Start Indexing"""'], {'variant': '"""primary"""'}), "('Start Indexing', variant='primary')\n", (10991, 11028), True, 'import gradio as gr\n'), ((11208, 11234), 'gradio.Button', 'gr.Button', (['"""Stop Indexing"""'], {}), "('Stop Indexing')\n", (11217, 11234), True, 'import gradio as gr\n'), ((3680, 3695), 'langchain_community.document_loaders.TextLoader', 'TextLoader', (['url'], {}), '(url)\n', (3690, 3695), False, 'from langchain_community.document_loaders import Docx2txtLoader, PyPDFLoader, TextLoader, YoutubeLoader\n')] |
import time
import numpy as np
import torch
from torch.nn import functional as F
##########
# Functions for IMDB demo notebook.
# Data source: Stanford AI Lab https://ai.stanford.edu/~amaas/data/sentiment/
##########
# Output words instead of scores.
def sentiment_score_to_name(score: float):
if score > 0:
return "Positive"
elif score <= 0:
return "Negative"
# Split data into train, valid, test.
def partition_dataset(df_input, new_columns, smoke_test=False):
"""Splits data, assuming original, input dataframe contains 50K rows.
Args:
df_input (pandas.DataFrame): input data frame
smoke_test (boolean): if True, use smaller number of rows for testing
Returns:
        df_shuffled, df_train, df_val, df_test (pandas.DataFrame): shuffled full frame and train, valid, test splits.
"""
# Shuffle data and split into train/val/test.
df_shuffled = df_input.sample(frac=1, random_state=1).reset_index()
df_shuffled.columns = new_columns
df_train = df_shuffled.iloc[:35_000]
df_val = df_shuffled.iloc[35_000:40_000]
df_test = df_shuffled.iloc[40_000:]
# Save train/val/test split data locally in separate files.
df_train.to_csv("train.csv", index=False, encoding="utf-8")
df_val.to_csv("val.csv", index=False, encoding="utf-8")
df_test.to_csv("test.csv", index=False, encoding="utf-8")
return df_shuffled, df_train, df_val, df_test
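# Illustrative sketch (hypothetical file and column names): producing the 35K/5K/10K split
# described above from the raw 50K-review dump. The exact column list is an assumption;
# the chunking code below expects movie_index, text, label_int and label columns.
def _example_partition():
    import pandas as pd
    df_raw = pd.read_csv("IMDB_Dataset.csv")
    df_shuffled, df_train, df_val, df_test = partition_dataset(
        df_raw, new_columns=["movie_index", "text", "label_int", "label"]
    )
    print(df_train.shape, df_val.shape, df_test.shape)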
# Function for experimenting with chunk_size.
def imdb_chunk_text(encoder, batch_size, df, chunk_size, chunk_overlap):
batch = df.head(batch_size).copy()
print(f"chunk size: {chunk_size}")
print(f"original shape: {batch.shape}")
start_time = time.time()
# 1. Change primary key type to string.
batch["movie_index"] = batch["movie_index"].apply(lambda x: str(x))
# 2. Split the documents into smaller chunks and add as new column to batch df.
batch['chunk'] = batch['text'].apply(recursive_splitter_wrapper,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap)
# Explode the 'chunk' column to create new rows for each chunk.
batch = batch.explode('chunk', ignore_index=True)
print(f"new shape: {batch.shape}")
# 3. Add embeddings as new column in df.
review_embeddings = torch.tensor(encoder.encode(batch['chunk']))
# Normalize embeddings to unit length.
review_embeddings = F.normalize(review_embeddings, p=2, dim=1)
# Quick check if embeddings are normalized.
norms = np.linalg.norm(review_embeddings, axis=1)
assert np.allclose(norms, 1.0, atol=1e-5) == True
# 4. Convert embeddings to list of `numpy.ndarray`, each containing `numpy.float32` numbers.
converted_values = list(map(np.float32, review_embeddings))
batch['vector'] = converted_values
    # 5. Reorder columns for convenience, so index first, labels at end.
new_order = ["movie_index", "text", "chunk", "vector", "label_int", "label"]
batch = batch[new_order]
end_time = time.time()
print(f"Chunking + embedding time for {batch_size} docs: {end_time - start_time} sec")
# Inspect the batch of data.
display(batch.head())
# assert len(batch.chunk[0]) <= MAX_SEQ_LENGTH-1
# assert len(batch.vector[0]) == EMBEDDING_LENGTH
print(f"type embeddings: {type(batch.vector)} of {type(batch.vector[0])}")
print(f"of numbers: {type(batch.vector[0][0])}")
# Chunking looks good, drop the original text column.
batch.drop(columns=["text"], inplace=True)
return batch
# Function for embedding a query.
def embed_query(encoder, query):
# Embed the query using same embedding model used to create the Milvus collection.
query_embeddings = torch.tensor(encoder.encode(query))
# Normalize embeddings to unit length.
query_embeddings = F.normalize(query_embeddings, p=2, dim=1)
# Quick check if embeddings are normalized.
norms = np.linalg.norm(query_embeddings, axis=1)
assert np.allclose(norms, 1.0, atol=1e-5) == True
# Convert the embeddings to list of list of np.float32.
query_embeddings = list(map(np.float32, query_embeddings))
return query_embeddings
##########
# Functions for LangChain chunking and embedding.
##########
from typing import List
from langchain.text_splitter import RecursiveCharacterTextSplitter
def recursive_splitter_wrapper(text, chunk_size, chunk_overlap):
# Default chunk overlap is 10% chunk_size.
chunk_overlap = np.round(chunk_size * 0.10, 0)
# Use langchain's convenient recursive chunking method.
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
length_function=len,
)
chunks: List[str] = text_splitter.split_text(text)
# Replace special characters with spaces.
chunks = [text.replace("<br /><br />", " ") for text in chunks]
return chunks
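# Illustrative sketch: chunking a single review with the wrapper above. Note that the
# function recomputes chunk_overlap as 10% of chunk_size regardless of the value passed in.
def _example_chunking():
    sample = ("This movie was fantastic!<br /><br />The acting, the score and the pacing "
              "all worked together beautifully.")
    chunks = recursive_splitter_wrapper(sample, chunk_size=40, chunk_overlap=4)
    print(chunks)  # short overlapping chunks with the <br /><br /> markers replaced by spaces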
##########
# Functions to process Milvus Client API responses.
##########
def client_assemble_retrieved_context(retrieved_top_k, metadata_fields=[], num_shot_answers=3):
"""
For each question, assemble the context and metadata from the retrieved_top_k chunks.
retrieved_top_k: list of dicts
"""
# Assemble the context as a stuffed string.
distances = []
context = []
context_metadata = []
i = 1
for r in retrieved_top_k[0]:
distances.append(r['distance'])
if i <= num_shot_answers:
if len(metadata_fields) > 0:
metadata = {}
for field in metadata_fields:
metadata[field] = r['entity'][field]
context_metadata.append(metadata)
context.append(r['entity']['chunk'])
i += 1
# Assemble formatted results in a zipped list.
formatted_results = list(zip(distances, context, context_metadata))
# Return all the things for convenience.
return formatted_results, context, context_metadata
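# Illustrative sketch (hypothetical search results): the list-of-dicts shape this helper
# expects from the Milvus client API, i.e. one inner list of hits per query.
def _example_assemble_client_context():
    retrieved_top_k = [[
        {"distance": 0.21, "entity": {"chunk": "Milvus is a vector database.",
                                      "source": "docs/overview.md"}},
        {"distance": 0.34, "entity": {"chunk": "Collections hold vectors plus metadata.",
                                      "source": "docs/concepts.md"}},
    ]]
    formatted, context, context_metadata = client_assemble_retrieved_context(
        retrieved_top_k, metadata_fields=["source"], num_shot_answers=2
    )
    print(context_metadata)  # [{'source': 'docs/overview.md'}, {'source': 'docs/concepts.md'}]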
##########
# Functions to process Milvus Search API responses.
##########
# Parse out the answer and context metadata from Milvus Search response.
def assemble_answer_sources(answer, context_metadata):
"""Assemble the answer and grounding sources into a string"""
grounded_answer = f"Answer: {answer}\n"
grounded_answer += "Grounding sources and citations:\n"
for metadata in context_metadata:
try:
grounded_answer += f"'h1': {metadata['h1']}, 'h2':{metadata['h2']}\n"
        except KeyError:
pass
try:
grounded_answer += f"'source': {metadata['source']}"
        except KeyError:
pass
return grounded_answer
# Stuff answers into a context string and stuff metadata into a list of dicts.
def assemble_retrieved_context(retrieved_results, metadata_fields=[], num_shot_answers=3):
# Assemble the context as a stuffed string.
# Also save the context metadata to retrieve along with the answer.
context = []
context_metadata = []
i = 1
for r in retrieved_results[0]:
if i <= num_shot_answers:
if len(metadata_fields) > 0:
metadata = {}
for field in metadata_fields:
metadata[field] = getattr(r.entity, field, None)
context_metadata.append(metadata)
context.append(r.entity.text)
i += 1
return context, context_metadata | [
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((1670, 1681), 'time.time', 'time.time', ([], {}), '()\n', (1679, 1681), False, 'import time\n'), ((2431, 2473), 'torch.nn.functional.normalize', 'F.normalize', (['review_embeddings'], {'p': '(2)', 'dim': '(1)'}), '(review_embeddings, p=2, dim=1)\n', (2442, 2473), True, 'from torch.nn import functional as F\n'), ((2534, 2575), 'numpy.linalg.norm', 'np.linalg.norm', (['review_embeddings'], {'axis': '(1)'}), '(review_embeddings, axis=1)\n', (2548, 2575), True, 'import numpy as np\n'), ((3031, 3042), 'time.time', 'time.time', ([], {}), '()\n', (3040, 3042), False, 'import time\n'), ((3838, 3879), 'torch.nn.functional.normalize', 'F.normalize', (['query_embeddings'], {'p': '(2)', 'dim': '(1)'}), '(query_embeddings, p=2, dim=1)\n', (3849, 3879), True, 'from torch.nn import functional as F\n'), ((3940, 3980), 'numpy.linalg.norm', 'np.linalg.norm', (['query_embeddings'], {'axis': '(1)'}), '(query_embeddings, axis=1)\n', (3954, 3980), True, 'import numpy as np\n'), ((4486, 4515), 'numpy.round', 'np.round', (['(chunk_size * 0.1)', '(0)'], {}), '(chunk_size * 0.1, 0)\n', (4494, 4515), True, 'import numpy as np\n'), ((4598, 4706), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'length_function': 'len'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap, length_function=len)\n', (4628, 4706), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2587, 2622), 'numpy.allclose', 'np.allclose', (['norms', '(1.0)'], {'atol': '(1e-05)'}), '(norms, 1.0, atol=1e-05)\n', (2598, 2622), True, 'import numpy as np\n'), ((3992, 4027), 'numpy.allclose', 'np.allclose', (['norms', '(1.0)'], {'atol': '(1e-05)'}), '(norms, 1.0, atol=1e-05)\n', (4003, 4027), True, 'import numpy as np\n')] |
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
llm_creative = ChatOpenAI(temperature=1, model_name="gpt-3.5-turbo")
def get_summary_chain() -> LLMChain:
summary_template = """
given the information about a person from linkedin {information}, and twitter posts {twitter_posts} I want you to create:
1. a short summary
2. two interesting facts about them
\n{format_instructions}
"""
summary_prompt_template = PromptTemplate(
input_variables=["information", "twitter_posts"],
template=summary_template,
partial_variables={
"format_instructions": summary_parser.get_format_instructions()
},
)
return LLMChain(llm=llm, prompt=summary_prompt_template)
def get_interests_chain() -> LLMChain:
interesting_facts_template = """
given the information about a person from linkedin {information}, and twitter posts {twitter_posts} I want you to create:
3 topics that might interest them
\n{format_instructions}
"""
interesting_facts_prompt_template = PromptTemplate(
input_variables=["information", "twitter_posts"],
template=interesting_facts_template,
partial_variables={
"format_instructions": topics_of_interest_parser.get_format_instructions()
},
)
return LLMChain(llm=llm, prompt=interesting_facts_prompt_template)
def get_ice_breaker_chain() -> LLMChain:
ice_breaker_template = """
given the information about a person from linkedin {information}, and twitter posts {twitter_posts} I want you to create:
2 creative Ice breakers with them that are derived from their activity on Linkedin and twitter, preferably on latest tweets
\n{format_instructions}
"""
ice_breaker_prompt_template = PromptTemplate(
input_variables=["information", "twitter_posts"],
template=ice_breaker_template,
partial_variables={
"format_instructions": ice_breaker_parser.get_format_instructions()
},
)
return LLMChain(llm=llm_creative, prompt=ice_breaker_prompt_template)
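# Illustrative sketch (hypothetical inputs): running one of the chains above end to end.
# summary_parser is assumed to expose the usual .parse() method of a LangChain output parser.
if __name__ == "__main__":
    chain = get_summary_chain()
    raw = chain.run(
        information="<scraped LinkedIn profile text>",
        twitter_posts="<recent tweets>",
    )
    print(summary_parser.parse(raw))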
| [
"langchain.chains.LLMChain",
"langchain_openai.ChatOpenAI"
] | [((225, 278), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (235, 278), False, 'from langchain_openai import ChatOpenAI\n'), ((294, 347), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(1)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=1, model_name='gpt-3.5-turbo')\n", (304, 347), False, 'from langchain_openai import ChatOpenAI\n'), ((933, 982), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'summary_prompt_template'}), '(llm=llm, prompt=summary_prompt_template)\n', (941, 982), False, 'from langchain.chains import LLMChain\n'), ((1580, 1639), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'interesting_facts_prompt_template'}), '(llm=llm, prompt=interesting_facts_prompt_template)\n', (1588, 1639), False, 'from langchain.chains import LLMChain\n'), ((2304, 2366), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm_creative', 'prompt': 'ice_breaker_prompt_template'}), '(llm=llm_creative, prompt=ice_breaker_prompt_template)\n', (2312, 2366), False, 'from langchain.chains import LLMChain\n'), ((863, 903), 'output_parsers.summary_parser.get_format_instructions', 'summary_parser.get_format_instructions', ([], {}), '()\n', (901, 903), False, 'from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser\n'), ((1499, 1550), 'output_parsers.topics_of_interest_parser.get_format_instructions', 'topics_of_interest_parser.get_format_instructions', ([], {}), '()\n', (1548, 1550), False, 'from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser\n'), ((2230, 2274), 'output_parsers.ice_breaker_parser.get_format_instructions', 'ice_breaker_parser.get_format_instructions', ([], {}), '()\n', (2272, 2274), False, 'from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser\n')] |
import asyncio
import uvicorn
from typing import AsyncIterable, Awaitable
from dotenv import load_dotenv
from fastapi import FastAPI
from fastapi.responses import FileResponse, StreamingResponse
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
load_dotenv()
async def wait_done(fn: Awaitable, event: asyncio.Event):
try:
await fn
except Exception as e:
print(e)
event.set()
finally:
event.set()
async def call_openai(question: str) -> AsyncIterable[str]:
callback = AsyncIteratorCallbackHandler()
model = ChatOpenAI(streaming=True, verbose=True, callbacks=[callback])
coroutine = wait_done(model.agenerate(messages=[[HumanMessage(content=question)]]), callback.done)
task = asyncio.create_task(coroutine)
async for token in callback.aiter():
yield f"{token}"
await task
app = FastAPI()
@app.post("/ask")
def ask(body: dict):
return StreamingResponse(call_openai(body['question']), media_type="text/event-stream")
@app.get("/")
async def homepage():
return FileResponse('statics/index.html')
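# Illustrative sketch (assumes the server below is running on localhost:8888): consuming
# the /ask token stream from a plain requests client.
def _example_stream_client():
    import requests
    with requests.post("http://localhost:8888/ask",
                       json={"question": "Tell me a joke"}, stream=True) as resp:
        for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
            print(chunk, end="", flush=True)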
if __name__ == "__main__":
uvicorn.run(host="0.0.0.0", port=8888, app=app) | [
"langchain.callbacks.AsyncIteratorCallbackHandler",
"langchain.schema.HumanMessage",
"langchain.chat_models.ChatOpenAI"
] | [((345, 358), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (356, 358), False, 'from dotenv import load_dotenv\n'), ((959, 968), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (966, 968), False, 'from fastapi import FastAPI\n'), ((616, 646), 'langchain.callbacks.AsyncIteratorCallbackHandler', 'AsyncIteratorCallbackHandler', ([], {}), '()\n', (644, 646), False, 'from langchain.callbacks import AsyncIteratorCallbackHandler\n'), ((659, 721), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'streaming': '(True)', 'verbose': '(True)', 'callbacks': '[callback]'}), '(streaming=True, verbose=True, callbacks=[callback])\n', (669, 721), False, 'from langchain.chat_models import ChatOpenAI\n'), ((837, 867), 'asyncio.create_task', 'asyncio.create_task', (['coroutine'], {}), '(coroutine)\n', (856, 867), False, 'import asyncio\n'), ((1149, 1183), 'fastapi.responses.FileResponse', 'FileResponse', (['"""statics/index.html"""'], {}), "('statics/index.html')\n", (1161, 1183), False, 'from fastapi.responses import FileResponse, StreamingResponse\n'), ((1216, 1263), 'uvicorn.run', 'uvicorn.run', ([], {'host': '"""0.0.0.0"""', 'port': '(8888)', 'app': 'app'}), "(host='0.0.0.0', port=8888, app=app)\n", (1227, 1263), False, 'import uvicorn\n'), ((776, 806), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'question'}), '(content=question)\n', (788, 806), False, 'from langchain.schema import HumanMessage\n')] |
# coding: UTF-8
import gc
import glob
import torch
import time
import os
import json
from collections import defaultdict
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.schema import Document
from langchain.vectorstores import FAISS
from tqdm import tqdm
import config
import re
base_path = config.BASE_PATH
shortname_allname_path = 'output/company/shortname_allname.json'
shortname_allname = json.load(open(shortname_allname_path,"r"))
text_path = "../../alltxt"
output_path = "output"
vector_path = "output/vector_bert"
chinese_numbers = "一二三四五六七八九十"
english_numbers = "123456789"
pattern = f"^([{chinese_numbers}]+、|\([{chinese_numbers}]\)|([{chinese_numbers}])|[{english_numbers}]、|\([{english_numbers}]\)|([{english_numbers}])|[{english_numbers}].)"
pattern_1 = f"^([{chinese_numbers}]+、)"
pattern_2 = f"^(\([{chinese_numbers}]\)|([{chinese_numbers}])|[{english_numbers}]、|\([{english_numbers}]\)|([{english_numbers}])|[{english_numbers}].)"
pattern2 = f"^([{chinese_numbers}]+、)"
def docs_add_one(strs,doc_id,):
result = []
metadata = {"source": f'doc_{doc_id}'}
if isinstance(strs, str):
result.append(Document(page_content=strs, metadata=metadata))
return result
def split_context(all_content,end_page,start_allrow):
docs = []
temp_dict2 = {}
temp_dict2_new = {}
for index,context in enumerate(all_content):
context = eval(context)
if not context:
continue
inside = context['inside']
page = context['page']
allrow = int(context['allrow'])
if int(page) < end_page and int(allrow) >= start_allrow:
            # Find all matches in the text
matches1 = re.findall(pattern_1, inside)
matches2 = re.findall(pattern_2, inside)
if len(inside)<30:
if matches1:
temp_dict2[inside] = all_content[index+1:index + 180]
elif matches2:
temp_dict2[inside] = all_content[index+1:index + 60]
doc_id = 0
for key,values in temp_dict2.items():
strs = ''
max_lengh = 1
if re.findall(pattern_1, key):
max_lengh = 2200
elif re.findall(pattern_2, key):
max_lengh = 800
for index_tmp,value in enumerate(values):
value = eval(value)
if not value:
continue
inside = value['inside']
text_type = value['type']
matches2 = re.findall(pattern2, inside)
if (matches2 and index_tmp > 4) or (len(strs)>max_lengh):
break
if text_type == '页眉' or text_type == '页脚' or inside == '':
continue
strs += f'\n{inside}'
if strs:
new_key = re.sub(pattern,'', key)
if new_key:
temp_dict2_new[new_key] = strs
# input_text = new_key
input_text = new_key + ":\n" + strs
single_vector = docs_add_one(input_text,doc_id)
docs.extend(single_vector)
doc_id += 1
return temp_dict2_new,docs
def covert_data_vector_opening(company_year,embeddings):
vector_store = []
temp_dict2 = {}
if company_year in shortname_allname:
file = shortname_allname[company_year]
try:
with open(os.path.join(base_path, file), "r", encoding='utf-8') as f:
all_content = f.readlines()
start_allrow = 3
end_page = 1000
for context in all_content:
context = eval(context)
if not context:
continue
inside = context['inside']
page = context['page']
allrow = int(context['allrow'])
if int(page) <= 10:
if inside == '一、公司信息':
start_allrow = allrow
end_page_list = re.findall(r'第.+节财务报告\.+(\d+)', inside)
if end_page_list:
end_page = int(end_page_list[0])+10
else:
break
temp_dict2,docs = split_context(all_content,end_page,start_allrow)
vector_store = FAISS.from_documents(docs, embeddings)
torch.cuda.empty_cache()
gc.collect()
except Exception as e:
print(e)
if vector_store and temp_dict2:
# print(f"{company_year}向量导入成功。")
return (vector_store,temp_dict2)
else:
return []
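# Illustrative sketch (hypothetical company-year key): building the per-report FAISS index
# and querying it. The embeddings object is created by the caller in the real pipeline.
def _example_query_company_report(embeddings):
    result = covert_data_vector_opening("示例公司2021年", embeddings)
    if result:
        vector_store, section_dict = result
        for doc in vector_store.similarity_search("主营业务收入", k=3):
            print(doc.metadata["source"], doc.page_content[:100])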
| [
"langchain.schema.Document",
"langchain.vectorstores.FAISS.from_documents"
] | [((2132, 2158), 're.findall', 're.findall', (['pattern_1', 'key'], {}), '(pattern_1, key)\n', (2142, 2158), False, 'import re\n'), ((1175, 1221), 'langchain.schema.Document', 'Document', ([], {'page_content': 'strs', 'metadata': 'metadata'}), '(page_content=strs, metadata=metadata)\n', (1183, 1221), False, 'from langchain.schema import Document\n'), ((1701, 1730), 're.findall', 're.findall', (['pattern_1', 'inside'], {}), '(pattern_1, inside)\n', (1711, 1730), False, 'import re\n'), ((1754, 1783), 're.findall', 're.findall', (['pattern_2', 'inside'], {}), '(pattern_2, inside)\n', (1764, 1783), False, 'import re\n'), ((2202, 2228), 're.findall', 're.findall', (['pattern_2', 'key'], {}), '(pattern_2, key)\n', (2212, 2228), False, 'import re\n'), ((2490, 2518), 're.findall', 're.findall', (['pattern2', 'inside'], {}), '(pattern2, inside)\n', (2500, 2518), False, 'import re\n'), ((2781, 2805), 're.sub', 're.sub', (['pattern', '""""""', 'key'], {}), "(pattern, '', key)\n", (2787, 2805), False, 'import re\n'), ((4234, 4272), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['docs', 'embeddings'], {}), '(docs, embeddings)\n', (4254, 4272), False, 'from langchain.vectorstores import FAISS\n'), ((4285, 4309), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (4307, 4309), False, 'import torch\n'), ((4322, 4334), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4332, 4334), False, 'import gc\n'), ((3353, 3382), 'os.path.join', 'os.path.join', (['base_path', 'file'], {}), '(base_path, file)\n', (3365, 3382), False, 'import os\n'), ((3942, 3982), 're.findall', 're.findall', (['"""第.+节财务报告\\\\.+(\\\\d+)"""', 'inside'], {}), "('第.+节财务报告\\\\.+(\\\\d+)', inside)\n", (3952, 3982), False, 'import re\n')] |
# -*- coding: UTF-8 -*-
"""
@Project : AI-Vtuber
@File : claude_model.py
@Author : HildaM
@Email : [email protected]
@Date : 2023/06/17 4:44 PM
@Description : Local vector database model settings
"""
from langchain.embeddings import HuggingFaceEmbeddings
import os
# Project root path
TEC2VEC_MODELS_PATH = os.getcwd() + "\\" + "data" + "\\" + "text2vec_models" + "\\"
# Default model
DEFAULT_MODEL_NAME = "sebastian-hofstaetter_distilbert-dot-tas_b-b256-msmarco"
def get_default_model():
return HuggingFaceEmbeddings(model_name=TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)
def get_text2vec_model(model_name):
"""
    0. Null check: if model_name is empty, load the built-in default model.
    1. First check whether the model exists in the project's data/text2vec_models directory.
    2. If it exists, load it directly.
    3. If it does not, download it from Hugging Face and store it in the system cache.
"""
if model_name is None:
return HuggingFaceEmbeddings(model_name=TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)
model_path = TEC2VEC_MODELS_PATH + model_name
if os.path.exists(model_path):
return HuggingFaceEmbeddings(model_name=model_path)
else:
return HuggingFaceEmbeddings(model_name=model_name)
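# Illustrative usage sketch (the model name below is hypothetical; any text2vec-style
# model directory under data/text2vec_models, or a Hugging Face repo id, should work):
# embeddings = get_text2vec_model("shibing624_text2vec-base-chinese")
# vector = embeddings.embed_query("hello world")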
| [
"langchain.embeddings.HuggingFaceEmbeddings"
] | [((468, 542), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '(TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)'}), '(model_name=TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)\n', (489, 542), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((908, 934), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (922, 934), False, 'import os\n'), ((775, 849), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '(TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)'}), '(model_name=TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)\n', (796, 849), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((951, 995), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'model_path'}), '(model_name=model_path)\n', (972, 995), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((1021, 1065), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (1042, 1065), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((283, 294), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (292, 294), False, 'import os\n')] |
import datetime
import json
import pkgutil
import time
import uuid
import os
import copy
from dataclasses import asdict
import datasets as ds
from cot.config import Config
from cot.utils.schemas.cot import features as cot_features
# disable transformation (e.g. map) caching
# https://huggingface.co/docs/datasets/v2.6.1/en/package_reference/main_classes#datasets.disable_caching
ds.disable_caching()
FRAGMENTS = json.loads(pkgutil.get_data(__name__, "fragments.json"))
def generate_and_extract(data, config):
"""
    It takes a dataset and a config, generates CoTs for each example and extracts answers.
    :param data: Dataset/DatasetDict - the dataset you want to generate CoTs for and extract answers from
    :param config: Dictionary - the configuration of the input and model
    :return: the dataset with generated CoTs and extracted answers
"""
ds.disable_caching()
data.cleanup_cache_files()
if isinstance(data, ds.arrow_dataset.Dataset):
features = data.info.features
question_type = data[0]["type"]
question_number_choices = len(data[0]["choices"])
elif isinstance(data, ds.dataset_dict.DatasetDict):
name_of_first_split = list(data.keys())[0]
features = data[name_of_first_split].info.features
question_type = data[name_of_first_split][0]["type"]
question_number_choices = len(data[name_of_first_split][0]["choices"])
else:
raise ValueError("Not recognized data")
# automated change of answer_extraction depending on the type of the task and the number of choices
# if type str make list
if isinstance(config["answer_extraction_keys"], str):
config["answer_extraction_keys"] = [config["answer_extraction_keys"]]
# make copy of config, so it is not changed permanently (but only for the current dataset), when auto-kojima is used:
adaptive_config = config.copy()
if adaptive_config["answer_extraction_keys"] == ["auto-kojima"]:
adaptive_config["answer_extraction_keys"] = adaptive_answer_extraction("auto-kojima", question_type, question_number_choices)
# The config is transformed into a dataclass object, where all testing is done
# But it will be transformed back to a dictionary for the function 'map'
config_as_dataclass = Config(**adaptive_config)
return data.map(
_generate_and_extract,
with_indices=True,
fn_kwargs=asdict(config_as_dataclass),
features=features,
load_from_cache_file=False,
)
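# Illustrative call; only the keys referenced in this module are shown, and the Config dataclass
# in cot/config.py defines the full (and required) set of options:
# config = {"api_service": "mock_api", "engine": "text-davinci-002", "temperature": 0, "max_tokens": 512,
#           "instruction_keys": [None], "cot_trigger_keys": ["kojima-01"],
#           "answer_extraction_keys": ["auto-kojima"], "author": "your-name", "verbose": False}
# dataset = generate_and_extract(dataset, config)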
def _generate_and_extract(
item,
idx,
# all of the following variables will be defined by the config_as_dataclass object
idx_range,
author,
api_service,
engine,
temperature,
max_tokens,
api_time_interval,
instruction_keys,
cot_trigger_keys,
template_cot_generation,
answer_extraction_keys,
template_answer_extraction,
warn,
verbose,
):
"""
    The function takes in a JSON object (item) and generates a CoT (Chain-of-Thought) for each combination
    of instructions and CoT triggers. For each generated CoT and for each of the given answer extractions it extracts an answer.
:param item: the item (example) of a dataset to be processed
:param idx: the index of the item in the dataset
other parameters are handed over from config and are described in config.py
:return: item populated with various fields
"""
if idx_range == "all" or (idx >= idx_range[0] and idx < idx_range[1]):
pass
else:
return item
# predefine values in template dictionary that stay same over all runs of the current item
template_dict = {
"instruction": None,
"question": item["question"],
"answer_choices": multiple_choice_answer_formatting(item["choices"]),
"cot_trigger": None,
"cot": None,
"answer_extraction": None,
}
# try multiple times in case of API-Error
additional_api_time = 0
number_of_tries = 5
for i in range(0, number_of_tries):
try:
# add additional time to api_time_interval if there was an error
api_time_interval = api_time_interval + additional_api_time
# generate chain of thoughts and extract answers
for instruction_key in instruction_keys:
template_dict["instruction"] = get_fragments_value("instructions", instruction_key)
for cot_trigger_key in cot_trigger_keys:
generated_cot = {
"id": str(uuid.uuid4()),
"fragments_version": FRAGMENTS["version"],
"instruction": instruction_key,
"cot_trigger": cot_trigger_key,
"cot_trigger_template": template_cot_generation,
"prompt_text": "",
"cot": "",
"answers": [],
"author": author,
"date": "",
"api_service": api_service,
"model": str(
{
"name": engine,
"temperature": temperature,
"max_tokens": max_tokens,
}
),
"comment": "",
"annotations": [],
}
template_dict["cot_trigger"] = get_fragments_value("cot_triggers", cot_trigger_key)
# change template_cot_generation to generated_cot["cot_trigger_template"] to make it more logical
generate_cot_prompt = format_prompt(template_cot_generation, template_dict)
if verbose:
print("\n-----------------COT TRIGGER TEXT-----------------")
print(generate_cot_prompt)
cot = query_model(
generate_cot_prompt,
api_service,
engine,
temperature,
max_tokens,
api_time_interval,
)
if verbose:
print("\n------------------GENERATED COT-------------------")
print(cot)
template_dict["cot"] = cot
generated_cot["cot"] = cot
# deactivated automatic prompt text generation: (code line stays here for testing purposes)
# generated_cot["prompt_text"] = generate_cot_prompt
generated_cot["date"] = print_now(1)
# extract answers from generated chain of thoughts
for answer_extraction_key in answer_extraction_keys:
if answer_extraction_key is None:
pass
else:
answer = {
"id": str(uuid.uuid4()),
"answer_extraction": answer_extraction_key,
"answer_extraction_template": template_answer_extraction,
"answer_extraction_text": "",
"answer": "",
"answer_from_choices": "",
"correct_answer": None,
}
template_dict["answer_extraction"] = get_fragments_value("answer_extractions", answer_extraction_key)
answer_extraction_prompt = format_prompt(template_answer_extraction, template_dict)
if verbose:
print("\n----------------ANSWER EXTRACTION TEXT----------------")
print(answer_extraction_prompt)
predicted_answer = query_model(
answer_extraction_prompt,
api_service,
engine,
temperature,
max_tokens,
api_time_interval,
)
if verbose:
print("\n------------------EXTRACTED ANSWER-------------------")
print(predicted_answer)
answer["answer"] = predicted_answer
# deactivated automatic prompt text generation: (code line stays here for testing purposes)
# answer["answer_extraction_text"] = answer_extraction_prompt
generated_cot["answers"].append(answer)
item["generated_cot"].append(generated_cot)
except Exception as ex:
# if last try, raise error
if i == number_of_tries - 1:
raise ex
# if not last try, add additional time to api_time_interval and try again
additional_api_time += 10
print("(API-)Error in item " + str(idx) + ": " + str(ex))
print("Retrying with additional time of " + str(additional_api_time) + " seconds.")
pass
else:
break
return item
def helper(data):
ds.disable_caching()
data.cleanup_cache_files()
if isinstance(data, ds.arrow_dataset.Dataset):
features = data.info.features
elif isinstance(data, ds.dataset_dict.DatasetDict):
name_of_first_split = list(data.keys())[0]
features = data[name_of_first_split].info.features
else:
raise ValueError("Not recognized data")
return data, features
"""
Input: item, langchains, triggers
Output: cot and answer
Generate a cot and extract an answer with helper function _self_generate_extract
"""
def self_generate_extract(data,input_dict):
data, features = helper(data)
return data.map(
_self_generate_extract,
with_indices=True,
fn_kwargs=input_dict,
features=features,
load_from_cache_file=False,
)
def _self_generate_extract(item,idx,input_dict):
shadow_input_dict = copy.deepcopy(input_dict)
chain = input_dict['chain']
del shadow_input_dict['chain']
shadow_input_dict['question'] = item["question"]
shadow_input_dict['answer_choices'] = multiple_choice_answer_formatting(item["choices"])
#get cot and predicted answer
lang_chain = chain(shadow_input_dict)
generated_cot = {
"id": str(uuid.uuid4()),
"fragments_version": None,
"instruction": input_dict['instruction'],
"cot_trigger": input_dict['cot_trigger'],
"cot_trigger_template": "",
"prompt_text": "",
"cot": lang_chain['cot'],
"answers": [],
"author": "",
"date": "",
"api_service": "",
"model": str(
{
"name": input_dict['model'],
"temperature": input_dict["temperature"],
"max_tokens": input_dict["max_tokens"]
}
),
"comment": "generated and extracted",
"annotations": [],
}
generated_cot["date"] = print_now(1)
answer = {
"id": str(uuid.uuid4()),
"answer_extraction": input_dict['answer_extraction'],
"answer_extraction_template": "",
"answer_extraction_text": "",
"answer": lang_chain['predicted_answer'],
'answer_from_choices':"",
"correct_answer": None,
}
#add created answer and cot to item
generated_cot["answers"].append(answer)
item["generated_cot"].append(generated_cot)
print(item)
return item
"""Generate CoTs only"""
def self_generate(data,input_dict):
data, features = helper(data)
return data.map(
_self_generate,
with_indices=True,
fn_kwargs=input_dict,
features=features,
load_from_cache_file=False,
)
def _self_generate(item,idx, input_dict):
#feed data to input dict, isolate chain
shadow_input_dict = copy.deepcopy(input_dict)
chain = input_dict['chain']
del shadow_input_dict['chain']
shadow_input_dict['question'] = item["question"]
shadow_input_dict['answer_choices'] = multiple_choice_answer_formatting(item["choices"])
#get cot and predicted answer
lang_chain = chain(shadow_input_dict)
"""If conditions for input keys"""
generated_cot = {
"id": str(uuid.uuid4()),
"fragments_version": "",
"instruction": input_dict["instruction"],
"cot_trigger": input_dict["cot_trigger"],
"cot_trigger_template": "",
"prompt_text": "",
"cot": lang_chain['cot'],
"answers": [],
"author": "",
"date": "",
"api_service": input_dict["api_service"],
"model": str(
{
"name": input_dict["model"],
"temperature": input_dict["temperature"],
"max_tokens": input_dict["max_tokens"]
}
),
"comment": "generated only",
"annotations": [],
}
generated_cot["date"] = print_now(1)
item["generated_cot"].append(generated_cot)
return item
"""Extract answers based on CoTs only"""
def self_extract(data,input_dict):
data, features = helper(data)
return data.map(
_self_extract,
with_indices=True,
fn_kwargs=input_dict,
features=features,
load_from_cache_file=False,
)
"""ToDo show which CoT to take"""
def _self_extract(item,idx,input_dict):
#extract based on the first cot in the dataset, throw error otherwise
if len(item['generated_cot'])>1:
raise ValueError('Too many generated CoTs, only one allowed')
else:
cot = item['generated_cot'][0]['cot']
input_dict['cot'] = cot
shadow_input_dict = copy.deepcopy(input_dict)
chain = input_dict['chain']
del shadow_input_dict['chain']
shadow_input_dict['question'] = item["question"]
shadow_input_dict['answer_choices'] = multiple_choice_answer_formatting(item["choices"])
#get cot and predicted answer
lang_chain = chain(shadow_input_dict)
"""If conditions for input keys"""
answer = {
"id": str(uuid.uuid4()),
"answer_extraction": input_dict['answer_extraction'],
"answer_extraction_template": "",
"answer_extraction_text": "",
"answer": "",
'answer_from_choices':"",
"correct_answer": None,
}
answer["answer"] = lang_chain['predicted_answer']
#we add the answer to the already existing generated cot
# print(item['generated_cot'][0]["answers"])
# item['generated_cot'][0]["answers"].append(answer)
# print("################")
# print(item['generated_cot'][0]["answers"])
return item
"""Reflect on CoT (or some other part) and generate new answer"""
def self_reflect(data, input_dict):
data, features = helper(data)
return data.map(
_self_reflect,
with_indices=True,
fn_kwargs=input_dict,
features=features,
load_from_cache_file=False,
)
"""In this version the reflection is added to generated_cot"""
def _self_reflect(item, idx, input_dict):
#reflect based on the first cot in the dataset, throw error otherwise
if len(item['generated_cot']) > 1:
raise ValueError('Too many generated CoTs, only one allowed')
else:
input_dict['cot'] = item['generated_cot'][0]['cot']
shadow_input_dict = copy.deepcopy(input_dict)
chain = input_dict['chain']
del shadow_input_dict['chain']
shadow_input_dict['question'] = item["question"]
shadow_input_dict['answer_choices'] = multiple_choice_answer_formatting(item["choices"])
# here we take the first answer from the first cot
shadow_input_dict['answer'] = item["generated_cot"][0]['answers'][0]['answer']
#this is where the magic happens
lang_chain = chain(shadow_input_dict)
#retrieve question and answer choices from item, add to input dict
generated_cot = {
"id": str(uuid.uuid4()),
"fragments_version": "",
"instruction": "",
"cot_trigger": input_dict["reflection_prompt"],
"cot_trigger_template": "",
"prompt_text": "",
"cot": lang_chain['reflection'],
"answers": [],
"author": "",
"date": "",
"api_service": input_dict["api_service"],
"model": str(
{
"name": input_dict["model"],
"temperature": input_dict["temperature"],
"max_tokens": input_dict["max_tokens"],
}
),
"comment": "self_reflection cot",
"annotations": [],
}
generated_cot["date"] = print_now(1)
"""If conditions for input keys"""
answer = {
"id": str(uuid.uuid4()),
"answer_extraction": input_dict['reflect_answer_extraction'],
"answer_extraction_template": "",
"answer_extraction_text": "self_reflection",
"answer": "",
'answer_from_choices':"",
"correct_answer": None,
}
answer["answer"] = lang_chain['reflection_answer']
generated_cot["answers"].append(answer)
item["generated_cot"].append(generated_cot)
return item
def full_text_prompts(dataset, prompt_text=True, answer_extraction_text=True):
assert isinstance(dataset, ds.arrow_dataset.Dataset), "dataset must be an arrow dataset"
dataset = dataset.map(
_full_text_prompts,
fn_kwargs={
"prompt_text": prompt_text,
"answer_extraction_text": answer_extraction_text,
},
features=dataset.info.features,
load_from_cache_file=False,
)
return dataset
def _full_text_prompts(item, prompt_text, answer_extraction_text):
# predefine values in template dictionary that stay same over all runs of the current item
template_dict = {
"instruction": None,
"question": item["question"],
"cot_trigger": None,
"cot": None,
"answer_extraction": None,
}
for generated_cot in item["generated_cot"]:
answer_choices = (multiple_choice_answer_formatting(item["choices"]),)
# function returns a tuple instead of a string
# did not find out why it behaves differently here than in the _generate_and_extract function
if type(answer_choices) == tuple:
answer_choices = answer_choices[0]
template_dict["answer_choices"] = answer_choices
# generate chain of thoughts and extract answers
# for instruction_key in instruction_keys:
template_dict["instruction"] = get_fragments_value("instructions", generated_cot["instruction"])
template_dict["cot_trigger"] = get_fragments_value("cot_triggers", generated_cot["cot_trigger"])
generate_cot_prompt = format_prompt(generated_cot["cot_trigger_template"], template_dict)
template_dict["cot"] = generated_cot["cot"]
# Everything above could also be relevant for the answer extraction
# now generating the full text for the chain of thoughts
if prompt_text:
generated_cot["prompt_text"] = generate_cot_prompt
# if answer_extraction: ...
if answer_extraction_text:
# extract answers from generated chain of thoughts
for answer in generated_cot["answers"]:
if answer["answer_extraction"] is None:
# if no answer extraction key is given, return item, since cot_prompt text is already generated
return item
else:
template_dict["answer_extraction"] = get_fragments_value("answer_extractions", answer["answer_extraction"])
answer_extraction_prompt = format_prompt(answer["answer_extraction_template"], template_dict)
answer["answer_extraction_text"] = answer_extraction_prompt
return item
def select_generated_cots(dataset, **kwargs):
"""This function handles which pregenerated CoTs are deleted (can be used after loading a collection with "load_pregenerated_cots=True").
:param dataset: The dataset to delete unwanted pregenerated CoTs from.
:param kwargs: A dictionary of the form {"key": value}, where value has to be a string or list of strings.
e.g. {"author": ["author1", "author2"]} or {"author": "author1"}.
    Overview of the current authors and their cot_triggers:
"kojima": kojima-01
"wei": few-shot (as a prompt)
"lievin": kojima-01, lievin-01, lievin-02, lievin-03, lievin-10
"lievin_100": 100 times kojima-01 with high temperature
"thoughtsource": None, kojima-01
"""
# general info why this function is necessary:
# Unfortunately the loading function of the datasets does not let you specify which pregenerated COTS to load
# So we load all of them and then delete the ones we don't want
# disable progress bar
ds.disable_progress_bar()
# remove all the pregenerated COTS that are not in the list
dataset = dataset.map(
_select_generated_cots,
fn_kwargs={**kwargs},
features=dataset.info.features,
load_from_cache_file=False,
)
return dataset
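# Illustrative usage (the author values follow the docstring above; purely an example):
# dataset = select_generated_cots(dataset, author="kojima")
# dataset = select_generated_cots(dataset, author=["kojima", "wei"])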
# def _select_generated_cots(item, reverse=False, **kwargs):
# # if reverse is True, unselect/delete all CoTs that match the given criteria
# # load all allows keys from the cot_features
# allowed_keys = list(cot_features["generated_cot"][0].keys()) + ["answer"]
# for key, value in kwargs.items():
# # check if key is allowed
# if key not in allowed_keys:
# raise ValueError(f"Key '{key}' not in allowed keys {allowed_keys}")
# # if value is None or a string, convert it to a list
# if value is None or type(value) == str:
# value = [value]
# # loop over all generated CoTs in the item and delete the ones that don't match the given criteria
# if key == "model":
# if not reverse:
# item["generated_cot"] = [cot for cot in item["generated_cot"] if eval(cot["model"])["name"] in value]
# else:
# item["generated_cot"] = [cot for cot in item["generated_cot"] if eval(cot["model"])["name"] not in value]
# elif key == "answer":
# if not reverse:
# item["generated_cot"] = [cot for cot in item["generated_cot"] if cot["answers"][0]["correct_answer"] == value]
# else:
# item["generated_cot"] = [cot for cot in item["generated_cot"] if cot["answers"][0]["correct_answer"] != value]
# else:
# if not reverse:
# item["generated_cot"] = [cot for cot in item["generated_cot"] if cot[str(key)] in value]
# else:
# item["generated_cot"] = [cot for cot in item["generated_cot"] if cot[str(key)] not in value]
# return item
def _select_generated_cots(item, reverse=False, **kwargs):
allowed_keys = list(cot_features["generated_cot"][0].keys()) + ["answer"]
filtered_cots = []
for key, value in kwargs.items():
if key not in allowed_keys:
raise ValueError(f"Key '{key}' not in allowed keys {allowed_keys}")
if value is None or type(value) == str:
value = [value]
if key == "model":
cots = [cot for cot in item["generated_cot"] if eval(cot["model"])["name"] in value]
elif key == "answer":
cots = [cot for cot in item["generated_cot"] if cot["answers"][0]["correct_answer"] == value]
else:
cots = [cot for cot in item["generated_cot"] if cot[str(key)] in value]
filtered_cots.append(cots)
if reverse:
# Flatten the list of filtered cots
flattened_filtered_cots = [cot for sublist in filtered_cots for cot in sublist]
# Remove duplicates from the flattened list
unique_filtered_cots = list({id(cot): cot for cot in flattened_filtered_cots}.values())
# Remove the unique filtered cots from the original set
item["generated_cot"] = [cot for cot in item["generated_cot"] if cot not in unique_filtered_cots]
else:
# Flatten the list of filtered cots
flattened_filtered_cots = [cot for sublist in filtered_cots for cot in sublist]
# Remove duplicates from the flattened list
item["generated_cot"] = list({id(cot): cot for cot in flattened_filtered_cots}.values())
return item
def delete_all_generated_cots(dataset):
"""This function deletes all pregenerated COTS from a dataset."""
dataset = dataset.map(
_delete_all_generated_cots,
features=dataset.info.features,
load_from_cache_file=False,
)
return dataset
def _delete_all_generated_cots(item):
item["generated_cot"] = []
return item
def print_now(return_flag=0):
"""
It takes a flag as an argument and prints the current time in a specific format
:param return_flag: 0 = print, 1 = return, defaults to 0 (optional)
:return: the current time in the format of 'YYYY/MM/DD HH:MM:SS'
"""
now = datetime.datetime.now()
now = now.strftime("%Y/%m/%d %H:%M:%S")
if return_flag == 0:
print(now)
elif return_flag == 1:
return now
else:
pass
def multiple_choice_answer_formatting(answer_choices):
"""Transforms a list of answer choices into a string with letters (A,B,C,...) for each answer choice."""
# only supports uppercase letters at the moment, as this is current standard
# Adding Letters (A,B,C,...) for the given multiple choice answers.
return "\n".join([f"{chr(65+i)}) {example}" for i, example in enumerate(answer_choices)]) # 65 is the ASCII code for A
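# Example: multiple_choice_answer_formatting(["yes", "no", "maybe"])
# returns "A) yes\nB) no\nC) maybe"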
def adaptive_answer_extraction(preference, type, len_choices):
if preference == "auto-kojima":
if type == "bool":
return "kojima-yes-no"
elif type == "multiplechoice":
if len_choices == 3: answer_extraction_key = 'kojima-A-C'
elif len_choices == 4: answer_extraction_key = 'kojima-A-D'
elif len_choices == 5: answer_extraction_key = 'kojima-A-E'
elif len_choices == 6: answer_extraction_key = 'kojima-A-F'
            return answer_extraction_key
else: raise ValueError("type must be bool or multiplechoice")
def get_fragments_value(str, key):
if key is None:
return None
else:
return FRAGMENTS[str][key]
def format_prompt(template, dictionary):
output = template.format_map(Correct_output(dictionary))
# remove leading whitespaces
output = output.lstrip()
return output
class Correct_output(dict):
# TODO: do I ever need this? I think there will never be missing keys
# and None keys are handled by delete_empty_curly_brackets
def __missing__(self, key):
return ""
def __getitem__(self, key):
return dict.get(self, key) or ""
# def get(self, key):
# return dict.get(self, key) or ""
# def delete_empty_curly_brackets(string):
# string.replace("{None}\n", "")
# # string.replace("\n{None}", "") # TODO: do I need this?
# string.replace("{None}", "")
# return string
def query_model(input, api_service, engine, temperature, max_tokens, api_time_interval):
if api_service == "mock_api":
# time.sleep(api_time_interval)
return " Test mock chain of thought."
# return ("This is a " + 20 * "long " + "Mock CoT.\n")*20
# langchain package implementation
else:
from langchain import LLMChain, Prompt
time.sleep(api_time_interval)
template = "{prompt}"
prompt = Prompt(template=template, input_variables=["prompt"])
if api_service == "openai":
from langchain import OpenAI
llm_chain = LLMChain(
prompt=prompt,
llm=OpenAI(
# parameter options: https://beta.openai.com/docs/api-reference/completions/create-completion
model_name=engine,
max_tokens=max_tokens,
temperature=temperature,
# type: ignore (suppress pylance error)
),
)
if api_service == "openai_chat":
from langchain.chat_models import ChatOpenAI
llm_chain = LLMChain(
prompt=prompt,
llm=ChatOpenAI(
model_name=engine,
max_tokens=max_tokens,
temperature=temperature,
# type: ignore (suppress pylance error)
),
)
if api_service == "huggingface_hub":
from langchain import HuggingFaceHub
llm_chain = LLMChain(
prompt=prompt,
llm=HuggingFaceHub(
# parameter options: https://huggingface.co/docs/api-inference/detailed_parameters
repo_id=engine,
model_kwargs={"temperature": temperature, "max_length": max_tokens},
# type: ignore (suppress pylance error)
),
)
if api_service == "huggingface_endpoint":
# from langchain.llms.huggingface_endpoint import HuggingFaceEndpoint
llm_chain = LLMChain(
prompt=prompt,
llm=HuggingFaceEndpoint(
# we just use the engine name as the endpoint url here
endpoint_url=engine,
# read API key from environment variable
huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
model_kwargs={"temperature": temperature, "max_length": max_tokens},
task="text2text-generation"
),
)
if api_service == "cohere":
from langchain import Cohere
llm_chain = LLMChain(
prompt=prompt,
llm=Cohere(
model=engine,
max_tokens=max_tokens,
temperature=temperature,
# type: ignore (suppress pylance error)
),
)
response = llm_chain.predict(prompt=input, stop=None)
return response
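# Minimal sketch of a call (uses the built-in mock service, so no API key or real engine is needed):
# cot = query_model("What is 2 + 2?", "mock_api", engine=None, temperature=0, max_tokens=64, api_time_interval=1.0)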
### this is code from the langchain package
# I needed to make a small adaptation to the HuggingFaceEndpoint class to catch an Error
# will be deleted in the future
"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import BaseModel, Extra, root_validator
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
VALID_TASKS = ("text2text-generation", "text-generation")
class HuggingFaceEndpoint(LLM, BaseModel):
"""Wrapper around HuggingFaceHub Inference Endpoints.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain import HuggingFaceEndpoint
endpoint_url = (
"https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
)
hf = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
huggingfacehub_api_token="my-api-key"
)
"""
endpoint_url: str = ""
"""Endpoint URL to use."""
task: Optional[str] = None
"""Task to call the model with. Should be a task that returns `generated_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.hf_api import HfApi
try:
HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=huggingfacehub_api_token,
).whoami()
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please it install it with `pip install huggingface_hub`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
# payload samples
parameter_payload = {"inputs": prompt, "parameters": _model_kwargs}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.huggingfacehub_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(
self.endpoint_url, headers=headers, json=parameter_payload
)
except requests.exceptions.RequestException as e: # This is the correct syntax
raise ValueError(f"Error raised by inference endpoint: {e}")
generated_text = response.json()
if "error" in generated_text:
raise ValueError(f"Error raised by inference API: {generated_text['error']}")
if self.task == "text-generation":
# Text generation return includes the starter text.
text = generated_text[0]["generated_text"][len(prompt) :]
elif self.task == "text2text-generation":
text = generated_text[0]["generated_text"]
else:
raise ValueError(
f"Got invalid task {self.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text | [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env",
"langchain.HuggingFaceHub",
"langchain.chat_models.ChatOpenAI",
"langchain.Cohere",
"langchain.Prompt",
"langchain.OpenAI"
] | [((383, 403), 'datasets.disable_caching', 'ds.disable_caching', ([], {}), '()\n', (401, 403), True, 'import datasets as ds\n'), ((428, 472), 'pkgutil.get_data', 'pkgutil.get_data', (['__name__', '"""fragments.json"""'], {}), "(__name__, 'fragments.json')\n", (444, 472), False, 'import pkgutil\n'), ((873, 893), 'datasets.disable_caching', 'ds.disable_caching', ([], {}), '()\n', (891, 893), True, 'import datasets as ds\n'), ((2303, 2328), 'cot.config.Config', 'Config', ([], {}), '(**adaptive_config)\n', (2309, 2328), False, 'from cot.config import Config\n'), ((9480, 9500), 'datasets.disable_caching', 'ds.disable_caching', ([], {}), '()\n', (9498, 9500), True, 'import datasets as ds\n'), ((10364, 10389), 'copy.deepcopy', 'copy.deepcopy', (['input_dict'], {}), '(input_dict)\n', (10377, 10389), False, 'import copy\n'), ((12563, 12588), 'copy.deepcopy', 'copy.deepcopy', (['input_dict'], {}), '(input_dict)\n', (12576, 12588), False, 'import copy\n'), ((14540, 14565), 'copy.deepcopy', 'copy.deepcopy', (['input_dict'], {}), '(input_dict)\n', (14553, 14565), False, 'import copy\n'), ((16328, 16353), 'copy.deepcopy', 'copy.deepcopy', (['input_dict'], {}), '(input_dict)\n', (16341, 16353), False, 'import copy\n'), ((21874, 21899), 'datasets.disable_progress_bar', 'ds.disable_progress_bar', ([], {}), '()\n', (21897, 21899), True, 'import datasets as ds\n'), ((26053, 26076), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (26074, 26076), False, 'import datetime\n'), ((32963, 32979), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (32977, 32979), False, 'from pydantic import BaseModel, Extra, root_validator\n'), ((28528, 28557), 'time.sleep', 'time.sleep', (['api_time_interval'], {}), '(api_time_interval)\n', (28538, 28557), False, 'import time\n'), ((28605, 28658), 'langchain.Prompt', 'Prompt', ([], {'template': 'template', 'input_variables': "['prompt']"}), "(template=template, input_variables=['prompt'])\n", (28611, 28658), False, 'from langchain import LLMChain, Prompt\n'), ((33150, 33238), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""huggingfacehub_api_token"""', '"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "(values, 'huggingfacehub_api_token',\n 'HUGGINGFACEHUB_API_TOKEN')\n", (33170, 33238), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2427, 2454), 'dataclasses.asdict', 'asdict', (['config_as_dataclass'], {}), '(config_as_dataclass)\n', (2433, 2454), False, 'from dataclasses import asdict\n'), ((10732, 10744), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10742, 10744), False, 'import uuid\n'), ((11620, 11632), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (11630, 11632), False, 'import uuid\n'), ((12970, 12982), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12980, 12982), False, 'import uuid\n'), ((14948, 14960), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (14958, 14960), False, 'import uuid\n'), ((16901, 16913), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (16911, 16913), False, 'import uuid\n'), ((17716, 17728), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (17726, 17728), False, 'import uuid\n'), ((35267, 35340), 'requests.post', 'requests.post', (['self.endpoint_url'], {'headers': 'headers', 'json': 'parameter_payload'}), '(self.endpoint_url, headers=headers, json=parameter_payload)\n', (35280, 35340), False, 'import requests\n'), ((36347, 36378), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (36366, 36378), False, 'from 
langchain.llms.utils import enforce_stop_tokens\n'), ((28823, 28896), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': 'engine', 'max_tokens': 'max_tokens', 'temperature': 'temperature'}), '(model_name=engine, max_tokens=max_tokens, temperature=temperature)\n', (28829, 28896), False, 'from langchain import OpenAI\n'), ((29350, 29427), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'engine', 'max_tokens': 'max_tokens', 'temperature': 'temperature'}), '(model_name=engine, max_tokens=max_tokens, temperature=temperature)\n', (29360, 29427), False, 'from langchain.chat_models import ChatOpenAI\n'), ((29763, 29866), 'langchain.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': 'engine', 'model_kwargs': "{'temperature': temperature, 'max_length': max_tokens}"}), "(repo_id=engine, model_kwargs={'temperature': temperature,\n 'max_length': max_tokens})\n", (29777, 29866), False, 'from langchain import HuggingFaceHub\n'), ((30912, 30980), 'langchain.Cohere', 'Cohere', ([], {'model': 'engine', 'max_tokens': 'max_tokens', 'temperature': 'temperature'}), '(model=engine, max_tokens=max_tokens, temperature=temperature)\n', (30918, 30980), False, 'from langchain import Cohere\n'), ((33357, 33429), 'huggingface_hub.hf_api.HfApi', 'HfApi', ([], {'endpoint': '"""https://huggingface.co"""', 'token': 'huggingfacehub_api_token'}), "(endpoint='https://huggingface.co', token=huggingfacehub_api_token)\n", (33362, 33429), False, 'from huggingface_hub.hf_api import HfApi\n'), ((4556, 4568), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4566, 4568), False, 'import uuid\n'), ((7088, 7100), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7098, 7100), False, 'import uuid\n')] |
import os
import threading
import time
from contextlib import ExitStack
from pathlib import Path
from typing import cast, Optional
import yaml
from dotenv import load_dotenv
from firebase_admin import auth
from langchain.text_splitter import CharacterTextSplitter
from llama_index import SimpleDirectoryReader
from readerwriterlock import rwlock
from realtime_ai_character.database.chroma import get_chroma
from realtime_ai_character.database.connection import get_db
from realtime_ai_character.logger import get_logger
from realtime_ai_character.models.character import Character as CharacterModel
from realtime_ai_character.utils import Character, Singleton
load_dotenv()
logger = get_logger(__name__)
class CatalogManager(Singleton):
def __init__(self):
super().__init__()
overwrite = os.getenv("OVERWRITE_CHROMA") != "false"
# skip Chroma if Openai API key is not set
if os.getenv("OPENAI_API_KEY"):
self.db = get_chroma()
else:
self.db = get_chroma(embedding=False)
overwrite = False
logger.warning("OVERWRITE_CHROMA disabled due to OPENAI_API_KEY not set")
self.sql_db = next(get_db())
self.sql_load_interval = 30
self.sql_load_lock = rwlock.RWLockFair()
if overwrite:
logger.info("Overwriting existing data in the chroma.")
self.db.delete_collection()
self.db = get_chroma()
self.characters: dict[str, Character] = {}
self.author_name_cache: dict[str, str] = {}
self.load_characters("default", overwrite)
self.load_characters("community", overwrite)
if overwrite:
logger.info("Persisting data in the chroma.")
self.db.persist()
logger.info(f"Total document load: {self.db._client.get_collection('llm').count()}")
self.run_load_sql_db_thread = True
self.load_sql_db_thread = threading.Thread(target=self.load_sql_db_loop)
self.load_sql_db_thread.daemon = True
self.load_sql_db_thread.start()
def load_sql_db_loop(self):
while self.run_load_sql_db_thread:
self.load_character_from_sql_database()
time.sleep(self.sql_load_interval)
def stop_load_sql_db_loop(self):
self.run_load_sql_db_thread = False
def get_character(self, name) -> Optional[Character]:
with self.sql_load_lock.gen_rlock():
return self.characters.get(name)
def load_character(self, directory: Path, source: str):
with ExitStack() as stack:
f_yaml = stack.enter_context(open(directory / "config.yaml"))
yaml_content = cast(dict, yaml.safe_load(f_yaml))
character_id = yaml_content["character_id"]
character_name = yaml_content["character_name"]
voice_id_env = os.getenv(character_id.upper() + "_VOICE_ID")
voice_id = voice_id_env or str(yaml_content["voice_id"])
order = yaml_content.get("order", 10**6)
self.characters[character_id] = Character(
character_id=character_id,
name=character_name,
llm_system_prompt=yaml_content["system"],
llm_user_prompt=yaml_content["user"],
source=source,
location="repo",
voice_id=voice_id,
author_name=yaml_content.get("author_name", ""),
visibility="public" if source == "default" else yaml_content["visibility"],
tts=yaml_content["text_to_speech_use"],
order=order,
# rebyte config
rebyte_api_project_id=yaml_content["rebyte_api_project_id"],
rebyte_api_agent_id=yaml_content["rebyte_api_agent_id"],
rebyte_api_version=yaml_content.get("rebyte_api_version"),
)
return character_name
def load_data(self, character_name: str, data_path: Path):
loader = SimpleDirectoryReader(data_path.absolute().as_posix())
documents = loader.load_data()
text_splitter = CharacterTextSplitter(separator="\n", chunk_size=500, chunk_overlap=100)
docs = text_splitter.create_documents(
texts=[d.text for d in documents],
metadatas=[
{
"character_name": character_name,
"id": d.id_,
}
for d in documents
],
)
self.db.add_documents(docs)
def load_characters(self, source: str, overwrite: bool):
"""
Load characters from the character_catalog directory. Use /data to create
documents and add them to the chroma.
:param source: 'default' or 'community'
:param overwrite: if True, overwrite existing data in the chroma.
"""
if source == "default":
path = Path(__file__).parent
excluded_dirs = {"__pycache__", "archive", "community"}
elif source == "community":
path = Path(__file__).parent / "community"
excluded_dirs = {"__pycache__", "archive"}
else:
raise ValueError(f"Invalid source: {source}")
directories = [d for d in path.iterdir() if d.is_dir() and d.name not in excluded_dirs]
for directory in directories:
character_name = self.load_character(directory, source)
if character_name and overwrite:
logger.info("Overwriting data for character: " + character_name)
self.load_data(character_name, directory / "data")
logger.info(f"Loaded {len(self.characters)} characters: IDs {list(self.characters.keys())}")
def load_character_from_sql_database(self):
logger.info("Started loading characters from SQL database")
character_models = self.sql_db.query(CharacterModel).all()
with self.sql_load_lock.gen_wlock():
# delete all characters with location == 'database'
keys_to_delete = []
for character_id in self.characters.keys():
if self.characters[character_id].location == "database":
keys_to_delete.append(character_id)
for key in keys_to_delete:
del self.characters[key]
# add all characters from sql database
for character_model in character_models:
if character_model.author_id not in self.author_name_cache:
author_name = (
auth.get_user(character_model.author_id).display_name
if os.getenv("USE_AUTH") == "true"
else "anonymous author"
)
self.author_name_cache[character_model.author_id] = author_name # type: ignore
else:
author_name = self.author_name_cache[character_model.author_id]
character = Character(
character_id=character_model.id, # type: ignore
name=character_model.name, # type: ignore
llm_system_prompt=character_model.system_prompt, # type: ignore
llm_user_prompt=character_model.user_prompt, # type: ignore
source="community",
location="database",
voice_id=character_model.voice_id, # type: ignore
author_name=author_name,
author_id=character_model.author_id, # type: ignore
visibility=character_model.visibility, # type: ignore
tts=character_model.tts, # type: ignore
data=character_model.data, # type: ignore
# rebyte config
rebyte_api_project_id=character_model.rebyte_api_project_id, # type: ignore
rebyte_api_agent_id=character_model.rebyte_api_agent_id, # type: ignore
rebyte_api_version=character_model.rebyte_api_version, # type: ignore
)
self.characters[character_model.id] = character # type: ignore
# TODO: load context data from storage
logger.info(f"Loaded {len(character_models)} characters from sql database")
def get_catalog_manager() -> CatalogManager:
return CatalogManager.get_instance()
if __name__ == "__main__":
manager = CatalogManager.get_instance()
| [
"langchain.text_splitter.CharacterTextSplitter"
] | [((664, 677), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (675, 677), False, 'from dotenv import load_dotenv\n'), ((687, 707), 'realtime_ai_character.logger.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (697, 707), False, 'from realtime_ai_character.logger import get_logger\n'), ((917, 944), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (926, 944), False, 'import os\n'), ((1263, 1282), 'readerwriterlock.rwlock.RWLockFair', 'rwlock.RWLockFair', ([], {}), '()\n', (1280, 1282), False, 'from readerwriterlock import rwlock\n'), ((1937, 1983), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.load_sql_db_loop'}), '(target=self.load_sql_db_loop)\n', (1953, 1983), False, 'import threading\n'), ((4113, 4185), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(500)', 'chunk_overlap': '(100)'}), "(separator='\\n', chunk_size=500, chunk_overlap=100)\n", (4134, 4185), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((814, 843), 'os.getenv', 'os.getenv', (['"""OVERWRITE_CHROMA"""'], {}), "('OVERWRITE_CHROMA')\n", (823, 843), False, 'import os\n'), ((968, 980), 'realtime_ai_character.database.chroma.get_chroma', 'get_chroma', ([], {}), '()\n', (978, 980), False, 'from realtime_ai_character.database.chroma import get_chroma\n'), ((1017, 1044), 'realtime_ai_character.database.chroma.get_chroma', 'get_chroma', ([], {'embedding': '(False)'}), '(embedding=False)\n', (1027, 1044), False, 'from realtime_ai_character.database.chroma import get_chroma\n'), ((1188, 1196), 'realtime_ai_character.database.connection.get_db', 'get_db', ([], {}), '()\n', (1194, 1196), False, 'from realtime_ai_character.database.connection import get_db\n'), ((1436, 1448), 'realtime_ai_character.database.chroma.get_chroma', 'get_chroma', ([], {}), '()\n', (1446, 1448), False, 'from realtime_ai_character.database.chroma import get_chroma\n'), ((2210, 2244), 'time.sleep', 'time.sleep', (['self.sql_load_interval'], {}), '(self.sql_load_interval)\n', (2220, 2244), False, 'import time\n'), ((2550, 2561), 'contextlib.ExitStack', 'ExitStack', ([], {}), '()\n', (2559, 2561), False, 'from contextlib import ExitStack\n'), ((2684, 2706), 'yaml.safe_load', 'yaml.safe_load', (['f_yaml'], {}), '(f_yaml)\n', (2698, 2706), False, 'import yaml\n'), ((4912, 4926), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (4916, 4926), False, 'from pathlib import Path\n'), ((6968, 7571), 'realtime_ai_character.utils.Character', 'Character', ([], {'character_id': 'character_model.id', 'name': 'character_model.name', 'llm_system_prompt': 'character_model.system_prompt', 'llm_user_prompt': 'character_model.user_prompt', 'source': '"""community"""', 'location': '"""database"""', 'voice_id': 'character_model.voice_id', 'author_name': 'author_name', 'author_id': 'character_model.author_id', 'visibility': 'character_model.visibility', 'tts': 'character_model.tts', 'data': 'character_model.data', 'rebyte_api_project_id': 'character_model.rebyte_api_project_id', 'rebyte_api_agent_id': 'character_model.rebyte_api_agent_id', 'rebyte_api_version': 'character_model.rebyte_api_version'}), "(character_id=character_model.id, name=character_model.name,\n llm_system_prompt=character_model.system_prompt, llm_user_prompt=\n character_model.user_prompt, source='community', location='database',\n voice_id=character_model.voice_id, author_name=author_name, author_id=\n character_model.author_id, 
visibility=character_model.visibility, tts=\n character_model.tts, data=character_model.data, rebyte_api_project_id=\n character_model.rebyte_api_project_id, rebyte_api_agent_id=\n character_model.rebyte_api_agent_id, rebyte_api_version=character_model\n .rebyte_api_version)\n", (6977, 7571), False, 'from realtime_ai_character.utils import Character, Singleton\n'), ((5057, 5071), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5061, 5071), False, 'from pathlib import Path\n'), ((6632, 6653), 'os.getenv', 'os.getenv', (['"""USE_AUTH"""'], {}), "('USE_AUTH')\n", (6641, 6653), False, 'import os\n'), ((6551, 6591), 'firebase_admin.auth.get_user', 'auth.get_user', (['character_model.author_id'], {}), '(character_model.author_id)\n', (6564, 6591), False, 'from firebase_admin import auth\n')] |
import sys
from dotenv import load_dotenv
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.llms import OpenAI
from commands import chrome_click_on_link, chrome_get_the_links_on_the_page, chrome_open_url, chrome_read_the_page, computer_applescript_action, say_text
# load environment variables
load_dotenv()
def main(command):
llm = OpenAI(temperature=0)
tools = [
computer_applescript_action,
chrome_open_url,
chrome_get_the_links_on_the_page,
chrome_click_on_link,
chrome_read_the_page
]
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
result = agent.run(command)
if result:
say_text(f'The result is {result}')
else:
say_text(f'Finished doing {command}')
if __name__ == "__main__":
command = sys.argv[1]
if not command:
print("Please provide a command to execute e.g. python main.py 'Open the calculator app'")
exit(1)
main(command)
| [
"langchain.agents.initialize_agent",
"langchain.llms.OpenAI"
] | [((350, 363), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (361, 363), False, 'from dotenv import load_dotenv\n'), ((394, 415), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (400, 415), False, 'from langchain.llms import OpenAI\n'), ((613, 692), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': '"""zero-shot-react-description"""', 'verbose': '(True)'}), "(tools, llm, agent='zero-shot-react-description', verbose=True)\n", (629, 692), False, 'from langchain.agents import initialize_agent\n'), ((750, 785), 'commands.say_text', 'say_text', (['f"""The result is {result}"""'], {}), "(f'The result is {result}')\n", (758, 785), False, 'from commands import chrome_click_on_link, chrome_get_the_links_on_the_page, chrome_open_url, chrome_read_the_page, computer_applescript_action, say_text\n'), ((804, 841), 'commands.say_text', 'say_text', (['f"""Finished doing {command}"""'], {}), "(f'Finished doing {command}')\n", (812, 841), False, 'from commands import chrome_click_on_link, chrome_get_the_links_on_the_page, chrome_open_url, chrome_read_the_page, computer_applescript_action, say_text\n')] |
""" Adapted from https://github.com/QwenLM/Qwen-7B/blob/main/examples/react_demo.py """
import json
import os
from langchain.llms import OpenAI
llm = OpenAI(
model_name="qwen",
temperature=0,
openai_api_base="http://192.168.0.53:7891/v1",
openai_api_key="xxx",
)
# Template that stitches the key information of a plugin into one piece of text.
TOOL_DESC = """{name_for_model}: Call this tool to interact with the {name_for_human} API. What is the {name_for_human} API useful for? {description_for_model} Parameters: {parameters}"""
# Instruction template for ReAct prompting; it will contain the detailed plugin information.
PROMPT_REACT = """Answer the following questions as best you can. You have access to the following tools:
{tools_text}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tools_name_text}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can be repeated zero or more times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin!
Question: {query}"""
def llm_with_plugin(prompt: str, history, list_of_plugin_info=()):
"""
Args:
        prompt: The user's most recent question.
        history: The conversation history between the user and the model, given as a list.
            Each element of the list is one dialogue turn of the form {"user": "user input", "bot": "model output"}.
            The most recent turn goes at the end of the list; it does not include the latest question.
        list_of_plugin_info: The list of candidate plugins; each element holds the key information of one plugin.
            For example, list_of_plugin_info = [plugin_info_0, plugin_info_1, plugin_info_2],
            where the examples plugin_info_0, plugin_info_1 and plugin_info_2 are described earlier in this document.
    Returns: The model's answer to the user's most recent question.
"""
chat_history = [(x["user"], x["bot"]) for x in history] + [(prompt, '')]
    # Initial text that the model is asked to continue (text completion)
planning_prompt = build_input_text(chat_history, list_of_plugin_info)
text = ""
while True:
output = text_completion(planning_prompt + text, stop_words=["Observation:", "Observation:\n"])
action, action_input, output = parse_latest_plugin_call(output)
        if action:  # a plugin call is needed
            # action and action_input are the plugin name to call and its input arguments
            # observation is the result returned by the plugin, as a string
observation = call_plugin(action, action_input)
output += f"\nObservation: {observation}\nThought:"
text += output
        else:  # generation finished; no further plugin calls are needed
text += output
break
new_history = []
new_history.extend(history)
new_history.append({"user": prompt, "bot": text})
return text, new_history
def build_input_text(chat_history, list_of_plugin_info) -> str:
""" 将对话历史、插件信息聚合成一段初始文本 """
tools_text = []
for plugin_info in list_of_plugin_info:
tool = TOOL_DESC.format(
name_for_model=plugin_info["name_for_model"],
name_for_human=plugin_info["name_for_human"],
description_for_model=plugin_info["description_for_model"],
parameters=json.dumps(plugin_info["parameters"], ensure_ascii=False),
)
if plugin_info.get("args_format", "json") == "json":
tool += " Format the arguments as a JSON object."
elif plugin_info['args_format'] == 'code':
tool += " Enclose the code within triple backticks (`) at the beginning and end of the code."
else:
raise NotImplementedError
tools_text.append(tool)
tools_text = '\n\n'.join(tools_text)
    # Codes (names) of the candidate plugins
tools_name_text = ", ".join([plugin_info["name_for_model"] for plugin_info in list_of_plugin_info])
im_start = "<|im_start|>"
im_end = "<|im_end|>"
prompt = f"{im_start}system\nYou are a helpful assistant.{im_end}"
for i, (query, response) in enumerate(chat_history):
        if list_of_plugin_info:  # if there are candidate plugins
            # Fill the detailed plugin information into the last or the second-to-last turn;
            # the exact position is up to you.
if (len(chat_history) == 1) or (i == len(chat_history) - 2):
query = PROMPT_REACT.format(
tools_text=tools_text,
tools_name_text=tools_name_text,
query=query,
)
        query = query.lstrip("\n").rstrip()  # Important! Without strip(), this deviates from how the training data was constructed.
        response = response.lstrip("\n").rstrip()  # Important! Without strip(), this deviates from how the training data was constructed.
        # In text-completion mode, the following format is used to distinguish the user from the AI:
prompt += f"\n{im_start}user\n{query}{im_end}"
prompt += f"\n{im_start}assistant\n{response}{im_end}"
assert prompt.endswith(f"\n{im_start}assistant\n{im_end}")
prompt = prompt[: -len(f"{im_end}")]
return prompt
def text_completion(input_text: str, stop_words) -> str:  # use the LLM as a plain text-completion model
im_end = "<|im_end|>"
if im_end not in stop_words:
stop_words = stop_words + [im_end]
    return llm(input_text, stop=stop_words)  # the continuation of input_text, not including input_text itself
def parse_latest_plugin_call(text):
plugin_name, plugin_args = "", ""
i = text.rfind("\nAction:")
j = text.rfind("\nAction Input:")
k = text.rfind("\nObservation:")
if 0 <= i < j: # If the text has `Action` and `Action input`,
if k < j: # but does not contain `Observation`,
            # then it is likely that `Observation` is omitted by the LLM,
# because the output text may have discarded the stop word.
text = text.rstrip() + "\nObservation:" # Add it back.
k = text.rfind("\nObservation:")
plugin_name = text[i + len("\nAction:"): j].strip()
plugin_args = text[j + len("\nAction Input:"): k].strip()
text = text[:k]
return plugin_name, plugin_args, text
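# Illustrative parse (the ReAct text below is made up):
#   raw = "Thought: I need to search.\nAction: google_search\nAction Input: {\"search_query\": \"Jay Chou\"}\nObservation:"
#   name, args, trimmed = parse_latest_plugin_call(raw)
#   name == "google_search", args == '{"search_query": "Jay Chou"}',
#   and trimmed is the text cut off just before "\nObservation:".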
def call_plugin(plugin_name: str, plugin_args: str) -> str:
""" 请开发者自行完善这部分内容。这里的参考实现仅是 demo 用途,非生产用途 """
if plugin_name == "google_search":
        # Using SerpAPI requires filling in your SERPAPI_API_KEY here!
os.environ["SERPAPI_API_KEY"] = os.getenv("SERPAPI_API_KEY", default="")
from langchain import SerpAPIWrapper
return SerpAPIWrapper().run(json.loads(plugin_args)["search_query"])
elif plugin_name == "image_gen":
import urllib.parse
prompt = json.loads(plugin_args)["prompt"]
prompt = urllib.parse.quote(prompt)
return json.dumps({"image_url": f"https://image.pollinations.ai/prompt/{prompt}"}, ensure_ascii=False)
else:
raise NotImplementedError
def test():
tools = [
{
"name_for_human": "谷歌搜索",
"name_for_model": "google_search",
"description_for_model": "谷歌搜索是一个通用搜索引擎,可用于访问互联网、查询百科知识、了解时事新闻等。",
"parameters": [
{
"name": "search_query",
"description": "搜索关键词或短语",
"required": True,
"schema": {"type": "string"},
}
],
},
{
"name_for_human": "文生图",
"name_for_model": "image_gen",
"description_for_model": "文生图是一个AI绘画(图像生成)服务,输入文本描述,返回根据文本作画得到的图片的URL",
"parameters": [
{
"name": "prompt",
"description": "英文关键词,描述了希望图像具有什么内容",
"required": True,
"schema": {"type": "string"},
}
],
},
]
history = []
for query in ["你好", "谁是周杰伦", "他老婆是谁", "给我画个可爱的小猫吧,最好是黑猫"]:
print(f"User's Query:\n{query}\n")
response, history = llm_with_plugin(prompt=query, history=history, list_of_plugin_info=tools)
print(f"Qwen's Response:\n{response}\n")
if __name__ == "__main__":
test()
"""如果执行成功,在终端下应当能看到如下输出:
User's Query:
你好
Qwen's Response:
Thought: 提供的工具对回答该问题帮助较小,我将不使用工具直接作答。
Final Answer: 你好!很高兴见到你。有什么我可以帮忙的吗?
User's Query:
谁是周杰伦
Qwen's Response:
Thought: 我应该使用Google搜索查找相关信息。
Action: google_search
Action Input: {"search_query": "周杰伦"}
Observation: Jay Chou is a Taiwanese singer, songwriter, record producer, rapper, actor, television personality, and businessman.
Thought: I now know the final answer.
Final Answer: 周杰伦(Jay Chou)是一位来自台湾的歌手、词曲创作人、音乐制作人、说唱歌手、演员、电视节目主持人和企业家。他以其独特的音乐风格和才华在华语乐坛享有很高的声誉。
User's Query:
他老婆是谁
Qwen's Response:
Thought: 我应该使用Google搜索查找相关信息。
Action: google_search
Action Input: {"search_query": "周杰伦 老婆"}
Observation: Hannah Quinlivan
Thought: I now know the final answer.
Final Answer: 周杰伦的老婆是Hannah Quinlivan,她是一位澳大利亚籍的模特和演员。两人于2015年结婚,并育有一子。
User's Query:
给我画个可爱的小猫吧,最好是黑猫
Qwen's Response:
Thought: 我应该使用文生图API来生成一张可爱的小猫图片。
Action: image_gen
Action Input: {"prompt": "cute black cat"}
Observation: {"image_url": "https://image.pollinations.ai/prompt/cute%20black%20cat"}
Thought: I now know the final answer.
Final Answer: 生成的可爱小猫图片的URL为https://image.pollinations.ai/prompt/cute%20black%20cat。你可以点击这个链接查看图片。
"""
| [
"langchain.SerpAPIWrapper",
"langchain.llms.OpenAI"
] | [((153, 267), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""qwen"""', 'temperature': '(0)', 'openai_api_base': '"""http://192.168.0.53:7891/v1"""', 'openai_api_key': '"""xxx"""'}), "(model_name='qwen', temperature=0, openai_api_base=\n 'http://192.168.0.53:7891/v1', openai_api_key='xxx')\n", (159, 267), False, 'from langchain.llms import OpenAI\n'), ((5778, 5818), 'os.getenv', 'os.getenv', (['"""SERPAPI_API_KEY"""'], {'default': '""""""'}), "('SERPAPI_API_KEY', default='')\n", (5787, 5818), False, 'import os\n'), ((6118, 6217), 'json.dumps', 'json.dumps', (["{'image_url': f'https://image.pollinations.ai/prompt/{prompt}'}"], {'ensure_ascii': '(False)'}), "({'image_url': f'https://image.pollinations.ai/prompt/{prompt}'},\n ensure_ascii=False)\n", (6128, 6217), False, 'import json\n'), ((2933, 2990), 'json.dumps', 'json.dumps', (["plugin_info['parameters']"], {'ensure_ascii': '(False)'}), "(plugin_info['parameters'], ensure_ascii=False)\n", (2943, 2990), False, 'import json\n'), ((5880, 5896), 'langchain.SerpAPIWrapper', 'SerpAPIWrapper', ([], {}), '()\n', (5894, 5896), False, 'from langchain import SerpAPIWrapper\n'), ((5901, 5924), 'json.loads', 'json.loads', (['plugin_args'], {}), '(plugin_args)\n', (5911, 5924), False, 'import json\n'), ((6025, 6048), 'json.loads', 'json.loads', (['plugin_args'], {}), '(plugin_args)\n', (6035, 6048), False, 'import json\n')] |
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_community.chat_models import ChatAnthropic
prompt = ChatPromptTemplate.from_template(
"Tell me a short joke about {topic}")
output_parser = StrOutputParser()
anthropic = ChatAnthropic(model="claude-2")
anthropic_chain = (
{"topic": RunnablePassthrough()}
| prompt
| anthropic
| output_parser
)
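# Note: the dict {"topic": RunnablePassthrough()} is coerced into a RunnableParallel,
# so invoking the chain with "ice cream" passes {"topic": "ice cream"} into the prompt.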
if __name__ == "__main__":
import os
os.environ["LANGCHAIN_API_KEY"] = "..."
os.environ["LANGCHAIN_TRACING_V2"] = "true"
# it's hard to customize the logging output of langchain
# so here's their way to try to make money from you!
print(anthropic_chain.invoke("ice cream"))
| [
"langchain_core.prompts.ChatPromptTemplate.from_template",
"langchain_community.chat_models.ChatAnthropic",
"langchain_core.runnables.RunnablePassthrough",
"langchain_core.output_parsers.StrOutputParser"
] | [((237, 307), 'langchain_core.prompts.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['"""Tell me a short joke about {topic}"""'], {}), "('Tell me a short joke about {topic}')\n", (269, 307), False, 'from langchain_core.prompts import ChatPromptTemplate\n'), ((329, 346), 'langchain_core.output_parsers.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (344, 346), False, 'from langchain_core.output_parsers import StrOutputParser\n'), ((359, 390), 'langchain_community.chat_models.ChatAnthropic', 'ChatAnthropic', ([], {'model': '"""claude-2"""'}), "(model='claude-2')\n", (372, 390), False, 'from langchain_community.chat_models import ChatAnthropic\n'), ((425, 446), 'langchain_core.runnables.RunnablePassthrough', 'RunnablePassthrough', ([], {}), '()\n', (444, 446), False, 'from langchain_core.runnables import RunnablePassthrough\n')] |
import os
from typing import Optional
from langchain import LLMChain, OpenAI, PromptTemplate
from langchain.chains.base import Chain
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.base import BaseLLM
from langchain.llms.loading import load_llm
DEFAULT_LLM = None
# Default template, no memory
TEMPLATE = """
You are working with a pandas dataframe in Python. The name of the dataframe is `df`.
The dataframe has the following columns: {df_columns}.
You should execute code as commanded to either provide information to answer the question or to
do the transformations required.
You should not assign any variables; you should return a one-liner in Pandas.
This is your objective: {query}
Go!
```python
print(df.head())
```
```output
{df_head}
```
```python"""
PROMPT = PromptTemplate(template=TEMPLATE, input_variables=["query", "df_head", "df_columns"])
# Template with memory
# TODO: add result of expected code to memory; currently we only remember what code was run.
TEMPLATE_WITH_MEMORY = """
You are working with a pandas dataframe in Python. The name of the dataframe is `df`.
The dataframe has the following columns: {df_columns}.
You are interacting with a programmer. The programmer issues commands and you should translate
them into Python code and execute them.
This is the history of your interaction so far:
{chat_history}
Human: {query}
Go!
```python
df.head()
```
```output
{df_head}
```
```python
"""
PROMPT_WITH_MEMORY = PromptTemplate(
template=TEMPLATE_WITH_MEMORY, input_variables=["chat_history", "query", "df_head", "df_columns"]
)
def set_llm(llm: BaseLLM) -> None:
global DEFAULT_LLM
DEFAULT_LLM = llm
def get_chain(llm: Optional[BaseLLM] = None, use_memory: bool = True) -> Chain:
"""Get chain to use."""
if llm is None:
if DEFAULT_LLM is None:
llm_config_path = os.environ.get("LLPANDAS_LLM_CONFIGURATION")
if llm_config_path is None:
llm = OpenAI(temperature=0)
else:
llm = load_llm(llm_config_path)
else:
llm = DEFAULT_LLM
if use_memory:
memory = ConversationBufferMemory(memory_key="chat_history", input_key="query")
chain = LLMChain(llm=llm, prompt=PROMPT_WITH_MEMORY, memory=memory)
else:
chain = LLMChain(llm=llm, prompt=PROMPT)
return chain
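# Example usage (a minimal sketch; `df` is assumed to be an arbitrary pandas DataFrame):
# chain = get_chain(use_memory=False)
# code = chain.predict(query="sort by the first column", df_head=str(df.head()), df_columns=list(df.columns))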
| [
"langchain.LLMChain",
"langchain.llms.loading.load_llm",
"langchain.chains.conversation.memory.ConversationBufferMemory",
"langchain.OpenAI",
"langchain.PromptTemplate"
] | [((825, 914), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'TEMPLATE', 'input_variables': "['query', 'df_head', 'df_columns']"}), "(template=TEMPLATE, input_variables=['query', 'df_head',\n 'df_columns'])\n", (839, 914), False, 'from langchain import LLMChain, OpenAI, PromptTemplate\n'), ((1501, 1619), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'TEMPLATE_WITH_MEMORY', 'input_variables': "['chat_history', 'query', 'df_head', 'df_columns']"}), "(template=TEMPLATE_WITH_MEMORY, input_variables=[\n 'chat_history', 'query', 'df_head', 'df_columns'])\n", (1515, 1619), False, 'from langchain import LLMChain, OpenAI, PromptTemplate\n'), ((2171, 2241), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'input_key': '"""query"""'}), "(memory_key='chat_history', input_key='query')\n", (2195, 2241), False, 'from langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((2258, 2317), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'PROMPT_WITH_MEMORY', 'memory': 'memory'}), '(llm=llm, prompt=PROMPT_WITH_MEMORY, memory=memory)\n', (2266, 2317), False, 'from langchain import LLMChain, OpenAI, PromptTemplate\n'), ((2344, 2376), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'PROMPT'}), '(llm=llm, prompt=PROMPT)\n', (2352, 2376), False, 'from langchain import LLMChain, OpenAI, PromptTemplate\n'), ((1895, 1939), 'os.environ.get', 'os.environ.get', (['"""LLPANDAS_LLM_CONFIGURATION"""'], {}), "('LLPANDAS_LLM_CONFIGURATION')\n", (1909, 1939), False, 'import os\n'), ((2002, 2023), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (2008, 2023), False, 'from langchain import LLMChain, OpenAI, PromptTemplate\n'), ((2064, 2089), 'langchain.llms.loading.load_llm', 'load_llm', (['llm_config_path'], {}), '(llm_config_path)\n', (2072, 2089), False, 'from langchain.llms.loading import load_llm\n')] |
import re
from typing import Optional
from langchain_core.exceptions import OutputParserException
from pydantic import BaseModel, Field
from ..parser.custom import ParserBaseModel as ParserBaseModel
class CodeBlock(ParserBaseModel):
code: str
language: Optional[str] = None
@classmethod
def parse(cls, text: str) -> "CodeBlock":
matches = re.finditer(r"```(?P<language>\w+)?\n?(?P<code>.*?)```", text, re.DOTALL)
for match in matches:
groupdict = match.groupdict()
groupdict["language"] = groupdict.get("language", None)
# custom markdown fix
if groupdict["language"] == "markdown":
t = text.split("```markdown")[1]
return cls(
language="markdown",
code=t[: -(len(t.split("```")[-1]) + 3)],
)
return cls(**groupdict)
return cls(code=text) # TODO: fix this hack
raise OutputParserException("Invalid codeblock")
@staticmethod
def format_instructions() -> str:
return "Answer with a codeblock."
@staticmethod
def custom_grammar() -> str | None:
return 'root ::= "```" ([^`] | "`" [^`] | "``" [^`])* "```"'
def __str__(self) -> str:
return self.code
class Error(BaseModel):
"""
Fallback function for invalid input.
    If you are unsure which function to call, use this error function as a fallback.
This will tell the user that the input is not valid.
"""
    title: str = Field(description="CamelCase Name titling the error")
description: str = Field(..., description="Short description of the unexpected situation")
def __raise__(self) -> None:
raise Exception(self.description)
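# Example (a minimal sketch of how CodeBlock.parse handles a fenced block):
# block = CodeBlock.parse("```python\nprint('hi')\n```")
# assert block.language == "python" and "print('hi')" in block.code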
| [
"langchain_core.exceptions.OutputParserException"
] | [((1541, 1595), 'pydantic.Field', 'Field', ([], {'description': '"""CamelCase Name titeling the error"""'}), "(description='CamelCase Name titeling the error')\n", (1546, 1595), False, 'from pydantic import BaseModel, Field\n'), ((1619, 1690), 'pydantic.Field', 'Field', (['...'], {'description': '"""Short description of the unexpected situation"""'}), "(..., description='Short description of the unexpected situation')\n", (1624, 1690), False, 'from pydantic import BaseModel, Field\n'), ((368, 442), 're.finditer', 're.finditer', (['"""```(?P<language>\\\\w+)?\\\\n?(?P<code>.*?)```"""', 'text', 're.DOTALL'], {}), "('```(?P<language>\\\\w+)?\\\\n?(?P<code>.*?)```', text, re.DOTALL)\n", (379, 442), False, 'import re\n'), ((972, 1014), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Invalid codeblock"""'], {}), "('Invalid codeblock')\n", (993, 1014), False, 'from langchain_core.exceptions import OutputParserException\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : AI. @by PyCharm
# @File : promptwatch
# @Time : 2023/7/13 10:03
# @Author : betterme
# @WeChat : meutils
# @Software : PyCharm
# @Description :
import os
from meutils.pipe import *
from langchain import OpenAI, LLMChain, PromptTemplate
from promptwatch import PromptWatch, register_prompt_template
prompt_template = PromptTemplate.from_template("这是个prompt: {input}")
prompt_template = register_prompt_template("name_of_your_template", prompt_template)
my_chain = LLMChain(llm=OpenAI(streaming=True), prompt=prompt_template)
with PromptWatch(api_key=os.getenv('PROMPT_WATCH_API_KEY')) as pw:
my_chain("1+1=")
| [
"langchain.OpenAI",
"langchain.PromptTemplate.from_template"
] | [((417, 467), 'langchain.PromptTemplate.from_template', 'PromptTemplate.from_template', (['"""这是个prompt: {input}"""'], {}), "('这是个prompt: {input}')\n", (445, 467), False, 'from langchain import OpenAI, LLMChain, PromptTemplate\n'), ((486, 552), 'promptwatch.register_prompt_template', 'register_prompt_template', (['"""name_of_your_template"""', 'prompt_template'], {}), "('name_of_your_template', prompt_template)\n", (510, 552), False, 'from promptwatch import PromptWatch, register_prompt_template\n'), ((577, 599), 'langchain.OpenAI', 'OpenAI', ([], {'streaming': '(True)'}), '(streaming=True)\n', (583, 599), False, 'from langchain import OpenAI, LLMChain, PromptTemplate\n'), ((651, 684), 'os.getenv', 'os.getenv', (['"""PROMPT_WATCH_API_KEY"""'], {}), "('PROMPT_WATCH_API_KEY')\n", (660, 684), False, 'import os\n')] |
import sys
from typing import Any
import readline
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
import colorama
from callbacks import handlers
from config import config
from i18n import text
from utils import utils
from agent.agent import create_agent
from walrus.toolkit import WalrusToolKit
from k8s.toolkit import KubernetesToolKit
last_error = None
def setup_agent() -> Any:
config.init()
colorama.init()
llm = ChatOpenAI(
model_name="gpt-4",
temperature=0,
callbacks=[handlers.PrintReasoningCallbackHandler()],
)
text.init_system_messages(llm)
memory = ConversationBufferMemory(memory_key="chat_history")
enabled_toolkits = [
toolkit.lower() for toolkit in config.APPILOT_CONFIG.toolkits
]
tools = []
if "kubernetes" in enabled_toolkits:
kubernetes_toolkit = KubernetesToolKit(llm=llm)
tools.extend(kubernetes_toolkit.get_tools())
elif "walrus" in enabled_toolkits:
walrus_toolkit = WalrusToolKit(llm=llm)
tools.extend(walrus_toolkit.get_tools())
else:
print(text.get("enable_no_toolkit"))
sys.exit(1)
return create_agent(
llm,
shared_memory=memory,
tools=tools,
verbose=config.APPILOT_CONFIG.verbose,
)
def run():
appilot_agent = setup_agent()
print(text.get("welcome"))
user_query = None
while True:
user_query = input(">")
if utils.is_inform_sent():
continue
elif user_query == "exit":
break
elif user_query == "appilot_log":
print_last_error()
continue
elif user_query.startswith("#"):
continue
elif not user_query.strip():
continue
try:
result = appilot_agent.run(user_query)
except handlers.HumanRejectedException as he:
utils.print_rejected_message()
continue
except Exception as e:
handle_exception(e)
continue
utils.print_ai_response(result)
def handle_exception(e):
global last_error
print(text.get("response_prefix"), end="")
print(text.get("error_occur_message"))
last_error = e
def print_last_error():
if last_error is None:
print(text.get("response_prefix"), end="")
print(text.get("no_error_message"))
else:
print(last_error)
| [
"langchain.memory.ConversationBufferMemory"
] | [((442, 455), 'config.config.init', 'config.init', ([], {}), '()\n', (453, 455), False, 'from config import config\n'), ((460, 475), 'colorama.init', 'colorama.init', ([], {}), '()\n', (473, 475), False, 'import colorama\n'), ((623, 653), 'i18n.text.init_system_messages', 'text.init_system_messages', (['llm'], {}), '(llm)\n', (648, 653), False, 'from i18n import text\n'), ((668, 719), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""'}), "(memory_key='chat_history')\n", (692, 719), False, 'from langchain.memory import ConversationBufferMemory\n'), ((1211, 1307), 'agent.agent.create_agent', 'create_agent', (['llm'], {'shared_memory': 'memory', 'tools': 'tools', 'verbose': 'config.APPILOT_CONFIG.verbose'}), '(llm, shared_memory=memory, tools=tools, verbose=config.\n APPILOT_CONFIG.verbose)\n', (1223, 1307), False, 'from agent.agent import create_agent\n'), ((908, 934), 'k8s.toolkit.KubernetesToolKit', 'KubernetesToolKit', ([], {'llm': 'llm'}), '(llm=llm)\n', (925, 934), False, 'from k8s.toolkit import KubernetesToolKit\n'), ((1400, 1419), 'i18n.text.get', 'text.get', (['"""welcome"""'], {}), "('welcome')\n", (1408, 1419), False, 'from i18n import text\n'), ((1502, 1524), 'utils.utils.is_inform_sent', 'utils.is_inform_sent', ([], {}), '()\n', (1522, 1524), False, 'from utils import utils\n'), ((2090, 2121), 'utils.utils.print_ai_response', 'utils.print_ai_response', (['result'], {}), '(result)\n', (2113, 2121), False, 'from utils import utils\n'), ((2181, 2208), 'i18n.text.get', 'text.get', (['"""response_prefix"""'], {}), "('response_prefix')\n", (2189, 2208), False, 'from i18n import text\n'), ((2228, 2259), 'i18n.text.get', 'text.get', (['"""error_occur_message"""'], {}), "('error_occur_message')\n", (2236, 2259), False, 'from i18n import text\n'), ((1052, 1074), 'walrus.toolkit.WalrusToolKit', 'WalrusToolKit', ([], {'llm': 'llm'}), '(llm=llm)\n', (1065, 1074), False, 'from walrus.toolkit import WalrusToolKit\n'), ((1187, 1198), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1195, 1198), False, 'import sys\n'), ((2347, 2374), 'i18n.text.get', 'text.get', (['"""response_prefix"""'], {}), "('response_prefix')\n", (2355, 2374), False, 'from i18n import text\n'), ((2398, 2426), 'i18n.text.get', 'text.get', (['"""no_error_message"""'], {}), "('no_error_message')\n", (2406, 2426), False, 'from i18n import text\n'), ((569, 609), 'callbacks.handlers.PrintReasoningCallbackHandler', 'handlers.PrintReasoningCallbackHandler', ([], {}), '()\n', (607, 609), False, 'from callbacks import handlers\n'), ((1148, 1177), 'i18n.text.get', 'text.get', (['"""enable_no_toolkit"""'], {}), "('enable_no_toolkit')\n", (1156, 1177), False, 'from i18n import text\n'), ((1945, 1975), 'utils.utils.print_rejected_message', 'utils.print_rejected_message', ([], {}), '()\n', (1973, 1975), False, 'from utils import utils\n')] |
"""Wrapper around Cohere APIs."""
from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
from pydantic import Extra, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator(llm: Cohere) -> Callable[[Any], Any]:
import cohere
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type(cohere.error.CohereError)),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(llm: Cohere, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return llm.client.generate(**kwargs)
return _completion_with_retry(**kwargs)
class Cohere(LLM):
"""Wrapper around Cohere large language models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import Cohere
cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key")
"""
client: Any #: :meta private:
model: Optional[str] = None
"""Model name to use."""
max_tokens: int = 256
"""Denotes the number of tokens to predict per generation."""
temperature: float = 0.75
"""A non-negative float that tunes the degree of randomness in generation."""
k: int = 0
"""Number of most likely tokens to consider at each step."""
p: int = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency. Between 0 and 1."""
presence_penalty: float = 0.0
"""Penalizes repeated tokens. Between 0 and 1."""
truncate: Optional[str] = None
"""Specify how the client handles inputs longer than the maximum token
length: Truncate from START, END or NONE"""
max_retries: int = 10
"""Maximum number of retries to make when generating."""
cohere_api_key: Optional[str] = None
stop: Optional[List[str]] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
try:
import cohere
values["client"] = cohere.Client(cohere_api_key)
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"k": self.k,
"p": self.p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"truncate": self.truncate,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "cohere"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to Cohere's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = cohere("Tell me a joke.")
"""
params = self._default_params
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
params["stop_sequences"] = self.stop
else:
params["stop_sequences"] = stop
response = completion_with_retry(
self, model=self.model, prompt=prompt, **params
)
text = response.generations[0].text
# If stop tokens are provided, Cohere's endpoint returns them.
# In order to make this consistent with other endpoints, we strip them.
if stop is not None or self.stop is not None:
text = enforce_stop_tokens(text, params["stop_sequences"])
return text
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((531, 558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (548, 558), False, 'import logging\n'), ((3018, 3034), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3032, 3034), False, 'from pydantic import Extra, root_validator\n'), ((3195, 3259), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""cohere_api_key"""', '"""COHERE_API_KEY"""'], {}), "(values, 'cohere_api_key', 'COHERE_API_KEY')\n", (3215, 3259), False, 'from langchain.utils import get_from_dict_or_env\n'), ((866, 901), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['llm.max_retries'], {}), '(llm.max_retries)\n', (884, 901), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((916, 980), 'tenacity.wait_exponential', 'wait_exponential', ([], {'multiplier': '(1)', 'min': 'min_seconds', 'max': 'max_seconds'}), '(multiplier=1, min=min_seconds, max=max_seconds)\n', (932, 980), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((997, 1046), 'tenacity.retry_if_exception_type', 'retry_if_exception_type', (['cohere.error.CohereError'], {}), '(cohere.error.CohereError)\n', (1020, 1046), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((1070, 1111), 'tenacity.before_sleep_log', 'before_sleep_log', (['logger', 'logging.WARNING'], {}), '(logger, logging.WARNING)\n', (1086, 1111), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((3353, 3382), 'cohere.Client', 'cohere.Client', (['cohere_api_key'], {}), '(cohere_api_key)\n', (3366, 3382), False, 'import cohere\n'), ((5575, 5626), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', "params['stop_sequences']"], {}), "(text, params['stop_sequences'])\n", (5594, 5626), False, 'from langchain.llms.utils import enforce_stop_tokens\n')] |
"""Wrapper around GooseAI API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class GooseAI(LLM):
"""Wrapper around OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``GOOSEAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import GooseAI
gooseai = GooseAI(model_name="gpt-neo-20b")
"""
client: Any
model_name: str = "gpt-neo-20b"
"""Model name to use"""
temperature: float = 0.7
"""What sampling temperature to use"""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
    the model's maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
min_tokens: int = 1
"""The minimum number of tokens to generate in the completion."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
gooseai_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.ignore
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transfered to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
gooseai_api_key = get_from_dict_or_env(
values, "gooseai_api_key", "GOOSEAI_API_KEY"
)
try:
import openai
openai.api_key = gooseai_api_key
openai.api_base = "https://api.goose.ai/v1"
values["client"] = openai.Completion
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling GooseAI API."""
normal_params = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"min_tokens": self.min_tokens,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"logit_bias": self.logit_bias,
}
return {**normal_params, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "gooseai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call the GooseAI API."""
params = self._default_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
response = self.client.create(engine=self.model_name, prompt=prompt, **params)
text = response.choices[0].text
return text
| [
"langchain.utils.get_from_dict_or_env"
] | [((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836, 1863), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1841, 1863), False, 'from pydantic import Extra, Field, root_validator\n'), ((2085, 2109), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2099, 2109), False, 'from pydantic import Extra, Field, root_validator\n'), ((2995, 3011), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3009, 3011), False, 'from pydantic import Extra, Field, root_validator\n'), ((3173, 3239), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""gooseai_api_key"""', '"""GOOSEAI_API_KEY"""'], {}), "(values, 'gooseai_api_key', 'GOOSEAI_API_KEY')\n", (3193, 3239), False, 'from langchain.utils import get_from_dict_or_env\n')] |
import copy
from typing import Generic, Type, TypeVar
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseGenerationOutputParser
from langchain_core.outputs import ChatGeneration, Generation
from langchain_core.runnables import Runnable
from pydantic import BaseModel, ValidationError
from ..schema.types import UniversalChatModel
from ..syntax.output_types import CodeBlock as CodeBlock
from ..utils.msg_tools import msg_to_str
M = TypeVar("M", bound=BaseModel)
class RetryOpenAIFunctionPydanticParser(BaseGenerationOutputParser[M]):
pydantic_schema: Type[M]
args_only: bool = False
retry: int
retry_llm: UniversalChatModel = None
def parse_result(self, result: list[Generation], *, partial: bool = False) -> M:
try:
generation = result[0]
if not isinstance(generation, ChatGeneration):
raise OutputParserException(
"This output parser can only be used with a chat generation.",
)
message = generation.message
try:
func_call = copy.deepcopy(message.additional_kwargs["function_call"])
except KeyError as exc:
raise OutputParserException(
f"Could not parse function call: {exc}",
llm_output=msg_to_str(message),
)
if self.args_only:
pydantic_args = self.pydantic_schema.model_validate_json(func_call)
else:
pydantic_args = self.pydantic_schema.model_validate_json(func_call["arguments"])
return pydantic_args
except ValidationError as e:
if self.retry > 0:
print(f"Retrying parsing {self.pydantic_schema.__name__}...")
return self.retry_chain.invoke(
input={"output": result, "error": str(e)},
config={"run_name": "RetryOpenAIFunctionPydanticParser"},
)
# no retries left
raise OutputParserException(str(e), llm_output=msg_to_str(message))
@property
def retry_chain(self) -> Runnable:
from ..syntax.executable import compile_runnable
return compile_runnable(
instruction="Retry parsing the output by fixing the error.",
input_args=["output", "error"],
output_types=[self.pydantic_schema],
llm=self.retry_llm,
settings_override={"retry_parse": self.retry - 1},
)
class RetryOpenAIFunctionPydanticUnionParser(BaseGenerationOutputParser[M]):
output_types: list[type[M]]
args_only: bool = False
retry: int
retry_llm: UniversalChatModel = None
def parse_result(self, result: list[Generation], *, partial: bool = False) -> M:
try:
function_call = self._pre_parse_function_call(result)
output_type_names = [t.__name__.lower() for t in self.output_types]
if function_call["name"] not in output_type_names:
raise OutputParserException("Invalid function call")
output_type = self._get_output_type(function_call["name"])
generation = result[0]
if not isinstance(generation, ChatGeneration):
raise OutputParserException("This output parser can only be used with a chat generation.")
message = generation.message
try:
func_call = copy.deepcopy(message.additional_kwargs["function_call"])
except KeyError as exc:
raise OutputParserException(
f"Could not parse function call: {exc}",
llm_output=msg_to_str(message),
)
if self.args_only:
pydantic_args = output_type.model_validate_json(func_call["arguments"])
else:
pydantic_args = output_type.model_validate_json(func_call["arguments"])
return pydantic_args
except (ValidationError, OutputParserException) as e:
if self.retry > 0:
print(f"Retrying parsing {output_type.__name__}...")
return self.retry_chain.invoke(
input={"output": result, "error": str(e)},
config={"run_name": "RetryOpenAIFunctionPydanticUnionParser"},
)
# no retries left
raise OutputParserException(str(e), llm_output=msg_to_str(message))
def _pre_parse_function_call(self, result: list[Generation]) -> dict:
generation = result[0]
if not isinstance(generation, ChatGeneration):
raise OutputParserException("This output parser can only be used with a chat generation.")
message = generation.message
try:
func_call = copy.deepcopy(message.additional_kwargs["function_call"])
except KeyError:
raise OutputParserException(
"The model refused to respond with a " f"function call:\n{message.content}\n\n",
llm_output=msg_to_str(message),
)
return func_call
    def _get_output_type(self, function_name: str) -> Type[M]:
        # `filter()` never returns None, so check the result of `next()` instead
        output_type = next(
            (t for t in self.output_types if t.__name__.lower() == function_name), None
        )
        if output_type is None:
            raise OutputParserException(f"No parser found for function: {function_name}")
        return output_type
@property
def retry_chain(self) -> Runnable:
from ..syntax.executable import compile_runnable
return compile_runnable(
instruction="Retry parsing the output by fixing the error.",
input_args=["output", "error"],
output_types=self.output_types,
llm=self.retry_llm,
settings_override={"retry_parse": self.retry - 1},
)
class RetryOpenAIFunctionPrimitiveTypeParser(RetryOpenAIFunctionPydanticParser, Generic[M]):
"""
    Parse primitive types by wrapping them in a PydanticModel and parsing them.
Examples: int, float, bool, list[str], dict[str, int], Literal["a", "b", "c"], etc.
"""
def parse_result(self, result: list[Generation], *, partial: bool = False) -> M:
return super().parse_result(result, partial=partial).value
| [
"langchain_core.exceptions.OutputParserException"
] | [((490, 519), 'typing.TypeVar', 'TypeVar', (['"""M"""'], {'bound': 'BaseModel'}), "('M', bound=BaseModel)\n", (497, 519), False, 'from typing import Generic, Type, TypeVar\n'), ((4673, 4762), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""This output parser can only be used with a chat generation."""'], {}), "(\n 'This output parser can only be used with a chat generation.')\n", (4694, 4762), False, 'from langchain_core.exceptions import OutputParserException\n'), ((4832, 4889), 'copy.deepcopy', 'copy.deepcopy', (["message.additional_kwargs['function_call']"], {}), "(message.additional_kwargs['function_call'])\n", (4845, 4889), False, 'import copy\n'), ((5360, 5431), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['f"""No parser found for function: {function_name}"""'], {}), "(f'No parser found for function: {function_name}')\n", (5381, 5431), False, 'from langchain_core.exceptions import OutputParserException\n'), ((922, 1011), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""This output parser can only be used with a chat generation."""'], {}), "(\n 'This output parser can only be used with a chat generation.')\n", (943, 1011), False, 'from langchain_core.exceptions import OutputParserException\n'), ((1132, 1189), 'copy.deepcopy', 'copy.deepcopy', (["message.additional_kwargs['function_call']"], {}), "(message.additional_kwargs['function_call'])\n", (1145, 1189), False, 'import copy\n'), ((3073, 3119), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Invalid function call"""'], {}), "('Invalid function call')\n", (3094, 3119), False, 'from langchain_core.exceptions import OutputParserException\n'), ((3309, 3398), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""This output parser can only be used with a chat generation."""'], {}), "(\n 'This output parser can only be used with a chat generation.')\n", (3330, 3398), False, 'from langchain_core.exceptions import OutputParserException\n'), ((3480, 3537), 'copy.deepcopy', 'copy.deepcopy', (["message.additional_kwargs['function_call']"], {}), "(message.additional_kwargs['function_call'])\n", (3493, 3537), False, 'import copy\n')] |
"""Wrapper around Anyscale"""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
class Anyscale(LLM):
"""Wrapper around Anyscale Services.
To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``,
``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale
Service, or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import Anyscale
anyscale = Anyscale(anyscale_service_url="SERVICE_URL",
anyscale_service_route="SERVICE_ROUTE",
anyscale_service_token="SERVICE_TOKEN")
# Use Ray for distributed processing
import ray
prompt_list=[]
@ray.remote
def send_query(llm, prompt):
resp = llm(prompt)
return resp
futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list]
results = ray.get(futures)
"""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model. Reserved for future use"""
anyscale_service_url: Optional[str] = None
anyscale_service_route: Optional[str] = None
anyscale_service_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
anyscale_service_url = get_from_dict_or_env(
values, "anyscale_service_url", "ANYSCALE_SERVICE_URL"
)
anyscale_service_route = get_from_dict_or_env(
values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE"
)
anyscale_service_token = get_from_dict_or_env(
values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN"
)
try:
anyscale_service_endpoint = f"{anyscale_service_url}/-/route"
headers = {"Authorization": f"Bearer {anyscale_service_token}"}
requests.get(anyscale_service_endpoint, headers=headers)
except requests.exceptions.RequestException as e:
raise ValueError(e)
values["anyscale_service_url"] = anyscale_service_url
values["anyscale_service_route"] = anyscale_service_route
values["anyscale_service_token"] = anyscale_service_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"anyscale_service_url": self.anyscale_service_url,
"anyscale_service_route": self.anyscale_service_route,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "anyscale"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to Anyscale Service endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = anyscale("Tell me a joke.")
"""
anyscale_service_endpoint = (
f"{self.anyscale_service_url}/{self.anyscale_service_route}"
)
headers = {"Authorization": f"Bearer {self.anyscale_service_token}"}
body = {"prompt": prompt}
resp = requests.post(anyscale_service_endpoint, headers=headers, json=body)
if resp.status_code != 200:
raise ValueError(
f"Error returned by service, status code {resp.status_code}"
)
text = resp.text
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
            # stop tokens when making calls to the Anyscale service endpoint.
text = enforce_stop_tokens(text, stop)
return text
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'anyscale_service_url', 'ANYSCALE_SERVICE_URL')\n", (1882, 1938), False, 'from langchain.utils import get_from_dict_or_env\n'), ((1994, 2079), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_route"""', '"""ANYSCALE_SERVICE_ROUTE"""'], {}), "(values, 'anyscale_service_route', 'ANYSCALE_SERVICE_ROUTE'\n )\n", (2014, 2079), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2130, 2215), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_token"""', '"""ANYSCALE_SERVICE_TOKEN"""'], {}), "(values, 'anyscale_service_token', 'ANYSCALE_SERVICE_TOKEN'\n )\n", (2150, 2215), False, 'from langchain.utils import get_from_dict_or_env\n'), ((3943, 4011), 'requests.post', 'requests.post', (['anyscale_service_endpoint'], {'headers': 'headers', 'json': 'body'}), '(anyscale_service_endpoint, headers=headers, json=body)\n', (3956, 4011), False, 'import requests\n'), ((2408, 2464), 'requests.get', 'requests.get', (['anyscale_service_endpoint'], {'headers': 'headers'}), '(anyscale_service_endpoint, headers=headers)\n', (2420, 2464), False, 'import requests\n'), ((4390, 4421), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (4409, 4421), False, 'from langchain.llms.utils import enforce_stop_tokens\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : AI. @by PyCharm
# @File : chatpicture
# @Time : 2023/8/23 13:56
# @Author : betterme
# @WeChat : meutils
# @Software : PyCharm
# @Description  : add proxy support; choose the OCR type based on intent
from meutils.pipe import *
from meutils.ai_cv.ocr_api import OCR
class ChatPicture(object):
def __init__(self):
pass
if __name__ == '__main__':
img = Path("/Users/betterme/PycharmProjects/AI/aizoo/aizoo/api/港澳台通行证.webp").read_bytes()
print(OCR.basic_accurate(img))
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from chatllm.llmchain.prompts.prompt_templates import CHAT_CONTEXT_PROMPT
llm = ChatOpenAI()
prompt = CHAT_CONTEXT_PROMPT
context = json.dumps(OCR.basic_accurate(img), ensure_ascii=False)
# c = LLMChain(llm=llm, prompt=prompt)
# print(c.run(context=context, question="出生日期是?"))
print(context)
| [
"langchain.chat_models.ChatOpenAI"
] | [((732, 744), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (742, 744), False, 'from langchain.chat_models import ChatOpenAI\n'), ((526, 549), 'meutils.ai_cv.ocr_api.OCR.basic_accurate', 'OCR.basic_accurate', (['img'], {}), '(img)\n', (544, 549), False, 'from meutils.ai_cv.ocr_api import OCR\n'), ((804, 827), 'meutils.ai_cv.ocr_api.OCR.basic_accurate', 'OCR.basic_accurate', (['img'], {}), '(img)\n', (822, 827), False, 'from meutils.ai_cv.ocr_api import OCR\n')] |
from typing import List
from pydantic import BaseModel, Field
from langchain.agents import AgentExecutor, Tool
from langchain.llms.base import BaseLLM
from .agent.base import AutonomousAgent
class ExecutionAgent(BaseModel):
agent: AgentExecutor = Field(...)
@classmethod
def from_llm(cls, llm: BaseLLM, objective: str, tools: List[Tool], verbose: bool = True) -> "ExecutionAgent":
agent = AutonomousAgent.from_llm_and_tools(llm=llm, tools=tools, objective=objective, verbose=verbose)
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=verbose)
return cls(agent=agent_executor)
def execute_task(self, task: str, context: str) -> str:
for i in range(3):
try:
return self.agent.run({"input": task, "context": context})
except ValueError:
print(f"Value error running executor agent. Will retry {2-i} times")
return "Failed to execute task."
| [
"langchain.agents.AgentExecutor.from_agent_and_tools"
] | [((254, 264), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (259, 264), False, 'from pydantic import BaseModel, Field\n'), ((533, 610), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'agent', 'tools': 'tools', 'verbose': 'verbose'}), '(agent=agent, tools=tools, verbose=verbose)\n', (567, 610), False, 'from langchain.agents import AgentExecutor, Tool\n')] |
# process_text.py
from lib.chat.setup import openai_embeddings
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.document_loaders.csv_loader import CSVLoader
import requests
import json
import chardet
def read_input_files(file_paths):
combined_articles = []
for file_path in file_paths:
content = read_file(file_path)
combined_articles.append(content)
return combined_articles
def split_texts(all_combined_articles):
all_texts = []
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=4096,
chunk_overlap=256,
length_function=len
)
for combined_articles in all_combined_articles:
all_texts.append(text_splitter.split_text(combined_articles))
return all_texts
def create_documents(all_texts, prefixes):
all_docs = []
for texts, prefix in zip(all_texts, prefixes):
metadatas = [{"SOURCES": f"{prefix}{i}"} for i in range(len(texts))]
docs = [Document(id=f'{prefix}{i}', page_content=texts[i], metadata=metadatas[i]) for i in range(len(texts))]
all_docs.extend(docs)
return all_docs
def get_fieldnames_from_csv(csv_path):
with open(csv_path, newline="", encoding="utf-8") as csvfile:
fieldnames = csvfile.readline().strip().split(';')
return fieldnames
def documents_from_csv(csv_path,fieldnames):
loader = CSVLoader(file_path=csv_path,
csv_args={
'delimiter': ';',
'quotechar': '"',
'fieldnames': fieldnames
})
docs = loader.load()
return docs
def create_vectordb_from_docs(docs, persist_directory=None):
vectordb = Chroma.from_documents(documents=docs, embedding=openai_embeddings, persist_directory=persist_directory)
if persist_directory:
vectordb.persist()
return vectordb
def load_persisted_chromadb(persist_directory):
vectordb = Chroma(persist_directory=persist_directory,
embedding_function=openai_embeddings)
return vectordb
def read_file(file_path):
with open(file_path, 'rb') as file:
raw_data = file.read()
detected_encoding = chardet.detect(raw_data)['encoding']
content = raw_data.decode(detected_encoding, errors='ignore')
return content
def write_file(file_path, content):
with open(file_path, 'w', encoding='utf-8') as file:
file.write(content)
def split_text(text, max_size=16 * 1024):
words = text.split(' ')
chunks = []
current_chunk = []
for word in words:
if len(' '.join(current_chunk)) + len(word) < max_size:
current_chunk.append(word)
else:
chunks.append(' '.join(current_chunk))
current_chunk = [word]
if current_chunk:
chunks.append(' '.join(current_chunk))
return chunks
def translate_text(text, api_key, source_lang='EL', target_lang='EN'):
url = 'https://api-free.deepl.com/v2/translate'
headers = {'Authorization': f'DeepL-Auth-Key {api_key}'}
data = {'text': text, 'source_lang': source_lang, 'target_lang': target_lang}
response = requests.post(url, headers=headers, data=data)
try:
translations = json.loads(response.text)['translations']
return translations[0]['text']
except json.JSONDecodeError:
print(f"Error: status code {response.status_code}, response content: {response.text}")
return ""
def translate_documents(file_paths, api_key, source_lang, target_lang):
translated_files = []
for file_path in file_paths:
text = read_file(file_path)
chunks = split_text(text)
translated_chunks = [translate_text(chunk, api_key, source_lang, target_lang) for chunk in chunks]
translated_text = ' '.join(translated_chunks)
translated_file_path = f"{file_path[:-4]}_translated.txt"
write_file(translated_file_path, translated_text)
translated_files.append(translated_file_path)
return translated_files
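# Example pipeline (a minimal sketch; the file paths, prefix, and persist directory are placeholders):
# combined = read_input_files(["articles_en.txt"])
# docs = create_documents(split_texts(combined), prefixes=["articles-"])
# vectordb = create_vectordb_from_docs(docs, persist_directory="chroma_db")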
| [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.docstore.document.Document",
"langchain.vectorstores.Chroma.from_documents",
"langchain.document_loaders.csv_loader.CSVLoader",
"langchain.vectorstores.Chroma"
] | [((612, 703), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(4096)', 'chunk_overlap': '(256)', 'length_function': 'len'}), '(chunk_size=4096, chunk_overlap=256,\n length_function=len)\n', (642, 703), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1649, 1755), 'langchain.document_loaders.csv_loader.CSVLoader', 'CSVLoader', ([], {'file_path': 'csv_path', 'csv_args': '{\'delimiter\': \';\', \'quotechar\': \'"\', \'fieldnames\': fieldnames}'}), '(file_path=csv_path, csv_args={\'delimiter\': \';\', \'quotechar\': \'"\',\n \'fieldnames\': fieldnames})\n', (1658, 1755), False, 'from langchain.document_loaders.csv_loader import CSVLoader\n'), ((1992, 2099), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', ([], {'documents': 'docs', 'embedding': 'openai_embeddings', 'persist_directory': 'persist_directory'}), '(documents=docs, embedding=openai_embeddings,\n persist_directory=persist_directory)\n', (2013, 2099), False, 'from langchain.vectorstores import Chroma\n'), ((2234, 2320), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': 'persist_directory', 'embedding_function': 'openai_embeddings'}), '(persist_directory=persist_directory, embedding_function=\n openai_embeddings)\n', (2240, 2320), False, 'from langchain.vectorstores import Chroma\n'), ((3434, 3480), 'requests.post', 'requests.post', (['url'], {'headers': 'headers', 'data': 'data'}), '(url, headers=headers, data=data)\n', (3447, 3480), False, 'import requests\n'), ((2481, 2505), 'chardet.detect', 'chardet.detect', (['raw_data'], {}), '(raw_data)\n', (2495, 2505), False, 'import chardet\n'), ((1251, 1324), 'langchain.docstore.document.Document', 'Document', ([], {'id': 'f"""{prefix}{i}"""', 'page_content': 'texts[i]', 'metadata': 'metadatas[i]'}), "(id=f'{prefix}{i}', page_content=texts[i], metadata=metadatas[i])\n", (1259, 1324), False, 'from langchain.docstore.document import Document\n'), ((3518, 3543), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (3528, 3543), False, 'import json\n')] |
import re
from typing import List
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
from langchain.docstore.document import Document as LCDocument
class MarkDownSplitter(TextSplitter):
'''To split markdown'''
def split_text(self, text: str) -> List[str]:
if self.count_token(text) < self._chunk_size:
texts = [text]
else:
lines = text.split('\n')
new_lines = self.remove_long_code(lines)
markdown_splitter = RecursiveCharacterTextSplitter(chunk_size=self._chunk_size, chunk_overlap=0,
length_function=self.count_token)
documents = markdown_splitter.create_documents(['\n'.join(new_lines)])
texts = self._keep_parent_title(documents)
return texts
def remove_long_code(self, lines: List[str]) -> List[str]:
new_lines = []
code = ''
is_code = False
for line in lines:
line = self._strip_line(line)
if line.startswith('```'):
is_code = not is_code
if is_code or line.startswith('```'):
code = code + line
else:
if len(code) > 0 and self.count_token(code) <= self._chunk_size:
new_lines.append(code)
new_lines.append(line)
code = ''
return new_lines
def _keep_parent_title(self, documents: List[LCDocument]) -> List[str]:
docs = []
now_title_stack = []
for doc_chunk in documents:
new_chunk = []
lines = doc_chunk.page_content.split('\n')
for inner_idx, line in enumerate(lines):
if line.strip() == '':
continue
if self._is_title(line):
now_head_level = self._get_level(line)
last_level_in_stack = self._get_last_level(now_title_stack)
while now_head_level <= last_level_in_stack:
now_title_stack.pop()
last_level_in_stack = self._get_last_level(now_title_stack)
now_title_stack.append(line)
if inner_idx == 0 and line.strip() != '':
new_chunk.extend(now_title_stack)
if not self._is_title(line):
new_chunk.append(line)
else:
new_chunk.append(line)
docs.append('\n'.join(new_chunk))
return docs
def count_token(self, doc): # todo
# doc_len = len(re.findall(r'\w+', doc)) + len(re.findall(r'[^\w\s]', doc)) + len(re.findall(r'\n', doc)) // 4
doc_len = len(doc) // 3
return doc_len
def _strip_line(self, l):
l = re.sub(r'<(.*?)>', '', l)
return l.lstrip()
def _get_last_level(self, now_title_stack):
if len(now_title_stack) > 0:
return self._get_level(now_title_stack[-1])
else:
return 0
def _get_level(self, line):
return len(re.findall('#', line.split(' ')[0]))
def _is_title(self, line):
return line.split('# ')[0].replace('#', '') == ''
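# Example usage (a minimal sketch; `markdown_text` is any markdown string):
# splitter = MarkDownSplitter(chunk_size=300, chunk_overlap=0)
# chunks = splitter.split_text(markdown_text)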
| [
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((2824, 2848), 're.sub', 're.sub', (['"""<(.*?)>"""', '""""""', 'l'], {}), "('<(.*?)>', '', l)\n", (2830, 2848), False, 'import re\n'), ((514, 628), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': 'self._chunk_size', 'chunk_overlap': '(0)', 'length_function': 'self.count_token'}), '(chunk_size=self._chunk_size, chunk_overlap=0,\n length_function=self.count_token)\n', (544, 628), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter\n')] |
import os
import json
from dotenv import load_dotenv
from langchain.agents import Tool
from langchain.chat_models import ChatOpenAI
from ai.ai_functions import get_company_info, get_intro_response
from consts import company_handbook_faiss_path, llm_model_type, demo_company_name
from utils import calculate_vesting
# Load .env variables
load_dotenv()
# LLM Initialization
openai_api_key = os.getenv("OPENAI_API_KEY")
llm = ChatOpenAI(max_retries=3, temperature=0, # type: ignore
model_name=llm_model_type)
def tool_describe_skills():
"""
This function creates a LangChain agent's tool that uses a generic LLM chain to introduce itself and give user suggestions for what it can do.
"""
return Tool(name="Introduction", func=lambda query: get_intro_response(query), description=f"""useful for questions like 'what can I ask', 'what can you do', 'what else can you do', 'what can I ask you', 'can you suggest some things I can ask'. Action Input is the user's direct query.""", return_direct=True) # type: ignore
def tool_retrieve_company_info():
"""
This function creates a LangChain agent's tool that uses QARetrieval chain to retrieve information from the company handbook based on a FAISS vectorstore.
"""
return Tool(name="Company Guidelines", func=lambda query: get_company_info(user_reply=query, index_path=company_handbook_faiss_path), description=f"""useful for questions about {demo_company_name}'s polices, work from home, IT, CEO, product, meeting conduct, diversity and inclusion (DEI), career progression, management tips, sales process, HR, team events, 1 on 1 guidelines, coaching, onboarding tips. Pass user's response directly to this tool""", return_direct=True) # type: ignore
def tool_calculate_stock_options():
"""
    This function creates a LangChain agent's tool that takes the user's starting date, their total number of shares, and their vesting schedule, and returns the number of shares vested so far along with the shares that haven't vested yet.
"""
return Tool(name="Stock Options/Shares calculator", func=lambda query: calculate_vesting(start_date=json.loads(query)["start_date"], total_shares=json.loads(query)["total_shares"], vesting_schedule=json.loads(query)["vesting_schedule"]), description=f"""useful for when asked about stock options and share calculations. Action Input should be a JSON object of a start_date (YYYY-MM-DD), total_shares and vesting_schedule (tuple of decimal numbers) keys.""", return_direct=False) # type: ignore
| [
"langchain.chat_models.ChatOpenAI"
] | [((339, 352), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (350, 352), False, 'from dotenv import load_dotenv\n'), ((393, 420), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (402, 420), False, 'import os\n'), ((427, 494), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'max_retries': '(3)', 'temperature': '(0)', 'model_name': 'llm_model_type'}), '(max_retries=3, temperature=0, model_name=llm_model_type)\n', (437, 494), False, 'from langchain.chat_models import ChatOpenAI\n'), ((778, 803), 'ai.ai_functions.get_intro_response', 'get_intro_response', (['query'], {}), '(query)\n', (796, 803), False, 'from ai.ai_functions import get_company_info, get_intro_response\n'), ((1325, 1399), 'ai.ai_functions.get_company_info', 'get_company_info', ([], {'user_reply': 'query', 'index_path': 'company_handbook_faiss_path'}), '(user_reply=query, index_path=company_handbook_faiss_path)\n', (1341, 1399), False, 'from ai.ai_functions import get_company_info, get_intro_response\n'), ((2117, 2134), 'json.loads', 'json.loads', (['query'], {}), '(query)\n', (2127, 2134), False, 'import json\n'), ((2163, 2180), 'json.loads', 'json.loads', (['query'], {}), '(query)\n', (2173, 2180), False, 'import json\n'), ((2215, 2232), 'json.loads', 'json.loads', (['query'], {}), '(query)\n', (2225, 2232), False, 'import json\n')] |
from langchain.prompts import PromptTemplate
_symptom_extract_template = """Consider the following conversation patient note:
Patient note: {note}
Choose one of the symptoms to be the chief complaint (it is usually the first symptom mentioned).
Provide your response strictly in the following format, replacing only the name_of_chief_complaint (keeping : yes), and refrain from including any additional text:
<symptom> name_of_chief_complaint </symptom>
"""
_symptom_match_template = """Given the symptom: {symptom} which of the following retrievals is the best match?
Retrievals:
{retrievals}
Select only one and write it below in the following format:
<match> choice </match>
Remember, do not include any other text, ensure your choice is in the provided retrievals, and follow the output format.
"""
CC_EXTRACT_PROMPT = PromptTemplate.from_template(_symptom_extract_template)
CC_MATCH_PROMPT = PromptTemplate.from_template(_symptom_match_template)
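# Example (a minimal sketch; the note text is illustrative):
# prompt_text = CC_EXTRACT_PROMPT.format(note="Patient reports chest pain and shortness of breath.")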
| [
"langchain.prompts.PromptTemplate.from_template"
] | [((830, 885), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['_symptom_extract_template'], {}), '(_symptom_extract_template)\n', (858, 885), False, 'from langchain.prompts import PromptTemplate\n'), ((904, 957), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['_symptom_match_template'], {}), '(_symptom_match_template)\n', (932, 957), False, 'from langchain.prompts import PromptTemplate\n')] |
import ast
import re
from typing import (
Any,
Dict,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
)
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import BaseMessage
from langchain_core.output_parsers.transform import BaseTransformOutputParser
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import BaseTool
from typing_extensions import TypedDict
THOUGHT_PATTERN = r"Thought: ([^\n]*)"
ACTION_PATTERN = r"\n*(\d+)\. (\w+)\((.*)\)(\s*#\w+\n)?"
# $1 or ${1} -> 1
ID_PATTERN = r"\$\{?(\d+)\}?"
END_OF_PLAN = "<END_OF_PLAN>"
### Helper functions
def _ast_parse(arg: str) -> Any:
try:
return ast.literal_eval(arg)
except: # noqa
return arg
def _parse_llm_compiler_action_args(args: str, tool: Union[str, BaseTool]) -> Union[tuple, dict[str, Any]]:
"""Parse arguments from a string."""
if args == "":
return ()
if isinstance(tool, str):
return ()
extracted_args = {}
tool_key = None
prev_idx = None
for key in tool.args.keys():
# Split if present
if f"{key}=" in args:
idx = args.index(f"{key}=")
if prev_idx is not None:
extracted_args[tool_key] = _ast_parse(
args[prev_idx:idx].strip().rstrip(",")
)
args = args.split(f"{key}=", 1)[1]
tool_key = key
prev_idx = 0
if prev_idx is not None:
extracted_args[tool_key] = _ast_parse(
args[prev_idx:].strip().rstrip(",").rstrip(")")
)
return extracted_args
def default_dependency_rule(idx, args: str):
matches = re.findall(ID_PATTERN, args)
numbers = [int(match) for match in matches]
return idx in numbers
def _get_dependencies_from_graph(
idx: int, tool_name: str, args: Dict[str, Any]
) -> list[int]:
"""Get dependencies from a graph."""
if tool_name == "join":
return list(range(1, idx))
return [i for i in range(1, idx) if default_dependency_rule(i, str(args))]
class Task(TypedDict):
idx: int
tool: BaseTool
args: list
dependencies: Dict[str, list]
thought: Optional[str]
def instantiate_task(
tools: Sequence[BaseTool],
idx: int,
tool_name: str,
args: Union[str, Any],
thought: Optional[str] = None,
) -> Task:
if tool_name == "join":
tool = "join"
else:
try:
tool = tools[[tool.name for tool in tools].index(tool_name)]
except ValueError as e:
raise OutputParserException(f"Tool {tool_name} not found.") from e
tool_args = _parse_llm_compiler_action_args(args, tool)
dependencies = _get_dependencies_from_graph(idx, tool_name, tool_args)
return Task(
idx=idx,
tool=tool,
args=tool_args,
dependencies=dependencies,
thought=thought,
)
class LLMCompilerPlanParser(BaseTransformOutputParser[dict], extra="allow"):
"""Planning output parser."""
tools: List[BaseTool]
def _transform(self, input: Iterator[Union[str, BaseMessage]]) -> Iterator[Task]:
texts = []
# TODO: Cleanup tuple state tracking here.
thought = None
for chunk in input:
# Assume input is str. TODO: support vision/other formats
text = chunk if isinstance(chunk, str) else str(chunk.content)
for task, thought in self.ingest_token(text, texts, thought):
yield task
# Final possible task
if texts:
task, _ = self._parse_task("".join(texts), thought)
if task:
yield task
def parse(self, text: str) -> List[Task]:
return list(self._transform([text]))
def stream(
self,
input: str | BaseMessage,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> Iterator[Task]:
yield from self.transform([input], config, **kwargs)
def ingest_token(
self, token: str, buffer: List[str], thought: Optional[str]
) -> Iterator[Tuple[Optional[Task], str]]:
buffer.append(token)
if "\n" in token:
buffer_ = "".join(buffer).split("\n")
suffix = buffer_[-1]
for line in buffer_[:-1]:
task, thought = self._parse_task(line, thought)
if task:
yield task, thought
buffer.clear()
buffer.append(suffix)
def _parse_task(self, line: str, thought: Optional[str] = None):
task = None
if match := re.match(THOUGHT_PATTERN, line):
# Optionally, action can be preceded by a thought
thought = match.group(1)
elif match := re.match(ACTION_PATTERN, line):
# if action is parsed, return the task, and clear the buffer
idx, tool_name, args, _ = match.groups()
idx = int(idx)
task = instantiate_task(
tools=self.tools,
idx=idx,
tool_name=tool_name,
args=args,
thought=thought,
)
thought = None
# Else it is just dropped
return task, thought
| [
"langchain_core.exceptions.OutputParserException"
] | [((1688, 1716), 're.findall', 're.findall', (['ID_PATTERN', 'args'], {}), '(ID_PATTERN, args)\n', (1698, 1716), False, 'import re\n'), ((715, 736), 'ast.literal_eval', 'ast.literal_eval', (['arg'], {}), '(arg)\n', (731, 736), False, 'import ast\n'), ((4606, 4637), 're.match', 're.match', (['THOUGHT_PATTERN', 'line'], {}), '(THOUGHT_PATTERN, line)\n', (4614, 4637), False, 'import re\n'), ((2579, 2632), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['f"""Tool {tool_name} not found."""'], {}), "(f'Tool {tool_name} not found.')\n", (2600, 2632), False, 'from langchain_core.exceptions import OutputParserException\n'), ((4760, 4790), 're.match', 're.match', (['ACTION_PATTERN', 'line'], {}), '(ACTION_PATTERN, line)\n', (4768, 4790), False, 'import re\n')] |
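# A small usage sketch for LLMCompilerPlanParser above; the `search` tool and the plan text
# are hypothetical stand-ins for whatever tools and LLM output a real run would provide.
from langchain_core.tools import tool

@tool
def search(query: str) -> str:
    """Look up background information for a query."""
    return "stub result"

parser = LLMCompilerPlanParser(tools=[search])
plan_text = (
    "Thought: I need background information first.\n"
    '1. search(query="LLM compiler planning")\n'
    "2. join()\n"
)
tasks = parser.parse(plan_text)
# tasks -> two Task dicts: the search call (idx 1, args {"query": ...}) and join (idx 2, depends on [1])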
import re
import string
import traceback
from collections import Counter
import numpy as np
import pandas as pd
import tqdm
from langchain.evaluation.qa import QAEvalChain
from langchain.llms import OpenAI
from algos.PWS import PWS_Base, PWS_Extra
from algos.notool import CoT, IO
from algos.react import ReactBase
from nodes import LLMNode
import openai
def normalize_answer(s):
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
normalized_prediction = normalize_answer(prediction)
normalized_ground_truth = normalize_answer(ground_truth)
if normalized_prediction in ['yes', 'no', 'noanswer'] and normalized_prediction != normalized_ground_truth:
return 0
if normalized_ground_truth in ['yes', 'no', 'noanswer'] and normalized_prediction != normalized_ground_truth:
return 0
prediction_tokens = normalized_prediction.split()
ground_truth_tokens = normalized_ground_truth.split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def llm_accuracy_score(query, prediction, ground_truth):
data = [{
'query': query,
'answer': ground_truth,
}]
pred = [{
'query': query,
'answer': ground_truth,
'result': prediction,
}]
eval_chain = QAEvalChain.from_llm(OpenAI(
temperature=0,
))
graded_outputs = eval_chain.evaluate(data, pred)
return 1 if graded_outputs[0]['text'].strip() == 'CORRECT' else 0
class Evaluator:
def __init__(self, task, dataset, algo, maxtry=3):
assert task in ["hotpot_qa", "trivia_qa", "gsm8k", "physics_question", "disfl_qa",
"sports_understanding", "strategy_qa", "sotu_qa"]
assert isinstance(dataset, pd.DataFrame)
assert isinstance(algo, (PWS_Base, PWS_Extra, ReactBase, IO, CoT))
self.task = task
self.dataset = dataset
self.algo = algo
self.maxtry = maxtry
self.failed_response = self._failed_response()
self.eval_data = self._initialize_eval_dict()
def run(self):
print("\n******************* Start Evaluation *******************\n")
if self.task in ["hotpot_qa", "sotu_qa"]:
for i in tqdm.tqdm(range(len(self.dataset))):
question = self.dataset["question"][i]
label = self.dataset["answer"][i]
for _ in range(self.maxtry):
try:
response = self.algo.run(question)
break
except Exception:
traceback.print_exc()
response = self.failed_response
self._update_eval_dict(question, label, response)
elif self.task == "fever":
for i in tqdm.tqdm(range(len(self.dataset))):
question = self.dataset["claim"][i]
label = self.dataset["label"][i]
for _ in range(self.maxtry):
try:
response = self.algo.run(question)
break
except:
response = self.failed_response
self._update_eval_dict(question, label, response)
elif self.task == "trivia_qa":
for i in tqdm.tqdm(range(len(self.dataset))):
question = self.dataset["question"][i]
label = self.dataset["answer"][i]["value"]
for _ in range(self.maxtry):
try:
response = self.algo.run(question)
break
except:
response = self.failed_response
self._update_eval_dict(question, label, response)
elif self.task == "gsm8k":
for i in tqdm.tqdm(range(len(self.dataset))):
question = self.dataset["question"][i]
label = self.dataset["answer"][i].split("#### ")[1]
for _ in range(self.maxtry):
try:
response = self.algo.run(question)
break
except:
response = self.failed_response
self._update_eval_dict(question, label, response)
elif self.task in ["physics_question", "sports_understanding", "strategy_qa"]:
for i in tqdm.tqdm(range(len(self.dataset))):
question = self.dataset["input"][i]
label = self.dataset["target"][i]
for _ in range(self.maxtry):
try:
response = self.algo.run(question)
break
except:
response = self.failed_response
self._update_eval_dict(question, label, response)
else:
raise NotImplementedError
return self._get_avg_results(), self.eval_data
def _initialize_eval_dict(self):
data = {}
for d in ["label", "preds", "em", "f1", "acc", "wall_time", "total_tokens", "total_cost", "steps", "token_cost",
"tool_cost", "planner_log", "solver_log"]:
data[d] = []
return data
def _update_eval_dict(self, question, label, response):
print("=== Planner ===" + '\n\n' + response.get("planner_log", '') + '\n' + "=== Solver ===" + '\n\n' + response.get("solver_log", ''))
pred = self._parse_prediction(response["output"])
self.eval_data["label"] += [label]
self.eval_data["preds"] += [pred]
self.eval_data["em"] += [self.get_metrics(question, label, pred)["em"]]
self.eval_data["f1"] += [self.get_metrics(question, label, pred)["f1"]]
self.eval_data["acc"] += [self.get_metrics(question, label, pred)["acc"]]
self.eval_data["wall_time"] += [response["wall_time"]]
self.eval_data["total_tokens"] += [response["total_tokens"]]
self.eval_data["total_cost"] += [response["total_cost"]]
self.eval_data["steps"] += [response["steps"]]
self.eval_data["token_cost"] += [response["token_cost"]]
self.eval_data["tool_cost"] += [response["tool_cost"]]
LLMNode.refresh('succ' if self.get_metrics(question, label, pred)["acc"] else 'fail')
if "planner_log" in response:
self.eval_data["planner_log"] += [response["planner_log"]]
if "solver_log" in response:
self.eval_data["solver_log"] += [response["solver_log"]]
def _get_avg_results(self):
result = {}
result["avg_em"] = np.nanmean(self.eval_data["em"])
result["avg_f1"] = np.nanmean(self.eval_data["f1"])
result["avg_acc"] = np.nanmean(self.eval_data["acc"])
result["avg_wall_time"] = np.nanmean(self.eval_data["wall_time"])
result["avg_total_tokens"] = np.nanmean(self.eval_data["total_tokens"])
result["avg_total_cost"] = np.nanmean(self.eval_data["total_cost"])
result["avg_steps"] = np.nanmean(self.eval_data["steps"])
result["avg_token_cost"] = np.nanmean(self.eval_data["token_cost"])
result["avg_tool_cost"] = np.nanmean(self.eval_data["tool_cost"])
return result
def get_metrics(self, query, label, pred):
if pred is None:
return {'em': 0, 'f1': 0}
norm_label = normalize_answer(label)
norm_pred = normalize_answer(pred)
em = (norm_pred == norm_label)
f1 = f1_score(norm_pred, norm_label)
acc = llm_accuracy_score(query, pred, label)
return {'em': em, 'f1': f1, 'acc': acc}
def _parse_prediction(self, output):
if isinstance(self.algo, IO):
return str(output).strip("\n")
elif isinstance(self.algo, CoT):
return str(output).split("\n")[-1].replace("Answer:", "")
elif isinstance(self.algo, ReactBase):
return str(output).strip("\n")
elif isinstance(self.algo, PWS_Base):
return str(output).strip("\n")
elif isinstance(self.algo, PWS_Extra):
return str(output).strip("\n")
def _failed_response(self):
        response = {}
        for key in ["input", "output", "wall_time", "total_tokens", "total_cost", "steps", "token_cost", "tool_cost"]:
            response[key] = np.nan
        return response
| [
"langchain.llms.OpenAI"
] | [((432, 469), 're.sub', 're.sub', (['"""\\\\b(a|an|the)\\\\b"""', '""" """', 'text'], {}), "('\\\\b(a|an|the)\\\\b', ' ', text)\n", (438, 469), False, 'import re\n'), ((1337, 1363), 'collections.Counter', 'Counter', (['prediction_tokens'], {}), '(prediction_tokens)\n', (1344, 1363), False, 'from collections import Counter\n'), ((1366, 1394), 'collections.Counter', 'Counter', (['ground_truth_tokens'], {}), '(ground_truth_tokens)\n', (1373, 1394), False, 'from collections import Counter\n'), ((1933, 1954), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (1939, 1954), False, 'from langchain.llms import OpenAI\n'), ((7211, 7243), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['em']"], {}), "(self.eval_data['em'])\n", (7221, 7243), True, 'import numpy as np\n'), ((7271, 7303), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['f1']"], {}), "(self.eval_data['f1'])\n", (7281, 7303), True, 'import numpy as np\n'), ((7332, 7365), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['acc']"], {}), "(self.eval_data['acc'])\n", (7342, 7365), True, 'import numpy as np\n'), ((7400, 7439), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['wall_time']"], {}), "(self.eval_data['wall_time'])\n", (7410, 7439), True, 'import numpy as np\n'), ((7477, 7519), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['total_tokens']"], {}), "(self.eval_data['total_tokens'])\n", (7487, 7519), True, 'import numpy as np\n'), ((7555, 7595), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['total_cost']"], {}), "(self.eval_data['total_cost'])\n", (7565, 7595), True, 'import numpy as np\n'), ((7626, 7661), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['steps']"], {}), "(self.eval_data['steps'])\n", (7636, 7661), True, 'import numpy as np\n'), ((7697, 7737), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['token_cost']"], {}), "(self.eval_data['token_cost'])\n", (7707, 7737), True, 'import numpy as np\n'), ((7772, 7811), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['tool_cost']"], {}), "(self.eval_data['tool_cost'])\n", (7782, 7811), True, 'import numpy as np\n'), ((3209, 3230), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3228, 3230), False, 'import traceback\n')] |
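# Quick illustration of the scoring helpers defined above (llm_accuracy_score is omitted
# here because it calls OpenAI); the strings are hypothetical.
pred = "The Eiffel Tower is in Paris."
gold = "Paris"
print(normalize_answer(pred))  # -> "eiffel tower is in paris"
print(f1_score(pred, gold))     # -> 0.333... (one shared token, precision 0.2, recall 1.0)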
import json
from langchain.schema.messages import SystemMessage
from langchain.output_parsers.json import parse_partial_json
from creator.code_interpreter import CodeInterpreter, language_map
from creator.config.library import config
from creator.utils import load_system_prompt, remove_tips
from creator.llm.llm_creator import create_llm
from .base import BaseAgent
DEBUGGING_TIPS = load_system_prompt(config.tips_for_testing_prompt_path)
VERIFY_TIPS = load_system_prompt(config.tips_for_veryfy_prompt_path)
class CodeTesterAgent(BaseAgent):
total_tries: int = 10
output_key: str = "output"
allow_user_confirm: bool = config.run_human_confirm
@property
def _chain_type(self):
return "CodeTesterAgent"
def postprocess_mesasge(self, message):
function_call = message.additional_kwargs.get("function_call", None)
if function_call is not None:
name = function_call.get("name", "run_code")
arguments = function_call.get("arguments", "{}")
arguments_json = parse_partial_json(arguments)
if name not in ("run_code", "test_summary") or not arguments_json:
language = name if name in language_map else "python"
function_call = {
"name": "run_code",
"arguments": json.dumps({"language": language, "code": arguments}, ensure_ascii=False)
}
message.additional_kwargs["function_call"] = function_call
return message
def messages_hot_fix(self, langchain_messages):
langchain_messages = remove_tips(langchain_messages)
tool_result = langchain_messages[-1].content
tool_result = parse_partial_json(tool_result)
if len(tool_result.get("stderr", "")) > 0 and "error" in tool_result["stderr"].lower(): # add tips for debugging
langchain_messages.append(SystemMessage(content=DEBUGGING_TIPS))
else:
langchain_messages.append(SystemMessage(content=VERIFY_TIPS))
return langchain_messages
def parse_output(self, messages):
function_call = messages[-1].get("function_call", None)
test_summary = None
if function_call is not None:
function_name = function_call.get("name", "")
arguments = parse_partial_json(function_call.get("arguments", "{}"))
if function_name == "test_summary":
test_summary = arguments.get("test_cases", [])
messages = messages[:-1]
return {
"output":{
"messages": messages,
"test_summary": test_summary,
}
}
def create_code_tester_agent(llm):
template = load_system_prompt(config.tester_agent_prompt_path)
tool = CodeInterpreter()
code_interpreter_function_schema = tool.to_function_schema()
with open(config.testsummary_function_schema_path, encoding="utf-8") as f:
test_summary_function_schema = json.load(f)
chain = CodeTesterAgent(
llm=llm,
system_template=template,
function_schemas=[code_interpreter_function_schema, test_summary_function_schema],
tools=[tool],
verbose=False,
)
return chain
llm = create_llm(config)
code_tester_agent = create_code_tester_agent(llm=llm)
| [
"langchain.output_parsers.json.parse_partial_json",
"langchain.schema.messages.SystemMessage"
] | [((389, 444), 'creator.utils.load_system_prompt', 'load_system_prompt', (['config.tips_for_testing_prompt_path'], {}), '(config.tips_for_testing_prompt_path)\n', (407, 444), False, 'from creator.utils import load_system_prompt, remove_tips\n'), ((459, 513), 'creator.utils.load_system_prompt', 'load_system_prompt', (['config.tips_for_veryfy_prompt_path'], {}), '(config.tips_for_veryfy_prompt_path)\n', (477, 513), False, 'from creator.utils import load_system_prompt, remove_tips\n'), ((3253, 3271), 'creator.llm.llm_creator.create_llm', 'create_llm', (['config'], {}), '(config)\n', (3263, 3271), False, 'from creator.llm.llm_creator import create_llm\n'), ((2727, 2778), 'creator.utils.load_system_prompt', 'load_system_prompt', (['config.tester_agent_prompt_path'], {}), '(config.tester_agent_prompt_path)\n', (2745, 2778), False, 'from creator.utils import load_system_prompt, remove_tips\n'), ((2790, 2807), 'creator.code_interpreter.CodeInterpreter', 'CodeInterpreter', ([], {}), '()\n', (2805, 2807), False, 'from creator.code_interpreter import CodeInterpreter, language_map\n'), ((1607, 1638), 'creator.utils.remove_tips', 'remove_tips', (['langchain_messages'], {}), '(langchain_messages)\n', (1618, 1638), False, 'from creator.utils import load_system_prompt, remove_tips\n'), ((1714, 1745), 'langchain.output_parsers.json.parse_partial_json', 'parse_partial_json', (['tool_result'], {}), '(tool_result)\n', (1732, 1745), False, 'from langchain.output_parsers.json import parse_partial_json\n'), ((2992, 3004), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3001, 3004), False, 'import json\n'), ((1045, 1074), 'langchain.output_parsers.json.parse_partial_json', 'parse_partial_json', (['arguments'], {}), '(arguments)\n', (1063, 1074), False, 'from langchain.output_parsers.json import parse_partial_json\n'), ((1906, 1943), 'langchain.schema.messages.SystemMessage', 'SystemMessage', ([], {'content': 'DEBUGGING_TIPS'}), '(content=DEBUGGING_TIPS)\n', (1919, 1943), False, 'from langchain.schema.messages import SystemMessage\n'), ((1997, 2031), 'langchain.schema.messages.SystemMessage', 'SystemMessage', ([], {'content': 'VERIFY_TIPS'}), '(content=VERIFY_TIPS)\n', (2010, 2031), False, 'from langchain.schema.messages import SystemMessage\n'), ((1331, 1404), 'json.dumps', 'json.dumps', (["{'language': language, 'code': arguments}"], {'ensure_ascii': '(False)'}), "({'language': language, 'code': arguments}, ensure_ascii=False)\n", (1341, 1404), False, 'import json\n')] |
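# Sketch of what parse_output above returns when the final assistant message carries a
# test_summary function call; the message structure shown here is an assumption for illustration.
final_messages = [
    {"role": "assistant", "content": "Ran the generated unit tests."},
    {
        "role": "assistant",
        "function_call": {
            "name": "test_summary",
            "arguments": '{"test_cases": [{"name": "adds two ints", "passed": true}]}',
        },
    },
]
result = code_tester_agent.parse_output(final_messages)
# result["output"]["test_summary"] -> [{"name": "adds two ints", "passed": True}]
# result["output"]["messages"]     -> final_messages with the summary call stripped off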
import requests
from typing import Any, Dict, Optional
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain.chains import APIChain
from langchain.prompts import BasePromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from .requests_l402 import RequestsL402Wrapper
from .requests_l402 import ResponseTextWrapper
from lightning import LightningNode
class L402APIChain(APIChain):
requests_wrapper: Any
@classmethod
def from_llm_and_api_docs(
cls,
llm: BaseLanguageModel,
api_docs: str,
headers: Optional[dict] = None,
api_url_prompt: BasePromptTemplate = API_URL_PROMPT,
api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT,
lightning_node = None,
**kwargs: Any,
) -> APIChain:
"""Load chain from just an LLM and the api docs."""
requests_L402 = RequestsL402Wrapper(lightning_node, requests)
lang_chain_request_L402 = ResponseTextWrapper(
requests_wrapper=requests_L402,
)
get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
return cls(
api_request_chain=get_request_chain,
api_answer_chain=get_answer_chain,
requests_wrapper=lang_chain_request_L402,
api_docs=api_docs,
**kwargs,
)
| [
"langchain.chains.llm.LLMChain"
] | [((1139, 1179), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_url_prompt'}), '(llm=llm, prompt=api_url_prompt)\n', (1147, 1179), False, 'from langchain.chains.llm import LLMChain\n'), ((1207, 1252), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_response_prompt'}), '(llm=llm, prompt=api_response_prompt)\n', (1215, 1252), False, 'from langchain.chains.llm import LLMChain\n')] |
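# Hypothetical wiring of the L402-aware API chain above; the API docs, the LightningNode
# construction, and the question are placeholders, and the run call is left commented out
# because it would hit the network and pay an invoice.
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
node = LightningNode()  # assumption: the local lightning module exposes a no-arg constructor
api_docs = "GET /price returns the current BTC price as a JSON object with a 'price' field."
chain = L402APIChain.from_llm_and_api_docs(llm=llm, api_docs=api_docs, lightning_node=node)
# answer = chain.run("What is the current BTC price?")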
from langchain.agents.tools import Tool
from langchain.chains import LLMMathChain
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain_experimental.plan_and_execute import (
PlanAndExecute,
load_agent_executor,
load_chat_planner,
)
llm = OpenAI(temperature=0)
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
tools = [
Tool(
name="Calculator",
func=llm_math_chain.run,
description="useful for when you need to answer questions about math",
),
]
model = ChatOpenAI(temperature=0)
planner = load_chat_planner(model)
executor = load_agent_executor(model, tools, verbose=True)
agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)
if __name__ == "__main__":
agent.run(
"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?"
)
| [
"langchain.chains.LLMMathChain.from_llm",
"langchain.llms.OpenAI",
"langchain.chat_models.ChatOpenAI",
"langchain_experimental.plan_and_execute.load_chat_planner",
"langchain_experimental.plan_and_execute.PlanAndExecute",
"langchain.agents.tools.Tool",
"langchain_experimental.plan_and_execute.load_agent_executor"
] | [((292, 313), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (298, 313), False, 'from langchain.llms import OpenAI\n'), ((331, 375), 'langchain.chains.LLMMathChain.from_llm', 'LLMMathChain.from_llm', ([], {'llm': 'llm', 'verbose': '(True)'}), '(llm=llm, verbose=True)\n', (352, 375), False, 'from langchain.chains import LLMMathChain\n'), ((553, 578), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (563, 578), False, 'from langchain.chat_models import ChatOpenAI\n'), ((590, 614), 'langchain_experimental.plan_and_execute.load_chat_planner', 'load_chat_planner', (['model'], {}), '(model)\n', (607, 614), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((626, 673), 'langchain_experimental.plan_and_execute.load_agent_executor', 'load_agent_executor', (['model', 'tools'], {'verbose': '(True)'}), '(model, tools, verbose=True)\n', (645, 673), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((682, 746), 'langchain_experimental.plan_and_execute.PlanAndExecute', 'PlanAndExecute', ([], {'planner': 'planner', 'executor': 'executor', 'verbose': '(True)'}), '(planner=planner, executor=executor, verbose=True)\n', (696, 746), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((390, 514), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Calculator"""', 'func': 'llm_math_chain.run', 'description': '"""useful for when you need to answer questions about math"""'}), "(name='Calculator', func=llm_math_chain.run, description=\n 'useful for when you need to answer questions about math')\n", (394, 514), False, 'from langchain.agents.tools import Tool\n')] |
"""Functionality for loading chains."""
import json
from pathlib import Path
from typing import Any, Union
import yaml
from langchain.chains.api.base import APIChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.base import LLMBashChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.sql_database.base import SQLDatabaseChain
from langchain.chains.vector_db_qa.base import VectorDBQA
from langchain.llms.loading import load_llm, load_llm_from_config
from langchain.prompts.loading import load_prompt, load_prompt_from_config
from langchain.utilities.loading import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
"""Load LLM chain from config dict."""
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return LLMChain(llm=llm, prompt=prompt, **config)
def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
"""Load hypothetical document embedder chain from config dict."""
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "embeddings" in kwargs:
embeddings = kwargs.pop("embeddings")
else:
raise ValueError("`embeddings` must be present.")
return HypotheticalDocumentEmbedder(
llm_chain=llm_chain, base_embeddings=embeddings, **config
)
def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
else:
raise ValueError(
"One of `document_prompt` or `document_prompt_path` must be present."
)
return StuffDocumentsChain(
llm_chain=llm_chain, document_prompt=document_prompt, **config
)
def _load_map_reduce_documents_chain(
config: dict, **kwargs: Any
) -> MapReduceDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "combine_document_chain" in config:
combine_document_chain_config = config.pop("combine_document_chain")
combine_document_chain = load_chain_from_config(combine_document_chain_config)
elif "combine_document_chain_path" in config:
combine_document_chain = load_chain(config.pop("combine_document_chain_path"))
else:
raise ValueError(
"One of `combine_document_chain` or "
"`combine_document_chain_path` must be present."
)
if "collapse_document_chain" in config:
collapse_document_chain_config = config.pop("collapse_document_chain")
if collapse_document_chain_config is None:
collapse_document_chain = None
else:
collapse_document_chain = load_chain_from_config(
collapse_document_chain_config
)
elif "collapse_document_chain_path" in config:
collapse_document_chain = load_chain(config.pop("collapse_document_chain_path"))
return MapReduceDocumentsChain(
llm_chain=llm_chain,
combine_document_chain=combine_document_chain,
collapse_document_chain=collapse_document_chain,
**config,
)
def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMBashChain(llm=llm, prompt=prompt, **config)
def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "create_draft_answer_prompt" in config:
create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt")
create_draft_answer_prompt = load_prompt_from_config(
create_draft_answer_prompt_config
)
elif "create_draft_answer_prompt_path" in config:
create_draft_answer_prompt = load_prompt(
config.pop("create_draft_answer_prompt_path")
)
if "list_assertions_prompt" in config:
list_assertions_prompt_config = config.pop("list_assertions_prompt")
list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config)
elif "list_assertions_prompt_path" in config:
list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path"))
if "check_assertions_prompt" in config:
check_assertions_prompt_config = config.pop("check_assertions_prompt")
check_assertions_prompt = load_prompt_from_config(
check_assertions_prompt_config
)
elif "check_assertions_prompt_path" in config:
check_assertions_prompt = load_prompt(
config.pop("check_assertions_prompt_path")
)
if "revised_answer_prompt" in config:
revised_answer_prompt_config = config.pop("revised_answer_prompt")
revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config)
elif "revised_answer_prompt_path" in config:
revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path"))
return LLMCheckerChain(
llm=llm,
create_draft_answer_prompt=create_draft_answer_prompt,
list_assertions_prompt=list_assertions_prompt,
check_assertions_prompt=check_assertions_prompt,
revised_answer_prompt=revised_answer_prompt,
**config,
)
def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMMathChain(llm=llm, prompt=prompt, **config)
def _load_map_rerank_documents_chain(
config: dict, **kwargs: Any
) -> MapRerankDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
return MapRerankDocumentsChain(llm_chain=llm_chain, **config)
def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return PALChain(llm=llm, prompt=prompt, **config)
def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
if "initial_llm_chain" in config:
initial_llm_chain_config = config.pop("initial_llm_chain")
initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
elif "initial_llm_chain_path" in config:
initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"))
else:
raise ValueError(
"One of `initial_llm_chain` or `initial_llm_chain_config` must be present."
)
if "refine_llm_chain" in config:
refine_llm_chain_config = config.pop("refine_llm_chain")
refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
elif "refine_llm_chain_path" in config:
refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"))
else:
raise ValueError(
"One of `refine_llm_chain` or `refine_llm_chain_config` must be present."
)
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
return RefineDocumentsChain(
initial_llm_chain=initial_llm_chain,
refine_llm_chain=refine_llm_chain,
document_prompt=document_prompt,
**config,
)
def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config)
def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain:
if "database" in kwargs:
database = kwargs.pop("database")
else:
raise ValueError("`database` must be present.")
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config)
def _load_vector_db_qa_with_sources_chain(
config: dict, **kwargs: Any
) -> VectorDBQAWithSourcesChain:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQAWithSourcesChain(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQA(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
if "api_request_chain" in config:
api_request_chain_config = config.pop("api_request_chain")
api_request_chain = load_chain_from_config(api_request_chain_config)
elif "api_request_chain_path" in config:
api_request_chain = load_chain(config.pop("api_request_chain_path"))
else:
raise ValueError(
"One of `api_request_chain` or `api_request_chain_path` must be present."
)
if "api_answer_chain" in config:
api_answer_chain_config = config.pop("api_answer_chain")
api_answer_chain = load_chain_from_config(api_answer_chain_config)
elif "api_answer_chain_path" in config:
api_answer_chain = load_chain(config.pop("api_answer_chain_path"))
else:
raise ValueError(
"One of `api_answer_chain` or `api_answer_chain_path` must be present."
)
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
else:
raise ValueError("`requests_wrapper` must be present.")
return APIChain(
api_request_chain=api_request_chain,
api_answer_chain=api_answer_chain,
requests_wrapper=requests_wrapper,
**config,
)
def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
return LLMRequestsChain(
llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config
)
else:
return LLMRequestsChain(llm_chain=llm_chain, **config)
type_to_loader_dict = {
"api_chain": _load_api_chain,
"hyde_chain": _load_hyde_chain,
"llm_chain": _load_llm_chain,
"llm_bash_chain": _load_llm_bash_chain,
"llm_checker_chain": _load_llm_checker_chain,
"llm_math_chain": _load_llm_math_chain,
"llm_requests_chain": _load_llm_requests_chain,
"pal_chain": _load_pal_chain,
"qa_with_sources_chain": _load_qa_with_sources_chain,
"stuff_documents_chain": _load_stuff_documents_chain,
"map_reduce_documents_chain": _load_map_reduce_documents_chain,
"map_rerank_documents_chain": _load_map_rerank_documents_chain,
"refine_documents_chain": _load_refine_documents_chain,
"sql_database_chain": _load_sql_database_chain,
"vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain,
"vector_db_qa": _load_vector_db_qa,
}
def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
"""Load chain from Config Dict."""
if "_type" not in config:
raise ValueError("Must specify a chain Type in config")
config_type = config.pop("_type")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} chain not supported")
chain_loader = type_to_loader_dict[config_type]
return chain_loader(config, **kwargs)
def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
"""Unified method for loading a chain from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs
):
return hub_result
else:
return _load_chain_from_file(path, **kwargs)
def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain:
"""Load chain from file."""
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == ".yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError("File type must be json or yaml")
# Override default 'verbose' and 'memory' for the chain
if "verbose" in kwargs:
config["verbose"] = kwargs.pop("verbose")
if "memory" in kwargs:
config["memory"] = kwargs.pop("memory")
# Load the chain from the config now.
return load_chain_from_config(config, **kwargs)
| [
"langchain.chains.sql_database.base.SQLDatabaseChain",
"langchain.prompts.loading.load_prompt_from_config",
"langchain.chains.qa_with_sources.base.QAWithSourcesChain",
"langchain.chains.pal.base.PALChain",
"langchain.chains.combine_documents.refine.RefineDocumentsChain",
"langchain.chains.llm.LLMChain",
"langchain.chains.hyde.base.HypotheticalDocumentEmbedder",
"langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain",
"langchain.chains.llm_checker.base.LLMCheckerChain",
"langchain.llms.loading.load_llm_from_config",
"langchain.chains.vector_db_qa.base.VectorDBQA",
"langchain.chains.llm_bash.base.LLMBashChain",
"langchain.utilities.loading.try_load_from_hub",
"langchain.chains.llm_requests.LLMRequestsChain",
"langchain.chains.api.base.APIChain",
"langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain",
"langchain.chains.llm_math.base.LLMMathChain",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain",
"langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain"
] | [((2165, 2207), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (2173, 2207), False, 'from langchain.chains.llm import LLMChain\n'), ((2853, 2945), 'langchain.chains.hyde.base.HypotheticalDocumentEmbedder', 'HypotheticalDocumentEmbedder', ([], {'llm_chain': 'llm_chain', 'base_embeddings': 'embeddings'}), '(llm_chain=llm_chain, base_embeddings=\n embeddings, **config)\n', (2881, 2945), False, 'from langchain.chains.hyde.base import HypotheticalDocumentEmbedder\n'), ((3900, 3987), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_prompt': 'document_prompt'}), '(llm_chain=llm_chain, document_prompt=document_prompt,\n **config)\n', (3919, 3987), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((5552, 5711), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'combine_document_chain': 'combine_document_chain', 'collapse_document_chain': 'collapse_document_chain'}), '(llm_chain=llm_chain, combine_document_chain=\n combine_document_chain, collapse_document_chain=collapse_document_chain,\n **config)\n', (5575, 5711), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((6314, 6360), 'langchain.chains.llm_bash.base.LLMBashChain', 'LLMBashChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (6326, 6360), False, 'from langchain.chains.llm_bash.base import LLMBashChain\n'), ((8225, 8469), 'langchain.chains.llm_checker.base.LLMCheckerChain', 'LLMCheckerChain', ([], {'llm': 'llm', 'create_draft_answer_prompt': 'create_draft_answer_prompt', 'list_assertions_prompt': 'list_assertions_prompt', 'check_assertions_prompt': 'check_assertions_prompt', 'revised_answer_prompt': 'revised_answer_prompt'}), '(llm=llm, create_draft_answer_prompt=\n create_draft_answer_prompt, list_assertions_prompt=\n list_assertions_prompt, check_assertions_prompt=check_assertions_prompt,\n revised_answer_prompt=revised_answer_prompt, **config)\n', (8240, 8469), False, 'from langchain.chains.llm_checker.base import LLMCheckerChain\n'), ((9083, 9129), 'langchain.chains.llm_math.base.LLMMathChain', 'LLMMathChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (9095, 9129), False, 'from langchain.chains.llm_math.base import LLMMathChain\n'), ((9579, 9633), 'langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain', 'MapRerankDocumentsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (9602, 9633), False, 'from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain\n'), ((10285, 10327), 'langchain.chains.pal.base.PALChain', 'PALChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (10293, 10327), False, 'from langchain.chains.pal.base import PALChain\n'), ((11566, 11706), 'langchain.chains.combine_documents.refine.RefineDocumentsChain', 'RefineDocumentsChain', ([], {'initial_llm_chain': 'initial_llm_chain', 'refine_llm_chain': 'refine_llm_chain', 'document_prompt': 'document_prompt'}), '(initial_llm_chain=initial_llm_chain, refine_llm_chain=\n refine_llm_chain, document_prompt=document_prompt, **config)\n', (11586, 11706), False, 'from langchain.chains.combine_documents.refine import RefineDocumentsChain\n'), ((12349, 12426), 
'langchain.chains.qa_with_sources.base.QAWithSourcesChain', 'QAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain'}), '(combine_documents_chain=combine_documents_chain, **config)\n', (12367, 12426), False, 'from langchain.chains.qa_with_sources.base import QAWithSourcesChain\n'), ((13054, 13123), 'langchain.chains.sql_database.base.SQLDatabaseChain', 'SQLDatabaseChain', ([], {'database': 'database', 'llm': 'llm', 'prompt': 'prompt'}), '(database=database, llm=llm, prompt=prompt, **config)\n', (13070, 13123), False, 'from langchain.chains.sql_database.base import SQLDatabaseChain\n'), ((13905, 14019), 'langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain', 'VectorDBQAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain,\n vectorstore=vectorstore, **config)\n', (13931, 14019), False, 'from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain\n'), ((14787, 14886), 'langchain.chains.vector_db_qa.base.VectorDBQA', 'VectorDBQA', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain, vectorstore=\n vectorstore, **config)\n', (14797, 14886), False, 'from langchain.chains.vector_db_qa.base import VectorDBQA\n'), ((16019, 16149), 'langchain.chains.api.base.APIChain', 'APIChain', ([], {'api_request_chain': 'api_request_chain', 'api_answer_chain': 'api_answer_chain', 'requests_wrapper': 'requests_wrapper'}), '(api_request_chain=api_request_chain, api_answer_chain=\n api_answer_chain, requests_wrapper=requests_wrapper, **config)\n', (16027, 16149), False, 'from langchain.chains.api.base import APIChain\n'), ((1653, 1685), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (1673, 1685), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((1936, 1974), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (1959, 1974), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((3604, 3642), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (3627, 3642), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((5892, 5924), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (5912, 5924), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6174, 6212), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (6197, 6212), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((6517, 6549), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (6537, 6549), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6879, 6937), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['create_draft_answer_prompt_config'], {}), '(create_draft_answer_prompt_config)\n', (6902, 6937), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7285, 7339), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', 
(['list_assertions_prompt_config'], {}), '(list_assertions_prompt_config)\n', (7308, 7339), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7635, 7690), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['check_assertions_prompt_config'], {}), '(check_assertions_prompt_config)\n', (7658, 7690), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8025, 8078), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['revised_answer_prompt_config'], {}), '(revised_answer_prompt_config)\n', (8048, 8078), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8661, 8693), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (8681, 8693), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((8943, 8981), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (8966, 8981), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((9775, 9807), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (9795, 9807), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((10057, 10095), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (10080, 10095), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((11399, 11437), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (11422, 11437), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((12722, 12754), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (12742, 12754), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((13004, 13042), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (13027, 13042), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((16709, 16796), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain', 'requests_wrapper': 'requests_wrapper'}), '(llm_chain=llm_chain, requests_wrapper=requests_wrapper, **\n config)\n', (16725, 16796), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((16839, 16886), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (16855, 16886), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((18341, 18429), 'langchain.utilities.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_chain_from_file', '"""chains"""', "{'json', 'yaml'}"], {}), "(path, _load_chain_from_file, 'chains', {'json', 'yaml'},\n **kwargs)\n", (18358, 18429), False, 'from langchain.utilities.loading import try_load_from_hub\n'), ((18724, 18734), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (18728, 18734), False, 'from pathlib import Path\n'), ((18899, 18911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18908, 18911), False, 'import json\n'), ((19011, 19028), 'yaml.safe_load', 'yaml.safe_load', 
(['f'], {}), '(f)\n', (19025, 19028), False, 'import yaml\n')] |
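# A minimal sketch of the public entry point defined above; the path is hypothetical and
# must point at a serialized chain config (json or yaml) containing a "_type" key.
chain = load_chain("chains/my_llm_chain.json", verbose=True)
# The same loader can start from an in-memory dict instead:
# chain = load_chain_from_config({"_type": "llm_chain", "llm": {...}, "prompt": {...}})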
"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
class HuggingFaceEndpoint(LLM):
"""Wrapper around HuggingFaceHub Inference Endpoints.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceEndpoint
endpoint_url = (
"https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
)
hf = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
huggingfacehub_api_token="my-api-key"
)
"""
endpoint_url: str = ""
"""Endpoint URL to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.hf_api import HfApi
try:
HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=huggingfacehub_api_token,
).whoami()
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
values["huggingfacehub_api_token"] = huggingfacehub_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
# payload samples
parameter_payload = {"inputs": prompt, "parameters": _model_kwargs}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.huggingfacehub_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(
self.endpoint_url, headers=headers, json=parameter_payload
)
except requests.exceptions.RequestException as e: # This is the correct syntax
raise ValueError(f"Error raised by inference endpoint: {e}")
generated_text = response.json()
if "error" in generated_text:
raise ValueError(
f"Error raised by inference API: {generated_text['error']}"
)
if self.task == "text-generation":
# Text generation return includes the starter text.
text = generated_text[0]["generated_text"][len(prompt) :]
elif self.task == "text2text-generation":
text = generated_text[0]["generated_text"]
elif self.task == "summarization":
text = generated_text[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1661, 1677), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1675, 1677), False, 'from pydantic import Extra, root_validator\n'), ((1848, 1936), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""huggingfacehub_api_token"""', '"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "(values, 'huggingfacehub_api_token',\n 'HUGGINGFACEHUB_API_TOKEN')\n", (1868, 1936), False, 'from langchain.utils import get_from_dict_or_env\n'), ((4131, 4204), 'requests.post', 'requests.post', (['self.endpoint_url'], {'headers': 'headers', 'json': 'parameter_payload'}), '(self.endpoint_url, headers=headers, json=parameter_payload)\n', (4144, 4204), False, 'import requests\n'), ((5337, 5368), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (5356, 5368), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2055, 2127), 'huggingface_hub.hf_api.HfApi', 'HfApi', ([], {'endpoint': '"""https://huggingface.co"""', 'token': 'huggingfacehub_api_token'}), "(endpoint='https://huggingface.co', token=huggingfacehub_api_token)\n", (2060, 2127), False, 'from huggingface_hub.hf_api import HfApi\n')] |
"""Clear Weaviate index."""
import logging
import os
import weaviate
from langchain.embeddings import OpenAIEmbeddings
from langchain.indexes import SQLRecordManager, index
from langchain.vectorstores import Weaviate
logger = logging.getLogger(__name__)
WEAVIATE_URL = os.environ["WEAVIATE_URL"]
WEAVIATE_API_KEY = os.environ["WEAVIATE_API_KEY"]
RECORD_MANAGER_DB_URL = os.environ["RECORD_MANAGER_DB_URL"]
WEAVIATE_DOCS_INDEX_NAME = "LangChain_Combined_Docs_OpenAI_text_embedding_3_small"
def clear():
client = weaviate.Client(
url=WEAVIATE_URL,
auth_client_secret=weaviate.AuthApiKey(api_key=WEAVIATE_API_KEY),
)
vectorstore = Weaviate(
client=client,
index_name=WEAVIATE_DOCS_INDEX_NAME,
text_key="text",
embedding=OpenAIEmbeddings(),
by_text=False,
attributes=["source", "title"],
)
record_manager = SQLRecordManager(
f"weaviate/{WEAVIATE_DOCS_INDEX_NAME}", db_url=RECORD_MANAGER_DB_URL
)
record_manager.create_schema()
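    # Indexing an empty document list with cleanup="full" makes the record
    # manager treat every previously indexed record as stale and delete it,
    # which is what actually clears the Weaviate index.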
indexing_stats = index(
[],
record_manager,
vectorstore,
cleanup="full",
source_id_key="source",
)
logger.info("Indexing stats: ", indexing_stats)
logger.info(
"LangChain now has this many vectors: ",
client.query.aggregate(WEAVIATE_DOCS_INDEX_NAME).with_meta_count().do(),
)
if __name__ == "__main__":
clear()
| [
"langchain.embeddings.OpenAIEmbeddings",
"langchain.indexes.index",
"langchain.indexes.SQLRecordManager"
] | [((228, 255), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (245, 255), False, 'import logging\n'), ((893, 984), 'langchain.indexes.SQLRecordManager', 'SQLRecordManager', (['f"""weaviate/{WEAVIATE_DOCS_INDEX_NAME}"""'], {'db_url': 'RECORD_MANAGER_DB_URL'}), "(f'weaviate/{WEAVIATE_DOCS_INDEX_NAME}', db_url=\n RECORD_MANAGER_DB_URL)\n", (909, 984), False, 'from langchain.indexes import SQLRecordManager, index\n'), ((1051, 1129), 'langchain.indexes.index', 'index', (['[]', 'record_manager', 'vectorstore'], {'cleanup': '"""full"""', 'source_id_key': '"""source"""'}), "([], record_manager, vectorstore, cleanup='full', source_id_key='source')\n", (1056, 1129), False, 'from langchain.indexes import SQLRecordManager, index\n'), ((590, 635), 'weaviate.AuthApiKey', 'weaviate.AuthApiKey', ([], {'api_key': 'WEAVIATE_API_KEY'}), '(api_key=WEAVIATE_API_KEY)\n', (609, 635), False, 'import weaviate\n'), ((782, 800), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (798, 800), False, 'from langchain.embeddings import OpenAIEmbeddings\n')] |
import sys
from arbol import aprint, asection
from langchain.chains import LLMChain
from langchain.llms import BaseLLM
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from napari_chatgpt.chat_server.callbacks.callbacks_arbol_stdout import \
ArbolCallbackHandler
from napari_chatgpt.utils.openai.default_model import \
get_default_openai_model_name
from napari_chatgpt.utils.python.installed_packages import \
installed_package_list
from napari_chatgpt.utils.strings.extract_code import extract_code_from_markdown
_add_comments_prompt = f"""
**Context:**
You are an expert Python coder specialised in documenting code, writing docstrings, and inferring type hints.
You can understand the purpose of complex code and write detailed explanations for it.
**Task:**
You are given a piece of Python code. Your task is to add explanatory comments, detailed docstrings and correct type hints to the code.
The answer should be the code with comments, docstrings and type hints added. The functionality of the code should remain the same. Make sure we have the right answer.
**Code Semantics:**
- Do not change the ideas, purpose, semantics, implementation details, nor calculations present in the code.
- Only add or amend comments, docstrings and type hints.
- Do not introduce new functions, methods, classes, types, or variables.
- Do not change the existing code structure, indentation, or formatting.
**Docstrings:**
- When adding docstrings to methods and functions please use the NumPy/SciPy docstring format.
- Docstrings should contain a summary, parameters, and return type.
- Use your extensive and expert knowledge in data science, image processing and analysis to explain the purpose of functions in the docstring summary.
- If the code does not define any function, that is OK, just add a general docstring at the top of the code.
**Comments:**
- Add comments to explain the purpose of the code, the purpose of the functions, the purpose of the variables, and the purpose of the different parts of the code.
- About existing comments: do not trust existing comments, use the code as the 'ground truth'.
- Existing comments and docstrings that explain the code might not actually correspond to the code.
- Make sure to understand the code so that your comments really explain and correspond to the code.
**Types:**
- Please use the correct type hints for function parameters and return types.
- If you are not sure about the type, please use the 'Any' type.
- Do not define types, only use built-in types or types from the typing module. Do not forget to add import statements for the types you use.
**Context:**
- The code is written against Python version: {sys.version.split()[0]}.
- Here is the list of installed packages: {'{installed_packages}'}.
**Code:**
```python
{'{code}'}
```
**Commented, documented, and type annotated code in markdown format:**
"""
def add_comments(code: str,
llm: BaseLLM = None,
model_name: str = None,
verbose: bool = False) -> str:
with(asection(
f'Automatically adds comments, docstrings and types for code of length: {len(code)}')):
try:
# Cleanup code:
code = code.strip()
# If code is empty, nothing to improve!
if len(code) == 0:
return ''
aprint(f'Input code:\n{code}')
# Instantiates LLM if needed:
llm = llm or ChatOpenAI(model_name=model_name or get_default_openai_model_name(),
temperature=0)
# Make prompt template:
prompt_template = PromptTemplate(template=_add_comments_prompt,
input_variables=["code", 'installed_packages'])
# Instantiate chain:
chain = LLMChain(
prompt=prompt_template,
llm=llm,
verbose=verbose,
callbacks=[ArbolCallbackHandler('Comment Code')]
)
# List of installed packages:
package_list = installed_package_list()
# Variable for prompt:
variables = {"code": code, 'installed_packages': ' '.join(package_list)}
# call LLM:
response = chain.invoke(variables)['text']
# Extract code from the response:
commented_code = extract_code_from_markdown(response)
# Cleanup:
commented_code = commented_code.strip()
return commented_code
except Exception as e:
import traceback
traceback.print_exc()
aprint(e)
return code
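# Example usage (a minimal sketch; assumes an OpenAI API key is configured so the
# default ChatOpenAI model can be instantiated):
# snippet = "def area(radius):\n    return 3.14159 * radius ** 2\n"
# print(add_comments(snippet))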
| [
"langchain_core.prompts.PromptTemplate"
] | [((2758, 2777), 'sys.version.split', 'sys.version.split', ([], {}), '()\n', (2775, 2777), False, 'import sys\n'), ((3414, 3447), 'arbol.aprint', 'aprint', (['f"""Input code:\n{code}"""'], {}), '(f"""Input code:\n{code}""")\n', (3420, 3447), False, 'from arbol import aprint, asection\n'), ((3700, 3797), 'langchain_core.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': '_add_comments_prompt', 'input_variables': "['code', 'installed_packages']"}), "(template=_add_comments_prompt, input_variables=['code',\n 'installed_packages'])\n", (3714, 3797), False, 'from langchain_core.prompts import PromptTemplate\n'), ((4150, 4174), 'napari_chatgpt.utils.python.installed_packages.installed_package_list', 'installed_package_list', ([], {}), '()\n', (4172, 4174), False, 'from napari_chatgpt.utils.python.installed_packages import installed_package_list\n'), ((4452, 4488), 'napari_chatgpt.utils.strings.extract_code.extract_code_from_markdown', 'extract_code_from_markdown', (['response'], {}), '(response)\n', (4478, 4488), False, 'from napari_chatgpt.utils.strings.extract_code import extract_code_from_markdown\n'), ((4673, 4694), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4692, 4694), False, 'import traceback\n'), ((4707, 4716), 'arbol.aprint', 'aprint', (['e'], {}), '(e)\n', (4713, 4716), False, 'from arbol import aprint, asection\n'), ((4028, 4064), 'napari_chatgpt.chat_server.callbacks.callbacks_arbol_stdout.ArbolCallbackHandler', 'ArbolCallbackHandler', (['"""Comment Code"""'], {}), "('Comment Code')\n", (4048, 4064), False, 'from napari_chatgpt.chat_server.callbacks.callbacks_arbol_stdout import ArbolCallbackHandler\n'), ((3549, 3580), 'napari_chatgpt.utils.openai.default_model.get_default_openai_model_name', 'get_default_openai_model_name', ([], {}), '()\n', (3578, 3580), False, 'from napari_chatgpt.utils.openai.default_model import get_default_openai_model_name\n')] |
import logging
logging.basicConfig(level=logging.CRITICAL)
import os
from pathlib import Path
import openai
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from llama_index import (
GPTVectorStoreIndex,
LLMPredictor,
ServiceContext,
StorageContext,
download_loader,
load_index_from_storage,
)
from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file
load_dotenv()
openai.api_key = os.environ["OPENAI_API_KEY"]
history = []
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.618, model_name=models["gpt-3"], max_tokens=256))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size_limit=1024)
def make_index(file):
cls()
print("👀 Loading...")
PDFReader = download_loader("PDFReader")
loader = PDFReader()
documents = loader.load_data(file=Path(FILES) / file)
if os.path.exists(Path(CACHE) / file):
print("📚 Index found in cache")
return
else:
print("📚 Index not found in cache, creating it...")
index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
index.storage_context.persist(persist_dir=Path(CACHE) / file)
def chat(file_name, index):
while True:
prompt = input("\n😎 Prompt: ")
if prompt == "exit":
handle_exit()
elif prompt == "save":
handle_save(str(file_name), history)
query_engine = index.as_query_engine(response_mode="compact")
response = query_engine.query(prompt)
print("\n👻 Response: " + str(response))
history.append({"user": prompt, "response": str(response)})
def ask(file_name):
try:
print("👀 Loading...")
storage_context = StorageContext.from_defaults(persist_dir=Path(CACHE) / file_name)
index = load_index_from_storage(storage_context, service_context=service_context)
cls()
print("✅ Ready! Let's start the conversation")
print("ℹ️ Press Ctrl+C to exit")
chat(file_name, index)
except KeyboardInterrupt:
handle_exit()
if __name__ == "__main__":
initialize()
file = select_file()
if file:
file_name = Path(file).name
make_index(file_name)
ask(file_name)
else:
print("No files found")
handle_exit()
| [
"langchain.chat_models.ChatOpenAI"
] | [((16, 59), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.CRITICAL'}), '(level=logging.CRITICAL)\n', (35, 59), False, 'import logging\n'), ((444, 457), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (455, 457), False, 'from dotenv import load_dotenv\n'), ((644, 729), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size_limit': '(1024)'}), '(llm_predictor=llm_predictor, chunk_size_limit=1024\n )\n', (672, 729), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, download_loader, load_index_from_storage\n'), ((753, 758), 'utils.cls', 'cls', ([], {}), '()\n', (756, 758), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((802, 830), 'llama_index.download_loader', 'download_loader', (['"""PDFReader"""'], {}), "('PDFReader')\n", (817, 830), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, download_loader, load_index_from_storage\n'), ((2171, 2183), 'utils.initialize', 'initialize', ([], {}), '()\n', (2181, 2183), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((2195, 2208), 'utils.select_file', 'select_file', ([], {}), '()\n', (2206, 2208), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((551, 624), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.618)', 'model_name': "models['gpt-3']", 'max_tokens': '(256)'}), "(temperature=0.618, model_name=models['gpt-3'], max_tokens=256)\n", (561, 624), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1099, 1177), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (1133, 1177), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, download_loader, load_index_from_storage\n'), ((1871, 1944), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (1894, 1944), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, download_loader, load_index_from_storage\n'), ((1953, 1958), 'utils.cls', 'cls', ([], {}), '()\n', (1956, 1958), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((2361, 2374), 'utils.handle_exit', 'handle_exit', ([], {}), '()\n', (2372, 2374), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((937, 948), 'pathlib.Path', 'Path', (['CACHE'], {}), '(CACHE)\n', (941, 948), False, 'from pathlib import Path\n'), ((1374, 1387), 'utils.handle_exit', 'handle_exit', ([], {}), '()\n', (1385, 1387), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((2124, 2137), 'utils.handle_exit', 'handle_exit', ([], {}), '()\n', (2135, 2137), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((2242, 2252), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (2246, 2252), False, 'from pathlib import Path\n'), ((894, 905), 'pathlib.Path', 'Path', (['FILES'], {}), 
'(FILES)\n', (898, 905), False, 'from pathlib import Path\n'), ((1228, 1239), 'pathlib.Path', 'Path', (['CACHE'], {}), '(CACHE)\n', (1232, 1239), False, 'from pathlib import Path\n'), ((1830, 1841), 'pathlib.Path', 'Path', (['CACHE'], {}), '(CACHE)\n', (1834, 1841), False, 'from pathlib import Path\n')] |
"""This is the logic for ingesting PDF and DOCX files into LangChain."""
import os
from pathlib import Path
from langchain.text_splitter import RecursiveCharacterTextSplitter
from pdfminer.high_level import extract_text
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from dotenv import load_dotenv
import faiss, pickle, docx2txt
load_dotenv()
OPENAI_API_TOKEN = os.getenv('OPENAI_API_TOKEN')
# Here we extract the text from your pdf files.
files = list(Path("docs/").glob("**/*.pdf"))
count = 0
for file in files:
count += 1
filename = "docs/" + "pdf" + str(count) + ".txt"
text = extract_text(file)
with open(filename, 'w') as f:
f.write(text)
# Here we extract the text from your docx files.
files = list(Path("docs/").glob("**/*.docx"))
count = 0
for file in files:
count += 1
filename = "docs/" + "docx" + str(count) + ".txt"
text = docx2txt.process(file)
with open(filename, 'w') as f:
f.write(text)
# Here we load in the data from the text files created above.
ps = list(Path("docs/").glob("**/*.txt"))
data = []
sources = []
for p in ps:
with open(p) as f:
data.append(f.read())
sources.append(p)
# Here we split the documents, as needed, into smaller chunks.
# We do this due to the context limits of the LLMs.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=500, length_function=len)
docs = []
metadatas = []
for i, d in enumerate(data):
splits = text_splitter.split_text(d)
docs.extend(splits)
metadatas.extend([{"source": sources[i]}] * len(splits))
# Here we create a vector store from the documents and save it to disk.
store = FAISS.from_texts(docs, OpenAIEmbeddings(), metadatas=metadatas)
faiss.write_index(store.index, "stax_docs.index")
store.index = None
with open("stax_faiss_store.pkl", "wb") as f:
pickle.dump(store, f)
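# A minimal sketch of loading the saved store back later (assumes the two files
# written above exist and an OpenAI API key is available for the embeddings):
# index = faiss.read_index("stax_docs.index")
# with open("stax_faiss_store.pkl", "rb") as f:
#     store = pickle.load(f)
# store.index = index
# print(store.similarity_search("What services does Stax offer?", k=4))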
| [
"langchain.embeddings.OpenAIEmbeddings",
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((374, 387), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (385, 387), False, 'from dotenv import load_dotenv\n'), ((408, 437), 'os.getenv', 'os.getenv', (['"""OPENAI_API_TOKEN"""'], {}), "('OPENAI_API_TOKEN')\n", (417, 437), False, 'import os\n'), ((1354, 1445), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1500)', 'chunk_overlap': '(500)', 'length_function': 'len'}), '(chunk_size=1500, chunk_overlap=500,\n length_function=len)\n', (1384, 1445), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1770, 1819), 'faiss.write_index', 'faiss.write_index', (['store.index', '"""stax_docs.index"""'], {}), "(store.index, 'stax_docs.index')\n", (1787, 1819), False, 'import faiss, pickle, docx2txt\n'), ((641, 659), 'pdfminer.high_level.extract_text', 'extract_text', (['file'], {}), '(file)\n', (653, 659), False, 'from pdfminer.high_level import extract_text\n'), ((924, 946), 'docx2txt.process', 'docx2txt.process', (['file'], {}), '(file)\n', (940, 946), False, 'import faiss, pickle, docx2txt\n'), ((1729, 1747), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1745, 1747), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1889, 1910), 'pickle.dump', 'pickle.dump', (['store', 'f'], {}), '(store, f)\n', (1900, 1910), False, 'import faiss, pickle, docx2txt\n'), ((501, 514), 'pathlib.Path', 'Path', (['"""docs/"""'], {}), "('docs/')\n", (505, 514), False, 'from pathlib import Path\n'), ((782, 795), 'pathlib.Path', 'Path', (['"""docs/"""'], {}), "('docs/')\n", (786, 795), False, 'from pathlib import Path\n'), ((1078, 1091), 'pathlib.Path', 'Path', (['"""docs/"""'], {}), "('docs/')\n", (1082, 1091), False, 'from pathlib import Path\n')] |
import os
from langchain.llms.bedrock import Bedrock
from langchain import PromptTemplate
def get_llm():
model_kwargs = {
"maxTokenCount": 1024,
"stopSequences": [],
"temperature": 0,
"topP": 0.9
}
llm = Bedrock(
# credentials_profile_name=os.environ.get("BWB_PROFILE_NAME"), #sets the profile name to use for AWS credentials (if not the default)
region_name=os.environ.get("BWB_REGION_NAME"), #sets the region name (if not the default)
endpoint_url=os.environ.get("BWB_ENDPOINT_URL"), #sets the endpoint URL (if necessary)
model_id="amazon.titan-tg1-large", #use the Anthropic Claude model
model_kwargs=model_kwargs) #configure the properties for Claude
return llm
def get_prompt(user_input, template):
prompt_template = PromptTemplate.from_template(template) #this will automatically identify the input variables for the template
prompt = prompt_template.format(user_input=user_input)
return prompt
def get_text_response(user_input, template): #text-to-text client function
llm = get_llm()
prompt = get_prompt(user_input, template)
return llm.predict(prompt) #return a response to the prompt
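# Example usage (a minimal sketch; the template text is a hypothetical placeholder,
# and the Bedrock region/endpoint environment variables above must be configured):
# summary_template = "Summarize the following text:\n{user_input}"
# print(get_text_response("Amazon Bedrock gives access to foundation models ...", summary_template))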
| [
"langchain.PromptTemplate.from_template"
] | [((844, 882), 'langchain.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (872, 882), False, 'from langchain import PromptTemplate\n'), ((437, 470), 'os.environ.get', 'os.environ.get', (['"""BWB_REGION_NAME"""'], {}), "('BWB_REGION_NAME')\n", (451, 470), False, 'import os\n'), ((536, 570), 'os.environ.get', 'os.environ.get', (['"""BWB_ENDPOINT_URL"""'], {}), "('BWB_ENDPOINT_URL')\n", (550, 570), False, 'import os\n')] |
from langchain import PromptTemplate, LLMChain
from langchain.document_loaders import TextLoader
from langchain.embeddings import LlamaCppEmbeddings
from langchain.llms import GPT4All
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.callbacks.base import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores.faiss import FAISS
# SCRIPT INFO:
#
# This script allows you to create a vectorstore from a file and query it with a question (hard coded).
#
# It shows how you could send questions to a GPT4All custom knowledge base and receive answers.
#
# If you want a chat style interface using a similar custom knowledge base, you can use the custom_chatbot.py script provided.
# Setup
gpt4all_path = './models/gpt4all-converted.bin'
llama_path = './models/ggml-model-q4_0.bin'
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
loader = TextLoader('./docs/shortened_sotu.txt')
embeddings = LlamaCppEmbeddings(model_path=llama_path)
llm = GPT4All(model=gpt4all_path, callback_manager=callback_manager, verbose=True)
# Split text
def split_chunks(sources):
chunks = []
splitter = RecursiveCharacterTextSplitter(chunk_size=256, chunk_overlap=32)
for chunk in splitter.split_documents(sources):
chunks.append(chunk)
return chunks
def create_index(chunks):
texts = [doc.page_content for doc in chunks]
metadatas = [doc.metadata for doc in chunks]
search_index = FAISS.from_texts(texts, embeddings, metadatas=metadatas)
return search_index
def similarity_search(query, index):
matched_docs = index.similarity_search(query, k=4)
sources = []
for doc in matched_docs:
sources.append(
{
"page_content": doc.page_content,
"metadata": doc.metadata,
}
)
return matched_docs, sources
# Create Index
# docs = loader.load()
# chunks = split_chunks(docs)
# index = create_index(chunks)
# Save Index (use this to save the index for later use)
# Comment the line below after running once successfully (IMPORTANT)
# index.save_local("state_of_the_union_index")
# Load Index (use this to load the index from a file, eg on your second time running things and beyond)
# Uncomment the line below after running once successfully (IMPORTANT)
index = FAISS.load_local("./full_sotu_index", embeddings)
# Set your query here manually
question = "Summarize the comments about NATO and its purpose."
matched_docs, sources = similarity_search(question, index)
template = """
Please use the following context to answer questions.
Context: {context}
---
Question: {question}
Answer: Let's think step by step."""
context = "\n".join([doc.page_content for doc in matched_docs])
prompt = PromptTemplate(template=template, input_variables=["context", "question"]).partial(context=context)
llm_chain = LLMChain(prompt=prompt, llm=llm)
print(llm_chain.run(question)) | [
"langchain.llms.GPT4All",
"langchain.LLMChain",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler",
"langchain.document_loaders.TextLoader",
"langchain.vectorstores.faiss.FAISS.load_local",
"langchain.vectorstores.faiss.FAISS.from_texts",
"langchain.embeddings.LlamaCppEmbeddings",
"langchain.PromptTemplate"
] | [((968, 1007), 'langchain.document_loaders.TextLoader', 'TextLoader', (['"""./docs/shortened_sotu.txt"""'], {}), "('./docs/shortened_sotu.txt')\n", (978, 1007), False, 'from langchain.document_loaders import TextLoader\n'), ((1021, 1062), 'langchain.embeddings.LlamaCppEmbeddings', 'LlamaCppEmbeddings', ([], {'model_path': 'llama_path'}), '(model_path=llama_path)\n', (1039, 1062), False, 'from langchain.embeddings import LlamaCppEmbeddings\n'), ((1069, 1145), 'langchain.llms.GPT4All', 'GPT4All', ([], {'model': 'gpt4all_path', 'callback_manager': 'callback_manager', 'verbose': '(True)'}), '(model=gpt4all_path, callback_manager=callback_manager, verbose=True)\n', (1076, 1145), False, 'from langchain.llms import GPT4All\n'), ((2399, 2448), 'langchain.vectorstores.faiss.FAISS.load_local', 'FAISS.load_local', (['"""./full_sotu_index"""', 'embeddings'], {}), "('./full_sotu_index', embeddings)\n", (2415, 2448), False, 'from langchain.vectorstores.faiss import FAISS\n'), ((2941, 2973), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'llm'}), '(prompt=prompt, llm=llm)\n', (2949, 2973), False, 'from langchain import PromptTemplate, LLMChain\n'), ((1219, 1283), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(256)', 'chunk_overlap': '(32)'}), '(chunk_size=256, chunk_overlap=32)\n', (1249, 1283), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1529, 1585), 'langchain.vectorstores.faiss.FAISS.from_texts', 'FAISS.from_texts', (['texts', 'embeddings'], {'metadatas': 'metadatas'}), '(texts, embeddings, metadatas=metadatas)\n', (1545, 1585), False, 'from langchain.vectorstores.faiss import FAISS\n'), ((924, 956), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (954, 956), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((2829, 2903), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['context', 'question']"}), "(template=template, input_variables=['context', 'question'])\n", (2843, 2903), False, 'from langchain import PromptTemplate, LLMChain\n')] |
import os
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
class Database:
def __init__(self, directory):
self.embeddings = OpenAIEmbeddings()
self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
self.directory = directory
self.files = os.listdir(self.directory)
def list_files(self):
if len(self.files) == 0:
return None
return self.files
def save_or_add_to_transcripts(self, name, transcript):
persist_directory = os.path.join(self.directory, name)
if not os.path.exists(persist_directory):
os.makedirs(persist_directory)
transcript_file = os.path.join(persist_directory, "transcript.txt")
with open(transcript_file, 'a') as f:
f.write(transcript + "\n\n")
def load_db(self, name):
persist_directory = os.path.join(self.directory, name)
transcript_file = os.path.join(persist_directory, "transcript.txt")
with open(transcript_file, 'r') as f:
transcript = f.read()
split_docs = self.text_splitter.split_text(transcript)
db = Chroma.from_texts(texts=split_docs, embedding=self.embeddings)
return db
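# Example usage (a minimal sketch; assumes OPENAI_API_KEY is set and the given
# directory already exists):
# db = Database("transcripts")
# db.save_or_add_to_transcripts("sales_call", "Customer asked about pricing tiers ...")
# vectordb = db.load_db("sales_call")
# print(vectordb.similarity_search("What did the customer ask about?", k=2))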
| [
"langchain.vectorstores.Chroma.from_texts",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((251, 269), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (267, 269), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((299, 363), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(50)'}), '(chunk_size=500, chunk_overlap=50)\n', (329, 363), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((420, 446), 'os.listdir', 'os.listdir', (['self.directory'], {}), '(self.directory)\n', (430, 446), False, 'import os\n'), ((646, 680), 'os.path.join', 'os.path.join', (['self.directory', 'name'], {}), '(self.directory, name)\n', (658, 680), False, 'import os\n'), ((800, 849), 'os.path.join', 'os.path.join', (['persist_directory', '"""transcript.txt"""'], {}), "(persist_directory, 'transcript.txt')\n", (812, 849), False, 'import os\n'), ((996, 1030), 'os.path.join', 'os.path.join', (['self.directory', 'name'], {}), '(self.directory, name)\n', (1008, 1030), False, 'import os\n'), ((1057, 1106), 'os.path.join', 'os.path.join', (['persist_directory', '"""transcript.txt"""'], {}), "(persist_directory, 'transcript.txt')\n", (1069, 1106), False, 'import os\n'), ((1263, 1325), 'langchain.vectorstores.Chroma.from_texts', 'Chroma.from_texts', ([], {'texts': 'split_docs', 'embedding': 'self.embeddings'}), '(texts=split_docs, embedding=self.embeddings)\n', (1280, 1325), False, 'from langchain.vectorstores import Chroma\n'), ((696, 729), 'os.path.exists', 'os.path.exists', (['persist_directory'], {}), '(persist_directory)\n', (710, 729), False, 'import os\n'), ((743, 773), 'os.makedirs', 'os.makedirs', (['persist_directory'], {}), '(persist_directory)\n', (754, 773), False, 'import os\n')] |
import os
import re
import time
from typing import Any
from dotenv import load_dotenv
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import LLMResult
from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
CHAT_UPDATE_INTERVAL_SEC = 1
load_dotenv()
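# Environment variables used below: SLACK_SIGNING_SECRET, SLACK_BOT_TOKEN,
# SLACK_APP_TOKEN, OPENAI_API_MODEL and OPENAI_API_TEMPERATURE (loaded from .env).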
# Initialize the app with the bot token
app = App(
signing_secret=os.environ["SLACK_SIGNING_SECRET"],
token=os.environ["SLACK_BOT_TOKEN"],
process_before_response=True,
)
class SlackStreamingCallbackHandler(BaseCallbackHandler):
last_send_time = time.time()
message = ""
def __init__(self, channel, ts):
self.channel = channel
self.ts = ts
def on_llm_new_token(self, token: str, **kwargs) -> None:
self.message += token
now = time.time()
if now - self.last_send_time > CHAT_UPDATE_INTERVAL_SEC:
self.last_send_time = now
app.client.chat_update(
channel=self.channel, ts=self.ts, text=f"{self.message}..."
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:
app.client.chat_update(channel=self.channel, ts=self.ts, text=self.message)
@app.event("app_mention")
def handle_mention(event, say):
channel = event["channel"]
thread_ts = event["ts"]
message = re.sub("<@.*>", "", event["text"])
result = say("\n\nTyping...", thread_ts=thread_ts)
ts = result["ts"]
callback = SlackStreamingCallbackHandler(channel=channel, ts=ts)
llm = ChatOpenAI(
model_name=os.environ["OPENAI_API_MODEL"],
temperature=os.environ["OPENAI_API_TEMPERATURE"],
streaming=True,
callbacks=[callback],
)
llm.predict(message)
# Start the app using the Socket Mode handler
if __name__ == "__main__":
SocketModeHandler(app, os.environ["SLACK_APP_TOKEN"]).start()
| [
"langchain.chat_models.ChatOpenAI"
] | [((347, 360), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (358, 360), False, 'from dotenv import load_dotenv\n'), ((392, 518), 'slack_bolt.App', 'App', ([], {'signing_secret': "os.environ['SLACK_SIGNING_SECRET']", 'token': "os.environ['SLACK_BOT_TOKEN']", 'process_before_response': '(True)'}), "(signing_secret=os.environ['SLACK_SIGNING_SECRET'], token=os.environ[\n 'SLACK_BOT_TOKEN'], process_before_response=True)\n", (395, 518), False, 'from slack_bolt import App\n'), ((610, 621), 'time.time', 'time.time', ([], {}), '()\n', (619, 621), False, 'import time\n'), ((1365, 1399), 're.sub', 're.sub', (['"""<@.*>"""', '""""""', "event['text']"], {}), "('<@.*>', '', event['text'])\n", (1371, 1399), False, 'import re\n'), ((1558, 1704), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': "os.environ['OPENAI_API_MODEL']", 'temperature': "os.environ['OPENAI_API_TEMPERATURE']", 'streaming': '(True)', 'callbacks': '[callback]'}), "(model_name=os.environ['OPENAI_API_MODEL'], temperature=os.\n environ['OPENAI_API_TEMPERATURE'], streaming=True, callbacks=[callback])\n", (1568, 1704), False, 'from langchain.chat_models import ChatOpenAI\n'), ((837, 848), 'time.time', 'time.time', ([], {}), '()\n', (846, 848), False, 'import time\n'), ((1826, 1879), 'slack_bolt.adapter.socket_mode.SocketModeHandler', 'SocketModeHandler', (['app', "os.environ['SLACK_APP_TOKEN']"], {}), "(app, os.environ['SLACK_APP_TOKEN'])\n", (1843, 1879), False, 'from slack_bolt.adapter.socket_mode import SocketModeHandler\n')] |
import json
from typing import Optional, Any
from langchain.schema import AIMessage
from langchain.schema.runnable import RunnableSerializable, RunnableConfig
from pydantic import BaseModel
class FunctionCall(BaseModel):
name: str
arguments: dict[str, Any]
class ParseFunctionCall(RunnableSerializable[AIMessage, FunctionCall]):
"""
Parse OpenAI function call from an AI Message. Used in conjunction with
functions bound to the LLM request.
"""
def invoke(
self,
input: AIMessage,
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> FunctionCall:
if isinstance(input, dict):
input = AIMessage(**input)
additional_kwargs = input.additional_kwargs
if "function_call" in additional_kwargs:
function_call = additional_kwargs["function_call"]
if isinstance(function_call, str):
function_call = json.loads(function_call)
if isinstance(function_call["arguments"], str):
function_call["arguments"] = json.loads(function_call["arguments"])
return FunctionCall(**function_call)
else:
raise ValueError("No function call found in input.")
async def ainvoke(
self,
input: AIMessage,
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> FunctionCall:
if isinstance(input, dict):
input = AIMessage(**input)
additional_kwargs = input.additional_kwargs
if "function_call" in additional_kwargs:
function_call = additional_kwargs["function_call"]
if isinstance(function_call, str):
function_call = json.loads(function_call)
if isinstance(function_call["arguments"], str):
function_call["arguments"] = json.loads(function_call["arguments"])
return FunctionCall(**function_call)
else:
raise ValueError("No function call found in input.")
| [
"langchain.schema.AIMessage"
] | [((682, 700), 'langchain.schema.AIMessage', 'AIMessage', ([], {}), '(**input)\n', (691, 700), False, 'from langchain.schema import AIMessage\n'), ((1460, 1478), 'langchain.schema.AIMessage', 'AIMessage', ([], {}), '(**input)\n', (1469, 1478), False, 'from langchain.schema import AIMessage\n'), ((945, 970), 'json.loads', 'json.loads', (['function_call'], {}), '(function_call)\n', (955, 970), False, 'import json\n'), ((1077, 1115), 'json.loads', 'json.loads', (["function_call['arguments']"], {}), "(function_call['arguments'])\n", (1087, 1115), False, 'import json\n'), ((1723, 1748), 'json.loads', 'json.loads', (['function_call'], {}), '(function_call)\n', (1733, 1748), False, 'import json\n'), ((1855, 1893), 'json.loads', 'json.loads', (["function_call['arguments']"], {}), "(function_call['arguments'])\n", (1865, 1893), False, 'import json\n')] |
from langchain.chains.router import MultiPromptChain
from langchain.chat_models import ChatOpenAI
from dotenv import load_dotenv
import os
# A template for working with LangChain multi prompt chain.
# It's a great way to let the large language model choose which prompt suits the question.
# Load env files
load_dotenv()
openai_api_key = os.environ.get('openai_api_key')
# Create the templates
marketing_template = """
You are a skilled marketing professional.
You have a deep understanding of market analysis, consumer behavior, branding, and digital marketing strategies.
You can provide insightful recommendations and creative solutions to address various marketing-related questions.
Here is a marketing-related question:
{input}"""
business_template = """
You are an experienced business expert.
You possess knowledge in areas such as business strategy, entrepreneurship, market research, and financial analysis.
You can provide practical insights and strategic advice to address various business-related questions.
Here is a business-related question:
{input}"""
# Create prompt info
prompt_infos = [
{
"name": "marketing",
"description": "Good for answering marketing questions",
"prompt_template": marketing_template
},
{
"name": "business",
"description": "Good for answering business-related questions",
"prompt_template": business_template
}
]
# Create the chain
llm = ChatOpenAI(openai_api_key=openai_api_key, model_name="gpt-3.5-turbo", temperature=0.3)
chain = MultiPromptChain.from_prompts(llm=llm, prompt_infos=prompt_infos, verbose=True)
# Example usage
question = "What is the best way to finance a startup?"
response = chain.run(question) | [
"langchain.chains.router.MultiPromptChain.from_prompts",
"langchain.chat_models.ChatOpenAI"
] | [((310, 323), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (321, 323), False, 'from dotenv import load_dotenv\n'), ((341, 373), 'os.environ.get', 'os.environ.get', (['"""openai_api_key"""'], {}), "('openai_api_key')\n", (355, 373), False, 'import os\n'), ((1461, 1551), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'openai_api_key', 'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0.3)'}), "(openai_api_key=openai_api_key, model_name='gpt-3.5-turbo',\n temperature=0.3)\n", (1471, 1551), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1556, 1635), 'langchain.chains.router.MultiPromptChain.from_prompts', 'MultiPromptChain.from_prompts', ([], {'llm': 'llm', 'prompt_infos': 'prompt_infos', 'verbose': '(True)'}), '(llm=llm, prompt_infos=prompt_infos, verbose=True)\n', (1585, 1635), False, 'from langchain.chains.router import MultiPromptChain\n')] |
from dotenv import load_dotenv
from src.slackbot import SlackBot
from src.handlers import create_handlers
import asyncio
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
handler = StreamingStdOutCallbackHandler()
# Load environment variables
load_dotenv()
# Load custom tools
import src.custom_tools as custom_tools
tools = [custom_tools.disk_usage, custom_tools.memory_usage,
custom_tools.asyncArxivQueryRun(max_workers=4),
custom_tools.asyncDuckDuckGoSearchRun(max_workers=4),
custom_tools.WebPilot(max_workers=4)]
# You can load more tools using load_tools
# from langchain.agents import load_tools
# tools.extend(load_tools(['ddg-search', 'arxiv', 'requests_all']))
# Create SlackBot instance
bot = SlackBot(name='SlackBot', verbose=True,
max_tokens=500, model_type='openai',
chunk_size=500, # Chunk size for splitter
chunk_overlap=50, # Chunk overlap for splitter
k_similarity=5, # Numbers of chunks to return in retriever
log_filename='_slackbot.log',
tools=tools,
)
## LLM configuration
if bot.model_type == 'llama':
config = dict(gpu_layers=32, temperature=0.8, batch_size=1024,
context_length=2048, threads=6, stream=True, max_new_tokens=bot.max_tokens)
else:
config = dict(model_name="gpt-3.5-turbo-16k", temperature=0.8,
streaming=True, max_tokens=bot.max_tokens)
# Initialize LLM and embeddings
bot.app.logger.info("Initializing LLM and embeddings...")
bot.initialize_llm(max_tokens_threads=4000, config=config, callbacks=[handler])
bot.initialize_embeddings()
# Create handlers for commands /ask, /modify_bot, /bot_info and bot mentions
create_handlers(bot)
### You can create new handlers for other commands as follow
# @bot.app.command("/foo")
# async def handle_foo(say, respond, ack, command):
# await ack()
# # do something..
# Load bot in async mode
async def start():
await bot.start()
if __name__ == "__main__":
logger = bot.app.logger
try:
asyncio.run(start())
logger.info('App started.')
except KeyboardInterrupt:
logger.info('App stopped by user.')
except Exception as e:
logger.info('App stopped due to error.')
logger.error(str(e))
finally:
logger.info('App stopped.') | [
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler"
] | [((211, 243), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (241, 243), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((273, 286), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (284, 286), False, 'from dotenv import load_dotenv\n'), ((765, 943), 'src.slackbot.SlackBot', 'SlackBot', ([], {'name': '"""SlackBot"""', 'verbose': '(True)', 'max_tokens': '(500)', 'model_type': '"""openai"""', 'chunk_size': '(500)', 'chunk_overlap': '(50)', 'k_similarity': '(5)', 'log_filename': '"""_slackbot.log"""', 'tools': 'tools'}), "(name='SlackBot', verbose=True, max_tokens=500, model_type='openai',\n chunk_size=500, chunk_overlap=50, k_similarity=5, log_filename=\n '_slackbot.log', tools=tools)\n", (773, 943), False, 'from src.slackbot import SlackBot\n'), ((1764, 1784), 'src.handlers.create_handlers', 'create_handlers', (['bot'], {}), '(bot)\n', (1779, 1784), False, 'from src.handlers import create_handlers\n'), ((419, 465), 'src.custom_tools.asyncArxivQueryRun', 'custom_tools.asyncArxivQueryRun', ([], {'max_workers': '(4)'}), '(max_workers=4)\n', (450, 465), True, 'import src.custom_tools as custom_tools\n'), ((476, 528), 'src.custom_tools.asyncDuckDuckGoSearchRun', 'custom_tools.asyncDuckDuckGoSearchRun', ([], {'max_workers': '(4)'}), '(max_workers=4)\n', (513, 528), True, 'import src.custom_tools as custom_tools\n'), ((539, 575), 'src.custom_tools.WebPilot', 'custom_tools.WebPilot', ([], {'max_workers': '(4)'}), '(max_workers=4)\n', (560, 575), True, 'import src.custom_tools as custom_tools\n')] |
import os
import requests
from langchain.tools import tool
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from sec_api import QueryApi
from unstructured.partition.html import partition_html
class SECTools():
@tool("Search 10-Q form")
def search_10q(data):
"""
Useful to search information from the latest 10-Q form for a
given stock.
The input to this tool should be a pipe (|) separated text of
length two, representing the stock ticker you are interested and what
question you have from it.
For example, `AAPL|what was last quarter's revenue`.
"""
stock, ask = data.split("|")
queryApi = QueryApi(api_key=os.environ['SEC_API_API_KEY'])
query = {
"query": {
"query_string": {
"query": f"ticker:{stock} AND formType:\"10-Q\""
}
},
"from": "0",
"size": "1",
"sort": [{ "filedAt": { "order": "desc" }}]
}
fillings = queryApi.get_filings(query)['filings']
if len(fillings) == 0:
return "Sorry, I couldn't find any filling for this stock, check if the ticker is correct."
link = fillings[0]['linkToFilingDetails']
answer = SECTools.__embedding_search(link, ask)
return answer
@tool("Search 10-K form")
def search_10k(data):
"""
Useful to search information from the latest 10-K form for a
given stock.
The input to this tool should be a pipe (|) separated text of
length two, representing the stock ticker you are interested, what
question you have from it.
For example, `AAPL|what was last year's revenue`.
"""
stock, ask = data.split("|")
queryApi = QueryApi(api_key=os.environ['SEC_API_API_KEY'])
query = {
"query": {
"query_string": {
"query": f"ticker:{stock} AND formType:\"10-K\""
}
},
"from": "0",
"size": "1",
"sort": [{ "filedAt": { "order": "desc" }}]
}
fillings = queryApi.get_filings(query)['filings']
if len(fillings) == 0:
return "Sorry, I couldn't find any filling for this stock, check if the ticker is correct."
link = fillings[0]['linkToFilingDetails']
answer = SECTools.__embedding_search(link, ask)
return answer
def __embedding_search(url, ask):
text = SECTools.__download_form_html(url)
elements = partition_html(text=text)
content = "\n".join([str(el) for el in elements])
text_splitter = CharacterTextSplitter(
separator = "\n",
chunk_size = 1000,
chunk_overlap = 150,
length_function = len,
is_separator_regex = False,
)
docs = text_splitter.create_documents([content])
retriever = FAISS.from_documents(
docs, OpenAIEmbeddings()
).as_retriever()
answers = retriever.get_relevant_documents(ask, top_k=4)
answers = "\n\n".join([a.page_content for a in answers])
return answers
def __download_form_html(url):
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9,pt-BR;q=0.8,pt;q=0.7',
'Cache-Control': 'max-age=0',
'Dnt': '1',
'Sec-Ch-Ua': '"Not_A Brand";v="8", "Chromium";v="120"',
'Sec-Ch-Ua-Mobile': '?0',
'Sec-Ch-Ua-Platform': '"macOS"',
'Sec-Fetch-Dest': 'document',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-Site': 'none',
'Sec-Fetch-User': '?1',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
}
response = requests.get(url, headers=headers)
return response.text
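# Example usage (a minimal sketch; requires SEC_API_API_KEY and OPENAI_API_KEY in the
# environment, and uses .run() because @tool wraps the methods as Tool objects):
# print(SECTools.search_10k.run("AAPL|what was last year's total revenue?"))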
| [
"langchain.embeddings.OpenAIEmbeddings",
"langchain.text_splitter.CharacterTextSplitter",
"langchain.tools.tool"
] | [((327, 351), 'langchain.tools.tool', 'tool', (['"""Search 10-Q form"""'], {}), "('Search 10-Q form')\n", (331, 351), False, 'from langchain.tools import tool\n'), ((1325, 1349), 'langchain.tools.tool', 'tool', (['"""Search 10-K form"""'], {}), "('Search 10-K form')\n", (1329, 1349), False, 'from langchain.tools import tool\n'), ((748, 795), 'sec_api.QueryApi', 'QueryApi', ([], {'api_key': "os.environ['SEC_API_API_KEY']"}), "(api_key=os.environ['SEC_API_API_KEY'])\n", (756, 795), False, 'from sec_api import QueryApi\n'), ((1742, 1789), 'sec_api.QueryApi', 'QueryApi', ([], {'api_key': "os.environ['SEC_API_API_KEY']"}), "(api_key=os.environ['SEC_API_API_KEY'])\n", (1750, 1789), False, 'from sec_api import QueryApi\n'), ((2413, 2438), 'unstructured.partition.html.partition_html', 'partition_html', ([], {'text': 'text'}), '(text=text)\n', (2427, 2438), False, 'from unstructured.partition.html import partition_html\n'), ((2513, 2637), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(1000)', 'chunk_overlap': '(150)', 'length_function': 'len', 'is_separator_regex': '(False)'}), "(separator='\\n', chunk_size=1000, chunk_overlap=150,\n length_function=len, is_separator_regex=False)\n", (2534, 2637), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((3814, 3848), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (3826, 3848), False, 'import requests\n'), ((2795, 2813), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (2811, 2813), False, 'from langchain.embeddings import OpenAIEmbeddings\n')] |
#!/usr/bin/env python
"""A more complex example that shows how to configure index name at run time."""
from typing import Any, Iterable, List, Optional, Type
from fastapi import FastAPI
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.retriever import BaseRetriever
from langchain.schema.runnable import (
ConfigurableFieldSingleOption,
RunnableConfig,
RunnableSerializable,
)
from langchain.schema.vectorstore import VST
from langchain.vectorstores import FAISS, VectorStore
from langserve import add_routes
from langserve.pydantic_v1 import BaseModel, Field
vectorstore1 = FAISS.from_texts(
["cats like fish", "dogs like sticks"], embedding=OpenAIEmbeddings()
)
vectorstore2 = FAISS.from_texts(["x_n+1=a * xn * (1-xn)"], embedding=OpenAIEmbeddings())
app = FastAPI(
title="LangChain Server",
version="1.0",
description="Spin up a simple api server using Langchain's Runnable interfaces",
)
class UnderlyingVectorStore(VectorStore):
"""This is a fake vectorstore for demo purposes."""
def __init__(self, collection_name: str) -> None:
"""Fake vectorstore that has a collection name."""
self.collection_name = collection_name
def as_retriever(self) -> BaseRetriever:
if self.collection_name == "index1":
return vectorstore1.as_retriever()
elif self.collection_name == "index2":
return vectorstore2.as_retriever()
else:
raise NotImplementedError(
f"No retriever for collection {self.collection_name}"
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
raise NotImplementedError()
@classmethod
def from_texts(
cls: Type[VST],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> VST:
raise NotImplementedError()
def similarity_search(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
raise NotImplementedError()
class ConfigurableRetriever(RunnableSerializable[str, List[Document]]):
"""Create a custom retriever that can be configured by the user.
This is an example of how to create a custom runnable that can be configured
to use a different collection name at run time.
Configuration involves instantiating a VectorStore with a collection name.
at run time, so the underlying vectorstore should be *cheap* to instantiate.
For example, it should not be making any network requests at instantiation time.
Make sure that the vectorstore you use meets this criteria.
"""
collection_name: str
def invoke(
self, input: str, config: Optional[RunnableConfig] = None
) -> List[Document]:
"""Invoke the retriever."""
vectorstore = UnderlyingVectorStore(self.collection_name)
retriever = vectorstore.as_retriever()
return retriever.invoke(input, config=config)
configurable_collection_name = ConfigurableRetriever(
collection_name="index1"
).configurable_fields(
collection_name=ConfigurableFieldSingleOption(
id="collection_name",
name="Collection Name",
description="The name of the collection to use for the retriever.",
options={
"Index 1": "index1",
"Index 2": "index2",
},
default="Index 1",
)
)
class Request(BaseModel):
__root__: str = Field(default="cat", description="Search query")
add_routes(app, configurable_collection_name.with_types(input_type=Request))
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="localhost", port=8000)
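# Example client call (a hypothetical sketch; assumes the server above is running and
# uses langserve's RemoteRunnable to select the collection via the configurable field):
# from langserve import RemoteRunnable
# retriever = RemoteRunnable("http://localhost:8000")
# retriever.invoke("what do cats like?", config={"configurable": {"collection_name": "Index 2"}})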
| [
"langchain.schema.runnable.ConfigurableFieldSingleOption",
"langchain.embeddings.OpenAIEmbeddings"
] | [((893, 1027), 'fastapi.FastAPI', 'FastAPI', ([], {'title': '"""LangChain Server"""', 'version': '"""1.0"""', 'description': '"""Spin up a simple api server using Langchain\'s Runnable interfaces"""'}), '(title=\'LangChain Server\', version=\'1.0\', description=\n "Spin up a simple api server using Langchain\'s Runnable interfaces")\n', (900, 1027), False, 'from fastapi import FastAPI\n'), ((3661, 3709), 'langserve.pydantic_v1.Field', 'Field', ([], {'default': '"""cat"""', 'description': '"""Search query"""'}), "(default='cat', description='Search query')\n", (3666, 3709), False, 'from langserve.pydantic_v1 import BaseModel, Field\n'), ((3841, 3886), 'uvicorn.run', 'uvicorn.run', (['app'], {'host': '"""localhost"""', 'port': '(8000)'}), "(app, host='localhost', port=8000)\n", (3852, 3886), False, 'import uvicorn\n'), ((774, 792), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (790, 792), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((865, 883), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (881, 883), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((3314, 3536), 'langchain.schema.runnable.ConfigurableFieldSingleOption', 'ConfigurableFieldSingleOption', ([], {'id': '"""collection_name"""', 'name': '"""Collection Name"""', 'description': '"""The name of the collection to use for the retriever."""', 'options': "{'Index 1': 'index1', 'Index 2': 'index2'}", 'default': '"""Index 1"""'}), "(id='collection_name', name='Collection Name',\n description='The name of the collection to use for the retriever.',\n options={'Index 1': 'index1', 'Index 2': 'index2'}, default='Index 1')\n", (3343, 3536), False, 'from langchain.schema.runnable import ConfigurableFieldSingleOption, RunnableConfig, RunnableSerializable\n')] |
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
_PROMPT_TEMPLATE = """You are GPT-3, and you can't do math.
You can do basic math, and your memorization abilities are impressive, but you can't do any complex calculations that a human could not do in their head. You also have an annoying tendency to just make up highly specific, but wrong, answers.
So we hooked you up to a Python 3 kernel, and now you can execute code. If anyone gives you a hard math problem, just use this format and we’ll take care of the rest:
Question: ${{Question with hard calculation.}}
```python
${{Code that prints what you need to know}}
```
```output
${{Output of your code}}
```
Answer: ${{Answer}}
Otherwise, use this simpler format:
Question: ${{Question without hard calculation}}
Answer: ${{Answer}}
Begin.
Question: What is 37593 * 67?
```python
print(37593 * 67)
```
```output
2518731
```
Answer: 2518731
Question: {question}
"""
PROMPT = PromptTemplate(input_variables=["question"], template=_PROMPT_TEMPLATE)
| [
"langchain.prompts.prompt.PromptTemplate"
] | [((957, 1028), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['question']", 'template': '_PROMPT_TEMPLATE'}), "(input_variables=['question'], template=_PROMPT_TEMPLATE)\n", (971, 1028), False, 'from langchain.prompts.prompt import PromptTemplate\n')] |
from itertools import chain
import pandas as pd
from datasets import Dataset
from joblib import Parallel, delayed
from langchain.text_splitter import RecursiveCharacterTextSplitter
from tqdm import tqdm
def sl_hf_dataset_for_tokenizer(
sl, sl_dataset_name, tokenizer, max_length, margin=192, min_length=7
):
"""
Create a HuggingFace dataset from a SpeakLeash dataset.
params: sl: SpeakLeash object
params: sl_dataset_name: SpeakLeash dataset name
params: tokenizer: HuggingFace tokenizer
params: max_length: maximum length of the tokenized text
    params: margin: margin subtracted from max_length (this helps
        to avoid RecursiveCharacterTextSplitter returning too many
        overly long chunks)
params: min_length: minimum length of the tokenized text
returns: HuggingFace dataset
"""
corpus = sl.get(sl_dataset_name)
# this is used only for length calculation
def token_len(text):
return len(tokenizer.encode(text, max_length=None, truncation=False))
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=max_length - margin,
chunk_overlap=0,
length_function=token_len,
)
dataset = Parallel(n_jobs=-1)(
delayed(text_splitter.split_text)(document)
for document in tqdm(corpus.data, total=corpus.manifest["stats"]["documents"])
)
dataset = list(chain.from_iterable(dataset))
df = pd.DataFrame(dataset, columns=["text"])
hf_dataset = Dataset.from_pandas(df)
hf_dataset = hf_dataset.map(
lambda examples: tokenizer(examples["text"], truncation=True, max_length=max_length),
batched=True,
)
# Filter out samples that have input_ids exceeding max_length
hf_dataset = hf_dataset.filter(
lambda sample: min_length <= len(sample["input_ids"]) < max_length
)
return hf_dataset
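# Example usage (a hypothetical sketch; the SpeakLeash handle `sl`, the dataset name
# and the tokenizer are placeholders for whatever is available locally):
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("gpt2")
# ds = sl_hf_dataset_for_tokenizer(sl, "plwiki", tokenizer, max_length=1024)
# print(ds)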
| [
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((1048, 1158), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(max_length - margin)', 'chunk_overlap': '(0)', 'length_function': 'token_len'}), '(chunk_size=max_length - margin,\n chunk_overlap=0, length_function=token_len)\n', (1078, 1158), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1426, 1465), 'pandas.DataFrame', 'pd.DataFrame', (['dataset'], {'columns': "['text']"}), "(dataset, columns=['text'])\n", (1438, 1465), True, 'import pandas as pd\n'), ((1483, 1506), 'datasets.Dataset.from_pandas', 'Dataset.from_pandas', (['df'], {}), '(df)\n', (1502, 1506), False, 'from datasets import Dataset\n'), ((1201, 1220), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (1209, 1220), False, 'from joblib import Parallel, delayed\n'), ((1386, 1414), 'itertools.chain.from_iterable', 'chain.from_iterable', (['dataset'], {}), '(dataset)\n', (1405, 1414), False, 'from itertools import chain\n'), ((1230, 1263), 'joblib.delayed', 'delayed', (['text_splitter.split_text'], {}), '(text_splitter.split_text)\n', (1237, 1263), False, 'from joblib import Parallel, delayed\n'), ((1298, 1360), 'tqdm.tqdm', 'tqdm', (['corpus.data'], {'total': "corpus.manifest['stats']['documents']"}), "(corpus.data, total=corpus.manifest['stats']['documents'])\n", (1302, 1360), False, 'from tqdm import tqdm\n')] |
from contextlib import contextmanager
import uuid
import os
import tiktoken
from . import S2_tools as scholar
import csv
import sys
import requests
# pdf loader
from langchain.document_loaders import OnlinePDFLoader
## paper questioning tools
from llama_index import Document
from llama_index.vector_stores import PineconeVectorStore
from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext
from llama_index.embeddings.openai import OpenAIEmbedding
def PaperSearchAndDownload(query):
# make new workspace
if not os.path.exists( os.path.join(os.getcwd(),'workspaces') ): os.mkdir(os.path.join(os.getcwd(),'workspaces'))
workspace_dir_name = os.path.join(os.getcwd(),'workspaces',query.split()[0] + '_'+ str(uuid.uuid4().hex))
os.mkdir(workspace_dir_name)
os.mkdir(os.path.join(workspace_dir_name,'results'))
os.mkdir(os.path.join(workspace_dir_name,'refy_suggestions'))
os.environ['workspace'] = workspace_dir_name
# 1) search papers
print(' 1) Searching base papers')
papers = scholar.find_paper_from_query(query, result_limit=10)
    if len(papers) == 0:
papers = scholar.find_paper_from_query(query, result_limit=50)
scholar.update_dataframe(incomplete=papers, dest=os.path.join(workspace_dir_name, 'results','papers.csv'))
delete_duplicates_from_csv(csv_file=os.path.join(workspace_dir_name, 'results','papers.csv'))
    # 2) Cross-reference recommendation system:
    # a paper is recommended if and only if it's related to more than one paper
    print('\n\n 2) Expanding with Scholar recommendations')
counts = {}
candidates = {}
for paper in papers:
guesses = scholar.find_recommendations(paper)
for guess in guesses:
if not guess['isOpenAccess']: continue
candidates[guess['title']] = guess
if guess['title'] not in counts.keys(): counts[guess['title']] = 1
else: counts[guess['title']] += 1
    # recommend only papers that appeared more than once
reccomends = []
for key in counts:
if counts[key]>1: reccomends.append(candidates[key])
print(f'found {len(reccomends)} additional papers')
# update the csv
scholar.update_dataframe(incomplete= reccomends, dest=os.path.join(workspace_dir_name, 'results','papers.csv'))
delete_duplicates_from_csv(csv_file=os.path.join(workspace_dir_name, 'results','papers.csv'))
# download the papers (1/2)
print('downloading papers (1/2)')
with open(os.path.join(workspace_dir_name,'results','papers.csv'), 'r',encoding='utf-8') as fp:
csvfile = csv.DictReader(fp)
scholar.download_pdf_from_id(" ".join( row['paperId'] for row in csvfile), workspace_dir_name)
scholar.write_bib_file(csv_file=os.path.join(workspace_dir_name,'results','papers.csv'), bib_file=os.path.join(workspace_dir_name,'results','papers.bib'))
    # expand further with the refy recommendation system
    print('\n\n 3) Expanding with Refy recommendation system')
print('this might take a while...')
scholar.refy_reccomend(bib_path=os.path.join(workspace_dir_name,'results','papers.bib'))
with open(os.path.join(workspace_dir_name, 'refy_suggestions', 'test.csv'), 'r',encoding='utf-8') as fp:
csvfile = csv.DictReader(fp)
for row in csvfile:
title = scholar.replace_non_alphanumeric(row['title'])
title = title.replace(" ","_")
save_path = os.path.join(workspace_dir_name,'refy_suggestions',(title+'.pdf'))
try:
download_paper(url=row['url'], save_path=save_path)
except:
                print(f"couldn't download {row}")
return f'{os.path.join(os.getcwd(), workspace_dir_name)}'
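# Hypothetical usage sketch (illustrative only, not part of the original module): requires the
# Semantic Scholar helpers in S2_tools, the API keys they expect, and network access; the query is made up.
#
#   workspace_path = PaperSearchAndDownload("graph neural networks")
#   print(f"papers and metadata written to {workspace_path}")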
import urllib
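# Note: the default save_path below is evaluated once, at function definition time, so repeated
# calls that omit save_path all reuse the same uuid-based filename.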
def download_paper(url, save_path=f"{uuid.uuid4().hex}.pdf"):
success_string = f"paper saved successfully at {os.path.join(os.path.abspath(save_path))}"
if url.endswith('.pdf'):
urllib.request.urlretrieve(url, save_path)
return success_string
if 'doi' in url:
doi = paper_id = "/".join(url.split("/")[-2:])
# Construct the Crossref API URL
print(doi)
doi_url = f"https://doi.org/{doi}"
# Send a GET request to the doi.org URL
response = requests.get(doi_url, allow_redirects=True)
# Check if the request was successful
if response.status_code == 200:
# Extract the final URL after redirection
url = response.url
if 'arxiv' in url:
        # arXiv paper URL
        # Get the paper ID from the URL
        paper_id = url.split("/")[-1]
        # Build the paper's download URL
        pdf_url = f"http://arxiv.org/pdf/{paper_id}.pdf"
        # Download the paper as a PDF
urllib.request.urlretrieve(pdf_url, save_path)
return success_string
else:
if '/full' in url:
            urllib.request.urlretrieve(url.replace('/full','/pdf'), save_path)
return success_string
if 'plos.org' in url:
final_url = url.replace('article?', 'article/file?')
urllib.request.urlretrieve(final_url, save_path)
return success_string
return f'\nfailed to download {url}'
def download_bibtex_library(csv_path):
with open(csv_path, 'r',encoding='utf-8') as fp:
csvfile = csv.DictReader(fp)
for row in csvfile:
title = scholar.replace_non_alphanumeric(row['title'])
title = title.replace(" ","-")
save_path = os.path.join(os.path.join(csv_path, '..', title+'.pdf'))
try:
download_paper(url=row['url'], save_path=save_path)
except:
try:
download_paper(url=row['url']+'.pdf', save_path=save_path)
except:
                    print(f"couldn't download {row}")
def generate_chunks(text, CHUNK_LENGTH = 4000):
enc = tiktoken.encoding_for_model("gpt-4")
tokens = enc.encode(text)
token_chunks = [tokens[i:i + CHUNK_LENGTH] for i in range(0, len(tokens), CHUNK_LENGTH)]
word_chunks = [enc.decode(chunk) for chunk in token_chunks]
return word_chunks
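# Hypothetical usage sketch (illustrative only): split a long string into ~1000-token pieces
# using the same tiktoken encoding as above; `long_text` is a made-up variable.
#
#   chunks = generate_chunks(long_text, CHUNK_LENGTH=1000)
#   print(len(chunks), "chunks")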
from langchain.vectorstores import Chroma, Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
import pinecone
import langid
import time
# def process_pdf_folder(folder_path):
# if not os.path.exists(folder_path):
# return 'the folder does not exist, check your spelling'
# for item in os.listdir(folder_path):
# if not item.endswith('.pdf'):continue
# with open(os.path.join(folder_path,'SUMMARY.txt'), 'a', encoding='UTF-8') as write_file:
# write_file.write(item)
# write_file.write("\n\n\n")
# txt = summarize_pdf(item, model='Vicuna')
# try:
# write_file.write(txt)
# except:
# print(txt)
# with open(os.path.join(folder_path,'SUMMARY.txt'), 'r', encoding='UTF-8') as read_file:
# return read_file.read()
# # def summarize_pdf(pdf_path, model= None):
# text = readPDF(pdf_path)
# # according to the TLDR Model, consider smaller chunks
# text_chunks = generate_chunks(text, 700)
# if model is not None:
# summarizer = LocalSearchEngine(tldr_model=model)
# summary=''
# for chunk in text_chunks:
# summary += summarizer.tldr(chunk)
# return summary
def get_result_path(path, exclude = []):
for item in os.listdir(path):
if item == 'papers.csv':
return os.path.join(path, item)
if os.path.isdir(os.path.join(path, item)) and item not in exclude:
res = get_result_path(os.path.join(path, item))
if res: return res
return
def get_workspace_titles(workspace_name):
csv_file_path = get_result_path(workspace_name)
papers_available = []
with open(csv_file_path, 'r', encoding='utf-8') as file:
csv_file = csv.DictReader(file)
for row in csv_file:
papers_available.append(row['title'])
return papers_available
import re
def same_title(title1, title2):
try:
title1 = re.sub(r'[^a-zA-Z]', ' ', title1)
title2 = re.sub(r'[^a-zA-Z]', ' ', title2)
except:
return False
words1 = set(title1.lower().split())
words2 = set(title2.lower().split())
return words1 == words2 or words1 <= words2 or words1 >= words2
def glimpse_pdf(title):
# find papers.csv in workspace
for workspace_name in os.listdir('workspaces'):
csv_file_path = get_result_path(workspace_name)
if csv_file_path is None: return 'no paper found'
with open(csv_file_path, 'r', encoding='utf-8') as file:
csv_file = csv.DictReader(file)
for row in csv_file:
if same_title(row['title'], title): return f"{row['title']}, paperId: {row['paperId']}, summary: {row['abstract']}"
return f'\nno paper found with title {title}'
def count_tokens(text):
enc = tiktoken.encoding_for_model("gpt-4")
tokens = enc.encode(text)
return len(tokens)
def readPDF(pdf_path):
loader = OnlinePDFLoader(pdf_path)
data = loader.load()
text_content = ''
for page in data:
formatted_content = page.page_content.replace('\n\n', ' ')
text_content+=formatted_content
return text_content
def get_pdf_path(dir, exclude=[]):
paths = []
for item in os.listdir(dir):
itempath = os.path.join(dir,item)
if item.endswith('.pdf'): paths.append(itempath)
        if os.path.isdir(itempath) and item not in exclude:
subpaths = get_pdf_path(itempath)
for i in subpaths: paths.append(i)
return paths
def delete_duplicates_from_csv(csv_file):
print('verifying duplicates...')
to_delete = []
def delete_csv_row_by_title(csv_file, title):
# Read the CSV file and store rows in a list
with open(csv_file, 'r',encoding='UTF-8') as file:
reader = csv.DictReader(file)
rows = list(reader)
# Find the row index with the matching title
row_index = None
for index, row in enumerate(rows):
if row['title'] == title:
row_index = index
break
# If no matching title is found, return
if row_index is None:
print(f"No row with title '{title}' found.")
return
# Remove the row from the list
del rows[row_index]
# Write the updated rows back to the CSV file
with open(csv_file, 'w', newline='',encoding='UTF-8') as file:
fieldnames = reader.fieldnames
writer = csv.DictWriter(file, fieldnames=fieldnames)
writer.writeheader()
writer.writerows(rows)
with open(csv_file, 'r', encoding='UTF-8') as file:
DELETED = 0
reader = csv.DictReader(file)
rows = list(reader)
entries = set()
for row in rows:
if row['title']=='' or row['title'] is None: continue
if row['title'] not in entries:entries.add(row['title'])
else:
DELETED+=1
to_delete.append(row['title'])
for title in to_delete: delete_csv_row_by_title(csv_file, title=title)
print(f"Deleted {DELETED} duplicates")
return
def update_workspace_dataframe(workspace, verbose = True):
ADDED = 0
# find results.csv
csv_path = get_result_path(workspace)
# get titles in csv
titles = get_workspace_titles(workspace)
# get local papers path
paths = get_pdf_path(workspace, exclude='refy_suggestions')
# adding new to csv:
for path in paths:
exists = False
# extract the title from the local paper
title = scholar.extract_title(path)
for t in titles:
if same_title(t,title): exists = True
# add it to dataframe if it was not found on the DF
if not exists:
if verbose: print(f"\nnew paper detected: {title}")
# find it with online
paper = scholar.find_paper_online(path)
if paper :
if verbose: print(f"\t---> best match found online: {paper['title']} " )
for t in titles:
if same_title(paper['title'], title):
if verbose: print(f"\t this paper is already present in the dataframe. skipping")
else:
if verbose: print(path, '-x-> no match found')
continue
with open(csv_path, 'a', encoding='utf-8') as fp:
areYouSure = True
for t in titles:
if same_title(t,paper['title']): areYouSure =False
if not areYouSure:
if verbose: print(f"double check revealed that the paper is already in the dataframe. Skipping")
continue
if verbose: print(f"\t---> adding {paper['title']}")
ADDED +=1
paper_authors = paper.get('authors', [])
journal_data = {}
if 'journal' in paper:
journal_data = paper.get('journal',[])
if journal_data is not None:
if 'name' not in journal_data: journal_data['name'] = ''
if 'pages' not in journal_data: journal_data['pages'] = ''
if paper.get('tldr',[]) != []:tldr = paper['tldr']['text']
elif paper.get('summary',[]) != []:tldr = paper['summary']
elif 'abstract' in paper:tldr = paper['abstract']
else: tldr = 'No summary available'
if 'year' in paper:
year = paper['year']
elif 'updated' in paper:year = paper['updated']
else: year = ''
if 'citationStyles' in paper:
if 'bibtex' in paper['citationStyles']: citStyle = paper['citationStyles']['bibtex']
else: citStyle = paper['citationStyles'][0]
else: citStyle = ''
csvfile = csv.DictWriter(fp, ['paperId', 'title', 'first_author', 'year', 'abstract','tldr','bibtex','influentialCitationCount','venue','journal','pages'])
try:
csvfile.writerow({
'title': paper['title'],
'first_author': paper_authors[0]['name'] if paper_authors else '',
'year': year,
'abstract': paper['abstract'] if 'abstract' in paper else '',
'paperId': paper['paperId'] if 'paperId' in paper else '',
'tldr':tldr,
'bibtex':citStyle,
'influentialCitationCount': paper['influentialCitationCount'] if 'influentialCitationCount' in paper else '0',
'venue':paper['venue'] if 'venue' in paper else '',
'journal':journal_data['name'] if journal_data is not None else '',
'pages':journal_data['pages'] if journal_data is not None else '',
})
except Exception as e:
if verbose: print('could not add ', title, '\n',e)
# delete dupes if present
if verbose: print(f"\n\nCSV UPDATE: Added {ADDED} new papers")
# clean form dupes
delete_duplicates_from_csv(csv_path)
# update bib
scholar.write_bib_file(csv_path)
return
def load_workspace(folderdir):
docs =[]
for item in os.listdir(folderdir):
if item.endswith('.pdf'):
print(f' > loading {item}')
with suppress_stdout():
content = readPDF(os.path.join(folderdir, item))
docs.append(Document(
text = content,
doc_id = uuid.uuid4().hex
))
if item =='.'or item =='..':continue
if os.path.isdir( os.path.join(folderdir,item) ):
sub_docs = load_workspace(os.path.join(folderdir,item))
for doc in sub_docs:
docs.append(doc)
return docs
# List paths of all pdf files in a folder
def list_workspace_elements(folderdir):
docs =[]
for item in os.listdir(folderdir):
if item.endswith('.pdf'):
docs.append(rf"{os.path.join(folderdir,item)}")
if item =='.'or item =='..':continue
if os.path.isdir( os.path.join(folderdir,item) ):
sub_docs = list_workspace_elements(os.path.join(folderdir,item))
for doc in sub_docs:
docs.append(doc)
return docs
def llama_query_engine(docs:list, pinecone_index_name:str):
pinecone.init(
api_key= os.environ['PINECONE_API_KEY'],
environment= os.environ['PINECONE_API_ENV']
)
# Find the pinecone index
if pinecone_index_name not in pinecone.list_indexes():
# we create a new index
pinecone.create_index(
name=pinecone_index_name,
metric='dotproduct',
dimension=1536 # 1536 dim of text-embedding-ada-002
)
index = pinecone.Index(pinecone_index_name)
# init it
vector_store = PineconeVectorStore(pinecone_index=index)
time.sleep(1)
# setup our storage (vector db)
storage_context = StorageContext.from_defaults(
vector_store=vector_store
)
embed_model = OpenAIEmbedding(model='text-embedding-ada-002', embed_batch_size=100)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
# populate the vector store
LamaIndex = GPTVectorStoreIndex.from_documents(
docs, storage_context=storage_context,
service_context=service_context
)
print('PINECONE Vector Index initialized:\n',index.describe_index_stats())
# init the query engine
query_engine = LamaIndex.as_query_engine()
return query_engine, LamaIndex
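# Hypothetical usage sketch (illustrative only): assumes PINECONE_API_KEY / PINECONE_API_ENV and
# an OpenAI key are set, and that `workspace` points at a folder of PDFs; the index name is made up.
#
#   docs = load_workspace(workspace)
#   engine, index = llama_query_engine(docs, "papers-index")
#   answer = engine.query("What methods do these papers compare?")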
@contextmanager
def suppress_stdout():
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout | [
"langchain.document_loaders.OnlinePDFLoader"
] | [((768, 796), 'os.mkdir', 'os.mkdir', (['workspace_dir_name'], {}), '(workspace_dir_name)\n', (776, 796), False, 'import os\n'), ((5950, 5986), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-4"""'], {}), "('gpt-4')\n", (5977, 5986), False, 'import tiktoken\n'), ((7532, 7548), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (7542, 7548), False, 'import os\n'), ((8565, 8589), 'os.listdir', 'os.listdir', (['"""workspaces"""'], {}), "('workspaces')\n", (8575, 8589), False, 'import os\n'), ((9082, 9118), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-4"""'], {}), "('gpt-4')\n", (9109, 9118), False, 'import tiktoken\n'), ((9209, 9234), 'langchain.document_loaders.OnlinePDFLoader', 'OnlinePDFLoader', (['pdf_path'], {}), '(pdf_path)\n', (9224, 9234), False, 'from langchain.document_loaders import OnlinePDFLoader\n'), ((9509, 9524), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (9519, 9524), False, 'import os\n'), ((15782, 15803), 'os.listdir', 'os.listdir', (['folderdir'], {}), '(folderdir)\n', (15792, 15803), False, 'import os\n'), ((16505, 16526), 'os.listdir', 'os.listdir', (['folderdir'], {}), '(folderdir)\n', (16515, 16526), False, 'import os\n'), ((16958, 17060), 'pinecone.init', 'pinecone.init', ([], {'api_key': "os.environ['PINECONE_API_KEY']", 'environment': "os.environ['PINECONE_API_ENV']"}), "(api_key=os.environ['PINECONE_API_KEY'], environment=os.\n environ['PINECONE_API_ENV'])\n", (16971, 17060), False, 'import pinecone\n'), ((17396, 17431), 'pinecone.Index', 'pinecone.Index', (['pinecone_index_name'], {}), '(pinecone_index_name)\n', (17410, 17431), False, 'import pinecone\n'), ((17470, 17511), 'llama_index.vector_stores.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'index'}), '(pinecone_index=index)\n', (17489, 17511), False, 'from llama_index.vector_stores import PineconeVectorStore\n'), ((17516, 17529), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (17526, 17529), False, 'import time\n'), ((17589, 17644), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (17617, 17644), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((17678, 17747), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'embed_batch_size': '(100)'}), "(model='text-embedding-ada-002', embed_batch_size=100)\n", (17693, 17747), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((17770, 17823), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (17798, 17823), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((17878, 17988), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['docs'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(docs, storage_context=storage_context,\n service_context=service_context)\n', (17912, 17988), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((692, 703), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (701, 703), False, 'import os\n'), ((810, 853), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""'], {}), "(workspace_dir_name, 'results')\n", (822, 853), False, 'import os\n'), ((867, 919), 'os.path.join', 
'os.path.join', (['workspace_dir_name', '"""refy_suggestions"""'], {}), "(workspace_dir_name, 'refy_suggestions')\n", (879, 919), False, 'import os\n'), ((2613, 2631), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (2627, 2631), False, 'import csv\n'), ((3285, 3303), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (3299, 3303), False, 'import csv\n'), ((3963, 4005), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', 'save_path'], {}), '(url, save_path)\n', (3989, 4005), False, 'import urllib\n'), ((4283, 4326), 'requests.get', 'requests.get', (['doi_url'], {'allow_redirects': '(True)'}), '(doi_url, allow_redirects=True)\n', (4295, 4326), False, 'import requests\n'), ((4795, 4841), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['pdf_url', 'save_path'], {}), '(pdf_url, save_path)\n', (4821, 4841), False, 'import urllib\n'), ((5368, 5386), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (5382, 5386), False, 'import csv\n'), ((8008, 8028), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (8022, 8028), False, 'import csv\n'), ((8205, 8237), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'title1'], {}), "('[^a-zA-Z]', ' ', title1)\n", (8211, 8237), False, 'import re\n'), ((8256, 8288), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'title2'], {}), "('[^a-zA-Z]', ' ', title2)\n", (8262, 8288), False, 'import re\n'), ((9545, 9568), 'os.path.join', 'os.path.join', (['dir', 'item'], {}), '(dir, item)\n', (9557, 9568), False, 'import os\n'), ((10975, 10995), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (10989, 10995), False, 'import csv\n'), ((17149, 17172), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (17170, 17172), False, 'import pinecone\n'), ((17214, 17302), 'pinecone.create_index', 'pinecone.create_index', ([], {'name': 'pinecone_index_name', 'metric': '"""dotproduct"""', 'dimension': '(1536)'}), "(name=pinecone_index_name, metric='dotproduct',\n dimension=1536)\n", (17235, 17302), False, 'import pinecone\n'), ((1250, 1307), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (1262, 1307), False, 'import os\n'), ((1348, 1405), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (1360, 1405), False, 'import os\n'), ((2268, 2325), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (2280, 2325), False, 'import os\n'), ((2366, 2423), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (2378, 2423), False, 'import os\n'), ((2509, 2566), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (2521, 2566), False, 'import os\n'), ((2778, 2835), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (2790, 2835), False, 'import os\n'), ((2844, 2901), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.bib"""'], {}), "(workspace_dir_name, 'results', 'papers.bib')\n", (2856, 2901), False, 'import os\n'), ((3100, 3157), 'os.path.join', 'os.path.join', 
(['workspace_dir_name', '"""results"""', '"""papers.bib"""'], {}), "(workspace_dir_name, 'results', 'papers.bib')\n", (3112, 3157), False, 'import os\n'), ((3172, 3236), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""refy_suggestions"""', '"""test.csv"""'], {}), "(workspace_dir_name, 'refy_suggestions', 'test.csv')\n", (3184, 3236), False, 'import os\n'), ((3468, 3536), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""refy_suggestions"""', "(title + '.pdf')"], {}), "(workspace_dir_name, 'refy_suggestions', title + '.pdf')\n", (3480, 3536), False, 'import os\n'), ((5119, 5167), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['final_url', 'save_path'], {}), '(final_url, save_path)\n', (5145, 5167), False, 'import urllib\n'), ((7602, 7626), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (7614, 7626), False, 'import os\n'), ((8802, 8822), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (8816, 8822), False, 'import csv\n'), ((9636, 9659), 'os.path.isdir', 'os.path.isdir', (['itempath'], {}), '(itempath)\n', (9649, 9659), False, 'import os\n'), ((10078, 10098), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (10092, 10098), False, 'import csv\n'), ((10760, 10803), 'csv.DictWriter', 'csv.DictWriter', (['file'], {'fieldnames': 'fieldnames'}), '(file, fieldnames=fieldnames)\n', (10774, 10803), False, 'import csv\n'), ((16201, 16230), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16213, 16230), False, 'import os\n'), ((16702, 16731), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16714, 16731), False, 'import os\n'), ((576, 587), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (585, 587), False, 'import os\n'), ((627, 638), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (636, 638), False, 'import os\n'), ((3718, 3729), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3727, 3729), False, 'import os\n'), ((3806, 3818), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3816, 3818), False, 'import uuid\n'), ((3896, 3922), 'os.path.abspath', 'os.path.abspath', (['save_path'], {}), '(save_path)\n', (3911, 3922), False, 'import os\n'), ((5564, 5608), 'os.path.join', 'os.path.join', (['csv_path', '""".."""', "(title + '.pdf')"], {}), "(csv_path, '..', title + '.pdf')\n", (5576, 5608), False, 'import os\n'), ((7652, 7676), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (7664, 7676), False, 'import os\n'), ((7738, 7762), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (7750, 7762), False, 'import os\n'), ((14294, 14449), 'csv.DictWriter', 'csv.DictWriter', (['fp', "['paperId', 'title', 'first_author', 'year', 'abstract', 'tldr', 'bibtex',\n 'influentialCitationCount', 'venue', 'journal', 'pages']"], {}), "(fp, ['paperId', 'title', 'first_author', 'year', 'abstract',\n 'tldr', 'bibtex', 'influentialCitationCount', 'venue', 'journal', 'pages'])\n", (14308, 14449), False, 'import csv\n'), ((16271, 16300), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16283, 16300), False, 'import os\n'), ((16781, 16810), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16793, 16810), False, 'import os\n'), ((745, 757), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (755, 757), False, 'import uuid\n'), ((15951, 15980), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (15963, 15980), False, 'import os\n'), ((16590, 
16619), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16602, 16619), False, 'import os\n'), ((16085, 16097), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (16095, 16097), False, 'import uuid\n')] |
import streamlit as st
from langchain.prompts import PromptTemplate
chat_template = PromptTemplate(
input_variables=['transcript','summary','chat_history','user_message', 'sentiment_report'],
template='''
    You are an AI chatbot intended to discuss the user's audio transcription.
    \nTRANSCRIPT: "{transcript}"
    \nTRANSCRIPT SUMMARY: "{summary}"
\nTRANSCRIPT SENTIMENT REPORT: "{sentiment_report}"
\nCHAT HISTORY: {chat_history}
\nUSER MESSAGE: "{user_message}"
\nAI RESPONSE HERE:
'''
)
sentiment_prompt = PromptTemplate(
input_variables=['transcript','summary'],
template='''
Return a single word sentiment of either ['Positive','Negative' or 'Neutral'] from this transcript and summary.
After that single word sentiment, add a comma, then return a sentiment report, analyzing transcript sentiment.
\nTRANSCRIPT: {transcript}
\nTRANSCRIPT SUMMARY: {summary}
\nSENTIMENT LABEL HERE ('Positive','Negative', or 'Neutral') <comma-seperated> REPORT HERE:
'''
)
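# Hypothetical usage sketch (illustrative only): both PromptTemplates are rendered with .format();
# the variable values below are made up.
#
#   chat_text = chat_template.format(transcript=t, summary=s, chat_history=h,
#                                    user_message=m, sentiment_report=r)
#   sentiment_text = sentiment_prompt.format(transcript=t, summary=s)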
fact_check_prompt = '''
Fact-check this transcript for factual or logical inaccuracies or inconsistencies
\nWrite a report on the factuality / logic of the transcript
\nTRANSCRIPT: {}
\nTRANSCRIPT SUMMARY: {}
\nAI FACT CHECK RESPONSE HERE:
''' | [
"langchain.prompts.PromptTemplate"
] | [((88, 562), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['transcript', 'summary', 'chat_history', 'user_message', 'sentiment_report']", 'template': '"""\n You are an AI chatbot intended to discuss about the user\'s audio transcription.\n \nTRANSCRIPT: "{transcript}"\n \nTRANSCIRPT SUMMARY: "{summary}"\n \nTRANSCRIPT SENTIMENT REPORT: "{sentiment_report}"\n \nCHAT HISTORY: {chat_history}\n \nUSER MESSAGE: "{user_message}"\n \nAI RESPONSE HERE:\n """'}), '(input_variables=[\'transcript\', \'summary\', \'chat_history\',\n \'user_message\', \'sentiment_report\'], template=\n """\n You are an AI chatbot intended to discuss about the user\'s audio transcription.\n \nTRANSCRIPT: "{transcript}"\n \nTRANSCIRPT SUMMARY: "{summary}"\n \nTRANSCRIPT SENTIMENT REPORT: "{sentiment_report}"\n \nCHAT HISTORY: {chat_history}\n \nUSER MESSAGE: "{user_message}"\n \nAI RESPONSE HERE:\n """\n )\n', (102, 562), False, 'from langchain.prompts import PromptTemplate\n'), ((595, 1095), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['transcript', 'summary']", 'template': '"""\n Return a single word sentiment of either [\'Positive\',\'Negative\' or \'Neutral\'] from this transcript and summary.\n After that single word sentiment, add a comma, then return a sentiment report, analyzing transcript sentiment.\n \nTRANSCRIPT: {transcript}\n \nTRANSCRIPT SUMMARY: {summary}\n \nSENTIMENT LABEL HERE (\'Positive\',\'Negative\', or \'Neutral\') <comma-seperated> REPORT HERE:\n """'}), '(input_variables=[\'transcript\', \'summary\'], template=\n """\n Return a single word sentiment of either [\'Positive\',\'Negative\' or \'Neutral\'] from this transcript and summary.\n After that single word sentiment, add a comma, then return a sentiment report, analyzing transcript sentiment.\n \nTRANSCRIPT: {transcript}\n \nTRANSCRIPT SUMMARY: {summary}\n \nSENTIMENT LABEL HERE (\'Positive\',\'Negative\', or \'Neutral\') <comma-seperated> REPORT HERE:\n """\n )\n', (609, 1095), False, 'from langchain.prompts import PromptTemplate\n')] |
from enum import Enum
from typing import Callable, Tuple
from langchain.agents.agent import AgentExecutor
from langchain.agents.tools import BaseTool, Tool
class ToolScope(Enum):
GLOBAL = "global"
SESSION = "session"
SessionGetter = Callable[[], Tuple[str, AgentExecutor]]
def tool(
name: str,
description: str,
scope: ToolScope = ToolScope.GLOBAL,
):
def decorator(func):
func.name = name
func.description = description
func.is_tool = True
func.scope = scope
return func
return decorator
class ToolWrapper:
def __init__(self, name: str, description: str, scope: ToolScope, func):
self.name = name
self.description = description
self.scope = scope
self.func = func
def is_global(self) -> bool:
return self.scope == ToolScope.GLOBAL
def is_per_session(self) -> bool:
return self.scope == ToolScope.SESSION
def to_tool(
self,
get_session: SessionGetter = lambda: [],
) -> BaseTool:
func = self.func
if self.is_per_session():
func = lambda *args, **kwargs: self.func(
*args, **kwargs, get_session=get_session
)
return Tool(
name=self.name,
description=self.description,
func=func,
)
class BaseToolSet:
def tool_wrappers(cls) -> list[ToolWrapper]:
methods = [
getattr(cls, m) for m in dir(cls) if hasattr(getattr(cls, m), "is_tool")
]
return [ToolWrapper(m.name, m.description, m.scope, m) for m in methods]
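# Hypothetical usage sketch (illustrative only, not part of the original module): a tool set
# class decorated with @tool, converted into langchain Tools via tool_wrappers()/to_tool().
#
#   class EchoToolSet(BaseToolSet):
#       @tool(name="echo", description="Echo the input back to the caller.")
#       def echo(self, text: str) -> str:
#           return text
#
#   wrappers = EchoToolSet().tool_wrappers()
#   langchain_tools = [w.to_tool() for w in wrappers]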
| [
"langchain.agents.tools.Tool"
] | [((1245, 1306), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': 'self.name', 'description': 'self.description', 'func': 'func'}), '(name=self.name, description=self.description, func=func)\n', (1249, 1306), False, 'from langchain.agents.tools import BaseTool, Tool\n')] |
import asyncio
from langchain.document_loaders import PyPDFLoader, DirectoryLoader
from langchain import PromptTemplate
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import CTransformers
from langchain.chains import RetrievalQA
import chainlit as cl
DB_FAISS_PATH = 'vectorstores/db_faiss'
custom_prompt_template = """Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Context: {context}
Question: {question}
Only return the helpful answer below and nothing else.
Helpful answer:
"""
def set_custom_prompt():
"""
Prompt template for QA retrieval for each vectorstore
"""
prompt = PromptTemplate(template=custom_prompt_template,
input_variables=['context', 'question'])
return prompt
# Retrieval QA Chain
def retrieval_qa_chain(llm, prompt, db):
qa_chain = RetrievalQA.from_chain_type(llm=llm,
chain_type='stuff',
retriever=db.as_retriever(search_kwargs={'k': 2}),
return_source_documents=True,
chain_type_kwargs={'prompt': prompt}
)
return qa_chain
# Loading the model
def load_llm():
# Load the locally downloaded model here
llm = CTransformers(
model="TheBloke/Llama-2-7B-Chat-GGML",
model_type="llama",
max_new_tokens=512,
temperature=0.5
)
return llm
# QA Model Function
async def qa_bot():
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
model_kwargs={'device': 'cpu'})
db = FAISS.load_local(DB_FAISS_PATH, embeddings)
llm = load_llm()
qa_prompt = set_custom_prompt()
qa = retrieval_qa_chain(llm, qa_prompt, db)
return qa
# Output function
async def final_result(query):
qa_result = await qa_bot()
    response = await qa_result.acall({'query': query})
return response
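# Hypothetical usage sketch (illustrative only): outside of chainlit the coroutine can be driven
# with asyncio; the question string is made up.
#
#   res = asyncio.run(final_result("What are the symptoms of anemia?"))
#   print(res["result"])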
# chainlit code
@cl.on_chat_start
async def start():
chain = await qa_bot()
# msg = cl.Message(content="Starting the bot...")
# await msg.send()
# msg.content = "Hi, Welcome to Medical Bot. What is your query?"
# await msg.update()
cl.user_session.set("chain", chain)
@cl.on_message
async def main(message):
chain = cl.user_session.get("chain")
cb = cl.AsyncLangchainCallbackHandler(
stream_final_answer=True, answer_prefix_tokens=["FINAL", "ANSWER"]
)
cb.answer_reached = True
res = await chain.acall(message.content, callbacks=[cb])
answer = res["result"]
sources = res["source_documents"]
if sources:
answer += f"\nSources:" + str(sources)
else:
answer += "\nNo sources found"
await cl.Message(content=answer).send()
if __name__ == "__main__":
asyncio.run(cl.main())
| [
"langchain.llms.CTransformers",
"langchain.embeddings.HuggingFaceEmbeddings",
"langchain.vectorstores.FAISS.load_local",
"langchain.PromptTemplate"
] | [((808, 900), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'custom_prompt_template', 'input_variables': "['context', 'question']"}), "(template=custom_prompt_template, input_variables=['context',\n 'question'])\n", (822, 900), False, 'from langchain import PromptTemplate\n'), ((1522, 1635), 'langchain.llms.CTransformers', 'CTransformers', ([], {'model': '"""TheBloke/Llama-2-7B-Chat-GGML"""', 'model_type': '"""llama"""', 'max_new_tokens': '(512)', 'temperature': '(0.5)'}), "(model='TheBloke/Llama-2-7B-Chat-GGML', model_type='llama',\n max_new_tokens=512, temperature=0.5)\n", (1535, 1635), False, 'from langchain.llms import CTransformers\n'), ((1753, 1863), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/all-MiniLM-L6-v2"""', 'model_kwargs': "{'device': 'cpu'}"}), "(model_name='sentence-transformers/all-MiniLM-L6-v2',\n model_kwargs={'device': 'cpu'})\n", (1774, 1863), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((1910, 1953), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['DB_FAISS_PATH', 'embeddings'], {}), '(DB_FAISS_PATH, embeddings)\n', (1926, 1953), False, 'from langchain.vectorstores import FAISS\n'), ((2504, 2539), 'chainlit.user_session.set', 'cl.user_session.set', (['"""chain"""', 'chain'], {}), "('chain', chain)\n", (2523, 2539), True, 'import chainlit as cl\n'), ((2597, 2625), 'chainlit.user_session.get', 'cl.user_session.get', (['"""chain"""'], {}), "('chain')\n", (2616, 2625), True, 'import chainlit as cl\n'), ((2636, 2740), 'chainlit.AsyncLangchainCallbackHandler', 'cl.AsyncLangchainCallbackHandler', ([], {'stream_final_answer': '(True)', 'answer_prefix_tokens': "['FINAL', 'ANSWER']"}), "(stream_final_answer=True,\n answer_prefix_tokens=['FINAL', 'ANSWER'])\n", (2668, 2740), True, 'import chainlit as cl\n'), ((3124, 3133), 'chainlit.main', 'cl.main', ([], {}), '()\n', (3131, 3133), True, 'import chainlit as cl\n'), ((3043, 3069), 'chainlit.Message', 'cl.Message', ([], {'content': 'answer'}), '(content=answer)\n', (3053, 3069), True, 'import chainlit as cl\n')] |
import tkinter as tk
from tkinter import ttk
from tkinter.scrolledtext import ScrolledText
import openai
import os
import sys
from dotenv import load_dotenv, set_key
from langchain.agents import Tool
from langchain.memory import ConversationTokenBufferMemory, ReadOnlySharedMemory
from langchain.chat_models import ChatOpenAI
from langchain.utilities import GoogleSearchAPIWrapper, WikipediaAPIWrapper, WolframAlphaAPIWrapper, OpenWeatherMapAPIWrapper
from langchain.agents import initialize_agent
from langchain.chains import LLMMathChain, RetrievalQA
from langchain.utilities.zapier import ZapierNLAWrapper
from langchain.agents.agent_toolkits import ZapierToolkit
from pathlib import Path
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
import pinecone
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.summarize import load_summarize_chain
import traceback
import configparser
import tkinter
from tkinter import ttk, Toplevel, messagebox
import sv_ttk
config = configparser.ConfigParser()
config.read("settings.ini")
bot_name = config.get("settings", "bot_name")
BOT_NAME = bot_name
def restart_app():
python = sys.executable
os.execl(python, python, *sys.argv)
class SearchSettings:
def __init__(self, file_path="settings.ini"):
config = configparser.ConfigParser()
config.read(file_path)
self.enable_search = config.getboolean("tools", "enable_search")
self.enable_wikipedia = config.getboolean("tools", "enable_wikipedia")
self.enable_calculator = config.getboolean("tools", "enable_calculator")
self.enable_wolfram_alpha = config.getboolean("tools", "enable_wolfram_alpha")
self.enable_weather = config.getboolean("tools", "enable_weather")
self.enable_zapier = config.getboolean("tools", "enable_zapier")
self.enable_pinecone = config.getboolean("tools", "enable_pinecone")
# Initialize variables
env_path = Path(__file__).parent / ".env"
load_dotenv(dotenv_path=env_path)
openai.api_key = os.getenv("OPENAI_API_KEY")
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
GOOGLE_CSE_ID = os.getenv("GOOGLE_CSE_ID")
WOLFRAM_ALPHA_APPID = os.getenv("WOLFRAM_ALPHA_APPID")
OPENWEATHERMAP_API_KEY = os.getenv("OPENWEATHERMAP_API_KEY")
ZAPIER_NLA_API_KEY = os.getenv("ZAPIER_NLA_API_KEY")
PINE_API_KEY = os.getenv("PINE_API_KEY")
PINE_ENV = os.getenv("PINE_ENV")
# Initialize Pinecone
embeddings = OpenAIEmbeddings()
pinecone_env = config.get("pinecone", "pinecone_env")
pinecone.init(
api_key=PINE_API_KEY,
environment=pinecone_env
)
index_name = config.get("pinecone", "pinecone_index")
docsearch = Pinecone.from_existing_index(index_name, embeddings)
settings = SearchSettings("settings.ini")
# Define the memory and the LLM engine
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.5, max_tokens=150, verbose=True)
memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=1500, memory_key="chat_history", return_messages=True)
doc_chain = load_qa_chain(llm, chain_type="map_reduce")
readonlymemory = ReadOnlySharedMemory(memory=memory)
# Define the tools
search = GoogleSearchAPIWrapper(k=2)
wikipedia = WikipediaAPIWrapper()
llm_math = LLMMathChain(llm=llm)
wolfram_alpha = WolframAlphaAPIWrapper()
weather = OpenWeatherMapAPIWrapper()
zapier= ZapierNLAWrapper()
toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)
retriever = docsearch.as_retriever(search_type="similarity", search_kwargs={"k":2})
pinecone_tool = RetrievalQA.from_chain_type(llm=llm, chain_type="map_rerank", retriever=docsearch.as_retriever())
wikisummarize = load_summarize_chain(llm, chain_type="stuff")
class WikiPage:
def __init__(self, title, summary):
self.title = title
self.page_content = summary
self.metadata = {}
def wiki_summary(search_query: str) -> str:
wikipedia_wrapper = wikipedia
wiki_result = wikipedia_wrapper.run(search_query)
if not wiki_result:
return "No good Wikipedia Search Result was found"
wiki_pages = []
for section in wiki_result.split("\n\n"):
title, summary = section.split("\nSummary: ", 1)
title = title.replace("Page: ", "")
wiki_pages.append(WikiPage(title=title, summary=summary))
summary_result = wikisummarize.run(wiki_pages)
return summary_result
tools = []
if settings.enable_search:
tools.append(
Tool(
name="Search",
func=search.run,
description="Useful when you need to answer questions about current events and real-time information"
)
)
if settings.enable_wikipedia:
tools.append(
Tool(
name="Wikipedia",
func=wiki_summary,
description="Useful for searching information on historical information on Wikipedia. "
"Use this more than the normal search if the question is about events that occured before 2023, like the 'What was the 2008 financial crisis?' or 'Who won the 2016 US presidential election?'"
)
)
if settings.enable_calculator:
tools.append(
Tool(
name='Calculator',
func=llm_math.run,
description='Useful for when you need to answer questions about math.'
)
)
if settings.enable_wolfram_alpha:
tools.append(
Tool(
name='Wolfram Alpha',
func=wolfram_alpha.run,
description="Useful for when you need to answer questions about Math, "
"Science, Technology, Culture, people, Society and Everyday Life. "
"Input should be a search query"
)
)
if settings.enable_weather:
tools.append(
Tool(
name='Weather',
func=weather.run,
description="Useful for when you need to answer questions about weather."
)
)
# Adjust pinecone tool settings in settings.ini
if settings.enable_pinecone:
pinecone_name = config.get("pinecone", "tool_name")
pinecone_description = config.get("pinecone", "tool_description")
tools.append(
Tool(
name=pinecone_name,
func=pinecone_tool.run,
description=pinecone_description
)
)
bot_context = config.get("settings", "bot_context")
CONTEXT = bot_context
# Workaround so that the agent chain respects the Zapier settings since it uses a toolkit
if settings.enable_zapier:
agent_chain = initialize_agent([*toolkit.get_tools(), *tools], llm, agent="chat-conversational-react-description", verbose=True, memory=memory)
else:
agent_chain = initialize_agent(tools, llm, agent="chat-conversational-react-description", verbose=True, memory=memory)
agent_chain.memory.chat_memory.add_ai_message(CONTEXT)
#Define the Settings GUI
def open_settings():
settings_window = Toplevel()
settings_window.title("Settings")
settings_window.iconbitmap(default="icon.ico")
def save_settings():
config.set("settings", "bot_name", bot_name_var.get())
config.set("settings", "bot_context", bot_context_var.get())
config.set("settings", "hotkey", hotkey_var.get())
for key, var in settings_vars.items():
config.set("tools", key, str(var.get()))
config.set("pinecone", "tool_name", pinecone_tool_name_var.get())
config.set("pinecone", "tool_description", pinecone_tool_description_var.get())
config.set("pinecone", "pinecone_index", pinecone_index_var.get())
config.set("pinecone", "pinecone_env", pinecone_env_var.get())
config.set("voice", "use_bark", str(use_bark_var.get()))
config.set("voice", "history_prompt", history_prompt_var.get())
with open("settings.ini", "w") as configfile:
config.write(configfile)
restart = messagebox.askyesno("Settings Saved", "Settings have been saved successfully. Would you like to restart the application?")
if restart:
restart_app()
else:
settings_window.destroy()
settings_frame = ttk.Frame(settings_window, padding="10")
settings_frame.grid(row=0, column=0, sticky="nsew")
bot_name_var = tk.StringVar()
bot_context_var = tk.StringVar()
hotkey_var = tk.StringVar()
bot_name_var.set(config.get("settings", "bot_name"))
bot_context_var.set(config.get("settings", "bot_context"))
hotkey_var.set(config.get("settings", "hotkey"))
settings_vars = {}
for key in config.options("tools"):
settings_vars[key] = tk.BooleanVar()
settings_vars[key].set(config.getboolean("tools", key))
ttk.Label(settings_frame, text="General Settings").grid(row=0, column=0, sticky="w", pady=5)
ttk.Separator(settings_frame, orient="horizontal").grid(row=1, column=0, sticky="ew")
ttk.Label(settings_frame, text="Bot Name:").grid(row=2, column=0, sticky="w")
ttk.Entry(settings_frame, textvariable=bot_name_var).grid(row=3, column=0, sticky="nsew")
ttk.Label(settings_frame, text="Bot Context:").grid(row=4, column=0, sticky="w")
ttk.Entry(settings_frame, textvariable=bot_context_var).grid(row=5, column=0, sticky="nsew")
ttk.Label(settings_frame, text="Hotkey:").grid(row=6, column=0, sticky="w")
ttk.Entry(settings_frame, textvariable=hotkey_var).grid(row=7, column=0, sticky="nsew")
ttk.Label(settings_frame, text="Tools Selection").grid(row=8, column=0, sticky="w", pady=5)
ttk.Separator(settings_frame, orient="horizontal").grid(row=9, column=0, sticky="ew")
row = 10
for key, var in settings_vars.items():
ttk.Checkbutton(settings_frame, text=key, variable=var).grid(row=row, column=0, sticky="w")
row += 1
ttk.Label(settings_frame, text="Pinecone Settings").grid(row=row, column=0, sticky="w", pady=5)
row += 1
ttk.Separator(settings_frame, orient="horizontal").grid(row=row, column=0, sticky="ew")
row += 1
pinecone_tool_name_var = tk.StringVar()
pinecone_tool_description_var = tk.StringVar()
pinecone_index_var = tk.StringVar()
pinecone_env_var = tk.StringVar()
pinecone_tool_name_var.set(config.get("pinecone", "tool_name"))
pinecone_tool_description_var.set(config.get("pinecone", "tool_description"))
pinecone_index_var.set(config.get("pinecone", "pinecone_index"))
pinecone_env_var.set(config.get("pinecone", "pinecone_env"))
ttk.Label(settings_frame, text="Tool Name:").grid(row=row, column=0, sticky="w")
row += 1
ttk.Entry(settings_frame, textvariable=pinecone_tool_name_var).grid(row=row, column=0, sticky="nsew")
row += 1
ttk.Label(settings_frame, text="Tool Description:").grid(row=row, column=0, sticky="w")
row += 1
ttk.Entry(settings_frame, textvariable=pinecone_tool_description_var).grid(row=row, column=0, sticky="nsew")
row += 1
ttk.Label(settings_frame, text="Pinecone Index:").grid(row=row, column=0, sticky="w")
row += 1
ttk.Entry(settings_frame, textvariable=pinecone_index_var).grid(row=row, column=0, sticky="nsew")
row += 1
ttk.Label(settings_frame, text="Pinecone Environment:").grid(row=row, column=0, sticky="w")
row += 1
ttk.Entry(settings_frame, textvariable=pinecone_env_var).grid(row=row, column=0, sticky="nsew")
row += 1
ttk.Label(settings_frame, text="Voice Settings").grid(row=row, column=0, sticky="w", pady=5)
row += 1
ttk.Separator(settings_frame, orient="horizontal").grid(row=row, column=0, sticky="ew")
row += 1
use_bark_var = tk.BooleanVar()
history_prompt_var = tk.StringVar()
use_bark_var.set(config.getboolean("voice", "use_bark"))
history_prompt_var.set(config.get("voice", "history_prompt"))
ttk.Checkbutton(settings_frame, text="Use Bark", variable=use_bark_var).grid(row=row, column=0, sticky="w")
row += 1
ttk.Label(settings_frame, text="Bark Voice:").grid(row=row, column=0, sticky="w")
row += 1
ttk.Entry(settings_frame, textvariable=history_prompt_var).grid(row=row, column=0, sticky="nsew")
row += 1
ttk.Button(settings_frame, text="Edit API Keys", command=edit_api_keys).grid(row=row, column=0, pady=(10, 0))
row += 1
ttk.Button(settings_frame, text="Save Settings", command=save_settings).grid(row=row, column=0, pady=(10, 0))
settings_window.columnconfigure(0, weight=1)
settings_window.rowconfigure(0, weight=1)
# Define the keys GUI
def edit_api_keys():
load_dotenv()
def save_api_keys():
def update_env_key(env_file, key, value):
with open(env_file, 'r') as file:
lines = file.readlines()
with open(env_file, 'w') as file:
for line in lines:
if line.startswith(key + '='):
line = f"{key}={value}\n"
file.write(line)
env_vars = {
'OPENAI_API_KEY': openai_api_key_var.get(),
'GOOGLE_API_KEY': google_api_key_var.get(),
'GOOGLE_CSE_ID': google_cse_id_var.get(),
'WOLFRAM_ALPHA_APPID': wolfram_alpha_appid_var.get(),
'OPENWEATHERMAP_API_KEY': openweathermap_api_key_var.get(),
'ZAPIER_NLA_API_KEY': zapier_nla_api_key_var.get(),
'PINE_API_KEY': pine_api_key_var.get()
}
for key, value in env_vars.items():
update_env_key('.env', key, value)
restart = messagebox.askyesno("API Keys Saved", "API keys have been saved successfully. Would you like to restart the application?")
if restart:
restart_app()
else:
api_keys_window.destroy()
api_keys_window = Toplevel()
api_keys_window.title("API Keys")
api_keys_window.iconbitmap(default="icon.ico")
api_keys_frame = ttk.Frame(api_keys_window, padding="10")
api_keys_frame.grid(row=0, column=0, sticky="nsew")
openai_api_key_var = tk.StringVar(value=os.getenv('OPENAI_API_KEY'))
google_api_key_var = tk.StringVar(value=os.getenv('GOOGLE_API_KEY'))
google_cse_id_var = tk.StringVar(value=os.getenv('GOOGLE_CSE_ID'))
wolfram_alpha_appid_var = tk.StringVar(value=os.getenv('WOLFRAM_ALPHA_APPID'))
openweathermap_api_key_var = tk.StringVar(value=os.getenv('OPENWEATHERMAP_API_KEY'))
zapier_nla_api_key_var = tk.StringVar(value=os.getenv('ZAPIER_NLA_API_KEY'))
pine_api_key_var = tk.StringVar(value=os.getenv('PINE_API_KEY'))
vars = {
'OPENAI_API_KEY': openai_api_key_var,
'GOOGLE_API_KEY': google_api_key_var,
'GOOGLE_CSE_ID': google_cse_id_var,
'WOLFRAM_ALPHA_APPID': wolfram_alpha_appid_var,
'OPENWEATHERMAP_API_KEY': openweathermap_api_key_var,
'ZAPIER_NLA_API_KEY': zapier_nla_api_key_var,
'PINE_API_KEY': pine_api_key_var
}
display_names = {
'OPENAI_API_KEY': 'OpenAI API Key',
'GOOGLE_API_KEY': 'Google API Key',
'GOOGLE_CSE_ID': 'Google CSE ID',
'WOLFRAM_ALPHA_APPID': 'Wolfram Alpha App ID',
'OPENWEATHERMAP_API_KEY': 'OpenWeatherMap API Key',
'ZAPIER_NLA_API_KEY': 'Zapier NLA API Key',
'PINE_API_KEY': 'Pine API Key'
}
row = 0
for key, var in vars.items():
display_key = display_names.get(key, key)
ttk.Label(api_keys_frame, text=display_key + ":").grid(row=row, column=0, sticky="w")
ttk.Entry(api_keys_frame, textvariable=var).grid(row=row, column=1, sticky="nsew")
row += 1
ttk.Button(api_keys_frame, text="Save API Keys", command=save_api_keys).grid(row=row, column=0, pady=(10, 0), columnspan=2)
api_keys_window.columnconfigure(0, weight=1)
api_keys_window.rowconfigure(0, weight=1)
api_keys_frame.columnconfigure(1, weight=1)
# Define the main function
def main():
def on_submit():
user_input = user_entry.get()
user_entry.delete(0, tk.END)
if user_input:
chat_history.config(state="normal")
chat_history.insert(tk.END, f"User: {user_input}\n")
chat_history.config(state="disabled")
chat_history.yview(tk.END)
root.update_idletasks()
agent_chain.memory.chat_memory.add_user_message(user_input)
try:
response = agent_chain.run(input=user_input)
bot_response = response
except Exception as e:
tb_string = traceback.format_exc()
if "This model's maximum context length is" in tb_string:
agent_chain.memory.chat_memory.clear()
bot_response = (
"Apologies, the last request went over the maximum context length so I have to clear my memory. Is there anything else I can help you with?"
)
else:
traceback.print_exc()
bot_response = f"Apologies, An error occurred while processing your request: {str(e)}."
chat_history.config(state="normal")
chat_history.insert(tk.END, f"{BOT_NAME}: {bot_response}\n")
chat_history.config(state="disabled")
chat_history.yview(tk.END)
agent_chain.memory.chat_memory.add_ai_message(bot_response)
root = tkinter.Tk()
root.title(f"{BOT_NAME} Chatbot")
root.iconbitmap(default="icon.ico")
chat_frame = ttk.Frame(root, padding="5")
chat_frame.grid(row=0, column=0, sticky="nsew")
user_frame = ttk.Frame(root, padding="5")
user_frame.grid(row=1, column=0, sticky="nsew")
chat_history = ScrolledText(chat_frame, wrap="word", width=80, height=20, state="disabled")
chat_history.grid(row=0, column=0, sticky="nsew")
user_entry = ttk.Entry(user_frame, width=70)
user_entry.grid(row=0, column=0, sticky="nsew")
user_entry.focus()
user_entry.bind("<Return>", lambda event: on_submit())
settings_button = ttk.Button(user_frame, text="Settings", command=open_settings)
settings_button.grid(row=0, column=2, padx=(5, 0))
submit_button = ttk.Button(user_frame, text="Submit", command=on_submit)
submit_button.grid(row=0, column=1, padx=(5, 0))
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
chat_frame.columnconfigure(0, weight=1)
chat_frame.rowconfigure(0, weight=1)
user_frame.columnconfigure(0, weight=5)
user_frame.rowconfigure(0, weight=1)
sv_ttk.set_theme("dark")
root.mainloop()
if __name__ == "__main__":
main() | [
"langchain.chains.question_answering.load_qa_chain",
"langchain.chains.summarize.load_summarize_chain",
"langchain.agents.initialize_agent",
"langchain.utilities.WolframAlphaAPIWrapper",
"langchain.memory.ConversationTokenBufferMemory",
"langchain.utilities.WikipediaAPIWrapper",
"langchain.utilities.OpenWeatherMapAPIWrapper",
"langchain.utilities.zapier.ZapierNLAWrapper",
"langchain.agents.Tool",
"langchain.chat_models.ChatOpenAI",
"langchain.agents.agent_toolkits.ZapierToolkit.from_zapier_nla_wrapper",
"langchain.vectorstores.Pinecone.from_existing_index",
"langchain.chains.LLMMathChain",
"langchain.memory.ReadOnlySharedMemory",
"langchain.utilities.GoogleSearchAPIWrapper",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((1053, 1080), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1078, 1080), False, 'import configparser\n'), ((2024, 2057), 'dotenv.load_dotenv', 'load_dotenv', ([], {'dotenv_path': 'env_path'}), '(dotenv_path=env_path)\n', (2035, 2057), False, 'from dotenv import load_dotenv, set_key\n'), ((2075, 2102), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2084, 2102), False, 'import os\n'), ((2120, 2147), 'os.getenv', 'os.getenv', (['"""GOOGLE_API_KEY"""'], {}), "('GOOGLE_API_KEY')\n", (2129, 2147), False, 'import os\n'), ((2164, 2190), 'os.getenv', 'os.getenv', (['"""GOOGLE_CSE_ID"""'], {}), "('GOOGLE_CSE_ID')\n", (2173, 2190), False, 'import os\n'), ((2213, 2245), 'os.getenv', 'os.getenv', (['"""WOLFROM_ALPHA_APPID"""'], {}), "('WOLFROM_ALPHA_APPID')\n", (2222, 2245), False, 'import os\n'), ((2271, 2306), 'os.getenv', 'os.getenv', (['"""OPENWEATHERMAP_API_KEY"""'], {}), "('OPENWEATHERMAP_API_KEY')\n", (2280, 2306), False, 'import os\n'), ((2328, 2359), 'os.getenv', 'os.getenv', (['"""ZAPIER_NLA_API_KEY"""'], {}), "('ZAPIER_NLA_API_KEY')\n", (2337, 2359), False, 'import os\n'), ((2375, 2400), 'os.getenv', 'os.getenv', (['"""PINE_API_KEY"""'], {}), "('PINE_API_KEY')\n", (2384, 2400), False, 'import os\n'), ((2412, 2433), 'os.getenv', 'os.getenv', (['"""PINE_ENV"""'], {}), "('PINE_ENV')\n", (2421, 2433), False, 'import os\n'), ((2470, 2488), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (2486, 2488), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2544, 2605), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'PINE_API_KEY', 'environment': 'pinecone_env'}), '(api_key=PINE_API_KEY, environment=pinecone_env)\n', (2557, 2605), False, 'import pinecone\n'), ((2684, 2736), 'langchain.vectorstores.Pinecone.from_existing_index', 'Pinecone.from_existing_index', (['index_name', 'embeddings'], {}), '(index_name, embeddings)\n', (2712, 2736), False, 'from langchain.vectorstores import Pinecone\n'), ((2826, 2915), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0.5)', 'max_tokens': '(150)', 'verbose': '(True)'}), "(model_name='gpt-3.5-turbo', temperature=0.5, max_tokens=150,\n verbose=True)\n", (2836, 2915), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2921, 3035), 'langchain.memory.ConversationTokenBufferMemory', 'ConversationTokenBufferMemory', ([], {'llm': 'llm', 'max_token_limit': '(1500)', 'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(llm=llm, max_token_limit=1500, memory_key=\n 'chat_history', return_messages=True)\n", (2950, 3035), False, 'from langchain.memory import ConversationTokenBufferMemory, ReadOnlySharedMemory\n'), ((3043, 3086), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""map_reduce"""'}), "(llm, chain_type='map_reduce')\n", (3056, 3086), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((3104, 3139), 'langchain.memory.ReadOnlySharedMemory', 'ReadOnlySharedMemory', ([], {'memory': 'memory'}), '(memory=memory)\n', (3124, 3139), False, 'from langchain.memory import ConversationTokenBufferMemory, ReadOnlySharedMemory\n'), ((3169, 3196), 'langchain.utilities.GoogleSearchAPIWrapper', 'GoogleSearchAPIWrapper', ([], {'k': '(2)'}), '(k=2)\n', (3191, 3196), False, 'from langchain.utilities import GoogleSearchAPIWrapper, WikipediaAPIWrapper, WolframAlphaAPIWrapper, 
OpenWeatherMapAPIWrapper\n'), ((3209, 3230), 'langchain.utilities.WikipediaAPIWrapper', 'WikipediaAPIWrapper', ([], {}), '()\n', (3228, 3230), False, 'from langchain.utilities import GoogleSearchAPIWrapper, WikipediaAPIWrapper, WolframAlphaAPIWrapper, OpenWeatherMapAPIWrapper\n'), ((3242, 3263), 'langchain.chains.LLMMathChain', 'LLMMathChain', ([], {'llm': 'llm'}), '(llm=llm)\n', (3254, 3263), False, 'from langchain.chains import LLMMathChain, RetrievalQA\n'), ((3280, 3304), 'langchain.utilities.WolframAlphaAPIWrapper', 'WolframAlphaAPIWrapper', ([], {}), '()\n', (3302, 3304), False, 'from langchain.utilities import GoogleSearchAPIWrapper, WikipediaAPIWrapper, WolframAlphaAPIWrapper, OpenWeatherMapAPIWrapper\n'), ((3315, 3341), 'langchain.utilities.OpenWeatherMapAPIWrapper', 'OpenWeatherMapAPIWrapper', ([], {}), '()\n', (3339, 3341), False, 'from langchain.utilities import GoogleSearchAPIWrapper, WikipediaAPIWrapper, WolframAlphaAPIWrapper, OpenWeatherMapAPIWrapper\n'), ((3350, 3368), 'langchain.utilities.zapier.ZapierNLAWrapper', 'ZapierNLAWrapper', ([], {}), '()\n', (3366, 3368), False, 'from langchain.utilities.zapier import ZapierNLAWrapper\n'), ((3379, 3424), 'langchain.agents.agent_toolkits.ZapierToolkit.from_zapier_nla_wrapper', 'ZapierToolkit.from_zapier_nla_wrapper', (['zapier'], {}), '(zapier)\n', (3416, 3424), False, 'from langchain.agents.agent_toolkits import ZapierToolkit\n'), ((3639, 3684), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (3659, 3684), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((1227, 1262), 'os.execl', 'os.execl', (['python', 'python', '*sys.argv'], {}), '(python, python, *sys.argv)\n', (1235, 1262), False, 'import os\n'), ((6621, 6729), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': '"""chat-conversational-react-description"""', 'verbose': '(True)', 'memory': 'memory'}), "(tools, llm, agent='chat-conversational-react-description',\n verbose=True, memory=memory)\n", (6637, 6729), False, 'from langchain.agents import initialize_agent\n'), ((6851, 6861), 'tkinter.Toplevel', 'Toplevel', ([], {}), '()\n', (6859, 6861), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((8077, 8117), 'tkinter.ttk.Frame', 'ttk.Frame', (['settings_window'], {'padding': '"""10"""'}), "(settings_window, padding='10')\n", (8086, 8117), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((8194, 8208), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (8206, 8208), True, 'import tkinter as tk\n'), ((8231, 8245), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (8243, 8245), True, 'import tkinter as tk\n'), ((8263, 8277), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (8275, 8277), True, 'import tkinter as tk\n'), ((9953, 9967), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (9965, 9967), True, 'import tkinter as tk\n'), ((10004, 10018), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (10016, 10018), True, 'import tkinter as tk\n'), ((10044, 10058), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (10056, 10058), True, 'import tkinter as tk\n'), ((10082, 10096), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (10094, 10096), True, 'import tkinter as tk\n'), ((11503, 11518), 'tkinter.BooleanVar', 'tk.BooleanVar', ([], {}), '()\n', (11516, 11518), True, 'import tkinter as tk\n'), ((11544, 11558), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (11556, 
11558), True, 'import tkinter as tk\n'), ((12409, 12422), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (12420, 12422), False, 'from dotenv import load_dotenv, set_key\n'), ((13612, 13622), 'tkinter.Toplevel', 'Toplevel', ([], {}), '()\n', (13620, 13622), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((13733, 13773), 'tkinter.ttk.Frame', 'ttk.Frame', (['api_keys_window'], {'padding': '"""10"""'}), "(api_keys_window, padding='10')\n", (13742, 13773), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((17154, 17166), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (17164, 17166), False, 'import tkinter\n'), ((17263, 17291), 'tkinter.ttk.Frame', 'ttk.Frame', (['root'], {'padding': '"""5"""'}), "(root, padding='5')\n", (17272, 17291), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((17362, 17390), 'tkinter.ttk.Frame', 'ttk.Frame', (['root'], {'padding': '"""5"""'}), "(root, padding='5')\n", (17371, 17390), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((17463, 17539), 'tkinter.scrolledtext.ScrolledText', 'ScrolledText', (['chat_frame'], {'wrap': '"""word"""', 'width': '(80)', 'height': '(20)', 'state': '"""disabled"""'}), "(chat_frame, wrap='word', width=80, height=20, state='disabled')\n", (17475, 17539), False, 'from tkinter.scrolledtext import ScrolledText\n'), ((17612, 17643), 'tkinter.ttk.Entry', 'ttk.Entry', (['user_frame'], {'width': '(70)'}), '(user_frame, width=70)\n', (17621, 17643), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((17801, 17863), 'tkinter.ttk.Button', 'ttk.Button', (['user_frame'], {'text': '"""Settings"""', 'command': 'open_settings'}), "(user_frame, text='Settings', command=open_settings)\n", (17811, 17863), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((17939, 17995), 'tkinter.ttk.Button', 'ttk.Button', (['user_frame'], {'text': '"""Submit"""', 'command': 'on_submit'}), "(user_frame, text='Submit', command=on_submit)\n", (17949, 17995), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((18298, 18322), 'sv_ttk.set_theme', 'sv_ttk.set_theme', (['"""dark"""'], {}), "('dark')\n", (18314, 18322), False, 'import sv_ttk\n'), ((1353, 1380), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1378, 1380), False, 'import configparser\n'), ((1993, 2007), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1997, 2007), False, 'from pathlib import Path\n'), ((4427, 4576), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'search.run', 'description': '"""Useful when you need to answer questions about current events and real-time information"""'}), "(name='Search', func=search.run, description=\n 'Useful when you need to answer questions about current events and real-time information'\n )\n", (4431, 4576), False, 'from langchain.agents import Tool\n'), ((4675, 5004), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Wikipedia"""', 'func': 'wiki_summary', 'description': '"""Useful for searching information on historical information on Wikipedia. Use this more than the normal search if the question is about events that occured before 2023, like the \'What was the 2008 financial crisis?\' or \'Who won the 2016 US presidential election?\'"""'}), '(name=\'Wikipedia\', func=wiki_summary, description=\n "Useful for searching information on historical information on Wikipedia. 
Use this more than the normal search if the question is about events that occured before 2023, like the \'What was the 2008 financial crisis?\' or \'Who won the 2016 US presidential election?\'"\n )\n', (4679, 5004), False, 'from langchain.agents import Tool\n'), ((5119, 5238), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Calculator"""', 'func': 'llm_math.run', 'description': '"""Useful for when you need to answer questions about math."""'}), "(name='Calculator', func=llm_math.run, description=\n 'Useful for when you need to answer questions about math.')\n", (5123, 5238), False, 'from langchain.agents import Tool\n'), ((5346, 5574), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Wolfram Alpha"""', 'func': 'wolfram_alpha.run', 'description': '"""Useful for when you need to answer questions about Math, Science, Technology, Culture, people, Society and Everyday Life. Input should be a search query"""'}), "(name='Wolfram Alpha', func=wolfram_alpha.run, description=\n 'Useful for when you need to answer questions about Math, Science, Technology, Culture, people, Society and Everyday Life. Input should be a search query'\n )\n", (5350, 5574), False, 'from langchain.agents import Tool\n'), ((5725, 5843), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Weather"""', 'func': 'weather.run', 'description': '"""Useful for when you need to answer questions about weather."""'}), "(name='Weather', func=weather.run, description=\n 'Useful for when you need to answer questions about weather.')\n", (5729, 5843), False, 'from langchain.agents import Tool\n'), ((6121, 6208), 'langchain.agents.Tool', 'Tool', ([], {'name': 'pinecone_name', 'func': 'pinecone_tool.run', 'description': 'pinecone_description'}), '(name=pinecone_name, func=pinecone_tool.run, description=\n pinecone_description)\n', (6125, 6208), False, 'from langchain.agents import Tool\n'), ((7834, 7965), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', (['"""Settings Saved"""', '"""Settings have been saved successfully. Would you like to restart the application?"""'], {}), "('Settings Saved',\n 'Settings have been saved successfully. Would you like to restart the application?'\n )\n", (7853, 7965), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((8544, 8559), 'tkinter.BooleanVar', 'tk.BooleanVar', ([], {}), '()\n', (8557, 8559), True, 'import tkinter as tk\n'), ((13368, 13499), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', (['"""API Keys Saved"""', '"""API keys have been saved successfully. Would you like to restart the application?"""'], {}), "('API Keys Saved',\n 'API keys have been saved successfully. 
Would you like to restart the application?'\n )\n", (13387, 13499), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((8629, 8679), 'tkinter.ttk.Label', 'ttk.Label', (['settings_frame'], {'text': '"""General Settings"""'}), "(settings_frame, text='General Settings')\n", (8638, 8679), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((8726, 8776), 'tkinter.ttk.Separator', 'ttk.Separator', (['settings_frame'], {'orient': '"""horizontal"""'}), "(settings_frame, orient='horizontal')\n", (8739, 8776), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((8817, 8860), 'tkinter.ttk.Label', 'ttk.Label', (['settings_frame'], {'text': '"""Bot Name:"""'}), "(settings_frame, text='Bot Name:')\n", (8826, 8860), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((8899, 8951), 'tkinter.ttk.Entry', 'ttk.Entry', (['settings_frame'], {'textvariable': 'bot_name_var'}), '(settings_frame, textvariable=bot_name_var)\n', (8908, 8951), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((8994, 9040), 'tkinter.ttk.Label', 'ttk.Label', (['settings_frame'], {'text': '"""Bot Context:"""'}), "(settings_frame, text='Bot Context:')\n", (9003, 9040), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((9079, 9134), 'tkinter.ttk.Entry', 'ttk.Entry', (['settings_frame'], {'textvariable': 'bot_context_var'}), '(settings_frame, textvariable=bot_context_var)\n', (9088, 9134), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((9177, 9218), 'tkinter.ttk.Label', 'ttk.Label', (['settings_frame'], {'text': '"""Hotkey:"""'}), "(settings_frame, text='Hotkey:')\n", (9186, 9218), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((9257, 9307), 'tkinter.ttk.Entry', 'ttk.Entry', (['settings_frame'], {'textvariable': 'hotkey_var'}), '(settings_frame, textvariable=hotkey_var)\n', (9266, 9307), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((9350, 9399), 'tkinter.ttk.Label', 'ttk.Label', (['settings_frame'], {'text': '"""Tools Selection"""'}), "(settings_frame, text='Tools Selection')\n", (9359, 9399), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((9446, 9496), 'tkinter.ttk.Separator', 'ttk.Separator', (['settings_frame'], {'orient': '"""horizontal"""'}), "(settings_frame, orient='horizontal')\n", (9459, 9496), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((9710, 9761), 'tkinter.ttk.Label', 'ttk.Label', (['settings_frame'], {'text': '"""Pinecone Settings"""'}), "(settings_frame, text='Pinecone Settings')\n", (9719, 9761), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((9823, 9873), 'tkinter.ttk.Separator', 'ttk.Separator', (['settings_frame'], {'orient': '"""horizontal"""'}), "(settings_frame, orient='horizontal')\n", (9836, 9873), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((10385, 10429), 'tkinter.ttk.Label', 'ttk.Label', (['settings_frame'], {'text': '"""Tool Name:"""'}), "(settings_frame, text='Tool Name:')\n", (10394, 10429), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((10483, 10545), 'tkinter.ttk.Entry', 'ttk.Entry', (['settings_frame'], {'textvariable': 'pinecone_tool_name_var'}), '(settings_frame, textvariable=pinecone_tool_name_var)\n', (10492, 10545), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((10602, 10653), 'tkinter.ttk.Label', 'ttk.Label', (['settings_frame'], {'text': '"""Tool Description:"""'}), "(settings_frame, text='Tool Description:')\n", (10611, 10653), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((10707, 10776), 'tkinter.ttk.Entry', 
'ttk.Entry', (['settings_frame'], {'textvariable': 'pinecone_tool_description_var'}), '(settings_frame, textvariable=pinecone_tool_description_var)\n', (10716, 10776), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((10833, 10882), 'tkinter.ttk.Label', 'ttk.Label', (['settings_frame'], {'text': '"""Pinecone Index:"""'}), "(settings_frame, text='Pinecone Index:')\n", (10842, 10882), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((10936, 10994), 'tkinter.ttk.Entry', 'ttk.Entry', (['settings_frame'], {'textvariable': 'pinecone_index_var'}), '(settings_frame, textvariable=pinecone_index_var)\n', (10945, 10994), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((11051, 11106), 'tkinter.ttk.Label', 'ttk.Label', (['settings_frame'], {'text': '"""Pinecone Environment:"""'}), "(settings_frame, text='Pinecone Environment:')\n", (11060, 11106), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((11160, 11216), 'tkinter.ttk.Entry', 'ttk.Entry', (['settings_frame'], {'textvariable': 'pinecone_env_var'}), '(settings_frame, textvariable=pinecone_env_var)\n', (11169, 11216), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((11273, 11321), 'tkinter.ttk.Label', 'ttk.Label', (['settings_frame'], {'text': '"""Voice Settings"""'}), "(settings_frame, text='Voice Settings')\n", (11282, 11321), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((11383, 11433), 'tkinter.ttk.Separator', 'ttk.Separator', (['settings_frame'], {'orient': '"""horizontal"""'}), "(settings_frame, orient='horizontal')\n", (11396, 11433), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((11690, 11761), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['settings_frame'], {'text': '"""Use Bark"""', 'variable': 'use_bark_var'}), "(settings_frame, text='Use Bark', variable=use_bark_var)\n", (11705, 11761), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((11815, 11860), 'tkinter.ttk.Label', 'ttk.Label', (['settings_frame'], {'text': '"""Bark Voice:"""'}), "(settings_frame, text='Bark Voice:')\n", (11824, 11860), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((11914, 11972), 'tkinter.ttk.Entry', 'ttk.Entry', (['settings_frame'], {'textvariable': 'history_prompt_var'}), '(settings_frame, textvariable=history_prompt_var)\n', (11923, 11972), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((12029, 12100), 'tkinter.ttk.Button', 'ttk.Button', (['settings_frame'], {'text': '"""Edit API Keys"""', 'command': 'edit_api_keys'}), "(settings_frame, text='Edit API Keys', command=edit_api_keys)\n", (12039, 12100), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((12156, 12227), 'tkinter.ttk.Button', 'ttk.Button', (['settings_frame'], {'text': '"""Save Settings"""', 'command': 'save_settings'}), "(settings_frame, text='Save Settings', command=save_settings)\n", (12166, 12227), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((13874, 13901), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (13883, 13901), False, 'import os\n'), ((13947, 13974), 'os.getenv', 'os.getenv', (['"""GOOGLE_API_KEY"""'], {}), "('GOOGLE_API_KEY')\n", (13956, 13974), False, 'import os\n'), ((14019, 14045), 'os.getenv', 'os.getenv', (['"""GOOGLE_CSE_ID"""'], {}), "('GOOGLE_CSE_ID')\n", (14028, 14045), False, 'import os\n'), ((14096, 14128), 'os.getenv', 'os.getenv', (['"""WOLFRAM_ALPHA_APPID"""'], {}), "('WOLFRAM_ALPHA_APPID')\n", (14105, 14128), False, 'import os\n'), ((14182, 14217), 'os.getenv', 'os.getenv', 
(['"""OPENWEATHERMAP_API_KEY"""'], {}), "('OPENWEATHERMAP_API_KEY')\n", (14191, 14217), False, 'import os\n'), ((14267, 14298), 'os.getenv', 'os.getenv', (['"""ZAPIER_NLA_API_KEY"""'], {}), "('ZAPIER_NLA_API_KEY')\n", (14276, 14298), False, 'import os\n'), ((14342, 14367), 'os.getenv', 'os.getenv', (['"""PINE_API_KEY"""'], {}), "('PINE_API_KEY')\n", (14351, 14367), False, 'import os\n'), ((15403, 15474), 'tkinter.ttk.Button', 'ttk.Button', (['api_keys_frame'], {'text': '"""Save API Keys"""', 'command': 'save_api_keys'}), "(api_keys_frame, text='Save API Keys', command=save_api_keys)\n", (15413, 15474), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((9597, 9652), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['settings_frame'], {'text': 'key', 'variable': 'var'}), '(settings_frame, text=key, variable=var)\n', (9612, 9652), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((15205, 15254), 'tkinter.ttk.Label', 'ttk.Label', (['api_keys_frame'], {'text': "(display_key + ':')"}), "(api_keys_frame, text=display_key + ':')\n", (15214, 15254), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((15299, 15342), 'tkinter.ttk.Entry', 'ttk.Entry', (['api_keys_frame'], {'textvariable': 'var'}), '(api_keys_frame, textvariable=var)\n', (15308, 15342), False, 'from tkinter import ttk, Toplevel, messagebox\n'), ((16324, 16346), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (16344, 16346), False, 'import traceback\n'), ((16747, 16768), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (16766, 16768), False, 'import traceback\n')] |
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from dotenv import load_dotenv
import os
from langchain.chains import SimpleSequentialChain
# Create a .env file in the root of your project and add your OpenAI API key to it
# Load env files
load_dotenv()
openai_api_key = os.environ.get('openai_api_key')
# This is an LLMChain to generate company names given a company description.
llm = ChatOpenAI(temperature=0.7, model_name="gpt-3.5-turbo", openai_api_key=openai_api_key)
# Create templates
template_name = """You are a company name generator. Based on a company description, it is your job to create a company name.
Company description: {company_description}
Company name:"""
prompt_template_name = PromptTemplate(input_variables=["company_description"], template=template_name)
# This is an LLMChain to generate company slogans given a company name and company description.
template_slogan = """You are a company slogan generator. Based on a company name, it is your job to create a company slogan.
Company name: {company_name}
Company slogan:"""
prompt_template_slogan = PromptTemplate(input_variables=["company_name"], template=template_slogan)
# Create chains
name_chain = LLMChain(llm=llm, prompt=prompt_template_name)
slogan_chain = LLMChain(llm=llm, prompt=prompt_template_slogan)
# This is the overall chain where we run these two chains in sequence.
overall_chain = SimpleSequentialChain(chains=[name_chain, slogan_chain], verbose=True)
slogan = overall_chain.run("We are a company that sells shoes.")
| [
"langchain.chains.LLMChain",
"langchain.chains.SimpleSequentialChain",
"langchain.prompts.PromptTemplate",
"langchain.chat_models.ChatOpenAI"
] | [((321, 334), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (332, 334), False, 'from dotenv import load_dotenv\n'), ((352, 384), 'os.environ.get', 'os.environ.get', (['"""openai_api_key"""'], {}), "('openai_api_key')\n", (366, 384), False, 'import os\n'), ((469, 524), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.7)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0.7, model_name='gpt-3.5-turbo')\n", (479, 524), False, 'from langchain.chat_models import ChatOpenAI\n'), ((757, 836), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['company_description']", 'template': 'template_name'}), "(input_variables=['company_description'], template=template_name)\n", (771, 836), False, 'from langchain.prompts import PromptTemplate\n'), ((1137, 1211), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['company_name']", 'template': 'template_slogan'}), "(input_variables=['company_name'], template=template_slogan)\n", (1151, 1211), False, 'from langchain.prompts import PromptTemplate\n'), ((1242, 1288), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template_name'}), '(llm=llm, prompt=prompt_template_name)\n', (1250, 1288), False, 'from langchain.chains import LLMChain\n'), ((1304, 1352), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template_slogan'}), '(llm=llm, prompt=prompt_template_slogan)\n', (1312, 1352), False, 'from langchain.chains import LLMChain\n'), ((1441, 1511), 'langchain.chains.SimpleSequentialChain', 'SimpleSequentialChain', ([], {'chains': '[name_chain, slogan_chain]', 'verbose': '(True)'}), '(chains=[name_chain, slogan_chain], verbose=True)\n', (1462, 1511), False, 'from langchain.chains import SimpleSequentialChain\n')] |
import os
import re
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
load_dotenv()
# Initialize the app with the bot token
app = App(token=os.environ.get("SLACK_BOT_TOKEN"))
@app.event("app_mention")
def handle_mention(event, say):
thread_ts = event["ts"]
message = re.sub("<@.*>", "", event["text"])
llm = ChatOpenAI(
model_name=os.environ["OPENAI_API_MODEL"],
temperature=os.environ["OPENAI_API_TEMPERATURE"],
)
response = llm.predict(message)
say(text=response, thread_ts=thread_ts)
# Start the app using the Socket Mode handler
if __name__ == "__main__":
SocketModeHandler(app, os.environ["SLACK_APP_TOKEN"]).start()
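# Hypothetical .env sketch (illustrative, not from the original source): the code
# above expects these variables to be set, e.g.
#   SLACK_BOT_TOKEN=xoxb-...
#   SLACK_APP_TOKEN=xapp-...
#   OPENAI_API_MODEL=gpt-3.5-turbo
#   OPENAI_API_TEMPERATURE=0.5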
| [
"langchain.chat_models.ChatOpenAI"
] | [((186, 199), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (197, 199), False, 'from dotenv import load_dotenv\n'), ((378, 412), 're.sub', 're.sub', (['"""<@.*>"""', '""""""', "event['text']"], {}), "('<@.*>', '', event['text'])\n", (384, 412), False, 'import re\n'), ((424, 532), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': "os.environ['OPENAI_API_MODEL']", 'temperature': "os.environ['OPENAI_API_TEMPERATURE']"}), "(model_name=os.environ['OPENAI_API_MODEL'], temperature=os.\n environ['OPENAI_API_TEMPERATURE'])\n", (434, 532), False, 'from langchain.chat_models import ChatOpenAI\n'), ((241, 274), 'os.environ.get', 'os.environ.get', (['"""SLACK_BOT_TOKEN"""'], {}), "('SLACK_BOT_TOKEN')\n", (255, 274), False, 'import os\n'), ((693, 746), 'slack_bolt.adapter.socket_mode.SocketModeHandler', 'SocketModeHandler', (['app', "os.environ['SLACK_APP_TOKEN']"], {}), "(app, os.environ['SLACK_APP_TOKEN'])\n", (710, 746), False, 'from slack_bolt.adapter.socket_mode import SocketModeHandler\n')] |
""" This module contains the function to classify the user query. """
import json
from langchain.prompts import ChatPromptTemplate
from langchain.chains import create_extraction_chain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import TextLoader
from langchain.document_loaders import DirectoryLoader
from ..config import Config
config = Config()
config.load()
OPENAI_API_KEY = config.openai_key
async def classify_user_query(query, context, document_types):
"""Classify the user query based on the context and document types."""
llm = ChatOpenAI(temperature=0, model=config.model)
prompt_classify = ChatPromptTemplate.from_template(
"""You are a classifier.
You store user memories, thoughts and feelings.
Determine if you need to use them to answer this query : {query}"""
)
json_structure = [
{
"name": "classifier",
"description": "Classification",
"parameters": {
"type": "object",
"properties": {
"UserQueryClassifier": {
"type": "bool",
"description": "The classification of documents "
"in groups such as legal, medical, etc.",
}
},
"required": ["UserQueryClassifier"],
},
}
]
chain_filter = prompt_classify | llm.bind(
function_call={"name": "classifier"}, functions=json_structure
)
classifier_output = await chain_filter.ainvoke(
{"query": query, "context": context, "document_types": document_types}
)
arguments_str = classifier_output.additional_kwargs["function_call"]["arguments"]
print("This is the arguments string", arguments_str)
arguments_dict = json.loads(arguments_str)
    classifier_value = arguments_dict.get("UserQueryClassifier", None)
    print("This is the classifier value", classifier_value)
    return classifier_value
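# Hypothetical usage sketch (not part of the original module). Because this file
# imports Config relatively, it runs inside its package; the query, context and
# document_types values below are illustrative placeholders only.
#
# import asyncio
#
# async def _demo():
#     needs_memory = await classify_user_query(
#         query="What did I note about my last doctor visit?",
#         context="",
#         document_types=["medical", "journal"],
#     )
#     print("Stored memories needed:", needs_memory)
#
# asyncio.run(_demo())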
| [
"langchain.prompts.ChatPromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI"
] | [((582, 627), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': 'config.model'}), '(temperature=0, model=config.model)\n', (592, 627), False, 'from langchain.chat_models import ChatOpenAI\n'), ((650, 853), 'langchain.prompts.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['"""You are a classifier. \n You store user memories, thoughts and feelings. \n Determine if you need to use them to answer this query : {query}"""'], {}), '(\n """You are a classifier. \n You store user memories, thoughts and feelings. \n Determine if you need to use them to answer this query : {query}"""\n )\n', (682, 853), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((1854, 1879), 'json.loads', 'json.loads', (['arguments_str'], {}), '(arguments_str)\n', (1864, 1879), False, 'import json\n')] |
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser
from langchain.schema.runnable import Runnable
from langchain.schema.runnable.config import RunnableConfig
import chainlit as cl
@cl.on_chat_start
async def on_chat_start():
    model = ChatOpenAI(openai_api_base="http://localhost:8888/v1", streaming=True)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a very knowledgeable historian who provides accurate and eloquent answers to historical questions.",
),
("human", "{question}"),
]
)
runnable = prompt | model | StrOutputParser()
cl.user_session.set("runnable", runnable)
@cl.on_message
async def on_message(message: cl.Message):
runnable = cl.user_session.get("runnable") # type: Runnable
msg = cl.Message(content="")
for chunk in await cl.make_async(runnable.stream)(
{"question": message.content},
config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
):
await msg.stream_token(chunk)
await msg.send()
| [
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.schema.StrOutputParser",
"langchain.chat_models.ChatOpenAI"
] | [((328, 398), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_base': '"""http://localhost:8888/v1"""', 'streaming': '(True)'}), "(openai_api_base='http://localhost:8888/v1', streaming=True)\n", (338, 398), False, 'from langchain.chat_models import ChatOpenAI\n'), ((411, 600), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[(\'system\',\n "You\'re a very knowledgeable historian who provides accurate and eloquent answers to historical questions."\n ), (\'human\', \'{question}\')]'], {}), '([(\'system\',\n "You\'re a very knowledgeable historian who provides accurate and eloquent answers to historical questions."\n ), (\'human\', \'{question}\')])\n', (443, 600), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((742, 783), 'chainlit.user_session.set', 'cl.user_session.set', (['"""runnable"""', 'runnable'], {}), "('runnable', runnable)\n", (761, 783), True, 'import chainlit as cl\n'), ((859, 890), 'chainlit.user_session.get', 'cl.user_session.get', (['"""runnable"""'], {}), "('runnable')\n", (878, 890), True, 'import chainlit as cl\n'), ((920, 942), 'chainlit.Message', 'cl.Message', ([], {'content': '""""""'}), "(content='')\n", (930, 942), True, 'import chainlit as cl\n'), ((720, 737), 'langchain.schema.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (735, 737), False, 'from langchain.schema import StrOutputParser\n'), ((967, 997), 'chainlit.make_async', 'cl.make_async', (['runnable.stream'], {}), '(runnable.stream)\n', (980, 997), True, 'import chainlit as cl\n'), ((1079, 1108), 'chainlit.LangchainCallbackHandler', 'cl.LangchainCallbackHandler', ([], {}), '()\n', (1106, 1108), True, 'import chainlit as cl\n')] |
import os
import streamlit as st
from PyPDF2 import PdfReader, PdfWriter
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
def ChatPDF(text):
# st.write(text)
#split into chunks
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size = 1000,
chunk_overlap = 200,
length_function=len
)
chunks = text_splitter.split_text(text)
# st.write(chunks)
# creating embeddings
OPENAI_API_KEY = st.text_input("OPENAI API KEY", type = "password")
if OPENAI_API_KEY:
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
# st.write("Embedding Created")
# st.write(embeddings)
knowledge_base = FAISS.from_texts(chunks, embeddings)
st.write("Knowledge Base created ")
#show user input
def ask_question(i=0):
user_question = st.text_input("Ask a question about your PDF?",key = i)
if user_question:
docs = knowledge_base.similarity_search(user_question)
# st.write(docs)
llm = OpenAI(openai_api_key=OPENAI_API_KEY)
chain = load_qa_chain(llm, chain_type="stuff")
with get_openai_callback() as cb:
response = chain.run(input_documents=docs, question=user_question)
print(cb)
st.write(response)
ask_question(i+1)
ask_question()
def main():
st.set_page_config(page_title="Ask ur PDF",
page_icon="📄")
hide_st_style = """
<style>
                #MainMenu {visibility: hidden;}
footer {visibility: hidden;}
                header {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)
# st.write(st.set_page_config)
st.header("Ask your PDF 🤔💭")
#uploading file
pdf = st.file_uploader("Upload your PDF ", type="pdf")
# extract the text
if pdf is not None:
option = st.selectbox("What you want to do with PDF📜", [
"Meta Data📂",
"Extract Raw Text📄",
"Extract Links🔗",
"Extract Images🖼️",
"Make PDF password protected🔐",
"PDF Annotation📝",
"ChatPDF💬"
])
pdf_reader = PdfReader(pdf)
text = ""
for page in pdf_reader.pages:
text += page.extract_text()
if option == "Meta Data📂":
st.write(pdf_reader.metadata)
elif option == "Make PDF password protected🔐":
pswd = st.text_input("Enter yourpass word", type="password")
if pswd:
with st.spinner("Encrypting..."):
pdf_writer = PdfWriter()
for page_num in range(len(pdf_reader.pages)):
pdf_writer.add_page(pdf_reader.pages[page_num])
pdf_writer.encrypt(pswd)
with open(f"{pdf.name.split('.')[0]}_encrypted.pdf", "wb") as f:
pdf_writer.write(f)
st.success("Encryption Successful!")
st.download_button(
label="Download Encrypted PDF",
data=open(f"{pdf.name.split('.')[0]}_encrypted.pdf", "rb").read(),
file_name=f"{pdf.name.split('.')[0]}_encrypted.pdf",
mime="application/octet-stream",
)
try:
os.remove(f"{pdf.name.split('.')[0]}_encrypted.pdf")
except: pass
elif option == "Extract Raw Text📄":
st.write(text)
elif option == "Extract Links🔗":
for page in pdf_reader.pages:
if "/Annots" in page:
for annot in page["/Annots"]:
subtype = annot.get_object()["/Subtype"]
if subtype == "/Link":
try:
st.write(annot.get_object()["/A"]["/URI"])
except: pass
elif option == "Extract Images🖼️":
for page in pdf_reader.pages:
try:
for img in page.images:
st.write(img.name)
st.image(img.data)
except: pass
elif option == "PDF Annotation📝":
for page in pdf_reader.pages:
if "/Annots" in page:
for annot in page["/Annots"]:
obj = annot.get_object()
st.write(obj)
st.write("***********")
annotation = {"subtype": obj["/Subtype"], "location": obj["/Rect"]}
st.write(annotation)
elif option == "ChatPDF💬":
ChatPDF(text)
if __name__ == "__main__":
main()
| [
"langchain.chains.question_answering.load_qa_chain",
"langchain.text_splitter.CharacterTextSplitter",
"langchain.llms.OpenAI",
"langchain.callbacks.get_openai_callback",
"langchain.vectorstores.FAISS.from_texts",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((481, 579), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(1000)', 'chunk_overlap': '(200)', 'length_function': 'len'}), "(separator='\\n', chunk_size=1000, chunk_overlap=200,\n length_function=len)\n", (502, 579), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((745, 793), 'streamlit.text_input', 'st.text_input', (['"""OPENAI API KEY"""'], {'type': '"""password"""'}), "('OPENAI API KEY', type='password')\n", (758, 793), True, 'import streamlit as st\n'), ((1783, 1841), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Ask ur PDF"""', 'page_icon': '"""📄"""'}), "(page_title='Ask ur PDF', page_icon='📄')\n", (1801, 1841), True, 'import streamlit as st\n'), ((2081, 2131), 'streamlit.markdown', 'st.markdown', (['hide_st_style'], {'unsafe_allow_html': '(True)'}), '(hide_st_style, unsafe_allow_html=True)\n', (2092, 2131), True, 'import streamlit as st\n'), ((2175, 2203), 'streamlit.header', 'st.header', (['"""Ask your PDF 🤔💭"""'], {}), "('Ask your PDF 🤔💭')\n", (2184, 2203), True, 'import streamlit as st\n'), ((2242, 2290), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload your PDF """'], {'type': '"""pdf"""'}), "('Upload your PDF ', type='pdf')\n", (2258, 2290), True, 'import streamlit as st\n'), ((842, 889), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (858, 889), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((989, 1025), 'langchain.vectorstores.FAISS.from_texts', 'FAISS.from_texts', (['chunks', 'embeddings'], {}), '(chunks, embeddings)\n', (1005, 1025), False, 'from langchain.vectorstores import FAISS\n'), ((1035, 1070), 'streamlit.write', 'st.write', (['"""Knowledge Base created """'], {}), "('Knowledge Base created ')\n", (1043, 1070), True, 'import streamlit as st\n'), ((2360, 2551), 'streamlit.selectbox', 'st.selectbox', (['"""What you want to do with PDF📜"""', "['Meta Data📂', 'Extract Raw Text📄', 'Extract Links🔗', 'Extract Images🖼️',\n 'Make PDF password protected🔐', 'PDF Annotation📝', 'ChatPDF💬']"], {}), "('What you want to do with PDF📜', ['Meta Data📂',\n 'Extract Raw Text📄', 'Extract Links🔗', 'Extract Images🖼️',\n 'Make PDF password protected🔐', 'PDF Annotation📝', 'ChatPDF💬'])\n", (2372, 2551), True, 'import streamlit as st\n'), ((2672, 2686), 'PyPDF2.PdfReader', 'PdfReader', (['pdf'], {}), '(pdf)\n', (2681, 2686), False, 'from PyPDF2 import PdfReader, PdfWriter\n'), ((1160, 1214), 'streamlit.text_input', 'st.text_input', (['"""Ask a question about your PDF?"""'], {'key': 'i'}), "('Ask a question about your PDF?', key=i)\n", (1173, 1214), True, 'import streamlit as st\n'), ((2835, 2864), 'streamlit.write', 'st.write', (['pdf_reader.metadata'], {}), '(pdf_reader.metadata)\n', (2843, 2864), True, 'import streamlit as st\n'), ((1378, 1415), 'langchain.llms.OpenAI', 'OpenAI', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (1384, 1415), False, 'from langchain.llms import OpenAI\n'), ((1441, 1479), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (1454, 1479), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((1667, 1685), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (1675, 1685), True, 'import streamlit as st\n'), ((2941, 2994), 'streamlit.text_input', 
'st.text_input', (['"""Enter yourpass word"""'], {'type': '"""password"""'}), "('Enter yourpass word', type='password')\n", (2954, 2994), True, 'import streamlit as st\n'), ((1502, 1523), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (1521, 1523), False, 'from langchain.callbacks import get_openai_callback\n'), ((4062, 4076), 'streamlit.write', 'st.write', (['text'], {}), '(text)\n', (4070, 4076), True, 'import streamlit as st\n'), ((3039, 3066), 'streamlit.spinner', 'st.spinner', (['"""Encrypting..."""'], {}), "('Encrypting...')\n", (3049, 3066), True, 'import streamlit as st\n'), ((3102, 3113), 'PyPDF2.PdfWriter', 'PdfWriter', ([], {}), '()\n', (3111, 3113), False, 'from PyPDF2 import PdfReader, PdfWriter\n'), ((3480, 3516), 'streamlit.success', 'st.success', (['"""Encryption Successful!"""'], {}), "('Encryption Successful!')\n", (3490, 3516), True, 'import streamlit as st\n'), ((4697, 4715), 'streamlit.write', 'st.write', (['img.name'], {}), '(img.name)\n', (4705, 4715), True, 'import streamlit as st\n'), ((4741, 4759), 'streamlit.image', 'st.image', (['img.data'], {}), '(img.data)\n', (4749, 4759), True, 'import streamlit as st\n'), ((5041, 5054), 'streamlit.write', 'st.write', (['obj'], {}), '(obj)\n', (5049, 5054), True, 'import streamlit as st\n'), ((5080, 5103), 'streamlit.write', 'st.write', (['"""***********"""'], {}), "('***********')\n", (5088, 5103), True, 'import streamlit as st\n'), ((5222, 5242), 'streamlit.write', 'st.write', (['annotation'], {}), '(annotation)\n', (5230, 5242), True, 'import streamlit as st\n')] |
"""Example of observing LLM calls made by via callable OpenAI LLM."""
from langchain.llms import OpenAI
from langchain_prefect.plugins import RecordLLMCalls
llm = OpenAI(temperature=0.9)
with RecordLLMCalls():
llm("What would be a good name for a company that makes colorful socks?")
| [
"langchain.llms.OpenAI",
"langchain_prefect.plugins.RecordLLMCalls"
] | [((166, 189), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)'}), '(temperature=0.9)\n', (172, 189), False, 'from langchain.llms import OpenAI\n'), ((196, 212), 'langchain_prefect.plugins.RecordLLMCalls', 'RecordLLMCalls', ([], {}), '()\n', (210, 212), False, 'from langchain_prefect.plugins import RecordLLMCalls\n')] |
from langchain.agents import load_tools
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain.utilities import SerpAPIWrapper
from langchain_app.models.vicuna_request_llm import VicunaLLM
# First, let's load the language model we're going to use to control the agent.
llm = VicunaLLM()
params = {
"engine": "google",
"gl": "us",
"hl": "en",
}
search = SerpAPIWrapper(params=params)
# Next, let's load some tools to use. Note that the `llm-math` tool uses an LLM, so we need to pass that in.
tools = load_tools(["python_repl", "multi_line_human"], llm=llm)
tools.append(
Tool(
name="Search",
func=search.run,
description="useful for when you need to ask with search",
)
)
# Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use.
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
# Now let's test it out!
agent.run(
"""
Oh, wait, before your start your next question, we should go over one more tool that is available.
The action MultiLineHuman can be used to ask help to a human.
Action: MultiLineHuman
Action Input: "Human, please fix this error for me?"
Observation:
# Gladly so, you declared a variable with \_, which is an illegal character.
# Here's the right version:
empty_list = []
Thought: The Human helped me, I should use his observation as input to the shell
Action: Python REPL
Action Input:
empty_list = []
Observation:
Thought: It worked this time. I have concluded the task.
Final Answer: The following script worked:
empty_list = []
OK, now you begin
Question: Ask from the help of a human about a topic.
You should then help him.
"""
)
| [
"langchain.agents.initialize_agent",
"langchain.utilities.SerpAPIWrapper",
"langchain.agents.load_tools",
"langchain.agents.Tool",
"langchain_app.models.vicuna_request_llm.VicunaLLM"
] | [((328, 339), 'langchain_app.models.vicuna_request_llm.VicunaLLM', 'VicunaLLM', ([], {}), '()\n', (337, 339), False, 'from langchain_app.models.vicuna_request_llm import VicunaLLM\n'), ((419, 448), 'langchain.utilities.SerpAPIWrapper', 'SerpAPIWrapper', ([], {'params': 'params'}), '(params=params)\n', (433, 448), False, 'from langchain.utilities import SerpAPIWrapper\n'), ((566, 622), 'langchain.agents.load_tools', 'load_tools', (["['python_repl', 'multi_line_human']"], {'llm': 'llm'}), "(['python_repl', 'multi_line_human'], llm=llm)\n", (576, 622), False, 'from langchain.agents import load_tools\n'), ((891, 982), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.ZERO_SHOT_REACT_DESCRIPTION', 'verbose': '(True)'}), '(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n verbose=True)\n', (907, 982), False, 'from langchain.agents import initialize_agent, Tool\n'), ((642, 742), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'search.run', 'description': '"""useful for when you need to ask with search"""'}), "(name='Search', func=search.run, description=\n 'useful for when you need to ask with search')\n", (646, 742), False, 'from langchain.agents import initialize_agent, Tool\n')] |
"""Toolkit for the Wolfram Alpha API."""
from typing import List
from langchain.tools.base import BaseTool, BaseToolkit
from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
class WolframAlphaToolkit(BaseToolkit):
"""Tool that adds the capability to interact with Wolfram Alpha."""
wolfram_alpha_appid: str
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
wrapper = WolframAlphaAPIWrapper(wolfram_alpha_appid=self.wolfram_alpha_appid)
return [
WolframAlphaQueryRun(
api_wrapper=wrapper,
)
]
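# Hypothetical usage sketch (illustrative, not part of the original module);
# "YOUR_APP_ID" stands in for a real Wolfram Alpha app id, and the wolframalpha
# package must be installed for the wrapper to validate.
if __name__ == "__main__":
    toolkit = WolframAlphaToolkit(wolfram_alpha_appid="YOUR_APP_ID")
    for tool in toolkit.get_tools():
        # Each tool is a WolframAlphaQueryRun instance ready to hand to an agent.
        print(tool.name, "-", tool.description)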
| [
"langchain.utilities.wolfram_alpha.WolframAlphaAPIWrapper",
"langchain.tools.wolfram_alpha.tool.WolframAlphaQueryRun"
] | [((509, 577), 'langchain.utilities.wolfram_alpha.WolframAlphaAPIWrapper', 'WolframAlphaAPIWrapper', ([], {'wolfram_alpha_appid': 'self.wolfram_alpha_appid'}), '(wolfram_alpha_appid=self.wolfram_alpha_appid)\n', (531, 577), False, 'from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper\n'), ((607, 648), 'langchain.tools.wolfram_alpha.tool.WolframAlphaQueryRun', 'WolframAlphaQueryRun', ([], {'api_wrapper': 'wrapper'}), '(api_wrapper=wrapper)\n', (627, 648), False, 'from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun\n')] |
# /app/src/tools/setup.py
import logging
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool
from langchain_community.tools import DuckDuckGoSearchResults
from src.tools.doc_search import DocumentSearch
logger = logging.getLogger(__name__)
class SearchWebInput(BaseModel):
query: str = Field(description="The search query")
class SearchTechDocsInput(BaseModel):
query: str = Field(description="The search query")
collection: str = Field(default="techdocs", description="The document collection to search in")
class SearchWebTool(BaseTool):
name = "search_web"
description = "Conducts DuckDuckGo searches."
args_schema = SearchWebInput
return_direct = True
def _run(self, query: str, **kwargs) -> str:
search = DuckDuckGoSearchResults()
return search.run(query)
class SearchTechDocsTool(BaseTool):
name = "search_techdocs"
description = "This tool enables the querying of a specialized vector store named ‘TechDocs,’ a repository where users archive valuable technical documentation they have encountered. It is particularly beneficial when engaging with technical subjects or when involved in coding activities. Utilize this search tool to scrutinize the vector store for pertinent context when addressing technical inquiries or tasks. If a term from the user input is unfamiliar but appears to be technical in nature, it is imperative to consult ‘TechDocs’ to ascertain whether relevant information or context is available therein. For your awareness, the information provided is sourced from ‘TechDocs,’ and we will refer to this source for any related queries."
args_schema = SearchTechDocsInput
return_direct = True
def _run(self, query: str, collection: str = "techdocs", **kwargs) -> str:
search = DocumentSearch(query, collection)
results = search.search_documents()
return results
class ToolSetup:
"""
A class dedicated to the setup and initialization of tools used by the agent.
"""
@classmethod
def setup_tools(cls) -> list:
"""
Initializes and returns a list of tools for the agent.
Returns:
- list: A list of initialized tools for agent's use.
"""
return [SearchWebTool(), SearchTechDocsTool()]
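# Hypothetical usage sketch (not part of the original module): constructing the
# tools does not call any external service, only running them does.
if __name__ == "__main__":
    for tool in ToolSetup.setup_tools():
        print(f"{tool.name}: {tool.description[:60]}...")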
| [
"langchain_community.tools.DuckDuckGoSearchResults",
"langchain.pydantic_v1.Field"
] | [((251, 278), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (268, 278), False, 'import logging\n'), ((331, 368), 'langchain.pydantic_v1.Field', 'Field', ([], {'description': '"""The search query"""'}), "(description='The search query')\n", (336, 368), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((426, 463), 'langchain.pydantic_v1.Field', 'Field', ([], {'description': '"""The search query"""'}), "(description='The search query')\n", (431, 463), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((486, 563), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': '"""techdocs"""', 'description': '"""The document collection to search in"""'}), "(default='techdocs', description='The document collection to search in')\n", (491, 563), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((796, 821), 'langchain_community.tools.DuckDuckGoSearchResults', 'DuckDuckGoSearchResults', ([], {}), '()\n', (819, 821), False, 'from langchain_community.tools import DuckDuckGoSearchResults\n'), ((1829, 1862), 'src.tools.doc_search.DocumentSearch', 'DocumentSearch', (['query', 'collection'], {}), '(query, collection)\n', (1843, 1862), False, 'from src.tools.doc_search import DocumentSearch\n')] |
#!/usr/bin/env python3
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.llms import GPT4All
import os
import time
load_dotenv()
models_directory = os.environ.get('MODELS_DIRECTORY')
embeddings_model_name = os.environ.get("EMBEDDINGS_MODEL_NAME")
persist_directory = os.environ.get('PERSIST_DIRECTORY')
llm_model_name = os.environ.get('LLM_MODEL_NAME')
model_n_ctx = os.environ.get('MODEL_N_CTX')
model_n_batch = int(os.environ.get('MODEL_N_BATCH',8))
target_source_chunks = int(os.environ.get('TARGET_SOURCE_CHUNKS',4))
from constants import CHROMA_SETTINGS
def main():
embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name, cache_folder=models_directory)
db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
retriever = db.as_retriever(search_kwargs={"k": target_source_chunks})
# Prepare the LLM
llm = GPT4All(model=f'./{models_directory}/{llm_model_name}', n_ctx=model_n_ctx, backend='gptj', n_batch=model_n_batch, verbose=False)
# Create a QA chain
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=True)
# Interactive questions and answers
while True:
query = input("\nEnter a query: ")
if query == "exit":
break
if query.strip() == "":
continue
# Get the answer from the chain
start = time.time()
res = qa(query)
answer, docs = res['result'], res['source_documents']
end = time.time()
# Print the result
print("\n\n> Question:")
print(query)
print(f"\n> Answer (took {round(end - start, 2)} s.):")
print(answer)
# Print the relevant sources used for the answer
for document in docs:
print("\n> " + document.metadata["source"] + ":")
print(document.page_content)
if __name__ == "__main__":
main()
| [
"langchain.llms.GPT4All",
"langchain.embeddings.HuggingFaceEmbeddings",
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.vectorstores.Chroma"
] | [((251, 264), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (262, 264), False, 'from dotenv import load_dotenv\n'), ((286, 320), 'os.environ.get', 'os.environ.get', (['"""MODELS_DIRECTORY"""'], {}), "('MODELS_DIRECTORY')\n", (300, 320), False, 'import os\n'), ((345, 384), 'os.environ.get', 'os.environ.get', (['"""EMBEDDINGS_MODEL_NAME"""'], {}), "('EMBEDDINGS_MODEL_NAME')\n", (359, 384), False, 'import os\n'), ((405, 440), 'os.environ.get', 'os.environ.get', (['"""PERSIST_DIRECTORY"""'], {}), "('PERSIST_DIRECTORY')\n", (419, 440), False, 'import os\n'), ((458, 490), 'os.environ.get', 'os.environ.get', (['"""LLM_MODEL_NAME"""'], {}), "('LLM_MODEL_NAME')\n", (472, 490), False, 'import os\n'), ((505, 534), 'os.environ.get', 'os.environ.get', (['"""MODEL_N_CTX"""'], {}), "('MODEL_N_CTX')\n", (519, 534), False, 'import os\n'), ((555, 589), 'os.environ.get', 'os.environ.get', (['"""MODEL_N_BATCH"""', '(8)'], {}), "('MODEL_N_BATCH', 8)\n", (569, 589), False, 'import os\n'), ((617, 658), 'os.environ.get', 'os.environ.get', (['"""TARGET_SOURCE_CHUNKS"""', '(4)'], {}), "('TARGET_SOURCE_CHUNKS', 4)\n", (631, 658), False, 'import os\n'), ((730, 821), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'embeddings_model_name', 'cache_folder': 'models_directory'}), '(model_name=embeddings_model_name, cache_folder=\n models_directory)\n', (751, 821), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((826, 937), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': 'persist_directory', 'embedding_function': 'embeddings', 'client_settings': 'CHROMA_SETTINGS'}), '(persist_directory=persist_directory, embedding_function=embeddings,\n client_settings=CHROMA_SETTINGS)\n', (832, 937), False, 'from langchain.vectorstores import Chroma\n'), ((1050, 1182), 'langchain.llms.GPT4All', 'GPT4All', ([], {'model': 'f"""./{models_directory}/{llm_model_name}"""', 'n_ctx': 'model_n_ctx', 'backend': '"""gptj"""', 'n_batch': 'model_n_batch', 'verbose': '(False)'}), "(model=f'./{models_directory}/{llm_model_name}', n_ctx=model_n_ctx,\n backend='gptj', n_batch=model_n_batch, verbose=False)\n", (1057, 1182), False, 'from langchain.llms import GPT4All\n'), ((1225, 1337), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'llm', 'chain_type': '"""stuff"""', 'retriever': 'retriever', 'return_source_documents': '(True)'}), "(llm=llm, chain_type='stuff', retriever=\n retriever, return_source_documents=True)\n", (1252, 1337), False, 'from langchain.chains import RetrievalQA\n'), ((1593, 1604), 'time.time', 'time.time', ([], {}), '()\n', (1602, 1604), False, 'import time\n'), ((1705, 1716), 'time.time', 'time.time', ([], {}), '()\n', (1714, 1716), False, 'import time\n')] |
from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType
from llama_index import ListIndex, ServiceContext, SimpleDirectoryReader, VectorStoreIndex
'''
Title of the page: A simple Python implementation of the ReAct pattern for LLMs
Name of the website: LlamaIndex (GPT Index) is a data framework for your LLM application.
URL: https://github.com/jerryjliu/llama_index
'''
docs = SimpleDirectoryReader("../data/paul_graham/").load_data()
from llama_index import ServiceContext, LLMPredictor, TreeIndex
from langchain.chat_models import ChatOpenAI
llm_predictor = LLMPredictor(llm=ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0))
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
callback_manager = CallbackManager([llama_debug])
service_context = ServiceContext.from_defaults(callback_manager=callback_manager, llm_predictor=llm_predictor)
index = VectorStoreIndex.from_documents(docs, service_context=service_context)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
# Print info on the LLM calls during the list index query
print(llama_debug.get_event_time_info(CBEventType.LLM))
# Print info on llm inputs/outputs - returns start/end events for each LLM call
event_pairs = llama_debug.get_llm_inputs_outputs()
print(event_pairs[0][0])
print(event_pairs[0][1].payload.keys())
print(event_pairs[0][1].payload['response'])
# Get info on any event type
event_pairs = llama_debug.get_event_pairs(CBEventType.CHUNKING)
print(event_pairs[0][0].payload.keys()) # get first chunking start event
print(event_pairs[0][1].payload.keys()) # get first chunking end event
# Clear the currently cached events
llama_debug.flush_event_logs()
| [
"langchain.chat_models.ChatOpenAI"
] | [((676, 718), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': '(True)'}), '(print_trace_on_end=True)\n', (693, 718), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType\n'), ((738, 768), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[llama_debug]'], {}), '([llama_debug])\n', (753, 768), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType\n'), ((787, 883), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'callback_manager': 'callback_manager', 'llm_predictor': 'llm_predictor'}), '(callback_manager=callback_manager,\n llm_predictor=llm_predictor)\n', (815, 883), False, 'from llama_index import ServiceContext, LLMPredictor, TreeIndex\n'), ((889, 959), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'service_context': 'service_context'}), '(docs, service_context=service_context)\n', (920, 959), False, 'from llama_index import ListIndex, ServiceContext, SimpleDirectoryReader, VectorStoreIndex\n'), ((405, 450), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""../data/paul_graham/"""'], {}), "('../data/paul_graham/')\n", (426, 450), False, 'from llama_index import ListIndex, ServiceContext, SimpleDirectoryReader, VectorStoreIndex\n'), ((606, 659), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (616, 659), False, 'from langchain.chat_models import ChatOpenAI\n')] |
from typing import Any, Callable
from pandas import DataFrame
from exact_rag.config import EmbeddingType, Embeddings, DatabaseType, Databases
from langchain_openai import OpenAIEmbeddings
from langchain_community.embeddings import OllamaEmbeddings
from langchain.vectorstores.chroma import Chroma
from langchain.vectorstores.elasticsearch import ElasticsearchStore
from langchain.indexes import SQLRecordManager
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import DataFrameLoader
from langchain.indexes import index
from langchain.chains import RetrievalQA
from langchain_openai import ChatOpenAI
from langchain_community.llms.ollama import Ollama
class Caller:
    """Wrap a callable, filtering kwargs to `accept_only` and renaming them via `arg_swap` before the call."""
def __init__(
self,
callable: Callable[..., Any],
arg_swap: dict[str, Any] | None = None,
accept_only: list[str] | None = None,
):
self._callable = callable
self._arg_swap = arg_swap
self._accept_only = accept_only
def __call__(self, **args):
if self._accept_only:
args = {
key: value for (key, value) in args.items() if key in self._accept_only
}
if self._arg_swap:
args = {
self._arg_swap.get(arg, arg): value for (arg, value) in args.items()
}
return self._callable(**args)
embeddings = {
EmbeddingType.openai: Caller(OpenAIEmbeddings, accept_only=["api_key"]),
EmbeddingType.ollama: Caller(OllamaEmbeddings, accept_only=["model"]),
}
dbs = {
DatabaseType.chroma: Caller(
Chroma,
{"embedding": "embedding_function"},
accept_only=["embedding", "persist_directory", "collection_name"],
),
DatabaseType.elastic: Caller(
ElasticsearchStore,
{"collection_name": "index_name", "url": "es_url"},
accept_only=[
"embedding",
"url",
"collection_name",
"distance_strategy",
"strategy",
],
),
}
chats = {
EmbeddingType.openai: Caller(
ChatOpenAI,
{
"api_key": "openai_api_key",
"chat_model_name": "model_name",
"chat_temperature": "temperature",
},
accept_only=["chat_model_name", "chat_temperature", "api_key"],
),
EmbeddingType.ollama: Caller(
Ollama,
accept_only=["model"],
),
}
class DataEmbedding:
def __init__(self, embedding_model: Embeddings, database_model: Databases):
embedding_type = embedding_model.type
self._embedding = embeddings[embedding_type](**embedding_model.model_dump())
print("Embedding initialized.")
database_type = database_model.type
self._vectorstore = dbs[database_type](
embedding=self._embedding,
**database_model.model_dump(),
)
print("Vectorstore initialized.")
self._record_manager = SQLRecordManager(
database_model.sql_namespace,
db_url=f"sqlite:///{database_model.sql_url}",
)
print("Record manager initialized.")
self._record_manager.create_schema()
print(" schema created.")
self._splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=database_model.splitter_chunk_size,
chunk_overlap=database_model.splitter_chunk_overlap,
)
print("Splitter initialized.")
self._qa = RetrievalQA.from_chain_type(
llm=chats[embedding_type](**embedding_model.model_dump()),
chain_type="stuff",
retriever=self._vectorstore.as_retriever(
search_type=embedding_model.search_type,
search_kwargs={
"k": embedding_model.search_k,
"fetch_k": embedding_model.search_fetch_k,
},
),
)
print("Chat initialized.")
def load(self, text: str):
id_key = "hash"
content_name = "text"
        dataframe = DataFrame([{id_key: hash(text), content_name: text}])
loader = DataFrameLoader(dataframe, page_content_column=content_name)
data = loader.load()
documents = self._splitter.split_documents(data)
index(
documents,
self._record_manager,
self._vectorstore,
cleanup="incremental",
source_id_key=id_key,
)
def chat(self, query: str):
self.load(query)
return self._qa.invoke({"query": query})
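# Hypothetical usage sketch (not part of the original module). It assumes the
# Embeddings/Databases pydantic models in exact_rag.config accept keyword
# arguments like the ones below; the field names are illustrative and any other
# required fields are omitted here.
#
# embedding_cfg = Embeddings(type=EmbeddingType.ollama, model="llama2")
# database_cfg = Databases(type=DatabaseType.chroma, collection_name="demo")
# pipeline = DataEmbedding(embedding_cfg, database_cfg)
# pipeline.load("Some text worth indexing.")
# print(pipeline.chat("What did I just index?"))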
| [
"langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder",
"langchain.indexes.index",
"langchain_community.document_loaders.DataFrameLoader",
"langchain.indexes.SQLRecordManager"
] | [((2955, 3052), 'langchain.indexes.SQLRecordManager', 'SQLRecordManager', (['database_model.sql_namespace'], {'db_url': 'f"""sqlite:///{database_model.sql_url}"""'}), "(database_model.sql_namespace, db_url=\n f'sqlite:///{database_model.sql_url}')\n", (2971, 3052), False, 'from langchain.indexes import SQLRecordManager\n'), ((3237, 3399), 'langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder', 'RecursiveCharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': 'database_model.splitter_chunk_size', 'chunk_overlap': 'database_model.splitter_chunk_overlap'}), '(chunk_size=\n database_model.splitter_chunk_size, chunk_overlap=database_model.\n splitter_chunk_overlap)\n', (3289, 3399), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((4141, 4201), 'langchain_community.document_loaders.DataFrameLoader', 'DataFrameLoader', (['dataframe'], {'page_content_column': 'content_name'}), '(dataframe, page_content_column=content_name)\n', (4156, 4201), False, 'from langchain_community.document_loaders import DataFrameLoader\n'), ((4296, 4403), 'langchain.indexes.index', 'index', (['documents', 'self._record_manager', 'self._vectorstore'], {'cleanup': '"""incremental"""', 'source_id_key': 'id_key'}), "(documents, self._record_manager, self._vectorstore, cleanup=\n 'incremental', source_id_key=id_key)\n", (4301, 4403), False, 'from langchain.indexes import index\n'), ((4058, 4069), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (4067, 4069), False, 'from pandas import DataFrame\n')] |
import base64
from enum import Enum
import json
import time
import logging
from pywebagent.env.browser import BrowserEnv
from langchain.schema import HumanMessage, SystemMessage
from langchain.chat_models import ChatOpenAI
logger = logging.getLogger(__name__)
TASK_STATUS = Enum("TASK_STATUS", "IN_PROGRESS SUCCESS FAILED")
class Task:
def __init__(self, task, args) -> None:
self.task = task
self.args = args
def get_llm():
return ChatOpenAI(
model_name="gpt-4-vision-preview",
temperature=1,
request_timeout=120,
max_tokens=2000,
)
def generate_user_message(task, observation):
log_history = '\n'.join(observation.env_state.log_history if observation.env_state.log_history else [])
marked_elements_tags = ', '.join([f"({str(i)}) - <{elem['tag'].lower()}>" for i, elem in observation.marked_elements.items()])
text_prompt = f"""
Execution error:
{observation.error_message}
URL:
{observation.url}
Marked elements tags:
{marked_elements_tags}
Task:
{task.task}
Log of last actions:
{log_history}
Task Arguments:
{json.dumps(task.args, indent=4)}
"""
screenshot_binary = observation.screenshot
base64_image = base64.b64encode(screenshot_binary).decode('utf-8')
image_content = {
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{base64_image}",
"detail": "high", # low, high or auto
},
}
text_content = {"type": "text", "text": text_prompt}
return HumanMessage(content=[text_content, image_content])
def generate_system_message():
system_prompt = """
You are an AI agent that controls a webpage using python code, in order to achieve a task.
You are provided a screenshot of the webpage at each timeframe, and you decide on the next python line to execute.
You can use the following functions:
- actions.click(element_id, log_message) # click on an element
- actions.input_text(element_id, text, clear_before_input, log_message) # Use clear_before_input=True to replace the text instead of appending to it. Never use this method on a combobox.
- actions.upload_files(element_id, files: list, log_message) # use this instead of click if clicking is expected to open a file picker
- actions.scroll(direction, log_message) # scroll the page up or down. direction is either 'up' or 'down'.
- actions.combobox_select(element_id, option, log_message) # select an option from a combobox.
- actions.finish(did_succeed, output: dict, reason) # the task is complete with did_succeed=True or False, and a text reason. output is optional dictionary of output values if the task succeeded.
- actions.act(url: str, task: str, log_message, **kwargs) # run another agent on a different webpage. The sub-agent will run until it finishes and will output a result which you can use later. Useful for getting auth details from email for example.
# task argument should be described in natural language. kwargs are additional arguments the sub-agent needs to complete the task. YOU MUST PROVIDE ALL NEEDED ARGUMENTS, OTHERWISE THE SUB-AGENT WILL FAIL.
element_id is always an integer, and is visible as a green label with white number around the TOP-LEFT CORNER OF EACH ELEMENT. Make sure to examine all green highlighted elements before choosing one to interact with.
log_message is a short one sentence explanation of what the action does.
Do not use keyword arguments, all arguments are positional.
IMPORTANT: ONLY ONE WEBPAGE FUNCTION CALL IS ALLOWED, EXCEPT FOR FORMS WHERE MULTIPLE CALLS ARE ALLOWED TO FILL MULTIPLE FIELDS! NOTHING IS ALLOWED AFTER THE "```" ENDING THE CODE BLOCK
IMPORTANT: LOOK FOR CUES IN THE SCREENSHOTS TO SEE WHAT PARTS OF THE TASK ARE COMPLETED AND WHAT PARTS ARE NOT. FOR EXAMPLE, IF YOU ARE ASKED TO BUY A PRODUCT, LOOK FOR CUES THAT THE PRODUCT IS IN THE CART.
Response format:
Reasoning:
Explanation for the next action, particularly focusing on interpreting the attached screenshot image.
Code:
```python
# variable definitions and non-webpage function calls are allowed
...
# a single webpage function call.
actions.func_name(args..)
```
"""
return SystemMessage(content=system_prompt)
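# Illustrative only (not from the original source): a model reply that follows the
# response format requested in the system prompt above might look like this.
#
#   Reasoning:
#   The screenshot shows a sign-in form; the email input carries the green label (3).
#   Code:
#   ```python
#   actions.input_text(3, "user@example.com", True, "Enter the account email")
#   ```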
def extract_code(text):
"""
    Extracts the Python code that follows the pattern "\nCode:\n```python\n", without the trailing "```".
"""
pattern = "\nCode:\n```python\n"
start_index = text.find(pattern)
if start_index == -1:
raise Exception("Code not found")
# Extract the text following the pattern, without the trailing "```"
extracted_text = text[start_index + len(pattern):-3]
return extracted_text
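# Illustrative example (hypothetical model output): given
#   "Reasoning:\nScroll to find the cart.\nCode:\n```python\nactions.scroll('down', 'find the cart')\n```"
# extract_code returns "actions.scroll('down', 'find the cart')\n" (the trailing newline survives).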
def calculate_next_action(task, observation):
llm = get_llm()
system_message = generate_system_message()
user_message = generate_user_message(task, observation)
try:
ai_message = llm([system_message, user_message])
    except Exception:
# This sometimes solves the RPM limit issue
logger.warning("Failed to get response from OpenAI, trying again in 30 seconds")
time.sleep(30)
ai_message = llm([system_message, user_message])
logger.info(f"AI message: {ai_message.content}")
code_to_execute = extract_code(ai_message.content)
return code_to_execute
def get_task_status(observation):
if observation.env_state.has_successfully_completed:
return TASK_STATUS.SUCCESS
elif observation.env_state.has_failed:
return TASK_STATUS.FAILED
else:
return TASK_STATUS.IN_PROGRESS
def act(url, task, max_actions=40, **kwargs):
task = Task(task=task, args=kwargs)
browser = BrowserEnv(headless=False)
observation = browser.reset(url)
for i in range(max_actions):
        action = calculate_next_action(task, observation)
observation = browser.step(action, observation.marked_elements)
task_status = get_task_status(observation)
if task_status in [TASK_STATUS.SUCCESS, TASK_STATUS.FAILED]:
return task_status, observation.env_state.output
logger.warning(f"Reached {i} actions without completing the task.")
return TASK_STATUS.FAILED, observation.env_state.output
| [
"langchain.schema.SystemMessage",
"langchain.schema.HumanMessage",
"langchain.chat_models.ChatOpenAI"
] | [((233, 260), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (250, 260), False, 'import logging\n'), ((277, 326), 'enum.Enum', 'Enum', (['"""TASK_STATUS"""', '"""IN_PROGRESS SUCCESS FAILED"""'], {}), "('TASK_STATUS', 'IN_PROGRESS SUCCESS FAILED')\n", (281, 326), False, 'from enum import Enum\n'), ((461, 563), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4-vision-preview"""', 'temperature': '(1)', 'request_timeout': '(120)', 'max_tokens': '(2000)'}), "(model_name='gpt-4-vision-preview', temperature=1,\n request_timeout=120, max_tokens=2000)\n", (471, 563), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1653, 1704), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '[text_content, image_content]'}), '(content=[text_content, image_content])\n', (1665, 1704), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((4455, 4491), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_prompt'}), '(content=system_prompt)\n', (4468, 4491), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((5890, 5916), 'pywebagent.env.browser.BrowserEnv', 'BrowserEnv', ([], {'headless': '(False)'}), '(headless=False)\n', (5900, 5916), False, 'from pywebagent.env.browser import BrowserEnv\n'), ((1224, 1255), 'json.dumps', 'json.dumps', (['task.args'], {'indent': '(4)'}), '(task.args, indent=4)\n', (1234, 1255), False, 'import json\n'), ((1322, 1357), 'base64.b64encode', 'base64.b64encode', (['screenshot_binary'], {}), '(screenshot_binary)\n', (1338, 1357), False, 'import base64\n'), ((5312, 5326), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (5322, 5326), False, 'import time\n')] |
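A minimal, illustrative call into the act() entry point defined above; the URL, task text, and extra keyword arguments are hypothetical and are only meant to show how kwargs become the task's arguments.
status, output = act(
    "https://example.com/checkout",  # hypothetical target page
    "Buy the item currently in the cart and return the order number",
    max_actions=20,
    coupon_code="SAVE10",  # extra kwargs -> task.args
)
if status == TASK_STATUS.SUCCESS:
    print("Done:", output)
else:
    print("Gave up:", output)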