Dataset columns: id (string, 14–16 characters), text (string, 36–2.73k characters), source (string, 49–117 characters)
bf38ac40fe32-7
line:\nPerson #1: i\'m trying to improve Langchain\'s interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I\'m working with Person #2.\nOutput: Langchain, Person #2\nEND OF EXAMPLE\n\nConversation history (for reference only):\n{history}\nLast line of conversation (for extraction):\nHuman: {input}\n\nOutput:', template_format='f-string', validate_template=True)#
https://python.langchain.com/en/latest/reference/modules/memory.html
bf38ac40fe32-8
field human_prefix: str = 'Human'# field k: int = 2# field kg: langchain.graphs.networkx_graph.NetworkxEntityGraph [Optional]#
https://python.langchain.com/en/latest/reference/modules/memory.html
bf38ac40fe32-9
field knowledge_extraction_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['history', 'input'], output_parser=None, partial_variables={}, template="You are a networked intelligence helping a human track knowledge triples about all relevant people, things, concepts, etc. and integrating them with your knowledge stored within your weights as well as that stored in a knowledge graph. Extract all of the knowledge triples from the last line of conversation. A knowledge triple is a clause that contains a subject, a predicate, and an object. The subject is the entity being described, the predicate is the property of the subject that is being described, and the object is the value of the property.\n\nEXAMPLE\nConversation history:\nPerson #1: Did you hear aliens landed in Area 51?\nAI: No, I didn't hear that. What do you know about Area 51?\nPerson #1: It's a secret military base in Nevada.\nAI: What do you know about Nevada?\nLast line of conversation:\nPerson #1: It's a state in the US. It's also the number 1 producer of gold in the US.\n\nOutput: (Nevada, is a, state)<|>(Nevada, is in, US)<|>(Nevada, is the number 1 producer of, gold)\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: Hello.\nAI: Hi! How are you?\nPerson #1: I'm good. How are you?\nAI: I'm good too.\nLast line of conversation:\nPerson #1: I'm going to the store.\n\nOutput: NONE\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: What do you know about Descartes?\nAI: Descartes was a French philosopher, mathematician, and scientist who lived in the 17th
https://python.langchain.com/en/latest/reference/modules/memory.html
bf38ac40fe32-10
Descartes was a French philosopher, mathematician, and scientist who lived in the 17th century.\nPerson #1: The Descartes I'm referring to is a standup comedian and interior designer from Montreal.\nAI: Oh yes, He is a comedian and an interior designer. He has been in the industry for 30 years. His favorite food is baked bean pie.\nLast line of conversation:\nPerson #1: Oh huh. I know Descartes likes to drive antique scooters and play the mandolin.\nOutput: (Descartes, likes to drive, antique scooters)<|>(Descartes, plays, mandolin)\nEND OF EXAMPLE\n\nConversation history (for reference only):\n{history}\nLast line of conversation (for extraction):\nHuman: {input}\n\nOutput:", template_format='f-string', validate_template=True)#
https://python.langchain.com/en/latest/reference/modules/memory.html
bf38ac40fe32-11
field llm: langchain.base_language.BaseLanguageModel [Required]# field summary_message_cls: Type[langchain.schema.BaseMessage] = <class 'langchain.schema.SystemMessage'># Number of previous utterances to include in the context. clear() β†’ None[source]# Clear memory contents. get_current_entities(input_string: str) β†’ List[str][source]# get_knowledge_triplets(input_string: str) β†’ List[langchain.graphs.networkx_graph.KnowledgeTriple][source]# load_memory_variables(inputs: Dict[str, Any]) β†’ Dict[str, Any][source]# Return history buffer. save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) β†’ None[source]# Save context from this conversation to buffer. pydantic model langchain.memory.ConversationStringBufferMemory[source]# Buffer for storing conversation memory. field ai_prefix: str = 'AI'# Prefix to use for AI generated responses. field buffer: str = ''# field human_prefix: str = 'Human'# field input_key: Optional[str] = None# field output_key: Optional[str] = None# clear() β†’ None[source]# Clear memory contents. load_memory_variables(inputs: Dict[str, Any]) β†’ Dict[str, str][source]# Return history buffer. save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) β†’ None[source]# Save context from this conversation to buffer. property memory_variables: List[str]# Will always return list of memory variables. :meta private: pydantic model langchain.memory.ConversationSummaryBufferMemory[source]# Buffer with summarizer for storing conversation memory. field max_token_limit: int = 2000# field memory_key: str = 'history'# field moving_summary_buffer: str = ''#
https://python.langchain.com/en/latest/reference/modules/memory.html
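A minimal sketch of the knowledge-graph memory methods documented above, assuming an OpenAI API key is available in the environment; the model choice and the sample utterances are illustrative:
from langchain.llms import OpenAI
from langchain.memory import ConversationKGMemory

llm = OpenAI(temperature=0)
memory = ConversationKGMemory(llm=llm)

# store one exchange, then query the graph-backed memory
memory.save_context({"input": "Sam is my coworker"}, {"output": "Good to know."})
print(memory.get_current_entities("What do you know about Sam?"))
print(memory.get_knowledge_triplets("Sam lives in Toronto"))
print(memory.load_memory_variables({"input": "Tell me about Sam"}))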
bf38ac40fe32-12
field memory_key: str = 'history'# field moving_summary_buffer: str = ''# clear() β†’ None[source]# Clear memory contents. load_memory_variables(inputs: Dict[str, Any]) β†’ Dict[str, Any][source]# Return history buffer. prune() β†’ None[source]# Prune buffer if it exceeds max token limit save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) β†’ None[source]# Save context from this conversation to buffer. property buffer: List[langchain.schema.BaseMessage]# pydantic model langchain.memory.ConversationSummaryMemory[source]# Conversation summarizer to memory. field buffer: str = ''# clear() β†’ None[source]# Clear memory contents. classmethod from_messages(llm: langchain.base_language.BaseLanguageModel, chat_memory: langchain.schema.BaseChatMessageHistory, *, summarize_step: int = 2, **kwargs: Any) β†’ langchain.memory.summary.ConversationSummaryMemory[source]# load_memory_variables(inputs: Dict[str, Any]) β†’ Dict[str, Any][source]# Return history buffer. save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) β†’ None[source]# Save context from this conversation to buffer. pydantic model langchain.memory.ConversationTokenBufferMemory[source]# Buffer for storing conversation memory. field ai_prefix: str = 'AI'# field human_prefix: str = 'Human'# field llm: langchain.base_language.BaseLanguageModel [Required]# field max_token_limit: int = 2000# field memory_key: str = 'history'# load_memory_variables(inputs: Dict[str, Any]) β†’ Dict[str, Any][source]# Return history buffer. save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) β†’ None[source]#
https://python.langchain.com/en/latest/reference/modules/memory.html
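A hedged sketch of ConversationSummaryBufferMemory in use; the small max_token_limit is chosen only so that pruning and the moving summary are easy to observe, and an OpenAI key in the environment is assumed:
from langchain.llms import OpenAI
from langchain.memory import ConversationSummaryBufferMemory

llm = OpenAI(temperature=0)
memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=40)

memory.save_context({"input": "Hi, I'm planning a trip to Japan"}, {"output": "Sounds fun! When?"})
memory.save_context({"input": "Sometime in October"}, {"output": "Autumn is a great season there."})
# turns beyond the token limit are summarized into moving_summary_buffer
print(memory.load_memory_variables({}))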
bf38ac40fe32-13
Save context from this conversation to buffer. Pruned. property buffer: List[langchain.schema.BaseMessage]# String buffer of memory. class langchain.memory.CosmosDBChatMessageHistory(cosmos_endpoint: str, cosmos_database: str, cosmos_container: str, session_id: str, user_id: str, credential: Any = None, connection_string: Optional[str] = None, ttl: Optional[int] = None, cosmos_client_kwargs: Optional[dict] = None)[source]# Chat history backed by Azure CosmosDB. add_message(message: langchain.schema.BaseMessage) β†’ None[source]# Add a self-created message to the store clear() β†’ None[source]# Clear session memory from this memory and cosmos. load_messages() β†’ None[source]# Retrieve the messages from Cosmos prepare_cosmos() β†’ None[source]# Prepare the CosmosDB client. Use this function or the context manager to make sure your database is ready. upsert_messages() β†’ None[source]# Update the cosmosdb item. class langchain.memory.DynamoDBChatMessageHistory(table_name: str, session_id: str)[source]# Chat message history that stores history in AWS DynamoDB. This class expects that a DynamoDB table with name table_name and a partition Key of SessionId is present. Parameters table_name – name of the DynamoDB table session_id – arbitrary key that is used to store the messages of a single chat session. add_message(message: langchain.schema.BaseMessage) β†’ None[source]# Append the message to the record in DynamoDB clear() β†’ None[source]# Clear session memory from DynamoDB property messages: List[langchain.schema.BaseMessage]# Retrieve the messages from DynamoDB class langchain.memory.FileChatMessageHistory(file_path: str)[source]#
https://python.langchain.com/en/latest/reference/modules/memory.html
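A sketch of DynamoDBChatMessageHistory as described above; the table name and session id are placeholders, and it assumes a table with a SessionId partition key already exists and AWS credentials are configured:
from langchain.memory import DynamoDBChatMessageHistory
from langchain.schema import HumanMessage

history = DynamoDBChatMessageHistory(table_name="SessionTable", session_id="user-123")
history.add_message(HumanMessage(content="Hello from DynamoDB"))
print(history.messages)
history.clear()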
bf38ac40fe32-14
class langchain.memory.FileChatMessageHistory(file_path: str)[source]# Chat message history that stores history in a local file. Parameters file_path – path of the local file to store the messages. add_message(message: langchain.schema.BaseMessage) β†’ None[source]# Append the message to the record in the local file clear() β†’ None[source]# Clear session memory from the local file property messages: List[langchain.schema.BaseMessage]# Retrieve the messages from the local file pydantic model langchain.memory.InMemoryEntityStore[source]# Basic in-memory entity store. field store: Dict[str, Optional[str]] = {}# clear() β†’ None[source]# Delete all entities from store. delete(key: str) β†’ None[source]# Delete entity value from store. exists(key: str) β†’ bool[source]# Check if entity exists in store. get(key: str, default: Optional[str] = None) β†’ Optional[str][source]# Get entity value from store. set(key: str, value: Optional[str]) β†’ None[source]# Set entity value in store. class langchain.memory.MomentoChatMessageHistory(session_id: str, cache_client: momento.CacheClient, cache_name: str, *, key_prefix: str = 'message_store:', ttl: Optional[timedelta] = None, ensure_cache_exists: bool = True)[source]# Chat message history cache that uses Momento as a backend. See https://gomomento.com/ add_message(message: langchain.schema.BaseMessage) β†’ None[source]# Store a message in the cache. Parameters message (BaseMessage) – The message object to store. Raises SdkException – Momento service or network error. Exception – Unexpected response. clear() β†’ None[source]#
https://python.langchain.com/en/latest/reference/modules/memory.html
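A short sketch of the file-backed history and the in-memory entity store from this section; the file path and the entity values are illustrative:
from langchain.memory import FileChatMessageHistory, InMemoryEntityStore
from langchain.schema import AIMessage, HumanMessage

history = FileChatMessageHistory(file_path="chat_history.json")
history.add_message(HumanMessage(content="Remember that my cat is named Miso"))
history.add_message(AIMessage(content="Noted!"))
print(history.messages)

store = InMemoryEntityStore()
store.set("Miso", "Miso is the user's cat")
print(store.exists("Miso"), store.get("Miso"))
store.delete("Miso")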
bf38ac40fe32-15
Exception – Unexpected response. clear() β†’ None[source]# Remove the session’s messages from the cache. Raises SdkException – Momento service or network error. Exception – Unexpected response. classmethod from_client_params(session_id: str, cache_name: str, ttl: timedelta, *, configuration: Optional[momento.config.Configuration] = None, auth_token: Optional[str] = None, **kwargs: Any) β†’ MomentoChatMessageHistory[source]# Construct cache from CacheClient parameters. property messages: list[langchain.schema.BaseMessage]# Retrieve the messages from Momento. Raises SdkException – Momento service or network error Exception – Unexpected response Returns List of cached messages Return type list[BaseMessage] class langchain.memory.MongoDBChatMessageHistory(connection_string: str, session_id: str, database_name: str = 'chat_history', collection_name: str = 'message_store')[source]# Chat message history that stores history in MongoDB. Parameters connection_string – connection string to connect to MongoDB session_id – arbitrary key that is used to store the messages of a single chat session. database_name – name of the database to use collection_name – name of the collection to use add_message(message: langchain.schema.BaseMessage) β†’ None[source]# Append the message to the record in MongoDB clear() β†’ None[source]# Clear session memory from MongoDB property messages: List[langchain.schema.BaseMessage]# Retrieve the messages from MongoDB class langchain.memory.PostgresChatMessageHistory(session_id: str, connection_string: str = 'postgresql://postgres:mypassword@localhost/chat_history', table_name: str = 'message_store')[source]# add_message(message: langchain.schema.BaseMessage) β†’ None[source]# Append the message to the record in PostgreSQL clear() β†’ None[source]#
https://python.langchain.com/en/latest/reference/modules/memory.html
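A sketch of MongoDBChatMessageHistory using the parameters documented above; the connection string, database, and collection names are placeholders for a running MongoDB instance:
from langchain.memory import MongoDBChatMessageHistory
from langchain.schema import HumanMessage

history = MongoDBChatMessageHistory(
    connection_string="mongodb://localhost:27017",
    session_id="session-42",
    database_name="chat_history",
    collection_name="message_store",
)
history.add_message(HumanMessage(content="Hello from MongoDB"))
print(history.messages)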
bf38ac40fe32-16
Append the message to the record in PostgreSQL clear() β†’ None[source]# Clear session memory from PostgreSQL property messages: List[langchain.schema.BaseMessage]# Retrieve the messages from PostgreSQL pydantic model langchain.memory.ReadOnlySharedMemory[source]# A memory wrapper that is read-only and cannot be changed. field memory: langchain.schema.BaseMemory [Required]# clear() β†’ None[source]# Nothing to clear, got a memory like a vault. load_memory_variables(inputs: Dict[str, Any]) β†’ Dict[str, str][source]# Load memory variables from memory. save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) β†’ None[source]# Nothing should be saved or changed property memory_variables: List[str]# Return memory variables. class langchain.memory.RedisChatMessageHistory(session_id: str, url: str = 'redis://localhost:6379/0', key_prefix: str = 'message_store:', ttl: Optional[int] = None)[source]# add_message(message: langchain.schema.BaseMessage) β†’ None[source]# Append the message to the record in Redis clear() β†’ None[source]# Clear session memory from Redis property key: str# Construct the record key to use property messages: List[langchain.schema.BaseMessage]# Retrieve the messages from Redis pydantic model langchain.memory.RedisEntityStore[source]# Redis-backed Entity store. Entities get a TTL of 1 day by default, and that TTL is extended by 3 days every time the entity is read back. field key_prefix: str = 'memory_store'# field recall_ttl: Optional[int] = 259200# field redis_client: Any = None# field session_id: str = 'default'# field ttl: Optional[int] = 86400#
https://python.langchain.com/en/latest/reference/modules/memory.html
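A sketch of RedisChatMessageHistory; it assumes a Redis server is reachable at the default URL, and the session id and TTL are illustrative:
from langchain.memory import RedisChatMessageHistory
from langchain.schema import AIMessage, HumanMessage

history = RedisChatMessageHistory(session_id="demo-session", url="redis://localhost:6379/0", ttl=600)
history.add_message(HumanMessage(content="What did we talk about yesterday?"))
history.add_message(AIMessage(content="We discussed LangChain memory classes."))
print(history.key)       # the Redis key the record is stored under
print(history.messages)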
bf38ac40fe32-17
field ttl: Optional[int] = 86400# clear() β†’ None[source]# Delete all entities from store. delete(key: str) β†’ None[source]# Delete entity value from store. exists(key: str) β†’ bool[source]# Check if entity exists in store. get(key: str, default: Optional[str] = None) β†’ Optional[str][source]# Get entity value from store. set(key: str, value: Optional[str]) β†’ None[source]# Set entity value in store. property full_key_prefix: str# pydantic model langchain.memory.SQLiteEntityStore[source]# SQLite-backed Entity store field session_id: str = 'default'# field table_name: str = 'memory_store'# clear() β†’ None[source]# Delete all entities from store. delete(key: str) β†’ None[source]# Delete entity value from store. exists(key: str) β†’ bool[source]# Check if entity exists in store. get(key: str, default: Optional[str] = None) β†’ Optional[str][source]# Get entity value from store. set(key: str, value: Optional[str]) β†’ None[source]# Set entity value in store. property full_table_name: str# pydantic model langchain.memory.SimpleMemory[source]# Simple memory for storing context or other bits of information that shouldn’t ever change between prompts. field memories: Dict[str, Any] = {}# clear() β†’ None[source]# Nothing to clear, got a memory like a vault. load_memory_variables(inputs: Dict[str, Any]) β†’ Dict[str, str][source]# Return key-value pairs given the text input to the chain. If None, return all memories
https://python.langchain.com/en/latest/reference/modules/memory.html
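A minimal sketch of SimpleMemory for constants that should never change between prompts; the stored keys and values are examples only:
from langchain.memory import SimpleMemory

memory = SimpleMemory(memories={"company": "Acme Corp", "tone": "friendly"})
print(memory.memory_variables)           # ['company', 'tone']
print(memory.load_memory_variables({}))  # returns the stored key-value pairs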
bf38ac40fe32-18
Return key-value pairs given the text input to the chain. If None, return all memories save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]# Nothing should be saved or changed, my memory is set in stone. property memory_variables: List[str]# Input keys this memory class will load dynamically. pydantic model langchain.memory.VectorStoreRetrieverMemory[source]# Class for a VectorStore-backed memory object. field input_key: Optional[str] = None# Key name to index the inputs to load_memory_variables. field memory_key: str = 'history'# Key name to locate the memories in the result of load_memory_variables. field retriever: langchain.vectorstores.base.VectorStoreRetriever [Required]# VectorStoreRetriever object to connect to. field return_docs: bool = False# Whether or not to return the result of querying the database directly. clear() → None[source]# Nothing to clear. load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Union[List[langchain.schema.Document], str]][source]# Return history buffer. save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]# Save context from this conversation to buffer. property memory_variables: List[str]# The list of keys emitted from the load_memory_variables method.
https://python.langchain.com/en/latest/reference/modules/memory.html
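A hedged sketch of VectorStoreRetrieverMemory; it assumes the faiss package and an OpenAI key are available, and the seed text and k value are illustrative:
from langchain.embeddings import OpenAIEmbeddings
from langchain.memory import VectorStoreRetrieverMemory
from langchain.vectorstores import FAISS

vectorstore = FAISS.from_texts(["placeholder"], OpenAIEmbeddings())
retriever = vectorstore.as_retriever(search_kwargs={"k": 1})
memory = VectorStoreRetrieverMemory(retriever=retriever)

memory.save_context({"input": "My favorite sport is curling"}, {"output": "Nice, very Canadian."})
# the most relevant past exchange comes back under the "history" key
print(memory.load_memory_variables({"input": "What sport do I like?"}))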
fb4eba2e13bc-0
SearxNG Search# Utility for using SearxNG meta search API. SearxNG is a privacy-friendly free metasearch engine that aggregates results from multiple search engines and databases and supports the OpenSearch specification. More details on the installation instructions here. For the search API refer to https://docs.searxng.org/dev/search_api.html
Quick Start# In order to use this utility you need to provide the searx host. This can be done by passing the named parameter searx_host or exporting the environment variable SEARX_HOST. Note: this is the only required parameter. Then create a searx search instance like this:
from langchain.utilities import SearxSearchWrapper
# when the host starts with `http` SSL is disabled and the connection
# is assumed to be on a private network
searx_host='http://self.hosted'
search = SearxSearchWrapper(searx_host=searx_host)
You can now use the search instance to query the searx API.
Searching# Use the run() and results() methods to query the searx API. Other methods are available for convenience. SearxResults is a convenience wrapper around the raw json result. Example usage of the run method to make a search:
s.run(query="what is the best search engine?")
Engine Parameters# You can pass any accepted searx search API parameters to the SearxSearchWrapper instance. In the following example we are using the engines and the language parameters:
# assuming the searx host is set as above or exported as an env variable
https://python.langchain.com/en/latest/reference/modules/searx_search.html
fb4eba2e13bc-1
# assuming the searx host is set as above or exported as an env variable
s = SearxSearchWrapper(engines=['google', 'bing'], language='es')
Search Tips# Searx offers a special search syntax that can also be used instead of passing engine parameters. For example the following query:
s.run("langchain library", engines=['github'])
# can also be written as:
s.run("langchain library !github")
# or even:
s.run("langchain library !gh")
In some situations you might want to pass an extra string to the search query, for example when the run() method is called by an agent. The search suffix can also be used as a way to pass extra parameters to searx or the underlying search engines.
# select the github engine and pass the search suffix
s.run("langchain library", query_suffix="!gh")
# select github using the conventional google search syntax
s.run("large language models", query_suffix="site:github.com")
NOTE: A search suffix can be defined on both the instance and the method level. The resulting query will be the concatenation of the two with the former taking precedence. See SearxNG Configured Engines and SearxNG Search Syntax for more details.
Notes: This wrapper is based on the SearxNG fork searxng/searxng, which is better maintained than the original Searx project and offers more features. Public SearxNG instances often use a rate limiter for API usage, so you might want to use a self-hosted instance and disable the rate limiter.
https://python.langchain.com/en/latest/reference/modules/searx_search.html
fb4eba2e13bc-2
use a self hosted instance and disable the rate limiter. If you are self-hosting an instance you can customize the rate limiter for your own network as described here. For a list of public SearxNG instances see https://searx.space/
class langchain.utilities.searx_search.SearxResults(data: str)[source]# Dict-like wrapper around search API results. property answers: Any# Helper accessor on the json result.
pydantic model langchain.utilities.searx_search.SearxSearchWrapper[source]# Wrapper for Searx API. To use you need to provide the searx host by passing the named parameter searx_host or exporting the environment variable SEARX_HOST. In some situations you might want to disable SSL verification, for example if you are running searx locally. You can do this by passing the named parameter unsecure. You can also pass the host url scheme as http to disable SSL.
Example:
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://localhost:8888")
Example with SSL disabled:
from langchain.utilities import SearxSearchWrapper
# note the unsecure parameter is not needed if you pass the url scheme as
# http
searx = SearxSearchWrapper(searx_host="http://localhost:8888", unsecure=True)
Validators disable_ssl_warnings » unsecure validate_params » all fields
field aiosession: Optional[Any] = None# field categories: Optional[List[str]] = []# field engines: Optional[List[str]] = []# field headers: Optional[dict] = None# field k: int = 10# field params: dict [Optional]# field query_suffix: Optional[str] = ''#
https://python.langchain.com/en/latest/reference/modules/searx_search.html
fb4eba2e13bc-3
field params: dict [Optional]# field query_suffix: Optional[str] = ''# field searx_host: str = ''# field unsecure: bool = False# async aresults(query: str, num_results: int, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict][source]# Asynchronously query with json results. Uses aiohttp. See results for more info. async arun(query: str, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str[source]# Asynchronous version of run. results(query: str, num_results: int, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict][source]# Run query through Searx API and return the results with metadata. Parameters query – The query to search for. query_suffix – Extra suffix appended to the query. num_results – Limit the number of results to return. engines – List of engines to use for the query. categories – List of categories to use for the query. **kwargs – extra parameters to pass to the searx API. Returns Dict with the following keys: {snippet: The description of the result. title: The title of the result. link: The link to the result. engines: The engines used for the result. category: Searx category of the result.} run(query: str, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str[source]#
https://python.langchain.com/en/latest/reference/modules/searx_search.html
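A sketch of the results() method documented above; the host is a placeholder for your own SearxNG instance and the engine list is illustrative:
from langchain.utilities import SearxSearchWrapper

search = SearxSearchWrapper(searx_host="http://localhost:8888")
results = search.results("large language models", num_results=5, engines=["github"])
for r in results:
    print(r.get("title"), r.get("link"))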
fb4eba2e13bc-4
Run query through Searx API and parse results. You can pass any other params to the searx query API. Parameters query – The query to search for. query_suffix – Extra suffix appended to the query. engines – List of engines to use for the query. categories – List of categories to use for the query. **kwargs – extra parameters to pass to the searx API. Returns The result of the query. Return type str Raises ValueError – If an error occurred with the query.
Example This will make a query to the qwant engine:
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://my.searx.host")
searx.run("what is the weather in France ?", engine="qwant")
# the same result can be achieved using the `!` syntax of searx
# to select the engine using `query_suffix`
searx.run("what is the weather in France ?", query_suffix="!qwant")
https://python.langchain.com/en/latest/reference/modules/searx_search.html
5e59ab06f6eb-0
Tools# Core toolkit implementations. pydantic model langchain.tools.AIPluginTool[source]# field api_spec: str [Required]# field args_schema: Type[AIPluginToolSchema] = <class 'langchain.tools.plugin.AIPluginToolSchema'># Pydantic model class to validate and parse the tool's input arguments. field plugin: AIPlugin [Required]# classmethod from_plugin_url(url: str) → langchain.tools.plugin.AIPluginTool[source]# pydantic model langchain.tools.APIOperation[source]# A model for a single API operation. field base_url: str [Required]# The base URL of the operation. field description: Optional[str] = None# The description of the operation. field method: langchain.tools.openapi.utils.openapi_utils.HTTPVerb [Required]# The HTTP method of the operation. field operation_id: str [Required]# The unique identifier of the operation. field path: str [Required]# The path of the operation. field properties: Sequence[langchain.tools.openapi.utils.api_models.APIProperty] [Required]# field request_body: Optional[langchain.tools.openapi.utils.api_models.APIRequestBody] = None# The request body of the operation. classmethod from_openapi_spec(spec: langchain.tools.openapi.utils.openapi_utils.OpenAPISpec, path: str, method: str) → langchain.tools.openapi.utils.api_models.APIOperation[source]# Create an APIOperation from an OpenAPI spec. classmethod from_openapi_url(spec_url: str, path: str, method: str) → langchain.tools.openapi.utils.api_models.APIOperation[source]# Create an APIOperation from an OpenAPI URL. to_typescript() → str[source]# Get typescript string representation of the operation.
https://python.langchain.com/en/latest/reference/modules/tools.html
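A sketch of building an APIOperation from a spec; the spec URL, path, and method below are placeholders for a real OpenAPI document:
from langchain.tools import APIOperation

operation = APIOperation.from_openapi_url(
    "https://example.com/openapi.json", path="/pets", method="get"
)
print(operation.operation_id)
print(operation.to_typescript())  # TypeScript-style view of the operation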
5e59ab06f6eb-1
to_typescript() β†’ str[source]# Get typescript string representation of the operation. static ts_type_from_python(type_: Union[str, Type, tuple, None, enum.Enum]) β†’ str[source]# property body_params: List[str]# property path_params: List[str]# property query_params: List[str]# pydantic model langchain.tools.AzureCogsFormRecognizerTool[source]# Tool that queries the Azure Cognitive Services Form Recognizer API. In order to set this up, follow instructions at: https://learn.microsoft.com/en-us/azure/applied-ai-services/form-recognizer/quickstarts/get-started-sdks-rest-api?view=form-recog-3.0.0&pivots=programming-language-python pydantic model langchain.tools.AzureCogsImageAnalysisTool[source]# Tool that queries the Azure Cognitive Services Image Analysis API. In order to set this up, follow instructions at: https://learn.microsoft.com/en-us/azure/cognitive-services/computer-vision/quickstarts-sdk/image-analysis-client-library-40 pydantic model langchain.tools.AzureCogsSpeech2TextTool[source]# Tool that queries the Azure Cognitive Services Speech2Text API. In order to set this up, follow instructions at: https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-speech-to-text?pivots=programming-language-python pydantic model langchain.tools.AzureCogsText2SpeechTool[source]# Tool that queries the Azure Cognitive Services Text2Speech API. In order to set this up, follow instructions at: https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-text-to-speech?pivots=programming-language-python pydantic model langchain.tools.BaseTool[source]# Interface LangChain tools must implement.
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-2
Interface LangChain tools must implement. field args_schema: Optional[Type[pydantic.main.BaseModel]] = None# Pydantic model class to validate and parse the tool’s input arguments. field callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None# Deprecated. Please use callbacks instead. field callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None# Callbacks to be called during tool execution. field description: str [Required]# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field handle_tool_error: Optional[Union[bool, str, Callable[[langchain.tools.base.ToolException], str]]] = False# Handle the content of the ToolException thrown. field name: str [Required]# The unique name of the tool that clearly communicates its purpose. field return_direct: bool = False# Whether to return the tool’s output directly. Setting this to True means that after the tool is called, the AgentExecutor will stop looping. field verbose: bool = False# Whether to log the tool’s progress. async arun(tool_input: Union[str, Dict], verbose: Optional[bool] = None, start_color: Optional[str] = 'green', color: Optional[str] = 'green', callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) β†’ Any[source]# Run the tool asynchronously.
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-3
Run the tool asynchronously. run(tool_input: Union[str, Dict], verbose: Optional[bool] = None, start_color: Optional[str] = 'green', color: Optional[str] = 'green', callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) β†’ Any[source]# Run the tool. property args: dict# property is_single_input: bool# Whether the tool only accepts a single input. pydantic model langchain.tools.BingSearchResults[source]# Tool that has capability to query the Bing Search API and get back json. field api_wrapper: langchain.utilities.bing_search.BingSearchAPIWrapper [Required]# field num_results: int = 4# pydantic model langchain.tools.BingSearchRun[source]# Tool that adds the capability to query the Bing search API. field api_wrapper: langchain.utilities.bing_search.BingSearchAPIWrapper [Required]# pydantic model langchain.tools.BraveSearch[source]# field search_wrapper: BraveSearchWrapper [Required]# classmethod from_api_key(api_key: str, search_kwargs: Optional[dict] = None, **kwargs: Any) β†’ langchain.tools.brave_search.tool.BraveSearch[source]# pydantic model langchain.tools.ClickTool[source]# field args_schema: Type[BaseModel] = <class 'langchain.tools.playwright.click.ClickToolInput'># Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'Click on an element with the given CSS selector'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'click_element'#
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-4
field name: str = 'click_element'# The unique name of the tool that clearly communicates its purpose. field playwright_strict: bool = False# Whether to employ Playwright’s strict mode when clicking on elements. field playwright_timeout: float = 1000# Timeout (in ms) for Playwright to wait for element to be ready. field visible_only: bool = True# Whether to consider only visible elements. pydantic model langchain.tools.CopyFileTool[source]# field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.file_management.copy.FileCopyInput'># Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'Create a copy of a file in a specified location'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'copy_file'# The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.CurrentWebPageTool[source]# field args_schema: Type[BaseModel] = <class 'pydantic.main.BaseModel'># Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'Returns the URL of the current page'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'current_webpage'# The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.DeleteFileTool[source]# field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.file_management.delete.FileDeleteInput'>#
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-5
Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'Delete a file'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'file_delete'# The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.DuckDuckGoSearchResults[source]# Tool that queries the Duck Duck Go Search API and get back json. field api_wrapper: langchain.utilities.duckduckgo_search.DuckDuckGoSearchAPIWrapper [Optional]# field num_results: int = 4# pydantic model langchain.tools.DuckDuckGoSearchRun[source]# Tool that adds the capability to query the DuckDuckGo search API. field api_wrapper: langchain.utilities.duckduckgo_search.DuckDuckGoSearchAPIWrapper [Optional]# pydantic model langchain.tools.ExtractHyperlinksTool[source]# Extract all hyperlinks on the page. field args_schema: Type[BaseModel] = <class 'langchain.tools.playwright.extract_hyperlinks.ExtractHyperlinksToolInput'># Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'Extract all hyperlinks on the current webpage'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'extract_hyperlinks'# The unique name of the tool that clearly communicates its purpose. static scrape_page(page: Any, html_content: str, absolute_urls: bool) β†’ str[source]# pydantic model langchain.tools.ExtractTextTool[source]#
https://python.langchain.com/en/latest/reference/modules/tools.html
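A sketch of the DuckDuckGo tools above; they need no API key, though the duckduckgo-search package must be installed, and the queries are examples:
from langchain.tools import DuckDuckGoSearchResults, DuckDuckGoSearchRun

search = DuckDuckGoSearchRun()
print(search.run("LangChain memory classes"))

# the Results variant returns a string of result metadata, up to num_results entries
results_tool = DuckDuckGoSearchResults(num_results=2)
print(results_tool.run("LangChain tools"))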
5e59ab06f6eb-6
pydantic model langchain.tools.ExtractTextTool[source]# field args_schema: Type[BaseModel] = <class 'pydantic.main.BaseModel'># Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'Extract all the text on the current webpage'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'extract_text'# The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.FileSearchTool[source]# field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.file_management.file_search.FileSearchInput'># Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'Recursively search for files in a subdirectory that match the regex pattern'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'file_search'# The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.GetElementsTool[source]# field args_schema: Type[BaseModel] = <class 'langchain.tools.playwright.get_elements.GetElementsToolInput'># Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'Retrieve elements in the current web page matching the given CSS selector'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'get_elements'# The unique name of the tool that clearly communicates its purpose.
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-7
The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.GmailCreateDraft[source]# field args_schema: Type[langchain.tools.gmail.create_draft.CreateDraftSchema] = <class 'langchain.tools.gmail.create_draft.CreateDraftSchema'># Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'Use this tool to create a draft email with the provided message fields.'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'create_gmail_draft'# The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.GmailGetMessage[source]# field args_schema: Type[langchain.tools.gmail.get_message.SearchArgsSchema] = <class 'langchain.tools.gmail.get_message.SearchArgsSchema'># Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'Use this tool to fetch an email by message ID. Returns the thread ID, snipet, body, subject, and sender.'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'get_gmail_message'# The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.GmailGetThread[source]# field args_schema: Type[langchain.tools.gmail.get_thread.GetThreadSchema] = <class 'langchain.tools.gmail.get_thread.GetThreadSchema'># Pydantic model class to validate and parse the tool’s input arguments.
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-8
Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'Use this tool to search for email messages. The input must be a valid Gmail query. The output is a JSON list of messages.'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'get_gmail_thread'# The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.GmailSearch[source]# field args_schema: Type[langchain.tools.gmail.search.SearchArgsSchema] = <class 'langchain.tools.gmail.search.SearchArgsSchema'># Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'Use this tool to search for email messages or threads. The input must be a valid Gmail query. The output is a JSON list of the requested resource.'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'search_gmail'# The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.GmailSendMessage[source]# field description: str = 'Use this tool to send email messages. The input is the message, recipents'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'send_gmail_message'# The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.GooglePlacesTool[source]# Tool that adds the capability to query the Google places API. field api_wrapper: langchain.utilities.google_places_api.GooglePlacesAPIWrapper [Optional]#
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-9
field api_wrapper: langchain.utilities.google_places_api.GooglePlacesAPIWrapper [Optional]# field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.google_places.tool.GooglePlacesSchema'># Pydantic model class to validate and parse the tool’s input arguments. pydantic model langchain.tools.GoogleSearchResults[source]# Tool that has capability to query the Google Search API and get back json. field api_wrapper: langchain.utilities.google_search.GoogleSearchAPIWrapper [Required]# field num_results: int = 4# pydantic model langchain.tools.GoogleSearchRun[source]# Tool that adds the capability to query the Google search API. field api_wrapper: langchain.utilities.google_search.GoogleSearchAPIWrapper [Required]# pydantic model langchain.tools.GoogleSerperResults[source]# Tool that has capability to query the Serper.dev Google Search API and get back json. field api_wrapper: langchain.utilities.google_serper.GoogleSerperAPIWrapper [Optional]# pydantic model langchain.tools.GoogleSerperRun[source]# Tool that adds the capability to query the Serper.dev Google search API. field api_wrapper: langchain.utilities.google_serper.GoogleSerperAPIWrapper [Required]# pydantic model langchain.tools.HumanInputRun[source]# Tool that adds the capability to ask user for input. field input_func: Callable [Optional]# field prompt_func: Callable[[str], None] [Optional]# pydantic model langchain.tools.IFTTTWebhook[source]# IFTTT Webhook. Parameters name – name of the tool description – description of the tool url – url to hit with the json event. field url: str [Required]# pydantic model langchain.tools.InfoPowerBITool[source]#
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-10
pydantic model langchain.tools.InfoPowerBITool[source]# Tool for getting metadata about a PowerBI Dataset. field powerbi: langchain.utilities.powerbi.PowerBIDataset [Required]# pydantic model langchain.tools.ListDirectoryTool[source]# field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.file_management.list_dir.DirectoryListingInput'># Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'List files and directories in a specified folder'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'list_directory'# The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.ListPowerBITool[source]# Tool for getting tables names. field powerbi: langchain.utilities.powerbi.PowerBIDataset [Required]# pydantic model langchain.tools.MetaphorSearchResults[source]# Tool that has capability to query the Metaphor Search API and get back json. field api_wrapper: langchain.utilities.metaphor_search.MetaphorSearchAPIWrapper [Required]# pydantic model langchain.tools.MoveFileTool[source]# field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.file_management.move.FileMoveInput'># Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'Move or rename a file from one location to another'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'move_file'#
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-11
field name: str = 'move_file'# The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.NavigateBackTool[source]# Navigate back to the previous page in the browser history. field args_schema: Type[BaseModel] = <class 'pydantic.main.BaseModel'># Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'Navigate back to the previous page in the browser history'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'previous_webpage'# The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.NavigateTool[source]# field args_schema: Type[BaseModel] = <class 'langchain.tools.playwright.navigate.NavigateToolInput'># Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'Navigate a browser to the specified URL'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'navigate_browser'# The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.OpenAPISpec[source]# OpenAPI Model that removes misformatted parts of the spec. classmethod from_file(path: Union[str, pathlib.Path]) β†’ langchain.tools.openapi.utils.openapi_utils.OpenAPISpec[source]# Get an OpenAPI spec from a file path. classmethod from_spec_dict(spec_dict: dict) β†’ langchain.tools.openapi.utils.openapi_utils.OpenAPISpec[source]# Get an OpenAPI spec from a dict.
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-12
Get an OpenAPI spec from a dict. classmethod from_text(text: str) β†’ langchain.tools.openapi.utils.openapi_utils.OpenAPISpec[source]# Get an OpenAPI spec from a text. classmethod from_url(url: str) β†’ langchain.tools.openapi.utils.openapi_utils.OpenAPISpec[source]# Get an OpenAPI spec from a URL. static get_cleaned_operation_id(operation: openapi_schema_pydantic.v3.v3_1_0.operation.Operation, path: str, method: str) β†’ str[source]# Get a cleaned operation id from an operation id. get_methods_for_path(path: str) β†’ List[str][source]# Return a list of valid methods for the specified path. get_operation(path: str, method: str) β†’ openapi_schema_pydantic.v3.v3_1_0.operation.Operation[source]# Get the operation object for a given path and HTTP method. get_parameters_for_operation(operation: openapi_schema_pydantic.v3.v3_1_0.operation.Operation) β†’ List[openapi_schema_pydantic.v3.v3_1_0.parameter.Parameter][source]# Get the components for a given operation. get_referenced_schema(ref: openapi_schema_pydantic.v3.v3_1_0.reference.Reference) β†’ openapi_schema_pydantic.v3.v3_1_0.schema.Schema[source]# Get a schema (or nested reference) or err. get_request_body_for_operation(operation: openapi_schema_pydantic.v3.v3_1_0.operation.Operation) β†’ Optional[openapi_schema_pydantic.v3.v3_1_0.request_body.RequestBody][source]# Get the request body for a given operation. classmethod parse_obj(obj: dict) β†’ langchain.tools.openapi.utils.openapi_utils.OpenAPISpec[source]# property base_url: str#
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-13
property base_url: str# Get the base url. pydantic model langchain.tools.OpenWeatherMapQueryRun[source]# Tool that adds the capability to query using the OpenWeatherMap API. field api_wrapper: langchain.utilities.openweathermap.OpenWeatherMapAPIWrapper [Optional]# pydantic model langchain.tools.PubmedQueryRun[source]# Tool that adds the capability to search using the PubMed API. field api_wrapper: langchain.utilities.pupmed.PubMedAPIWrapper [Optional]# pydantic model langchain.tools.QueryPowerBITool[source]# Tool for querying a Power BI Dataset. Validators raise_deprecation Β» all fields validate_llm_chain_input_variables Β» llm_chain field examples: Optional[str] = '\nQuestion: How many rows are in the table <table>?\nDAX: EVALUATE ROW("Number of rows", COUNTROWS(<table>))\n----\nQuestion: How many rows are in the table <table> where <column> is not empty?\nDAX: EVALUATE ROW("Number of rows", COUNTROWS(FILTER(<table>, <table>[<column>] <> "")))\n----\nQuestion: What was the average of <column> in <table>?\nDAX: EVALUATE ROW("Average", AVERAGE(<table>[<column>]))\n----\n'# field llm_chain: langchain.chains.llm.LLMChain [Required]# field max_iterations: int = 5# field powerbi: langchain.utilities.powerbi.PowerBIDataset [Required]# field session_cache: Dict[str, Any] [Optional]#
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-14
field template: Optional[str] = '\nAnswer the question below with a DAX query that can be sent to Power BI. DAX queries have a simple syntax comprised of just one required keyword, EVALUATE, and several optional keywords: ORDER BY, START AT, DEFINE, MEASURE, VAR, TABLE, and COLUMN. Each keyword defines a statement used for the duration of the query. Any time < or > are used in the text below it means that those values need to be replaced by table, columns or other things. If the question is not something you can answer with a DAX query, reply with "I cannot answer this" and the question will be escalated to a human.\n\nSome DAX functions return a table instead of a scalar, and must be wrapped in a function that evaluates the table and returns a scalar; unless the table is a single column, single row table, then it is treated as a scalar value. Most DAX functions require one or more arguments, which can include tables, columns, expressions, and values. However, some functions, such as PI, do not require any arguments, but always require parentheses to indicate the null argument. For example, you must always type PI(), not PI. You can also nest functions within other functions. \n\nSome commonly used functions are:\nEVALUATE <table> - At the most basic level, a DAX query is an EVALUATE statement containing a table expression. At least one EVALUATE statement is required, however, a query can contain any number of EVALUATE statements.\nEVALUATE <table> ORDER BY <expression> ASC or DESC - The optional ORDER BY keyword defines one or more expressions used to sort query results. Any expression that can be evaluated for each row of the result is valid.\nEVALUATE <table> ORDER BY <expression> ASC or DESC START AT <value> or <parameter> - The optional
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-15
ORDER BY <expression> ASC or DESC START AT <value> or <parameter> - The optional START AT keyword is used inside an ORDER BY clause. It defines the value at which the query results begin.\nDEFINE MEASURE | VAR; EVALUATE <table> - The optional DEFINE keyword introduces one or more calculated entity definitions that exist only for the duration of the query. Definitions precede the EVALUATE statement and are valid for all EVALUATE statements in the query. Definitions can be variables, measures, tables1, and columns1. Definitions can reference other definitions that appear before or after the current definition. At least one definition is required if the DEFINE keyword is included in a query.\nMEASURE <table name>[<measure name>] = <scalar expression> - Introduces a measure definition in a DEFINE statement of a DAX query.\nVAR <name> = <expression> - Stores the result of an expression as a named variable, which can then be passed as an argument to other measure expressions. Once resultant values have been calculated for a variable expression, those values do not change, even if the variable is referenced in another expression.\n\nFILTER(<table>,<filter>) - Returns a table that represents a subset of another table or expression, where <filter> is a Boolean expression that is to be evaluated for each row of the table. For example, [Amount] > 0 or [Region] = "France"\nROW(<name>, <expression>) - Returns a table with a single row containing values that result from the expressions given to each column.\nDISTINCT(<column>) - Returns a one-column table that contains the distinct values from the specified column. In other words, duplicate values are removed and only unique values are returned. This function cannot be used to Return values into a cell or column on a worksheet; rather, you nest the DISTINCT function within a formula, to get a list of distinct values that can be passed
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-16
you nest the DISTINCT function within a formula, to get a list of distinct values that can be passed to another function and then counted, summed, or used for other operations.\nDISTINCT(<table>) - Returns a table by removing duplicate rows from another table or expression.\n\nAggregation functions, names with a A in it, handle booleans and empty strings in appropriate ways, while the same function without A only uses the numeric values in a column. Functions names with an X in it can include a expression as an argument, this will be evaluated for each row in the table and the result will be used in the regular function calculation, these are the functions:\nCOUNT(<column>), COUNTA(<column>), COUNTX(<table>,<expression>), COUNTAX(<table>,<expression>), COUNTROWS([<table>]), COUNTBLANK(<column>), DISTINCTCOUNT(<column>), DISTINCTCOUNTNOBLANK (<column>) - these are all variantions of count functions.\nAVERAGE(<column>), AVERAGEA(<column>), AVERAGEX(<table>,<expression>) - these are all variantions of average functions.\nMAX(<column>), MAXA(<column>), MAXX(<table>,<expression>) - these are all variantions of max functions.\nMIN(<column>), MINA(<column>), MINX(<table>,<expression>) - these are all variantions of min functions.\nPRODUCT(<column>), PRODUCTX(<table>,<expression>) - these are all variantions of product functions.\nSUM(<column>), SUMX(<table>,<expression>) - these are all variantions of sum functions.\n\nDate and time functions:\nDATE(year, month, day) - Returns a date value that represents the specified year, month, and day.\nDATEDIFF(date1, date2, <interval>) - Returns the difference between two date values, in the specified
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-17
date2, <interval>) - Returns the difference between two date values, in the specified interval, that can be SECOND, MINUTE, HOUR, DAY, WEEK, MONTH, QUARTER, YEAR.\nDATEVALUE(<date_text>) - Returns a date value that represents the specified date.\nYEAR(<date>), QUARTER(<date>), MONTH(<date>), DAY(<date>), HOUR(<date>), MINUTE(<date>), SECOND(<date>) - Returns the part of the date for the specified date.\n\nFinally, make sure to escape double quotes with a single backslash, and make sure that only table names have single quotes around them, while names of measures or the values of columns that you want to compare against are in escaped double quotes. Newlines are not necessary and can be skipped. The queries are serialized as json and so will have to fit be compliant with json syntax. Sometimes you will get a question, a DAX query and a error, in that case you need to rewrite the DAX query to get the correct answer.\n\nThe following tables exist: {tables}\n\nand the schema\'s for some are given here:\n{schemas}\n\nExamples:\n{examples}\n\nQuestion: {tool_input}\nDAX: \n'#
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-18
pydantic model langchain.tools.ReadFileTool[source]# field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.file_management.read.ReadFileInput'># Pydantic model class to validate and parse the tool’s input arguments. field description: str = 'Read file from disk'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'read_file'# The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.SceneXplainTool[source]# Tool that adds the capability to explain images. field api_wrapper: langchain.utilities.scenexplain.SceneXplainAPIWrapper [Optional]# pydantic model langchain.tools.ShellTool[source]# Tool to run shell commands. field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.shell.tool.ShellInput'># Schema for input arguments. field description: str = 'Run shell commands on this Linux machine.'# Description of tool. field name: str = 'terminal'# Name of tool. field process: langchain.utilities.bash.BashProcess [Optional]# Bash process to run commands. pydantic model langchain.tools.SteamshipImageGenerationTool[source]# field model_name: ModelName [Required]# field return_urls: Optional[bool] = False# field size: Optional[str] = '512x512'# field steamship: Steamship [Required]# pydantic model langchain.tools.StructuredTool[source]# Tool that can operate on any number of inputs. field args_schema: Type[pydantic.main.BaseModel] [Required]# The input arguments’ schema. The tool schema.
https://python.langchain.com/en/latest/reference/modules/tools.html
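A sketch of ShellTool; the input key 'commands' is assumed from the ShellInput schema, and the commands themselves are harmless examples (the tool executes directly on the host, so only run commands you trust):
from langchain.tools import ShellTool

shell_tool = ShellTool()
# runs each command on the local machine and returns the combined output
output = shell_tool.run({"commands": ["echo 'Hello from the terminal tool'", "pwd"]})
print(output)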
5e59ab06f6eb-19
The input arguments’ schema. The tool schema. field coroutine: Optional[Callable[[...], Awaitable[Any]]] = None# The asynchronous version of the function. field description: str = ''# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field func: Callable[[...], Any] [Required]# The function to run when the tool is called. classmethod from_function(func: Callable, name: Optional[str] = None, description: Optional[str] = None, return_direct: bool = False, args_schema: Optional[Type[pydantic.main.BaseModel]] = None, infer_schema: bool = True, **kwargs: Any) β†’ langchain.tools.base.StructuredTool[source]# property args: dict# The tool’s input arguments. pydantic model langchain.tools.Tool[source]# Tool that takes in function or coroutine directly. field args_schema: Optional[Type[pydantic.main.BaseModel]] = None# Pydantic model class to validate and parse the tool’s input arguments. field callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None# Deprecated. Please use callbacks instead. field callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None# Callbacks to be called during tool execution. field coroutine: Optional[Callable[[...], Awaitable[str]]] = None# The asynchronous version of the function. field description: str = ''# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field func: Callable[[...], str] [Required]# The function to run when the tool is called.
https://python.langchain.com/en/latest/reference/modules/tools.html
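As a brief illustration of StructuredTool.from_function (a sketch, not from the original page; the function name and docstring are invented, and with infer_schema=True the args_schema is derived from the type hints):

from langchain.tools import StructuredTool

def multiply(a: int, b: int) -> int:
    """Multiply two integers."""  # the docstring feeds the generated tool description
    return a * b

multiply_tool = StructuredTool.from_function(multiply)
# Structured tools take a dict of arguments matching the inferred schema.
print(multiply_tool.run({"a": 6, "b": 7}))  # 42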
5e59ab06f6eb-20
The function to run when the tool is called. field handle_tool_error: Optional[Union[bool, str, Callable[[langchain.tools.base.ToolException], str]]] = False# Handle the content of the ToolException thrown. field name: str [Required]# The unique name of the tool that clearly communicates its purpose. field return_direct: bool = False# Whether to return the tool’s output directly. Setting this to True means that after the tool is called, the AgentExecutor will stop looping. field verbose: bool = False# Whether to log the tool’s progress. classmethod from_function(func: Callable, name: str, description: str, return_direct: bool = False, args_schema: Optional[Type[pydantic.main.BaseModel]] = None, **kwargs: Any) β†’ langchain.tools.base.Tool[source]# Initialize tool from a function. property args: dict# The tool’s input arguments. pydantic model langchain.tools.VectorStoreQATool[source]# Tool for the VectorDBQA chain. To be initialized with name and chain. static get_description(name: str, description: str) β†’ str[source]# pydantic model langchain.tools.VectorStoreQAWithSourcesTool[source]# Tool for the VectorDBQAWithSources chain. static get_description(name: str, description: str) β†’ str[source]# pydantic model langchain.tools.WikipediaQueryRun[source]# Tool that adds the capability to search using the Wikipedia API. field api_wrapper: langchain.utilities.wikipedia.WikipediaAPIWrapper [Required]# pydantic model langchain.tools.WolframAlphaQueryRun[source]# Tool that adds the capability to query using the Wolfram Alpha SDK. field api_wrapper: langchain.utilities.wolfram_alpha.WolframAlphaAPIWrapper [Required]#
https://python.langchain.com/en/latest/reference/modules/tools.html
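For comparison, Tool wraps a single-input function. A hedged sketch using the from_function constructor documented above (the helper function and tool name are made up for the example):

from langchain.tools import Tool

def get_word_length(word: str) -> str:
    """Return the number of characters in a word."""
    return str(len(word))

word_length_tool = Tool.from_function(
    func=get_word_length,
    name="word_length",
    description="Returns the number of characters in the input word.",
)
print(word_length_tool.run("hello"))  # "5"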
5e59ab06f6eb-21
pydantic model langchain.tools.WriteFileTool[source]# field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.file_management.write.WriteFileInput'># Pydantic model class to validate and parse the tool's input arguments. field description: str = 'Write file to disk'# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field name: str = 'write_file'# The unique name of the tool that clearly communicates its purpose. pydantic model langchain.tools.YouTubeSearchTool[source]# pydantic model langchain.tools.ZapierNLAListActions[source]# Returns a list of all exposed (enabled) actions associated with the current user (associated with the set api_key). Change your exposed actions here: https://nla.zapier.com/demo/start/ The returned list can be empty if no actions are exposed. Otherwise it will contain a list of action objects: [{"id": str, "description": str, "params": Dict[str, str]}] params will always contain an instructions key, the only required param. All others are optional and, if provided, will override any AI guesses (see "understanding the AI guessing flow" here: https://nla.zapier.com/api/v1/docs). Parameters None – field api_wrapper: langchain.utilities.zapier.ZapierNLAWrapper [Optional]# pydantic model langchain.tools.ZapierNLARunAction[source]# Executes an action that is identified by action_id, which must be exposed (enabled) by the current user (associated with the set api_key). Change your exposed actions here: https://nla.zapier.com/demo/start/
https://python.langchain.com/en/latest/reference/modules/tools.html
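A small sketch combining WriteFileTool with the ReadFileTool documented earlier (assumes the process may write notes.txt in the current working directory; pass root_dir if you want to sandbox file access):

from langchain.tools import ReadFileTool, WriteFileTool

write_tool = WriteFileTool()
read_tool = ReadFileTool()

# File tools take a dict matching their args_schema (WriteFileInput / ReadFileInput).
write_tool.run({"file_path": "notes.txt", "text": "hello from langchain"})
print(read_tool.run({"file_path": "notes.txt"}))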
5e59ab06f6eb-22
your exposed actions here: https://nla.zapier.com/demo/start/ The return JSON is guaranteed to be less than ~500 words (350 tokens) making it safe to inject into the prompt of another LLM call. Parameters action_id – a specific action ID (from list actions) of the action to execute (the set api_key must be associated with the action owner) instructions – a natural language instruction string for using the action (eg. β€œget the latest email from Mike Knoop” for β€œGmail: find email” action) params – a dict, optional. Any params provided will override AI guesses from instructions (see β€œunderstanding the AI guessing flow” here: https://nla.zapier.com/api/v1/docs) field action_id: str [Required]# field api_wrapper: langchain.utilities.zapier.ZapierNLAWrapper [Optional]#
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-23
field base_prompt: str = 'A wrapper around Zapier NLA actions. The input to this tool is a natural language instruction, for example "get the latest email from my bank" or "send a slack message to the #general channel". Each tool will have params associated with it that are specified as a list. You MUST take into account the params when creating the instruction. For example, if the params are [\'Message_Text\', \'Channel\'], your instruction should be something like \'send a slack message to the #general channel with the text hello world\'. Another example: if the params are [\'Calendar\', \'Search_Term\'], your instruction should be something like \'find the meeting in my personal calendar at 3pm\'. Do not make up params, they will be explicitly specified in the tool description. If you do not have enough information to fill in the params, just say \'not enough information provided in the instruction, missing <param>\'. If you get a none or null response, STOP EXECUTION, do not try to another tool!This tool specifically used for: {zapier_description}, and has params: {params}'# field params: Optional[dict] = None# field params_schema: Dict[str, str] [Optional]# field zapier_description: str [Required]# langchain.tools.tool(*args: Union[str, Callable], return_direct: bool = False, args_schema: Optional[Type[pydantic.main.BaseModel]] = None, infer_schema: bool = True) β†’ Callable[source]# Make tools out of functions, can be used with or without arguments. Parameters *args – The arguments to the tool. return_direct – Whether to return directly from the tool rather than continuing the agent loop. args_schema – optional argument schema for user to specify infer_schema – Whether to infer the schema of the arguments from
https://python.langchain.com/en/latest/reference/modules/tools.html
5e59ab06f6eb-24
infer_schema – Whether to infer the schema of the arguments from the function's signature. This also makes the resultant tool accept a dictionary input to its run() function. Requires: the function must be of type (str) -> str and must have a docstring. Examples:

@tool
def search_api(query: str) -> str:
    """Searches the API for the query."""
    return

@tool("search", return_direct=True)
def search_api(query: str) -> str:
    """Searches the API for the query."""
    return
https://python.langchain.com/en/latest/reference/modules/tools.html
dab40ed88528-0
Agent Toolkits# Agent toolkits. pydantic model langchain.agents.agent_toolkits.AzureCognitiveServicesToolkit[source]# Toolkit for Azure Cognitive Services. get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.FileManagementToolkit[source]# Toolkit for interacting with local files. field root_dir: Optional[str] = None# If specified, all file operations are made relative to root_dir. field selected_tools: Optional[List[str]] = None# If provided, only the selected tools are provided. Defaults to all. get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.GmailToolkit[source]# Toolkit for interacting with Gmail. field api_resource: Resource [Optional]# get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.JiraToolkit[source]# Jira Toolkit. field tools: List[langchain.tools.base.BaseTool] = []# classmethod from_jira_api_wrapper(jira_api_wrapper: langchain.utilities.jira.JiraAPIWrapper) → langchain.agents.agent_toolkits.jira.toolkit.JiraToolkit[source]# get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.JsonToolkit[source]# Toolkit for interacting with a JSON spec. field spec: langchain.tools.json.tool.JsonSpec [Required]# get_tools() → List[langchain.tools.base.BaseTool][source]#
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-1
get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.NLAToolkit[source]# Natural Language API Toolkit Definition. field nla_tools: Sequence[langchain.agents.agent_toolkits.nla.tool.NLATool] [Required]# List of API Endpoint Tools. classmethod from_llm_and_ai_plugin(llm: langchain.base_language.BaseLanguageModel, ai_plugin: langchain.tools.plugin.AIPlugin, requests: Optional[langchain.requests.Requests] = None, verbose: bool = False, **kwargs: Any) → langchain.agents.agent_toolkits.nla.toolkit.NLAToolkit[source]# Instantiate the toolkit from an AI plugin. classmethod from_llm_and_ai_plugin_url(llm: langchain.base_language.BaseLanguageModel, ai_plugin_url: str, requests: Optional[langchain.requests.Requests] = None, verbose: bool = False, **kwargs: Any) → langchain.agents.agent_toolkits.nla.toolkit.NLAToolkit[source]# Instantiate the toolkit from an AI plugin URL. classmethod from_llm_and_spec(llm: langchain.base_language.BaseLanguageModel, spec: langchain.tools.openapi.utils.openapi_utils.OpenAPISpec, requests: Optional[langchain.requests.Requests] = None, verbose: bool = False, **kwargs: Any) → langchain.agents.agent_toolkits.nla.toolkit.NLAToolkit[source]# Instantiate the toolkit by creating tools for each operation. classmethod from_llm_and_url(llm: langchain.base_language.BaseLanguageModel, open_api_url: str, requests: Optional[langchain.requests.Requests] = None, verbose: bool = False, **kwargs: Any) → langchain.agents.agent_toolkits.nla.toolkit.NLAToolkit[source]#
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-2
Instantiate the toolkit from an OpenAPI Spec URL get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools for all the API operations. pydantic model langchain.agents.agent_toolkits.OpenAPIToolkit[source]# Toolkit for interacting with an OpenAPI API. field json_agent: langchain.agents.agent.AgentExecutor [Required]# field requests_wrapper: langchain.requests.TextRequestsWrapper [Required]# classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, json_spec: langchain.tools.json.tool.JsonSpec, requests_wrapper: langchain.requests.TextRequestsWrapper, **kwargs: Any) → langchain.agents.agent_toolkits.openapi.toolkit.OpenAPIToolkit[source]# Create a json agent from the LLM, then initialize. pydantic model langchain.agents.agent_toolkits.PlayWrightBrowserToolkit[source]# Toolkit for web browser tools. field async_browser: Optional['AsyncBrowser'] = None# field sync_browser: Optional['SyncBrowser'] = None# classmethod from_browser(sync_browser: Optional[SyncBrowser] = None, async_browser: Optional[AsyncBrowser] = None) → PlayWrightBrowserToolkit[source]# Instantiate the toolkit. get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.PowerBIToolkit[source]# Toolkit for interacting with a PowerBI dataset. field callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None# field examples: Optional[str] = None# field llm: langchain.base_language.BaseLanguageModel [Required]# field max_iterations: int = 5#
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-3
field max_iterations: int = 5# field powerbi: langchain.utilities.powerbi.PowerBIDataset [Required]# get_tools() β†’ List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.SQLDatabaseToolkit[source]# Toolkit for interacting with SQL databases. field db: langchain.sql_database.SQLDatabase [Required]# field llm: langchain.base_language.BaseLanguageModel [Required]# get_tools() β†’ List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. property dialect: str# Return string representation of dialect to use. pydantic model langchain.agents.agent_toolkits.SparkSQLToolkit[source]# Toolkit for interacting with Spark SQL. field db: langchain.utilities.spark_sql.SparkSQL [Required]# field llm: langchain.base_language.BaseLanguageModel [Required]# get_tools() β†’ List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.VectorStoreInfo[source]# Information about a vectorstore. field description: str [Required]# field name: str [Required]# field vectorstore: langchain.vectorstores.base.VectorStore [Required]# pydantic model langchain.agents.agent_toolkits.VectorStoreRouterToolkit[source]# Toolkit for routing between vectorstores. field llm: langchain.base_language.BaseLanguageModel [Optional]# field vectorstores: List[langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreInfo] [Required]# get_tools() β†’ List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit.
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-4
Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.VectorStoreToolkit[source]# Toolkit for interacting with a vector store. field llm: langchain.base_language.BaseLanguageModel [Optional]# field vectorstore_info: langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreInfo [Required]# get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.ZapierToolkit[source]# Zapier Toolkit. field tools: List[langchain.tools.base.BaseTool] = []# classmethod from_zapier_nla_wrapper(zapier_nla_wrapper: langchain.utilities.zapier.ZapierNLAWrapper) → langchain.agents.agent_toolkits.zapier.toolkit.ZapierToolkit[source]# Create a toolkit from a ZapierNLAWrapper. get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. langchain.agents.agent_toolkits.create_csv_agent(llm: langchain.base_language.BaseLanguageModel, path: Union[str, List[str]], pandas_kwargs: Optional[dict] = None, **kwargs: Any) → langchain.agents.agent.AgentExecutor[source]# Create a CSV agent by loading the file into a dataframe and using the pandas agent.
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
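To tie the Zapier pieces together, a hedged sketch of wiring ZapierToolkit into an agent (assumes ZAPIER_NLA_API_KEY and OPENAI_API_KEY are set in the environment and that the relevant Gmail and Slack actions are exposed in your Zapier NLA account):

from langchain.agents import AgentType, initialize_agent
from langchain.agents.agent_toolkits import ZapierToolkit
from langchain.llms import OpenAI
from langchain.utilities.zapier import ZapierNLAWrapper

llm = OpenAI(temperature=0)
zapier = ZapierNLAWrapper()
toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)
agent = initialize_agent(
    toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("Summarize the last email I received and send the summary to the #general Slack channel.")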
dab40ed88528-5
langchain.agents.agent_toolkits.create_json_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.json.toolkit.JsonToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to interact with JSON.\nYour goal is to return a final answer by interacting with the JSON.\nYou have access to the following tools which help you learn more about the JSON you are interacting with.\nOnly use the below tools. Only use the information returned by the below tools to construct your final answer.\nDo not make up any information that is not contained in the JSON.\nYour input to the tools should be in the form of `data["key"][0]` where `data` is the JSON blob you are interacting with, and the syntax used is Python. \nYou should only use keys that you know for a fact exist. You must validate that a key exists by seeing it previously when calling `json_spec_list_keys`. \nIf you have not seen a key in one of those responses, you cannot use it.\nYou should only add one key at a time to the path. You cannot add multiple keys at once.\nIf you encounter a "KeyError", go back to the previous key, look at the available keys, and try again.\n\nIf the question does not seem to be related to the JSON, just return "I don\'t know" as the answer.\nAlways begin your interaction with the `json_spec_list_keys` tool with input "data" to see what keys exist in the JSON.\n\nNote that sometimes the value at a given path is large. In this case, you will get an error "Value is a large dictionary, should explore its keys directly".\nIn this case, you should ALWAYS follow up by using the `json_spec_list_keys` tool to see what keys
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-6
you should ALWAYS follow up by using the `json_spec_list_keys` tool to see what keys exist at that path.\nDo not simply refer the user to the JSON or a section of the JSON, as this is not a valid answer. Keep digging until you find the answer and explicitly return it.\n', suffix: str = 'Begin!"\n\nQuestion: {input}\nThought: I should look at the keys that exist in data to see what I have access to\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) β†’ langchain.agents.agent.AgentExecutor[source]#
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-7
Construct a json agent from an LLM and tools.
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
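A sketch of create_json_agent with the JsonToolkit documented above (assumes OPENAI_API_KEY is set, PyYAML is installed, and an OpenAPI spec file named openapi.yml exists locally; the filename is only illustrative):

import yaml
from langchain.agents.agent_toolkits import JsonToolkit, create_json_agent
from langchain.llms import OpenAI
from langchain.tools.json.tool import JsonSpec

with open("openapi.yml") as f:
    data = yaml.safe_load(f)
json_spec = JsonSpec(dict_=data, max_value_length=4000)
toolkit = JsonToolkit(spec=json_spec)
agent_executor = create_json_agent(llm=OpenAI(temperature=0), toolkit=toolkit, verbose=True)
agent_executor.run("What are the required parameters in the request body for the /completions endpoint?")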
dab40ed88528-8
langchain.agents.agent_toolkits.create_openapi_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.openapi.toolkit.OpenAPIToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = "You are an agent designed to answer questions by making web requests to an API given the openapi spec.\n\nIf the question does not seem related to the API, return I don't know. Do not make up an answer.\nOnly use information provided by the tools to construct your response.\n\nFirst, find the base URL needed to make the request.\n\nSecond, find the relevant paths needed to answer the question. Take note that, sometimes, you might need to make more than one request to more than one path to answer the question.\n\nThird, find the required parameters needed to make the request. For GET requests, these are usually URL parameters and for POST requests, these are request body parameters.\n\nFourth, make the requests needed to answer the question. Ensure that you are sending the correct parameters to the request by checking which parameters are required. For parameters with a fixed set of values, please use the spec to look at which values are allowed.\n\nUse the exact parameter names as listed in the spec, do not make up any names or abbreviate the names of parameters.\nIf you get a not found error, ensure that you are using a path that actually exists in the spec.\n", suffix: str = 'Begin!\n\nQuestion: {input}\nThought: I should explore the spec to find the base url for the API.\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-9
you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', verbose: bool = False, return_intermediate_steps: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) β†’ langchain.agents.agent.AgentExecutor[source]#
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-10
Construct an OpenAPI agent from an LLM and tools. langchain.agents.agent_toolkits.create_pandas_dataframe_agent(llm: langchain.base_language.BaseLanguageModel, df: Any, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, verbose: bool = False, return_intermediate_steps: bool = False, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', agent_executor_kwargs: Optional[Dict[str, Any]] = None, include_df_in_prompt: Optional[bool] = True, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]# Construct a pandas agent from an LLM and dataframe.
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
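A minimal sketch of create_pandas_dataframe_agent (assumes OPENAI_API_KEY is set and titanic.csv is a local file used only as an example; any dataframe works):

import pandas as pd
from langchain.agents.agent_toolkits import create_pandas_dataframe_agent
from langchain.llms import OpenAI

df = pd.read_csv("titanic.csv")
agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df, verbose=True)
agent.run("How many rows are there, and what is the average age?")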
dab40ed88528-11
langchain.agents.agent_toolkits.create_pbi_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: Optional[langchain.agents.agent_toolkits.powerbi.toolkit.PowerBIToolkit], powerbi: Optional[langchain.utilities.powerbi.PowerBIDataset] = None, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to help users interact with a PowerBI Dataset.\n\nAgent has access to a tool that can write a query based on the question and then run those against PowerBI, Microsofts business intelligence tool. The questions from the users should be interpreted as related to the dataset that is available and not general questions about the world. If the question does not seem related to the dataset, just return "This does not appear to be part of this dataset." as the answer.\n\nGiven an input question, ask to run the questions against the dataset, then look at the results and return the answer, the answer should be a complete sentence that answers the question, if multiple rows are asked find a way to write that in a easily readible format for a human, also make sure to represent numbers in readable ways, like 1M instead of 1000000. Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.\n', suffix: str = 'Begin!\n\nQuestion: {input}\nThought: I can first ask which tables I have, then how each table is defined and then ask the query tool the question I need, and finally create a nice sentence that answers the question.\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-12
you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', examples: Optional[str] = None, input_variables: Optional[List[str]] = None, top_k: int = 10, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) β†’ langchain.agents.agent.AgentExecutor[source]#
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-13
Construct a pbi agent from an LLM and tools.
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
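A hedged sketch of create_pbi_agent together with the PowerBIToolkit and the PowerBIDataset documented in the Utilities section below (the dataset ID and table name are placeholders; it assumes azure-identity can supply a credential with access to the workspace and that OPENAI_API_KEY is set):

from azure.identity import DefaultAzureCredential
from langchain.agents.agent_toolkits import PowerBIToolkit, create_pbi_agent
from langchain.chat_models import ChatOpenAI
from langchain.utilities.powerbi import PowerBIDataset

llm = ChatOpenAI(temperature=0)
powerbi = PowerBIDataset(
    dataset_id="<dataset-id>",  # placeholder
    table_names=["Sales"],      # placeholder table name
    credential=DefaultAzureCredential(),
)
toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm)
agent_executor = create_pbi_agent(llm=llm, toolkit=toolkit, verbose=True)
agent_executor.run("How many rows are in the Sales table?")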
dab40ed88528-14
langchain.agents.agent_toolkits.create_pbi_chat_agent(llm: langchain.chat_models.base.BaseChatModel, toolkit: Optional[langchain.agents.agent_toolkits.powerbi.toolkit.PowerBIToolkit], powerbi: Optional[langchain.utilities.powerbi.PowerBIDataset] = None, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, output_parser: Optional[langchain.agents.agent.AgentOutputParser] = None, prefix: str = 'Assistant is a large language model built to help users interact with a PowerBI Dataset.\n\nAssistant has access to a tool that can write a query based on the question and then run those against PowerBI, Microsofts business intelligence tool. The questions from the users should be interpreted as related to the dataset that is available and not general questions about the world. If the question does not seem related to the dataset, just return "This does not appear to be part of this dataset." as the answer.\n\nGiven an input question, ask to run the questions against the dataset, then look at the results and return the answer, the answer should be a complete sentence that answers the question, if multiple rows are asked find a way to write that in a easily readible format for a human, also make sure to represent numbers in readable ways, like 1M instead of 1000000. Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.\n', suffix: str = "TOOLS\n------\nAssistant can ask the user to use tools to look up information that may be helpful in answering the users original question. The tools the human can use are:\n\n{{tools}}\n\n{format_instructions}\n\nUSER'S INPUT\n--------------------\nHere is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-15
(remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):\n\n{{{{input}}}}\n", examples: Optional[str] = None, input_variables: Optional[List[str]] = None, memory: Optional[langchain.memory.chat_memory.BaseChatMemory] = None, top_k: int = 10, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) β†’ langchain.agents.agent.AgentExecutor[source]#
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-16
Construct a pbi agent from a chat LLM and tools. If you supply only a toolkit and no powerbi dataset, the same LLM is used for both. langchain.agents.agent_toolkits.create_python_agent(llm: langchain.base_language.BaseLanguageModel, tool: langchain.tools.python.tool.PythonREPLTool, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, verbose: bool = False, prefix: str = 'You are an agent designed to write and execute python code to answer questions.\nYou have access to a python REPL, which you can use to execute python code.\nIf you get an error, debug your code and try again.\nOnly use the output of your code to answer the question. \nYou might know the answer without running any code, but you should still run the code to get the answer.\nIf it does not seem like you can write code to answer the question, just return "I don\'t know" as the answer.\n', agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]# Construct a python agent from an LLM and tool.
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
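A short sketch of create_python_agent with the PythonREPLTool (assumes OPENAI_API_KEY is set; note that the agent executes the generated Python locally):

from langchain.agents.agent_toolkits import create_python_agent
from langchain.llms import OpenAI
from langchain.tools.python.tool import PythonREPLTool

agent_executor = create_python_agent(
    llm=OpenAI(temperature=0),
    tool=PythonREPLTool(),
    verbose=True,
)
agent_executor.run("What is the 10th Fibonacci number?")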
dab40ed88528-17
Construct a python agent from an LLM and tool. langchain.agents.agent_toolkits.create_spark_dataframe_agent(llm: langchain.llms.base.BaseLLM, df: Any, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = '\nYou are working with a spark dataframe in Python. The name of the dataframe is `df`.\nYou should use the tools below to answer the question posed of you:', suffix: str = '\nThis is the result of `print(df.first())`:\n{df}\n\nBegin!\nQuestion: {input}\n{agent_scratchpad}', input_variables: Optional[List[str]] = None, verbose: bool = False, return_intermediate_steps: bool = False, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) β†’ langchain.agents.agent.AgentExecutor[source]# Construct a spark agent from an LLM and dataframe.
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
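A sketch of create_spark_dataframe_agent (assumes pyspark is installed, OPENAI_API_KEY is set, and people.csv is a local file used purely for illustration):

from pyspark.sql import SparkSession
from langchain.agents.agent_toolkits import create_spark_dataframe_agent
from langchain.llms import OpenAI

spark = SparkSession.builder.getOrCreate()
df = spark.read.csv("people.csv", header=True, inferSchema=True)
agent = create_spark_dataframe_agent(llm=OpenAI(temperature=0), df=df, verbose=True)
agent.run("How many rows are in the dataframe?")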
dab40ed88528-18
langchain.agents.agent_toolkits.create_spark_sql_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.spark_sql.toolkit.SparkSQLToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to interact with Spark SQL.\nGiven an input question, create a syntactically correct Spark SQL query to run, then look at the results of the query and return the answer.\nUnless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.\nYou can order the results by a relevant column to return the most interesting examples in the database.\nNever query for all the columns from a specific table, only ask for the relevant columns given the question.\nYou have access to tools for interacting with the database.\nOnly use the below tools. Only use the information returned by the below tools to construct your final answer.\nYou MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.\n\nDO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.\n\nIf the question does not seem related to the database, just return "I don\'t know" as the answer.\n', suffix: str = 'Begin!\n\nQuestion: {input}\nThought: I should look at the tables in the database to see what I can query.\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-19
Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, top_k: int = 10, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) β†’ langchain.agents.agent.AgentExecutor[source]#
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-20
Construct a Spark SQL agent from an LLM and tools.
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-21
langchain.agents.agent_toolkits.create_sql_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.sql.toolkit.SQLDatabaseToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to interact with a SQL database.\nGiven an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.\nUnless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.\nYou can order the results by a relevant column to return the most interesting examples in the database.\nNever query for all the columns from a specific table, only ask for the relevant columns given the question.\nYou have access to tools for interacting with the database.\nOnly use the below tools. Only use the information returned by the below tools to construct your final answer.\nYou MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.\n\nDO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.\n\nIf the question does not seem related to the database, just return "I don\'t know" as the answer.\n', suffix: str = 'Begin!\n\nQuestion: {input}\nThought: I should look at the tables in the database to see what I can query.Β  Then I should query the schema of the most relevant tables.\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-22
to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, top_k: int = 10, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) β†’ langchain.agents.agent.AgentExecutor[source]#
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
dab40ed88528-23
Construct a sql agent from an LLM and tools. langchain.agents.agent_toolkits.create_vectorstore_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to answer questions about sets of documents.\nYou have access to tools for interacting with the documents, and the inputs to the tools are questions.\nSometimes, you will be asked to provide sources for your questions, in which case you should use the appropriate tool to do so.\nIf the question does not seem relevant to any of the tools provided, just return "I don\'t know" as the answer.\n', verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) β†’ langchain.agents.agent.AgentExecutor[source]# Construct a vectorstore agent from an LLM and tools. langchain.agents.agent_toolkits.create_vectorstore_router_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreRouterToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to answer questions.\nYou have access to tools for interacting with different sources, and the inputs to the tools are questions.\nYour main task is to decide which of the tools is relevant for answering question at hand.\nFor complex questions, you can break the question down into sub questions and use tools to answers the sub questions.\n', verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) β†’ langchain.agents.agent.AgentExecutor[source]#
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
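Putting create_sql_agent and the SQLDatabaseToolkit together, a hedged sketch (assumes OPENAI_API_KEY is set and a local SQLite file such as Chinook.db exists; the database name is only an example):

from langchain.agents.agent_toolkits import SQLDatabaseToolkit, create_sql_agent
from langchain.llms import OpenAI
from langchain.sql_database import SQLDatabase

db = SQLDatabase.from_uri("sqlite:///Chinook.db")
llm = OpenAI(temperature=0)
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
agent_executor = create_sql_agent(llm=llm, toolkit=toolkit, verbose=True)
agent_executor.run("Which table has the most rows?")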
dab40ed88528-24
Construct a vectorstore router agent from an LLM and tools.
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
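A sketch of create_vectorstore_agent with the VectorStoreToolkit documented above (assumes OPENAI_API_KEY is set and the chromadb package is installed; the sample texts are invented for the example):

from langchain.agents.agent_toolkits import (
    VectorStoreInfo,
    VectorStoreToolkit,
    create_vectorstore_agent,
)
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import Chroma

texts = ["LangChain ships several agent toolkits.", "Vector stores hold embedded documents."]
vectorstore = Chroma.from_texts(texts, OpenAIEmbeddings())
vectorstore_info = VectorStoreInfo(
    name="demo_docs",
    description="A small demo collection of documentation snippets",
    vectorstore=vectorstore,
)
toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info)
agent_executor = create_vectorstore_agent(llm=OpenAI(temperature=0), toolkit=toolkit, verbose=True)
agent_executor.run("What do vector stores hold?")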
799587adb051-0
Utilities# General utilities. pydantic model langchain.utilities.ApifyWrapper[source]# Wrapper around Apify. To use, you should have the apify-client python package installed, and the environment variable APIFY_API_TOKEN set with your API key, or pass apify_api_token as a named parameter to the constructor. field apify_client: Any = None# field apify_client_async: Any = None# async acall_actor(actor_id: str, run_input: Dict, dataset_mapping_function: Callable[[Dict], langchain.schema.Document], *, build: Optional[str] = None, memory_mbytes: Optional[int] = None, timeout_secs: Optional[int] = None) → langchain.document_loaders.apify_dataset.ApifyDatasetLoader[source]# Run an Actor on the Apify platform and wait for results to be ready. Parameters actor_id (str) – The ID or name of the Actor on the Apify platform. run_input (Dict) – The input object of the Actor that you're trying to run. dataset_mapping_function (Callable) – A function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class. build (str, optional) – Optionally specifies the actor build to run. It can be either a build tag or build number. memory_mbytes (int, optional) – Optional memory limit for the run, in megabytes. timeout_secs (int, optional) – Optional timeout for the run, in seconds. Returns A loader that will fetch the records from the Actor run's default dataset. Return type ApifyDatasetLoader
https://python.langchain.com/en/latest/reference/modules/utilities.html
799587adb051-1
Return type ApifyDatasetLoader call_actor(actor_id: str, run_input: Dict, dataset_mapping_function: Callable[[Dict], langchain.schema.Document], *, build: Optional[str] = None, memory_mbytes: Optional[int] = None, timeout_secs: Optional[int] = None) → langchain.document_loaders.apify_dataset.ApifyDatasetLoader[source]# Run an Actor on the Apify platform and wait for results to be ready. Parameters actor_id (str) – The ID or name of the Actor on the Apify platform. run_input (Dict) – The input object of the Actor that you're trying to run. dataset_mapping_function (Callable) – A function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class. build (str, optional) – Optionally specifies the actor build to run. It can be either a build tag or build number. memory_mbytes (int, optional) – Optional memory limit for the run, in megabytes. timeout_secs (int, optional) – Optional timeout for the run, in seconds. Returns A loader that will fetch the records from the Actor run's default dataset. Return type ApifyDatasetLoader pydantic model langchain.utilities.ArxivAPIWrapper[source]# Wrapper around the Arxiv API. To use, you should have the arxiv python package installed. https://lukasschwab.me/arxiv.py/index.html This wrapper will use the Arxiv API to conduct searches and fetch document summaries. By default, it will return the document summaries of the top-k results. It limits the Document content by doc_content_chars_max. Set doc_content_chars_max=None if you don't want to limit the content size. Parameters
https://python.langchain.com/en/latest/reference/modules/utilities.html
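A hedged sketch of call_actor (assumes APIFY_API_TOKEN is set, the apify-client package is installed, and the public apify/website-content-crawler Actor is used; running it consumes Apify platform credits):

from langchain.schema import Document
from langchain.utilities import ApifyWrapper

apify = ApifyWrapper()
loader = apify.call_actor(
    actor_id="apify/website-content-crawler",
    run_input={"startUrls": [{"url": "https://python.langchain.com/en/latest/"}]},
    dataset_mapping_function=lambda item: Document(
        page_content=item.get("text", ""), metadata={"source": item.get("url", "")}
    ),
)
docs = loader.load()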
799587adb051-2
Set doc_content_chars_max=None if you don’t want to limit the content size. Parameters top_k_results – number of the top-scored document used for the arxiv tool ARXIV_MAX_QUERY_LENGTH – the cut limit on the query used for the arxiv tool. load_max_docs – a limit to the number of loaded documents load_all_available_meta – if True: the metadata of the loaded Documents gets all available meta info(see https://lukasschwab.me/arxiv.py/index.html#Result), if False: the metadata gets only the most informative fields. field arxiv_exceptions: Any = None# field doc_content_chars_max: int = 4000# field load_all_available_meta: bool = False# field load_max_docs: int = 100# field top_k_results: int = 3# load(query: str) β†’ List[langchain.schema.Document][source]# Run Arxiv search and get the article texts plus the article meta information. See https://lukasschwab.me/arxiv.py/index.html#Search Returns: a list of documents with the document.page_content in text format run(query: str) β†’ str[source]# Run Arxiv search and get the article meta information. See https://lukasschwab.me/arxiv.py/index.html#Search See https://lukasschwab.me/arxiv.py/index.html#Result It uses only the most informative fields of article meta information. class langchain.utilities.BashProcess(strip_newlines: bool = False, return_err_output: bool = False, persistent: bool = False)[source]# Executes bash commands and returns the output. process_output(output: str, command: str) β†’ str[source]# run(commands: Union[str, List[str]]) β†’ str[source]# Run commands and return final output.
https://python.langchain.com/en/latest/reference/modules/utilities.html
799587adb051-3
Run commands and return final output. pydantic model langchain.utilities.BingSearchAPIWrapper[source]# Wrapper for Bing Search API. In order to set this up, follow instructions at: https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e field bing_search_url: str [Required]# field bing_subscription_key: str [Required]# field k: int = 10# results(query: str, num_results: int) β†’ List[Dict][source]# Run query through BingSearch and return metadata. Parameters query – The query to search for. num_results – The number of results to return. Returns snippet - The description of the result. title - The title of the result. link - The link to the result. Return type A list of dictionaries with the following keys run(query: str) β†’ str[source]# Run query through BingSearch and parse result. pydantic model langchain.utilities.DuckDuckGoSearchAPIWrapper[source]# Wrapper for DuckDuckGo Search API. Free and does not require any setup field k: int = 10# field max_results: int = 5# field region: Optional[str] = 'wt-wt'# field safesearch: str = 'moderate'# field time: Optional[str] = 'y'# get_snippets(query: str) β†’ List[str][source]# Run query through DuckDuckGo and return concatenated results. results(query: str, num_results: int) β†’ List[Dict[str, str]][source]# Run query through DuckDuckGo and return metadata. Parameters query – The query to search for. num_results – The number of results to return. Returns
https://python.langchain.com/en/latest/reference/modules/utilities.html
799587adb051-4
num_results – The number of results to return. Returns snippet - The description of the result. title - The title of the result. link - The link to the result. Return type A list of dictionaries with the following keys run(query: str) → str[source]# pydantic model langchain.utilities.GooglePlacesAPIWrapper[source]# Wrapper around Google Places API. To use, you should have the googlemaps python package installed, an API key for the Google Maps platform, and the environment variable GPLACES_API_KEY set with your API key, or pass gplaces_api_key as a named parameter to the constructor. By default, this will return all the results for the input query. You can use the top_k_results argument to limit the number of results. Example from langchain import GooglePlacesAPIWrapper gplaceapi = GooglePlacesAPIWrapper() field gplaces_api_key: Optional[str] = None# field top_k_results: Optional[int] = None# fetch_place_details(place_id: str) → Optional[str][source]# format_place_details(place_details: Dict[str, Any]) → Optional[str][source]# run(query: str) → str[source]# Run Places search and get the top k places that match. pydantic model langchain.utilities.GoogleSearchAPIWrapper[source]# Wrapper for Google Search API. Instructions adapted from https://stackoverflow.com/questions/37083058/programmatically-searching-google-in-python-using-custom-search TODO: DOCS for using it 1. Install google-api-python-client - If you don't already have a Google account, sign up. - If you have never created a Google APIs Console project, read the Managing Projects page and create a project in the Google API Console.
https://python.langchain.com/en/latest/reference/modules/utilities.html
799587adb051-5
read the Managing Projects page and create a project in the Google API Console. - Install the library using pip install google-api-python-client. The current version of the library is 2.70.0 at this time. 2. To create an API key: - Navigate to the APIs & Services → Credentials panel in Cloud Console. - Select Create credentials, then select API key from the drop-down menu. - The API key created dialog box displays your newly created key. - You now have an API_KEY 3. Setup Custom Search Engine so you can search the entire web - Create a custom search engine in this link. - In Sites to search, add any valid URL (i.e. www.stackoverflow.com). - That's all you have to fill up, the rest doesn't matter. In the left-side menu, click Edit search engine → {your search engine name} → Setup Set Search the entire web to ON. Remove the URL you added from the list of Sites to search. - Under Search engine ID you'll find the search-engine-ID. 4. Enable the Custom Search API - Navigate to the APIs & Services → Dashboard panel in Cloud Console. - Click Enable APIs and Services. - Search for Custom Search API and click on it. - Click Enable. URL for it: https://console.cloud.google.com/apis/library/customsearch.googleapis.com field google_api_key: Optional[str] = None# field google_cse_id: Optional[str] = None# field k: int = 10# field siterestrict: bool = False# results(query: str, num_results: int) → List[Dict][source]# Run query through GoogleSearch and return metadata. Parameters query – The query to search for. num_results – The number of results to return. Returns snippet - The description of the result.
https://python.langchain.com/en/latest/reference/modules/utilities.html
799587adb051-6
Returns snippet - The description of the result. title - The title of the result. link - The link to the result. Return type A list of dictionaries with the following keys run(query: str) β†’ str[source]# Run query through GoogleSearch and parse result. pydantic model langchain.utilities.GoogleSerperAPIWrapper[source]# Wrapper around the Serper.dev Google Search API. You can create a free API key at https://serper.dev. To use, you should have the environment variable SERPER_API_KEY set with your API key, or pass serper_api_key as a named parameter to the constructor. Example from langchain import GoogleSerperAPIWrapper google_serper = GoogleSerperAPIWrapper() field aiosession: Optional[aiohttp.client.ClientSession] = None# field gl: str = 'us'# field hl: str = 'en'# field k: int = 10# field serper_api_key: Optional[str] = None# field tbs: Optional[str] = None# field type: Literal['news', 'search', 'places', 'images'] = 'search'# async aresults(query: str, **kwargs: Any) β†’ Dict[source]# Run query through GoogleSearch. async arun(query: str, **kwargs: Any) β†’ str[source]# Run query through GoogleSearch and parse result async. results(query: str, **kwargs: Any) β†’ Dict[source]# Run query through GoogleSearch. run(query: str, **kwargs: Any) β†’ str[source]# Run query through GoogleSearch and parse result. pydantic model langchain.utilities.GraphQLAPIWrapper[source]# Wrapper around GraphQL API. To use, you should have the gql python package installed.
https://python.langchain.com/en/latest/reference/modules/utilities.html
799587adb051-7
Wrapper around GraphQL API. To use, you should have the gql python package installed. This wrapper will use the GraphQL API to conduct queries. field custom_headers: Optional[Dict[str, str]] = None# field graphql_endpoint: str [Required]# run(query: str) β†’ str[source]# Run a GraphQL query and get the results. pydantic model langchain.utilities.LambdaWrapper[source]# Wrapper for AWS Lambda SDK. Docs for using: pip install boto3 Create a lambda function using the AWS Console or CLI Run aws configure and enter your AWS credentials field awslambda_tool_description: Optional[str] = None# field awslambda_tool_name: Optional[str] = None# field function_name: Optional[str] = None# run(query: str) β†’ str[source]# Invoke Lambda function and parse result. pydantic model langchain.utilities.MetaphorSearchAPIWrapper[source]# Wrapper for Metaphor Search API. field k: int = 10# field metaphor_api_key: str [Required]# results(query: str, num_results: int) β†’ List[Dict][source]# Run query through Metaphor Search and return metadata. Parameters query – The query to search for. num_results – The number of results to return. Returns title - The title of the url - The url author - Author of the content, if applicable. Otherwise, None. date_created - Estimated date created, in YYYY-MM-DD format. Otherwise, None. Return type A list of dictionaries with the following keys async results_async(query: str, num_results: int) β†’ List[Dict][source]# Get results from the Metaphor Search API asynchronously. pydantic model langchain.utilities.OpenWeatherMapAPIWrapper[source]#
https://python.langchain.com/en/latest/reference/modules/utilities.html
799587adb051-8
pydantic model langchain.utilities.OpenWeatherMapAPIWrapper[source]# Wrapper for OpenWeatherMap API using PyOWM. Docs for using: Go to OpenWeatherMap and sign up for an API key Save your API KEY into OPENWEATHERMAP_API_KEY env variable pip install pyowm field openweathermap_api_key: Optional[str] = None# field owm: Any = None# run(location: str) β†’ str[source]# Get the current weather information for a specified location. pydantic model langchain.utilities.PowerBIDataset[source]# Create PowerBI engine from dataset ID and credential or token. Use either the credential or a supplied token to authenticate. If both are supplied the credential is used to generate a token. The impersonated_user_name is the UPN of a user to be impersonated. If the model is not RLS enabled, this will be ignored. Validators fix_table_names Β» table_names token_or_credential_present Β» all fields field aiosession: Optional[aiohttp.ClientSession] = None# field credential: Optional[TokenCredential] = None# field dataset_id: str [Required]# field group_id: Optional[str] = None# field impersonated_user_name: Optional[str] = None# field sample_rows_in_table_info: int = 1# Constraints exclusiveMinimum = 0 maximum = 10 field schemas: Dict[str, str] [Optional]# field table_names: List[str] [Required]# field token: Optional[str] = None# async aget_table_info(table_names: Optional[Union[List[str], str]] = None) β†’ str[source]# Get information about specified tables. async arun(command: str) β†’ Any[source]# Execute a DAX command and return the result asynchronously.
https://python.langchain.com/en/latest/reference/modules/utilities.html
799587adb051-9
Execute a DAX command and return the result asynchronously. get_schemas() → str[source]# Get the available schemas. get_table_info(table_names: Optional[Union[List[str], str]] = None) → str[source]# Get information about specified tables. get_table_names() → Iterable[str][source]# Get names of tables available. run(command: str) → Any[source]# Execute a DAX command and return a json representing the results. property headers: Dict[str, str]# Get the token. property request_url: str# Get the request url. property table_info: str# Information about all tables in the database. pydantic model langchain.utilities.PubMedAPIWrapper[source]# Wrapper around PubMed API. This wrapper will use the PubMed API to conduct searches and fetch document summaries. By default, it will return the document summaries of the top-k results of an input search. Parameters top_k_results – number of top-scored documents used for the PubMed tool load_max_docs – a limit to the number of loaded documents load_all_available_meta – if True: the metadata of the loaded Documents gets all available meta info (see https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch) if False: the metadata gets only the most informative fields. field doc_content_chars_max: int = 2000# field email: str = '[email protected]'# field load_all_available_meta: bool = False# field load_max_docs: int = 25# field top_k_results: int = 3# load(query: str) → List[dict][source]# Search PubMed for documents matching the query. Return a list of dictionaries containing the document metadata.
https://python.langchain.com/en/latest/reference/modules/utilities.html
799587adb051-10
Search PubMed for documents matching the query. Return a list of dictionaries containing the document metadata. load_docs(query: str) β†’ List[langchain.schema.Document][source]# retrieve_article(uid: str, webenv: str) β†’ dict[source]# run(query: str) β†’ str[source]# Run PubMed search and get the article meta information. See https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch It uses only the most informative fields of article meta information. pydantic model langchain.utilities.PythonREPL[source]# Simulates a standalone Python REPL. field globals: Optional[Dict] [Optional] (alias '_globals')# field locals: Optional[Dict] [Optional] (alias '_locals')# run(command: str) β†’ str[source]# Run command with own globals/locals and returns anything printed. pydantic model langchain.utilities.SearxSearchWrapper[source]# Wrapper for Searx API. To use you need to provide the searx host by passing the named parameter searx_host or exporting the environment variable SEARX_HOST. In some situations you might want to disable SSL verification, for example if you are running searx locally. You can do this by passing the named parameter unsecure. You can also pass the host url scheme as http to disable SSL. Example from langchain.utilities import SearxSearchWrapper searx = SearxSearchWrapper(searx_host="http://localhost:8888") Example with SSL disabled:from langchain.utilities import SearxSearchWrapper # note the unsecure parameter is not needed if you pass the url scheme as # http searx = SearxSearchWrapper(searx_host="http://localhost:8888", unsecure=True) Validators
https://python.langchain.com/en/latest/reference/modules/utilities.html
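A tiny sketch of PythonREPL (it runs arbitrary Python in-process, so only feed it trusted commands):

from langchain.utilities import PythonREPL

python_repl = PythonREPL()
# run() returns whatever the command prints.
print(python_repl.run("print(2 ** 10)"))  # "1024"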
799587adb051-11
unsecure=True) Validators disable_ssl_warnings » unsecure validate_params » all fields field aiosession: Optional[Any] = None# field categories: Optional[List[str]] = []# field engines: Optional[List[str]] = []# field headers: Optional[dict] = None# field k: int = 10# field params: dict [Optional]# field query_suffix: Optional[str] = ''# field searx_host: str = ''# field unsecure: bool = False# async aresults(query: str, num_results: int, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict][source]# Asynchronously query with json results. Uses aiohttp. See results for more info. async arun(query: str, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str[source]# Asynchronous version of run. results(query: str, num_results: int, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict][source]# Run query through Searx API and return the results with metadata. Parameters query – The query to search for. query_suffix – Extra suffix appended to the query. num_results – Limit the number of results to return. engines – List of engines to use for the query. categories – List of categories to use for the query. **kwargs – extra parameters to pass to the searx API. Returns {snippet: The description of the result. title: The title of the result. link: The link to the result.
https://python.langchain.com/en/latest/reference/modules/utilities.html
799587adb051-12
title: The title of the result. link: The link to the result. engines: The engines used for the result. category: Searx category of the result. } Return type Dict with the following keys run(query: str, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str[source]# Run query through Searx API and parse results. You can pass any other params to the searx query API. Parameters query – The query to search for. query_suffix – Extra suffix appended to the query. engines – List of engines to use for the query. categories – List of categories to use for the query. **kwargs – extra parameters to pass to the searx API. Returns The result of the query. Return type str Raises ValueError – If an error occurred with the query. Example This will make a query to the qwant engine: from langchain.utilities import SearxSearchWrapper searx = SearxSearchWrapper(searx_host="http://my.searx.host") searx.run("what is the weather in France ?", engine="qwant") # the same result can be achieved using the `!` syntax of searx # to select the engine using `query_suffix` searx.run("what is the weather in France ?", query_suffix="!qwant")
https://python.langchain.com/en/latest/reference/modules/utilities.html
799587adb051-13
serpapi_api_key as a named parameter to the constructor. Example from langchain import SerpAPIWrapper serpapi = SerpAPIWrapper() field aiosession: Optional[aiohttp.client.ClientSession] = None# field params: dict = {'engine': 'google', 'gl': 'us', 'google_domain': 'google.com', 'hl': 'en'}# field serpapi_api_key: Optional[str] = None# async aresults(query: str) β†’ dict[source]# Use aiohttp to run query through SerpAPI and return the results async. async arun(query: str, **kwargs: Any) β†’ str[source]# Run query through SerpAPI and parse result async. get_params(query: str) β†’ Dict[str, str][source]# Get parameters for SerpAPI. results(query: str) β†’ dict[source]# Run query through SerpAPI and return the raw result. run(query: str, **kwargs: Any) β†’ str[source]# Run query through SerpAPI and parse result. class langchain.utilities.SparkSQL(spark_session: Optional[SparkSession] = None, catalog: Optional[str] = None, schema: Optional[str] = None, ignore_tables: Optional[List[str]] = None, include_tables: Optional[List[str]] = None, sample_rows_in_table_info: int = 3)[source]# classmethod from_uri(database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any) β†’ langchain.utilities.spark_sql.SparkSQL[source]# Creating a remote Spark Session via Spark connect. For example: SparkSQL.from_uri(β€œsc://localhost:15002”) get_table_info(table_names: Optional[List[str]] = None) β†’ str[source]#
https://python.langchain.com/en/latest/reference/modules/utilities.html
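A hedged sketch of the SparkSQL utility built only from the from_uri(), get_usable_table_names() and run() signatures documented in this section; the Spark Connect address is a placeholder and assumes a server is already listening there.

from langchain.utilities import SparkSQL

# placeholder address; assumes a Spark Connect server is running at this URI
spark_sql = SparkSQL.from_uri("sc://localhost:15002")
# list the tables the wrapper will expose to a chain or agent
print(spark_sql.get_usable_table_names())
# run() returns the query results as a string
print(spark_sql.run("SELECT 1 AS sanity_check"))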
799587adb051-14
get_table_info(table_names: Optional[List[str]] = None) β†’ str[source]# get_table_info_no_throw(table_names: Optional[List[str]] = None) β†’ str[source]# Get information about specified tables. Follows best practices as specified in: Rajkumar et al, 2022 (https://arxiv.org/abs/2204.00498) If sample_rows_in_table_info, the specified number of sample rows will be appended to each table description. This can increase performance as demonstrated in the paper. get_usable_table_names() β†’ Iterable[str][source]# Get names of tables available. run(command: str, fetch: str = 'all') β†’ str[source]# run_no_throw(command: str, fetch: str = 'all') β†’ str[source]# Execute a SQL command and return a string representing the results. If the statement returns rows, a string of the results is returned. If the statement returns no rows, an empty string is returned. If the statement throws an error, the error message is returned. pydantic model langchain.utilities.TextRequestsWrapper[source]# Lightweight wrapper around requests library. The main purpose of this wrapper is to always return a text output. field aiosession: Optional[aiohttp.client.ClientSession] = None# field headers: Optional[Dict[str, str]] = None# async adelete(url: str, **kwargs: Any) β†’ str[source]# DELETE the URL and return the text asynchronously. async aget(url: str, **kwargs: Any) β†’ str[source]# GET the URL and return the text asynchronously. async apatch(url: str, data: Dict[str, Any], **kwargs: Any) β†’ str[source]# PATCH the URL and return the text asynchronously.
https://python.langchain.com/en/latest/reference/modules/utilities.html
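A small sketch of the asynchronous TextRequestsWrapper methods listed above; httpbin.org is only an illustrative endpoint, not something the wrapper requires.

import asyncio

from langchain.utilities import TextRequestsWrapper

requests_wrapper = TextRequestsWrapper(headers={"Accept": "application/json"})

async def main() -> None:
    # aget() returns the response body as text
    body = await requests_wrapper.aget("https://httpbin.org/get")
    print(body)

asyncio.run(main())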
799587adb051-15
PATCH the URL and return the text asynchronously. async apost(url: str, data: Dict[str, Any], **kwargs: Any) β†’ str[source]# POST to the URL and return the text asynchronously. async aput(url: str, data: Dict[str, Any], **kwargs: Any) β†’ str[source]# PUT the URL and return the text asynchronously. delete(url: str, **kwargs: Any) β†’ str[source]# DELETE the URL and return the text. get(url: str, **kwargs: Any) β†’ str[source]# GET the URL and return the text. patch(url: str, data: Dict[str, Any], **kwargs: Any) β†’ str[source]# PATCH the URL and return the text. post(url: str, data: Dict[str, Any], **kwargs: Any) β†’ str[source]# POST to the URL and return the text. put(url: str, data: Dict[str, Any], **kwargs: Any) β†’ str[source]# PUT the URL and return the text. property requests: langchain.requests.Requests# pydantic model langchain.utilities.TwilioAPIWrapper[source]# Sms Client using Twilio. To use, you should have the twilio python package installed, and the environment variables TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN, and TWILIO_FROM_NUMBER, or pass account_sid, auth_token, and from_number as named parameters to the constructor. Example from langchain.utilities.twilio import TwilioAPIWrapper twilio = TwilioAPIWrapper( account_sid="ACxxx", auth_token="xxx", from_number="+10123456789" ) twilio.run('test', '+12484345508') field account_sid: Optional[str] = None# Twilio account string identifier.
https://python.langchain.com/en/latest/reference/modules/utilities.html
799587adb051-16
field account_sid: Optional[str] = None# Twilio account string identifier. field auth_token: Optional[str] = None# Twilio auth token. field from_number: Optional[str] = None# A Twilio phone number in [E.164](https://www.twilio.com/docs/glossary/what-e164) format, an [alphanumeric sender ID](https://www.twilio.com/docs/sms/send-messages#use-an-alphanumeric-sender-id), or a [Channel Endpoint address](https://www.twilio.com/docs/sms/channels#channel-addresses) that is enabled for the type of message you want to send. Phone numbers or [short codes](https://www.twilio.com/docs/sms/api/short-code) purchased from Twilio also work here. You cannot, for example, spoof messages from a private cell phone number. If you are using messaging_service_sid, this parameter must be empty. run(body: str, to: str) β†’ str[source]# Run body through Twilio and respond with message sid. Parameters body – The text of the message you want to send. Can be up to 1,600 characters in length. to – The destination phone number in [E.164](https://www.twilio.com/docs/glossary/what-e164) format for SMS/MMS or [Channel user address](https://www.twilio.com/docs/sms/channels#channel-addresses) for other 3rd-party channels. pydantic model langchain.utilities.WikipediaAPIWrapper[source]# Wrapper around WikipediaAPI. To use, you should have the wikipedia python package installed. This wrapper will use the Wikipedia API to conduct searches and fetch page summaries. By default, it will return the page summaries of the top-k results.
https://python.langchain.com/en/latest/reference/modules/utilities.html
799587adb051-17
of the top-k results. It limits the Document content by doc_content_chars_max. field doc_content_chars_max: int = 4000# field lang: str = 'en'# field load_all_available_meta: bool = False# field top_k_results: int = 3# load(query: str) β†’ List[langchain.schema.Document][source]# Run Wikipedia search and get the article text plus the meta information. See Returns: a list of documents. run(query: str) β†’ str[source]# Run Wikipedia search and get page summaries. pydantic model langchain.utilities.WolframAlphaAPIWrapper[source]# Wrapper for Wolfram Alpha. Docs for using: Go to wolfram alpha and sign up for a developer account Create an app and get your APP ID Save your APP ID into WOLFRAM_ALPHA_APPID env variable pip install wolframalpha field wolfram_alpha_appid: Optional[str] = None# run(query: str) β†’ str[source]# Run query through WolframAlpha and parse result.
https://python.langchain.com/en/latest/reference/modules/utilities.html
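A brief sketch of WikipediaAPIWrapper usage implied by the fields and methods above; it assumes the wikipedia package is installed, and the query string is arbitrary.

from langchain.utilities import WikipediaAPIWrapper

# top_k_results and doc_content_chars_max are the documented fields
wiki = WikipediaAPIWrapper(top_k_results=2, doc_content_chars_max=1000)
# run() returns concatenated page summaries as a single string
print(wiki.run("Ada Lovelace"))
# load() returns a list of Documents with page text plus metadata
docs = wiki.load("Ada Lovelace")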
41f0167be997-0
Retrievers Retrievers# pydantic model langchain.retrievers.ArxivRetriever[source]# It is effectively a wrapper for ArxivAPIWrapper. It wraps load() to get_relevant_documents(). It uses all ArxivAPIWrapper arguments without any change. async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents pydantic model langchain.retrievers.AzureCognitiveSearchRetriever[source]# Wrapper around Azure Cognitive Search. field aiosession: Optional[aiohttp.client.ClientSession] = None# ClientSession, in case we want to reuse connection for better performance. field api_key: str = ''# API Key. Both Admin and Query keys work, but for reading data it’s recommended to use a Query key. field api_version: str = '2020-06-30'# API version field content_key: str = 'content'# Key in a retrieved result to set as the Document page_content. field index_name: str = ''# Name of Index inside Azure Cognitive Search service field service_name: str = ''# Name of Azure Cognitive Search service async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]#
https://python.langchain.com/en/latest/reference/modules/retrievers.html
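A minimal sketch of ArxivRetriever based on the description above; load_max_docs is an ArxivAPIWrapper argument passed through unchanged, the query is arbitrary, and the arxiv package is assumed to be installed.

from langchain.retrievers import ArxivRetriever

# load_max_docs is forwarded to the underlying ArxivAPIWrapper
retriever = ArxivRetriever(load_max_docs=2)
docs = retriever.get_relevant_documents("attention is all you need")
for doc in docs:
    # metadata keys depend on the wrapper; .get() avoids assuming a fixed schema
    print(doc.metadata.get("Title"), len(doc.page_content))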
41f0167be997-1
get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents pydantic model langchain.retrievers.ChatGPTPluginRetriever[source]# field aiosession: Optional[aiohttp.client.ClientSession] = None# field bearer_token: str [Required]# field filter: Optional[dict] = None# field top_k: int = 3# field url: str [Required]# async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents pydantic model langchain.retrievers.ContextualCompressionRetriever[source]# Retriever that wraps a base retriever and compresses the results. field base_compressor: langchain.retrievers.document_compressors.base.BaseDocumentCompressor [Required]# Compressor for compressing retrieved documents. field base_retriever: langchain.schema.BaseRetriever [Required]# Base Retriever to use for getting relevant documents. async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters
https://python.langchain.com/en/latest/reference/modules/retrievers.html
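A hedged sketch of wiring up ContextualCompressionRetriever from the two required fields above; the FAISS store, its single text, and the choice of LLMChainExtractor as the compressor are all illustrative assumptions, and an OpenAI API key is assumed to be configured.

from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.vectorstores import FAISS

# a tiny illustrative vector store; any base retriever works here
vectorstore = FAISS.from_texts(["the report says churn fell 12% in Q2"], OpenAIEmbeddings())
base_retriever = vectorstore.as_retriever()
# LLMChainExtractor is one of several available document compressors
compressor = LLMChainExtractor.from_llm(ChatOpenAI(temperature=0))
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor,
    base_retriever=base_retriever,
)
docs = compression_retriever.get_relevant_documents("what does the report say about churn?")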
41f0167be997-2
Get documents relevant for a query. Parameters query – string to find relevant documents for Returns Sequence of relevant documents class langchain.retrievers.DataberryRetriever(datastore_url: str, top_k: Optional[int] = None, api_key: Optional[str] = None)[source]# async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents api_key: Optional[str]# datastore_url: str# get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents top_k: Optional[int]# class langchain.retrievers.ElasticSearchBM25Retriever(client: Any, index_name: str)[source]# Wrapper around Elasticsearch using BM25 as a retrieval method. To connect to an Elasticsearch instance that requires login credentials, including Elastic Cloud, use the Elasticsearch URL format https://username:password@es_host:9243. For example, to connect to Elastic Cloud, create the Elasticsearch URL with the required authentication details and pass it to the ElasticVectorSearch constructor as the named parameter elasticsearch_url. You can obtain your Elastic Cloud URL and login credentials by logging in to the Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and navigating to the β€œDeployments” page. To obtain your Elastic Cloud password for the default β€œelastic” user: Log in to the Elastic Cloud console at https://cloud.elastic.co Go to β€œSecurity” > β€œUsers” Locate the β€œelastic” user and click β€œEdit” Click β€œReset password”
https://python.langchain.com/en/latest/reference/modules/retrievers.html
41f0167be997-3
Locate the β€œelastic” user and click β€œEdit” Click β€œReset password” Follow the prompts to reset the password The format for Elastic Cloud URLs is https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243. add_texts(texts: Iterable[str], refresh_indices: bool = True) β†’ List[str][source]# Run more texts through the embeddings and add to the retriever. Parameters texts – Iterable of strings to add to the retriever. refresh_indices – bool to refresh ElasticSearch indices Returns List of ids from adding the texts into the retriever. async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents classmethod create(elasticsearch_url: str, index_name: str, k1: float = 2.0, b: float = 0.75) β†’ langchain.retrievers.elastic_search_bm25.ElasticSearchBM25Retriever[source]# get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents pydantic model langchain.retrievers.KNNRetriever[source]# field embeddings: langchain.embeddings.base.Embeddings [Required]# field index: Any = None# field k: int = 4# field relevancy_threshold: Optional[float] = None# field texts: List[str] [Required]# async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns
https://python.langchain.com/en/latest/reference/modules/retrievers.html
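A short sketch following the create(), add_texts() and get_relevant_documents() signatures above; the URL and index name are placeholders and assume a reachable Elasticsearch instance plus the elasticsearch client package.

from langchain.retrievers import ElasticSearchBM25Retriever

# placeholder URL and index name; create() builds a BM25-configured index
retriever = ElasticSearchBM25Retriever.create(
    elasticsearch_url="http://localhost:9200",
    index_name="langchain-bm25-demo",
)
retriever.add_texts(["foo", "foo bar", "hello world"])
docs = retriever.get_relevant_documents("foo")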
41f0167be997-4
Parameters query – string to find relevant documents for Returns List of relevant documents classmethod from_texts(texts: List[str], embeddings: langchain.embeddings.base.Embeddings, **kwargs: Any) β†’ langchain.retrievers.knn.KNNRetriever[source]# get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents class langchain.retrievers.MetalRetriever(client: Any, params: Optional[dict] = None)[source]# async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents pydantic model langchain.retrievers.PineconeHybridSearchRetriever[source]# field alpha: float = 0.5# field embeddings: langchain.embeddings.base.Embeddings [Required]# field index: Any = None# field sparse_encoder: Any = None# field top_k: int = 4# add_texts(texts: List[str], ids: Optional[List[str]] = None, metadatas: Optional[List[dict]] = None) β†’ None[source]# async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents
https://python.langchain.com/en/latest/reference/modules/retrievers.html
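A minimal sketch of KNNRetriever.from_texts as documented above; OpenAIEmbeddings is just one possible Embeddings implementation and assumes an OpenAI API key is configured.

from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import KNNRetriever

# any Embeddings implementation can be substituted here
retriever = KNNRetriever.from_texts(["foo", "bar", "world"], OpenAIEmbeddings())
docs = retriever.get_relevant_documents("foo")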
41f0167be997-5
Parameters query – string to find relevant documents for Returns List of relevant documents get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents pydantic model langchain.retrievers.PubMedRetriever[source]# It is effectively a wrapper for PubMedAPIWrapper. It wraps load() to get_relevant_documents(). It uses all PubMedAPIWrapper arguments without any change. async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents pydantic model langchain.retrievers.RemoteLangChainRetriever[source]# field headers: Optional[dict] = None# field input_key: str = 'message'# field metadata_key: str = 'metadata'# field page_content_key: str = 'page_content'# field response_key: str = 'response'# field url: str [Required]# async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents
https://python.langchain.com/en/latest/reference/modules/retrievers.html
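A brief sketch of PubMedRetriever, which per the description above simply forwards PubMedAPIWrapper arguments; the query is arbitrary.

from langchain.retrievers import PubMedRetriever

retriever = PubMedRetriever()
# returns PubMed article metadata wrapped as Documents
docs = retriever.get_relevant_documents("mRNA vaccine efficacy")
for doc in docs:
    print(doc.metadata)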
41f0167be997-6
Parameters query – string to find relevant documents for Returns List of relevant documents pydantic model langchain.retrievers.SVMRetriever[source]# field embeddings: langchain.embeddings.base.Embeddings [Required]# field index: Any = None# field k: int = 4# field relevancy_threshold: Optional[float] = None# field texts: List[str] [Required]# async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents classmethod from_texts(texts: List[str], embeddings: langchain.embeddings.base.Embeddings, **kwargs: Any) β†’ langchain.retrievers.svm.SVMRetriever[source]# get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents pydantic model langchain.retrievers.SelfQueryRetriever[source]# Retriever that wraps around a vector store and uses an LLM to generate the vector store queries. field llm_chain: langchain.chains.llm.LLMChain [Required]# The LLMChain for generating the vector store queries. field search_kwargs: dict [Optional]# Keyword arguments to pass in to the vector store search. field search_type: str = 'similarity'# The search type to perform on the vector store. field structured_query_translator: langchain.chains.query_constructor.ir.Visitor [Required]# Translator for turning internal query language into vectorstore search params. field vectorstore: langchain.vectorstores.base.VectorStore [Required]#
https://python.langchain.com/en/latest/reference/modules/retrievers.html
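A minimal sketch of SVMRetriever.from_texts, mirroring the KNNRetriever usage above; scikit-learn is assumed to be installed and OpenAIEmbeddings is only one possible embedding choice.

from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import SVMRetriever

# requires scikit-learn for the underlying support vector machine
retriever = SVMRetriever.from_texts(["foo", "bar", "world"], OpenAIEmbeddings())
docs = retriever.get_relevant_documents("foo")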
41f0167be997-7
field vectorstore: langchain.vectorstores.base.VectorStore [Required]# The underlying vector store from which documents will be retrieved. field verbose: bool = False# async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, vectorstore: langchain.vectorstores.base.VectorStore, document_contents: str, metadata_field_info: List[langchain.chains.query_constructor.schema.AttributeInfo], structured_query_translator: Optional[langchain.chains.query_constructor.ir.Visitor] = None, chain_kwargs: Optional[Dict] = None, enable_limit: bool = False, **kwargs: Any) β†’ langchain.retrievers.self_query.base.SelfQueryRetriever[source]# get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents pydantic model langchain.retrievers.TFIDFRetriever[source]# field docs: List[langchain.schema.Document] [Required]# field k: int = 4# field tfidf_array: Any = None# field vectorizer: Any = None# async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents classmethod from_documents(documents: Iterable[langchain.schema.Document], *, tfidf_params: Optional[Dict[str, Any]] = None, **kwargs: Any) β†’ langchain.retrievers.tfidf.TFIDFRetriever[source]#
https://python.langchain.com/en/latest/reference/modules/retrievers.html
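A hedged sketch of SelfQueryRetriever.from_llm as documented above; the documents, metadata schema and vector store choice (Chroma) are invented for illustration, and the call assumes the lark and chromadb packages plus an OpenAI API key.

from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import SelfQueryRetriever
from langchain.schema import Document
from langchain.vectorstores import Chroma

# invented example data and metadata schema
docs = [
    Document(page_content="A scrappy robot learns to love", metadata={"year": 2008, "genre": "animated"}),
    Document(page_content="A heist staged inside layered dreams", metadata={"year": 2010, "genre": "science fiction"}),
]
vectorstore = Chroma.from_documents(docs, OpenAIEmbeddings())
metadata_field_info = [
    AttributeInfo(name="year", description="Year the movie was released", type="integer"),
    AttributeInfo(name="genre", description="Genre of the movie", type="string"),
]
retriever = SelfQueryRetriever.from_llm(
    llm=ChatOpenAI(temperature=0),
    vectorstore=vectorstore,
    document_contents="Brief summary of a movie",
    metadata_field_info=metadata_field_info,
)
results = retriever.get_relevant_documents("science fiction movies released after 2009")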
41f0167be997-8
classmethod from_texts(texts: Iterable[str], metadatas: Optional[Iterable[dict]] = None, tfidf_params: Optional[Dict[str, Any]] = None, **kwargs: Any) β†’ langchain.retrievers.tfidf.TFIDFRetriever[source]# get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents pydantic model langchain.retrievers.TimeWeightedVectorStoreRetriever[source]# Retriever combining embedding similarity with recency. field decay_rate: float = 0.01# The exponential decay factor used as (1.0-decay_rate)**(hrs_passed). field default_salience: Optional[float] = None# The salience to assign memories not retrieved from the vector store. None assigns no salience to documents not fetched from the vector store. field k: int = 4# The maximum number of documents to retrieve in a given call. field memory_stream: List[langchain.schema.Document] [Optional]# The memory_stream of documents to search through. field other_score_keys: List[str] = []# Other keys in the metadata to factor into the score, e.g. β€˜importance’. field search_kwargs: dict [Optional]# Keyword arguments to pass to the vectorstore similarity search. field vectorstore: langchain.vectorstores.base.VectorStore [Required]# The vectorstore to store documents and determine salience. async aadd_documents(documents: List[langchain.schema.Document], **kwargs: Any) β†’ List[str][source]# Add documents to vectorstore. add_documents(documents: List[langchain.schema.Document], **kwargs: Any) β†’ List[str][source]#
https://python.langchain.com/en/latest/reference/modules/retrievers.html
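A minimal sketch of TFIDFRetriever.from_texts per the signature above; scikit-learn is assumed to be installed and the ngram setting passed through tfidf_params is only illustrative.

from langchain.retrievers import TFIDFRetriever

# tfidf_params are forwarded to the underlying TF-IDF vectorizer
retriever = TFIDFRetriever.from_texts(
    ["foo", "bar", "world"],
    tfidf_params={"ngram_range": (1, 2)},
)
docs = retriever.get_relevant_documents("foo")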
41f0167be997-9
Add documents to vectorstore. async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Return documents that are relevant to the query. get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Return documents that are relevant to the query. get_salient_docs(query: str) β†’ Dict[int, Tuple[langchain.schema.Document, float]][source]# Return documents that are salient to the query. class langchain.retrievers.VespaRetriever(app: Vespa, body: Dict, content_field: str, metadata_fields: Optional[Sequence[str]] = None)[source]# async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents classmethod from_params(url: str, content_field: str, *, k: Optional[int] = None, metadata_fields: Union[Sequence[str], Literal['*']] = (), sources: Optional[Union[Sequence[str], Literal['*']]] = None, _filter: Optional[str] = None, yql: Optional[str] = None, **kwargs: Any) β†’ langchain.retrievers.vespa_retriever.VespaRetriever[source]# Instantiate retriever from params. Parameters url (str) – Vespa app URL. content_field (str) – Field in results to return as Document page_content. k (Optional[int]) – Number of Documents to return. Defaults to None. metadata_fields (Sequence[str] or "*") – Fields in results to include in document metadata. Defaults to empty tuple (). sources (Sequence[str] or "*" or None) – Sources to retrieve from. Defaults to None.
https://python.langchain.com/en/latest/reference/modules/retrievers.html
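A hedged sketch combining the fields and add_documents()/get_relevant_documents() calls above; FAISS with OpenAIEmbeddings is only one workable backing store (the vector store must support relevance-scored similarity search), the 1536 dimension matches OpenAI embeddings, and the decay_rate value is arbitrary. It assumes the faiss-cpu package and an OpenAI API key.

import faiss
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.schema import Document
from langchain.vectorstores import FAISS

# build an empty FAISS store to hold the memory stream
embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})
retriever = TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, decay_rate=0.01, k=4)
retriever.add_documents([Document(page_content="hello world")])
docs = retriever.get_relevant_documents("hello")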
41f0167be997-10
from. Defaults to None. _filter (Optional[str]) – Document filter condition expressed in YQL. Defaults to None. yql (Optional[str]) – Full YQL query to be used. Should not be specified if _filter or sources are specified. Defaults to None. kwargs (Any) – Keyword arguments added to query body. get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents get_relevant_documents_with_filter(query: str, *, _filter: Optional[str] = None) β†’ List[langchain.schema.Document][source]# class langchain.retrievers.WeaviateHybridSearchRetriever(client: Any, index_name: str, text_key: str, alpha: float = 0.5, k: int = 4, attributes: Optional[List[str]] = None, create_schema_if_missing: bool = True)[source]# class Config[source]# Configuration for this pydantic object. arbitrary_types_allowed = True# extra = 'forbid'# add_documents(docs: List[langchain.schema.Document], **kwargs: Any) β†’ List[str][source]# Upload documents to Weaviate. async aget_relevant_documents(query: str, where_filter: Optional[Dict[str, object]] = None) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents get_relevant_documents(query: str, where_filter: Optional[Dict[str, object]] = None) β†’ List[langchain.schema.Document][source]# Look up similar documents in Weaviate.
https://python.langchain.com/en/latest/reference/modules/retrievers.html
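A hedged sketch of VespaRetriever.from_params per the parameter list above; the URL and field names are placeholders for a real Vespa application, sources="*" is just one permitted value, and the pyvespa package is assumed.

from langchain.retrievers import VespaRetriever

# placeholder endpoint and field names for an existing Vespa application
retriever = VespaRetriever.from_params(
    url="http://localhost:8080",
    content_field="body",
    k=5,
    metadata_fields=["title"],
    sources="*",
)
docs = retriever.get_relevant_documents("what is vespa?")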
41f0167be997-11
Look up similar documents in Weaviate. pydantic model langchain.retrievers.WikipediaRetriever[source]# It is effectively a wrapper for WikipediaAPIWrapper. It wraps load() to get_relevant_documents(). It uses all WikipediaAPIWrapper arguments without any change. async aget_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents get_relevant_documents(query: str) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents class langchain.retrievers.ZepRetriever(session_id: str, url: str, top_k: Optional[int] = None)[source]# A Retriever implementation for the Zep long-term memory store. Search your user’s long-term chat history with Zep. Note: You will need to provide the user’s session_id to use this retriever. More on Zep: Zep provides long-term conversation storage for LLM apps. The server stores, summarizes, embeds, indexes, and enriches conversational AI chat histories, and exposes them via simple, low-latency APIs. For server installation instructions, see: https://getzep.github.io/deployment/quickstart/ async aget_relevant_documents(query: str, metadata: Optional[Dict] = None) β†’ List[langchain.schema.Document][source]# Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents get_relevant_documents(query: str, metadata: Optional[Dict] = None) β†’ List[langchain.schema.Document][source]#
https://python.langchain.com/en/latest/reference/modules/retrievers.html
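A hedged sketch built from the WeaviateHybridSearchRetriever constructor parameters above; the client URL, index name and text key are placeholders and assume a running Weaviate instance with hybrid search enabled and the weaviate-client package installed.

import weaviate

from langchain.retrievers import WeaviateHybridSearchRetriever

# placeholder connection details for an existing Weaviate instance
client = weaviate.Client(url="http://localhost:8080")
retriever = WeaviateHybridSearchRetriever(
    client=client,
    index_name="LangChain",
    text_key="text",
    alpha=0.5,
    k=4,
)
docs = retriever.get_relevant_documents("hybrid search")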
41f0167be997-12
Get documents relevant for a query. Parameters query – string to find relevant documents for Returns List of relevant documents
https://python.langchain.com/en/latest/reference/modules/retrievers.html
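A short sketch of ZepRetriever per the constructor shown above; the session id and server URL are placeholders and assume a running Zep server that already holds chat history for that session.

from langchain.retrievers import ZepRetriever

# placeholder session id and server URL
retriever = ZepRetriever(
    session_id="user-123-session",
    url="http://localhost:8000",
    top_k=5,
)
docs = retriever.get_relevant_documents("What did we decide about the deadline?")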
601ff707afd2-0
SerpAPI SerpAPI# For backwards compatibility. pydantic model langchain.serpapi.SerpAPIWrapper[source]# Wrapper around SerpAPI. To use, you should have the google-search-results python package installed, and the environment variable SERPAPI_API_KEY set with your API key, or pass serpapi_api_key as a named parameter to the constructor. Example from langchain import SerpAPIWrapper serpapi = SerpAPIWrapper() field aiosession: Optional[aiohttp.client.ClientSession] = None# field params: dict = {'engine': 'google', 'gl': 'us', 'google_domain': 'google.com', 'hl': 'en'}# field serpapi_api_key: Optional[str] = None# async aresults(query: str) β†’ dict[source]# Use aiohttp to run query through SerpAPI and return the results async. async arun(query: str, **kwargs: Any) β†’ str[source]# Run query through SerpAPI and parse result async. get_params(query: str) β†’ Dict[str, str][source]# Get parameters for SerpAPI. results(query: str) β†’ dict[source]# Run query through SerpAPI and return the raw result. run(query: str, **kwargs: Any) β†’ str[source]# Run query through SerpAPI and parse result.
https://python.langchain.com/en/latest/reference/modules/serpapi.html
ab9fbe8bbaa9-0
Agents Agents# Interface for agents. pydantic model langchain.agents.Agent[source]# Class responsible for calling the language model and deciding the action. This is driven by an LLMChain. The prompt in the LLMChain MUST include a variable called β€œagent_scratchpad” where the agent can put its intermediary work. field allowed_tools: Optional[List[str]] = None# field llm_chain: langchain.chains.llm.LLMChain [Required]# field output_parser: langchain.agents.agent.AgentOutputParser [Required]# async aplan(intermediate_steps: List[Tuple[langchain.schema.AgentAction, str]], callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) β†’ Union[langchain.schema.AgentAction, langchain.schema.AgentFinish][source]# Given input, decide what to do. Parameters intermediate_steps – Steps the LLM has taken to date, along with observations callbacks – Callbacks to run. **kwargs – User inputs. Returns Action specifying what tool to use. abstract classmethod create_prompt(tools: Sequence[langchain.tools.base.BaseTool]) β†’ langchain.prompts.base.BasePromptTemplate[source]# Create a prompt for this class. dict(**kwargs: Any) β†’ Dict[source]# Return dictionary representation of agent. classmethod from_llm_and_tools(llm: langchain.base_language.BaseLanguageModel, tools: Sequence[langchain.tools.base.BaseTool], callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, output_parser: Optional[langchain.agents.agent.AgentOutputParser] = None, **kwargs: Any) β†’ langchain.agents.agent.Agent[source]# Construct an agent from an LLM and tools.
https://python.langchain.com/en/latest/reference/modules/agents.html
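Agent itself is abstract, so the following is a hedged sketch using ZeroShotAgent, one concrete subclass, to show the from_llm_and_tools flow described above; the echo tool is invented purely for illustration and the OpenAI LLM assumes an API key is configured.

from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.llms import OpenAI

# an invented toy tool, only for illustration
tools = [
    Tool(name="echo", func=lambda text: text, description="Returns the input text unchanged."),
]
agent = ZeroShotAgent.from_llm_and_tools(llm=OpenAI(temperature=0), tools=tools)
executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
executor.run("Use the echo tool on the phrase 'hello world'.")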