"type": "object",
"properties": {
"host": {
"title": "Host",
"default": "localhost",
"env_names": "{'myscale_host'}",
"type": "string"
},
"port": {
"title": "Port",
"default": 8443,
"env_names": "{'myscale_port'}",
"type": "integer"
},
"username": {
"title": "Username",
"env_names": "{'myscale_username'}",
"type": "string"
},
"password": {
"title": "Password",
"env_names": "{'myscale_password'}",
"type": "string"
},
"index_type": {
"title": "Index Type",
"default": "IVFFLAT",
"env_names": "{'myscale_index_type'}",
"type": "string"
},
"index_param": {
"title": "Index Param",
"env_names": "{'myscale_index_param'}",
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"column_map": {
"title": "Column Map",
"default": {
"id": "id",
"text": "text",
"vector": "vector",
"metadata": "metadata"
},
"env_names": "{'myscale_column_map'}",
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"database": {
|
https://python.langchain.com/en/latest/reference/modules/vectorstores.html
|
165b0d984e00-43
|
"type": "string"
}
},
"database": {
"title": "Database",
"default": "default",
"env_names": "{'myscale_database'}",
"type": "string"
},
"table": {
"title": "Table",
"default": "langchain",
"env_names": "{'myscale_table'}",
"type": "string"
},
"metric": {
"title": "Metric",
"default": "cosine",
"env_names": "{'myscale_metric'}",
"type": "string"
}
},
"additionalProperties": false
}
Config
env_file: str = .env
env_file_encoding: str = utf-8
env_prefix: str = myscale_
Fields
column_map (Dict[str, str])
database (str)
host (str)
index_param (Optional[Dict[str, str]])
index_type (str)
metric (str)
password (Optional[str])
port (int)
table (str)
username (Optional[str])
field column_map: Dict[str, str] = {'id': 'id', 'metadata': 'metadata', 'text': 'text', 'vector': 'vector'}#
field database: str = 'default'#
field host: str = 'localhost'#
field index_param: Optional[Dict[str, str]] = None#
field index_type: str = 'IVFFLAT'#
field metric: str = 'cosine'#
field password: Optional[str] = None#
field port: int = 8443#
field table: str = 'langchain'#
field username: Optional[str] = None#
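These settings can be supplied explicitly or picked up from environment variables carrying the myscale_ prefix (per the Config above). A minimal sketch, assuming a reachable MyScale instance; the host and credentials are placeholders:
from langchain.vectorstores import MyScale, MyScaleSettings
from langchain.embeddings.openai import OpenAIEmbeddings

# Explicit settings; alternatively set MYSCALE_HOST, MYSCALE_PORT, etc.
config = MyScaleSettings(host="your-host", port=8443, username="user", password="***")
vectorstore = MyScale(OpenAIEmbeddings(), config=config)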
class langchain.vectorstores.OpenSearchVectorSearch(opensearch_url: str, index_name: str, embedding_function: langchain.embeddings.base.Embeddings, **kwargs: Any)[source]#
Wrapper around OpenSearch as a vector database.
Example
from langchain import OpenSearchVectorSearch
opensearch_vector_search = OpenSearchVectorSearch(
"http://localhost:9200",
"embeddings",
embedding_function
)
add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, bulk_size: int = 500, **kwargs: Any) → List[str][source]#
Run more texts through the embeddings and add to the vectorstore.
Parameters
texts – Iterable of strings to add to the vectorstore.
metadatas – Optional list of metadatas associated with the texts.
bulk_size – Bulk API request count; Default: 500
Returns
List of ids from adding the texts into the vectorstore.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to "vector_field".
text_field: Document field the text of the document is stored in. Defaults to "text".
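For instance, a hedged sketch of adding texts to the store created above, with the default field names overridden (the texts, metadata, and field names are illustrative):
ids = opensearch_vector_search.add_texts(
    ["hello world"],
    metadatas=[{"source": "greeting"}],
    bulk_size=500,
    vector_field="vector_field",
    text_field="text",
)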
classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, bulk_size: int = 500, **kwargs: Any) → langchain.vectorstores.opensearch_vector_search.OpenSearchVectorSearch[source]#
Construct OpenSearchVectorSearch wrapper from raw documents.
Example
from langchain import OpenSearchVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
opensearch_vector_search = OpenSearchVectorSearch.from_texts(
texts,
embeddings,
opensearch_url="http://localhost:9200"
)
OpenSearch by default supports Approximate Search powered by the nmslib, faiss,
and lucene engines, which is recommended for large datasets. It also supports
brute-force search through Script Scoring and Painless Scripting.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to "vector_field".
text_field: Document field the text of the document is stored in. Defaults to "text".
Optional Keyword Args for Approximate Search:
engine: "nmslib", "faiss", "lucene"; default: "nmslib"
space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2"
ef_search: Size of the dynamic list used during k-NN searches. Higher values
lead to more accurate but slower searches; default: 512
ef_construction: Size of the dynamic list used during k-NN graph creation.
Higher values lead to a more accurate graph but slower indexing speed;
default: 512
m: Number of bidirectional links created for each new element. Large impact
on memory consumption. Between 2 and 100; default: 16
Keyword Args for Script Scoring or Painless Scripting:
is_appx_search: False
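A sketch combining these keyword arguments (texts and embeddings as in the example above; the values are illustrative and assume the faiss engine is available in the cluster):
opensearch_vector_search = OpenSearchVectorSearch.from_texts(
    texts,
    embeddings,
    opensearch_url="http://localhost:9200",
    engine="faiss",
    space_type="innerproduct",
    ef_construction=256,
    m=48,
)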
similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to query.
By default supports Approximate Search.
Also supports Script Scoring and Painless Scripting.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
Returns
List of Documents most similar to the query.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to "vector_field".
text_field: Document field the text of the document is stored in. Defaults to "text".
metadata_field: Document field that metadata is stored in. Defaults to "metadata".
Can be set to a special value "*" to include the entire document.
Optional Args for Approximate Search:
search_type: "approximate_search"; default: "approximate_search"
boolean_filter: A Boolean filter consists of a Boolean query that
contains a k-NN query and a filter.
subquery_clause: Query clause on the knn vector field; default: "must"
lucene_filter: the Lucene algorithm decides whether to perform an exact
k-NN search with pre-filtering or an approximate search with modified
post-filtering.
Optional Args for Script Scoring Search:
search_type: "script_scoring"; default: "approximate_search"
space_type: "l2", "l1", "linf", "cosinesimil", "innerproduct",
"hammingbit"; default: "l2"
pre_filter: script_score query to pre-filter documents before identifying
nearest neighbors; default: {"match_all": {}}
Optional Args for Painless Scripting Search:
search_type: "painless_scripting"; default: "approximate_search"
space_type: "l2Squared", "l1Norm", "cosineSimilarity"; default: "l2Squared"
pre_filter: script_score query to pre-filter documents before identifying
nearest neighbors; default: {"match_all": {}}
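For example, a hedged sketch of a Script Scoring search with a pre-filter (the filter body is an illustrative OpenSearch query; field names are placeholders):
docs = opensearch_vector_search.similarity_search(
    "What did the president say?",
    k=4,
    search_type="script_scoring",
    space_type="cosinesimil",
    pre_filter={"bool": {"filter": {"term": {"metadata.source": "speech"}}}},
)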
similarity_search_with_score(query: str, k: int = 4, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]#
Return docs and their scores most similar to the query.
By default supports Approximate Search.
Also supports Script Scoring and Painless Scripting.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
Returns
List of Documents most similar to the query, along with their scores.
Optional Args: same as similarity_search
class langchain.vectorstores.Pinecone(index: Any, embedding_function: Callable, text_key: str, namespace: Optional[str] = None)[source]#
Wrapper around Pinecone vector database.
To use, you should have the pinecone-client python package installed.
Example
from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
import pinecone
# The environment should be the one specified next to the API key
# in your Pinecone console
pinecone.init(api_key="***", environment="...")
index = pinecone.Index("langchain-demo")
embeddings = OpenAIEmbeddings()
vectorstore = Pinecone(index, embeddings.embed_query, "text")
add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, namespace: Optional[str] = None, batch_size: int = 32, **kwargs: Any) → List[str][source]#
Run more texts through the embeddings and add to the vectorstore.
Parameters
texts – Iterable of strings to add to the vectorstore.
metadatas – Optional list of metadatas associated with the texts.
ids – Optional list of ids to associate with the texts.
namespace – Optional pinecone namespace to add the texts to.
Returns
List of ids from adding the texts into the vectorstore.
classmethod from_existing_index(index_name: str, embedding: langchain.embeddings.base.Embeddings, text_key: str = 'text', namespace: Optional[str] = None) → langchain.vectorstores.pinecone.Pinecone[source]#
Load pinecone vectorstore from index name.
classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = 32, text_key: str = 'text', index_name: Optional[str] = None, namespace: Optional[str] = None, **kwargs: Any) → langchain.vectorstores.pinecone.Pinecone[source]#
Construct Pinecone wrapper from raw documents.
This is a user friendly interface that:
Embeds documents.
Adds the documents to a provided Pinecone index
This is intended to be a quick way to get started.
Example
from langchain import Pinecone
from langchain.embeddings import OpenAIEmbeddings
import pinecone
# The environment should be the one specified next to the API key
# in your Pinecone console
pinecone.init(api_key="***", environment="...")
embeddings = OpenAIEmbeddings()
pinecone = Pinecone.from_texts(
texts,
embeddings,
index_name="langchain-demo"
)
similarity_search(query: str, k: int = 4, filter: Optional[dict] = None, namespace: Optional[str] = None, **kwargs: Any) → List[langchain.schema.Document][source]#
Return pinecone documents most similar to query.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
filter – Dictionary of argument(s) to filter on metadata
namespace – Namespace to search in. Default will search in ‘’ namespace.
Returns
List of Documents most similar to the query.
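A short sketch of a filtered search (the metadata filter follows Pinecone's filter syntax; the field name and namespace are illustrative):
docs = vectorstore.similarity_search(
    "What did the president say?",
    k=4,
    filter={"source": {"$eq": "state-of-the-union"}},
    namespace="my-namespace",
)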
similarity_search_with_score(query: str, k: int = 4, filter: Optional[dict] = None, namespace: Optional[str] = None) → List[Tuple[langchain.schema.Document, float]][source]#
Return pinecone documents most similar to query, along with scores.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
filter – Dictionary of argument(s) to filter on metadata
namespace – Namespace to search in. Default will search in ‘’ namespace.
Returns
List of Documents most similar to the query and score for each
class langchain.vectorstores.Qdrant(client: Any, collection_name: str, embeddings: Optional[langchain.embeddings.base.Embeddings] = None, content_payload_key: str = 'page_content', metadata_payload_key: str = 'metadata', embedding_function: Optional[Callable] = None)[source]#
Wrapper around Qdrant vector database.
To use you should have the qdrant-client package installed.
Example
from qdrant_client import QdrantClient
from langchain import Qdrant
client = QdrantClient()
collection_name = "MyCollection"
qdrant = Qdrant(client, collection_name, embedding_function)
CONTENT_KEY = 'page_content'#
METADATA_KEY = 'metadata'#
add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]#
Run more texts through the embeddings and add to the vectorstore.
Parameters
texts – Iterable of strings to add to the vectorstore.
metadatas – Optional list of metadatas associated with the texts.
Returns
List of ids from adding the texts into the vectorstore.
classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, location: Optional[str] = None, url: Optional[str] = None, port: Optional[int] = 6333, grpc_port: int = 6334, prefer_grpc: bool = False, https: Optional[bool] = None, api_key: Optional[str] = None, prefix: Optional[str] = None, timeout: Optional[float] = None, host: Optional[str] = None, path: Optional[str] = None, collection_name: Optional[str] = None, distance_func: str = 'Cosine', content_payload_key: str = 'page_content', metadata_payload_key: str = 'metadata', **kwargs: Any) → langchain.vectorstores.qdrant.Qdrant[source]#
Construct Qdrant wrapper from a list of texts.
Parameters
texts – A list of texts to be indexed in Qdrant.
embedding – A subclass of Embeddings, responsible for text vectorization.
metadatas – An optional list of metadata. If provided it has to be of the same
length as a list of texts.
location – If :memory: - use in-memory Qdrant instance.
If str - use it as a url parameter.
If None - fallback to relying on host and port parameters.
url – either host or str of “Optional[scheme], host, Optional[port],
Optional[prefix]”. Default: None
port – Port of the REST API interface. Default: 6333
grpc_port – Port of the gRPC interface. Default: 6334
prefer_grpc – If true - use gRPC interface whenever possible in custom methods.
Default: False
https – If true - use HTTPS(SSL) protocol. Default: None
api_key – API key for authentication in Qdrant Cloud. Default: None
prefix – If not None - add prefix to the REST URL path.
Example: service/v1 will result in
http://localhost:6333/service/v1/{qdrant-endpoint} for REST API.
Default: None
timeout – Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC
host – Host name of Qdrant service. If url and host are None, set to
‘localhost’. Default: None
path – Path in which the vectors will be stored while using local mode.
Default: None
collection_name – Name of the Qdrant collection to be used. If not provided,
it will be created randomly. Default: None
distance_func – Distance function. One of: “Cosine” / “Euclid” / “Dot”.
Default: “Cosine”
content_payload_key – A payload key used to store the content of the document.
Default: “page_content”
metadata_payload_key – A payload key used to store the metadata of the document.
Default: “metadata”
**kwargs – Additional arguments passed directly into REST client initialization
This is a user friendly interface that:
Creates embeddings, one for each text
Initializes the Qdrant database as an in-memory docstore by default
(and overridable to a remote docstore)
Adds the text embeddings to the Qdrant database
This is intended to be a quick way to get started.
Example
from langchain import Qdrant
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
qdrant = Qdrant.from_texts(texts, embeddings, host="localhost")
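Two common variants of the location and url parameters documented above (sketches; the collection name is illustrative):
# Ephemeral in-memory instance:
qdrant = Qdrant.from_texts(texts, embeddings, location=":memory:", collection_name="demo")
# Remote instance over REST:
qdrant = Qdrant.from_texts(texts, embeddings, url="http://localhost:6333")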
max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
fetch_k – Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult – Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns
List of Documents selected by maximal marginal relevance.
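To make the lambda_mult trade-off concrete, here is a minimal, illustrative sketch of the greedy MMR selection loop over raw embeddings (not Qdrant's internal implementation):
import numpy as np

def mmr_select(query_vec, doc_vecs, k=4, lambda_mult=0.5):
    # Greedily pick documents, balancing relevance to the query against
    # redundancy with documents already selected.
    def cos(a, b):
        return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
    selected, candidates = [], list(range(len(doc_vecs)))
    while candidates and len(selected) < k:
        def score(i):
            relevance = cos(query_vec, doc_vecs[i])
            redundancy = max((cos(doc_vecs[i], doc_vecs[j]) for j in selected), default=0.0)
            return lambda_mult * relevance - (1 - lambda_mult) * redundancy
        best = max(candidates, key=score)
        selected.append(best)
        candidates.remove(best)
    return selected  # indices of the chosen documents
With lambda_mult=1 this reduces to plain similarity ranking (minimum diversity), while lambda_mult=0 maximizes diversity.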
similarity_search(query: str, k: int = 4, filter: Optional[Dict[str, Union[str, int, bool, dict, list]]] = None, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to query.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
filter – Filter by metadata. Defaults to None.
Returns
List of Documents most similar to the query.
similarity_search_with_score(query: str, k: int = 4, filter: Optional[Dict[str, Union[str, int, bool, dict, list]]] = None) → List[Tuple[langchain.schema.Document, float]][source]#
Return docs most similar to query.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
filter – Filter by metadata. Defaults to None.
Returns
List of Documents most similar to the query and score for each.
class langchain.vectorstores.Redis(redis_url: str, index_name: str, embedding_function: typing.Callable, content_key: str = 'content', metadata_key: str = 'metadata', vector_key: str = 'content_vector', relevance_score_fn: typing.Optional[typing.Callable[[float], float]] = <function _default_relevance_score>, **kwargs: typing.Any)[source]#
Wrapper around Redis vector database.
To use, you should have the redis python package installed.
Example
from langchain.vectorstores import Redis
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = Redis(
redis_url="redis://username:password@localhost:6379",
index_name="my-index",
embedding_function=embeddings.embed_query,
)
add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, embeddings: Optional[List[List[float]]] = None, keys: Optional[List[str]] = None, batch_size: int = 1000, **kwargs: Any) → List[str][source]#
Add more texts to the vectorstore.
Parameters
texts (Iterable[str]) – Iterable of strings/text to add to the vectorstore.
metadatas (Optional[List[dict]], optional) – Optional list of metadatas.
Defaults to None.
embeddings (Optional[List[List[float]]], optional) – Optional pre-generated
embeddings. Defaults to None.
keys (Optional[List[str]], optional) – Optional key values to use as ids.
Defaults to None.
batch_size (int, optional) – Batch size to use for writes. Defaults to 1000.
Returns
List of ids added to the vectorstore
Return type
List[str]
as_retriever(**kwargs: Any) → langchain.vectorstores.redis.RedisVectorStoreRetriever[source]#
static drop_index(index_name: str, delete_documents: bool, **kwargs: Any) → bool[source]#
Drop a Redis search index.
Parameters
index_name (str) – Name of the index to drop.
delete_documents (bool) – Whether to drop the associated documents.
Returns
Whether or not the drop was successful.
Return type
bool
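A hedged usage sketch (drop_index is assumed to read the connection URL from keyword arguments; the URL is illustrative):
Redis.drop_index(
    index_name="my-index",
    delete_documents=True,
    redis_url="redis://localhost:6379",
)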
classmethod from_existing_index(embedding: langchain.embeddings.base.Embeddings, index_name: str, content_key: str = 'content', metadata_key: str = 'metadata', vector_key: str = 'content_vector', **kwargs: Any) → langchain.vectorstores.redis.Redis[source]#
Connect to an existing Redis index.
classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, index_name: Optional[str] = None, content_key: str = 'content', metadata_key: str = 'metadata', vector_key: str = 'content_vector', **kwargs: Any) → langchain.vectorstores.redis.Redis[source]#
Create a Redis vectorstore from raw documents.
This is a user-friendly interface that:
Embeds documents.
Creates a new index for the embeddings in Redis.
Adds the documents to the newly created Redis index.
This is intended to be a quick way to get started.
Example
classmethod from_texts_return_keys(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, index_name: Optional[str] = None, content_key: str = 'content', metadata_key: str = 'metadata', vector_key: str = 'content_vector', distance_metric: Literal['COSINE', 'IP', 'L2'] = 'COSINE', **kwargs: Any) → Tuple[langchain.vectorstores.redis.Redis, List[str]][source]#
Create a Redis vectorstore from raw documents.
This is a user-friendly interface that:
Embeds documents.
Creates a new index for the embeddings in Redis.
Adds the documents to the newly created Redis index.
This is intended to be a quick way to get started.
Example
similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Returns the most similar indexed documents to the query text.
Parameters
query (str) – The query text for which to find similar documents.
k (int) – The number of documents to return. Default is 4.
Returns
A list of documents that are most similar to the query text.
Return type
List[Document]
similarity_search_limit_score(query: str, k: int = 4, score_threshold: float = 0.2, **kwargs: Any) → List[langchain.schema.Document][source]#
Returns the most similar indexed documents to the query text within the
score_threshold range.
Parameters
query (str) – The query text for which to find similar documents.
k (int) – The number of documents to return. Default is 4.
score_threshold (float) – The minimum matching score required for a document
to be considered a match. Defaults to 0.2. Because the similarity calculation
algorithm is based on cosine similarity, the smaller the angle, the higher
the similarity.
Returns
A list of documents that are most similar to the query text,
including the match score for each document.
Return type
List[Document]
Note
If there are no documents that satisfy the score_threshold value,
an empty list is returned.
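A short usage sketch (the query text and threshold are illustrative):
docs = vectorstore.similarity_search_limit_score(
    "What did the president say?", k=4, score_threshold=0.2
)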
similarity_search_with_score(query: str, k: int = 4) → List[Tuple[langchain.schema.Document, float]][source]#
Return docs most similar to query.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
Returns
List of Documents most similar to the query and score for each
class langchain.vectorstores.SupabaseVectorStore(client: supabase.client.Client, embedding: Embeddings, table_name: str, query_name: Union[str, None] = None)[source]#
VectorStore for a Supabase postgres database. Assumes you have the pgvector
extension installed and a match_documents (or similar) function. For more details:
https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase
You can implement your own match_documents function in order to limit the search
space to a subset of documents based on your own authorization or business logic.
Note that the Supabase Python client does not yet support async operations.
If you’d like to use max_marginal_relevance_search, please review the instructions
below on modifying the match_documents function to return matched embeddings.
add_texts(texts: Iterable[str], metadatas: Optional[List[dict[Any, Any]]] = None, **kwargs: Any) → List[str][source]#
Run more texts through the embeddings and add to the vectorstore.
Parameters
texts – Iterable of strings to add to the vectorstore.
metadatas – Optional list of metadatas associated with the texts.
kwargs – vectorstore specific parameters
Returns
List of ids from adding the texts into the vectorstore.
add_vectors(vectors: List[List[float]], documents: List[langchain.schema.Document]) → List[str][source]#
classmethod from_texts(texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Optional[supabase.client.Client] = None, table_name: Optional[str] = 'documents', query_name: Union[str, None] = 'match_documents', **kwargs: Any) → SupabaseVectorStore[source]#
Return VectorStore initialized from texts and embeddings.
max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
fetch_k – Number of Documents to fetch to pass to MMR algorithm.
lambda_mult – Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns
List of Documents selected by maximal marginal relevance.
max_marginal_relevance_search requires that query_name returns matched
embeddings alongside the matched documents. The following function
demonstrates how to do this:
```sql
CREATE FUNCTION match_documents_embeddings(query_embedding vector(1536),
                                           match_count int)
RETURNS TABLE(id bigint,
              content text,
              metadata jsonb,
              embedding vector(1536),
              similarity float)
LANGUAGE plpgsql
AS $$
#variable_conflict use_column
BEGIN
  RETURN QUERY
  SELECT
    id,
    content,
    metadata,
    embedding,
    1 - (docstore.embedding <=> query_embedding) AS similarity
  FROM docstore
  ORDER BY docstore.embedding <=> query_embedding
  LIMIT match_count;
END;
$$;
```
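With such a function installed, a hedged sketch of wiring it up (the Supabase client and embeddings are assumed to exist; the names are illustrative):
vectorstore = SupabaseVectorStore(
    client=supabase_client,
    embedding=embeddings,
    table_name="documents",
    query_name="match_documents_embeddings",
)
docs = vectorstore.max_marginal_relevance_search("query", k=4, fetch_k=20)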
max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Parameters
embedding – Embedding to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
fetch_k – Number of Documents to fetch to pass to MMR algorithm.
lambda_mult – Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns
List of Documents selected by maximal marginal relevance.
query_name: str#
similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to query.
similarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to embedding vector.
Parameters
embedding – Embedding to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
Returns
List of Documents most similar to the query vector.
similarity_search_by_vector_returning_embeddings(query: List[float], k: int) → List[Tuple[langchain.schema.Document, float, numpy.ndarray[numpy.float32, Any]]][source]#
similarity_search_by_vector_with_relevance_scores(query: List[float], k: int) → List[Tuple[langchain.schema.Document, float]][source]#
similarity_search_with_relevance_scores(query: str, k: int = 4, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]#
Return docs and relevance scores in the range [0, 1].
0 is dissimilar, 1 is most similar.
Parameters
query – input text
k – Number of Documents to return. Defaults to 4.
**kwargs – kwargs to be passed to similarity search. Should include:
score_threshold: Optional, a floating point value between 0 to 1 to
filter the resulting set of retrieved docs
Returns
List of Tuples of (doc, similarity_score)
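For instance, a sketch that keeps only strong matches (the threshold is illustrative):
results = vectorstore.similarity_search_with_relevance_scores(
    "query", k=4, score_threshold=0.8
)
for doc, score in results:
    print(score, doc.page_content)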
table_name: str#
class langchain.vectorstores.Tair(embedding_function: langchain.embeddings.base.Embeddings, url: str, index_name: str, content_key: str = 'content', metadata_key: str = 'metadata', search_params: Optional[dict] = None, **kwargs: Any)[source]#
add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]#
Add texts data to an existing index.
create_index_if_not_exist(dim: int, distance_type: str, index_type: str, data_type: str, **kwargs: Any) → bool[source]#
static drop_index(index_name: str = 'langchain', **kwargs: Any) → bool[source]#
Drop an existing index.
Parameters
index_name (str) – Name of the index to drop.
Returns
True if the index is dropped successfully.
Return type
bool
classmethod from_documents(documents: List[langchain.schema.Document], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, index_name: str = 'langchain', content_key: str = 'content', metadata_key: str = 'metadata', **kwargs: Any) → langchain.vectorstores.tair.Tair[source]#
Return VectorStore initialized from documents and embeddings.
classmethod from_existing_index(embedding: langchain.embeddings.base.Embeddings, index_name: str = 'langchain', content_key: str = 'content', metadata_key: str = 'metadata', **kwargs: Any) → langchain.vectorstores.tair.Tair[source]#
Connect to an existing Tair index.
classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, index_name: str = 'langchain', content_key: str = 'content', metadata_key: str = 'metadata', **kwargs: Any) → langchain.vectorstores.tair.Tair[source]#
Return VectorStore initialized from texts and embeddings.
similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Returns the most similar indexed documents to the query text.
Parameters
query (str) – The query text for which to find similar documents.
k (int) – The number of documents to return. Default is 4.
Returns
A list of documents that are most similar to the query text.
Return type
List[Document]
class langchain.vectorstores.Typesense(typesense_client: Client, embedding: Embeddings, *, typesense_collection_name: Optional[str] = None, text_key: str = 'text')[source]#
Wrapper around Typesense vector search.
To use, you should have the typesense python package installed.
Example
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Typesense
import typesense
node = {
"host": "localhost", # For Typesense Cloud use xxx.a1.typesense.net
"port": "8108", # For Typesense Cloud use 443
"protocol": "http" # For Typesense Cloud use https
}
typesense_client = typesense.Client(
{
"nodes": [node],
"api_key": "<API_KEY>",
"connection_timeout_seconds": 2
}
)
typesense_collection_name = "langchain-memory"
embedding = OpenAIEmbeddings()
vectorstore = Typesense(
    typesense_client,
    embedding,
    typesense_collection_name=typesense_collection_name,
    text_key="text",
)
add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → List[str][source]#
Run more texts through the embedding and add to the vectorstore.
Parameters
texts – Iterable of strings to add to the vectorstore.
metadatas – Optional list of metadatas associated with the texts.
ids – Optional list of ids to associate with the texts.
Returns
List of ids from adding the texts into the vectorstore.
classmethod from_client_params(embedding: langchain.embeddings.base.Embeddings, *, host: str = 'localhost', port: Union[str, int] = '8108', protocol: str = 'http', typesense_api_key: Optional[str] = None, connection_timeout_seconds: int = 2, **kwargs: Any) → langchain.vectorstores.typesense.Typesense[source]#
Initialize Typesense directly from client parameters.
Example
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Typesense
# Pass in typesense_api_key as kwarg or set env var "TYPESENSE_API_KEY".
vectorstore = Typesense.from_client_params(
OpenAIEmbeddings(),
host="localhost",
port="8108",
protocol="http",
typesense_collection_name="langchain-memory",
)
classmethod from_texts(texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, typesense_client: Optional[Client] = None, typesense_client_params: Optional[dict] = None, typesense_collection_name: Optional[str] = None, text_key: str = 'text', **kwargs: Any) → Typesense[source]#
Construct Typesense wrapper from raw text.
similarity_search(query: str, k: int = 4, filter: Optional[str] = '', **kwargs: Any) → List[langchain.schema.Document][source]#
Return typesense documents most similar to query.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
filter – typesense filter_by expression to filter documents on
Returns
List of Documents most similar to the query.
similarity_search_with_score(query: str, k: int = 4, filter: Optional[str] = '') → List[Tuple[langchain.schema.Document, float]][source]#
Return typesense documents most similar to query, along with scores.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
filter – typesense filter_by expression to filter documents on
Returns
List of Documents most similar to the query and score for each
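A hedged sketch using a Typesense filter_by expression (the field name and value are illustrative):
docs = vectorstore.similarity_search(
    "intro to vector search",
    k=4,
    filter="category:=tutorial",
)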
class langchain.vectorstores.Vectara(vectara_customer_id: Optional[str] = None, vectara_corpus_id: Optional[str] = None, vectara_api_key: Optional[str] = None)[source]#
Implementation of Vector Store using Vectara (https://vectara.com).
Example
from langchain.vectorstores import Vectara
vectorstore = Vectara(
vectara_customer_id=vectara_customer_id,
vectara_corpus_id=vectara_corpus_id,
vectara_api_key=vectara_api_key
)
add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]#
Run more texts through the embeddings and add to the vectorstore.
Parameters
texts – Iterable of strings to add to the vectorstore.
metadatas – Optional list of metadatas associated with the texts.
Returns
List of ids from adding the texts into the vectorstore.
as_retriever(**kwargs: Any) → langchain.vectorstores.vectara.VectaraRetriever[source]#
classmethod from_texts(texts: List[str], embedding: Optional[langchain.embeddings.base.Embeddings] = None, metadatas: Optional[List[dict]] = None, **kwargs: Any) → langchain.vectorstores.vectara.Vectara[source]#
Construct Vectara wrapper from raw documents.
This is intended to be a quick way to get started.
Example
from langchain import Vectara
vectara = Vectara.from_texts(
texts,
vectara_customer_id=customer_id,
vectara_corpus_id=corpus_id,
vectara_api_key=api_key,
)
similarity_search(query: str, k: int = 5, alpha: float = 0.025, filter: Optional[str] = None, **kwargs: Any) → List[langchain.schema.Document][source]#
Return Vectara documents most similar to the query.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 5.
filter – Filter to apply on metadata. For example, a filter can be
"doc.rating > 3.0 and part.lang = 'deu'"; see
https://docs.vectara.com/docs/search-apis/sql/filter-overview for more
details.
Returns
List of Documents most similar to the query
similarity_search_with_score(query: str, k: int = 5, alpha: float = 0.025, filter: Optional[str] = None, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]#
Return Vectara documents most similar to query, along with scores.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 5.
alpha – parameter for hybrid search (called “lambda” in Vectara
documentation).
filter – Filter to apply on metadata. For example, a filter can be
"doc.rating > 3.0 and part.lang = 'deu'"; see
https://docs.vectara.com/docs/search-apis/sql/filter-overview
for more details.
Returns
List of Documents most similar to the query and score for each.
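A short sketch combining k, alpha, and a metadata filter (the values are illustrative):
results = vectorstore.similarity_search_with_score(
    "What does this corpus say about ratings?",
    k=5,
    alpha=0.025,
    filter="doc.rating > 3.0",
)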
class langchain.vectorstores.VectorStore[source]#
Interface for vector stores.
async aadd_documents(documents: List[langchain.schema.Document], **kwargs: Any) → List[str][source]#
Run more documents through the embeddings and add to the vectorstore.
Parameters
documents (List[Document]) – Documents to add to the vectorstore.
Returns
List of IDs of the added texts.
Return type
List[str]
async aadd_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]#
Run more texts through the embeddings and add to the vectorstore.
add_documents(documents: List[langchain.schema.Document], **kwargs: Any) → List[str][source]#
Run more documents through the embeddings and add to the vectorstore.
Parameters
documents (List[Document]) – Documents to add to the vectorstore.
Returns
List of IDs of the added texts.
Return type
List[str]
abstract add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]#
Run more texts through the embeddings and add to the vectorstore.
Parameters
texts – Iterable of strings to add to the vectorstore.
metadatas – Optional list of metadatas associated with the texts.
kwargs – vectorstore specific parameters
Returns
List of ids from adding the texts into the vectorstore.
async classmethod afrom_documents(documents: List[langchain.schema.Document], embedding: langchain.embeddings.base.Embeddings, **kwargs: Any) → langchain.vectorstores.base.VST[source]#
Return VectorStore initialized from documents and embeddings.
async classmethod afrom_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any) → langchain.vectorstores.base.VST[source]#
Return VectorStore initialized from texts and embeddings.
async amax_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs selected using the maximal marginal relevance.
async amax_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs selected using the maximal marginal relevance.
as_retriever(**kwargs: Any) → langchain.vectorstores.base.VectorStoreRetriever[source]#
async asearch(query: str, search_type: str, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to query using specified search type.
async asimilarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to query.
async asimilarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to embedding vector.
async asimilarity_search_with_relevance_scores(query: str, k: int = 4, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]#
Return docs most similar to query.
classmethod from_documents(documents: List[langchain.schema.Document], embedding: langchain.embeddings.base.Embeddings, **kwargs: Any) → langchain.vectorstores.base.VST[source]#
Return VectorStore initialized from documents and embeddings.
abstract classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any) → langchain.vectorstores.base.VST[source]#
Return VectorStore initialized from texts and embeddings.
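To illustrate the abstract interface, here is a minimal, hypothetical in-memory subclass. It is a toy sketch (not a LangChain class) implementing the two abstract members plus from_texts:
from typing import Any, Iterable, List, Optional
import numpy as np
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore

class InMemoryVectorStore(VectorStore):
    # Toy store: keeps vectors in a list and ranks by cosine similarity.
    def __init__(self, embedding: Embeddings):
        self._embedding = embedding
        self._vectors: List[List[float]] = []
        self._docs: List[Document] = []

    def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) -> List[str]:
        texts = list(texts)
        start = len(self._docs)
        self._vectors.extend(self._embedding.embed_documents(texts))
        for i, text in enumerate(texts):
            meta = metadatas[i] if metadatas else {}
            self._docs.append(Document(page_content=text, metadata=meta))
        return [str(i) for i in range(start, len(self._docs))]

    def similarity_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
        q = np.asarray(self._embedding.embed_query(query))
        sims = [
            float(np.dot(q, v) / (np.linalg.norm(q) * np.linalg.norm(v)))
            for v in map(np.asarray, self._vectors)
        ]
        top = sorted(range(len(sims)), key=sims.__getitem__, reverse=True)[:k]
        return [self._docs[i] for i in top]

    @classmethod
    def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any) -> "InMemoryVectorStore":
        store = cls(embedding)
        store.add_texts(texts, metadatas)
        return store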
max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
fetch_k – Number of Documents to fetch to pass to MMR algorithm.
lambda_mult – Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns
List of Documents selected by maximal marginal relevance.
max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Parameters
embedding – Embedding to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
fetch_k – Number of Documents to fetch to pass to MMR algorithm.
lambda_mult – Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns
List of Documents selected by maximal marginal relevance.
search(query: str, search_type: str, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to query using specified search type.
abstract similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to query.
similarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to embedding vector.
Parameters
embedding – Embedding to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
Returns
List of Documents most similar to the query vector.
similarity_search_with_relevance_scores(query: str, k: int = 4, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]#
Return docs and relevance scores in the range [0, 1].
0 is dissimilar, 1 is most similar.
Parameters
query – input text
k – Number of Documents to return. Defaults to 4.
**kwargs – kwargs to be passed to similarity search. Should include:
score_threshold: Optional, a floating point value between 0 to 1 to
filter the resulting set of retrieved docs
Returns
List of Tuples of (doc, similarity_score)
class langchain.vectorstores.Weaviate(client: typing.Any, index_name: str, text_key: str, embedding: typing.Optional[langchain.embeddings.base.Embeddings] = None, attributes: typing.Optional[typing.List[str]] = None, relevance_score_fn: typing.Optional[typing.Callable[[float], float]] = <function _default_score_normalizer>, by_text: bool = True)[source]#
Wrapper around Weaviate vector database.
To use, you should have the weaviate-client python package installed.
Example
import os
import weaviate
from langchain.vectorstores import Weaviate
client = weaviate.Client(url=os.environ["WEAVIATE_URL"], ...)
weaviate = Weaviate(client, index_name, text_key)
add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]#
Upload texts with metadata (properties) to Weaviate.
classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any) → langchain.vectorstores.weaviate.Weaviate[source]#
Construct Weaviate wrapper from raw documents.
This is a user-friendly interface that:
Embeds documents.
Creates a new index for the embeddings in the Weaviate instance.
Adds the documents to the newly created Weaviate index.
This is intended to be a quick way to get started.
Example
from langchain.vectorstores.weaviate import Weaviate
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
weaviate = Weaviate.from_texts(
texts,
embeddings,
weaviate_url="http://localhost:8080"
)
max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
fetch_k – Number of Documents to fetch to pass to MMR algorithm.
lambda_mult – Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns
List of Documents selected by maximal marginal relevance.
max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Parameters
embedding – Embedding to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
fetch_k – Number of Documents to fetch to pass to MMR algorithm.
lambda_mult – Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns
List of Documents selected by maximal marginal relevance.
similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to query.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
Returns
List of Documents most similar to the query.
similarity_search_by_text(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to query.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
Returns
List of Documents most similar to the query.
similarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Look up similar documents by embedding vector in Weaviate.
similarity_search_with_score(query: str, k: int = 4, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]#
class langchain.vectorstores.Zilliz(embedding_function: langchain.embeddings.base.Embeddings, collection_name: str = 'LangChainCollection', connection_args: Optional[dict[str, Any]] = None, consistency_level: str = 'Session', index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: Optional[bool] = False)[source]#
classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = 'LangChainCollection', connection_args: dict[str, Any] = {}, consistency_level: str = 'Session', index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: bool = False, **kwargs: Any) → langchain.vectorstores.zilliz.Zilliz[source]#
Create a Zilliz collection, index it with HNSW, and insert data.
Parameters
texts (List[str]) – Text data.
embedding (Embeddings) – Embedding function.
metadatas (Optional[List[dict]]) – Metadata for each text if it exists.
Defaults to None.
collection_name (str, optional) – Collection name to use. Defaults to
“LangChainCollection”.
connection_args (dict[str, Any], optional) – Connection args to use. Defaults
to DEFAULT_MILVUS_CONNECTION.
consistency_level (str, optional) – Which consistency level to use. Defaults
to “Session”.
index_params (Optional[dict], optional) – Which index_params to use.
Defaults to None.
search_params (Optional[dict], optional) – Which search params to use.
Defaults to None.
drop_old (Optional[bool], optional) – Whether to drop the collection with
that name if it exists. Defaults to False.
Returns
Zilliz Vector Store
Return type
Zilliz
Agents#
Interface for agents.
pydantic model langchain.agents.Agent[source]#
Class responsible for calling the language model and deciding the action.
This is driven by an LLMChain. The prompt in the LLMChain MUST include
a variable called “agent_scratchpad” where the agent can put its
intermediary work.
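A minimal sketch of a prompt satisfying this requirement (the template text is illustrative):
from langchain.prompts import PromptTemplate

prompt = PromptTemplate(
    input_variables=["input", "agent_scratchpad"],
    template="Answer the following question.\n\nQuestion: {input}\n{agent_scratchpad}",
)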
field allowed_tools: Optional[List[str]] = None#
field llm_chain: langchain.chains.llm.LLMChain [Required]#
field output_parser: langchain.agents.agent.AgentOutputParser [Required]#
async aplan(intermediate_steps: List[Tuple[langchain.schema.AgentAction, str]], callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → Union[langchain.schema.AgentAction, langchain.schema.AgentFinish][source]#
Given input, decide what to do.
Parameters
intermediate_steps – Steps the LLM has taken to date,
along with observations
callbacks – Callbacks to run.
**kwargs – User inputs.
Returns
Action specifying what tool to use.
abstract classmethod create_prompt(tools: Sequence[langchain.tools.base.BaseTool]) → langchain.prompts.base.BasePromptTemplate[source]#
Create a prompt for this class.
dict(**kwargs: Any) → Dict[source]#
Return dictionary representation of agent.
classmethod from_llm_and_tools(llm: langchain.base_language.BaseLanguageModel, tools: Sequence[langchain.tools.base.BaseTool], callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, output_parser: Optional[langchain.agents.agent.AgentOutputParser] = None, **kwargs: Any) → langchain.agents.agent.Agent[source]#
Construct an agent from an LLM and tools.
get_allowed_tools() → Optional[List[str]][source]#
get_full_inputs(intermediate_steps: List[Tuple[langchain.schema.AgentAction, str]], **kwargs: Any) → Dict[str, Any][source]#
Create the full inputs for the LLMChain from intermediate steps.
plan(intermediate_steps: List[Tuple[langchain.schema.AgentAction, str]], callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → Union[langchain.schema.AgentAction, langchain.schema.AgentFinish][source]#
Given input, decide what to do.
Parameters
intermediate_steps – Steps the LLM has taken to date,
along with observations
callbacks – Callbacks to run.
**kwargs – User inputs.
Returns
Action specifying what tool to use.
return_stopped_response(early_stopping_method: str, intermediate_steps: List[Tuple[langchain.schema.AgentAction, str]], **kwargs: Any) → langchain.schema.AgentFinish[source]#
Return response when agent has been stopped due to max iterations.
tool_run_logging_kwargs() → Dict[source]#
abstract property llm_prefix: str#
Prefix to append to the LLM call.
abstract property observation_prefix: str#
Prefix to append to the observation.
property return_values: List[str]#
Return values of the agent.
pydantic model langchain.agents.AgentExecutor[source]#
Consists of an agent using tools.
Validators
raise_deprecation » all fields
set_verbose » verbose
validate_return_direct_tool » all fields
validate_tools » all fields
field agent: Union[BaseSingleActionAgent, BaseMultiActionAgent] [Required]#
field early_stopping_method: str = 'force'#
field handle_parsing_errors: Union[bool, str, Callable[[OutputParserException], str]] = False#
field max_execution_time: Optional[float] = None#
field max_iterations: Optional[int] = 15#
field return_intermediate_steps: bool = False#
field tools: Sequence[BaseTool] [Required]#
classmethod from_agent_and_tools(agent: Union[langchain.agents.agent.BaseSingleActionAgent, langchain.agents.agent.BaseMultiActionAgent], tools: Sequence[langchain.tools.base.BaseTool], callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, **kwargs: Any) → langchain.agents.agent.AgentExecutor[source]#
Create from agent and tools.
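A hedged usage sketch (agent and tools are assumed to be constructed already):
agent_executor = AgentExecutor.from_agent_and_tools(
    agent=agent,
    tools=tools,
    max_iterations=15,
    early_stopping_method="force",
    handle_parsing_errors=True,
)
result = agent_executor.run("What is 2 + 2?")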
lookup_tool(name: str) → langchain.tools.base.BaseTool[source]#
Lookup tool by name.
save(file_path: Union[pathlib.Path, str]) → None[source]#
Raise error - saving not supported for Agent Executors.
save_agent(file_path: Union[pathlib.Path, str]) → None[source]#
Save the underlying agent.
pydantic model langchain.agents.AgentOutputParser[source]#
abstract parse(text: str) → Union[langchain.schema.AgentAction, langchain.schema.AgentFinish][source]#
Parse text into agent action/finish.
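A minimal, hypothetical subclass sketch (the expected text format is an assumption for illustration):
from langchain.agents import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish

class PipeOutputParser(AgentOutputParser):
    # Expects either "Final Answer: ..." or "Action: <tool> | <input>".
    def parse(self, text: str):
        if "Final Answer:" in text:
            return AgentFinish({"output": text.split("Final Answer:")[-1].strip()}, log=text)
        action, _, tool_input = text.partition("|")
        return AgentAction(
            tool=action.replace("Action:", "").strip(),
            tool_input=tool_input.strip(),
            log=text,
        )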
class langchain.agents.AgentType(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)[source]#
CHAT_CONVERSATIONAL_REACT_DESCRIPTION = 'chat-conversational-react-description'#
CHAT_ZERO_SHOT_REACT_DESCRIPTION = 'chat-zero-shot-react-description'#
CONVERSATIONAL_REACT_DESCRIPTION = 'conversational-react-description'#
REACT_DOCSTORE = 'react-docstore'#
SELF_ASK_WITH_SEARCH = 'self-ask-with-search'#
STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION = 'structured-chat-zero-shot-react-description'#
ZERO_SHOT_REACT_DESCRIPTION = 'zero-shot-react-description'#
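These values are typically passed to initialize_agent; a hedged sketch (llm and tools are assumed to be defined):
from langchain.agents import AgentType, initialize_agent

agent_executor = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)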
pydantic model langchain.agents.BaseMultiActionAgent[source]#
Base Agent class.
abstract async aplan(intermediate_steps: List[Tuple[langchain.schema.AgentAction, str]], callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → Union[List[langchain.schema.AgentAction], langchain.schema.AgentFinish][source]#
Given input, decide what to do.
Parameters
intermediate_steps – Steps the LLM has taken to date,
along with observations
callbacks – Callbacks to run.
**kwargs – User inputs.
Returns
Actions specifying what tool to use.
dict(**kwargs: Any) → Dict[source]#
Return dictionary representation of agent.
get_allowed_tools() → Optional[List[str]][source]#
abstract plan(intermediate_steps: List[Tuple[langchain.schema.AgentAction, str]], callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → Union[List[langchain.schema.AgentAction], langchain.schema.AgentFinish][source]#
Given input, decide what to do.
Parameters
intermediate_steps – Steps the LLM has taken to date,
along with observations
callbacks – Callbacks to run.
**kwargs – User inputs.
Returns
Actions specifying what tool to use.
return_stopped_response(early_stopping_method: str, intermediate_steps: List[Tuple[langchain.schema.AgentAction, str]], **kwargs: Any) → langchain.schema.AgentFinish[source]#
Return response when agent has been stopped due to max iterations.
save(file_path: Union[pathlib.Path, str]) → None[source]#
Save the agent.
Parameters
file_path – Path to file to save the agent to.
Example:
# If working with agent executor
agent.agent.save(file_path="path/agent.yaml")
tool_run_logging_kwargs() → Dict[source]#
property return_values: List[str]#
Return values of the agent.
pydantic model langchain.agents.BaseSingleActionAgent[source]#
Base Agent class.
abstract async aplan(intermediate_steps: List[Tuple[langchain.schema.AgentAction, str]], callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → Union[langchain.schema.AgentAction, langchain.schema.AgentFinish][source]#
Given input, decide what to do.
Parameters
intermediate_steps – Steps the LLM has taken to date,
along with observations
callbacks – Callbacks to run.
**kwargs – User inputs.
Returns
Action specifying what tool to use.
dict(**kwargs: Any) → Dict[source]#
Return dictionary representation of agent.
classmethod from_llm_and_tools(llm: langchain.base_language.BaseLanguageModel, tools: Sequence[langchain.tools.base.BaseTool], callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, **kwargs: Any) → langchain.agents.agent.BaseSingleActionAgent[source]#
get_allowed_tools() → Optional[List[str]][source]#
abstract plan(intermediate_steps: List[Tuple[langchain.schema.AgentAction, str]], callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → Union[langchain.schema.AgentAction, langchain.schema.AgentFinish][source]#
Given input, decide what to do.
Parameters
intermediate_steps – Steps the LLM has taken to date,
along with observations
callbacks – Callbacks to run.
**kwargs – User inputs.
Returns
Action specifying what tool to use.
return_stopped_response(early_stopping_method: str, intermediate_steps: List[Tuple[langchain.schema.AgentAction, str]], **kwargs: Any) → langchain.schema.AgentFinish[source]#
Return response when agent has been stopped due to max iterations.
save(file_path: Union[pathlib.Path, str]) → None[source]#
Save the agent.
Parameters
file_path – Path to file to save the agent to.
Example:
# If working with agent executor
agent.agent.save(file_path="path/agent.yaml")
tool_run_logging_kwargs() → Dict[source]#
property return_values: List[str]#
Return values of the agent.
pydantic model langchain.agents.ConversationalAgent[source]#
An agent designed to hold a conversation in addition to using tools.
field ai_prefix: str = 'AI'#
field output_parser: langchain.agents.agent.AgentOutputParser [Optional]#
classmethod create_prompt(tools: Sequence[langchain.tools.base.BaseTool], prefix: str = 'Assistant is a large language model trained by OpenAI.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n\nTOOLS:\n------\n\nAssistant has access to the following tools:', suffix: str = 'Begin!\n\nPrevious conversation history:\n{chat_history}\n\nNew input: {input}\n{agent_scratchpad}', format_instructions: str = 'To use a tool, please use the following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n```\n\nWhen you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use a tool? No\n{ai_prefix}: [your response here]\n```', ai_prefix: str = 'AI', human_prefix: str = 'Human', input_variables: Optional[List[str]] = None) → langchain.prompts.prompt.PromptTemplate[source]#
Create prompt in the style of the zero shot agent.
Parameters
tools – List of tools the agent will have access to, used to format the
prompt.
prefix – String to put before the list of tools.
suffix – String to put after the list of tools.
ai_prefix – String to use before AI output.
human_prefix – String to use before human output.
input_variables – List of input variables the final prompt will expect.
Returns
A PromptTemplate with the template assembled from the pieces here.
classmethod from_llm_and_tools(llm: langchain.base_language.BaseLanguageModel, tools: Sequence[langchain.tools.base.BaseTool], callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, output_parser: Optional[langchain.agents.agent.AgentOutputParser] = None, prefix: str = 'Assistant is a large language model trained by OpenAI.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n\nTOOLS:\n------\n\nAssistant has access to the following tools:', suffix: str = 'Begin!\n\nPrevious conversation history:\n{chat_history}\n\nNew input: {input}\n{agent_scratchpad}', format_instructions: str = 'To use a tool, please use the following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n```\n\nWhen you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use a tool? No\n{ai_prefix}: [your response here]\n```', ai_prefix: str = 'AI', human_prefix: str = 'Human', input_variables: Optional[List[str]] = None, **kwargs: Any) → langchain.agents.agent.Agent[source]#
Construct an agent from an LLM and tools.
property llm_prefix: str#
Prefix to append the llm call with.
property observation_prefix: str#
Prefix to append the observation with.
pydantic model langchain.agents.ConversationalChatAgent[source]#
An agent designed to hold a conversation in addition to using tools.
field output_parser: langchain.agents.agent.AgentOutputParser [Optional]#
field template_tool_response: str = "TOOL RESPONSE: \n---------------------\n{observation}\n\nUSER'S INPUT\n--------------------\n\nOkay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else."#
classmethod create_prompt(tools: Sequence[langchain.tools.base.BaseTool], system_message: str = 'Assistant is a large language model trained by OpenAI.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.', human_message: str = "TOOLS\n------\nAssistant can ask the user to use tools to look up information that may be helpful in answering the users original question. The tools the human can use are:\n\n{{tools}}\n\n{format_instructions}\n\nUSER'S INPUT\n--------------------\nHere is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):\n\n{{{{input}}}}", input_variables: Optional[List[str]] = None, output_parser: Optional[langchain.schema.BaseOutputParser] = None) → langchain.prompts.base.BasePromptTemplate[source]#
Create a prompt for this class.
classmethod from_llm_and_tools(llm: langchain.base_language.BaseLanguageModel, tools: Sequence[langchain.tools.base.BaseTool], callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, output_parser: Optional[langchain.agents.agent.AgentOutputParser] = None, system_message: str = 'Assistant is a large language model trained by OpenAI.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.', human_message: str = "TOOLS\n------\nAssistant can ask the user to use tools to look up information that may be helpful in answering the users original question. The tools the human can use are:\n\n{{tools}}\n\n{format_instructions}\n\nUSER'S INPUT\n--------------------\nHere is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):\n\n{{{{input}}}}", input_variables: Optional[List[str]] = None, **kwargs: Any) → langchain.agents.agent.Agent[source]#
Construct an agent from an LLM and tools.
property llm_prefix: str#
Prefix to append the llm call with.
property observation_prefix: str#
Prefix to append the observation with.
pydantic model langchain.agents.LLMSingleActionAgent[source]#
field llm_chain: langchain.chains.llm.LLMChain [Required]#
field output_parser: langchain.agents.agent.AgentOutputParser [Required]#
field stop: List[str] [Required]#
async aplan(intermediate_steps: List[Tuple[langchain.schema.AgentAction, str]], callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → Union[langchain.schema.AgentAction, langchain.schema.AgentFinish][source]#
Given input, decide what to do.
Parameters
intermediate_steps – Steps the LLM has taken to date,
along with observations
callbacks – Callbacks to run.
**kwargs – User inputs.
Returns
Action specifying what tool to use.
dict(**kwargs: Any) → Dict[source]#
Return dictionary representation of agent.
plan(intermediate_steps: List[Tuple[langchain.schema.AgentAction, str]], callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → Union[langchain.schema.AgentAction, langchain.schema.AgentFinish][source]#
Given input, decide what to do.
Parameters
intermediate_steps – Steps the LLM has taken to date,
along with observations
callbacks – Callbacks to run.
**kwargs – User inputs.
Returns
Action specifying what tool to use.
tool_run_logging_kwargs() → Dict[source]#
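For illustration, a minimal sketch of a custom single-action agent. SimpleParser and the prompt are hypothetical, not part of the library; a real agent would format intermediate_steps into a proper scratchpad string rather than relying on repr():
from typing import Union
from langchain.agents import AgentOutputParser, LLMSingleActionAgent
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.schema import AgentAction, AgentFinish

class SimpleParser(AgentOutputParser):
    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        if "Final Answer:" in text:
            answer = text.split("Final Answer:")[-1].strip()
            return AgentFinish({"output": answer}, log=text)
        # anything else is routed to a single tool named "Search"
        return AgentAction(tool="Search", tool_input=text.strip(), log=text)

llm_chain = LLMChain(
    llm=OpenAI(temperature=0),
    # deliberately simple: intermediate_steps is rendered via its repr()
    prompt=PromptTemplate.from_template("{input}\nSteps so far: {intermediate_steps}"),
)
agent = LLMSingleActionAgent(
    llm_chain=llm_chain,
    output_parser=SimpleParser(),
    stop=["\nObservation:"],
)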
pydantic model langchain.agents.MRKLChain[source]#
Chain that implements the MRKL system.
Example
from langchain import OpenAI, MRKLChain
from langchain.chains.mrkl.base import ChainConfig
llm = OpenAI(temperature=0)
chains = [...]
mrkl = MRKLChain.from_chains(llm=llm, chains=chains)
Validators
raise_deprecation » all fields
set_verbose » verbose
validate_return_direct_tool » all fields
validate_tools » all fields
classmethod from_chains(llm: langchain.base_language.BaseLanguageModel, chains: List[langchain.agents.mrkl.base.ChainConfig], **kwargs: Any) → langchain.agents.agent.AgentExecutor[source]#
User-friendly way to initialize the MRKL chain.
This is intended to be an easy way to get up and running with the
MRKL chain.
Parameters
llm – The LLM to use as the agent LLM.
chains – The chains the MRKL system has access to.
**kwargs – parameters to be passed to initialization.
Returns
An initialized MRKL chain.
Example
from langchain import LLMMathChain, OpenAI, SerpAPIWrapper, MRKLChain
from langchain.chains.mrkl.base import ChainConfig
llm = OpenAI(temperature=0)
search = SerpAPIWrapper()
llm_math_chain = LLMMathChain(llm=llm)
chains = [
    ChainConfig(
        action_name="Search",
        action=search.run,
        action_description="useful for searching"
    ),
    ChainConfig(
        action_name="Calculator",
        action=llm_math_chain.run,
        action_description="useful for doing math"
    )
]
mrkl = MRKLChain.from_chains(llm, chains)
pydantic model langchain.agents.ReActChain[source]#
Chain that implements the ReAct paper.
Example
from langchain import ReActChain, OpenAI
react = ReActChain(llm=OpenAI())
Validators
raise_deprecation » all fields
set_verbose » verbose
validate_return_direct_tool » all fields
validate_tools » all fields
pydantic model langchain.agents.ReActTextWorldAgent[source]#
Agent for the ReAct TextWorld chain.
classmethod create_prompt(tools: Sequence[langchain.tools.base.BaseTool]) → langchain.prompts.base.BasePromptTemplate[source]#
Return default prompt.
pydantic model langchain.agents.SelfAskWithSearchChain[source]#
Chain that does self ask with search.
Example
from langchain import SelfAskWithSearchChain, OpenAI, GoogleSerperAPIWrapper
search_chain = GoogleSerperAPIWrapper()
self_ask = SelfAskWithSearchChain(llm=OpenAI(), search_chain=search_chain)
Validators
raise_deprecation » all fields
set_verbose » verbose
validate_return_direct_tool » all fields
validate_tools » all fields
pydantic model langchain.agents.StructuredChatAgent[source]#
field output_parser: langchain.agents.agent.AgentOutputParser [Optional]#
classmethod create_prompt(tools: Sequence[langchain.tools.base.BaseTool], prefix: str = 'Respond to the human as helpfully and accurately as possible. You have access to the following tools:', suffix: str = 'Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.\nThought:', human_message_template: str = '{input}\n\n{agent_scratchpad}', format_instructions: str = 'Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).\n\nValid "action" values: "Final Answer" or {tool_names}\n\nProvide only ONE action per $JSON_BLOB, as shown:\n\n```\n{{{{\n "action": $TOOL_NAME,\n "action_input": $INPUT\n}}}}\n```\n\nFollow this format:\n\nQuestion: input question to answer\nThought: consider previous and subsequent steps\nAction:\n```\n$JSON_BLOB\n```\nObservation: action result\n... (repeat Thought/Action/Observation N times)\nThought: I know what to respond\nAction:\n```\n{{{{\n "action": "Final Answer",\n "action_input": "Final response to human"\n}}}}\n```', input_variables: Optional[List[str]] = None, memory_prompts: Optional[List[langchain.prompts.base.BasePromptTemplate]] = None) → langchain.prompts.base.BasePromptTemplate[source]#
Create a prompt for this class.
classmethod from_llm_and_tools(llm: langchain.base_language.BaseLanguageModel, tools: Sequence[langchain.tools.base.BaseTool], callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, output_parser: Optional[langchain.agents.agent.AgentOutputParser] = None, prefix: str = 'Respond to the human as helpfully and accurately as possible. You have access to the following tools:', suffix: str = 'Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.\nThought:', human_message_template: str = '{input}\n\n{agent_scratchpad}', format_instructions: str = 'Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).\n\nValid "action" values: "Final Answer" or {tool_names}\n\nProvide only ONE action per $JSON_BLOB, as shown:\n\n```\n{{{{\n "action": $TOOL_NAME,\n "action_input": $INPUT\n}}}}\n```\n\nFollow this format:\n\nQuestion: input question to answer\nThought: consider previous and subsequent steps\nAction:\n```\n$JSON_BLOB\n```\nObservation: action result\n... (repeat Thought/Action/Observation N times)\nThought: I know what to respond\nAction:\n```\n{{{{\n "action": "Final Answer",\n "action_input": "Final response to human"\n}}}}\n```', input_variables: Optional[List[str]] = None, memory_prompts: Optional[List[langchain.prompts.base.BasePromptTemplate]] = None, **kwargs: Any) → langchain.agents.agent.Agent[source]#
Construct an agent from an LLM and tools.
property llm_prefix: str#
Prefix to append the llm call with.
property observation_prefix: str#
Prefix to append the observation with.
pydantic model langchain.agents.Tool[source]#
Tool that takes in a function or coroutine directly.
field coroutine: Optional[Callable[[...], Awaitable[str]]] = None#
The asynchronous version of the function.
field description: str = ''#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field func: Callable[[...], str] [Required]#
The function to run when the tool is called.
classmethod from_function(func: Callable, name: str, description: str, return_direct: bool = False, args_schema: Optional[Type[pydantic.main.BaseModel]] = None, **kwargs: Any) → langchain.tools.base.Tool[source]#
Initialize tool from a function.
property args: dict#
The tool’s input arguments.
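For illustration, a minimal sketch of wrapping a plain function as a Tool; search_notes and its canned return value are hypothetical:
from langchain.agents import Tool

def search_notes(query: str) -> str:
    """Look up a query in a hypothetical notes store."""
    return f"No notes found for {query!r}"  # placeholder result

notes_tool = Tool.from_function(
    func=search_notes,
    name="NotesSearch",
    description="useful for searching personal notes",
)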
pydantic model langchain.agents.ZeroShotAgent[source]#
Agent for the MRKL chain.
field output_parser: langchain.agents.agent.AgentOutputParser [Optional]#
classmethod create_prompt(tools: Sequence[langchain.tools.base.BaseTool], prefix: str = 'Answer the following questions as best you can. You have access to the following tools:', suffix: str = 'Begin!\n\nQuestion: {input}\nThought:{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None) → langchain.prompts.prompt.PromptTemplate[source]#
Create prompt in the style of the zero shot agent.
Parameters
tools – List of tools the agent will have access to, used to format the
prompt.
prefix – String to put before the list of tools.
suffix – String to put after the list of tools.
input_variables – List of input variables the final prompt will expect.
Returns
A PromptTemplate with the template assembled from the pieces here.
classmethod from_llm_and_tools(llm: langchain.base_language.BaseLanguageModel, tools: Sequence[langchain.tools.base.BaseTool], callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, output_parser: Optional[langchain.agents.agent.AgentOutputParser] = None, prefix: str = 'Answer the following questions as best you can. You have access to the following tools:', suffix: str = 'Begin!\n\nQuestion: {input}\nThought:{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, **kwargs: Any) → langchain.agents.agent.Agent[source]#
Construct an agent from an LLM and tools.
property llm_prefix: str#
Prefix to append the llm call with.
property observation_prefix: str#
Prefix to append the observation with.
langchain.agents.create_csv_agent(llm: langchain.base_language.BaseLanguageModel, path: Union[str, List[str]], pandas_kwargs: Optional[dict] = None, **kwargs: Any) → langchain.agents.agent.AgentExecutor[source]#
Create a csv agent by loading the csv into a dataframe and using the pandas agent.
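For illustration, a minimal sketch ("titanic.csv" is a placeholder path; assumes pandas and an OpenAI API key are available):
from langchain.agents import create_csv_agent
from langchain.llms import OpenAI

agent = create_csv_agent(OpenAI(temperature=0), "titanic.csv", verbose=True)
agent.run("How many rows are in the file?")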
langchain.agents.create_json_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.json.toolkit.JsonToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to interact with JSON.\nYour goal is to return a final answer by interacting with the JSON.\nYou have access to the following tools which help you learn more about the JSON you are interacting with.\nOnly use the below tools. Only use the information returned by the below tools to construct your final answer.\nDo not make up any information that is not contained in the JSON.\nYour input to the tools should be in the form of `data["key"][0]` where `data` is the JSON blob you are interacting with, and the syntax used is Python. \nYou should only use keys that you know for a fact exist. You must validate that a key exists by seeing it previously when calling `json_spec_list_keys`. \nIf you have not seen a key in one of those responses, you cannot use it.\nYou should only add one key at a time to the path. You cannot add multiple keys at once.\nIf you encounter a "KeyError", go back to the previous key, look at the available keys, and try again.\n\nIf the question does not seem to be related to the JSON, just return "I don\'t know" as the answer.\nAlways begin your interaction with the `json_spec_list_keys` tool with input "data" to see what keys exist in the JSON.\n\nNote that sometimes the value at a given path is large. In this case, you will get an error "Value is a large dictionary, should explore its keys directly".\nIn this case, you should ALWAYS follow up by using the `json_spec_list_keys` tool to see what keys exist at that path.\nDo not simply refer the user to the JSON or a section of the JSON, as this is not a valid answer. Keep digging until you find the answer and explicitly return it.\n', suffix: str = 'Begin!"\n\nQuestion: {input}\nThought: I should look at the keys that exist in data to see what I have access to\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a json agent from an LLM and tools.
langchain.agents.create_openapi_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.openapi.toolkit.OpenAPIToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = "You are an agent designed to answer questions by making web requests to an API given the openapi spec.\n\nIf the question does not seem related to the API, return I don't know. Do not make up an answer.\nOnly use information provided by the tools to construct your response.\n\nFirst, find the base URL needed to make the request.\n\nSecond, find the relevant paths needed to answer the question. Take note that, sometimes, you might need to make more than one request to more than one path to answer the question.\n\nThird, find the required parameters needed to make the request. For GET requests, these are usually URL parameters and for POST requests, these are request body parameters.\n\nFourth, make the requests needed to answer the question. Ensure that you are sending the correct parameters to the request by checking which parameters are required. For parameters with a fixed set of values, please use the spec to look at which values are allowed.\n\nUse the exact parameter names as listed in the spec, do not make up any names or abbreviate the names of parameters.\nIf you get a not found error, ensure that you are using a path that actually exists in the spec.\n", suffix: str = 'Begin!\n\nQuestion: {input}\nThought: I should explore the spec to find the base url for the API.\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', verbose: bool = False, return_intermediate_steps: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct an openapi agent from an LLM and tools.
langchain.agents.create_pandas_dataframe_agent(llm: langchain.base_language.BaseLanguageModel, df: Any, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, verbose: bool = False, return_intermediate_steps: bool = False, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', agent_executor_kwargs: Optional[Dict[str, Any]] = None, include_df_in_prompt: Optional[bool] = True, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a pandas agent from an LLM and dataframe.
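For illustration, a minimal sketch over a toy dataframe (assumes pandas and an OpenAI API key are available):
import pandas as pd
from langchain.agents import create_pandas_dataframe_agent
from langchain.llms import OpenAI

df = pd.DataFrame({"city": ["Paris", "Tokyo"], "population_m": [2.1, 14.0]})
agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df, verbose=True)
agent.run("Which city has the larger population?")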
langchain.agents.create_pbi_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: Optional[langchain.agents.agent_toolkits.powerbi.toolkit.PowerBIToolkit], powerbi: Optional[langchain.utilities.powerbi.PowerBIDataset] = None, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to help users interact with a PowerBI Dataset.\n\nAgent has access to a tool that can write a query based on the question and then run those against PowerBI, Microsofts business intelligence tool. The questions from the users should be interpreted as related to the dataset that is available and not general questions about the world. If the question does not seem related to the dataset, just return "This does not appear to be part of this dataset." as the answer.\n\nGiven an input question, ask to run the questions against the dataset, then look at the results and return the answer, the answer should be a complete sentence that answers the question, if multiple rows are asked find a way to write that in a easily readible format for a human, also make sure to represent numbers in readable ways, like 1M instead of 1000000. Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.\n', suffix: str = 'Begin!\n\nQuestion: {input}\nThought: I can first ask which tables I have, then how each table is defined and then ask the query tool the question I need, and finally create a nice sentence that answers the question.\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', examples: Optional[str] = None, input_variables: Optional[List[str]] = None, top_k: int = 10, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a pbi agent from an LLM and tools.
langchain.agents.create_pbi_chat_agent(llm: langchain.chat_models.base.BaseChatModel, toolkit: Optional[langchain.agents.agent_toolkits.powerbi.toolkit.PowerBIToolkit], powerbi: Optional[langchain.utilities.powerbi.PowerBIDataset] = None, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, output_parser: Optional[langchain.agents.agent.AgentOutputParser] = None, prefix: str = 'Assistant is a large language model built to help users interact with a PowerBI Dataset.\n\nAssistant has access to a tool that can write a query based on the question and then run those against PowerBI, Microsofts business intelligence tool. The questions from the users should be interpreted as related to the dataset that is available and not general questions about the world. If the question does not seem related to the dataset, just return "This does not appear to be part of this dataset." as the answer.\n\nGiven an input question, ask to run the questions against the dataset, then look at the results and return the answer, the answer should be a complete sentence that answers the question, if multiple rows are asked find a way to write that in a easily readible format for a human, also make sure to represent numbers in readable ways, like 1M instead of 1000000. Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.\n', suffix: str = "TOOLS\n------\nAssistant can ask the user to use tools to look up information that may be helpful in answering the users original question. The tools the human can use are:\n\n{{tools}}\n\n{format_instructions}\n\nUSER'S INPUT\n--------------------\nHere is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):\n\n{{{{input}}}}\n", examples: Optional[str] = None, input_variables: Optional[List[str]] = None, memory: Optional[langchain.memory.chat_memory.BaseChatMemory] = None, top_k: int = 10, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a pbi agent from a chat LLM and tools.
If you supply only a toolkit and no powerbi dataset, the same LLM is used for both.
langchain.agents.create_spark_dataframe_agent(llm: langchain.llms.base.BaseLLM, df: Any, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = '\nYou are working with a spark dataframe in Python. The name of the dataframe is `df`.\nYou should use the tools below to answer the question posed of you:', suffix: str = '\nThis is the result of `print(df.first())`:\n{df}\n\nBegin!\nQuestion: {input}\n{agent_scratchpad}', input_variables: Optional[List[str]] = None, verbose: bool = False, return_intermediate_steps: bool = False, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a spark agent from an LLM and dataframe.
langchain.agents.create_spark_sql_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.spark_sql.toolkit.SparkSQLToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to interact with Spark SQL.\nGiven an input question, create a syntactically correct Spark SQL query to run, then look at the results of the query and return the answer.\nUnless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.\nYou can order the results by a relevant column to return the most interesting examples in the database.\nNever query for all the columns from a specific table, only ask for the relevant columns given the question.\nYou have access to tools for interacting with the database.\nOnly use the below tools. Only use the information returned by the below tools to construct your final answer.\nYou MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.\n\nDO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.\n\nIf the question does not seem related to the database, just return "I don\'t know" as the answer.\n', suffix: str = 'Begin!\n\nQuestion: {input}\nThought: I should look at the tables in the database to see what I can query.\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, top_k: int = 10, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a Spark SQL agent from an LLM and tools.
langchain.agents.create_sql_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.sql.toolkit.SQLDatabaseToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to interact with a SQL database.\nGiven an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.\nUnless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.\nYou can order the results by a relevant column to return the most interesting examples in the database.\nNever query for all the columns from a specific table, only ask for the relevant columns given the question.\nYou have access to tools for interacting with the database.\nOnly use the below tools. Only use the information returned by the below tools to construct your final answer.\nYou MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.\n\nDO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.\n\nIf the question does not seem related to the database, just return "I don\'t know" as the answer.\n', suffix: str = 'Begin!\n\nQuestion: {input}\nThought: I should look at the tables in the database to see what I can query.\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, top_k: int = 10, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a sql agent from an LLM and tools.
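For illustration, a minimal sketch ("sqlite:///example.db" is a placeholder connection string; assumes an OpenAI API key is configured):
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.llms import OpenAI
from langchain.sql_database import SQLDatabase

db = SQLDatabase.from_uri("sqlite:///example.db")
llm = OpenAI(temperature=0)
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
agent = create_sql_agent(llm=llm, toolkit=toolkit, verbose=True)
agent.run("How many tables are there?")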
langchain.agents.create_vectorstore_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to answer questions about sets of documents.\nYou have access to tools for interacting with the documents, and the inputs to the tools are questions.\nSometimes, you will be asked to provide sources for your questions, in which case you should use the appropriate tool to do so.\nIf the question does not seem relevant to any of the tools provided, just return "I don\'t know" as the answer.\n', verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a vectorstore agent from an LLM and tools.
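For illustration, a minimal sketch with a toy in-memory FAISS store (the store name, description, and indexed text are placeholders; assumes faiss and an OpenAI API key are available):
from langchain.agents.agent_toolkits import (
    VectorStoreInfo,
    VectorStoreToolkit,
    create_vectorstore_agent,
)
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS

store = FAISS.from_texts(["The address was delivered in March."], OpenAIEmbeddings())
info = VectorStoreInfo(
    name="state_of_union",
    description="facts about the most recent state of the union address",
    vectorstore=store,
)
agent = create_vectorstore_agent(
    llm=OpenAI(temperature=0),
    toolkit=VectorStoreToolkit(vectorstore_info=info),
)
agent.run("When was the address delivered?")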
langchain.agents.create_vectorstore_router_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreRouterToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to answer questions.\nYou have access to tools for interacting with different sources, and the inputs to the tools are questions.\nYour main task is to decide which of the tools is relevant for answering question at hand.\nFor complex questions, you can break the question down into sub questions and use tools to answers the sub questions.\n', verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a vectorstore router agent from an LLM and tools.
langchain.agents.get_all_tool_names() → List[str][source]#
Get a list of all possible tool names.
langchain.agents.initialize_agent(tools: Sequence[langchain.tools.base.BaseTool], llm: langchain.base_language.BaseLanguageModel, agent: Optional[langchain.agents.agent_types.AgentType] = None, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, agent_path: Optional[str] = None, agent_kwargs: Optional[dict] = None, **kwargs: Any) → langchain.agents.agent.AgentExecutor[source]#
Load an agent executor given tools and LLM.
Parameters
tools – List of tools this agent has access to.
llm – Language model to use as the agent.
agent – Agent type to use. If None and agent_path is also None, will default to
AgentType.ZERO_SHOT_REACT_DESCRIPTION.
callback_manager – CallbackManager to use. Global callback manager is used if
not provided. Defaults to None.
agent_path – Path to serialized agent to use.
agent_kwargs – Additional keyword arguments to pass to the underlying agent.
**kwargs – Additional keyword arguments passed to the agent executor.
Returns
An agent executor
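For illustration, a minimal sketch of the common entry point (assumes an OpenAI API key is configured; the tool list and question are arbitrary):
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,  # the default when agent is None
    verbose=True,
)
agent.run("What is 25% of 300?")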
langchain.agents.load_agent(path: Union[str, pathlib.Path], **kwargs: Any) → langchain.agents.agent.BaseSingleActionAgent[source]#
Unified method for loading an agent from LangChainHub or the local filesystem.
langchain.agents.load_huggingface_tool(task_or_repo_id: str, model_repo_id: Optional[str] = None, token: Optional[str] = None, remote: bool = False, **kwargs: Any) → langchain.tools.base.BaseTool[source]#
langchain.agents.load_tools(tool_names: List[str], llm: Optional[langchain.base_language.BaseLanguageModel] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → List[langchain.tools.base.BaseTool][source]#
Load tools based on their name.
Parameters
tool_names – names of the tools to load.
llm – Optional language model, may be needed to initialize certain tools.
callbacks – Optional callback manager or list of callback handlers.
If not provided, default global callback manager will be used.
Returns
List of tools.
langchain.agents.tool(*args: Union[str, Callable], return_direct: bool = False, args_schema: Optional[Type[pydantic.main.BaseModel]] = None, infer_schema: bool = True) → Callable[source]#
Make tools out of functions; can be used with or without arguments.
Parameters
*args – The arguments to the tool.
return_direct – Whether to return directly from the tool rather
than continuing the agent loop.
args_schema – optional argument schema for user to specify
infer_schema – Whether to infer the schema of the arguments from
the function’s signature. This also makes the resultant tool
accept a dictionary input to its run() function.
Requires:
Function must be of type (str) -> str
Function must have a docstring
Examples
@tool
def search_api(query: str) -> str:
    """Searches the API for the query."""
    return "results"

@tool("search", return_direct=True)
def search_api(query: str) -> str:
    """Searches the API for the query."""
    return "results"
Document Transformers#
Transform documents
pydantic model langchain.document_transformers.EmbeddingsRedundantFilter[source]#
Filter that drops redundant documents by comparing their embeddings.
field embeddings: langchain.embeddings.base.Embeddings [Required]#
Embeddings to use for embedding document contents.
field similarity_fn: Callable = <function cosine_similarity>#
Similarity function for comparing documents. Function expected to take as input
two matrices (List[List[float]]) and return a matrix of scores where higher values
indicate greater similarity.
field similarity_threshold: float = 0.95#
Threshold for determining when two documents are similar enough
to be considered redundant.
async atransform_documents(documents: Sequence[langchain.schema.Document], **kwargs: Any) → Sequence[langchain.schema.Document][source]#
Asynchronously transform a list of documents.
transform_documents(documents: Sequence[langchain.schema.Document], **kwargs: Any) → Sequence[langchain.schema.Document][source]#
Filter down documents.
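For illustration, a minimal sketch (the documents are toy data; assumes an OpenAI API key is available for the embeddings):
from langchain.document_transformers import EmbeddingsRedundantFilter
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document

docs = [
    Document(page_content="LangChain is a framework for building LLM apps."),
    Document(page_content="LangChain is a framework for building LLM applications."),
    Document(page_content="FAISS is a library for vector similarity search."),
]
redundant_filter = EmbeddingsRedundantFilter(embeddings=OpenAIEmbeddings())
# the two near-identical documents collapse to one
unique_docs = redundant_filter.transform_documents(docs)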
langchain.document_transformers.get_stateful_documents(documents: Sequence[langchain.schema.Document]) → Sequence[langchain.document_transformers._DocumentWithState][source]#
Example Selector#
Logic for selecting examples to include in prompts.
pydantic model langchain.prompts.example_selector.LengthBasedExampleSelector[source]#
Select examples based on length.
Validators
calculate_example_text_lengths » example_text_lengths
field example_prompt: langchain.prompts.prompt.PromptTemplate [Required]#
Prompt template used to format the examples.
field examples: List[dict] [Required]#
A list of the examples that the prompt template expects.
field get_text_length: Callable[[str], int] = <function _get_length_based>#
Function to measure prompt length. Defaults to word count.
field max_length: int = 2048#
Max length for the prompt, beyond which examples are cut.
add_example(example: Dict[str, str]) → None[source]#
Add new example to list.
select_examples(input_variables: Dict[str, str]) → List[dict][source]#
Select which examples to use based on the input lengths.
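For illustration, a minimal sketch with toy examples (the max_length value is arbitrary):
from langchain.prompts import PromptTemplate
from langchain.prompts.example_selector import LengthBasedExampleSelector

example_prompt = PromptTemplate(
    input_variables=["input", "output"],
    template="Input: {input}\nOutput: {output}",
)
selector = LengthBasedExampleSelector(
    examples=[
        {"input": "happy", "output": "sad"},
        {"input": "tall", "output": "short"},
    ],
    example_prompt=example_prompt,
    max_length=25,  # measured in words by default
)
# longer inputs leave less room, so fewer examples are selected
print(selector.select_examples({"input": "big"}))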
pydantic model langchain.prompts.example_selector.MaxMarginalRelevanceExampleSelector[source]#
ExampleSelector that selects examples based on Max Marginal Relevance.
This was shown to improve performance in this paper:
https://arxiv.org/pdf/2211.13892.pdf
field fetch_k: int = 20#
Number of examples to fetch to rerank.
classmethod from_examples(examples: List[dict], embeddings: langchain.embeddings.base.Embeddings, vectorstore_cls: Type[langchain.vectorstores.base.VectorStore], k: int = 4, input_keys: Optional[List[str]] = None, fetch_k: int = 20, **vectorstore_cls_kwargs: Any) → langchain.prompts.example_selector.semantic_similarity.MaxMarginalRelevanceExampleSelector[source]#
Create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
Parameters
examples – List of examples to use in the prompt.
embeddings – An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls – A vector store DB interface class, e.g. FAISS.
k – Number of examples to select
input_keys – If provided, the search is based on the input variables
instead of all variables.
vectorstore_cls_kwargs – optional kwargs containing url for vector store
Returns
The ExampleSelector instantiated, backed by a vector store.
select_examples(input_variables: Dict[str, str]) → List[dict][source]#
Select which examples to use based on semantic similarity.
pydantic model langchain.prompts.example_selector.SemanticSimilarityExampleSelector[source]#
Example selector that selects examples based on SemanticSimilarity.
field example_keys: Optional[List[str]] = None#
Optional keys to filter examples to.
field input_keys: Optional[List[str]] = None#
Optional keys to filter input to. If provided, the search is based on
the input variables instead of all variables.
field k: int = 4#
Number of examples to select.
field vectorstore: langchain.vectorstores.base.VectorStore [Required]#
VectorStore that contains information about examples.
add_example(example: Dict[str, str]) → str[source]#
Add new example to vectorstore.
classmethod from_examples(examples: List[dict], embeddings: langchain.embeddings.base.Embeddings, vectorstore_cls: Type[langchain.vectorstores.base.VectorStore], k: int = 4, input_keys: Optional[List[str]] = None, **vectorstore_cls_kwargs: Any) → langchain.prompts.example_selector.semantic_similarity.SemanticSimilarityExampleSelector[source]#
Create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
Parameters
examples – List of examples to use in the prompt.
embeddings – An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls – A vector store DB interface class, e.g. FAISS.
k – Number of examples to select
input_keys – If provided, the search is based on the input variables
instead of all variables.
vectorstore_cls_kwargs – optional kwargs containing url for vector store
Returns
The ExampleSelector instantiated, backed by a vector store.
select_examples(input_variables: Dict[str, str]) → List[dict][source]#
Select which examples to use based on semantic similarity.
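For illustration, a minimal sketch backed by FAISS (toy examples; assumes faiss and an OpenAI API key are available):
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts.example_selector import SemanticSimilarityExampleSelector
from langchain.vectorstores import FAISS

examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
]
selector = SemanticSimilarityExampleSelector.from_examples(
    examples, OpenAIEmbeddings(), FAISS, k=1
)
# picks the stored example semantically closest to the new input
print(selector.select_examples({"input": "cheerful"}))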
SearxNG Search#
Utility for using SearxNG meta search API.
SearxNG is a privacy-friendly free metasearch engine that aggregates results from
multiple search engines and databases and
supports the OpenSearch
specification.
More details on the installation instructions can be found here.
For the search API refer to https://docs.searxng.org/dev/search_api.html
Quick Start#
In order to use this utility you need to provide the searx host. This can be done
by passing the named parameter searx_host
or exporting the environment variable SEARX_HOST.
Note: this is the only required parameter.
Then create a searx search instance like this:
from langchain.utilities import SearxSearchWrapper
# when the host starts with `http` SSL is disabled and the connection
# is assumed to be on a private network
searx_host='http://self.hosted'
search = SearxSearchWrapper(searx_host=searx_host)
You can now use the search instance to query the searx API.
Searching#
Use the run() and
results() methods to query the searx API.
Other methods are available for convenience.
SearxResults is a convenience wrapper around the raw json result.
Example usage of the run method to make a search:
search.run(query="what is the best search engine?")
Engine Parameters#
You can pass any accepted searx search API parameters to the
SearxSearchWrapper instance.
In the following example we are using the
engines and the language parameters:
# assuming the searx host is set as above or exported as an env variable
s = SearxSearchWrapper(engines=['google', 'bing'],
language='es')
Search Tips#
Searx offers a special
search syntax
that can also be used instead of passing engine parameters.
For example the following query:
s = SearxSearchWrapper("langchain library", engines=['github'])
# can also be written as:
s = SearxSearchWrapper("langchain library !github")
# or even:
s = SearxSearchWrapper("langchain library !gh")
In some situations you might want to pass an extra string to the search query,
for example when the run() method is called by an agent. The search suffix can
also be used as a way to pass extra parameters to searx or the underlying search
engines.
# select the github engine by passing the search suffix at the method level
s.run("langchain library", query_suffix="!gh")
# or define the suffix on the instance itself
s = SearxSearchWrapper(searx_host=searx_host, query_suffix="!gh")
# github can also be selected with the conventional google `site:` syntax
s.run("large language models", query_suffix="site:github.com")
NOTE: A search suffix can be defined on both the instance and the method level.
The resulting query will be the concatenation of the two with the former taking
precedence.
See SearxNG Configured Engines and
SearxNG Search Syntax
for more details.
Notes
This wrapper is based on the SearxNG fork searxng/searxng which is
better maintained than the original Searx project and offers more features.
Public SearxNG instances often use a rate limiter for API usage, so you might want to
use a self hosted instance and disable the rate limiter.
If you are self-hosting an instance you can customize the rate limiter for your
own network as described here.
For a list of public SearxNG instances see https://searx.space/
class langchain.utilities.searx_search.SearxResults(data: str)[source]#
Dict-like wrapper around search API results.
property answers: Any#
Helper accessor on the json result.
pydantic model langchain.utilities.searx_search.SearxSearchWrapper[source]#
Wrapper for Searx API.
To use you need to provide the searx host by passing the named parameter
searx_host or exporting the environment variable SEARX_HOST.
In some situations you might want to disable SSL verification, for example
if you are running searx locally. You can do this by passing the named parameter
unsecure. You can also pass the host url scheme as http to disable SSL.
Example
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://localhost:8888")
Example with SSL disabled:
from langchain.utilities import SearxSearchWrapper
# note: the unsecure parameter is not needed if you pass the url scheme as http
searx = SearxSearchWrapper(searx_host="http://localhost:8888",
                           unsecure=True)
Validators
disable_ssl_warnings » unsecure
validate_params » all fields
field aiosession: Optional[Any] = None#
field categories: Optional[List[str]] = []#
field engines: Optional[List[str]] = []#
field headers: Optional[dict] = None#
field k: int = 10#
field params: dict [Optional]#
field query_suffix: Optional[str] = ''#
|
https://python.langchain.com/en/latest/reference/modules/searx_search.html
|
fbef1e1bbc08-3
|
field params: dict [Optional]#
field query_suffix: Optional[str] = ''#
field searx_host: str = ''#
field unsecure: bool = False#
async aresults(query: str, num_results: int, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict][source]#
Asynchronously query with json results.
Uses aiohttp. See results for more info.
async arun(query: str, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str[source]#
Asynchronous version of run.
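A brief sketch of the async variants, assuming the searx instance created in the example above:
import asyncio

async def main() -> None:
    # aresults mirrors results(); arun mirrors run()
    hits = await searx.aresults("large language models", num_results=5)
    summary = await searx.arun("what is a metasearch engine?")
    print(hits, summary)

asyncio.run(main())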
results(query: str, num_results: int, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict][source]#
Run query through Searx API and return the results with metadata.
Parameters
query – The query to search for.
query_suffix – Extra suffix appended to the query.
num_results – Limit the number of results to return.
engines – List of engines to use for the query.
categories – List of categories to use for the query.
**kwargs – extra parameters to pass to the searx API.
Returns
Dicts with the following keys:
snippet – The description of the result.
title – The title of the result.
link – The link to the result.
engines – The engines used for the result.
category – Searx category of the result.
Return type
List[Dict]
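For instance, to fetch the top five results restricted to the science category (the query and category are illustrative):
for r in searx.results("large language models",
                       num_results=5,
                       categories=["science"]):
    print(r["title"], r["link"])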
run(query: str, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str[source]#
Run query through Searx API and parse results.
You can pass any other params to the searx query API.
Parameters
query – The query to search for.
query_suffix – Extra suffix appended to the query.
engines – List of engines to use for the query.
categories – List of categories to use for the query.
**kwargs – extra parameters to pass to the searx API.
Returns
The result of the query.
Return type
str
Raises
ValueError – If an error occurred with the query.
Example
This will make a query to the qwant engine:
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://my.searx.host")
searx.run("what is the weather in France ?", engine="qwant")
# the same result can be achieved using the `!` syntax of searx
# to select the engine using `query_suffix`
searx.run("what is the weather in France ?", query_suffix="!qwant")
Python REPL#
For backwards compatibility.
pydantic model langchain.python.PythonREPL[source]#
Simulates a standalone Python REPL.
field globals: Optional[Dict] [Optional] (alias '_globals')#
field locals: Optional[Dict] [Optional] (alias '_locals')#
run(command: str) → str[source]#
Run the command with its own globals/locals and return anything printed.
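A minimal sketch; the REPL captures whatever the command writes to stdout:
from langchain.python import PythonREPL

repl = PythonREPL()
# state persists across calls because globals/locals are reused
repl.run("x = 21")
print(repl.run("print(x * 2)"))  # prints "42"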
Agent Toolkits#
Agent toolkits.
pydantic model langchain.agents.agent_toolkits.AzureCognitiveServicesToolkit[source]#
Toolkit for Azure Cognitive Services.
get_tools() → List[langchain.tools.base.BaseTool][source]#
Get the tools in the toolkit.
pydantic model langchain.agents.agent_toolkits.FileManagementToolkit[source]#
Toolkit for interacting with local files.
field root_dir: Optional[str] = None#
If specified, all file operations are made relative to root_dir.
field selected_tools: Optional[List[str]] = None#
If provided, only the selected tools are included. Defaults to all.
get_tools() → List[langchain.tools.base.BaseTool][source]#
Get the tools in the toolkit.
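A short sketch restricting the toolkit to a sandbox directory; the tool names passed to selected_tools are assumptions based on the file tools shipped with LangChain:
from langchain.agents.agent_toolkits import FileManagementToolkit

toolkit = FileManagementToolkit(
    root_dir="/tmp/sandbox",  # all file operations stay under this directory
    selected_tools=["read_file", "write_file"],  # assumed tool names
)
tools = toolkit.get_tools()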
pydantic model langchain.agents.agent_toolkits.GmailToolkit[source]#
Toolkit for interacting with Gmail.
field api_resource: Resource [Optional]#
get_tools() → List[langchain.tools.base.BaseTool][source]#
Get the tools in the toolkit.
pydantic model langchain.agents.agent_toolkits.JiraToolkit[source]#
Jira Toolkit.
field tools: List[langchain.tools.base.BaseTool] = []#
classmethod from_jira_api_wrapper(jira_api_wrapper: langchain.utilities.jira.JiraAPIWrapper) → langchain.agents.agent_toolkits.jira.toolkit.JiraToolkit[source]#
get_tools() → List[langchain.tools.base.BaseTool][source]#
Get the tools in the toolkit.
pydantic model langchain.agents.agent_toolkits.JsonToolkit[source]#
Toolkit for interacting with a JSON spec.
field spec: langchain.tools.json.tool.JsonSpec [Required]#
get_tools() → List[langchain.tools.base.BaseTool][source]#
Get the tools in the toolkit.
pydantic model langchain.agents.agent_toolkits.NLAToolkit[source]#
Natural Language API Toolkit Definition.
field nla_tools: Sequence[langchain.agents.agent_toolkits.nla.tool.NLATool] [Required]#
List of API Endpoint Tools.
classmethod from_llm_and_ai_plugin(llm: langchain.base_language.BaseLanguageModel, ai_plugin: langchain.tools.plugin.AIPlugin, requests: Optional[langchain.requests.Requests] = None, verbose: bool = False, **kwargs: Any) → langchain.agents.agent_toolkits.nla.toolkit.NLAToolkit[source]#
Instantiate the toolkit from an AIPlugin.
classmethod from_llm_and_ai_plugin_url(llm: langchain.base_language.BaseLanguageModel, ai_plugin_url: str, requests: Optional[langchain.requests.Requests] = None, verbose: bool = False, **kwargs: Any) → langchain.agents.agent_toolkits.nla.toolkit.NLAToolkit[source]#
Instantiate the toolkit from an AIPlugin URL.
classmethod from_llm_and_spec(llm: langchain.base_language.BaseLanguageModel, spec: langchain.tools.openapi.utils.openapi_utils.OpenAPISpec, requests: Optional[langchain.requests.Requests] = None, verbose: bool = False, **kwargs: Any) → langchain.agents.agent_toolkits.nla.toolkit.NLAToolkit[source]#
Instantiate the toolkit by creating tools for each operation.
classmethod from_llm_and_url(llm: langchain.base_language.BaseLanguageModel, open_api_url: str, requests: Optional[langchain.requests.Requests] = None, verbose: bool = False, **kwargs: Any) → langchain.agents.agent_toolkits.nla.toolkit.NLAToolkit[source]#
Instantiate the toolkit from an OpenAPI Spec URL
get_tools() → List[langchain.tools.base.BaseTool][source]#
Get the tools for all the API operations.
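A hedged sketch of from_llm_and_url; the spec URL is illustrative and any reachable OpenAPI spec should work:
from langchain.agents.agent_toolkits import NLAToolkit
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
# illustrative OpenAPI spec URL
toolkit = NLAToolkit.from_llm_and_url(llm, "https://api.speak.com/openapi.yaml")
tools = toolkit.get_tools()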
pydantic model langchain.agents.agent_toolkits.OpenAPIToolkit[source]#
Toolkit for interacting with an OpenAPI API.
field json_agent: langchain.agents.agent.AgentExecutor [Required]#
field requests_wrapper: langchain.requests.TextRequestsWrapper [Required]#
classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, json_spec: langchain.tools.json.tool.JsonSpec, requests_wrapper: langchain.requests.TextRequestsWrapper, **kwargs: Any) → langchain.agents.agent_toolkits.openapi.toolkit.OpenAPIToolkit[source]#
Create a JSON agent from the LLM, then initialize the toolkit.
get_tools() → List[langchain.tools.base.BaseTool][source]#
Get the tools in the toolkit.
pydantic model langchain.agents.agent_toolkits.PlayWrightBrowserToolkit[source]#
Toolkit for web browser tools.
field async_browser: Optional['AsyncBrowser'] = None#
field sync_browser: Optional['SyncBrowser'] = None#
classmethod from_browser(sync_browser: Optional[SyncBrowser] = None, async_browser: Optional[AsyncBrowser] = None) → PlayWrightBrowserToolkit[source]#
Instantiate the toolkit.
get_tools() → List[langchain.tools.base.BaseTool][source]#
Get the tools in the toolkit.
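A minimal sketch, assuming playwright is installed (pip install playwright, then playwright install):
from playwright.sync_api import sync_playwright

from langchain.agents.agent_toolkits import PlayWrightBrowserToolkit

browser = sync_playwright().start().chromium.launch(headless=True)
toolkit = PlayWrightBrowserToolkit.from_browser(sync_browser=browser)
tools = toolkit.get_tools()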
pydantic model langchain.agents.agent_toolkits.PowerBIToolkit[source]#
Toolkit for interacting with a Power BI dataset.
field callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None#
field examples: Optional[str] = None#
field llm: langchain.base_language.BaseLanguageModel [Required]#
field max_iterations: int = 5#
field powerbi: langchain.utilities.powerbi.PowerBIDataset [Required]#
get_tools() → List[langchain.tools.base.BaseTool][source]#
Get the tools in the toolkit.
pydantic model langchain.agents.agent_toolkits.SQLDatabaseToolkit[source]#
Toolkit for interacting with SQL databases.
field db: langchain.sql_database.SQLDatabase [Required]#
field llm: langchain.base_language.BaseLanguageModel [Required]#
get_tools() → List[langchain.tools.base.BaseTool][source]#
Get the tools in the toolkit.
property dialect: str#
Return string representation of dialect to use.
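A minimal sketch against a local SQLite file; the path and LLM choice are illustrative:
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.llms import OpenAI
from langchain.sql_database import SQLDatabase

db = SQLDatabase.from_uri("sqlite:///example.db")  # illustrative path
toolkit = SQLDatabaseToolkit(db=db, llm=OpenAI(temperature=0))
tools = toolkit.get_tools()
print(toolkit.dialect)  # e.g. "sqlite"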
pydantic model langchain.agents.agent_toolkits.SparkSQLToolkit[source]#
Toolkit for interacting with Spark SQL.
field db: langchain.utilities.spark_sql.SparkSQL [Required]#
field llm: langchain.base_language.BaseLanguageModel [Required]#
get_tools() → List[langchain.tools.base.BaseTool][source]#
Get the tools in the toolkit.
pydantic model langchain.agents.agent_toolkits.VectorStoreInfo[source]#
Information about a vectorstore.
field description: str [Required]#
field name: str [Required]#
field vectorstore: langchain.vectorstores.base.VectorStore [Required]#
pydantic model langchain.agents.agent_toolkits.VectorStoreRouterToolkit[source]#
Toolkit for routing between vectorstores.
field llm: langchain.base_language.BaseLanguageModel [Optional]#
field vectorstores: List[langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreInfo] [Required]#
get_tools() → List[langchain.tools.base.BaseTool][source]#
Get the tools in the toolkit.
|
https://python.langchain.com/en/latest/reference/modules/agent_toolkits.html
|
884d79fd4972-4
|
Get the tools in the toolkit.
pydantic model langchain.agents.agent_toolkits.VectorStoreToolkit[source]#
Toolkit for interacting with a vector store.
field llm: langchain.base_language.BaseLanguageModel [Optional]#
field vectorstore_info: langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreInfo [Required]#
get_tools() → List[langchain.tools.base.BaseTool][source]#
Get the tools in the toolkit.
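A sketch wiring a VectorStoreInfo into the toolkit; my_vectorstore stands in for any initialized VectorStore, and the name and description are illustrative:
from langchain.agents.agent_toolkits import VectorStoreInfo, VectorStoreToolkit
from langchain.llms import OpenAI

info = VectorStoreInfo(
    name="company_docs",
    description="Internal documentation; useful for questions about policies",
    vectorstore=my_vectorstore,  # placeholder for an initialized VectorStore
)
toolkit = VectorStoreToolkit(vectorstore_info=info, llm=OpenAI(temperature=0))
tools = toolkit.get_tools()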
pydantic model langchain.agents.agent_toolkits.ZapierToolkit[source]#
Zapier Toolkit.
field tools: List[langchain.tools.base.BaseTool] = []#
classmethod from_zapier_nla_wrapper(zapier_nla_wrapper: langchain.utilities.zapier.ZapierNLAWrapper) → langchain.agents.agent_toolkits.zapier.toolkit.ZapierToolkit[source]#
Create a toolkit from a ZapierNLAWrapper.
get_tools() → List[langchain.tools.base.BaseTool][source]#
Get the tools in the toolkit.
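A short sketch; ZapierNLAWrapper reads its API key from the ZAPIER_NLA_API_KEY environment variable:
from langchain.agents.agent_toolkits import ZapierToolkit
from langchain.utilities.zapier import ZapierNLAWrapper

# assumes ZAPIER_NLA_API_KEY is set in the environment
toolkit = ZapierToolkit.from_zapier_nla_wrapper(ZapierNLAWrapper())
tools = toolkit.get_tools()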
langchain.agents.agent_toolkits.create_csv_agent(llm: langchain.base_language.BaseLanguageModel, path: Union[str, List[str]], pandas_kwargs: Optional[dict] = None, **kwargs: Any) → langchain.agents.agent.AgentExecutor[source]#
Create a CSV agent by loading the file into a dataframe and using the pandas agent.
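A minimal sketch; "titanic.csv" is a placeholder path:
from langchain.agents.agent_toolkits import create_csv_agent
from langchain.llms import OpenAI

agent = create_csv_agent(OpenAI(temperature=0), "titanic.csv")  # placeholder path
agent.run("How many rows are in the file?")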
langchain.agents.agent_toolkits.create_json_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.json.toolkit.JsonToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to interact with JSON.\nYour goal is to return a final answer by interacting with the JSON.\nYou have access to the following tools which help you learn more about the JSON you are interacting with.\nOnly use the below tools. Only use the information returned by the below tools to construct your final answer.\nDo not make up any information that is not contained in the JSON.\nYour input to the tools should be in the form of `data["key"][0]` where `data` is the JSON blob you are interacting with, and the syntax used is Python. \nYou should only use keys that you know for a fact exist. You must validate that a key exists by seeing it previously when calling `json_spec_list_keys`. \nIf you have not seen a key in one of those responses, you cannot use it.\nYou should only add one key
at a time to the path. You cannot add multiple keys at once.\nIf you encounter a "KeyError", go back to the previous key, look at the available keys, and try again.\n\nIf the question does not seem to be related to the JSON, just return "I don\'t know" as the answer.\nAlways begin your interaction with the `json_spec_list_keys` tool with input "data" to see what keys exist in the JSON.\n\nNote that sometimes the value at a given path is large. In this case, you will get an error "Value is a large dictionary, should explore its keys directly".\nIn this case, you should ALWAYS follow up by using the `json_spec_list_keys` tool to see what keys exist at that path.\nDo not simply refer the user to the JSON or a section of the JSON, as this is not a valid answer. Keep digging until you find the answer and explicitly return it.\n', suffix: str = 'Begin!"\n\nQuestion: {input}\nThought: