Source code for langchain.memory.vectorstore
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/vectorstore.html)

"""Class for a VectorStore-backed memory object."""
from typing import Any, Dict, List, Optional, Union

from pydantic import Field

from langchain.memory.chat_memory import BaseMemory
from langchain.memory.utils import get_prompt_input_key
from langchain.schema import Document
from langchain.vectorstores.base import VectorStoreRetriever


class VectorStoreRetrieverMemory(BaseMemory):
    """Class for a VectorStore-backed memory object."""

    retriever: VectorStoreRetriever = Field(exclude=True)
    """VectorStoreRetriever object to connect to."""

    memory_key: str = "history"  #: :meta private:
    """Key name to locate the memories in the result of load_memory_variables."""

    input_key: Optional[str] = None
    """Key name to index the inputs to load_memory_variables."""

    return_docs: bool = False
    """Whether or not to return the result of querying the database directly."""

    @property
    def memory_variables(self) -> List[str]:
        """The list of keys emitted from the load_memory_variables method."""
        return [self.memory_key]

    def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
        """Get the input key for the prompt."""
        if self.input_key is None:
            return get_prompt_input_key(inputs, self.memory_variables)
        return self.input_key

    def load_memory_variables(
        self, inputs: Dict[str, Any]
    ) -> Dict[str, Union[List[Document], str]]:
        """Return history buffer."""
        input_key = self._get_prompt_input_key(inputs)
        query = inputs[input_key]
        docs = self.retriever.get_relevant_documents(query)
        result: Union[List[Document], str]
        if not self.return_docs:
            result = "\n".join([doc.page_content for doc in docs])
        else:
            result = docs
        return {self.memory_key: result}

    def _form_documents(
        self, inputs: Dict[str, Any], outputs: Dict[str, str]
    ) -> List[Document]:
        """Format context from this conversation to buffer."""
        # Each document should only include the current turn, not the chat history
        filtered_inputs = {k: v for k, v in inputs.items() if k != self.memory_key}
        texts = [
            f"{k}: {v}"
            for k, v in list(filtered_inputs.items()) + list(outputs.items())
        ]
        page_content = "\n".join(texts)
        return [Document(page_content=page_content)]

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        documents = self._form_documents(inputs, outputs)
        self.retriever.add_documents(documents)

    def clear(self) -> None:
        """Nothing to clear."""
Source code for langchain.memory.buffer
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/buffer.html)

from typing import Any, Dict, List, Optional

from pydantic import root_validator

from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
from langchain.memory.utils import get_prompt_input_key
from langchain.schema import get_buffer_string


class ConversationBufferMemory(BaseChatMemory):
    """Buffer for storing conversation memory."""

    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    memory_key: str = "history"  #: :meta private:

    @property
    def buffer(self) -> Any:
        """String buffer of memory."""
        if self.return_messages:
            return self.chat_memory.messages
        else:
            return get_buffer_string(
                self.chat_memory.messages,
                human_prefix=self.human_prefix,
                ai_prefix=self.ai_prefix,
            )

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer."""
        return {self.memory_key: self.buffer}


class ConversationStringBufferMemory(BaseMemory):
    """Buffer for storing conversation memory."""

    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    """Prefix to use for AI generated responses."""
    buffer: str = ""
    output_key: Optional[str] = None
    input_key: Optional[str] = None
    memory_key: str = "history"  #: :meta private:

    @root_validator()
    def validate_chains(cls, values: Dict) -> Dict:
        """Validate that return_messages is not True."""
        if values.get("return_messages", False):
            raise ValueError(
                "return_messages must be False for ConversationStringBufferMemory"
            )
        return values

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return history buffer."""
        return {self.memory_key: self.buffer}

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        if self.input_key is None:
            prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
        else:
            prompt_input_key = self.input_key
        if self.output_key is None:
            if len(outputs) != 1:
                raise ValueError(f"One output key expected, got {outputs.keys()}")
            output_key = list(outputs.keys())[0]
        else:
            output_key = self.output_key
        human = f"{self.human_prefix}: " + inputs[prompt_input_key]
        ai = f"{self.ai_prefix}: " + outputs[output_key]
        self.buffer += "\n" + "\n".join([human, ai])

    def clear(self) -> None:
        """Clear memory contents."""
        self.buffer = ""
Source code for langchain.memory.summary
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/summary.html)

from __future__ import annotations

from typing import Any, Dict, List, Type

from pydantic import BaseModel, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import SUMMARY_PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
    SystemMessage,
    get_buffer_string,
)


class SummarizerMixin(BaseModel):
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    llm: BaseLanguageModel
    prompt: BasePromptTemplate = SUMMARY_PROMPT
    summary_message_cls: Type[BaseMessage] = SystemMessage

    def predict_new_summary(
        self, messages: List[BaseMessage], existing_summary: str
    ) -> str:
        new_lines = get_buffer_string(
            messages,
            human_prefix=self.human_prefix,
            ai_prefix=self.ai_prefix,
        )
        chain = LLMChain(llm=self.llm, prompt=self.prompt)
        return chain.predict(summary=existing_summary, new_lines=new_lines)


class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
    """Conversation summarizer to memory."""

    buffer: str = ""
    memory_key: str = "history"  #: :meta private:

    @classmethod
    def from_messages(
        cls,
        llm: BaseLanguageModel,
        chat_memory: BaseChatMessageHistory,
        *,
        summarize_step: int = 2,
        **kwargs: Any,
    ) -> ConversationSummaryMemory:
        obj = cls(llm=llm, chat_memory=chat_memory, **kwargs)
        for i in range(0, len(obj.chat_memory.messages), summarize_step):
            obj.buffer = obj.predict_new_summary(
                obj.chat_memory.messages[i : i + summarize_step], obj.buffer
            )
        return obj

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer."""
        if self.return_messages:
            buffer: Any = [self.summary_message_cls(content=self.buffer)]
        else:
            buffer = self.buffer
        return {self.memory_key: buffer}

    @root_validator()
    def validate_prompt_input_variables(cls, values: Dict) -> Dict:
        """Validate that prompt input variables are consistent."""
        prompt_variables = values["prompt"].input_variables
        expected_keys = {"summary", "new_lines"}
        if expected_keys != set(prompt_variables):
            raise ValueError(
                "Got unexpected prompt input variables. The prompt expects "
                f"{prompt_variables}, but it should have {expected_keys}."
            )
        return values

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        super().save_context(inputs, outputs)
        self.buffer = self.predict_new_summary(
            self.chat_memory.messages[-2:], self.buffer
        )

    def clear(self) -> None:
        """Clear memory contents."""
        super().clear()
        self.buffer = ""
Source code for langchain.memory.combined
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/combined.html)

import warnings
from typing import Any, Dict, List, Set

from pydantic import validator

from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema import BaseMemory


class CombinedMemory(BaseMemory):
    """Class for combining multiple memories' data together."""

    memories: List[BaseMemory]
    """For tracking all the memories that should be accessed."""

    @validator("memories")
    def check_repeated_memory_variable(
        cls, value: List[BaseMemory]
    ) -> List[BaseMemory]:
        all_variables: Set[str] = set()
        for val in value:
            overlap = all_variables.intersection(val.memory_variables)
            if overlap:
                raise ValueError(
                    f"The same variables {overlap} are found in multiple "
                    "memory objects, which is not allowed by CombinedMemory."
                )
            all_variables |= set(val.memory_variables)
        return value

    @validator("memories")
    def check_input_key(cls, value: List[BaseMemory]) -> List[BaseMemory]:
        """Check that if memories are of type BaseChatMemory that input keys exist."""
        for val in value:
            if isinstance(val, BaseChatMemory):
                if val.input_key is None:
                    warnings.warn(
                        "When using CombinedMemory, "
                        "input keys should be set so the input is known. "
                        f"input_key was not set on {val}"
                    )
        return value

    @property
    def memory_variables(self) -> List[str]:
        """All the memory variables provided by this instance, collected
        from all the linked memories."""
        memory_variables = []
        for memory in self.memories:
            memory_variables.extend(memory.memory_variables)
        return memory_variables

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Load all vars from sub-memories."""
        memory_data: Dict[str, Any] = {}
        # Collect vars from all sub-memories
        for memory in self.memories:
            data = memory.load_memory_variables(inputs)
            memory_data = {
                **memory_data,
                **data,
            }
        return memory_data

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this session for every memory."""
        # Save context for all sub-memories
        for memory in self.memories:
            memory.save_context(inputs, outputs)

    def clear(self) -> None:
        """Clear context from this session for every memory."""
        for memory in self.memories:
            memory.clear()
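
A sketch combining two memories under distinct keys (setting input_key avoids the warning above):

    from langchain.memory.buffer import ConversationBufferMemory
    from langchain.memory.buffer_window import ConversationBufferWindowMemory
    from langchain.memory.combined import CombinedMemory

    full = ConversationBufferMemory(memory_key="full_history", input_key="input")
    recent = ConversationBufferWindowMemory(
        memory_key="recent_history", input_key="input", k=2
    )
    memory = CombinedMemory(memories=[full, recent])

    memory.save_context({"input": "hi"}, {"output": "hello!"})
    print(sorted(memory.load_memory_variables({})))  # ['full_history', 'recent_history']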
Source code for langchain.memory.buffer_window
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/buffer_window.html)

from typing import Any, Dict, List

from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema import BaseMessage, get_buffer_string


class ConversationBufferWindowMemory(BaseChatMemory):
    """Buffer for storing conversation memory."""

    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    memory_key: str = "history"  #: :meta private:
    k: int = 5

    @property
    def buffer(self) -> List[BaseMessage]:
        """String buffer of memory."""
        return self.chat_memory.messages

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return history buffer."""
        # Keep at most the last k exchanges (k human/AI message pairs).
        buffer: Any = self.buffer[-self.k * 2 :] if self.k > 0 else []
        if not self.return_messages:
            buffer = get_buffer_string(
                buffer,
                human_prefix=self.human_prefix,
                ai_prefix=self.ai_prefix,
            )
        return {self.memory_key: buffer}
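
A sketch showing the window in action with k=1:

    from langchain.memory.buffer_window import ConversationBufferWindowMemory

    memory = ConversationBufferWindowMemory(k=1)
    memory.save_context({"input": "hi"}, {"output": "hello!"})
    memory.save_context({"input": "how are you?"}, {"output": "great"})
    # Only the last k exchanges are rendered.
    print(memory.load_memory_variables({}))  # {'history': 'Human: how are you?\nAI: great'}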
Source code for langchain.memory.simple
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/simple.html)

from typing import Any, Dict, List

from langchain.schema import BaseMemory


class SimpleMemory(BaseMemory):
    """Simple memory for storing context or other bits of information that
    shouldn't ever change between prompts.
    """

    memories: Dict[str, Any] = dict()

    @property
    def memory_variables(self) -> List[str]:
        return list(self.memories.keys())

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        return self.memories

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Nothing should be saved or changed, my memory is set in stone."""
        pass

    def clear(self) -> None:
        """Nothing to clear, got a memory like a vault."""
        pass
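
A sketch of pinning unchanging context:

    from langchain.memory.simple import SimpleMemory

    memory = SimpleMemory(memories={"team": "LangChain"})
    memory.save_context({"input": "hi"}, {"output": "hello!"})  # a no-op by design
    print(memory.load_memory_variables({}))  # {'team': 'LangChain'}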
Source code for langchain.memory.readonly
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/readonly.html)

from typing import Any, Dict, List

from langchain.schema import BaseMemory


class ReadOnlySharedMemory(BaseMemory):
    """A memory wrapper that is read-only and cannot be changed."""

    memory: BaseMemory

    @property
    def memory_variables(self) -> List[str]:
        """Return memory variables."""
        return self.memory.memory_variables

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Load memory variables from memory."""
        return self.memory.load_memory_variables(inputs)

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Nothing should be saved or changed."""
        pass

    def clear(self) -> None:
        """Nothing to clear, got a memory like a vault."""
        pass
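
A sketch of sharing one memory read-only:

    from langchain.memory.buffer import ConversationBufferMemory
    from langchain.memory.readonly import ReadOnlySharedMemory

    inner = ConversationBufferMemory()
    inner.save_context({"input": "hi"}, {"output": "hello!"})
    shared = ReadOnlySharedMemory(memory=inner)
    shared.save_context({"input": "x"}, {"output": "y"})  # a no-op by design
    print(shared.load_memory_variables({}))  # still only the original exchange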
Source code for langchain.memory.kg
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/kg.html)

from typing import Any, Dict, List, Type, Union

from pydantic import Field

from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.graphs import NetworkxEntityGraph
from langchain.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
    ENTITY_EXTRACTION_PROMPT,
    KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import (
    BaseMessage,
    SystemMessage,
    get_buffer_string,
)


class ConversationKGMemory(BaseChatMemory):
    """Knowledge graph memory for storing conversation memory.

    Integrates with external knowledge graph to store and retrieve
    information about knowledge triples in the conversation.
    """

    k: int = 2
    """Number of previous utterances to include in the context."""
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph)
    knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
    entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
    llm: BaseLanguageModel
    summary_message_cls: Type[BaseMessage] = SystemMessage
    memory_key: str = "history"  #: :meta private:

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer."""
        entities = self._get_current_entities(inputs)
        summary_strings = []
        for entity in entities:
            knowledge = self.kg.get_entity_knowledge(entity)
            if knowledge:
                summary = f"On {entity}: {'. '.join(knowledge)}."
                summary_strings.append(summary)
        context: Union[str, List]
        if not summary_strings:
            context = [] if self.return_messages else ""
        elif self.return_messages:
            context = [
                self.summary_message_cls(content=text) for text in summary_strings
            ]
        else:
            context = "\n".join(summary_strings)
        return {self.memory_key: context}

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
        """Get the input key for the prompt."""
        if self.input_key is None:
            return get_prompt_input_key(inputs, self.memory_variables)
        return self.input_key

    def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str:
        """Get the output key for the prompt."""
        if self.output_key is None:
            if len(outputs) != 1:
                raise ValueError(f"One output key expected, got {outputs.keys()}")
            return list(outputs.keys())[0]
        return self.output_key

    def get_current_entities(self, input_string: str) -> List[str]:
        chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
        buffer_string = get_buffer_string(
            self.chat_memory.messages[-self.k * 2 :],
            human_prefix=self.human_prefix,
            ai_prefix=self.ai_prefix,
        )
        output = chain.predict(
            history=buffer_string,
            input=input_string,
        )
        return get_entities(output)

    def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]:
        """Get the current entities in the conversation."""
        prompt_input_key = self._get_prompt_input_key(inputs)
        return self.get_current_entities(inputs[prompt_input_key])

    def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]:
        chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt)
        buffer_string = get_buffer_string(
            self.chat_memory.messages[-self.k * 2 :],
            human_prefix=self.human_prefix,
            ai_prefix=self.ai_prefix,
        )
        output = chain.predict(
            history=buffer_string,
            input=input_string,
            verbose=True,
        )
        knowledge = parse_triples(output)
        return knowledge

    def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None:
        """Get and update knowledge graph from the conversation history."""
        prompt_input_key = self._get_prompt_input_key(inputs)
        knowledge = self.get_knowledge_triplets(inputs[prompt_input_key])
        for triple in knowledge:
            self.kg.add_triple(triple)

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        super().save_context(inputs, outputs)
        self._get_and_update_kg(inputs)

    def clear(self) -> None:
        """Clear memory contents."""
        super().clear()
        self.kg.clear()
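
A usage sketch (assumes an OpenAI API key; any BaseLanguageModel works):

    from langchain.llms import OpenAI
    from langchain.memory.kg import ConversationKGMemory

    memory = ConversationKGMemory(llm=OpenAI(temperature=0))
    # Triples extracted from the input are added to the graph...
    memory.save_context({"input": "Sam is my friend"}, {"output": "Okay, noted."})
    # ...and retrieved later via the entities mentioned in the query.
    print(memory.load_memory_variables({"input": "who is Sam?"}))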
Source code for langchain.memory.summary_buffer
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/summary_buffer.html)

from typing import Any, Dict, List

from pydantic import root_validator

from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.summary import SummarizerMixin
from langchain.schema import BaseMessage, get_buffer_string


class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
    """Buffer with summarizer for storing conversation memory."""

    max_token_limit: int = 2000
    moving_summary_buffer: str = ""
    memory_key: str = "history"

    @property
    def buffer(self) -> List[BaseMessage]:
        return self.chat_memory.messages

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer."""
        buffer = self.buffer
        if self.moving_summary_buffer != "":
            first_messages: List[BaseMessage] = [
                self.summary_message_cls(content=self.moving_summary_buffer)
            ]
            buffer = first_messages + buffer
        if self.return_messages:
            final_buffer: Any = buffer
        else:
            final_buffer = get_buffer_string(
                buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix
            )
        return {self.memory_key: final_buffer}

    @root_validator()
    def validate_prompt_input_variables(cls, values: Dict) -> Dict:
        """Validate that prompt input variables are consistent."""
        prompt_variables = values["prompt"].input_variables
        expected_keys = {"summary", "new_lines"}
        if expected_keys != set(prompt_variables):
            raise ValueError(
                "Got unexpected prompt input variables. The prompt expects "
                f"{prompt_variables}, but it should have {expected_keys}."
            )
        return values

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        super().save_context(inputs, outputs)
        self.prune()

    def prune(self) -> None:
        """Prune buffer if it exceeds max token limit."""
        buffer = self.chat_memory.messages
        curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
        if curr_buffer_length > self.max_token_limit:
            pruned_memory = []
            while curr_buffer_length > self.max_token_limit:
                pruned_memory.append(buffer.pop(0))
                curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
            self.moving_summary_buffer = self.predict_new_summary(
                pruned_memory, self.moving_summary_buffer
            )

    def clear(self) -> None:
        """Clear memory contents."""
        super().clear()
        self.moving_summary_buffer = ""
Source code for langchain.memory.chat_message_histories.redis
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/redis.html)

import json
import logging
from typing import List, Optional

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
    _message_to_dict,
    messages_from_dict,
)

logger = logging.getLogger(__name__)


class RedisChatMessageHistory(BaseChatMessageHistory):
    def __init__(
        self,
        session_id: str,
        url: str = "redis://localhost:6379/0",
        key_prefix: str = "message_store:",
        ttl: Optional[int] = None,
    ):
        try:
            import redis
        except ImportError:
            raise ImportError(
                "Could not import redis python package. "
                "Please install it with `pip install redis`."
            )

        try:
            self.redis_client = redis.Redis.from_url(url=url)
        except redis.exceptions.ConnectionError as error:
            logger.error(error)

        self.session_id = session_id
        self.key_prefix = key_prefix
        self.ttl = ttl

    @property
    def key(self) -> str:
        """Construct the record key to use"""
        return self.key_prefix + self.session_id

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve the messages from Redis"""
        # Messages are stored newest-first via LPUSH, so reverse on read.
        _items = self.redis_client.lrange(self.key, 0, -1)
        items = [json.loads(m.decode("utf-8")) for m in _items[::-1]]
        messages = messages_from_dict(items)
        return messages

    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in Redis"""
        self.redis_client.lpush(self.key, json.dumps(_message_to_dict(message)))
        if self.ttl:
            self.redis_client.expire(self.key, self.ttl)

    def clear(self) -> None:
        """Clear session memory from Redis"""
        self.redis_client.delete(self.key)
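
A sketch (assumes a Redis server on localhost:6379; the session id is illustrative):

    from langchain.memory.chat_message_histories.redis import RedisChatMessageHistory

    history = RedisChatMessageHistory(session_id="demo-session")
    history.add_user_message("hi")
    history.add_ai_message("hello!")
    print(history.messages)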
Source code for langchain.memory.chat_message_histories.in_memory
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/in_memory.html)

from typing import List

from pydantic import BaseModel

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
)


class ChatMessageHistory(BaseChatMessageHistory, BaseModel):
    messages: List[BaseMessage] = []

    def add_message(self, message: BaseMessage) -> None:
        """Add a self-created message to the store"""
        self.messages.append(message)

    def clear(self) -> None:
        self.messages = []
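
A sketch of the simplest history backend:

    from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory

    history = ChatMessageHistory()
    history.add_user_message("hi")
    history.add_ai_message("hello!")
    print(history.messages)  # [HumanMessage(...), AIMessage(...)]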
Source code for langchain.memory.chat_message_histories.cassandra
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/cassandra.html)

import json
import logging
from typing import List

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
    _message_to_dict,
    messages_from_dict,
)

logger = logging.getLogger(__name__)

DEFAULT_KEYSPACE_NAME = "chat_history"
DEFAULT_TABLE_NAME = "message_store"
DEFAULT_USERNAME = "cassandra"
DEFAULT_PASSWORD = "cassandra"
DEFAULT_PORT = 9042


class CassandraChatMessageHistory(BaseChatMessageHistory):
    """Chat message history that stores history in Cassandra.

    Args:
        contact_points: list of ips to connect to Cassandra cluster
        session_id: arbitrary key that is used to store the messages
            of a single chat session.
        port: port to connect to Cassandra cluster
        username: username to connect to Cassandra cluster
        password: password to connect to Cassandra cluster
        keyspace_name: name of the keyspace to use
        table_name: name of the table to use
    """

    def __init__(
        self,
        contact_points: List[str],
        session_id: str,
        port: int = DEFAULT_PORT,
        username: str = DEFAULT_USERNAME,
        password: str = DEFAULT_PASSWORD,
        keyspace_name: str = DEFAULT_KEYSPACE_NAME,
        table_name: str = DEFAULT_TABLE_NAME,
    ):
        self.contact_points = contact_points
        self.session_id = session_id
        self.port = port
        self.username = username
        self.password = password
        self.keyspace_name = keyspace_name
        self.table_name = table_name

        try:
            from cassandra import (
                AuthenticationFailed,
                OperationTimedOut,
                UnresolvableContactPoints,
            )

            # PlainTextAuthProvider lives in cassandra.auth, not cassandra.cluster.
            from cassandra.auth import PlainTextAuthProvider
            from cassandra.cluster import Cluster
        except ImportError:
            raise ValueError(
                "Could not import cassandra-driver python package. "
                "Please install it with `pip install cassandra-driver`."
            )

        self.cluster: Cluster = Cluster(
            contact_points,
            port=port,
            auth_provider=PlainTextAuthProvider(
                username=self.username, password=self.password
            ),
        )

        try:
            self.session = self.cluster.connect()
        except (
            AuthenticationFailed,
            UnresolvableContactPoints,
            OperationTimedOut,
        ) as error:
            logger.error(
                "Unable to establish connection with "
                "cassandra chat message history database"
            )
            raise error

        self._prepare_cassandra()

    def _prepare_cassandra(self) -> None:
        """Create the keyspace and table if they don't exist yet"""
        from cassandra import OperationTimedOut, Unavailable

        try:
            self.session.execute(
                f"""CREATE KEYSPACE IF NOT EXISTS
                {self.keyspace_name} WITH REPLICATION =
                {{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }};"""
            )
        except (OperationTimedOut, Unavailable) as error:
            logger.error(
                f"Unable to create cassandra "
                f"chat message history keyspace: {self.keyspace_name}."
            )
            raise error

        self.session.set_keyspace(self.keyspace_name)

        try:
            self.session.execute(
                f"""CREATE TABLE IF NOT EXISTS
                {self.table_name} (id UUID, session_id varchar,
                history text, PRIMARY KEY ((session_id), id) );"""
            )
        except (OperationTimedOut, Unavailable) as error:
            logger.error(
                f"Unable to create cassandra "
                f"chat message history table: {self.table_name}"
            )
            raise error

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve the messages from Cassandra"""
        from cassandra import ReadFailure, ReadTimeout, Unavailable

        try:
            rows = self.session.execute(
                f"""SELECT * FROM {self.table_name}
                WHERE session_id = '{self.session_id}' ;"""
            )
        except (Unavailable, ReadTimeout, ReadFailure) as error:
            logger.error("Unable to retrieve chat history messages from Cassandra")
            raise error

        if rows:
            items = [json.loads(row.history) for row in rows]
        else:
            items = []

        messages = messages_from_dict(items)
        return messages

    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in Cassandra"""
        import uuid

        from cassandra import Unavailable, WriteFailure, WriteTimeout

        try:
            # Insert into the configured table rather than a hardcoded name.
            self.session.execute(
                f"""INSERT INTO {self.table_name}
                (id, session_id, history) VALUES (%s, %s, %s);""",
                (uuid.uuid4(), self.session_id, json.dumps(_message_to_dict(message))),
            )
        except (Unavailable, WriteTimeout, WriteFailure) as error:
            logger.error("Unable to write chat history messages to Cassandra")
            raise error

    def clear(self) -> None:
        """Clear session memory from Cassandra"""
        from cassandra import OperationTimedOut, Unavailable

        try:
            self.session.execute(
                f"DELETE FROM {self.table_name} WHERE session_id = '{self.session_id}';"
            )
        except (Unavailable, OperationTimedOut) as error:
            logger.error("Unable to clear chat history messages from Cassandra")
            raise error

    def __del__(self) -> None:
        if self.session:
            self.session.shutdown()
        if self.cluster:
            self.cluster.shutdown()
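
A sketch (assumes a Cassandra cluster reachable on 127.0.0.1 with the default credentials above; the session id is illustrative):

    from langchain.memory.chat_message_histories.cassandra import (
        CassandraChatMessageHistory,
    )

    history = CassandraChatMessageHistory(
        contact_points=["127.0.0.1"], session_id="demo-session"
    )
    history.add_user_message("hi")
    print(history.messages)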
Source code for langchain.memory.chat_message_histories.file
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/file.html)

import json
import logging
from pathlib import Path
from typing import List

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
    messages_from_dict,
    messages_to_dict,
)

logger = logging.getLogger(__name__)


class FileChatMessageHistory(BaseChatMessageHistory):
    """
    Chat message history that stores history in a local file.

    Args:
        file_path: path of the local file to store the messages.
    """

    def __init__(self, file_path: str):
        self.file_path = Path(file_path)
        if not self.file_path.exists():
            self.file_path.touch()
            self.file_path.write_text(json.dumps([]))

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve the messages from the local file"""
        items = json.loads(self.file_path.read_text())
        messages = messages_from_dict(items)
        return messages

    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in the local file"""
        messages = messages_to_dict(self.messages)
        messages.append(messages_to_dict([message])[0])
        self.file_path.write_text(json.dumps(messages))

    def clear(self) -> None:
        """Clear session memory from the local file"""
        self.file_path.write_text(json.dumps([]))
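
A sketch persisting a session to a local JSON file (the path is illustrative):

    from langchain.memory.chat_message_histories.file import FileChatMessageHistory

    history = FileChatMessageHistory("demo_messages.json")
    history.add_user_message("hi")
    history.add_ai_message("hello!")
    print(history.messages)  # survives process restarts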
Source code for langchain.memory.chat_message_histories.mongodb
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/mongodb.html)

import json
import logging
from typing import List

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
    _message_to_dict,
    messages_from_dict,
)

logger = logging.getLogger(__name__)

DEFAULT_DBNAME = "chat_history"
DEFAULT_COLLECTION_NAME = "message_store"


class MongoDBChatMessageHistory(BaseChatMessageHistory):
    """Chat message history that stores history in MongoDB.

    Args:
        connection_string: connection string to connect to MongoDB
        session_id: arbitrary key that is used to store the messages
            of a single chat session.
        database_name: name of the database to use
        collection_name: name of the collection to use
    """

    def __init__(
        self,
        connection_string: str,
        session_id: str,
        database_name: str = DEFAULT_DBNAME,
        collection_name: str = DEFAULT_COLLECTION_NAME,
    ):
        from pymongo import MongoClient, errors

        self.connection_string = connection_string
        self.session_id = session_id
        self.database_name = database_name
        self.collection_name = collection_name

        try:
            self.client: MongoClient = MongoClient(connection_string)
        except errors.ConnectionFailure as error:
            logger.error(error)

        self.db = self.client[database_name]
        self.collection = self.db[collection_name]
        self.collection.create_index("SessionId")

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve the messages from MongoDB"""
        from pymongo import errors

        # Initialize cursor so a failed query leaves it defined.
        cursor = None
        try:
            cursor = self.collection.find({"SessionId": self.session_id})
        except errors.OperationFailure as error:
            logger.error(error)

        if cursor:
            items = [json.loads(document["History"]) for document in cursor]
        else:
            items = []

        messages = messages_from_dict(items)
        return messages

    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in MongoDB"""
        from pymongo import errors

        try:
            self.collection.insert_one(
                {
                    "SessionId": self.session_id,
                    "History": json.dumps(_message_to_dict(message)),
                }
            )
        except errors.WriteError as err:
            logger.error(err)

    def clear(self) -> None:
        """Clear session memory from MongoDB"""
        from pymongo import errors

        try:
            self.collection.delete_many({"SessionId": self.session_id})
        except errors.WriteError as err:
            logger.error(err)
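
A sketch (assumes a MongoDB instance on localhost; names are illustrative):

    from langchain.memory.chat_message_histories.mongodb import MongoDBChatMessageHistory

    history = MongoDBChatMessageHistory(
        connection_string="mongodb://localhost:27017", session_id="demo-session"
    )
    history.add_user_message("hi")
    print(history.messages)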
Source code for langchain.memory.chat_message_histories.momento
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/momento.html)

from __future__ import annotations

import json
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Optional

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
    _message_to_dict,
    messages_from_dict,
)
from langchain.utils import get_from_env

if TYPE_CHECKING:
    import momento


def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None:
    """Create cache if it doesn't exist.

    Raises:
        SdkException: Momento service or network error
        Exception: Unexpected response
    """
    from momento.responses import CreateCache

    create_cache_response = cache_client.create_cache(cache_name)
    if isinstance(create_cache_response, CreateCache.Success) or isinstance(
        create_cache_response, CreateCache.CacheAlreadyExists
    ):
        return None
    elif isinstance(create_cache_response, CreateCache.Error):
        raise create_cache_response.inner_exception
    else:
        raise Exception(f"Unexpected response cache creation: {create_cache_response}")


class MomentoChatMessageHistory(BaseChatMessageHistory):
    """Chat message history cache that uses Momento as a backend.

    See https://gomomento.com/"""

    def __init__(
        self,
        session_id: str,
        cache_client: momento.CacheClient,
        cache_name: str,
        *,
        key_prefix: str = "message_store:",
        ttl: Optional[timedelta] = None,
        ensure_cache_exists: bool = True,
    ):
        """Instantiate a chat message history cache that uses Momento as a backend.

        Note: to instantiate the cache client passed to MomentoChatMessageHistory,
        you must have a Momento account at https://gomomento.com/.

        Args:
            session_id (str): The session ID to use for this chat session.
            cache_client (CacheClient): The Momento cache client.
            cache_name (str): The name of the cache to use to store the messages.
            key_prefix (str, optional): The prefix to apply to the cache key.
                Defaults to "message_store:".
            ttl (Optional[timedelta], optional): The TTL to use for the messages.
                Defaults to None, ie the default TTL of the cache will be used.
            ensure_cache_exists (bool, optional): Create the cache if it doesn't exist.
                Defaults to True.

        Raises:
            ImportError: Momento python package is not installed.
            TypeError: cache_client is not of type momento.CacheClientObject
        """
        try:
            from momento import CacheClient
            from momento.requests import CollectionTtl
        except ImportError:
            raise ImportError(
                "Could not import momento python package. "
                "Please install it with `pip install momento`."
            )
        if not isinstance(cache_client, CacheClient):
            raise TypeError("cache_client must be a momento.CacheClient object.")
        if ensure_cache_exists:
            _ensure_cache_exists(cache_client, cache_name)
        self.key = key_prefix + session_id
        self.cache_client = cache_client
        self.cache_name = cache_name
        if ttl is not None:
            self.ttl = CollectionTtl.of(ttl)
        else:
            self.ttl = CollectionTtl.from_cache_ttl()

    @classmethod
    def from_client_params(
        cls,
        session_id: str,
        cache_name: str,
        ttl: timedelta,
        *,
        configuration: Optional[momento.config.Configuration] = None,
        auth_token: Optional[str] = None,
        **kwargs: Any,
    ) -> MomentoChatMessageHistory:
        """Construct cache from CacheClient parameters."""
        try:
            from momento import CacheClient, Configurations, CredentialProvider
        except ImportError:
            raise ImportError(
                "Could not import momento python package. "
                "Please install it with `pip install momento`."
            )
        if configuration is None:
            configuration = Configurations.Laptop.v1()
        auth_token = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN")
        credentials = CredentialProvider.from_string(auth_token)
        cache_client = CacheClient(configuration, credentials, default_ttl=ttl)
        return cls(session_id, cache_client, cache_name, ttl=ttl, **kwargs)

    @property
    def messages(self) -> list[BaseMessage]:  # type: ignore[override]
        """Retrieve the messages from Momento.

        Raises:
            SdkException: Momento service or network error
            Exception: Unexpected response

        Returns:
            list[BaseMessage]: List of cached messages
        """
        from momento.responses import CacheListFetch

        fetch_response = self.cache_client.list_fetch(self.cache_name, self.key)
        if isinstance(fetch_response, CacheListFetch.Hit):
            items = [json.loads(m) for m in fetch_response.value_list_string]
            return messages_from_dict(items)
        elif isinstance(fetch_response, CacheListFetch.Miss):
            return []
        elif isinstance(fetch_response, CacheListFetch.Error):
            raise fetch_response.inner_exception
        else:
            raise Exception(f"Unexpected response: {fetch_response}")

    def add_message(self, message: BaseMessage) -> None:
        """Store a message in the cache.

        Args:
            message (BaseMessage): The message object to store.

        Raises:
            SdkException: Momento service or network error.
            Exception: Unexpected response.
        """
        from momento.responses import CacheListPushBack

        item = json.dumps(_message_to_dict(message))
        push_response = self.cache_client.list_push_back(
            self.cache_name, self.key, item, ttl=self.ttl
        )
        if isinstance(push_response, CacheListPushBack.Success):
            return None
        elif isinstance(push_response, CacheListPushBack.Error):
            raise push_response.inner_exception
        else:
            raise Exception(f"Unexpected response: {push_response}")

    def clear(self) -> None:
        """Remove the session's messages from the cache.

        Raises:
            SdkException: Momento service or network error.
            Exception: Unexpected response.
        """
        from momento.responses import CacheDelete

        delete_response = self.cache_client.delete(self.cache_name, self.key)
        if isinstance(delete_response, CacheDelete.Success):
            return None
        elif isinstance(delete_response, CacheDelete.Error):
            raise delete_response.inner_exception
        else:
            raise Exception(f"Unexpected response: {delete_response}")
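
A sketch built on from_client_params above (assumes the momento package and a MOMENTO_AUTH_TOKEN environment variable; the cache name is illustrative):

    from datetime import timedelta

    from langchain.memory.chat_message_histories.momento import MomentoChatMessageHistory

    history = MomentoChatMessageHistory.from_client_params(
        "demo-session", "langchain-demo-cache", timedelta(days=1)
    )
    history.add_user_message("hi")
    print(history.messages)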
Source code for langchain.memory.chat_message_histories.cosmos_db
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/cosmos_db.html)

"""Azure CosmosDB Memory History."""
from __future__ import annotations

import logging
from types import TracebackType
from typing import TYPE_CHECKING, Any, List, Optional, Type

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
    messages_from_dict,
    messages_to_dict,
)

logger = logging.getLogger(__name__)

if TYPE_CHECKING:
    from azure.cosmos import ContainerProxy


class CosmosDBChatMessageHistory(BaseChatMessageHistory):
    """Chat history backed by Azure CosmosDB."""

    def __init__(
        self,
        cosmos_endpoint: str,
        cosmos_database: str,
        cosmos_container: str,
        session_id: str,
        user_id: str,
        credential: Any = None,
        connection_string: Optional[str] = None,
        ttl: Optional[int] = None,
        cosmos_client_kwargs: Optional[dict] = None,
    ):
        """
        Initializes a new instance of the CosmosDBChatMessageHistory class.

        Make sure to call prepare_cosmos or use the context manager to make
        sure your database is ready.

        Either a credential or a connection string must be provided.

        :param cosmos_endpoint: The connection endpoint for the Azure Cosmos DB account.
        :param cosmos_database: The name of the database to use.
        :param cosmos_container: The name of the container to use.
        :param session_id: The session ID to use, can be overwritten while loading.
        :param user_id: The user ID to use, can be overwritten while loading.
        :param credential: The credential to use to authenticate to Azure Cosmos DB.
        :param connection_string: The connection string to use to authenticate.
        :param ttl: The time to live (in seconds) to use for documents in the container.
        :param cosmos_client_kwargs: Additional kwargs to pass to the CosmosClient.
        """
        self.cosmos_endpoint = cosmos_endpoint
        self.cosmos_database = cosmos_database
        self.cosmos_container = cosmos_container
        self.credential = credential
        self.conn_string = connection_string
        self.session_id = session_id
        self.user_id = user_id
        self.ttl = ttl
        self.messages: List[BaseMessage] = []

        try:
            from azure.cosmos import (  # pylint: disable=import-outside-toplevel # noqa: E501
                CosmosClient,
            )
        except ImportError as exc:
            raise ImportError(
                "You must install the azure-cosmos package to use the CosmosDBChatMessageHistory."  # noqa: E501
            ) from exc

        if self.credential:
            self._client = CosmosClient(
                url=self.cosmos_endpoint,
                credential=self.credential,
                **cosmos_client_kwargs or {},
            )
        elif self.conn_string:
            self._client = CosmosClient.from_connection_string(
                conn_str=self.conn_string,
                **cosmos_client_kwargs or {},
            )
        else:
            raise ValueError("Either a connection string or a credential must be set.")
        self._container: Optional[ContainerProxy] = None

    def prepare_cosmos(self) -> None:
        """Prepare the CosmosDB client.

        Use this function or the context manager to make sure your database is ready.
        """
        try:
            from azure.cosmos import (  # pylint: disable=import-outside-toplevel # noqa: E501
                PartitionKey,
            )
        except ImportError as exc:
            raise ImportError(
                "You must install the azure-cosmos package to use the CosmosDBChatMessageHistory."  # noqa: E501
            ) from exc

        database = self._client.create_database_if_not_exists(self.cosmos_database)
        self._container = database.create_container_if_not_exists(
            self.cosmos_container,
            partition_key=PartitionKey("/user_id"),
            default_ttl=self.ttl,
        )
        self.load_messages()

    def __enter__(self) -> "CosmosDBChatMessageHistory":
        """Context manager entry point."""
        self._client.__enter__()
        self.prepare_cosmos()
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        """Context manager exit"""
        self.upsert_messages()
        self._client.__exit__(exc_type, exc_val, traceback)

    def load_messages(self) -> None:
        """Retrieve the messages from Cosmos"""
        if not self._container:
            raise ValueError("Container not initialized")
        try:
            from azure.cosmos.exceptions import (  # pylint: disable=import-outside-toplevel # noqa: E501
                CosmosHttpResponseError,
            )
        except ImportError as exc:
            raise ImportError(
                "You must install the azure-cosmos package to use the CosmosDBChatMessageHistory."  # noqa: E501
            ) from exc
        try:
            item = self._container.read_item(
                item=self.session_id, partition_key=self.user_id
            )
        except CosmosHttpResponseError:
            logger.info("no session found")
            return
        if "messages" in item and len(item["messages"]) > 0:
            self.messages = messages_from_dict(item["messages"])

    def add_message(self, message: BaseMessage) -> None:
        """Add a self-created message to the store"""
        self.messages.append(message)
        self.upsert_messages()

    def upsert_messages(self) -> None:
        """Update the cosmosdb item."""
        if not self._container:
            raise ValueError("Container not initialized")
        self._container.upsert_item(
            body={
                "id": self.session_id,
                "user_id": self.user_id,
                "messages": messages_to_dict(self.messages),
            }
        )

    def clear(self) -> None:
        """Clear session memory from this memory and cosmos."""
        self.messages = []
        if self._container:
            self._container.delete_item(
                item=self.session_id, partition_key=self.user_id
            )
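
A sketch using the context manager described above (the endpoint, names, and connection string are placeholders for a real Azure Cosmos DB account):

    from langchain.memory.chat_message_histories.cosmos_db import CosmosDBChatMessageHistory

    history = CosmosDBChatMessageHistory(
        cosmos_endpoint="https://<account>.documents.azure.com:443/",
        cosmos_database="chats",
        cosmos_container="messages",
        session_id="demo-session",
        user_id="user-1",
        connection_string="<cosmos-connection-string>",
    )
    with history:  # runs prepare_cosmos on entry and upserts messages on exit
        history.add_user_message("hi")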
Source code for langchain.memory.chat_message_histories.dynamodb
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/dynamodb.html)

import logging
from typing import List, Optional

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
    _message_to_dict,
    messages_from_dict,
    messages_to_dict,
)

logger = logging.getLogger(__name__)


class DynamoDBChatMessageHistory(BaseChatMessageHistory):
    """Chat message history that stores history in AWS DynamoDB.

    This class expects that a DynamoDB table with name `table_name`
    and a partition Key of `SessionId` is present.

    Args:
        table_name: name of the DynamoDB table
        session_id: arbitrary key that is used to store the messages
            of a single chat session.
        endpoint_url: URL of the AWS endpoint to connect to. This argument
            is optional and useful for test purposes, like using Localstack.
            If you plan to use AWS cloud service, you normally don't have to
            worry about setting the endpoint_url.
    """

    def __init__(
        self, table_name: str, session_id: str, endpoint_url: Optional[str] = None
    ):
        import boto3

        if endpoint_url:
            client = boto3.resource("dynamodb", endpoint_url=endpoint_url)
        else:
            client = boto3.resource("dynamodb")
        self.table = client.Table(table_name)
        self.session_id = session_id

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve the messages from DynamoDB"""
        from botocore.exceptions import ClientError

        # Initialize response so a failed lookup leaves it defined.
        response = None
        try:
            response = self.table.get_item(Key={"SessionId": self.session_id})
        except ClientError as error:
            if error.response["Error"]["Code"] == "ResourceNotFoundException":
                logger.warning("No record found with session id: %s", self.session_id)
            else:
                logger.error(error)

        if response and "Item" in response:
            items = response["Item"]["History"]
        else:
            items = []

        messages = messages_from_dict(items)
        return messages

    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in DynamoDB"""
        from botocore.exceptions import ClientError

        messages = messages_to_dict(self.messages)
        _message = _message_to_dict(message)
        messages.append(_message)

        try:
            self.table.put_item(
                Item={"SessionId": self.session_id, "History": messages}
            )
        except ClientError as err:
            logger.error(err)

    def clear(self) -> None:
        """Clear session memory from DynamoDB"""
        from botocore.exceptions import ClientError

        try:
            self.table.delete_item(Key={"SessionId": self.session_id})
        except ClientError as err:
            logger.error(err)
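
A sketch (assumes AWS credentials and an existing DynamoDB table named SessionTable with partition key SessionId; both names are illustrative):

    from langchain.memory.chat_message_histories.dynamodb import DynamoDBChatMessageHistory

    history = DynamoDBChatMessageHistory(table_name="SessionTable", session_id="demo-session")
    history.add_user_message("hi")
    print(history.messages)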
Source code for langchain.memory.chat_message_histories.postgres
(Source: https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/postgres.html)

import json
import logging
from typing import List

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
    _message_to_dict,
    messages_from_dict,
)

logger = logging.getLogger(__name__)

DEFAULT_CONNECTION_STRING = "postgresql://postgres:mypassword@localhost/chat_history"


class PostgresChatMessageHistory(BaseChatMessageHistory):
    def __init__(
        self,
        session_id: str,
        connection_string: str = DEFAULT_CONNECTION_STRING,
        table_name: str = "message_store",
    ):
        import psycopg
        from psycopg.rows import dict_row

        try:
            self.connection = psycopg.connect(connection_string)
            self.cursor = self.connection.cursor(row_factory=dict_row)
        except psycopg.OperationalError as error:
            logger.error(error)

        self.session_id = session_id
        self.table_name = table_name

        self._create_table_if_not_exists()

    def _create_table_if_not_exists(self) -> None:
        create_table_query = f"""CREATE TABLE IF NOT EXISTS {self.table_name} (
            id SERIAL PRIMARY KEY,
            session_id TEXT NOT NULL,
            message JSONB NOT NULL
        );"""
        self.cursor.execute(create_table_query)
        self.connection.commit()

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve the messages from PostgreSQL"""
        query = f"SELECT message FROM {self.table_name} WHERE session_id = %s;"
        self.cursor.execute(query, (self.session_id,))
        items = [record["message"] for record in self.cursor.fetchall()]
        messages = messages_from_dict(items)
        return messages

    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in PostgreSQL"""
        from psycopg import sql

        query = sql.SQL("INSERT INTO {} (session_id, message) VALUES (%s, %s);").format(
            sql.Identifier(self.table_name)
        )
        self.cursor.execute(
            query, (self.session_id, json.dumps(_message_to_dict(message)))
        )
        self.connection.commit()

    def clear(self) -> None:
        """Clear session memory from PostgreSQL"""
        query = f"DELETE FROM {self.table_name} WHERE session_id = %s;"
        self.cursor.execute(query, (self.session_id,))
        self.connection.commit()

    def __del__(self) -> None:
        if self.cursor:
            self.cursor.close()
        if self.connection:
            self.connection.close()
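
A sketch (assumes a PostgreSQL server matching the default connection string above and the psycopg package):

    from langchain.memory.chat_message_histories.postgres import PostgresChatMessageHistory

    history = PostgresChatMessageHistory(session_id="demo-session")
    history.add_user_message("hi")
    print(history.messages)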
Indexes
Indexes refer to ways to structure documents so that LLMs can best interact with them.
LangChain has a number of modules that help you load, structure, store, and retrieve documents.
Docstore
Text Splitter
Document Loaders
Vector Stores
Retrievers
Document Compressors
Document Transformers
Models
LangChain provides interfaces and integrations for a number of different types of models.
LLMs
Chat Models
Embeddings
Prompts
The reference guides here all relate to objects for working with Prompts.
PromptTemplates
Example Selector
Output Parsers
Agents
Reference guide for Agents and associated abstractions.
Agents
Tools
Agent Toolkits
Installation

Official Releases

LangChain is available on PyPI, so it is easily installable with:
pip install langchain
That will install the bare minimum requirements of LangChain.
A lot of the value of LangChain comes when integrating it with various model providers, datastores, etc.
By default, the dependencies needed to do that are NOT installed.
However, there are two other ways to install LangChain that do bring in those dependencies.
To install modules needed for the common LLM providers, run:
pip install langchain[llms]
To install all modules needed for all integrations, run:
pip install langchain[all]
Note that if you are using zsh, you’ll need to quote square brackets when passing them as an argument to a command, for example:
pip install 'langchain[all]'
Installing from source
If you want to install from source, you can do so by cloning the repo and running:
pip install -e .
Memory
class langchain.memory.CassandraChatMessageHistory(contact_points: List[str], session_id: str, port: int = 9042, username: str = 'cassandra', password: str = 'cassandra', keyspace_name: str = 'chat_history', table_name: str = 'message_store')[source]#
Chat message history that stores history in Cassandra.
Parameters
contact_points – list of ips to connect to Cassandra cluster
session_id – arbitrary key that is used to store the messages
of a single chat session.
port – port to connect to Cassandra cluster
username – username to connect to Cassandra cluster
password – password to connect to Cassandra cluster
keyspace_name – name of the keyspace to use
table_name – name of the table to use
add_message(message: langchain.schema.BaseMessage) → None[source]#
Append the message to the record in Cassandra
clear() → None[source]#
Clear session memory from Cassandra
property messages: List[langchain.schema.BaseMessage]#
Retrieve the messages from Cassandra
pydantic model langchain.memory.ChatMessageHistory[source]#
field messages: List[langchain.schema.BaseMessage] = []#
add_message(message: langchain.schema.BaseMessage) → None[source]#
Add a self-created message to the store
clear() → None[source]#
Remove all messages from the store
pydantic model langchain.memory.CombinedMemory[source]#
Class for combining multiple memories’ data together.
Validators
check_input_key » memories
check_repeated_memory_variable » memories
field memories: List[langchain.schema.BaseMemory] [Required]#
For tracking all the memories that should be accessed.
clear() → None[source]#
Clear context from this session for every memory. | https://python.langchain.com/en/latest/reference/modules/memory.html |
290e16503ce6-1 | clear() → None[source]#
Clear context from this session for every memory.
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, str][source]#
Load all vars from sub-memories.
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Save context from this session for every memory.
property memory_variables: List[str]#
All the memory variables that this instance provides.
pydantic model langchain.memory.ConversationBufferMemory[source]#
Buffer for storing conversation memory.
field ai_prefix: str = 'AI'#
field human_prefix: str = 'Human'#
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]#
Return history buffer.
property buffer: Any#
String buffer of memory.
pydantic model langchain.memory.ConversationBufferWindowMemory[source]#
Buffer for storing conversation memory.
field ai_prefix: str = 'AI'#
field human_prefix: str = 'Human'#
field k: int = 5#
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, str][source]#
Return history buffer.
property buffer: List[langchain.schema.BaseMessage]#
String buffer of memory.
pydantic model langchain.memory.ConversationEntityMemory[source]#
Entity extractor & summarizer to memory.
field ai_prefix: str = 'AI'#
field chat_history_key: str = 'history'#
field entity_cache: List[str] = []#
field entity_extraction_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['history', 'input'], output_parser=None, partial_variables={}, template='You are an AI assistant reading the transcript of a conversation between an AI and a human. Extract all of the proper nouns from the last line of conversation. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.\n\nThe conversation history is provided just in case of a coreference (e.g. "What do you know about him" where "him" is defined in a previous line) -- ignore items mentioned there that are not in the last line.\n\nReturn the output as a single comma-separated list, or NONE if there is nothing of note to return (e.g. the user is just issuing a greeting or having a simple conversation).\n\nEXAMPLE\nConversation history:\nPerson #1: how\'s it going today?\nAI: "It\'s going great! How about you?"\nPerson #1: good! busy working on Langchain. lots to do.\nAI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"\nLast line:\nPerson #1: i\'m trying to improve Langchain\'s interfaces, the UX, its integrations with various products the user might want ... a lot of stuff.\nOutput: Langchain\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: how\'s it going today?\nAI: "It\'s going great! How about you?"\nPerson #1: good! busy working on Langchain. lots to do.\nAI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"\nLast line:\nPerson #1: i\'m trying to improve Langchain\'s interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I\'m working with Person #2.\nOutput: Langchain, Person #2\nEND OF EXAMPLE\n\nConversation history (for reference only):\n{history}\nLast line of conversation (for extraction):\nHuman: {input}\n\nOutput:', template_format='f-string', validate_template=True)#
field entity_store: langchain.memory.entity.BaseEntityStore [Optional]#
field entity_summarization_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['entity', 'summary', 'history', 'input'], output_parser=None, partial_variables={}, template='You are an AI assistant helping a human keep track of facts about relevant people, places, and concepts in their life. Update the summary of the provided entity in the "Entity" section based on the last line of your conversation with the human. If you are writing the summary for the first time, return a single sentence.\nThe update should only include facts that are relayed in the last line of conversation about the provided entity, and should only contain facts about the provided entity.\n\nIf there is no new information about the provided entity or the information is not worth noting (not an important or relevant fact to remember long-term), return the existing summary unchanged.\n\nFull conversation history (for context):\n{history}\n\nEntity to summarize:\n{entity}\n\nExisting summary of {entity}:\n{summary}\n\nLast line of conversation:\nHuman: {input}\nUpdated summary:', template_format='f-string', validate_template=True)#
field human_prefix: str = 'Human'#
field k: int = 3#
field llm: langchain.base_language.BaseLanguageModel [Required]#
clear() → None[source]#
Clear memory contents.
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]#
Return history buffer.
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Save context from this conversation to buffer.
property buffer: List[langchain.schema.BaseMessage]#
pydantic model langchain.memory.ConversationKGMemory[source]#
Knowledge graph memory for storing conversation memory.
Integrates with external knowledge graph to store and retrieve
information about knowledge triples in the conversation.
field ai_prefix: str = 'AI'#
field entity_extraction_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['history', 'input'], output_parser=None, partial_variables={}, template='You are an AI assistant reading the transcript of a conversation between an AI and a human. Extract all of the proper nouns from the last line of conversation. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.\n\nThe conversation history is provided just in case of a coreference (e.g. "What do you know about him" where "him" is defined in a previous line) -- ignore items mentioned there that are not in the last line.\n\nReturn the output as a single comma-separated list, or NONE if there is nothing of note to return (e.g. the user is just issuing a greeting or having a simple conversation).\n\nEXAMPLE\nConversation history:\nPerson #1: how\'s it going today?\nAI: "It\'s going great! How about you?"\nPerson #1: good! busy working on Langchain. lots to do.\nAI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"\nLast line:\nPerson #1: i\'m trying to improve Langchain\'s interfaces, the UX, its integrations with various products the user might want ... a lot of stuff.\nOutput: Langchain\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: how\'s it going today?\nAI: "It\'s going great! How about you?"\nPerson #1: good! busy working on Langchain. lots to do.\nAI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"\nLast line:\nPerson #1: i\'m trying to improve Langchain\'s interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I\'m working with Person #2.\nOutput: Langchain, Person #2\nEND OF EXAMPLE\n\nConversation history (for reference only):\n{history}\nLast line of conversation (for extraction):\nHuman: {input}\n\nOutput:', template_format='f-string', validate_template=True)#
field human_prefix: str = 'Human'#
field k: int = 2#
Number of previous utterances to include in the context.
field kg: langchain.graphs.networkx_graph.NetworkxEntityGraph [Optional]#
field knowledge_extraction_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['history', 'input'], output_parser=None, partial_variables={}, template="You are a networked intelligence helping a human track knowledge triples about all relevant people, things, concepts, etc. and integrating them with your knowledge stored within your weights as well as that stored in a knowledge graph. Extract all of the knowledge triples from the last line of conversation. A knowledge triple is a clause that contains a subject, a predicate, and an object. The subject is the entity being described, the predicate is the property of the subject that is being described, and the object is the value of the property.\n\nEXAMPLE\nConversation history:\nPerson #1: Did you hear aliens landed in Area 51?\nAI: No, I didn't hear that. What do you know about Area 51?\nPerson #1: It's a secret military base in Nevada.\nAI: What do you know about Nevada?\nLast line of conversation:\nPerson #1: It's a state in the US. It's also the number 1 producer of gold in the US.\n\nOutput: (Nevada, is a, state)<|>(Nevada, is in, US)<|>(Nevada, is the number 1 producer of, gold)\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: Hello.\nAI: Hi! How are you?\nPerson #1: I'm good. How are you?\nAI: I'm good too.\nLast line of conversation:\nPerson #1: I'm going to the store.\n\nOutput: NONE\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: What do you know about Descartes?\nAI: Descartes was a French philosopher, mathematician, and scientist who lived in the 17th century.\nPerson #1: The Descartes I'm referring to is a standup comedian and interior designer from Montreal.\nAI: Oh yes, He is a comedian and an interior designer. He has been in the industry for 30 years. His favorite food is baked bean pie.\nLast line of conversation:\nPerson #1: Oh huh. I know Descartes likes to drive antique scooters and play the mandolin.\nOutput: (Descartes, likes to drive, antique scooters)<|>(Descartes, plays, mandolin)\nEND OF EXAMPLE\n\nConversation history (for reference only):\n{history}\nLast line of conversation (for extraction):\nHuman: {input}\n\nOutput:", template_format='f-string', validate_template=True)#
field llm: langchain.base_language.BaseLanguageModel [Required]#
field summary_message_cls: Type[langchain.schema.BaseMessage] = <class 'langchain.schema.SystemMessage'>#
clear() → None[source]#
Clear memory contents.
get_current_entities(input_string: str) → List[str][source]#
get_knowledge_triplets(input_string: str) → List[langchain.graphs.networkx_graph.KnowledgeTriple][source]#
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]#
Return history buffer.
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Save context from this conversation to buffer.
pydantic model langchain.memory.ConversationStringBufferMemory[source]#
Buffer for storing conversation memory.
field ai_prefix: str = 'AI'#
Prefix to use for AI generated responses.
field buffer: str = ''#
field human_prefix: str = 'Human'#
field input_key: Optional[str] = None#
field output_key: Optional[str] = None#
clear() → None[source]#
Clear memory contents.
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, str][source]#
Return history buffer.
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Save context from this conversation to buffer.
property memory_variables: List[str]#
Will always return list of memory variables.
:meta private:
pydantic model langchain.memory.ConversationSummaryBufferMemory[source]#
Buffer with summarizer for storing conversation memory.
field max_token_limit: int = 2000#
field memory_key: str = 'history'#
field moving_summary_buffer: str = ''#
clear() → None[source]#
Clear memory contents.
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]#
Return history buffer.
prune() → None[source]#
Prune buffer if it exceeds max token limit
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Save context from this conversation to buffer.
property buffer: List[langchain.schema.BaseMessage]#
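Example (an illustrative sketch; assumes OPENAI_API_KEY is set, but any BaseLanguageModel works for the llm field):
from langchain.llms import OpenAI
from langchain.memory import ConversationSummaryBufferMemory

memory = ConversationSummaryBufferMemory(llm=OpenAI(), max_token_limit=40)
memory.save_context({"input": "hi"}, {"output": "whats up"})
# once the buffer exceeds max_token_limit, older turns are summarized
# into moving_summary_buffer
print(memory.load_memory_variables({}))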
pydantic model langchain.memory.ConversationSummaryMemory[source]#
Conversation summarizer to memory.
field buffer: str = ''#
clear() → None[source]#
Clear memory contents.
classmethod from_messages(llm: langchain.base_language.BaseLanguageModel, chat_memory: langchain.schema.BaseChatMessageHistory, *, summarize_step: int = 2, **kwargs: Any) → langchain.memory.summary.ConversationSummaryMemory[source]#
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]#
Return history buffer.
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Save context from this conversation to buffer.
pydantic model langchain.memory.ConversationTokenBufferMemory[source]#
Buffer for storing conversation memory.
field ai_prefix: str = 'AI'#
field human_prefix: str = 'Human'#
field llm: langchain.base_language.BaseLanguageModel [Required]#
field max_token_limit: int = 2000#
field memory_key: str = 'history'#
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]#
Return history buffer.
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Save context from this conversation to buffer. Pruned.
property buffer: List[langchain.schema.BaseMessage]#
String buffer of memory.
class langchain.memory.CosmosDBChatMessageHistory(cosmos_endpoint: str, cosmos_database: str, cosmos_container: str, session_id: str, user_id: str, credential: Any = None, connection_string: Optional[str] = None, ttl: Optional[int] = None, cosmos_client_kwargs: Optional[dict] = None)[source]#
Chat history backed by Azure CosmosDB.
add_message(message: langchain.schema.BaseMessage) → None[source]#
Add a self-created message to the store
clear() → None[source]#
Clear session memory from this memory and cosmos.
load_messages() → None[source]#
Retrieve the messages from Cosmos
prepare_cosmos() → None[source]#
Prepare the CosmosDB client.
Use this function or the context manager to make sure your database is ready.
upsert_messages() → None[source]#
Update the cosmosdb item.
class langchain.memory.DynamoDBChatMessageHistory(table_name: str, session_id: str, endpoint_url: Optional[str] = None)[source]#
Chat message history that stores history in AWS DynamoDB.
This class expects that a DynamoDB table with name table_name
and a partition Key of SessionId is present.
Parameters
table_name – name of the DynamoDB table
session_id – arbitrary key that is used to store the messages
of a single chat session.
endpoint_url – URL of the AWS endpoint to connect to. This argument
is optional and useful for test purposes, like using Localstack.
If you plan to use AWS cloud service, you normally don’t have to
worry about setting the endpoint_url.
add_message(message: langchain.schema.BaseMessage) → None[source]#
Append the message to the record in DynamoDB
clear() → None[source]#
Clear session memory from DynamoDB
property messages: List[langchain.schema.BaseMessage]#
Retrieve the messages from DynamoDB
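Example (an illustrative sketch; assumes a DynamoDB table named "SessionTable" with partition key SessionId already exists and AWS credentials are configured):
from langchain.memory import DynamoDBChatMessageHistory
from langchain.schema import HumanMessage

history = DynamoDBChatMessageHistory(table_name="SessionTable", session_id="0")
history.add_message(HumanMessage(content="hello"))
print(history.messages)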
class langchain.memory.FileChatMessageHistory(file_path: str)[source]#
Chat message history that stores history in a local file.
Parameters
file_path – path of the local file to store the messages.
add_message(message: langchain.schema.BaseMessage) → None[source]#
Append the message to the record in the local file
clear() → None[source]#
Clear session memory from the local file
property messages: List[langchain.schema.BaseMessage]#
Retrieve the messages from the local file
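Example (a minimal sketch; the file path is arbitrary):
from langchain.memory import FileChatMessageHistory
from langchain.schema import AIMessage, HumanMessage

history = FileChatMessageHistory(file_path="chat_history.json")
history.add_message(HumanMessage(content="hello"))
history.add_message(AIMessage(content="hi there"))
print(history.messages)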
pydantic model langchain.memory.InMemoryEntityStore[source]#
Basic in-memory entity store.
field store: Dict[str, Optional[str]] = {}#
clear() → None[source]#
Delete all entities from store.
delete(key: str) → None[source]#
Delete entity value from store.
exists(key: str) → bool[source]#
Check if entity exists in store.
get(key: str, default: Optional[str] = None) → Optional[str][source]#
Get entity value from store.
set(key: str, value: Optional[str]) → None[source]#
Set entity value in store.
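Example (a minimal sketch using only the methods documented above):
from langchain.memory import InMemoryEntityStore

store = InMemoryEntityStore()
store.set("Alice", "Alice is a software engineer.")
print(store.get("Alice"))
store.delete("Alice")
print(store.exists("Alice"))  # False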
class langchain.memory.MomentoChatMessageHistory(session_id: str, cache_client: momento.CacheClient, cache_name: str, *, key_prefix: str = 'message_store:', ttl: Optional[timedelta] = None, ensure_cache_exists: bool = True)[source]#
Chat message history cache that uses Momento as a backend.
See https://gomomento.com/
add_message(message: langchain.schema.BaseMessage) → None[source]#
Store a message in the cache.
Parameters
message (BaseMessage) – The message object to store.
Raises
SdkException – Momento service or network error.
Exception – Unexpected response.
clear() → None[source]#
Remove the session’s messages from the cache.
Raises
SdkException – Momento service or network error.
Exception – Unexpected response.
classmethod from_client_params(session_id: str, cache_name: str, ttl: timedelta, *, configuration: Optional[momento.config.Configuration] = None, auth_token: Optional[str] = None, **kwargs: Any) → MomentoChatMessageHistory[source]#
Construct cache from CacheClient parameters.
property messages: list[langchain.schema.BaseMessage]#
Retrieve the messages from Momento.
Raises
SdkException – Momento service or network error
Exception – Unexpected response
Returns
List of cached messages
Return type
list[BaseMessage]
class langchain.memory.MongoDBChatMessageHistory(connection_string: str, session_id: str, database_name: str = 'chat_history', collection_name: str = 'message_store')[source]#
Chat message history that stores history in MongoDB.
Parameters
connection_string – connection string to connect to MongoDB
session_id – arbitrary key that is used to store the messages
of a single chat session.
database_name – name of the database to use
collection_name – name of the collection to use
add_message(message: langchain.schema.BaseMessage) → None[source]#
Append the message to the record in MongoDB
clear() → None[source]#
Clear session memory from MongoDB
property messages: List[langchain.schema.BaseMessage]#
Retrieve the messages from MongoDB
class langchain.memory.PostgresChatMessageHistory(session_id: str, connection_string: str = 'postgresql://postgres:mypassword@localhost/chat_history', table_name: str = 'message_store')[source]#
add_message(message: langchain.schema.BaseMessage) → None[source]#
Append the message to the record in PostgreSQL
clear() → None[source]#
Clear session memory from PostgreSQL
property messages: List[langchain.schema.BaseMessage]#
Retrieve the messages from PostgreSQL
pydantic model langchain.memory.ReadOnlySharedMemory[source]#
A memory wrapper that is read-only and cannot be changed.
field memory: langchain.schema.BaseMemory [Required]#
clear() → None[source]#
Nothing to clear, got a memory like a vault.
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, str][source]#
Load memory variables from memory.
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Nothing should be saved or changed
property memory_variables: List[str]#
Return memory variables.
class langchain.memory.RedisChatMessageHistory(session_id: str, url: str = 'redis://localhost:6379/0', key_prefix: str = 'message_store:', ttl: Optional[int] = None)[source]#
add_message(message: langchain.schema.BaseMessage) → None[source]#
Append the message to the record in Redis
clear() → None[source]#
Clear session memory from Redis
property key: str#
Construct the record key to use
property messages: List[langchain.schema.BaseMessage]#
Retrieve the messages from Redis
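Example (an illustrative sketch, assuming a Redis server on the default local URL):
from langchain.memory import RedisChatMessageHistory
from langchain.schema import AIMessage, HumanMessage

history = RedisChatMessageHistory(session_id="example", url="redis://localhost:6379/0")
history.add_message(HumanMessage(content="hello"))
history.add_message(AIMessage(content="hi there"))
print(history.messages)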
pydantic model langchain.memory.RedisEntityStore[source]#
Redis-backed Entity store. Entities get a TTL of 1 day by default, and
that TTL is extended by 3 days every time the entity is read back.
field key_prefix: str = 'memory_store'#
field recall_ttl: Optional[int] = 259200#
field redis_client: Any = None#
field session_id: str = 'default'#
field ttl: Optional[int] = 86400#
clear() → None[source]#
Delete all entities from store.
delete(key: str) → None[source]#
Delete entity value from store.
exists(key: str) → bool[source]#
Check if entity exists in store.
get(key: str, default: Optional[str] = None) → Optional[str][source]#
Get entity value from store.
set(key: str, value: Optional[str]) → None[source]#
Set entity value in store.
property full_key_prefix: str#
pydantic model langchain.memory.SQLiteEntityStore[source]#
SQLite-backed Entity store
field session_id: str = 'default'#
field table_name: str = 'memory_store'#
clear() → None[source]#
Delete all entities from store.
delete(key: str) → None[source]#
Delete entity value from store.
exists(key: str) → bool[source]#
Check if entity exists in store.
get(key: str, default: Optional[str] = None) → Optional[str][source]#
Get entity value from store.
set(key: str, value: Optional[str]) → None[source]#
Set entity value in store.
property full_table_name: str#
pydantic model langchain.memory.SimpleMemory[source]#
Simple memory for storing context or other bits of information that shouldn’t
ever change between prompts.
field memories: Dict[str, Any] = {}#
clear() → None[source]#
Nothing to clear, got a memory like a vault.
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, str][source]#
Return key-value pairs given the text input to the chain.
If None, return all memories
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Nothing should be saved or changed, my memory is set in stone.
property memory_variables: List[str]#
Input keys this memory class will load dynamically.
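Example (a minimal sketch):
from langchain.memory import SimpleMemory

memory = SimpleMemory(memories={"project": "Langchain"})
print(memory.load_memory_variables({}))  # -> {'project': 'Langchain'}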
pydantic model langchain.memory.VectorStoreRetrieverMemory[source]#
Class for a VectorStore-backed memory object.
field input_key: Optional[str] = None#
Key name to index the inputs to load_memory_variables.
field memory_key: str = 'history'#
Key name to locate the memories in the result of load_memory_variables.
field retriever: langchain.vectorstores.base.VectorStoreRetriever [Required]#
VectorStoreRetriever object to connect to.
field return_docs: bool = False#
Whether or not to return the result of querying the database directly.
clear() → None[source]#
Nothing to clear.
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Union[List[langchain.schema.Document], str]][source]#
Return history buffer.
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Save context from this conversation to buffer.
property memory_variables: List[str]#
The list of keys emitted from the load_memory_variables method.
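Example (an illustrative sketch, assuming the faiss-cpu package is installed and OPENAI_API_KEY is set; any vector store retriever works):
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.memory import VectorStoreRetrieverMemory
from langchain.vectorstores import FAISS

vectorstore = FAISS.from_texts(["My favorite food is pizza"], OpenAIEmbeddings())
retriever = vectorstore.as_retriever(search_kwargs={"k": 1})
memory = VectorStoreRetrieverMemory(retriever=retriever)
memory.save_context({"input": "I work at a bakery"}, {"output": "good to know"})
# load_memory_variables runs a similarity search over the saved turns
print(memory.load_memory_variables({"input": "where do I work?"}))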
Utilities#
General utilities.
pydantic model langchain.utilities.ApifyWrapper[source]#
Wrapper around Apify.
To use, you should have the apify-client python package installed,
and the environment variable APIFY_API_TOKEN set with your API key, or pass
apify_api_token as a named parameter to the constructor.
field apify_client: Any = None#
field apify_client_async: Any = None#
async acall_actor(actor_id: str, run_input: Dict, dataset_mapping_function: Callable[[Dict], langchain.schema.Document], *, build: Optional[str] = None, memory_mbytes: Optional[int] = None, timeout_secs: Optional[int] = None) → langchain.document_loaders.apify_dataset.ApifyDatasetLoader[source]#
Run an Actor on the Apify platform and wait for results to be ready.
Parameters
actor_id (str) – The ID or name of the Actor on the Apify platform.
run_input (Dict) – The input object of the Actor that you’re trying to run.
dataset_mapping_function (Callable) – A function that takes a single
dictionary (an Apify dataset item) and converts it to
an instance of the Document class.
build (str, optional) – Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional) – Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional) – Optional timeout for the run, in seconds.
Returns
A loader that will fetch the records from the Actor run’s default dataset.
Return type
ApifyDatasetLoader
call_actor(actor_id: str, run_input: Dict, dataset_mapping_function: Callable[[Dict], langchain.schema.Document], *, build: Optional[str] = None, memory_mbytes: Optional[int] = None, timeout_secs: Optional[int] = None) → langchain.document_loaders.apify_dataset.ApifyDatasetLoader[source]#
Run an Actor on the Apify platform and wait for results to be ready.
Parameters
actor_id (str) – The ID or name of the Actor on the Apify platform.
run_input (Dict) – The input object of the Actor that you’re trying to run.
dataset_mapping_function (Callable) – A function that takes a single
dictionary (an Apify dataset item) and converts it to an
instance of the Document class.
build (str, optional) – Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional) – Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional) – Optional timeout for the run, in seconds.
Returns
A loader that will fetch the records from the Actor run’s default dataset.
Return type
ApifyDatasetLoader
pydantic model langchain.utilities.ArxivAPIWrapper[source]#
Wrapper around ArxivAPI.
To use, you should have the arxiv python package installed.
https://lukasschwab.me/arxiv.py/index.html
This wrapper will use the Arxiv API to conduct searches and
fetch document summaries. By default, it will return the document summaries
of the top-k results.
It limits the Document content by doc_content_chars_max.
Set doc_content_chars_max=None if you don’t want to limit the content size.
Parameters
top_k_results – number of the top-scored document used for the arxiv tool
ARXIV_MAX_QUERY_LENGTH – the maximum length of the query passed to the arxiv tool.
load_max_docs – a limit to the number of loaded documents
load_all_available_meta –
if True: the metadata of the loaded Documents gets all available meta info (see https://lukasschwab.me/arxiv.py/index.html#Result),
if False: the metadata gets only the most informative fields.
field arxiv_exceptions: Any = None#
field doc_content_chars_max: int = 4000#
field load_all_available_meta: bool = False#
field load_max_docs: int = 100#
field top_k_results: int = 3#
load(query: str) → List[langchain.schema.Document][source]#
Run Arxiv search and get the article texts plus the article meta information.
See https://lukasschwab.me/arxiv.py/index.html#Search
Returns: a list of documents with the document.page_content in text format
run(query: str) → str[source]#
Run Arxiv search and get the article meta information.
See https://lukasschwab.me/arxiv.py/index.html#Search
See https://lukasschwab.me/arxiv.py/index.html#Result
It uses only the most informative fields of article meta information.
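Example (a minimal sketch; requires the arxiv package):
from langchain.utilities import ArxivAPIWrapper

arxiv = ArxivAPIWrapper(top_k_results=2)
print(arxiv.run("attention is all you need"))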
class langchain.utilities.BashProcess(strip_newlines: bool = False, return_err_output: bool = False, persistent: bool = False)[source]#
Executes bash commands and returns the output.
process_output(output: str, command: str) → str[source]#
run(commands: Union[str, List[str]]) → str[source]#
Run commands and return final output.
pydantic model langchain.utilities.BingSearchAPIWrapper[source]#
Wrapper for Bing Search API.
In order to set this up, follow instructions at:
https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e
field bing_search_url: str [Required]#
field bing_subscription_key: str [Required]#
field k: int = 10#
results(query: str, num_results: int) → List[Dict][source]#
Run query through BingSearch and return metadata.
Parameters
query – The query to search for.
num_results – The number of results to return.
Returns
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
Return type
A list of dictionaries with the following keys
run(query: str) → str[source]#
Run query through BingSearch and parse result.
pydantic model langchain.utilities.DuckDuckGoSearchAPIWrapper[source]#
Wrapper for DuckDuckGo Search API.
Free and does not require any setup
field k: int = 10#
field max_results: int = 5#
field region: Optional[str] = 'wt-wt'#
field safesearch: str = 'moderate'#
field time: Optional[str] = 'y'#
get_snippets(query: str) → List[str][source]#
Run query through DuckDuckGo and return concatenated results.
results(query: str, num_results: int) → List[Dict[str, str]][source]#
Run query through DuckDuckGo and return metadata.
Parameters
query – The query to search for.
num_results – The number of results to return.
Returns
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
Return type
A list of dictionaries with the following keys
run(query: str) → str[source]#
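Example (a minimal sketch; requires the duckduckgo-search package but no API key):
from langchain.utilities import DuckDuckGoSearchAPIWrapper

search = DuckDuckGoSearchAPIWrapper(max_results=3)
print(search.run("LangChain"))
print(search.results("LangChain", num_results=3))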
pydantic model langchain.utilities.GooglePlacesAPIWrapper[source]#
Wrapper around Google Places API.
To use, you should have the googlemaps python package installed, an API key for the Google Maps platform,
and the environment variable GPLACES_API_KEY
set with your API key, or pass gplaces_api_key
as a named parameter to the constructor.
By default, this will return all the results for the input query. You can use the top_k_results argument to limit the number of results.
Example
from langchain import GooglePlacesAPIWrapper
gplaceapi = GooglePlacesAPIWrapper()
field gplaces_api_key: Optional[str] = None#
field top_k_results: Optional[int] = None#
fetch_place_details(place_id: str) → Optional[str][source]#
format_place_details(place_details: Dict[str, Any]) → Optional[str][source]#
run(query: str) → str[source]#
Run Places search and return the top k places that match the query.
pydantic model langchain.utilities.GoogleSearchAPIWrapper[source]#
Wrapper for Google Search API.
Adapted from: https://stackoverflow.com/questions/37083058/programmatically-searching-google-in-python-using-custom-search
TODO: DOCS for using it
1. Install google-api-python-client
- If you don’t already have a Google account, sign up.
- If you have never created a Google APIs Console project,
read the Managing Projects page and create a project in the Google API Console.
- Install the library using pip install google-api-python-client
The current version of the library is 2.70.0 at this time
2. To create an API key:
- Navigate to the APIs & Services→Credentials panel in Cloud Console.
- Select Create credentials, then select API key from the drop-down menu.
- The API key created dialog box displays your newly created key.
- You now have an API_KEY
3. Setup Custom Search Engine so you can search the entire web
- Create a custom search engine in this link.
- In Sites to search, add any valid URL (i.e. www.stackoverflow.com).
- That’s all you have to fill up, the rest doesn’t matter.
In the left-side menu, click Edit search engine → {your search engine name}
→ Setup Set Search the entire web to ON. Remove the URL you added from
the list of Sites to search.
- Under Search engine ID you’ll find the search-engine-ID.
4. Enable the Custom Search API
- Navigate to the APIs & Services→Dashboard panel in Cloud Console.
- Click Enable APIs and Services.
- Search for Custom Search API and click on it.
- Click Enable.
URL for it: https://console.cloud.google.com/apis/library/customsearch.googleapis.com
field google_api_key: Optional[str] = None#
field google_cse_id: Optional[str] = None#
field k: int = 10#
field siterestrict: bool = False#
results(query: str, num_results: int) → List[Dict][source]#
Run query through GoogleSearch and return metadata.
Parameters
query – The query to search for.
num_results – The number of results to return.
Returns
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
Return type
A list of dictionaries with the following keys
run(query: str) → str[source]#
Run query through GoogleSearch and parse result.
pydantic model langchain.utilities.GoogleSerperAPIWrapper[source]#
Wrapper around the Serper.dev Google Search API.
You can create a free API key at https://serper.dev.
To use, you should have the environment variable SERPER_API_KEY
set with your API key, or pass serper_api_key as a named parameter
to the constructor.
Example
from langchain import GoogleSerperAPIWrapper
google_serper = GoogleSerperAPIWrapper()
field aiosession: Optional[aiohttp.client.ClientSession] = None#
field gl: str = 'us'#
field hl: str = 'en'#
field k: int = 10#
field serper_api_key: Optional[str] = None#
field tbs: Optional[str] = None#
field type: Literal['news', 'search', 'places', 'images'] = 'search'#
async aresults(query: str, **kwargs: Any) → Dict[source]#
Run query through GoogleSearch.
async arun(query: str, **kwargs: Any) → str[source]#
Run query through GoogleSearch and parse result async.
results(query: str, **kwargs: Any) → Dict[source]#
Run query through GoogleSearch.
run(query: str, **kwargs: Any) → str[source]#
Run query through GoogleSearch and parse result.
pydantic model langchain.utilities.GraphQLAPIWrapper[source]#
Wrapper around GraphQL API.
To use, you should have the gql python package installed.
This wrapper will use the GraphQL API to conduct queries.
field custom_headers: Optional[Dict[str, str]] = None#
field graphql_endpoint: str [Required]#
run(query: str) → str[source]#
Run a GraphQL query and get the results.
pydantic model langchain.utilities.LambdaWrapper[source]#
Wrapper for AWS Lambda SDK.
Docs for using:
pip install boto3
Create a lambda function using the AWS Console or CLI
Run aws configure and enter your AWS credentials
field awslambda_tool_description: Optional[str] = None#
field awslambda_tool_name: Optional[str] = None#
field function_name: Optional[str] = None#
run(query: str) → str[source]#
Invoke Lambda function and parse result.
pydantic model langchain.utilities.MetaphorSearchAPIWrapper[source]#
Wrapper for Metaphor Search API.
field k: int = 10#
field metaphor_api_key: str [Required]#
results(query: str, num_results: int) → List[Dict][source]#
Run query through Metaphor Search and return metadata.
Parameters
query – The query to search for.
num_results – The number of results to return.
Returns
title - The title of the result.
url - The URL of the result.
author - Author of the content, if applicable. Otherwise, None.
date_created - Estimated date created, in YYYY-MM-DD format. Otherwise, None.
Return type
A list of dictionaries with the following keys
async results_async(query: str, num_results: int) → List[Dict][source]#
Get results from the Metaphor Search API asynchronously.
pydantic model langchain.utilities.OpenWeatherMapAPIWrapper[source]#
Wrapper for OpenWeatherMap API using PyOWM.
Docs for using:
Go to OpenWeatherMap and sign up for an API key
Save your API KEY into OPENWEATHERMAP_API_KEY env variable
pip install pyowm
field openweathermap_api_key: Optional[str] = None#
field owm: Any = None#
run(location: str) → str[source]#
Get the current weather information for a specified location.
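Example (an illustrative sketch, assuming OPENWEATHERMAP_API_KEY is set and pyowm is installed):
from langchain.utilities import OpenWeatherMapAPIWrapper

weather = OpenWeatherMapAPIWrapper()
print(weather.run("London,GB"))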
pydantic model langchain.utilities.PowerBIDataset[source]#
Create PowerBI engine from dataset ID and credential or token.
Use either the credential or a supplied token to authenticate.
If both are supplied the credential is used to generate a token.
The impersonated_user_name is the UPN of a user to be impersonated.
If the model is not RLS enabled, this will be ignored.
Validators
fix_table_names » table_names
token_or_credential_present » all fields
field aiosession: Optional[aiohttp.ClientSession] = None#
field credential: Optional[TokenCredential] = None#
field dataset_id: str [Required]#
field group_id: Optional[str] = None#
field impersonated_user_name: Optional[str] = None#
field sample_rows_in_table_info: int = 1#
Constraints
exclusiveMinimum = 0
maximum = 10
field schemas: Dict[str, str] [Optional]#
field table_names: List[str] [Required]#
field token: Optional[str] = None#
async aget_table_info(table_names: Optional[Union[List[str], str]] = None) → str[source]#
Get information about specified tables.
async arun(command: str) → Any[source]#
Execute a DAX command and return the result asynchronously.
get_schemas() → str[source]#
Get the available schemas.
get_table_info(table_names: Optional[Union[List[str], str]] = None) → str[source]#
Get information about specified tables.
get_table_names() → Iterable[str][source]#
Get names of tables available.
run(command: str) → Any[source]#
Execute a DAX command and return a json representing the results.
property headers: Dict[str, str]#
Get the token.
property request_url: str#
Get the request url.
property table_info: str#
Information about all tables in the database.
pydantic model langchain.utilities.PubMedAPIWrapper[source]#
Wrapper around PubMed API.
This wrapper will use the PubMed API to conduct searches and fetch
document summaries. By default, it will return the document summaries
of the top-k results of an input search.
Parameters
top_k_results – number of the top-scored document used for the PubMed tool
load_max_docs – a limit to the number of loaded documents
load_all_available_meta –
if True: the metadata of the loaded Documents gets all available meta info (see https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch)
if False: the metadata gets only the most informative fields.
field doc_content_chars_max: int = 2000#
field email: str = '[email protected]'#
field load_all_available_meta: bool = False#
field load_max_docs: int = 25#
field top_k_results: int = 3#
load(query: str) → List[dict][source]#
Search PubMed for documents matching the query.
Return a list of dictionaries containing the document metadata.
load_docs(query: str) → List[langchain.schema.Document][source]#
retrieve_article(uid: str, webenv: str) → dict[source]#
run(query: str) → str[source]#
Run PubMed search and get the article meta information.
See https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch
It uses only the most informative fields of article meta information.
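Example (a minimal sketch; the query string is arbitrary):
from langchain.utilities import PubMedAPIWrapper

pubmed = PubMedAPIWrapper(top_k_results=1)
print(pubmed.run("covid vaccine efficacy"))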
pydantic model langchain.utilities.PythonREPL[source]#
Simulates a standalone Python REPL.
field globals: Optional[Dict] [Optional] (alias '_globals')#
field locals: Optional[Dict] [Optional] (alias '_locals')#
run(command: str) → str[source]#
Run command with own globals/locals and returns anything printed.
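Example (a minimal sketch):
from langchain.utilities import PythonREPL

repl = PythonREPL()
print(repl.run("print(17 * 3)"))  # captures stdout, so this returns '51\n'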
pydantic model langchain.utilities.SearxSearchWrapper[source]#
Wrapper for Searx API.
To use you need to provide the searx host by passing the named parameter
searx_host or exporting the environment variable SEARX_HOST.
In some situations you might want to disable SSL verification, for example
if you are running searx locally. You can do this by passing the named parameter
unsecure. You can also pass the host url scheme as http to disable SSL.
Example
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://localhost:8888")
Example with SSL disabled:
from langchain.utilities import SearxSearchWrapper
# note the unsecure parameter is not needed if you pass the url scheme as
# http
searx = SearxSearchWrapper(searx_host="http://localhost:8888",
unsecure=True)
Validators
disable_ssl_warnings » unsecure
validate_params » all fields
field aiosession: Optional[Any] = None#
field categories: Optional[List[str]] = []#
field engines: Optional[List[str]] = []#
field headers: Optional[dict] = None#
field k: int = 10#
field params: dict [Optional]#
field query_suffix: Optional[str] = ''#
field searx_host: str = ''#
field unsecure: bool = False#
async aresults(query: str, num_results: int, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict][source]#
Asynchronously query with json results.
Uses aiohttp. See results for more info.
async arun(query: str, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str[source]#
Asynchronously version of run.
results(query: str, num_results: int, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict][source]#
Run query through Searx API and returns the results with metadata.
Parameters
query – The query to search for.
query_suffix – Extra suffix appended to the query.
num_results – Limit the number of results to return.
engines – List of engines to use for the query.
categories – List of categories to use for the query.
**kwargs – extra parameters to pass to the searx API.
Returns
{snippet: The description of the result.
title: The title of the result.
link: The link to the result.
engines: The engines used for the result.
category: Searx category of the result.
}
Return type
Dict with the following keys
run(query: str, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str[source]#
Run query through Searx API and parse results.
You can pass any other params to the searx query API.
Parameters
query – The query to search for.
query_suffix – Extra suffix appended to the query.
engines – List of engines to use for the query.
categories – List of categories to use for the query.
**kwargs – extra parameters to pass to the searx API.
Returns
The result of the query.
Return type
str
Raises
ValueError – If an error occurred with the query.
Example
This will make a query to the qwant engine:
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://my.searx.host")
searx.run("what is the weather in France ?", engine="qwant")
# the same result can be achieved using the `!` syntax of searx
# to select the engine using `query_suffix`
searx.run("what is the weather in France ?", query_suffix="!qwant")
pydantic model langchain.utilities.SerpAPIWrapper[source]#
Wrapper around SerpAPI.
To use, you should have the google-search-results python package installed,
and the environment variable SERPAPI_API_KEY set with your API key, or pass
serpapi_api_key as a named parameter to the constructor.
Example
from langchain import SerpAPIWrapper
serpapi = SerpAPIWrapper()
field aiosession: Optional[aiohttp.client.ClientSession] = None#
field params: dict = {'engine': 'google', 'gl': 'us', 'google_domain': 'google.com', 'hl': 'en'}#
field serpapi_api_key: Optional[str] = None#
async aresults(query: str) → dict[source]#
Use aiohttp to run query through SerpAPI and return the results async.
async arun(query: str, **kwargs: Any) → str[source]#
Run query through SerpAPI and parse result async.
get_params(query: str) → Dict[str, str][source]#
Get parameters for SerpAPI.
results(query: str) → dict[source]#
Run query through SerpAPI and return the raw result.
run(query: str, **kwargs: Any) → str[source]#
Run query through SerpAPI and parse result.
class langchain.utilities.SparkSQL(spark_session: Optional[SparkSession] = None, catalog: Optional[str] = None, schema: Optional[str] = None, ignore_tables: Optional[List[str]] = None, include_tables: Optional[List[str]] = None, sample_rows_in_table_info: int = 3)[source]#
classmethod from_uri(database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any) → langchain.utilities.spark_sql.SparkSQL[source]#
Creating a remote Spark Session via Spark connect.
For example: SparkSQL.from_uri(“sc://localhost:15002”)
get_table_info(table_names: Optional[List[str]] = None) → str[source]#
get_table_info_no_throw(table_names: Optional[List[str]] = None) → str[source]#
Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If sample_rows_in_table_info, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
get_usable_table_names() → Iterable[str][source]#
Get names of tables available.
run(command: str, fetch: str = 'all') → str[source]#
run_no_throw(command: str, fetch: str = 'all') → str[source]#
Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
If the statement throws an error, the error message is returned.
pydantic model langchain.utilities.TextRequestsWrapper[source]#
Lightweight wrapper around requests library.
The main purpose of this wrapper is to always return a text output.
field aiosession: Optional[aiohttp.client.ClientSession] = None#
field headers: Optional[Dict[str, str]] = None#
async adelete(url: str, **kwargs: Any) → str[source]#
DELETE the URL and return the text asynchronously.
async aget(url: str, **kwargs: Any) → str[source]#
GET the URL and return the text asynchronously.
async apatch(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]#
PATCH the URL and return the text asynchronously.
async apost(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]#
POST to the URL and return the text asynchronously.
async aput(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]#
PUT the URL and return the text asynchronously.
delete(url: str, **kwargs: Any) → str[source]#
DELETE the URL and return the text.
get(url: str, **kwargs: Any) → str[source]#
GET the URL and return the text.
patch(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]#
PATCH the URL and return the text.
post(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]#
POST to the URL and return the text.
put(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]#
PUT the URL and return the text.
property requests: langchain.requests.Requests#
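Example (a minimal sketch; the URL and header value are placeholders):
from langchain.utilities import TextRequestsWrapper

requests_wrapper = TextRequestsWrapper(headers={"User-Agent": "langchain-example"})
html = requests_wrapper.get("https://example.com")
print(html[:200])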
pydantic model langchain.utilities.TwilioAPIWrapper[source]#
SMS client using Twilio.
To use, you should have the twilio python package installed,
and the environment variables TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN, and
TWILIO_FROM_NUMBER, or pass account_sid, auth_token, and from_number as
named parameters to the constructor.
Example
from langchain.utilities.twilio import TwilioAPIWrapper
twilio = TwilioAPIWrapper(
account_sid="ACxxx",
auth_token="xxx",
from_number="+10123456789"
)
twilio.run('test', '+12484345508')
field account_sid: Optional[str] = None#
Twilio account string identifier.
field auth_token: Optional[str] = None#
Twilio auth token.
field from_number: Optional[str] = None#
A Twilio phone number in [E.164](https://www.twilio.com/docs/glossary/what-e164)
format, an
[alphanumeric sender ID](https://www.twilio.com/docs/sms/send-messages#use-an-alphanumeric-sender-id),
or a [Channel Endpoint address](https://www.twilio.com/docs/sms/channels#channel-addresses)
that is enabled for the type of message you want to send. Phone numbers or
[short codes](https://www.twilio.com/docs/sms/api/short-code) purchased from
Twilio also work here. You cannot, for example, spoof messages from a private
cell phone number. If you are using messaging_service_sid, this parameter
must be empty.
run(body: str, to: str) → str[source]#
Run body through Twilio and respond with message sid.
Parameters
body – The text of the message you want to send. Can be up to 1,600
characters in length.
to – The destination phone number in
[E.164](https://www.twilio.com/docs/glossary/what-e164) format for
SMS/MMS or
[Channel user address](https://www.twilio.com/docs/sms/channels#channel-addresses)
for other 3rd-party channels.
pydantic model langchain.utilities.WikipediaAPIWrapper[source]#
Wrapper around WikipediaAPI.
To use, you should have the wikipedia python package installed.
This wrapper will use the Wikipedia API to conduct searches and
fetch page summaries. By default, it will return the page summaries
of the top-k results.
It limits the Document content by doc_content_chars_max.
field doc_content_chars_max: int = 4000#
field lang: str = 'en'#
field load_all_available_meta: bool = False#
field top_k_results: int = 3#
load(query: str) → List[langchain.schema.Document][source]#
Run Wikipedia search and get the article text plus the meta information.
Returns: a list of documents.
run(query: str) → str[source]#
Run Wikipedia search and get page summaries.
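Example (a minimal sketch; requires the wikipedia package):
from langchain.utilities import WikipediaAPIWrapper

wikipedia = WikipediaAPIWrapper(top_k_results=1)
print(wikipedia.run("Large language model"))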
pydantic model langchain.utilities.WolframAlphaAPIWrapper[source]#
Wrapper for Wolfram Alpha.
Docs for using:
Go to wolfram alpha and sign up for a developer account
Create an app and get your APP ID
Save your APP ID into WOLFRAM_ALPHA_APPID env variable
pip install wolframalpha
field wolfram_alpha_appid: Optional[str] = None#
run(query: str) → str[source]#
Run query through WolframAlpha and parse result.
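Example (an illustrative sketch, assuming WOLFRAM_ALPHA_APPID is set and wolframalpha is installed):
from langchain.utilities import WolframAlphaAPIWrapper

wolfram = WolframAlphaAPIWrapper()
print(wolfram.run("What is the square root of 1764?"))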
SearxNG Search#
Utility for using SearxNG meta search API.
SearxNG is a privacy-friendly free metasearch engine that aggregates results from
multiple search engines and databases and
supports the OpenSearch
specification.
More details on the installation instructions here.
For the search API refer to https://docs.searxng.org/dev/search_api.html
Quick Start#
In order to use this utility you need to provide the searx host. This can be done
by passing the named parameter searx_host
or exporting the environment variable SEARX_HOST.
Note: this is the only required parameter.
Then create a searx search instance like this:
from langchain.utilities import SearxSearchWrapper
# when the host starts with `http` SSL is disabled and the connection
# is assumed to be on a private network
searx_host='http://self.hosted'
search = SearxSearchWrapper(searx_host=searx_host)
You can now use the search instance to query the searx API.
Searching#
Use the run() and
results() methods to query the searx API.
Other methods are available for convenience.
SearxResults is a convenience wrapper around the raw json result.
Example usage of the run method to make a search:
s.run(query="what is the best search engine?")
Engine Parameters#
You can pass any accepted searx search API parameters to the
SearxSearchWrapper instance.
In the following example we are using the
engines and the language parameters:
# assuming the searx host is set as above or exported as an env variable
s = SearxSearchWrapper(engines=['google', 'bing'],
language='es')
Search Tips#
Searx offers a special
search syntax
that can also be used instead of passing engine parameters.
For example the following query:
s = SearxSearchWrapper(searx_host=searx_host)
s.run("langchain library", engines=['github'])
# can also be written as:
s.run("langchain library !github")
# or even:
s.run("langchain library !gh")
In some situations you might want to pass an extra string to the search query.
For example when the run() method is called by an agent. The search suffix can
also be used as a way to pass extra parameters to searx or the underlying search
engines.
# select the github engine by setting the search suffix on the instance
s = SearxSearchWrapper(searx_host=searx_host, query_suffix="!gh")
s.run("langchain library")
# or select github per call, using the conventional google search syntax
s.run("large language models", query_suffix="site:github.com")
NOTE: A search suffix can be defined on both the instance and the method level.
The resulting query will be the concatenation of the two with the former taking
precedence.
See SearxNG Configured Engines and
SearxNG Search Syntax
for more details.
Notes
This wrapper is based on the SearxNG fork searxng/searxng which is
better maintained than the original Searx project and offers more features.
Public searxNG instances often use a rate limiter for API usage, so you might want to
use a self hosted instance and disable the rate limiter.
If you are self-hosting an instance you can customize the rate limiter for your
own network as described here.
For a list of public SearxNG instances see https://searx.space/
class langchain.utilities.searx_search.SearxResults(data: str)[source]#
Dict like wrapper around search api results.
property answers: Any#
Helper accessor on the json result.
pydantic model langchain.utilities.searx_search.SearxSearchWrapper[source]#
Wrapper for Searx API.
To use you need to provide the searx host by passing the named parameter
searx_host or exporting the environment variable SEARX_HOST.
In some situations you might want to disable SSL verification, for example
if you are running searx locally. You can do this by passing the named parameter
unsecure. You can also pass the host url scheme as http to disable SSL.
Example
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://localhost:8888")
Example with SSL disabled:
from langchain.utilities import SearxSearchWrapper
# note the unsecure parameter is not needed if you pass the url scheme as
# http
searx = SearxSearchWrapper(searx_host="http://localhost:8888",
unsecure=True)
Validators
disable_ssl_warnings » unsecure
validate_params » all fields
field aiosession: Optional[Any] = None#
field categories: Optional[List[str]] = []#
field engines: Optional[List[str]] = []#
field headers: Optional[dict] = None#
field k: int = 10#
field params: dict [Optional]#
field query_suffix: Optional[str] = ''#
field searx_host: str = ''#
field unsecure: bool = False#
async aresults(query: str, num_results: int, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict][source]#
Asynchronously query with json results.
Uses aiohttp. See results for more info.
async arun(query: str, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str[source]#
Asynchronous version of run.
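A short sketch of the async API, assuming an instance at http://localhost:8888 and aiohttp installed:
import asyncio
from langchain.utilities import SearxSearchWrapper
async def main() -> None:
    searx = SearxSearchWrapper(searx_host="http://localhost:8888")
    # arun returns the parsed results as a single string
    summary = await searx.arun("what is a large language model?")
    # aresults returns the raw result dicts with metadata
    results = await searx.aresults("large language models", num_results=5)
    print(summary)
    print([r["title"] for r in results])
asyncio.run(main())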
results(query: str, num_results: int, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict][source]#
Run query through Searx API and returns the results with metadata.
Parameters
query – The query to search for.
query_suffix – Extra suffix appended to the query.
num_results – Limit the number of results to return.
engines – List of engines to use for the query.
categories – List of categories to use for the query.
**kwargs – extra parameters to pass to the searx API.
Returns
A list of result dicts, each with the following keys:
snippet – The description of the result.
title – The title of the result.
link – The link to the result.
engines – The engines used for the result.
category – Searx category of the result.
Return type
List[Dict]
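A brief usage sketch, with the host assumed to be a local instance:
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://localhost:8888")
# Ask for five results from one engine and read the metadata keys listed above.
for result in searx.results("large language models", num_results=5, engines=["google"]):
    print(result["title"], "->", result["link"])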
run(query: str, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str[source]#
Run query through Searx API and parse results.
You can pass any other params to the searx query API.
Parameters
query – The query to search for.
query_suffix – Extra suffix appended to the query.
engines – List of engines to use for the query.
categories – List of categories to use for the query.
**kwargs – extra parameters to pass to the searx API.
Returns
The result of the query.
Return type
str
Raises
ValueError – If an error occurred with the query.
Example
This will make a query to the qwant engine:
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://my.searx.host")
searx.run("what is the weather in France ?", engine="qwant")
# the same result can be achieved using the `!` syntax of searx
# to select the engine using `query_suffix`
searx.run("what is the weather in France ?", query_suffix="!qwant")
Embeddings#
Wrappers around embedding modules.
pydantic model langchain.embeddings.AlephAlphaAsymmetricSemanticEmbedding[source]#
Wrapper for Aleph Alpha’s Asymmetric Embeddings
AA provides you with an endpoint to embed a document and a query.
The models were optimized to make the embeddings of documents and
the query for a document as similar as possible.
To learn more, check out: https://docs.aleph-alpha.com/docs/tasks/semantic_embed/
Example
from langchain.embeddings import AlephAlphaAsymmetricSemanticEmbedding
embeddings = AlephAlphaAsymmetricSemanticEmbedding()
document = "This is the content of the document"
query = "What is the content of the document?"
doc_result = embeddings.embed_documents([document])
query_result = embeddings.embed_query(query)
field aleph_alpha_api_key: Optional[str] = None#
API key for Aleph Alpha API.
field compress_to_size: Optional[int] = 128#
Should the returned embeddings come back as the original 5120-dim vector,
or should they be compressed to 128-dim.
field contextual_control_threshold: Optional[int] = None#
Attention control parameters only apply to those tokens that have
explicitly been set in the request.
field control_log_additive: Optional[bool] = True#
Apply controls on prompt items by adding the log(control_factor)
to attention scores.
field hosting: Optional[str] = 'https://api.aleph-alpha.com'#
Optional parameter that specifies which datacenters may process the request.
field model: Optional[str] = 'luminous-base'#
Model name to use.
field normalize: Optional[bool] = True#
Should returned embeddings be normalized
embed_documents(texts: List[str]) → List[List[float]][source]#
Call out to Aleph Alpha’s asymmetric Document endpoint.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Call out to Aleph Alpha’s asymmetric, query embedding endpoint.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
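Because documents and queries are embedded into the same space, relevance can be scored with cosine similarity. A minimal sketch reusing doc_result and query_result from the example above (numpy is assumed to be installed):
import numpy as np
doc_vec = np.array(doc_result[0])   # embed_documents returns one vector per input text
query_vec = np.array(query_result)  # embed_query returns a single vector
score = doc_vec @ query_vec / (np.linalg.norm(doc_vec) * np.linalg.norm(query_vec))
print(float(score))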
pydantic model langchain.embeddings.AlephAlphaSymmetricSemanticEmbedding[source]#
The symmetric version of the Aleph Alpha’s semantic embeddings.
The main difference is that here, both the documents and
queries are embedded with a SemanticRepresentation.Symmetric
Example
from langchain.embeddings import AlephAlphaSymmetricSemanticEmbedding
embeddings = AlephAlphaSymmetricSemanticEmbedding()
text = "This is a test text"
doc_result = embeddings.embed_documents([text])
query_result = embeddings.embed_query(text)
embed_documents(texts: List[str]) → List[List[float]][source]#
Call out to Aleph Alpha’s Document endpoint.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Call out to Aleph Alpha’s symmetric embedding endpoint.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.BedrockEmbeddings[source]#
Embeddings provider to invoke Bedrock embedding models.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Bedrock service.
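A minimal construction sketch; the profile name and region below are placeholders for your own AWS setup, not required values:
from langchain.embeddings import BedrockEmbeddings
embeddings = BedrockEmbeddings(
    credentials_profile_name="bedrock-admin",  # assumed profile in ~/.aws/credentials
    region_name="us-west-2",
    model_id="amazon.titan-e1t-medium",
)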
field credentials_profile_name: Optional[str] = None#
The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
field model_id: str = 'amazon.titan-e1t-medium'#
Id of the model to call, e.g., amazon.titan-e1t-medium. This is
equivalent to the modelId property in the list-foundation-models API.
field model_kwargs: Optional[Dict] = None#
Keyword arguments to pass to the model.
field region_name: Optional[str] = None#
The AWS region, e.g., us-west-2. Falls back to the AWS_DEFAULT_REGION env variable
or the region specified in ~/.aws/config in case it is not provided here.
embed_documents(texts: List[str], chunk_size: int = 1) → List[List[float]][source]#
Compute doc embeddings using a Bedrock model.
Parameters
texts – The list of texts to embed.
chunk_size – Bedrock currently only allows single string
inputs, so chunk size is always 1. This input is here
only for compatibility with the embeddings interface.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a Bedrock model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.CohereEmbeddings[source]#
Wrapper around Cohere embedding models.
To use, you should have the cohere python package installed, and the
environment variable COHERE_API_KEY set with your API key or pass it
as a named parameter to the constructor.
Example
from langchain.embeddings import CohereEmbeddings
cohere = CohereEmbeddings(
model="embed-english-light-v2.0", cohere_api_key="my-api-key"
)
field model: str = 'embed-english-v2.0'#
Model name to use.
field truncate: Optional[str] = None#
Truncate embeddings that are too long from start or end (“NONE”|”START”|”END”)
embed_documents(texts: List[str]) → List[List[float]][source]#
Call out to Cohere’s embedding endpoint.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Call out to Cohere’s embedding endpoint.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.DeepInfraEmbeddings[source]#
Wrapper around Deep Infra’s embedding inference service.
To use, you should have the
environment variable DEEPINFRA_API_TOKEN set with your API token, or pass
it as a named parameter to the constructor.
There are multiple embeddings models available,
see https://deepinfra.com/models?type=embeddings.
Example
from langchain.embeddings import DeepInfraEmbeddings
deepinfra_emb = DeepInfraEmbeddings(
model_id="sentence-transformers/clip-ViT-B-32",
deepinfra_api_token="my-api-key"
)
r1 = deepinfra_emb.embed_documents(
[
"Alpha is the first letter of Greek alphabet",
"Beta is the second letter of Greek alphabet",
]
)
r2 = deepinfra_emb.embed_query(
"What is the second letter of Greek alphabet"
)
field embed_instruction: str = 'passage: '#
Instruction used to embed documents.
field model_id: str = 'sentence-transformers/clip-ViT-B-32'#
Embeddings model to use.
field model_kwargs: Optional[dict] = None#
Other model keyword args
field normalize: bool = False#
Whether to normalize the computed embeddings.
field query_instruction: str = 'query: '#
Instruction used to embed the query.
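A sketch of overriding the instruction prefixes; the values shown simply restate the defaults and are illustrative, not recommended settings:
from langchain.embeddings import DeepInfraEmbeddings
deepinfra_emb = DeepInfraEmbeddings(
    model_id="sentence-transformers/clip-ViT-B-32",
    embed_instruction="passage: ",  # prepended to each document
    query_instruction="query: ",    # prepended to each query
    normalize=True,
)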
embed_documents(texts: List[str]) → List[List[float]][source]#
Embed documents using a Deep Infra deployed embedding model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Embed a query using a Deep Infra deployed embedding model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
class langchain.embeddings.ElasticsearchEmbeddings(client: MlClient, model_id: str, *, input_field: str = 'text_field')[source]#
Wrapper around Elasticsearch embedding models.
This class provides an interface to generate embeddings using a model deployed
in an Elasticsearch cluster. It requires an Elasticsearch connection object
and the model_id of the model deployed in the cluster.
In Elasticsearch you need to have an embedding model loaded and deployed.
- https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html
- https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html
embed_documents(texts: List[str]) → List[List[float]][source]#
Generate embeddings for a list of documents.
Parameters
texts (List[str]) – A list of document text strings to generate embeddings
for.
Returns
A list of embeddings, one for each document in the input list.
Return type
List[List[float]]
embed_query(text: str) → List[float][source]#
Generate an embedding for a single query text.
Parameters
text (str) – The query text to generate an embedding for.
Returns
The embedding for the input query text.
Return type
List[float]
classmethod from_credentials(model_id: str, *, es_cloud_id: Optional[str] = None, es_user: Optional[str] = None, es_password: Optional[str] = None, input_field: str = 'text_field') → langchain.embeddings.elasticsearch.ElasticsearchEmbeddings[source]#
Instantiate embeddings from Elasticsearch credentials.
Parameters
model_id (str) – The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str) – The name of the key for the input text field in the
document. Defaults to ‘text_field’.
es_cloud_id – (str, optional): The Elasticsearch cloud ID to connect to.
es_user – (str, optional): Elasticsearch username.
es_password – (str, optional): Elasticsearch password.
Example
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Credentials can be passed in two ways. Either set the env vars
# ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically
# pulled in, or pass them in directly as kwargs.
embeddings = ElasticsearchEmbeddings.from_credentials(
model_id,
input_field=input_field,
# es_cloud_id="foo",
# es_user="bar",
# es_password="baz",
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
embeddings.embed_documents(documents)
classmethod from_es_connection(model_id: str, es_connection: Elasticsearch, input_field: str = 'text_field') → ElasticsearchEmbeddings[source]#
Instantiate embeddings from an existing Elasticsearch connection.
This method provides a way to create an instance of the ElasticsearchEmbeddings
class using an existing Elasticsearch connection. The connection object is used
to create an MlClient, which is then used to initialize the
ElasticsearchEmbeddings instance.
Parameters
model_id (str) – The model_id of the model deployed in the Elasticsearch cluster.
es_connection (elasticsearch.Elasticsearch) – An existing Elasticsearch connection object.
input_field (str, optional) – The name of the key for the input text field in the document. Defaults to ‘text_field’.
Returns
An instance of the ElasticsearchEmbeddings class.
Return type
ElasticsearchEmbeddings
Example
from elasticsearch import Elasticsearch
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Create Elasticsearch connection
es_connection = Elasticsearch(
hosts=["localhost:9200"], http_auth=("user", "password")
)
# Instantiate ElasticsearchEmbeddings using the existing connection
embeddings = ElasticsearchEmbeddings.from_es_connection(
model_id,
es_connection,
input_field=input_field,
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
embeddings.embed_documents(documents)
pydantic model langchain.embeddings.FakeEmbeddings[source]#
embed_documents(texts: List[str]) → List[List[float]][source]#
Embed search docs.
embed_query(text: str) → List[float][source]#
Embed query text.
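FakeEmbeddings returns random vectors, which makes it useful for testing pipelines without calling a real embedding service. A minimal sketch, assuming the version in use exposes a size field for the vector length:
from langchain.embeddings import FakeEmbeddings
fake = FakeEmbeddings(size=256)  # assumed field: length of each random vector
vectors = fake.embed_documents(["first doc", "second doc"])
query_vector = fake.embed_query("a query")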
pydantic model langchain.embeddings.HuggingFaceEmbeddings[source]#
Wrapper around sentence_transformers embedding models.
To use, you should have the sentence_transformers python package installed.
Example
from langchain.embeddings import HuggingFaceEmbeddings
model_name = "sentence-transformers/all-mpnet-base-v2"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': False}
hf = HuggingFaceEmbeddings(
model_name=model_name,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs
)
field cache_folder: Optional[str] = None#
Path to store models.
Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable.
field encode_kwargs: Dict[str, Any] [Optional]#
Keyword arguments to pass when calling the encode method of the model.
field model_kwargs: Dict[str, Any] [Optional]#
Keyword arguments to pass to the model.
field model_name: str = 'sentence-transformers/all-mpnet-base-v2'#
Model name to use.
embed_documents(texts: List[str]) → List[List[float]][source]#
Compute doc embeddings using a HuggingFace transformer model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a HuggingFace transformer model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.HuggingFaceHubEmbeddings[source]#
Wrapper around HuggingFaceHub embedding models.
To use, you should have the huggingface_hub python package installed, and the
environment variable HUGGINGFACEHUB_API_TOKEN set with your API token, or pass
it as a named parameter to the constructor.
Example
from langchain.embeddings import HuggingFaceHubEmbeddings
repo_id = "sentence-transformers/all-mpnet-base-v2"
hf = HuggingFaceHubEmbeddings(
repo_id=repo_id,
task="feature-extraction",
huggingfacehub_api_token="my-api-key",
)
field model_kwargs: Optional[dict] = None#
Keyword arguments to pass to the model.
field repo_id: str = 'sentence-transformers/all-mpnet-base-v2'#
Model name to use.
field task: Optional[str] = 'feature-extraction'#
Task to call the model with.
embed_documents(texts: List[str]) → List[List[float]][source]#
Call out to HuggingFaceHub’s embedding endpoint for embedding search docs.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Call out to HuggingFaceHub’s embedding endpoint for embedding query text.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.HuggingFaceInstructEmbeddings[source]#
Wrapper around sentence_transformers embedding models.
To use, you should have the sentence_transformers
and InstructorEmbedding python packages installed.
Example
from langchain.embeddings import HuggingFaceInstructEmbeddings
model_name = "hkunlp/instructor-large"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': True}
hf = HuggingFaceInstructEmbeddings(
model_name=model_name,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs
)
field cache_folder: Optional[str] = None#
Path to store models.
Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable.
field embed_instruction: str = 'Represent the document for retrieval: '#
Instruction to use for embedding documents.
field encode_kwargs: Dict[str, Any] [Optional]#
Keyword arguments to pass when calling the encode method of the model.
field model_kwargs: Dict[str, Any] [Optional]#
Keyword arguments to pass to the model.
field model_name: str = 'hkunlp/instructor-large'#
Model name to use.
field query_instruction: str = 'Represent the question for retrieving supporting documents: '#
Instruction to use for embedding query.
embed_documents(texts: List[str]) → List[List[float]][source]#
Compute doc embeddings using a HuggingFace instruct model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a HuggingFace instruct model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.LlamaCppEmbeddings[source]#
Wrapper around llama.cpp embedding models.
To use, you should have the llama-cpp-python library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out: abetlen/llama-cpp-python
Example
from langchain.embeddings import LlamaCppEmbeddings
llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
field f16_kv: bool = False#
Use half-precision for key/value cache.
field logits_all: bool = False#
Return logits for all tokens, not just the last token.
field n_batch: Optional[int] = 8#
Number of tokens to process in parallel.
Should be a number between 1 and n_ctx.
field n_ctx: int = 512#
Token context window.
field n_gpu_layers: Optional[int] = None#
Number of layers to be loaded into GPU memory. Default None.
field n_parts: int = -1#
Number of parts to split the model into.
If -1, the number of parts is automatically determined.
field n_threads: Optional[int] = None#
Number of threads to use. If None, the number
of threads is automatically determined.
field seed: int = -1#
Seed. If -1, a random seed is used.
field use_mlock: bool = False#
Force system to keep model in RAM.
field vocab_only: bool = False#
Only load the vocabulary, no weights.
embed_documents(texts: List[str]) → List[List[float]][source]#
Embed a list of documents using the Llama model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Embed a query using the Llama model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
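A sketch combining a few of the fields above; the model path is a placeholder, and n_gpu_layers only has an effect with a GPU-enabled build of llama-cpp-python:
from langchain.embeddings import LlamaCppEmbeddings
llama = LlamaCppEmbeddings(
    model_path="/path/to/model.bin",  # placeholder path to a local GGML model
    n_ctx=2048,                       # larger token context window
    n_gpu_layers=32,                  # offload layers to the GPU if available
)
vector = llama.embed_query("A quick test sentence")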
pydantic model langchain.embeddings.MiniMaxEmbeddings[source]#
Wrapper around MiniMax’s embedding inference service.
To use, you should have the environment variables MINIMAX_GROUP_ID and
MINIMAX_API_KEY set with your API token, or pass them as named parameters to
the constructor.
Example
from langchain.embeddings import MiniMaxEmbeddings
embeddings = MiniMaxEmbeddings()
query_text = "This is a test query."
query_result = embeddings.embed_query(query_text)
document_text = "This is a test document."
document_result = embeddings.embed_documents([document_text])
field embed_type_db: str = 'db'#
For embed_documents
field embed_type_query: str = 'query'#
For embed_query
field endpoint_url: str = 'https://api.minimax.chat/v1/embeddings'#
Endpoint URL to use.
field minimax_api_key: Optional[str] = None#
API Key for MiniMax API.
field minimax_group_id: Optional[str] = None#
Group ID for MiniMax API.
field model: str = 'embo-01'#
Embeddings model name to use.
embed_documents(texts: List[str]) → List[List[float]][source]#
Embed documents using a MiniMax embedding endpoint.
Parameters