Source code for langchain.memory.chat_message_histories.momento
from __future__ import annotations
import json
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Optional
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
_message_to_dict,
messages_from_dict,
)
from langchain.utils import get_from_env
if TYPE_CHECKING:
import momento
def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None:
"""Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
from momento.responses import CreateCache
create_cache_response = cache_client.create_cache(cache_name)
if isinstance(create_cache_response, CreateCache.Success) or isinstance(
create_cache_response, CreateCache.CacheAlreadyExists
):
return None
elif isinstance(create_cache_response, CreateCache.Error):
raise create_cache_response.inner_exception
else:
raise Exception(f"Unexpected response cache creation: {create_cache_response}")
class MomentoChatMessageHistory(BaseChatMessageHistory):
"""Chat message history cache that uses Momento as a backend.
See https://gomomento.com/"""
def __init__(
self,
session_id: str,
cache_client: momento.CacheClient,
cache_name: str,
*,
key_prefix: str = "message_store:",
ttl: Optional[timedelta] = None,
ensure_cache_exists: bool = True,
):
"""Instantiate a chat message history cache that uses Momento as a backend.
Note: to instantiate the cache client passed to MomentoChatMessageHistory,
you must have a Momento account at https://gomomento.com/.
Args:
session_id (str): The session ID to use for this chat session.
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the messages.
key_prefix (str, optional): The prefix to apply to the cache key.
Defaults to "message_store:".
ttl (Optional[timedelta], optional): The TTL to use for the messages.
Defaults to None, ie the default TTL of the cache will be used.
ensure_cache_exists (bool, optional): Create the cache if it doesn't exist.
Defaults to True.
Raises:
ImportError: Momento python package is not installed.
TypeError: cache_client is not of type momento.CacheClient
"""
try:
from momento import CacheClient
from momento.requests import CollectionTtl
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if not isinstance(cache_client, CacheClient):
raise TypeError("cache_client must be a momento.CacheClient object.")
if ensure_cache_exists:
_ensure_cache_exists(cache_client, cache_name)
self.key = key_prefix + session_id
self.cache_client = cache_client
self.cache_name = cache_name
if ttl is not None:
self.ttl = CollectionTtl.of(ttl)
else:
self.ttl = CollectionTtl.from_cache_ttl()
@classmethod
def from_client_params(
cls,
session_id: str,
cache_name: str,
ttl: timedelta,
*,
configuration: Optional[momento.config.Configuration] = None,
auth_token: Optional[str] = None,
**kwargs: Any,
) -> MomentoChatMessageHistory:
"""Construct cache from CacheClient parameters."""
try:
from momento import CacheClient, Configurations, CredentialProvider
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if configuration is None:
configuration = Configurations.Laptop.v1()
auth_token = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN")
credentials = CredentialProvider.from_string(auth_token)
cache_client = CacheClient(configuration, credentials, default_ttl=ttl)
return cls(session_id, cache_client, cache_name, ttl=ttl, **kwargs)
@property
def messages(self) -> list[BaseMessage]: # type: ignore[override]
"""Retrieve the messages from Momento.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
Returns:
list[BaseMessage]: List of cached messages
"""
from momento.responses import CacheListFetch
fetch_response = self.cache_client.list_fetch(self.cache_name, self.key)
if isinstance(fetch_response, CacheListFetch.Hit):
items = [json.loads(m) for m in fetch_response.value_list_string]
return messages_from_dict(items)
elif isinstance(fetch_response, CacheListFetch.Miss):
return []
elif isinstance(fetch_response, CacheListFetch.Error):
raise fetch_response.inner_exception
else:
raise Exception(f"Unexpected response: {fetch_response}")
def add_message(self, message: BaseMessage) -> None:
"""Store a message in the cache.
Args:
message (BaseMessage): The message object to store.
Raises:
SdkException: Momento service or network error.
Exception: Unexpected response.
"""
from momento.responses import CacheListPushBack
item = json.dumps(_message_to_dict(message))
push_response = self.cache_client.list_push_back(
self.cache_name, self.key, item, ttl=self.ttl
)
if isinstance(push_response, CacheListPushBack.Success):
return None
elif isinstance(push_response, CacheListPushBack.Error):
raise push_response.inner_exception
else:
raise Exception(f"Unexpected response: {push_response}")
def clear(self) -> None:
"""Remove the session's messages from the cache.
Raises:
SdkException: Momento service or network error.
Exception: Unexpected response.
"""
from momento.responses import CacheDelete
delete_response = self.cache_client.delete(self.cache_name, self.key)
if isinstance(delete_response, CacheDelete.Success):
return None
elif isinstance(delete_response, CacheDelete.Error):
raise delete_response.inner_exception
else:
raise Exception(f"Unexpected response: {delete_response}")
Source code for langchain.memory.chat_message_histories.cosmos_db
"""Azure CosmosDB Memory History."""
from __future__ import annotations
import logging
from types import TracebackType
from typing import TYPE_CHECKING, Any, List, Optional, Type
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
messages_from_dict,
messages_to_dict,
)
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from azure.cosmos import ContainerProxy
class CosmosDBChatMessageHistory(BaseChatMessageHistory):
"""Chat history backed by Azure CosmosDB."""
def __init__(
self,
cosmos_endpoint: str,
cosmos_database: str,
cosmos_container: str,
session_id: str,
user_id: str,
credential: Any = None,
connection_string: Optional[str] = None,
ttl: Optional[int] = None,
cosmos_client_kwargs: Optional[dict] = None,
):
"""
Initializes a new instance of the CosmosDBChatMessageHistory class.
Make sure to call prepare_cosmos or use the context manager to make
sure your database is ready.
Either a credential or a connection string must be provided.
:param cosmos_endpoint: The connection endpoint for the Azure Cosmos DB account.
:param cosmos_database: The name of the database to use.
:param cosmos_container: The name of the container to use.
:param session_id: The session ID to use, can be overwritten while loading.
:param user_id: The user ID to use, can be overwritten while loading.
:param credential: The credential to use to authenticate to Azure Cosmos DB.
:param connection_string: The connection string to use to authenticate.
:param ttl: The time to live (in seconds) to use for documents in the container.
:param cosmos_client_kwargs: Additional kwargs to pass to the CosmosClient.
"""
self.cosmos_endpoint = cosmos_endpoint
self.cosmos_database = cosmos_database
self.cosmos_container = cosmos_container
self.credential = credential
self.conn_string = connection_string
self.session_id = session_id
self.user_id = user_id
self.ttl = ttl
self.messages: List[BaseMessage] = []
try:
from azure.cosmos import ( # pylint: disable=import-outside-toplevel # noqa: E501
CosmosClient,
)
except ImportError as exc:
raise ImportError(
"You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501
) from exc
if self.credential:
self._client = CosmosClient(
url=self.cosmos_endpoint,
credential=self.credential,
**cosmos_client_kwargs or {},
)
elif self.conn_string:
self._client = CosmosClient.from_connection_string(
conn_str=self.conn_string,
**cosmos_client_kwargs or {},
)
else:
raise ValueError("Either a connection string or a credential must be set.")
self._container: Optional[ContainerProxy] = None
def prepare_cosmos(self) -> None:
"""Prepare the CosmosDB client.
Use this function or the context manager to make sure your database is ready.
"""
try:
from azure.cosmos import (  # pylint: disable=import-outside-toplevel # noqa: E501
PartitionKey,
)
except ImportError as exc:
raise ImportError(
"You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501
) from exc
database = self._client.create_database_if_not_exists(self.cosmos_database)
self._container = database.create_container_if_not_exists(
self.cosmos_container,
partition_key=PartitionKey("/user_id"),
default_ttl=self.ttl,
)
self.load_messages()
def __enter__(self) -> "CosmosDBChatMessageHistory":
"""Context manager entry point."""
self._client.__enter__()
self.prepare_cosmos()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
"""Context manager exit"""
self.upsert_messages()
self._client.__exit__(exc_type, exc_val, traceback)
def load_messages(self) -> None:
"""Retrieve the messages from Cosmos"""
if not self._container:
raise ValueError("Container not initialized")
try:
from azure.cosmos.exceptions import ( # pylint: disable=import-outside-toplevel # noqa: E501
CosmosHttpResponseError,
)
except ImportError as exc:
raise ImportError(
"You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501
) from exc
try:
item = self._container.read_item(
item=self.session_id, partition_key=self.user_id
)
except CosmosHttpResponseError:
logger.info("no session found")
return
if "messages" in item and len(item["messages"]) > 0:
self.messages = messages_from_dict(item["messages"])
def add_message(self, message: BaseMessage) -> None:
"""Add a self-created message to the store"""
self.messages.append(message)
self.upsert_messages()
def upsert_messages(self) -> None:
"""Update the cosmosdb item."""
if not self._container:
raise ValueError("Container not initialized")
self._container.upsert_item(
body={
"id": self.session_id,
"user_id": self.user_id,
"messages": messages_to_dict(self.messages),
}
)
def clear(self) -> None:
"""Clear session memory from this memory and cosmos."""
self.messages = []
if self._container:
self._container.delete_item(
item=self.session_id, partition_key=self.user_id
)
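For illustration, a minimal usage sketch of the context-manager flow above (not part of the module source; endpoint, database, container, and connection string are placeholders):

from langchain.memory.chat_message_histories.cosmos_db import CosmosDBChatMessageHistory
from langchain.schema import HumanMessage

history = CosmosDBChatMessageHistory(
    cosmos_endpoint="https://<account>.documents.azure.com:443/",
    cosmos_database="chat_db",
    cosmos_container="messages",
    session_id="session-1",
    user_id="user-1",
    connection_string="<your-connection-string>",
)
# __enter__ calls prepare_cosmos(); __exit__ calls upsert_messages()
with history:
    history.add_message(HumanMessage(content="Hello!"))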
Source code for langchain.memory.chat_message_histories.in_memory
from typing import List
from pydantic import BaseModel
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
)
class ChatMessageHistory(BaseChatMessageHistory, BaseModel):
messages: List[BaseMessage] = []
def add_message(self, message: BaseMessage) -> None:
"""Add a self-created message to the store"""
self.messages.append(message)
def clear(self) -> None:
self.messages = []
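A quick usage sketch of the in-memory history, which simply appends to a Python list (the add_user_message and add_ai_message helpers come from BaseChatMessageHistory):

from langchain.memory import ChatMessageHistory

history = ChatMessageHistory()
history.add_user_message("Hi!")
history.add_ai_message("Hello! How can I help you?")
print(history.messages)  # [HumanMessage(...), AIMessage(...)]
history.clear()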
Source code for langchain.memory.chat_message_histories.dynamodb
import logging
from typing import List
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
_message_to_dict,
messages_from_dict,
messages_to_dict,
)
logger = logging.getLogger(__name__)
class DynamoDBChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in AWS DynamoDB.
This class expects that a DynamoDB table with name `table_name`
and a partition Key of `SessionId` is present.
Args:
table_name: name of the DynamoDB table
session_id: arbitrary key that is used to store the messages
of a single chat session.
"""
def __init__(self, table_name: str, session_id: str):
import boto3
client = boto3.resource("dynamodb")
self.table = client.Table(table_name)
self.session_id = session_id
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from DynamoDB"""
from botocore.exceptions import ClientError
response = None  # ensure 'response' is defined if get_item raises
try:
response = self.table.get_item(Key={"SessionId": self.session_id})
except ClientError as error:
if error.response["Error"]["Code"] == "ResourceNotFoundException":
logger.warning("No record found with session id: %s", self.session_id)
else:
logger.error(error)
if response and "Item" in response:
items = response["Item"]["History"]
else:
items = []
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in DynamoDB"""
from botocore.exceptions import ClientError
messages = messages_to_dict(self.messages)
_message = _message_to_dict(message)
messages.append(_message)
try:
self.table.put_item(
Item={"SessionId": self.session_id, "History": messages}
)
except ClientError as err:
logger.error(err)
def clear(self) -> None:
"""Clear session memory from DynamoDB"""
from botocore.exceptions import ClientError
try:
self.table.delete_item(Key={"SessionId": self.session_id})
except ClientError as err:
logger.error(err)
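For illustration, a minimal usage sketch (not part of the module source). It assumes a DynamoDB table named SessionTable with a SessionId partition key already exists and AWS credentials are configured:

from langchain.memory.chat_message_histories import DynamoDBChatMessageHistory
from langchain.schema import HumanMessage

history = DynamoDBChatMessageHistory(table_name="SessionTable", session_id="session-1")
history.add_message(HumanMessage(content="Hello!"))  # appends to the "History" attribute
print(history.messages)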
Source code for langchain.memory.chat_message_histories.postgres
import json
import logging
from typing import List
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
_message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
DEFAULT_CONNECTION_STRING = "postgresql://postgres:mypassword@localhost/chat_history"
class PostgresChatMessageHistory(BaseChatMessageHistory):
def __init__(
self,
session_id: str,
connection_string: str = DEFAULT_CONNECTION_STRING,
table_name: str = "message_store",
):
import psycopg
from psycopg.rows import dict_row
try:
self.connection = psycopg.connect(connection_string)
self.cursor = self.connection.cursor(row_factory=dict_row)
except psycopg.OperationalError as error:
logger.error(error)
raise  # re-raise so a failed connection is not silently swallowed
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
def _create_table_if_not_exists(self) -> None:
create_table_query = f"""CREATE TABLE IF NOT EXISTS {self.table_name} (
id SERIAL PRIMARY KEY,
session_id TEXT NOT NULL,
message JSONB NOT NULL
);"""
self.cursor.execute(create_table_query)
self.connection.commit()
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from PostgreSQL"""
query = f"SELECT message FROM {self.table_name} WHERE session_id = %s;"
self.cursor.execute(query, (self.session_id,))
items = [record["message"] for record in self.cursor.fetchall()]
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in PostgreSQL"""
from psycopg import sql
query = sql.SQL("INSERT INTO {} (session_id, message) VALUES (%s, %s);").format(
sql.Identifier(self.table_name)
)
self.cursor.execute(
query, (self.session_id, json.dumps(_message_to_dict(message)))
)
self.connection.commit()
def clear(self) -> None:
"""Clear session memory from PostgreSQL"""
query = f"DELETE FROM {self.table_name} WHERE session_id = %s;"
self.cursor.execute(query, (self.session_id,))
self.connection.commit()
def __del__(self) -> None:
if self.cursor:
self.cursor.close()
if self.connection:
self.connection.close()
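For illustration, a minimal usage sketch (not part of the module source; the connection string is a placeholder and psycopg 3 must be installed):

from langchain.memory.chat_message_histories.postgres import PostgresChatMessageHistory
from langchain.schema import HumanMessage

history = PostgresChatMessageHistory(
    session_id="session-1",
    connection_string="postgresql://postgres:mypassword@localhost/chat_history",
)
history.add_message(HumanMessage(content="Hello!"))  # INSERT into message_store
print(history.messages)                              # SELECT ... WHERE session_id = %s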
Wikipedia#
Wikipedia is a multilingual free online encyclopedia written and maintained by a community of volunteers, known as Wikipedians, through open collaboration and using a wiki-based editing system called MediaWiki. Wikipedia is the largest and most-read reference work in history.
Installation and Setup#
pip install wikipedia
Document Loader#
See a usage example.
from langchain.document_loaders import WikipediaLoader
Retriever#
See a usage example.
from langchain.retrievers import WikipediaRetriever
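For illustration, a hedged sketch of both components (the query strings and load_max_docs values are arbitrary):

from langchain.document_loaders import WikipediaLoader
from langchain.retrievers import WikipediaRetriever

# Load up to two Wikipedia pages matching the query as Documents
docs = WikipediaLoader(query="LangChain", load_max_docs=2).load()

# Retrieve pages relevant to a free-form question
retriever = WikipediaRetriever()
results = retriever.get_relevant_documents("large language models")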
Azure Blob Storage#
Azure Blob Storage is Microsoft’s object storage solution for the cloud. Blob Storage is optimized for storing massive amounts of unstructured data. Unstructured data is data that doesn’t adhere to a particular data model or definition, such as text or binary data.
Azure Files offers fully managed
file shares in the cloud that are accessible via the industry-standard Server Message Block (SMB) protocol,
Network File System (NFS) protocol, and the Azure Files REST API. Azure Files is based on Azure Blob Storage.
Azure Blob Storage is designed for:
Serving images or documents directly to a browser.
Storing files for distributed access.
Streaming video and audio.
Writing to log files.
Storing data for backup and restore, disaster recovery, and archiving.
Storing data for analysis by an on-premises or Azure-hosted service.
Installation and Setup#
pip install azure-storage-blob
Document Loader#
See a usage example for the Azure Blob Storage.
from langchain.document_loaders import AzureBlobStorageContainerLoader
See a usage example for the Azure Files.
from langchain.document_loaders import AzureBlobStorageFileLoader
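For illustration, a minimal sketch of the container loader (the connection string and container name are placeholders):

from langchain.document_loaders import AzureBlobStorageContainerLoader

loader = AzureBlobStorageContainerLoader(
    conn_str="<your-connection-string>", container="<container-name>"
)
docs = loader.load()  # one Document per blob in the container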
Qdrant#
This page covers how to use the Qdrant ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Qdrant wrappers.
Installation and Setup#
Install the Python SDK with pip install qdrant-client
Wrappers#
VectorStore#
There exists a wrapper around Qdrant indexes, allowing you to use it as a vectorstore,
whether for semantic search or example selection.
To import this vectorstore:
from langchain.vectorstores import Qdrant
For a more detailed walkthrough of the Qdrant wrapper, see this notebook
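For illustration, a minimal sketch using an in-memory Qdrant instance (any embedding model works; OpenAIEmbeddings is just an example and requires OPENAI_API_KEY):

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Qdrant

texts = ["Qdrant is a vector database.", "LangChain wraps it as a vector store."]
qdrant = Qdrant.from_texts(texts, OpenAIEmbeddings(), location=":memory:")
docs = qdrant.similarity_search("vector database", k=1)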
Roam#
Roam is a note-taking tool for networked thought, designed to create a personal knowledge base.
Installation and Setup#
There isn’t any special setup for it.
Document Loader#
See a usage example.
from langchain.document_loaders import RoamLoader
Banana#
This page covers how to use the Banana ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Banana wrappers.
Installation and Setup#
Install with pip install banana-dev
Get a Banana API key and set it as an environment variable (BANANA_API_KEY)
Define your Banana Template#
If you want to use an available language model template, you can find one here.
This template uses the Palmyra-Base model by Writer.
You can check out an example Banana repository here.
Build the Banana app#
Banana apps must include the "output" key in the returned JSON; the response structure is rigid.
# Return the results as a dictionary
result = {'output': result}
An example inference function would be:
def inference(model_inputs:dict) -> dict:
global model
global tokenizer
# Parse out your arguments
prompt = model_inputs.get('prompt', None)
if prompt is None:
return {'message': "No prompt provided"}
# Run the model
input_ids = tokenizer.encode(prompt, return_tensors='pt').cuda()
output = model.generate(
input_ids,
max_length=100,
do_sample=True,
top_k=50,
top_p=0.95,
num_return_sequences=1,
temperature=0.9,
early_stopping=True,
no_repeat_ngram_size=3,
num_beams=5,
length_penalty=1.5,
repetition_penalty=1.5,
bad_words_ids=[[tokenizer.encode(' ', add_prefix_space=True)[0]]]
)
result = tokenizer.decode(output[0], skip_special_tokens=True)
# Return the results as a dictionary
result = {'output': result}
return result
You can find a full example of a Banana app here.
Wrappers#
LLM#
There exists an Banana LLM wrapper, which you can access with
from langchain.llms import Banana
You need to provide a model key located in the dashboard:
llm = Banana(model_key="YOUR_MODEL_KEY")
Modern Treasury#
Modern Treasury simplifies complex payment operations. It is a unified platform to power products and processes that move money.
Connect to banks and payment systems
Track transactions and balances in real-time
Automate payment operations for scale
Installation and Setup#
There isn’t any special setup for it.
Document Loader#
See a usage example.
from langchain.document_loaders import ModernTreasuryLoader
OpenAI#
OpenAI is an American artificial intelligence (AI) research laboratory
consisting of the non-profit OpenAI Incorporated
and its for-profit subsidiary corporation OpenAI Limited Partnership.
OpenAI conducts AI research with the declared intention of promoting and developing a friendly AI.
OpenAI systems run on an Azure-based supercomputing platform from Microsoft.
The OpenAI API is powered by a diverse set of models with different capabilities and price points.
ChatGPT is the Artificial Intelligence (AI) chatbot developed by OpenAI.
Installation and Setup#
Install the Python SDK with
pip install openai
Get an OpenAI api key and set it as an environment variable (OPENAI_API_KEY)
If you want to use OpenAI’s tokenizer (only available for Python 3.9+), install it
pip install tiktoken
LLM#
from langchain.llms import OpenAI
If you are using a model hosted on Azure, you should use a different wrapper for that:
from langchain.llms import AzureOpenAI
For a more detailed walkthrough of the Azure wrapper, see this notebook
Text Embedding Model#
from langchain.embeddings import OpenAIEmbeddings
For a more detailed walkthrough of this, see this notebook
Chat Model#
from langchain.chat_models import ChatOpenAI
For a more detailed walkthrough of this, see this notebook
Tokenizer#
There are several places you can use the tiktoken tokenizer. By default, it is used to count tokens
for OpenAI LLMs.
You can also use it to count tokens when splitting documents with
from langchain.text_splitter import CharacterTextSplitter
CharacterTextSplitter.from_tiktoken_encoder(...)
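For illustration, a short sketch of tiktoken-based splitting (the chunk size is arbitrary):

from langchain.text_splitter import CharacterTextSplitter

splitter = CharacterTextSplitter.from_tiktoken_encoder(chunk_size=100, chunk_overlap=0)
chunks = splitter.split_text("some long document text ...")  # sizes measured in tokens, not characters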
For a more detailed walkthrough of this, see this notebook
Chain#
See a usage example.
from langchain.chains import OpenAIModerationChain
Document Loader#
See a usage example.
from langchain.document_loaders.chatgpt import ChatGPTLoader
Retriever#
See a usage example.
from langchain.retrievers import ChatGPTPluginRetriever
Hugging Face#
This page covers how to use the Hugging Face ecosystem (including the Hugging Face Hub) within LangChain.
It is broken into two parts: installation and setup, and then references to specific Hugging Face wrappers.
Installation and Setup#
If you want to work with the Hugging Face Hub:
Install the Hub client library with pip install huggingface_hub
Create a Hugging Face account (it’s free!)
Create an access token and set it as an environment variable (HUGGINGFACEHUB_API_TOKEN)
If you want to work with the Hugging Face Python libraries:
Install pip install transformers for working with models and tokenizers
Install pip install datasets for working with datasets
Wrappers#
LLM#
There exist two Hugging Face LLM wrappers, one for a local pipeline and one for a model hosted on Hugging Face Hub.
Note that these wrappers only work for models that support the following tasks: text2text-generation, text-generation
To use the local pipeline wrapper:
from langchain.llms import HuggingFacePipeline
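For illustration, a hedged sketch of the local pipeline wrapper (gpt2 and the generation settings are arbitrary; the model is downloaded on first use):

from langchain.llms import HuggingFacePipeline

llm = HuggingFacePipeline.from_model_id(
    model_id="gpt2",
    task="text-generation",
    model_kwargs={"max_length": 64},
)
print(llm("Once upon a time"))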
To use the wrapper for a model hosted on Hugging Face Hub:
from langchain.llms import HuggingFaceHub
For a more detailed walkthrough of the Hugging Face Hub wrapper, see this notebook
Embeddings#
There exist two Hugging Face Embeddings wrappers, one for a local model and one for a model hosted on Hugging Face Hub.
Note that these wrappers only work for sentence-transformers models.
To use the local pipeline wrapper:
from langchain.embeddings import HuggingFaceEmbeddings
To use the wrapper for a model hosted on Hugging Face Hub:
from langchain.embeddings import HuggingFaceHubEmbeddings
For a more detailed walkthrough of this, see this notebook
Tokenizer#
There are several places you can use tokenizers available through the transformers package.
By default, it is used to count tokens for all LLMs.
You can also use it to count tokens when splitting documents with
from langchain.text_splitter import CharacterTextSplitter
CharacterTextSplitter.from_huggingface_tokenizer(...)
For a more detailed walkthrough of this, see this notebook
Datasets#
The Hugging Face Hub has lots of great datasets that can be used to evaluate your LLM chains.
For a detailed walkthrough of how to use them to do so, see this notebook
Apify#
This page covers how to use Apify within LangChain.
Overview#
Apify is a cloud platform for web scraping and data extraction,
which provides an ecosystem of more than a thousand
ready-made apps called Actors for various scraping, crawling, and extraction use cases.
This integration enables you to run Actors on the Apify platform and load their results into LangChain to feed your vector
indexes with documents and data from the web, e.g. to generate answers from websites with documentation,
blogs, or knowledge bases.
Installation and Setup#
Install the Apify API client for Python with pip install apify-client
Get your Apify API token and either set it as
an environment variable (APIFY_API_TOKEN) or pass it to the ApifyWrapper as apify_api_token in the constructor.
Wrappers#
Utility#
You can use the ApifyWrapper to run Actors on the Apify platform.
from langchain.utilities import ApifyWrapper
For a more detailed walkthrough of this wrapper, see this notebook.
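For illustration, a sketch of typical usage (the Actor ID and start URL are examples; APIFY_API_TOKEN is assumed to be set in the environment):

from langchain.document_loaders.base import Document
from langchain.utilities import ApifyWrapper

apify = ApifyWrapper()
loader = apify.call_actor(
    actor_id="apify/website-content-crawler",
    run_input={"startUrls": [{"url": "https://python.langchain.com/en/latest/"}]},
    dataset_mapping_function=lambda item: Document(
        page_content=item["text"] or "", metadata={"source": item["url"]}
    ),
)
docs = loader.load()  # one Document per dataset item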
Loader#
You can also use our ApifyDatasetLoader to get data from an Apify dataset.
from langchain.document_loaders import ApifyDatasetLoader
For a more detailed walkthrough of this loader, see this notebook.
Twitter#
Twitter is an online social media and social networking service.
Installation and Setup#
pip install tweepy
We must initialize the loader with a Twitter API token, and we need to set up the Twitter usernames to load tweets from.
Document Loader#
See a usage example.
from langchain.document_loaders import TwitterTweetLoader
Google Cloud Storage#
Google Cloud Storage is a managed service for storing unstructured data.
Installation and Setup#
First, you need to install the google-cloud-storage python package.
pip install google-cloud-storage
Document Loader#
There are two loaders for Google Cloud Storage: the Directory and the File loaders.
See a usage example.
from langchain.document_loaders import GCSDirectoryLoader
See a usage example.
from langchain.document_loaders import GCSFileLoader
OpenWeatherMap#
OpenWeatherMap provides all essential weather data for a specific location:
Current weather
Minute forecast for 1 hour
Hourly forecast for 48 hours
Daily forecast for 8 days
National weather alerts
Historical weather data for 40+ years back
This page covers how to use the OpenWeatherMap API within LangChain.
Installation and Setup#
Install requirements with
pip install pyowm
Go to OpenWeatherMap and sign up for an account to get your API key here
Set your API key as OPENWEATHERMAP_API_KEY environment variable
Wrappers#
Utility#
There exists an OpenWeatherMapAPIWrapper utility, which wraps this API. To import this utility:
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper
For a more detailed walkthrough of this wrapper, see this notebook.
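For illustration, a minimal sketch (assumes OPENWEATHERMAP_API_KEY is set; the location string is arbitrary):

from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper

weather = OpenWeatherMapAPIWrapper()
print(weather.run("London,GB"))  # returns a formatted weather report string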
Tool#
You can also easily load this wrapper as a Tool (to use with an Agent).
You can do this with:
from langchain.agents import load_tools
tools = load_tools(["openweathermap-api"])
For more information on this, see this page
Jina#
This page covers how to use the Jina ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Jina wrappers.
Installation and Setup#
Install the Python SDK with pip install jina
Get a Jina AI Cloud auth token from here and set it as an environment variable (JINA_AUTH_TOKEN)
Wrappers#
Embeddings#
There exists a Jina Embeddings wrapper, which you can access with
from langchain.embeddings import JinaEmbeddings
For a more detailed walkthrough of this, see this notebook
AnalyticDB#
This page covers how to use the AnalyticDB ecosystem within LangChain.
VectorStore#
There exists a wrapper around AnalyticDB, allowing you to use it as a vectorstore,
whether for semantic search or example selection.
To import this vectorstore:
from langchain.vectorstores import AnalyticDB
For a more detailed walkthrough of the AnalyticDB wrapper, see this notebook
WhatsApp#
WhatsApp (also called WhatsApp Messenger) is a freeware, cross-platform, centralized instant messaging (IM) and voice-over-IP (VoIP) service. It allows users to send text and voice messages, make voice and video calls, and share images, documents, user locations, and other content.
Installation and Setup#
There isn’t any special setup for it.
Document Loader#
See a usage example.
from langchain.document_loaders import WhatsAppChatLoader
Obsidian#
Obsidian is a powerful and extensible knowledge base
that works on top of your local folder of plain text files.
Installation and Setup#
All instructions are in the examples below.
Document Loader#
See a usage example.
from langchain.document_loaders import ObsidianLoader
CerebriumAI#
This page covers how to use the CerebriumAI ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific CerebriumAI wrappers.
Installation and Setup#
Install with pip install cerebrium
Get a CerebriumAI API key and set it as an environment variable (CEREBRIUMAI_API_KEY)
Wrappers#
LLM#
There exists an CerebriumAI LLM wrapper, which you can access with
from langchain.llms import CerebriumAI
Psychic#
Psychic is a platform for integrating with SaaS tools like Notion, Zendesk,
Confluence, and Google Drive via OAuth and syncing documents from these applications to your SQL or vector
database. You can think of it like Plaid for unstructured data.
Installation and Setup#
pip install psychicapi
Psychic is easy to set up: you import the react library and configure it with your Sidekick API key, which you get
from the Psychic dashboard. When you connect the applications, you can
view these connections from the dashboard and retrieve data using the server-side libraries.
Create an account in the dashboard.
Use the react library to add the Psychic link modal to your frontend react app. You will use this to connect the SaaS apps.
Once you have created a connection, you can use the PsychicLoader by following the example notebook
Advantages vs Other Document Loaders#
Universal API: Instead of building OAuth flows and learning the APIs for every SaaS app, you integrate Psychic once and leverage our universal API to retrieve data.
Data Syncs: Data in your customers’ SaaS apps can get stale fast. With Psychic you can configure webhooks to keep your documents up to date on a daily or realtime basis.
Simplified OAuth: Psychic handles OAuth end-to-end so that you don’t have to spend time creating OAuth clients for each integration, keeping access tokens fresh, and handling OAuth redirect logic.
Google Vertex AI#
Vertex AI is a machine learning (ML)
platform that lets you train and deploy ML models and AI applications.
Vertex AI combines data engineering, data science, and ML engineering workflows, enabling your teams to
collaborate using a common toolset.
Installation and Setup#
pip install google-cloud-aiplatform
See the setup instructions
Chat Models#
See a usage example
from langchain.chat_models import ChatVertexAI
SearxNG Search API#
This page covers how to use the SearxNG search API within LangChain.
It is broken into two parts: installation and setup, and then references to the specific SearxNG API wrapper.
Installation and Setup#
While it is possible to utilize the wrapper in conjunction with public searx
instances, these instances frequently do not permit API
access (see the note on output format below) and have limitations on the frequency
of requests. It is recommended to opt for a self-hosted instance instead.
Self Hosted Instance:#
See this page for installation instructions.
When you install SearxNG, the only active output format by default is the HTML format.
You need to activate the json format to use the API. This can be done by adding the following line to the settings.yml file:
search:
formats:
- html
- json
You can make sure that the API is working by issuing a curl request to the API endpoint:
curl -kLX GET --data-urlencode q='langchain' -d format=json http://localhost:8888
This should return a JSON object with the results.
Wrappers#
Utility#
To use the wrapper, we need to pass the host of the SearxNG instance to the wrapper either with:
1. the named parameter searx_host when creating the instance, or
2. by exporting the environment variable SEARXNG_HOST.
You can use the wrapper to get results from a SearxNG instance.
from langchain.utilities import SearxSearchWrapper
s = SearxSearchWrapper(searx_host="http://localhost:8888")
s.run("what is a large language model?") | https://python.langchain.com/en/latest/integrations/searx.html |
23ad792e35ee-1 | s.run("what is a large language model?")
Tool#
You can also load this wrapper as a Tool (to use with an Agent).
You can do this with:
from langchain.agents import load_tools
tools = load_tools(["searx-search"],
searx_host="http://localhost:8888",
engines=["github"])
Note that we could optionally pass custom engines to use.
If you want to obtain results with metadata as json you can use:
tools = load_tools(["searx-search-results-json"],
searx_host="http://localhost:8888",
num_results=5)
For more information on tools, see this page
Trello#
Trello is a web-based project management and collaboration tool that allows individuals and teams to organize and track their tasks and projects. It provides a visual interface known as a “board” where users can create lists and cards to represent their tasks and activities.
The TrelloLoader allows us to load cards from a Trello board.
Installation and Setup#
pip install py-trello beautifulsoup4
See setup instructions.
Document Loader#
See a usage example.
from langchain.document_loaders import TrelloLoader
scikit-learn#
This page covers how to use the scikit-learn package within LangChain.
It is broken into two parts: installation and setup, and then references to specific scikit-learn wrappers.
Installation and Setup#
Install the Python package with pip install scikit-learn
Wrappers#
VectorStore#
SKLearnVectorStore provides a simple wrapper around the nearest neighbor implementation in the
scikit-learn package, allowing you to use it as a vectorstore.
To import this vectorstore:
from langchain.vectorstores import SKLearnVectorStore
For a more detailed walkthrough of the SKLearnVectorStore wrapper, see this notebook.
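For illustration, a minimal sketch (any embedding model works; OpenAIEmbeddings is just an example):

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import SKLearnVectorStore

texts = ["scikit-learn offers nearest-neighbor search.", "LangChain wraps it as a vector store."]
store = SKLearnVectorStore.from_texts(texts, OpenAIEmbeddings())
docs = store.similarity_search("nearest neighbors", k=1)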
MLflow#
This notebook goes over how to track your LangChain experiments in your MLflow server.
!pip install azureml-mlflow
!pip install pandas
!pip install textstat
!pip install spacy
!pip install openai
!pip install google-search-results
!python -m spacy download en_core_web_sm
import os
os.environ["MLFLOW_TRACKING_URI"] = ""
os.environ["OPENAI_API_KEY"] = ""
os.environ["SERPAPI_API_KEY"] = ""
from langchain.callbacks import MlflowCallbackHandler
from langchain.llms import OpenAI
"""Main function.
This function is used to try the callback handler.
Scenarios:
1. OpenAI LLM
2. Chain with multiple SubChains on multiple generations
3. Agent with Tools
"""
mlflow_callback = MlflowCallbackHandler()
llm = OpenAI(model_name="gpt-3.5-turbo", temperature=0, callbacks=[mlflow_callback], verbose=True)
# SCENARIO 1 - LLM
llm_result = llm.generate(["Tell me a joke"])
mlflow_callback.flush_tracker(llm)
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
# SCENARIO 2 - Chain
template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:"""
prompt_template = PromptTemplate(input_variables=["title"], template=template)
synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=[mlflow_callback])
test_prompts = [
{
"title": "documentary about good video games that push the boundary of game design"
},
]
synopsis_chain.apply(test_prompts)
mlflow_callback.flush_tracker(synopsis_chain)
from langchain.agents import initialize_agent, load_tools
from langchain.agents import AgentType
# SCENARIO 3 - Agent with Tools
tools = load_tools(["serpapi", "llm-math"], llm=llm, callbacks=[mlflow_callback])
agent = initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
callbacks=[mlflow_callback],
verbose=True,
)
agent.run(
"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?"
)
mlflow_callback.flush_tracker(agent, finish=True)
Databricks#
This notebook covers how to connect to the Databricks runtimes and Databricks SQL using the SQLDatabase wrapper of LangChain.
It is broken into 3 parts: installation and setup, connecting to Databricks, and examples.
Installation and Setup#
!pip install databricks-sql-connector
Connecting to Databricks#
You can connect to Databricks runtimes and Databricks SQL using the SQLDatabase.from_databricks() method.
Syntax#
SQLDatabase.from_databricks(
catalog: str,
schema: str,
host: Optional[str] = None,
api_token: Optional[str] = None,
warehouse_id: Optional[str] = None,
cluster_id: Optional[str] = None,
engine_args: Optional[dict] = None,
**kwargs: Any)
Required Parameters#
catalog: The catalog name in the Databricks database.
schema: The schema name in the catalog.
Optional Parameters#
The following parameters are optional. When executing the method in a Databricks notebook, you don't need to provide them in most cases.
host: The Databricks workspace hostname, excluding ‘https://’ part. Defaults to ‘DATABRICKS_HOST’ environment variable or current workspace if in a Databricks notebook.
api_token: The Databricks personal access token for accessing the Databricks SQL warehouse or the cluster. Defaults to ‘DATABRICKS_TOKEN’ environment variable or a temporary one is generated if in a Databricks notebook.
warehouse_id: The warehouse ID in the Databricks SQL.
cluster_id: The cluster ID in the Databricks Runtime. If running in a Databricks notebook and both ‘warehouse_id’ and ‘cluster_id’ are None, it uses the ID of the cluster the notebook is attached to.
engine_args: The arguments to be used when connecting Databricks.
**kwargs: Additional keyword arguments for the SQLDatabase.from_uri method.
Examples#
# Connecting to Databricks with SQLDatabase wrapper
from langchain import SQLDatabase
db = SQLDatabase.from_databricks(catalog='samples', schema='nyctaxi')
# Creating a OpenAI Chat LLM wrapper
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(temperature=0, model_name="gpt-4")
SQL Chain example#
This example demonstrates the use of the SQL Chain for answering a question over a Databricks database.
from langchain import SQLDatabaseChain
db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)
db_chain.run("What is the average duration of taxi rides that start between midnight and 6am?")
> Entering new SQLDatabaseChain chain...
What is the average duration of taxi rides that start between midnight and 6am?
SQLQuery:SELECT AVG(UNIX_TIMESTAMP(tpep_dropoff_datetime) - UNIX_TIMESTAMP(tpep_pickup_datetime)) as avg_duration
FROM trips
WHERE HOUR(tpep_pickup_datetime) >= 0 AND HOUR(tpep_pickup_datetime) < 6
SQLResult: [(987.8122786304605,)]
Answer:The average duration of taxi rides that start between midnight and 6am is 987.81 seconds.
> Finished chain.
'The average duration of taxi rides that start between midnight and 6am is 987.81 seconds.'
SQL Database Agent example#
This example demonstrates the use of the SQL Database Agent for answering questions over a Databricks database.
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
agent = create_sql_agent(
llm=llm,
toolkit=toolkit,
verbose=True
)
agent.run("What is the longest trip distance and how long did it take?")
> Entering new AgentExecutor chain...
Action: list_tables_sql_db
Action Input:
Observation: trips
Thought:I should check the schema of the trips table to see if it has the necessary columns for trip distance and duration.
Action: schema_sql_db
Action Input: trips
Observation:
CREATE TABLE trips (
tpep_pickup_datetime TIMESTAMP,
tpep_dropoff_datetime TIMESTAMP,
trip_distance FLOAT,
fare_amount FLOAT,
pickup_zip INT,
dropoff_zip INT
) USING DELTA
/*
3 rows from trips table:
tpep_pickup_datetime tpep_dropoff_datetime trip_distance fare_amount pickup_zip dropoff_zip
2016-02-14 16:52:13+00:00 2016-02-14 17:16:04+00:00 4.94 19.0 10282 10171
2016-02-04 18:44:19+00:00 2016-02-04 18:46:00+00:00 0.28 3.5 10110 10110
2016-02-17 17:13:57+00:00 2016-02-17 17:17:55+00:00 0.7 5.0 10103 10023
*/
Thought:The trips table has the necessary columns for trip distance and duration. I will write a query to find the longest trip distance and its duration.
Action: query_checker_sql_db
Action Input: SELECT trip_distance, tpep_dropoff_datetime - tpep_pickup_datetime as duration FROM trips ORDER BY trip_distance DESC LIMIT 1
Observation: SELECT trip_distance, tpep_dropoff_datetime - tpep_pickup_datetime as duration FROM trips ORDER BY trip_distance DESC LIMIT 1
Thought:The query is correct. I will now execute it to find the longest trip distance and its duration.
Action: query_sql_db
Action Input: SELECT trip_distance, tpep_dropoff_datetime - tpep_pickup_datetime as duration FROM trips ORDER BY trip_distance DESC LIMIT 1
Observation: [(30.6, '0 00:43:31.000000000')]
Thought:I now know the final answer.
Final Answer: The longest trip distance is 30.6 miles and it took 43 minutes and 31 seconds.
> Finished chain.
'The longest trip distance is 30.6 miles and it took 43 minutes and 31 seconds.'
EverNote#
EverNote is intended for archiving and creating notes in which photos, audio and saved web content can be embedded. Notes are stored in virtual “notebooks” and can be tagged, annotated, edited, searched, and exported.
Installation and Setup#
First, you need to install the lxml and html2text python packages.
pip install lxml
pip install html2text
Document Loader#
See a usage example.
from langchain.document_loaders import EverNoteLoader
Arxiv#
arXiv is an open-access archive for 2 million scholarly articles in the fields of physics,
mathematics, computer science, quantitative biology, quantitative finance, statistics, electrical engineering and
systems science, and economics.
Installation and Setup#
First, you need to install the arxiv python package.
pip install arxiv
Second, you need to install the PyMuPDF python package, which converts PDF files downloaded from arxiv.org into text.
pip install pymupdf
Document Loader#
See a usage example.
from langchain.document_loaders import ArxivLoader
Retriever#
See a usage example.
from langchain.retrievers import ArxivRetriever
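For illustration, a hedged sketch of both components (the arXiv identifier and query are arbitrary examples):

from langchain.document_loaders import ArxivLoader
from langchain.retrievers import ArxivRetriever

# Load the paper with a given arXiv identifier as a Document
docs = ArxivLoader(query="1706.03762", load_max_docs=1).load()

# Retrieve papers relevant to a free-form query
retriever = ArxivRetriever(load_max_docs=2)
results = retriever.get_relevant_documents("transformer architectures")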
Facebook Chat#
Messenger is an American proprietary instant messaging app and
platform developed by Meta Platforms. Originally developed as Facebook Chat in 2008, the service was revamped in 2010.
Installation and Setup#
First, you need to install the pandas python package.
pip install pandas
Document Loader#
See a usage example.
from langchain.document_loaders import FacebookChatLoader
Runhouse#
This page covers how to use the Runhouse ecosystem within LangChain.
It is broken into three parts: installation and setup, LLMs, and Embeddings.
Installation and Setup#
Install the Python SDK with pip install runhouse
If you’d like to use on-demand cluster, check your cloud credentials with sky check
Self-hosted LLMs#
For a basic self-hosted LLM, you can use the SelfHostedHuggingFaceLLM class. For more
custom LLMs, you can use the SelfHostedPipeline parent class.
from langchain.llms import SelfHostedPipeline, SelfHostedHuggingFaceLLM
For a more detailed walkthrough of the Self-hosted LLMs, see this notebook
Self-hosted Embeddings#
There are several ways to use self-hosted embeddings with LangChain via Runhouse.
For a basic self-hosted embedding from a Hugging Face Transformers model, you can use
the SelfHostedEmbeddings class.
from langchain.embeddings import SelfHostedEmbeddings
For a more detailed walkthrough of the Self-hosted Embeddings, see this notebook
MyScale#
This page covers how to use MyScale vector database within LangChain.
It is broken into two parts: installation and setup, and then references to specific MyScale wrappers.
With MyScale, you can manage both structured and unstructured (vectorized) data, and perform joint queries and analytics on both types of data using SQL. Plus, MyScale’s cloud-native OLAP architecture, built on top of ClickHouse, enables lightning-fast data processing even on massive datasets.
Introduction#
An overview of MyScale and high-performance vector search.
You can register on our SaaS and start a cluster now!
If you are also interested in how we managed to integrate SQL and vectors, please refer to this document for further syntax reference.
We also provide a live demo on Hugging Face! Please check out our Hugging Face space! It searches millions of vectors in a blink!
Installation and Setup#
Install the Python SDK with pip install clickhouse-connect
Setting up environments#
There are two ways to set up parameters for myscale index.
Environment Variables
Before you run the app, please set the environment variables with export:
export MYSCALE_URL='<your-endpoints-url>' MYSCALE_PORT=<your-endpoints-port> MYSCALE_USERNAME=<your-username> MYSCALE_PASSWORD=<your-password> ...
You can easily find your account, password, and other info on our SaaS. For details, please refer to this document.
Every attribute under MyScaleSettings can be set with the prefix MYSCALE_ and is case insensitive.
Create MyScaleSettings object with parameters
from langchain.vectorstores import MyScale, MyScaleSettings
config = MyScaleSettings(host="<your-backend-url>", port=8443, ...)
index = MyScale(embedding_function, config) | https://python.langchain.com/en/latest/integrations/myscale.html |
index.add_documents(...)
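Once documents are added, the index can be queried like any other LangChain vectorstore; the query string below is a placeholder:
docs = index.similarity_search("What did the president say?", k=4)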
Wrappers#
supported functions:
add_texts
add_documents
from_texts
from_documents
similarity_search
asimilarity_search
similarity_search_by_vector
asimilarity_search_by_vector
similarity_search_with_relevance_scores
VectorStore#
There exists a wrapper around MyScale database, allowing you to use it as a vectorstore,
whether for semantic search or similar example retrieval.
To import this vectorstore:
from langchain.vectorstores import MyScale
For a more detailed walkthrough of the MyScale wrapper, see this notebook
Databerry
Contents
Installation and Setup
Retriever
Databerry#
Databerry is an open source document retrieval platform that helps to connect your personal data with Large Language Models.
Installation and Setup#
We need to sign up for Databerry, create a datastore, add some data, and get the datastore API endpoint URL.
We also need the API key.
Retriever#
See a usage example.
from langchain.retrievers import DataberryRetriever
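For illustration, a minimal sketch; the datastore URL is a placeholder for the endpoint you copied above:
retriever = DataberryRetriever(
    datastore_url="https://<your-datastore-id>.databerry.ai/query",  # placeholder
    api_key="<DATABERRY_API_KEY>",  # optional, required for private datastores
    top_k=3,
)
docs = retriever.get_relevant_documents("What is Databerry?")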
ForefrontAI
Contents
Installation and Setup
Wrappers
LLM
ForefrontAI#
This page covers how to use the ForefrontAI ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific ForefrontAI wrappers.
Installation and Setup#
Get a ForefrontAI API key and set it as an environment variable (FOREFRONTAI_API_KEY)
Wrappers#
LLM#
There exists a ForefrontAI LLM wrapper, which you can access with
from langchain.llms import ForefrontAI
Milvus
Contents
Installation and Setup
Wrappers
VectorStore
Milvus#
This page covers how to use the Milvus ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Milvus wrappers.
Installation and Setup#
Install the Python SDK with pip install pymilvus
Wrappers#
VectorStore#
There exists a wrapper around Milvus indexes, allowing you to use it as a vectorstore,
whether for semantic search or example selection.
To import this vectorstore:
from langchain.vectorstores import Milvus
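For illustration, a minimal sketch that assumes a Milvus server is running locally on the default port:
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Milvus
# Build a collection from raw texts and run a similarity search
vector_db = Milvus.from_texts(
    ["harrison worked at kensho"],
    OpenAIEmbeddings(),
    connection_args={"host": "127.0.0.1", "port": "19530"},
)
docs = vector_db.similarity_search("Where did harrison work?")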
For a more detailed walkthrough of the Milvus wrapper, see this notebook
Weaviate
Contents
Installation and Setup
Wrappers
VectorStore
Weaviate#
This page covers how to use the Weaviate ecosystem within LangChain.
What is Weaviate?
Weaviate in a nutshell:
Weaviate is an open-source vector search engine database.
Weaviate allows you to store JSON documents in a class property-like fashion while attaching machine learning vectors to these documents to represent them in vector space.
Weaviate can be used stand-alone (aka bring your vectors) or with a variety of modules that can do the vectorization for you and extend the core capabilities.
Weaviate has a GraphQL-API to access your data easily.
We aim to bring your vector search setup to production so you can query in mere milliseconds (check our open-source benchmarks to see if Weaviate fits your use case).
Get to know Weaviate in the basics getting started guide in under five minutes.
Weaviate in detail:
Weaviate is a low-latency vector search engine with out-of-the-box support for different media types (text, images, etc.). It offers Semantic Search, Question-Answer Extraction, Classification, Customizable Models (PyTorch/TensorFlow/Keras), etc. Built from scratch in Go, Weaviate stores both objects and vectors, allowing for combining vector search with structured filtering and the fault tolerance of a cloud-native database. It is all accessible through GraphQL, REST, and various client-side programming languages.
Installation and Setup#
Install the Python SDK with pip install weaviate-client
Wrappers#
VectorStore#
There exists a wrapper around Weaviate indexes, allowing you to use it as a vectorstore,
whether for semantic search or example selection.
To import this vectorstore:
from langchain.vectorstores import Weaviate | https://python.langchain.com/en/latest/integrations/weaviate.html |
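For illustration, a minimal sketch that assumes a running Weaviate instance; the URL is a placeholder:
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Weaviate
vectorstore = Weaviate.from_texts(
    ["harrison worked at kensho"],
    OpenAIEmbeddings(),
    weaviate_url="http://localhost:8080",  # placeholder
)
docs = vectorstore.similarity_search("Where did harrison work?")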
For a more detailed walkthrough of the Weaviate wrapper, see this notebook
Hazy Research
Contents
Installation and Setup
Wrappers
LLM
Hazy Research#
This page covers how to use the Hazy Research ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Hazy Research wrappers.
Installation and Setup#
To use the manifest, install it with pip install manifest-ml
Wrappers#
LLM#
There exists an LLM wrapper around Hazy Research’s manifest library.
manifest is a python library which is itself a wrapper around many model providers, and adds in caching, history, and more.
To use this wrapper:
from langchain.llms.manifest import ManifestWrapper
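For illustration, a minimal sketch; the client connection is a placeholder for wherever your manifest-hosted model lives:
from manifest import Manifest
from langchain.llms.manifest import ManifestWrapper
manifest = Manifest(
    client_name="huggingface",
    client_connection="http://127.0.0.1:5000",  # placeholder
)
llm = ManifestWrapper(client=manifest, llm_kwargs={"temperature": 0.0, "max_tokens": 256})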
SageMaker Endpoint
Contents
Installation and Setup
LLM
Text Embedding Models
SageMaker Endpoint#
Amazon SageMaker is a system that can build, train, and deploy machine learning (ML) models with fully managed infrastructure, tools, and workflows.
We use SageMaker to host our model and expose it as the SageMaker Endpoint.
Installation and Setup#
pip install boto3
For instructions on how to expose model as a SageMaker Endpoint, please see here.
Note: In order to handle batched requests, we need to adjust the return line in the predict_fn() function within the custom inference.py script:
Change from
return {"vectors": sentence_embeddings[0].tolist()}
to:
return {"vectors": sentence_embeddings.tolist()}
We have to set up the following required parameters of the SagemakerEndpoint call:
endpoint_name: The name of the endpoint from the deployed Sagemaker model.
Must be unique within an AWS Region.
credentials_profile_name: The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See this guide.
LLM#
See a usage example.
from langchain import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler
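For illustration, a minimal sketch of a content handler and the endpoint call; the endpoint name and region are placeholders, and the JSON payload shape depends on how your model was deployed:
import json
from langchain import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler
class ContentHandler(LLMContentHandler):
    content_type = "application/json"
    accepts = "application/json"
    def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
        # Serialize the prompt into the JSON shape your deployed model expects
        return json.dumps({"inputs": prompt, "parameters": model_kwargs}).encode("utf-8")
    def transform_output(self, output: bytes) -> str:
        # Parse the model's JSON response back into plain text
        response_json = json.loads(output.read().decode("utf-8"))
        return response_json[0]["generated_text"]
llm = SagemakerEndpoint(
    endpoint_name="my-sagemaker-endpoint",  # placeholder
    region_name="us-east-1",  # placeholder
    credentials_profile_name="default",
    content_handler=ContentHandler(),
)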
Text Embedding Models#
See a usage example.
from langchain.embeddings import SagemakerEndpointEmbeddings
from langchain.llms.sagemaker_endpoint import ContentHandlerBase
Comet
Contents
Install Comet and Dependencies
Initialize Comet and Set your Credentials
Set OpenAI and SerpAPI credentials
Scenario 1: Using just an LLM
Scenario 2: Using an LLM in a Chain
Scenario 3: Using An Agent with Tools
Scenario 4: Using Custom Evaluation Metrics
Comet#
In this guide we will demonstrate how to track your LangChain experiments, evaluation metrics, and LLM sessions with Comet.
Example Project: Comet with LangChain
Install Comet and Dependencies#
%pip install comet_ml langchain openai google-search-results spacy textstat pandas
import sys
!{sys.executable} -m spacy download en_core_web_sm
Initialize Comet and Set your Credentials#
You can grab your Comet API Key here or click the link after initializing Comet
import comet_ml
comet_ml.init(project_name="comet-example-langchain")
Set OpenAI and SerpAPI credentials#
You will need an OpenAI API Key and a SerpAPI API Key to run the following examples
import os
os.environ["OPENAI_API_KEY"] = "..."
#os.environ["OPENAI_ORGANIZATION"] = "..."
os.environ["SERPAPI_API_KEY"] = "..."
Scenario 1: Using just an LLM#
from datetime import datetime
from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler
from langchain.llms import OpenAI
comet_callback = CometCallbackHandler(
project_name="comet-example-langchain",
complexity_metrics=True,
stream_logs=True,
tags=["llm"],
visualizations=["dep"],
)
callbacks = [StdOutCallbackHandler(), comet_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks, verbose=True) | https://python.langchain.com/en/latest/integrations/comet_tracking.html |
llm_result = llm.generate(["Tell me a joke", "Tell me a poem", "Tell me a fact"] * 3)
print("LLM result", llm_result)
comet_callback.flush_tracker(llm, finish=True)
Scenario 2: Using an LLM in a Chain#
from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
comet_callback = CometCallbackHandler(
complexity_metrics=True,
project_name="comet-example-langchain",
stream_logs=True,
tags=["synopsis-chain"],
)
callbacks = [StdOutCallbackHandler(), comet_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks)
template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:"""
prompt_template = PromptTemplate(input_variables=["title"], template=template)
synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=callbacks)
test_prompts = [{"title": "Documentary about Bigfoot in Paris"}]
print(synopsis_chain.apply(test_prompts))
comet_callback.flush_tracker(synopsis_chain, finish=True)
Scenario 3: Using An Agent with Tools#
from langchain.agents import initialize_agent, load_tools
from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler
from langchain.llms import OpenAI
comet_callback = CometCallbackHandler(
project_name="comet-example-langchain",
complexity_metrics=True, | https://python.langchain.com/en/latest/integrations/comet_tracking.html |
stream_logs=True,
tags=["agent"],
)
callbacks = [StdOutCallbackHandler(), comet_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks)
tools = load_tools(["serpapi", "llm-math"], llm=llm, callbacks=callbacks)
agent = initialize_agent(
tools,
llm,
agent="zero-shot-react-description",
callbacks=callbacks,
verbose=True,
)
agent.run(
"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?"
)
comet_callback.flush_tracker(agent, finish=True)
Scenario 4: Using Custom Evaluation Metrics#
The CometCallbackHandler also allows you to define and use Custom Evaluation Metrics to assess generated outputs from your model. Let’s take a look at how this works.
In the snippet below, we will use the ROUGE metric to evaluate the quality of a generated summary of an input prompt.
%pip install rouge-score
from rouge_score import rouge_scorer
from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
class Rouge:
def __init__(self, reference):
self.reference = reference
self.scorer = rouge_scorer.RougeScorer(["rougeLsum"], use_stemmer=True)
def compute_metric(self, generation, prompt_idx, gen_idx):
prediction = generation.text
results = self.scorer.score(target=self.reference, prediction=prediction)
return { | https://python.langchain.com/en/latest/integrations/comet_tracking.html |
"rougeLsum_score": results["rougeLsum"].fmeasure,
"reference": self.reference,
}
reference = """
The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building.
It was the first structure to reach a height of 300 metres.
It is now taller than the Chrysler Building in New York City by 5.2 metres (17 ft)
Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France .
"""
rouge_score = Rouge(reference=reference)
template = """Given the following article, it is your job to write a summary.
Article:
{article}
Summary: This is the summary for the above article:"""
prompt_template = PromptTemplate(input_variables=["article"], template=template)
comet_callback = CometCallbackHandler(
project_name="comet-example-langchain",
complexity_metrics=False,
stream_logs=True,
tags=["custom_metrics"],
custom_metrics=rouge_score.compute_metric,
)
callbacks = [StdOutCallbackHandler(), comet_callback]
llm = OpenAI(temperature=0.9)
synopsis_chain = LLMChain(llm=llm, prompt=prompt_template)
test_prompts = [
{
"article": """
The tower is 324 metres (1,063 ft) tall, about the same height as
an 81-storey building, and the tallest structure in Paris. Its base is square,
measuring 125 metres (410 ft) on each side.
During its construction, the Eiffel Tower surpassed the
Washington Monument to become the tallest man-made structure in the world,
a title it held for 41 years until the Chrysler Building | https://python.langchain.com/en/latest/integrations/comet_tracking.html |
in New York City was finished in 1930.
It was the first structure to reach a height of 300 metres.
Due to the addition of a broadcasting aerial at the top of the tower in 1957,
it is now taller than the Chrysler Building by 5.2 metres (17 ft).
Excluding transmitters, the Eiffel Tower is the second tallest
free-standing structure in France after the Millau Viaduct.
"""
}
]
print(synopsis_chain.apply(test_prompts, callbacks=callbacks))
comet_callback.flush_tracker(synopsis_chain, finish=True)
Diffbot
Contents
Installation and Setup
Document Loader
Diffbot#
Diffbot is a service to read web pages. Unlike traditional web scraping tools,
Diffbot doesn’t require any rules to read the content on a page.
It starts with computer vision, which classifies a page into one of 20 possible types. Content is then interpreted by a machine learning model trained to identify the key attributes on a page based on its type.
The result is a website transformed into clean-structured data (like JSON or CSV), ready for your application.
Installation and Setup#
Read the instructions on how to get the Diffbot API Token.
Document Loader#
See a usage example.
from langchain.document_loaders import DiffbotLoader
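For illustration, a minimal sketch; the URL is a placeholder and the token is read from the environment:
import os
from langchain.document_loaders import DiffbotLoader
loader = DiffbotLoader(
    urls=["https://example.com/some-article"],  # placeholder
    api_token=os.environ["DIFFBOT_API_TOKEN"],
)
docs = loader.load()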
Tair
Contents
Installation and Setup
Wrappers
VectorStore
Tair#
This page covers how to use the Tair ecosystem within LangChain.
Installation and Setup#
Install Tair Python SDK with pip install tair.
Wrappers#
VectorStore#
There exists a wrapper around TairVector, allowing you to use it as a vectorstore,
whether for semantic search or example selection.
To import this vectorstore:
from langchain.vectorstores import Tair
For a more detailed walkthrough of the Tair wrapper, see this notebook
Modal
Contents
Installation and Setup
Define your Modal Functions and Webhooks
Wrappers
LLM
Modal#
This page covers how to use the Modal ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Modal wrappers.
Installation and Setup#
Install with pip install modal-client
Run modal token new
Define your Modal Functions and Webhooks#
You must include a prompt in your request, and the response must follow a rigid structure:
class Item(BaseModel):
prompt: str
@stub.webhook(method="POST")
def my_webhook(item: Item):
return {"prompt": my_function.call(item.prompt)}
An example with GPT2:
from pydantic import BaseModel
import modal
stub = modal.Stub("example-get-started")
volume = modal.SharedVolume().persist("gpt2_model_vol")
CACHE_PATH = "/root/model_cache"
@stub.function(
gpu="any",
image=modal.Image.debian_slim().pip_install(
"tokenizers", "transformers", "torch", "accelerate"
),
shared_volumes={CACHE_PATH: volume},
retries=3,
)
def run_gpt2(text: str):
from transformers import GPT2Tokenizer, GPT2LMHeadModel
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
encoded_input = tokenizer(text, return_tensors='pt').input_ids
output = model.generate(encoded_input, max_length=50, do_sample=True)
return tokenizer.decode(output[0], skip_special_tokens=True)
class Item(BaseModel):
prompt: str
@stub.webhook(method="POST")
def get_text(item: Item):
return {"prompt": run_gpt2.call(item.prompt)}
Wrappers#
LLM#
There exists a Modal LLM wrapper, which you can access with
from langchain.llms import Modal
Microsoft PowerPoint
Contents
Installation and Setup
Document Loader
Microsoft PowerPoint#
Microsoft PowerPoint is a presentation program by Microsoft.
Installation and Setup#
There isn’t any special setup for it.
Document Loader#
See a usage example.
from langchain.document_loaders import UnstructuredPowerPointLoader
YouTube
Contents
Installation and Setup
Document Loader
YouTube#
YouTube is an online video sharing and social media platform created by Google.
We download the YouTube transcripts and video information.
Installation and Setup#
pip install youtube-transcript-api
pip install pytube
Document Loader#
See a usage example.
from langchain.document_loaders import YoutubeLoader
from langchain.document_loaders import GoogleApiYoutubeLoader
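For illustration, a minimal sketch using YoutubeLoader; the video URL is a placeholder:
loader = YoutubeLoader.from_youtube_url(
    "https://www.youtube.com/watch?v=<video-id>",  # placeholder
    add_video_info=True,  # fetching video metadata requires pytube
)
docs = loader.load()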
Cassandra
Contents
Installation and Setup
Memory
Cassandra#
Cassandra is a free and open-source, distributed, wide-column
store, NoSQL database management system designed to handle large amounts of data across many commodity servers,
providing high availability with no single point of failure. Cassandra offers support for clusters spanning
multiple datacenters, with asynchronous masterless replication allowing low latency operations for all clients.
Cassandra was designed to combine Amazon's Dynamo distributed storage and replication
techniques with Google's Bigtable data and storage engine model.
Installation and Setup#
pip install cassandra-driver
Memory#
See a usage example.
from langchain.memory import CassandraChatMessageHistory
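For illustration, a rough sketch; the constructor arguments shown are assumptions about a typical local setup, so check the usage example for the exact signature:
history = CassandraChatMessageHistory(
    contact_points=["127.0.0.1"],  # assumed local Cassandra node
    session_id="my-session",
    keyspace_name="chat_history",  # assumed keyspace
)
history.add_user_message("hi!")
history.add_ai_message("whats up?")
print(history.messages)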
NLPCloud
Contents
Installation and Setup
Wrappers
LLM
NLPCloud#
This page covers how to use the NLPCloud ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific NLPCloud wrappers.
Installation and Setup#
Install the Python SDK with pip install nlpcloud
Get an NLPCloud api key and set it as an environment variable (NLPCLOUD_API_KEY)
Wrappers#
LLM#
There exists an NLPCloud LLM wrapper, which you can access with
from langchain.llms import NLPCloud
Azure OpenAI
Contents
Installation and Setup
LLM
Text Embedding Models
Chat Models
Azure OpenAI#
Microsoft Azure, often referred to as Azure is a cloud computing platform run by Microsoft, which offers access, management, and development of applications and services through global data centers. It provides a range of capabilities, including software as a service (SaaS), platform as a service (PaaS), and infrastructure as a service (IaaS). Microsoft Azure supports many programming languages, tools, and frameworks, including Microsoft-specific and third-party software and systems.
Azure OpenAI is an Azure service with powerful language models from OpenAI including the GPT-3, Codex and Embeddings model series for content generation, summarization, semantic search, and natural language to code translation.
Installation and Setup#
pip install openai
pip install tiktoken
Set the environment variables to get access to the Azure OpenAI service.
import os
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_BASE"] = "https://<your-endpoint.openai.azure.com/"
os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key"
os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"
LLM#
See a usage example.
from langchain.llms import AzureOpenAI
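For illustration, a minimal sketch; the deployment name is a placeholder for whatever you named your Azure deployment:
llm = AzureOpenAI(
    deployment_name="my-davinci-deployment",  # placeholder
    model_name="text-davinci-003",
)
print(llm("Tell me a joke"))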
Text Embedding Models#
See a usage example
from langchain.embeddings import OpenAIEmbeddings
Chat Models#
See a usage example
from langchain.chat_models import AzureChatOpenAI
Graphsignal
Contents
Installation and Setup
Tracing and Monitoring
Graphsignal#
This page covers how to use Graphsignal to trace and monitor LangChain. Graphsignal enables full visibility into your application. It provides latency breakdowns by chains and tools, exceptions with full context, data monitoring, compute/GPU utilization, OpenAI cost analytics, and more.
Installation and Setup#
Install the Python library with pip install graphsignal
Create a free Graphsignal account here
Get an API key and set it as an environment variable (GRAPHSIGNAL_API_KEY)
Tracing and Monitoring#
Graphsignal automatically instruments and starts tracing and monitoring chains. Traces and metrics are then available in your Graphsignal dashboards.
Initialize the tracer by providing a deployment name:
import graphsignal
graphsignal.configure(deployment='my-langchain-app-prod')
To additionally trace any function or code, you can use a decorator or a context manager:
@graphsignal.trace_function
def handle_request():
chain.run("some initial text")
with graphsignal.start_trace('my-chain'):
chain.run("some initial text")
Optionally, enable profiling to record function-level statistics for each trace.
with graphsignal.start_trace(
'my-chain', options=graphsignal.TraceOptions(enable_profiling=True)):
chain.run("some initial text")
See the Quick Start guide for complete setup instructions.
LanceDB
Contents
Installation and Setup
Wrappers
VectorStore
LanceDB#
This page covers how to use LanceDB within LangChain.
It is broken into two parts: installation and setup, and then references to specific LanceDB wrappers.
Installation and Setup#
Install the Python SDK with pip install lancedb
Wrappers#
VectorStore#
There exists a wrapper around LanceDB databases, allowing you to use it as a vectorstore,
whether for semantic search or example selection.
To import this vectorstore:
from langchain.vectorstores import LanceDB
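For illustration, a minimal sketch that stores a single embedded text in a local LanceDB table; the path and table name are placeholders:
import lancedb
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import LanceDB
embeddings = OpenAIEmbeddings()
db = lancedb.connect("/tmp/lancedb")  # placeholder path
table = db.create_table(
    "my_table",  # placeholder name
    data=[{"vector": embeddings.embed_query("Hello World"), "text": "Hello World", "id": "1"}],
)
vectorstore = LanceDB(connection=table, embedding=embeddings)
docs = vectorstore.similarity_search("Hello")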
For a more detailed walkthrough of the LanceDB wrapper, see this notebook
Tensorflow Hub
Contents
Installation and Setup
Text Embedding Models
Tensorflow Hub#
TensorFlow Hub is a repository of trained machine learning models ready for fine-tuning and deployable anywhere.
TensorFlow Hub lets you search and discover hundreds of trained, ready-to-deploy machine learning models in one place.
Installation and Setup#
pip install tensorflow-hub
pip install tensorflow_text
Text Embedding Models#
See a usage example
from langchain.embeddings import TensorflowHubEmbeddings
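For illustration, a minimal sketch using the default model:
embeddings = TensorflowHubEmbeddings()
query_vector = embeddings.embed_query("Hello world")
doc_vectors = embeddings.embed_documents(["Hello world", "Goodbye world"])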
Anthropic
Contents
Installation and Setup
Chat Models
Anthropic#
Anthropic is an American artificial intelligence (AI) startup and
public-benefit corporation, founded by former members of OpenAI. Anthropic specializes in developing general AI
systems and language models, with a company ethos of responsible AI usage.
Anthropic develops a chatbot, named Claude. Similar to ChatGPT, Claude uses a messaging
interface where users can submit questions or requests and receive highly detailed and relevant responses.
Installation and Setup#
pip install anthropic
See the setup documentation.
Chat Models#
See a usage example
from langchain.chat_models import ChatAnthropic
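For illustration, a minimal sketch, assuming ANTHROPIC_API_KEY is set in the environment:
from langchain.schema import HumanMessage
chat = ChatAnthropic()
response = chat([HumanMessage(content="Translate this sentence to French: I love programming.")])
print(response.content)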
Azure Cognitive Search
Contents
Installation and Setup
Retriever
Azure Cognitive Search#
Azure Cognitive Search (formerly known as Azure Search) is a cloud search service that gives developers infrastructure, APIs, and tools for building a rich search experience over private, heterogeneous content in web, mobile, and enterprise applications.
Search is foundational to any app that surfaces text to users, where common scenarios include catalog or document search, online retail apps, or data exploration over proprietary content. When you create a search service, you’ll work with the following capabilities:
A search engine for full text search over a search index containing user-owned content
Rich indexing, with lexical analysis and optional AI enrichment for content extraction and transformation
Rich query syntax for text search, fuzzy search, autocomplete, geo-search and more
Programmability through REST APIs and client libraries in Azure SDKs
Azure integration at the data layer, machine learning layer, and AI (Cognitive Services)
Installation and Setup#
See set up instructions.
Retriever#
See a usage example.
from langchain.retrievers import AzureCognitiveSearchRetriever
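For illustration, a minimal sketch; it assumes the service name, index name, and API key are supplied through environment variables, and content_key names the document field to return:
import os
os.environ["AZURE_COGNITIVE_SEARCH_SERVICE_NAME"] = "<your-service-name>"
os.environ["AZURE_COGNITIVE_SEARCH_INDEX_NAME"] = "<your-index-name>"
os.environ["AZURE_COGNITIVE_SEARCH_API_KEY"] = "<your-api-key>"
retriever = AzureCognitiveSearchRetriever(content_key="content")
docs = retriever.get_relevant_documents("what is langchain?")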
Beam
Contents
Installation and Setup
LLM
Example of the Beam app
Deploy the Beam app
Call the Beam app
Beam#
Beam makes it easy to run code on GPUs, deploy scalable web APIs,
schedule cron jobs, and run massively parallel workloads — without managing any infrastructure.
Installation and Setup#
Create an account
Install the Beam CLI with curl https://raw.githubusercontent.com/slai-labs/get-beam/main/get-beam.sh -sSfL | sh
Register API keys with beam configure
Set environment variables (BEAM_CLIENT_ID) and (BEAM_CLIENT_SECRET)
Install the Beam SDK:
pip install beam-sdk
LLM#
from langchain.llms.beam import Beam
Example of the Beam app#
This is the environment you’ll be developing against once you start the app.
It’s also used to define the maximum response length from the model.
llm = Beam(model_name="gpt2",
name="langchain-gpt2-test",
cpu=8,
memory="32Gi",
gpu="A10G",
python_version="python3.8",
python_packages=[
"diffusers[torch]>=0.10",
"transformers",
"torch",
"pillow",
"accelerate",
"safetensors",
"xformers",],
max_length="50",
verbose=False)
Deploy the Beam app#
Once defined, you can deploy your Beam app by calling your model’s _deploy() method.
llm._deploy()
Call the Beam app#
Once a Beam model is deployed, it can be invoked by calling your model’s _call() method.
This returns the GPT2 text response to your prompt.
response = llm._call("Running machine learning on a remote GPU")
An example script which deploys the model and calls it would be:
from langchain.llms.beam import Beam
import time
llm = Beam(model_name="gpt2",
name="langchain-gpt2-test",
cpu=8,
memory="32Gi",
gpu="A10G",
python_version="python3.8",
python_packages=[
"diffusers[torch]>=0.10",
"transformers",
"torch",
"pillow",
"accelerate",
"safetensors",
"xformers",],
max_length="50",
verbose=False)
llm._deploy()
response = llm._call("Running machine learning on a remote GPU")
print(response)
Figma
Contents
Installation and Setup
Document Loader
Figma#
Figma is a collaborative web application for interface design.
Installation and Setup#
The Figma API requires an access token, node_ids, and a file key.
The file key can be pulled from the URL. https://www.figma.com/file/{filekey}/sampleFilename
Node IDs are also available in the URL. Click on anything and look for the ‘?node-id={node_id}’ param.
Access token instructions.
Document Loader#
See a usage example.
from langchain.document_loaders import FigmaFileLoader
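For illustration, a minimal sketch; the three environment variables are placeholders for your own token, node IDs, and file key:
import os
loader = FigmaFileLoader(
    os.environ["FIGMA_ACCESS_TOKEN"],  # access token (placeholder variable name)
    os.environ["FIGMA_NODE_IDS"],      # comma-separated node IDs
    os.environ["FIGMA_FILE_KEY"],      # file key from the URL
)
docs = loader.load()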
StochasticAI
Contents
Installation and Setup
Wrappers
LLM
StochasticAI#
This page covers how to use the StochasticAI ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific StochasticAI wrappers.
Installation and Setup#
Install with pip install stochasticx
Get a StochasticAI API key and set it as an environment variable (STOCHASTICAI_API_KEY)
Wrappers#
LLM#
There exists a StochasticAI LLM wrapper, which you can access with
from langchain.llms import StochasticAI
Zilliz
Contents
Installation and Setup
Vectorstore
Zilliz#
Zilliz Cloud is a fully managed cloud service for LF AI Milvus®.
Installation and Setup#
Install the Python SDK:
pip install pymilvus
Vectorstore#
A wrapper around Zilliz indexes allows you to use it as a vectorstore,
whether for semantic search or example selection.
from langchain.vectorstores import Milvus
For a more detailed walkthrough of the Milvus wrapper, see this notebook
2Markdown
Contents
Installation and Setup
Document Loader
2Markdown#
2markdown service transforms website content into structured markdown files.
Installation and Setup#
We need an API key. See the instructions on how to get one.
Document Loader#
See a usage example.
from langchain.document_loaders import ToMarkdownLoader
Telegram
Contents
Installation and Setup
Document Loader
Telegram#
Telegram Messenger is a globally accessible freemium, cross-platform, encrypted, cloud-based and centralized instant messaging service. The application also provides optional end-to-end encrypted chats and video calling, VoIP, file sharing and several other features.
Installation and Setup#
See setup instructions.
Document Loader#
See a usage example.
from langchain.document_loaders import TelegramChatFileLoader
from langchain.document_loaders import TelegramChatApiLoader
Tracing Walkthrough
Tracing Walkthrough#
There are two recommended ways to trace your LangChains:
Setting the LANGCHAIN_WANDB_TRACING environment variable to “true”.
Using a context manager with tracing_enabled() to trace a particular block of code.
Note if the environment variable is set, all code will be traced, regardless of whether or not it’s within the context manager.
import os
os.environ["LANGCHAIN_WANDB_TRACING"] = "true"
# wandb documentation to configure wandb using env variables
# https://docs.wandb.ai/guides/track/advanced/environment-variables
# here we are configuring the wandb project name
os.environ["WANDB_PROJECT"] = "langchain-tracing"
from langchain.agents import initialize_agent, load_tools
from langchain.agents import AgentType
from langchain.llms import OpenAI
from langchain.callbacks import wandb_tracing_enabled
# Agent run with tracing. Ensure that OPENAI_API_KEY is set appropriately to run this example.
llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is 2 raised to .123243 power?") # this should be traced
# A URL for the trace session like the following should print in your console:
# https://wandb.ai/<wandb_entity>/<wandb_project>/runs/<run_id>
# The URL can be used to view the trace session in wandb.
# Now, we unset the environment variable and use a context manager.
if "LANGCHAIN_WANDB_TRACING" in os.environ: | https://python.langchain.com/en/latest/integrations/agent_with_wandb_tracing.html |
a83efa021cd1-1 | if "LANGCHAIN_WANDB_TRACING" in os.environ:
del os.environ["LANGCHAIN_WANDB_TRACING"]
# enable tracing using a context manager
with wandb_tracing_enabled():
agent.run("What is 5 raised to .123243 power?") # this should be traced
agent.run("What is 2 raised to .123243 power?") # this should not be traced
> Entering new AgentExecutor chain...
I need to use a calculator to solve this.
Action: Calculator
Action Input: 5^.123243
Observation: Answer: 1.2193914912400514
Thought: I now know the final answer.
Final Answer: 1.2193914912400514
> Finished chain.
> Entering new AgentExecutor chain...
I need to use a calculator to solve this.
Action: Calculator
Action Input: 2^.123243
Observation: Answer: 1.0891804557407723
Thought: I now know the final answer.
Final Answer: 1.0891804557407723
> Finished chain.
'1.0891804557407723'
Here’s a view of wandb dashboard for the above tracing session:
Writer
Contents
Installation and Setup
Wrappers
LLM
Writer#
This page covers how to use the Writer ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Writer wrappers.
Installation and Setup#
Get a Writer API key and set it as an environment variable (WRITER_API_KEY)
Wrappers#
LLM#
There exists a Writer LLM wrapper, which you can access with
from langchain.llms import Writer
Hacker News
Contents
Installation and Setup
Document Loader
Hacker News#
Hacker News (sometimes abbreviated as HN) is a social news
website focusing on computer science and entrepreneurship. It is run by the investment fund and startup
incubator Y Combinator. In general, content that can be submitted is defined as “anything that gratifies
one’s intellectual curiosity.”
Installation and Setup#
There isn’t any special setup for it.
Document Loader#
See a usage example.
from langchain.document_loaders import HNLoader
Slack
Contents
Installation and Setup
Document Loader
Slack#
Slack is an instant messaging program.
Installation and Setup#
There isn’t any special setup for it.
Document Loader#
See a usage example.
from langchain.document_loaders import SlackDirectoryLoader
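For illustration, a minimal sketch; the zip path and workspace URL are placeholders for your own Slack export:
loader = SlackDirectoryLoader(
    zip_path="slack_export.zip",  # placeholder export file
    workspace_url="https://my-workspace.slack.com",  # placeholder, used to build message links
)
docs = loader.load()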
Weights & Biases
Weights & Biases#
This notebook goes over how to track your LangChain experiments in one centralized Weights and Biases dashboard. To learn more about prompt engineering and the callback, please refer to this Report, which explains both alongside the resultant dashboards you can expect to see.
View Report
Note: the WandbCallbackHandler is being deprecated in favour of the WandbTracer. In the future, please use the WandbTracer, as it is more flexible and allows for more granular logging. To learn more about the WandbTracer, refer to the agent_with_wandb_tracing.ipynb notebook or use the following colab notebook. To learn more about Weights & Biases Prompts, refer to the following prompts documentation.
!pip install wandb
!pip install pandas
!pip install textstat
!pip install spacy
!python -m spacy download en_core_web_sm
import os
os.environ["WANDB_API_KEY"] = ""
# os.environ["OPENAI_API_KEY"] = ""
# os.environ["SERPAPI_API_KEY"] = ""
from datetime import datetime
from langchain.callbacks import WandbCallbackHandler, StdOutCallbackHandler
from langchain.llms import OpenAI
Callback Handler that logs to Weights and Biases.
Parameters:
job_type (str): The type of job.
project (str): The project to log to.
entity (str): The entity to log to.
tags (list): The tags to log.
group (str): The group to log to.
name (str): The name of the run.
notes (str): The notes to log.
visualize (bool): Whether to visualize the run.
complexity_metrics (bool): Whether to log complexity metrics. | https://python.langchain.com/en/latest/integrations/wandb_tracking.html |
stream_logs (bool): Whether to stream callback actions to W&B
Default values for WandbCallbackHandler(...)
visualize: bool = False,
complexity_metrics: bool = False,
stream_logs: bool = False,
NOTE: For beta workflows we have made the default analysis based on textstat and the visualizations based on spacy
"""Main function.
This function is used to try the callback handler.
Scenarios:
1. OpenAI LLM
2. Chain with multiple SubChains on multiple generations
3. Agent with Tools
"""
session_group = datetime.now().strftime("%m.%d.%Y_%H.%M.%S")
wandb_callback = WandbCallbackHandler(
job_type="inference",
project="langchain_callback_demo",
group=f"minimal_{session_group}",
name="llm",
tags=["test"],
)
callbacks = [StdOutCallbackHandler(), wandb_callback]
llm = OpenAI(temperature=0, callbacks=callbacks)
wandb: Currently logged in as: harrison-chase. Use `wandb login --relogin` to force relogin
Tracking run with wandb version 0.14.0
Run data is saved locally in /Users/harrisonchase/workplace/langchain/docs/ecosystem/wandb/run-20230318_150408-e47j1914
Syncing run llm to Weights & Biases (docs)
View project at https://wandb.ai/harrison-chase/langchain_callback_demo
View run at https://wandb.ai/harrison-chase/langchain_callback_demo/runs/e47j1914
wandb: WARNING The wandb callback is currently in beta and is subject to change based on updates to `langchain`. Please report any issues to https://github.com/wandb/wandb/issues with the tag `langchain`.
# Defaults for WandbCallbackHandler.flush_tracker(...)
reset: bool = True,
finish: bool = False,
The flush_tracker function is used to log LangChain sessions to Weights & Biases. It takes in the LangChain module or agent, and logs at minimum the prompts and generations alongside the serialized form of the LangChain module to the specified Weights & Biases project. By default we reset the session as opposed to concluding the session outright.
# SCENARIO 1 - LLM
llm_result = llm.generate(["Tell me a joke", "Tell me a poem"] * 3)
wandb_callback.flush_tracker(llm, name="simple_sequential") | https://python.langchain.com/en/latest/integrations/wandb_tracking.html |
Waiting for W&B process to finish... (success). View run llm at: https://wandb.ai/harrison-chase/langchain_callback_demo/runs/e47j1914Synced 5 W&B file(s), 2 media file(s), 5 artifact file(s) and 0 other file(s)Find logs at: ./wandb/run-20230318_150408-e47j1914/logsTracking run with wandb version 0.14.0Run data is saved locally in /Users/harrisonchase/workplace/langchain/docs/ecosystem/wandb/run-20230318_150534-jyxma7huSyncing run simple_sequential to Weights & Biases (docs) View project at https://wandb.ai/harrison-chase/langchain_callback_demo View run at https://wandb.ai/harrison-chase/langchain_callback_demo/runs/jyxma7hu
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
# SCENARIO 2 - Chain
template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:"""
prompt_template = PromptTemplate(input_variables=["title"], template=template)
synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=callbacks)
test_prompts = [
{
"title": "documentary about good video games that push the boundary of game design"
},
{"title": "cocaine bear vs heroin wolf"},
{"title": "the best in class mlops tooling"},
]
synopsis_chain.apply(test_prompts) | https://python.langchain.com/en/latest/integrations/wandb_tracking.html |
wandb_callback.flush_tracker(synopsis_chain, name="agent")
Waiting for W&B process to finish... (success). View run simple_sequential at: https://wandb.ai/harrison-chase/langchain_callback_demo/runs/jyxma7huSynced 4 W&B file(s), 2 media file(s), 6 artifact file(s) and 0 other file(s)Find logs at: ./wandb/run-20230318_150534-jyxma7hu/logsTracking run with wandb version 0.14.0Run data is saved locally in /Users/harrisonchase/workplace/langchain/docs/ecosystem/wandb/run-20230318_150550-wzy59zjqSyncing run agent to Weights & Biases (docs) View project at https://wandb.ai/harrison-chase/langchain_callback_demo View run at https://wandb.ai/harrison-chase/langchain_callback_demo/runs/wzy59zjq
from langchain.agents import initialize_agent, load_tools
from langchain.agents import AgentType
# SCENARIO 3 - Agent with Tools
tools = load_tools(["serpapi", "llm-math"], llm=llm)
agent = initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
agent.run(
"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?",
callbacks=callbacks,
)
wandb_callback.flush_tracker(agent, reset=False, finish=True)
> Entering new AgentExecutor chain...
I need to find out who Leo DiCaprio's girlfriend is and then calculate her age raised to the 0.43 power.
Action: Search | https://python.langchain.com/en/latest/integrations/wandb_tracking.html |
Action Input: "Leo DiCaprio girlfriend"
Observation: DiCaprio had a steady girlfriend in Camila Morrone. He had been with the model turned actress for nearly five years, as they were first said to be dating at the end of 2017. And the now 26-year-old Morrone is no stranger to Hollywood.
Thought: I need to calculate her age raised to the 0.43 power.
Action: Calculator
Action Input: 26^0.43
Observation: Answer: 4.059182145592686
Thought: I now know the final answer.
Final Answer: Leo DiCaprio's girlfriend is Camila Morrone and her current age raised to the 0.43 power is 4.059182145592686.
> Finished chain.
Waiting for W&B process to finish... (success). View run agent at: https://wandb.ai/harrison-chase/langchain_callback_demo/runs/wzy59zjqSynced 5 W&B file(s), 2 media file(s), 7 artifact file(s) and 0 other file(s)Find logs at: ./wandb/run-20230318_150550-wzy59zjq/logs
ClickHouse
Contents
Installation
Configure clickhouse vector index
Wrappers
VectorStore
ClickHouse#
This page covers how to use ClickHouse Vector Search within LangChain.
ClickHouse is an open-source, real-time OLAP database with full SQL support and a wide range of functions to assist users in writing analytical queries. Some of these functions and data structures perform distance operations between vectors, enabling ClickHouse to be used as a vector database.
Due to the fully parallelized query pipeline, ClickHouse can process vector search operations very quickly, especially when performing exact matching through a linear scan over all rows, delivering processing speed comparable to dedicated vector databases.
High compression levels, tunable through custom compression codecs, enable very large datasets to be stored and queried. ClickHouse is not memory-bound, allowing multi-TB datasets containing embeddings to be queried.
The capabilities for computing the distance between two vectors are just another SQL function and can be effectively combined with more traditional SQL filtering and aggregation capabilities. This allows vectors to be stored and queried alongside metadata, and even rich text, enabling a broad array of use cases and applications.
Finally, experimental ClickHouse capabilities like Approximate Nearest Neighbour (ANN) indices support faster approximate matching of vectors and provide a promising development aimed to further enhance the vector matching capabilities of ClickHouse.
Installation#
Install clickhouse server by binary or docker image
Install the Python SDK with pip install clickhouse-connect
Configure clickhouse vector index#
Customize the ClickhouseSettings object with parameters:
from langchain.vectorstores import Clickhouse, ClickhouseSettings
config = ClickhouseSettings(host="<clickhouse-server-host>", port=8123, ...)
index = Clickhouse(embedding_function, config)
index.add_documents(...)
Wrappers#
supported functions:
add_texts
add_documents
from_texts
from_documents
similarity_search
asimilarity_search
similarity_search_by_vector
asimilarity_search_by_vector
similarity_search_with_relevance_scores
VectorStore#
There exists a wrapper around open source Clickhouse database, allowing you to use it as a vectorstore,
whether for semantic search or similar example retrieval.
To import this vectorstore:
from langchain.vectorstores import Clickhouse
For a more detailed walkthrough of the Clickhouse wrapper, see this notebook
Deep Lake
Contents
Why Deep Lake?
More Resources
Installation and Setup
Wrappers
VectorStore
Deep Lake#
This page covers how to use the Deep Lake ecosystem within LangChain.
Why Deep Lake?#
More than just a (multi-modal) vector store. You can later use the dataset to fine-tune your own LLM models.
Not only stores embeddings, but also the original data with automatic version control.
Truly serverless. Doesn’t require another service and can be used with major cloud providers (AWS S3, GCS, etc.)
More Resources#
Ultimate Guide to LangChain & Deep Lake: Build ChatGPT to Answer Questions on Your Financial Data
Twitter the-algorithm codebase analysis with Deep Lake
Here are the whitepaper and academic paper for Deep Lake
Here is a set of additional resources available for review: Deep Lake, Getting Started and Tutorials
Installation and Setup#
Install the Python package with pip install deeplake
Wrappers#
VectorStore#
There exists a wrapper around Deep Lake, a data lake for Deep Learning applications, allowing you to use it as a vector store (for now), whether for semantic search or example selection.
To import this vectorstore:
from langchain.vectorstores import DeepLake
For a more detailed walkthrough of the Deep Lake wrapper, see this notebook
Aim
Aim#
Aim makes it super easy to visualize and debug LangChain executions. Aim tracks inputs and outputs of LLMs and tools, as well as actions of agents.
With Aim, you can easily debug and examine an individual execution:
Additionally, you have the option to compare multiple executions side by side:
Aim is fully open source, learn more about Aim on GitHub.
Let’s move forward and see how to enable and configure Aim callback.
Tracking LangChain Executions with Aim
In this notebook we will explore three usage scenarios. To start off, we will install the necessary packages and import certain modules. Subsequently, we will configure two environment variables that can be established either within the Python script or through the terminal.
!pip install aim
!pip install langchain
!pip install openai
!pip install google-search-results
import os
from datetime import datetime
from langchain.llms import OpenAI
from langchain.callbacks import AimCallbackHandler, StdOutCallbackHandler
Our examples use a GPT model as the LLM, and OpenAI offers an API for this purpose. You can obtain the key from the following link: https://platform.openai.com/account/api-keys .
We will use the SerpApi to retrieve search results from Google. To acquire the SerpApi key, please go to https://serpapi.com/manage-api-key .
os.environ["OPENAI_API_KEY"] = "..."
os.environ["SERPAPI_API_KEY"] = "..."
The event methods of AimCallbackHandler accept the LangChain module or agent as input and log at least the prompts and generated results, as well as the serialized version of the LangChain module, to the designated Aim run.
session_group = datetime.now().strftime("%m.%d.%Y_%H.%M.%S")
aim_callback = AimCallbackHandler(
repo=".", | https://python.langchain.com/en/latest/integrations/aim_tracking.html |