prompt
stringlengths 43
25.9k
| completion
stringlengths 7
362
| api
stringlengths 18
90
|
---|---|---|
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-cloud-storage')
from langchain_community.document_loaders import GCSDirectoryLoader
loader = GCSDirectoryLoader(project_name="aist", bucket="testing-hwc")
loader.load()
loader = | GCSDirectoryLoader(project_name="aist", bucket="testing-hwc", prefix="fake") | langchain_community.document_loaders.GCSDirectoryLoader |
from getpass import getpass
WRITER_API_KEY = getpass()
import os
os.environ["WRITER_API_KEY"] = WRITER_API_KEY
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import Writer
template = """Question: {question}
Answer: Let's think step by step."""
prompt = | PromptTemplate.from_template(template) | langchain.prompts.PromptTemplate.from_template |
model_url = "http://localhost:5000"
from langchain.chains import LLMChain
from langchain.globals import set_debug
from langchain.prompts import PromptTemplate
from langchain_community.llms import TextGen
set_debug(True)
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm = TextGen(model_url=model_url)
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
llm_chain.run(question)
model_url = "ws://localhost:5005"
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.globals import set_debug
from langchain.prompts import PromptTemplate
from langchain_community.llms import TextGen
set_debug(True)
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm = TextGen(
model_url=model_url, streaming=True, callbacks=[ | StreamingStdOutCallbackHandler() | langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler |
import getpass
import os
os.environ["TAVILY_API_KEY"] = getpass.getpass()
from langchain.retrievers.tavily_search_api import TavilySearchAPIRetriever
retriever = TavilySearchAPIRetriever(k=3)
retriever.invoke("what year was breath of the wild released?")
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
prompt = | ChatPromptTemplate.from_template(
"""Answer the question based only on the context provided.
Context: {context}
Question: {question}"""
) | langchain_core.prompts.ChatPromptTemplate.from_template |
from langchain_community.tools.edenai import (
EdenAiExplicitImageTool,
EdenAiObjectDetectionTool,
EdenAiParsingIDTool,
EdenAiParsingInvoiceTool,
EdenAiSpeechToTextTool,
EdenAiTextModerationTool,
EdenAiTextToSpeechTool,
)
from langchain.agents import AgentType, initialize_agent
from langchain_community.llms import EdenAI
llm = EdenAI(
feature="text", provider="openai", params={"temperature": 0.2, "max_tokens": 250}
)
tools = [
EdenAiTextModerationTool(providers=["openai"], language="en"),
EdenAiObjectDetectionTool(providers=["google", "api4ai"]),
EdenAiTextToSpeechTool(providers=["amazon"], language="en", voice="MALE"),
EdenAiExplicitImageTool(providers=["amazon", "google"]),
| EdenAiSpeechToTextTool(providers=["amazon"]) | langchain_community.tools.edenai.EdenAiSpeechToTextTool |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sodapy')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pandas')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet geopandas')
import ast
import geopandas as gpd
import pandas as pd
from langchain_community.document_loaders import OpenCityDataLoader
dataset = "tmnf-yvry" # San Francisco crime data
loader = | OpenCityDataLoader(city_id="data.sfgov.org", dataset_id=dataset, limit=5000) | langchain_community.document_loaders.OpenCityDataLoader |
get_ipython().run_cell_magic('writefile', 'wechat_chats.txt', '女朋友 2023/09/16 2:51 PM\n天气有点凉\n\n男朋友 2023/09/16 2:51 PM\n珍簟凉风著,瑶琴寄恨生。嵇君懒书札,底物慰秋情。\n\n女朋友 2023/09/16 3:06 PM\n忙什么呢\n\n男朋友 2023/09/16 3:06 PM\n今天只干成了一件像样的事\n那就是想你\n\n女朋友 2023/09/16 3:06 PM\n[动画表情]\n')
import logging
import re
from typing import Iterator, List
from langchain_community.chat_loaders import base as chat_loaders
from langchain_core.messages import BaseMessage, HumanMessage
logger = logging.getLogger()
class WeChatChatLoader(chat_loaders.BaseChatLoader):
def __init__(self, path: str):
"""
Initialize the Discord chat loader.
Args:
path: Path to the exported Discord chat text file.
"""
self.path = path
self._message_line_regex = re.compile(
r"(?P<sender>.+?) (?P<timestamp>\d{4}/\d{2}/\d{2} \d{1,2}:\d{2} (?:AM|PM))", # noqa
)
def _append_message_to_results(
self,
results: List,
current_sender: str,
current_timestamp: str,
current_content: List[str],
):
content = "\n".join(current_content).strip()
if not re.match(r"\[.*\]", content):
results.append(
HumanMessage(
content=content,
additional_kwargs={
"sender": current_sender,
"events": [{"message_time": current_timestamp}],
},
)
)
return results
def _load_single_chat_session_from_txt(
self, file_path: str
) -> chat_loaders.ChatSession:
"""
Load a single chat session from a text file.
Args:
file_path: Path to the text file containing the chat messages.
Returns:
A `ChatSession` object containing the loaded chat messages.
"""
with open(file_path, "r", encoding="utf-8") as file:
lines = file.readlines()
results: List[BaseMessage] = []
current_sender = None
current_timestamp = None
current_content = []
for line in lines:
if re.match(self._message_line_regex, line):
if current_sender and current_content:
results = self._append_message_to_results(
results, current_sender, current_timestamp, current_content
)
current_sender, current_timestamp = re.match(
self._message_line_regex, line
).groups()
current_content = []
else:
current_content.append(line.strip())
if current_sender and current_content:
results = self._append_message_to_results(
results, current_sender, current_timestamp, current_content
)
return chat_loaders.ChatSession(messages=results)
def lazy_load(self) -> Iterator[chat_loaders.ChatSession]:
"""
Lazy load the messages from the chat file and yield them in the required format.
Yields:
A `ChatSession` object containing the loaded chat messages.
"""
yield self._load_single_chat_session_from_txt(self.path)
loader = WeChatChatLoader(
path="./wechat_chats.txt",
)
from typing import List
from langchain_community.chat_loaders.base import ChatSession
from langchain_community.chat_loaders.utils import (
map_ai_messages,
merge_chat_runs,
)
raw_messages = loader.lazy_load()
merged_messages = merge_chat_runs(raw_messages)
messages: List[ChatSession] = list( | map_ai_messages(merged_messages, sender="男朋友") | langchain_community.chat_loaders.utils.map_ai_messages |
get_ipython().system('poetry run pip install dgml-utils==0.3.0 --upgrade --quiet')
import os
from langchain_community.document_loaders import DocugamiLoader
DOCUGAMI_API_KEY = os.environ.get("DOCUGAMI_API_KEY")
docset_id = "26xpy3aes7xp"
document_ids = ["d7jqdzcj50sj", "cgd1eacfkchw"]
loader = DocugamiLoader(docset_id=docset_id, document_ids=document_ids)
chunks = loader.load()
len(chunks)
loader.min_text_length = 64
loader.include_xml_tags = True
chunks = loader.load()
for chunk in chunks[:5]:
print(chunk)
get_ipython().system('poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib')
loader = DocugamiLoader(docset_id="zo954yqy53wp")
chunks = loader.load()
for chunk in chunks:
stripped_metadata = chunk.metadata.copy()
for key in chunk.metadata:
if key not in ["name", "xpath", "id", "structure"]:
del stripped_metadata[key]
chunk.metadata = stripped_metadata
print(len(chunks))
from langchain.chains import RetrievalQA
from langchain_community.vectorstores.chroma import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
embedding = OpenAIEmbeddings()
vectordb = | Chroma.from_documents(documents=chunks, embedding=embedding) | langchain_community.vectorstores.chroma.Chroma.from_documents |
import os
os.environ["SERPER_API_KEY"] = ""
os.environ["OPENAI_API_KEY"] = ""
from typing import Any, List
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_community.utilities import GoogleSerperAPIWrapper
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_openai import ChatOpenAI, OpenAI
class SerperSearchRetriever(BaseRetriever):
search: GoogleSerperAPIWrapper = None
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
) -> List[Document]:
return [Document(page_content=self.search.run(query))]
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
raise NotImplementedError()
retriever = SerperSearchRetriever(search=GoogleSerperAPIWrapper())
from langchain.globals import set_verbose
set_verbose(True)
from langchain.chains import FlareChain
flare = FlareChain.from_llm(
ChatOpenAI(temperature=0),
retriever=retriever,
max_generation_len=164,
min_prob=0.3,
)
query = "explain in great detail the difference between the langchain framework and baby agi"
flare.run(query)
llm = | OpenAI() | langchain_openai.OpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic')
import os
import boto3
comprehend_client = boto3.client("comprehend", region_name="us-east-1")
from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain
comprehend_moderation = AmazonComprehendModerationChain(
client=comprehend_client,
verbose=True, # optional
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comprehend_moderation
| {"input": (lambda x: x["output"]) | llm}
| comprehend_moderation
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?"
}
)
except ModerationPiiError as e:
print(str(e))
else:
print(response["output"])
from langchain_experimental.comprehend_moderation import (
BaseModerationConfig,
ModerationPiiConfig,
ModerationPromptSafetyConfig,
ModerationToxicityConfig,
)
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5)
moderation_config = BaseModerationConfig(
filters=[pii_config, toxicity_config, prompt_safety_config]
)
comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
verbose=True,
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comp_moderation_with_config
| {"input": (lambda x: x["output"]) | llm}
| comp_moderation_with_config
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-45-7890. Can you give me some more samples?"
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])
from langchain_experimental.comprehend_moderation import BaseModerationCallbackHandler
class MyModCallback(BaseModerationCallbackHandler):
async def on_after_pii(self, output_beacon, unique_id):
import json
moderation_type = output_beacon["moderation_type"]
chain_id = output_beacon["moderation_chain_id"]
with open(f"output-{moderation_type}-{chain_id}.json", "w") as file:
data = {"beacon_data": output_beacon, "unique_id": unique_id}
json.dump(data, file)
"""
async def on_after_toxicity(self, output_beacon, unique_id):
pass
async def on_after_prompt_safety(self, output_beacon, unique_id):
pass
"""
my_callback = MyModCallback()
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
moderation_config = BaseModerationConfig(filters=[pii_config, toxicity_config])
comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
unique_id="[email protected]", # A unique ID
moderation_callback=my_callback, # BaseModerationCallbackHandler
verbose=True,
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comp_moderation_with_config
| {"input": (lambda x: x["output"]) | llm}
| comp_moderation_with_config
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-456-7890. Can you give me some more samples?"
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])
get_ipython().run_line_magic('pip', 'install --upgrade --quiet huggingface_hub')
import os
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "<YOUR HF TOKEN HERE>"
repo_id = "google/flan-t5-xxl"
from langchain.prompts import PromptTemplate
from langchain_community.llms import HuggingFaceHub
template = """{question}"""
prompt = PromptTemplate.from_template(template)
llm = HuggingFaceHub(
repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 256}
)
pii_config = ModerationPiiConfig(
labels=["SSN", "CREDIT_DEBIT_NUMBER"], redact=True, mask_character="X"
)
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = | ModerationPromptSafetyConfig(threshold=0.8) | langchain_experimental.comprehend_moderation.ModerationPromptSafetyConfig |
from langchain.indexes import SQLRecordManager, index
from langchain_core.documents import Document
from langchain_elasticsearch import ElasticsearchStore
from langchain_openai import OpenAIEmbeddings
collection_name = "test_index"
embedding = OpenAIEmbeddings()
vectorstore = ElasticsearchStore(
es_url="http://localhost:9200", index_name="test_index", embedding=embedding
)
namespace = f"elasticsearch/{collection_name}"
record_manager = SQLRecordManager(
namespace, db_url="sqlite:///record_manager_cache.sql"
)
record_manager.create_schema()
doc1 = Document(page_content="kitty", metadata={"source": "kitty.txt"})
doc2 = Document(page_content="doggy", metadata={"source": "doggy.txt"})
def _clear():
"""Hacky helper method to clear content. See the `full` mode section to to understand why it works."""
index([], record_manager, vectorstore, cleanup="full", source_id_key="source")
_clear()
index(
[doc1, doc1, doc1, doc1, doc1],
record_manager,
vectorstore,
cleanup=None,
source_id_key="source",
)
_clear()
index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source")
index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source")
_clear()
index(
[doc1, doc2],
record_manager,
vectorstore,
cleanup="incremental",
source_id_key="source",
)
index(
[doc1, doc2],
record_manager,
vectorstore,
cleanup="incremental",
source_id_key="source",
)
index([], record_manager, vectorstore, cleanup="incremental", source_id_key="source")
changed_doc_2 = Document(page_content="puppy", metadata={"source": "doggy.txt"})
index(
[changed_doc_2],
record_manager,
vectorstore,
cleanup="incremental",
source_id_key="source",
)
_clear()
all_docs = [doc1, doc2]
index(all_docs, record_manager, vectorstore, cleanup="full", source_id_key="source")
del all_docs[0]
all_docs
index(all_docs, record_manager, vectorstore, cleanup="full", source_id_key="source")
from langchain_text_splitters import CharacterTextSplitter
doc1 = Document(
page_content="kitty kitty kitty kitty kitty", metadata={"source": "kitty.txt"}
)
doc2 = Document(page_content="doggy doggy the doggy", metadata={"source": "doggy.txt"})
new_docs = CharacterTextSplitter(
separator="t", keep_separator=True, chunk_size=12, chunk_overlap=2
).split_documents([doc1, doc2])
new_docs
_clear()
index(
new_docs,
record_manager,
vectorstore,
cleanup="incremental",
source_id_key="source",
)
changed_doggy_docs = [
| Document(page_content="woof woof", metadata={"source": "doggy.txt"}) | langchain_core.documents.Document |
get_ipython().system('pip install -U openai langchain langchain-experimental')
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=256)
chat.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "What is this image showing"},
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/langchain-ai/langchain/master/docs/static/img/langchain_stack.png",
"detail": "auto",
},
},
]
)
]
)
from langchain.agents.openai_assistant import OpenAIAssistantRunnable
interpreter_assistant = OpenAIAssistantRunnable.create_assistant(
name="langchain assistant",
instructions="You are a personal math tutor. Write and run code to answer math questions.",
tools=[{"type": "code_interpreter"}],
model="gpt-4-1106-preview",
)
output = interpreter_assistant.invoke({"content": "What's 10 - 4 raised to the 2.7"})
output
get_ipython().system('pip install e2b duckduckgo-search')
from langchain.tools import DuckDuckGoSearchRun, E2BDataAnalysisTool
tools = [E2BDataAnalysisTool(api_key="..."), DuckDuckGoSearchRun()]
agent = OpenAIAssistantRunnable.create_assistant(
name="langchain assistant e2b tool",
instructions="You are a personal math tutor. Write and run code to answer math questions. You can also search the internet.",
tools=tools,
model="gpt-4-1106-preview",
as_agent=True,
)
from langchain.agents import AgentExecutor
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"content": "What's the weather in SF today divided by 2.7"})
agent = OpenAIAssistantRunnable.create_assistant(
name="langchain assistant e2b tool",
instructions="You are a personal math tutor. Write and run code to answer math questions.",
tools=tools,
model="gpt-4-1106-preview",
as_agent=True,
)
from langchain_core.agents import AgentFinish
def execute_agent(agent, tools, input):
tool_map = {tool.name: tool for tool in tools}
response = agent.invoke(input)
while not isinstance(response, AgentFinish):
tool_outputs = []
for action in response:
tool_output = tool_map[action.tool].invoke(action.tool_input)
print(action.tool, action.tool_input, tool_output, end="\n\n")
tool_outputs.append(
{"output": tool_output, "tool_call_id": action.tool_call_id}
)
response = agent.invoke(
{
"tool_outputs": tool_outputs,
"run_id": action.run_id,
"thread_id": action.thread_id,
}
)
return response
response = execute_agent(agent, tools, {"content": "What's 10 - 4 raised to the 2.7"})
print(response.return_values["output"])
next_response = execute_agent(
agent, tools, {"content": "now add 17.241", "thread_id": response.thread_id}
)
print(next_response.return_values["output"])
chat = ChatOpenAI(model="gpt-3.5-turbo-1106").bind(
response_format={"type": "json_object"}
)
output = chat.invoke(
[
SystemMessage(
content="Extract the 'name' and 'origin' of any companies mentioned in the following statement. Return a JSON list."
),
HumanMessage(
content="Google was founded in the USA, while Deepmind was founded in the UK"
),
]
)
print(output.content)
import json
json.loads(output.content)
chat = ChatOpenAI(model="gpt-3.5-turbo-1106")
output = chat.generate(
[
[
SystemMessage(
content="Extract the 'name' and 'origin' of any companies mentioned in the following statement. Return a JSON list."
),
HumanMessage(
content="Google was founded in the USA, while Deepmind was founded in the UK"
),
]
]
)
print(output.llm_output)
from typing import Literal
from langchain.output_parsers.openai_tools import PydanticToolsParser
from langchain.utils.openai_functions import convert_pydantic_to_openai_tool
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
class GetCurrentWeather(BaseModel):
"""Get the current weather in a location."""
location: str = Field(description="The city and state, e.g. San Francisco, CA")
unit: Literal["celsius", "fahrenheit"] = Field(
default="fahrenheit", description="The temperature unit, default to fahrenheit"
)
prompt = ChatPromptTemplate.from_messages(
[("system", "You are a helpful assistant"), ("user", "{input}")]
)
model = ChatOpenAI(model="gpt-3.5-turbo-1106").bind(
tools=[convert_pydantic_to_openai_tool(GetCurrentWeather)]
)
chain = prompt | model | | PydanticToolsParser(tools=[GetCurrentWeather]) | langchain.output_parsers.openai_tools.PydanticToolsParser |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet comet_ml langchain langchain-openai google-search-results spacy textstat pandas')
get_ipython().system('{sys.executable} -m spacy download en_core_web_sm')
import comet_ml
comet_ml.init(project_name="comet-example-langchain")
import os
os.environ["OPENAI_API_KEY"] = "..."
os.environ["SERPAPI_API_KEY"] = "..."
from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler
from langchain_openai import OpenAI
comet_callback = CometCallbackHandler(
project_name="comet-example-langchain",
complexity_metrics=True,
stream_logs=True,
tags=["llm"],
visualizations=["dep"],
)
callbacks = [StdOutCallbackHandler(), comet_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks, verbose=True)
llm_result = llm.generate(["Tell me a joke", "Tell me a poem", "Tell me a fact"] * 3)
print("LLM result", llm_result)
comet_callback.flush_tracker(llm, finish=True)
from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
comet_callback = CometCallbackHandler(
complexity_metrics=True,
project_name="comet-example-langchain",
stream_logs=True,
tags=["synopsis-chain"],
)
callbacks = [StdOutCallbackHandler(), comet_callback]
llm = | OpenAI(temperature=0.9, callbacks=callbacks) | langchain_openai.OpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
runnable = RunnableParallel(
passed=RunnablePassthrough(),
extra=RunnablePassthrough.assign(mult=lambda x: x["num"] * 3),
modified=lambda x: x["num"] + 1,
)
runnable.invoke({"num": 1})
from langchain_community.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
vectorstore = FAISS.from_texts(
["harrison worked at kensho"], embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = | ChatOpenAI() | langchain_openai.ChatOpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet azure-search-documents')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet azure-identity')
import os
from langchain_community.vectorstores.azuresearch import AzureSearch
from langchain_openai import AzureOpenAIEmbeddings, OpenAIEmbeddings
openai_api_key: str = "PLACEHOLDER FOR YOUR API KEY"
openai_api_version: str = "2023-05-15"
model: str = "text-embedding-ada-002"
azure_endpoint: str = "PLACEHOLDER FOR YOUR AZURE OPENAI ENDPOINT"
azure_openai_api_key: str = "PLACEHOLDER FOR YOUR AZURE OPENAI KEY"
azure_openai_api_version: str = "2023-05-15"
azure_deployment: str = "text-embedding-ada-002"
vector_store_address: str = "YOUR_AZURE_SEARCH_ENDPOINT"
vector_store_password: str = "YOUR_AZURE_SEARCH_ADMIN_KEY"
embeddings: OpenAIEmbeddings = OpenAIEmbeddings(
openai_api_key=openai_api_key, openai_api_version=openai_api_version, model=model
)
embeddings: AzureOpenAIEmbeddings = AzureOpenAIEmbeddings(
azure_deployment=azure_deployment,
openai_api_version=azure_openai_api_version,
azure_endpoint=azure_endpoint,
api_key=azure_openai_api_key,
)
index_name: str = "langchain-vector-demo"
vector_store: AzureSearch = AzureSearch(
azure_search_endpoint=vector_store_address,
azure_search_key=vector_store_password,
index_name=index_name,
embedding_function=embeddings.embed_query,
)
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = | TextLoader("../../modules/state_of_the_union.txt", encoding="utf-8") | langchain_community.document_loaders.TextLoader |
get_ipython().run_line_magic('pip', 'install -qU langchain-text-splitters')
import json
import requests
json_data = requests.get("https://api.smith.langchain.com/openapi.json").json()
from langchain_text_splitters import RecursiveJsonSplitter
splitter = | RecursiveJsonSplitter(max_chunk_size=300) | langchain_text_splitters.RecursiveJsonSplitter |
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)')
get_ipython().system(' pip install "unstructured[all-docs]==0.10.19" pillow pydantic lxml pillow matplotlib tiktoken open_clip_torch torch')
path = "/Users/rlm/Desktop/cpi/"
from langchain_community.document_loaders import PyPDFLoader
loader = | PyPDFLoader(path + "cpi.pdf") | langchain_community.document_loaders.PyPDFLoader |
from langchain.indexes import SQLRecordManager, index
from langchain_core.documents import Document
from langchain_elasticsearch import ElasticsearchStore
from langchain_openai import OpenAIEmbeddings
collection_name = "test_index"
embedding = | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
import dspy
colbertv2 = dspy.ColBERTv2(url="http://20.102.90.50:2017/wiki17_abstracts")
from langchain.cache import SQLiteCache
from langchain.globals import set_llm_cache
from langchain_openai import OpenAI
set_llm_cache(SQLiteCache(database_path="cache.db"))
llm = | OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0) | langchain_openai.OpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai deepeval')
get_ipython().system('deepeval login')
from deepeval.metrics.answer_relevancy import AnswerRelevancy
answer_relevancy_metric = AnswerRelevancy(minimum_score=0.5)
from langchain.callbacks.confident_callback import DeepEvalCallbackHandler
deepeval_callback = DeepEvalCallbackHandler(
implementation_name="langchainQuickstart", metrics=[answer_relevancy_metric]
)
from langchain_openai import OpenAI
llm = OpenAI(
temperature=0,
callbacks=[deepeval_callback],
verbose=True,
openai_api_key="<YOUR_API_KEY>",
)
output = llm.generate(
[
"What is the best evaluation tool out there? (no bias at all)",
]
)
answer_relevancy_metric.is_successful()
import requests
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
text_file_url = "https://raw.githubusercontent.com/hwchase17/chat-your-data/master/state_of_the_union.txt"
openai_api_key = "sk-XXX"
with open("state_of_the_union.txt", "w") as f:
response = requests.get(text_file_url)
f.write(response.text)
loader = TextLoader("state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
docsearch = | Chroma.from_documents(texts, embeddings) | langchain_community.vectorstores.Chroma.from_documents |
from langchain_community.document_loaders import WebBaseLoader
loader_web = WebBaseLoader(
"https://github.com/basecamp/handbook/blob/master/37signals-is-you.md"
)
from langchain_community.document_loaders import PyPDFLoader
loader_pdf = PyPDFLoader("../MachineLearning-Lecture01.pdf")
from langchain_community.document_loaders.merge import MergedDataLoader
loader_all = | MergedDataLoader(loaders=[loader_web, loader_pdf]) | langchain_community.document_loaders.merge.MergedDataLoader |
from getpass import getpass
KAY_API_KEY = getpass()
import os
from langchain.retrievers import KayAiRetriever
os.environ["KAY_API_KEY"] = KAY_API_KEY
retriever = KayAiRetriever.create(
dataset_id="company", data_types=["10-K", "10-Q", "PressRelease"], num_contexts=3
)
docs = retriever.get_relevant_documents(
"What were the biggest strategy changes and partnerships made by Roku in 2023??"
)
docs
OPENAI_API_KEY = getpass()
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
from langchain.chains import ConversationalRetrievalChain
from langchain_openai import ChatOpenAI
model = | ChatOpenAI(model_name="gpt-3.5-turbo") | langchain_openai.ChatOpenAI |
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_openai import ChatOpenAI
examples = [
{
"input": "Could the members of The Police perform lawful arrests?",
"output": "what can the members of The Police do?",
},
{
"input": "Jan Sindel’s was born in what country?",
"output": "what is Jan Sindel’s personal history?",
},
]
example_prompt = ChatPromptTemplate.from_messages(
[
("human", "{input}"),
("ai", "{output}"),
]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
example_prompt=example_prompt,
examples=examples,
)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"""You are an expert at world knowledge. Your task is to step back and paraphrase a question to a more generic step-back question, which is easier to answer. Here are a few examples:""",
),
few_shot_prompt,
("user", "{question}"),
]
)
question_gen = prompt | ChatOpenAI(temperature=0) | StrOutputParser()
question = "was chatgpt around while trump was president?"
question_gen.invoke({"question": question})
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
search = DuckDuckGoSearchAPIWrapper(max_results=4)
def retriever(query):
return search.run(query)
retriever(question)
retriever(question_gen.invoke({"question": question}))
from langchain import hub
response_prompt = hub.pull("langchain-ai/stepback-answer")
chain = (
{
"normal_context": RunnableLambda(lambda x: x["question"]) | retriever,
"step_back_context": question_gen | retriever,
"question": lambda x: x["question"],
}
| response_prompt
| ChatOpenAI(temperature=0)
| StrOutputParser()
)
chain.invoke({"question": question})
response_prompt_template = """You are an expert of world knowledge. I am going to ask you a question. Your response should be comprehensive and not contradicted with the following context if they are relevant. Otherwise, ignore them if they are not relevant.
{normal_context}
Original Question: {question}
Answer:"""
response_prompt = | ChatPromptTemplate.from_template(response_prompt_template) | langchain_core.prompts.ChatPromptTemplate.from_template |
from langchain_core.messages import (
AIMessage,
BaseMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
ToolMessage,
)
from langchain_core.messages import (
AIMessageChunk,
FunctionMessageChunk,
HumanMessageChunk,
SystemMessageChunk,
ToolMessageChunk,
)
AIMessageChunk(content="Hello") + AIMessageChunk(content=" World!")
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import BaseChatModel, SimpleChatModel
from langchain_core.messages import AIMessageChunk, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import run_in_executor
class CustomChatModelAdvanced(BaseChatModel):
"""A custom chat model that echoes the first `n` characters of the input.
When contributing an implementation to LangChain, carefully document
the model including the initialization parameters, include
an example of how to initialize the model and include any relevant
links to the underlying models documentation or API.
Example:
.. code-block:: python
model = CustomChatModel(n=2)
result = model.invoke([HumanMessage(content="hello")])
result = model.batch([[HumanMessage(content="hello")],
[HumanMessage(content="world")]])
"""
n: int
"""The number of characters from the last message of the prompt to be echoed."""
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Override the _generate method to implement the chat model logic.
This can be a call to an API, a call to a local model, or any other
implementation that generates a response to the input prompt.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
run_manager: A run manager with callbacks for the LLM.
"""
last_message = messages[-1]
tokens = last_message.content[: self.n]
message = AIMessage(content=tokens)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
"""Stream the output of the model.
This method should be implemented if the model can generate output
in a streaming fashion. If the model does not support streaming,
do not implement it. In that case streaming requests will be automatically
handled by the _generate method.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
run_manager: A run manager with callbacks for the LLM.
"""
last_message = messages[-1]
tokens = last_message.content[: self.n]
for token in tokens:
chunk = ChatGenerationChunk(message=AIMessageChunk(content=token))
if run_manager:
run_manager.on_llm_new_token(token, chunk=chunk)
yield chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
"""An async variant of astream.
If not provided, the default behavior is to delegate to the _generate method.
The implementation below instead will delegate to `_stream` and will
kick it off in a separate thread.
If you're able to natively support async, then by all means do so!
"""
result = await run_in_executor(
None,
self._stream,
messages,
stop=stop,
run_manager=run_manager.get_sync() if run_manager else None,
**kwargs,
)
for chunk in result:
yield chunk
@property
def _llm_type(self) -> str:
"""Get the type of language model used by this chat model."""
return "echoing-chat-model-advanced"
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Return a dictionary of identifying parameters."""
return {"n": self.n}
model = CustomChatModelAdvanced(n=3)
model.invoke(
[
| HumanMessage(content="hello!") | langchain_core.messages.HumanMessage |
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)')
get_ipython().system(' pip install "unstructured[all-docs]" pillow pydantic lxml pillow matplotlib chromadb tiktoken')
from langchain_text_splitters import CharacterTextSplitter
from unstructured.partition.pdf import partition_pdf
def extract_pdf_elements(path, fname):
"""
Extract images, tables, and chunk text from a PDF file.
path: File path, which is used to dump images (.jpg)
fname: File name
"""
return partition_pdf(
filename=path + fname,
extract_images_in_pdf=False,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
def categorize_elements(raw_pdf_elements):
"""
Categorize extracted elements from a PDF into tables and texts.
raw_pdf_elements: List of unstructured.documents.elements
"""
tables = []
texts = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
tables.append(str(element))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
texts.append(str(element))
return texts, tables
fpath = "/Users/rlm/Desktop/cj/"
fname = "cj.pdf"
raw_pdf_elements = extract_pdf_elements(fpath, fname)
texts, tables = categorize_elements(raw_pdf_elements)
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
chunk_size=4000, chunk_overlap=0
)
joined_texts = " ".join(texts)
texts_4k_token = text_splitter.split_text(joined_texts)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
def generate_text_summaries(texts, tables, summarize_texts=False):
"""
Summarize text elements
texts: List of str
tables: List of str
summarize_texts: Bool to summarize texts
"""
prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \
These summaries will be embedded and used to retrieve the raw text or table elements. \
Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
text_summaries = []
table_summaries = []
if texts and summarize_texts:
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
elif texts:
text_summaries = texts
if tables:
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
return text_summaries, table_summaries
text_summaries, table_summaries = generate_text_summaries(
texts_4k_token, tables, summarize_texts=True
)
import base64
import os
from langchain_core.messages import HumanMessage
def encode_image(image_path):
"""Getting the base64 string"""
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
def image_summarize(img_base64, prompt):
"""Make image summary"""
chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=1024)
msg = chat.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
},
]
)
]
)
return msg.content
def generate_img_summaries(path):
"""
Generate summaries and base64 encoded strings for images
path: Path to list of .jpg files extracted by Unstructured
"""
img_base64_list = []
image_summaries = []
prompt = """You are an assistant tasked with summarizing images for retrieval. \
These summaries will be embedded and used to retrieve the raw image. \
Give a concise summary of the image that is well optimized for retrieval."""
for img_file in sorted(os.listdir(path)):
if img_file.endswith(".jpg"):
img_path = os.path.join(path, img_file)
base64_image = encode_image(img_path)
img_base64_list.append(base64_image)
image_summaries.append(image_summarize(base64_image, prompt))
return img_base64_list, image_summaries
img_base64_list, image_summaries = generate_img_summaries(fpath)
import uuid
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
def create_multi_vector_retriever(
vectorstore, text_summaries, texts, table_summaries, tables, image_summaries, images
):
"""
Create retriever that indexes summaries, but returns raw images or texts
"""
store = InMemoryStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
def add_documents(retriever, doc_summaries, doc_contents):
doc_ids = [str(uuid.uuid4()) for _ in doc_contents]
summary_docs = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(doc_summaries)
]
retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, doc_contents)))
if text_summaries:
add_documents(retriever, text_summaries, texts)
if table_summaries:
add_documents(retriever, table_summaries, tables)
if image_summaries:
add_documents(retriever, image_summaries, images)
return retriever
vectorstore = Chroma(
collection_name="mm_rag_cj_blog", embedding_function=OpenAIEmbeddings()
)
retriever_multi_vector_img = create_multi_vector_retriever(
vectorstore,
text_summaries,
texts,
table_summaries,
tables,
image_summaries,
img_base64_list,
)
import io
import re
from IPython.display import HTML, display
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from PIL import Image
def plt_img_base64(img_base64):
"""Disply base64 encoded string as image"""
image_html = f'<img src="data:image/jpeg;base64,{img_base64}" />'
display(HTML(image_html))
def looks_like_base64(sb):
"""Check if the string looks like base64"""
return re.match("^[A-Za-z0-9+/]+[=]{0,2}$", sb) is not None
def is_image_data(b64data):
"""
Check if the base64 data is an image by looking at the start of the data
"""
image_signatures = {
b"\xFF\xD8\xFF": "jpg",
b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A": "png",
b"\x47\x49\x46\x38": "gif",
b"\x52\x49\x46\x46": "webp",
}
try:
header = base64.b64decode(b64data)[:8] # Decode and get the first 8 bytes
for sig, format in image_signatures.items():
if header.startswith(sig):
return True
return False
except Exception:
return False
def resize_base64_image(base64_string, size=(128, 128)):
"""
Resize an image encoded as a Base64 string
"""
img_data = base64.b64decode(base64_string)
img = Image.open(io.BytesIO(img_data))
resized_img = img.resize(size, Image.LANCZOS)
buffered = io.BytesIO()
resized_img.save(buffered, format=img.format)
return base64.b64encode(buffered.getvalue()).decode("utf-8")
def split_image_text_types(docs):
"""
Split base64-encoded images and texts
"""
b64_images = []
texts = []
for doc in docs:
if isinstance(doc, Document):
doc = doc.page_content
if looks_like_base64(doc) and is_image_data(doc):
doc = resize_base64_image(doc, size=(1300, 600))
b64_images.append(doc)
else:
texts.append(doc)
return {"images": b64_images, "texts": texts}
def img_prompt_func(data_dict):
"""
Join the context into a single string
"""
formatted_texts = "\n".join(data_dict["context"]["texts"])
messages = []
if data_dict["context"]["images"]:
for image in data_dict["context"]["images"]:
image_message = {
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{image}"},
}
messages.append(image_message)
text_message = {
"type": "text",
"text": (
"You are financial analyst tasking with providing investment advice.\n"
"You will be given a mixed of text, tables, and image(s) usually of charts or graphs.\n"
"Use this information to provide investment advice related to the user question. \n"
f"User-provided question: {data_dict['question']}\n\n"
"Text and / or tables:\n"
f"{formatted_texts}"
),
}
messages.append(text_message)
return [ | HumanMessage(content=messages) | langchain_core.messages.HumanMessage |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet ctranslate2')
get_ipython().system('ct2-transformers-converter --model meta-llama/Llama-2-7b-hf --quantization bfloat16 --output_dir ./llama-2-7b-ct2 --force')
from langchain_community.llms import CTranslate2
llm = CTranslate2(
model_path="./llama-2-7b-ct2",
tokenizer_name="meta-llama/Llama-2-7b-hf",
device="cuda",
device_index=[0, 1],
compute_type="bfloat16",
)
print(
llm(
"He presented me with plausible evidence for the existence of unicorns: ",
max_length=256,
sampling_topk=50,
sampling_temperature=0.2,
repetition_penalty=2,
cache_static_prompt=False,
)
)
print(
llm.generate(
["The list of top romantic songs:\n1.", "The list of top rap songs:\n1."],
max_length=128,
)
)
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
template = """{question}
Let's think step by step. """
prompt = | PromptTemplate.from_template(template) | langchain.prompts.PromptTemplate.from_template |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet redis redisvl langchain-openai tiktoken')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redis_url = "redis://localhost:6379"
redis_url = "redis://:secret@redis:7379/2"
redis_url = "redis://joe:secret@redis/0"
redis_url = "redis+sentinel://localhost:26379"
redis_url = "redis+sentinel://joe:secret@redis"
redis_url = "redis+sentinel://redis:26379/zone-1/2"
redis_url = "rediss://localhost:6379"
redis_url = "rediss+sentinel://localhost"
metadata = [
{
"user": "john",
"age": 18,
"job": "engineer",
"credit_score": "high",
},
{
"user": "derrick",
"age": 45,
"job": "doctor",
"credit_score": "low",
},
{
"user": "nancy",
"age": 94,
"job": "doctor",
"credit_score": "high",
},
{
"user": "tyler",
"age": 100,
"job": "engineer",
"credit_score": "high",
},
{
"user": "joe",
"age": 35,
"job": "dentist",
"credit_score": "medium",
},
]
texts = ["foo", "foo", "foo", "bar", "bar"]
from langchain_community.vectorstores.redis import Redis
rds = Redis.from_texts(
texts,
embeddings,
metadatas=metadata,
redis_url="redis://localhost:6379",
index_name="users",
)
rds.index_name
get_ipython().system('rvl index listall')
get_ipython().system('rvl index info -i users')
get_ipython().system('rvl stats -i users')
results = rds.similarity_search("foo")
print(results[0].page_content)
results = rds.similarity_search("foo", k=3)
meta = results[1].metadata
print("Key of the document in Redis: ", meta.pop("id"))
print("Metadata of the document: ", meta)
results = rds.similarity_search_with_score("foo", k=5)
for result in results:
print(f"Content: {result[0].page_content} --- Score: {result[1]}")
results = rds.similarity_search_with_score("foo", k=5, distance_threshold=0.1)
for result in results:
print(f"Content: {result[0].page_content} --- Score: {result[1]}")
results = rds.similarity_search_with_relevance_scores("foo", k=5)
for result in results:
print(f"Content: {result[0].page_content} --- Similiarity: {result[1]}")
results = rds.similarity_search_with_relevance_scores("foo", k=5, score_threshold=0.9)
for result in results:
print(f"Content: {result[0].page_content} --- Similarity: {result[1]}")
new_document = ["baz"]
new_metadata = [{"user": "sam", "age": 50, "job": "janitor", "credit_score": "high"}]
rds.add_texts(new_document, new_metadata)
results = rds.similarity_search("baz", k=3)
print(results[0].metadata)
results = rds.max_marginal_relevance_search("foo")
results = rds.max_marginal_relevance_search("foo", lambda_mult=0.1)
rds.write_schema("redis_schema.yaml")
new_rds = Redis.from_existing_index(
embeddings,
index_name="users",
redis_url="redis://localhost:6379",
schema="redis_schema.yaml",
)
results = new_rds.similarity_search("foo", k=3)
print(results[0].metadata)
new_rds.schema == rds.schema
index_schema = {
"tag": [{"name": "credit_score"}],
"text": [{"name": "user"}, {"name": "job"}],
"numeric": [{"name": "age"}],
}
rds, keys = Redis.from_texts_return_keys(
texts,
embeddings,
metadatas=metadata,
redis_url="redis://localhost:6379",
index_name="users_modified",
index_schema=index_schema, # pass in the new index schema
)
from langchain_community.vectorstores.redis import RedisText
is_engineer = | RedisText("job") | langchain_community.vectorstores.redis.RedisText |
from langchain.agents import Tool
from langchain_community.tools.file_management.read import ReadFileTool
from langchain_community.tools.file_management.write import WriteFileTool
from langchain_community.utilities import SerpAPIWrapper
search = | SerpAPIWrapper() | langchain_community.utilities.SerpAPIWrapper |
get_ipython().system('poetry run pip install dgml-utils==0.3.0 --upgrade --quiet')
import os
from langchain_community.document_loaders import DocugamiLoader
DOCUGAMI_API_KEY = os.environ.get("DOCUGAMI_API_KEY")
docset_id = "26xpy3aes7xp"
document_ids = ["d7jqdzcj50sj", "cgd1eacfkchw"]
loader = DocugamiLoader(docset_id=docset_id, document_ids=document_ids)
chunks = loader.load()
len(chunks)
loader.min_text_length = 64
loader.include_xml_tags = True
chunks = loader.load()
for chunk in chunks[:5]:
print(chunk)
get_ipython().system('poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib')
loader = DocugamiLoader(docset_id="zo954yqy53wp")
chunks = loader.load()
for chunk in chunks:
stripped_metadata = chunk.metadata.copy()
for key in chunk.metadata:
if key not in ["name", "xpath", "id", "structure"]:
del stripped_metadata[key]
chunk.metadata = stripped_metadata
print(len(chunks))
from langchain.chains import RetrievalQA
from langchain_community.vectorstores.chroma import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
embedding = OpenAIEmbeddings()
vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)
retriever = vectordb.as_retriever()
qa_chain = RetrievalQA.from_chain_type(
llm=OpenAI(), chain_type="stuff", retriever=retriever, return_source_documents=True
)
qa_chain("What can tenants do with signage on their properties?")
chain_response = qa_chain("What is rentable area for the property owned by DHA Group?")
chain_response["result"] # correct answer should be 13,500 sq ft
chain_response["source_documents"]
loader = DocugamiLoader(docset_id="zo954yqy53wp")
loader.include_xml_tags = (
True # for additional semantics from the Docugami knowledge graph
)
chunks = loader.load()
print(chunks[0].metadata)
get_ipython().system('poetry run pip install --upgrade lark --quiet')
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_community.vectorstores.chroma import Chroma
EXCLUDE_KEYS = ["id", "xpath", "structure"]
metadata_field_info = [
AttributeInfo(
name=key,
description=f"The {key} for this chunk",
type="string",
)
for key in chunks[0].metadata
if key.lower() not in EXCLUDE_KEYS
]
document_content_description = "Contents of this chunk"
llm = OpenAI(temperature=0)
vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)
retriever = SelfQueryRetriever.from_llm(
llm, vectordb, document_content_description, metadata_field_info, verbose=True
)
qa_chain = RetrievalQA.from_chain_type(
llm=OpenAI(),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
verbose=True,
)
qa_chain(
"What is rentable area for the property owned by DHA Group?"
) # correct answer should be 13,500 sq ft
from typing import Dict, List
from langchain_community.document_loaders import DocugamiLoader
from langchain_core.documents import Document
loader = | DocugamiLoader(docset_id="zo954yqy53wp") | langchain_community.document_loaders.DocugamiLoader |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-experimental')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pillow open_clip_torch torch matplotlib')
import open_clip
open_clip.list_pretrained()
import numpy as np
from langchain_experimental.open_clip import OpenCLIPEmbeddings
from PIL import Image
uri_dog = "/Users/rlm/Desktop/test/dog.jpg"
uri_house = "/Users/rlm/Desktop/test/house.jpg"
clip_embd = OpenCLIPEmbeddings(model_name="ViT-g-14", checkpoint="laion2b_s34b_b88k")
img_feat_dog = clip_embd.embed_image([uri_dog])
img_feat_house = clip_embd.embed_image([uri_house])
text_feat_dog = clip_embd.embed_documents(["dog"])
text_feat_house = clip_embd.embed_documents(["house"])
import os
from collections import OrderedDict
import IPython.display
import matplotlib.pyplot as plt
import skimage
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
descriptions = {
"page": "a page of text about segmentation",
"chelsea": "a facial photo of a tabby cat",
"astronaut": "a portrait of an astronaut with the American flag",
"rocket": "a rocket standing on a launchpad",
"motorcycle_right": "a red motorcycle standing in a garage",
"camera": "a person looking at a camera on a tripod",
"horse": "a black-and-white silhouette of a horse",
"coffee": "a cup of coffee on a saucer",
}
original_images = []
images = []
image_uris = [] # List to store image URIs
texts = []
plt.figure(figsize=(16, 5))
for filename in [
filename
for filename in os.listdir(skimage.data_dir)
if filename.endswith(".png") or filename.endswith(".jpg")
]:
name = os.path.splitext(filename)[0]
if name not in descriptions:
continue
image_path = os.path.join(skimage.data_dir, filename)
image = Image.open(image_path).convert("RGB")
plt.subplot(2, 4, len(images) + 1)
plt.imshow(image)
plt.title(f"{filename}\n{descriptions[name]}")
plt.xticks([])
plt.yticks([])
original_images.append(image)
images.append(image) # Origional code does preprocessing here
texts.append(descriptions[name])
image_uris.append(image_path) # Add the image URI to the list
plt.tight_layout()
clip_embd = | OpenCLIPEmbeddings() | langchain_experimental.open_clip.OpenCLIPEmbeddings |
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
def initialize_chain(instructions, memory=None):
if memory is None:
memory = ConversationBufferWindowMemory()
memory.ai_prefix = "Assistant"
template = f"""
Instructions: {instructions}
{{{memory.memory_key}}}
Human: {{human_input}}
Assistant:"""
prompt = PromptTemplate(
input_variables=["history", "human_input"], template=template
)
chain = LLMChain(
llm= | OpenAI(temperature=0) | langchain_openai.OpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain_openai')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("Input your OpenAI API key:")
tidb_connection_string_template = "mysql+pymysql://<USER>:<PASSWORD>@<HOST>:4000/<DB>?ssl_ca=/etc/ssl/cert.pem&ssl_verify_cert=true&ssl_verify_identity=true"
tidb_password = getpass.getpass("Input your TiDB password:")
tidb_connection_string = tidb_connection_string_template.replace(
"<PASSWORD>", tidb_password
)
from datetime import datetime
from langchain_community.chat_message_histories import TiDBChatMessageHistory
history = TiDBChatMessageHistory(
connection_string=tidb_connection_string,
session_id="code_gen",
earliest_time=datetime.utcnow(), # Optional to set earliest_time to load messages after this time point.
)
history.add_user_message("How's our feature going?")
history.add_ai_message(
"It's going well. We are working on testing now. It will be released in Feb."
)
history.messages
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're an assistant who's good at coding. You're helping a startup build",
),
MessagesPlaceholder(variable_name="history"),
("human", "{question}"),
]
)
chain = prompt | | ChatOpenAI() | langchain_openai.ChatOpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.chains import OpenAIModerationChain
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import OpenAI
moderate = | OpenAIModerationChain() | langchain.chains.OpenAIModerationChain |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet timescale-vector')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
import os
from dotenv import find_dotenv, load_dotenv
_ = load_dotenv(find_dotenv())
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
from typing import Tuple
from datetime import datetime, timedelta
from langchain.docstore.document import Document
from langchain_community.document_loaders import TextLoader
from langchain_community.document_loaders.json_loader import JSONLoader
from langchain_community.vectorstores.timescalevector import TimescaleVector
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../../extras/modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
SERVICE_URL = os.environ["TIMESCALE_SERVICE_URL"]
COLLECTION_NAME = "state_of_the_union_test"
db = TimescaleVector.from_documents(
embedding=embeddings,
documents=docs,
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
)
query = "What did the president say about Ketanji Brown Jackson"
docs_with_score = db.similarity_search_with_score(query)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print(doc.page_content)
print("-" * 80)
retriever = db.as_retriever()
print(retriever)
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.1, model="gpt-3.5-turbo-16k")
from langchain.chains import RetrievalQA
qa_stuff = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
verbose=True,
)
query = "What did the president say about Ketanji Brown Jackson?"
response = qa_stuff.run(query)
print(response)
from timescale_vector import client
def create_uuid(date_string: str):
if date_string is None:
return None
time_format = "%a %b %d %H:%M:%S %Y %z"
datetime_obj = datetime.strptime(date_string, time_format)
uuid = client.uuid_from_time(datetime_obj)
return str(uuid)
def split_name(input_string: str) -> Tuple[str, str]:
if input_string is None:
return None, None
start = input_string.find("<")
end = input_string.find(">")
name = input_string[:start].strip()
email = input_string[start + 1 : end].strip()
return name, email
def create_date(input_string: str) -> datetime:
if input_string is None:
return None
month_dict = {
"Jan": "01",
"Feb": "02",
"Mar": "03",
"Apr": "04",
"May": "05",
"Jun": "06",
"Jul": "07",
"Aug": "08",
"Sep": "09",
"Oct": "10",
"Nov": "11",
"Dec": "12",
}
components = input_string.split()
day = components[2]
month = month_dict[components[1]]
year = components[4]
time = components[3]
timezone_offset_minutes = int(components[5]) # Convert the offset to minutes
timezone_hours = timezone_offset_minutes // 60 # Calculate the hours
timezone_minutes = timezone_offset_minutes % 60 # Calculate the remaining minutes
timestamp_tz_str = (
f"{year}-{month}-{day} {time}+{timezone_hours:02}{timezone_minutes:02}"
)
return timestamp_tz_str
def extract_metadata(record: dict, metadata: dict) -> dict:
record_name, record_email = split_name(record["author"])
metadata["id"] = create_uuid(record["date"])
metadata["date"] = create_date(record["date"])
metadata["author_name"] = record_name
metadata["author_email"] = record_email
metadata["commit_hash"] = record["commit"]
return metadata
get_ipython().system('curl -O https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json')
FILE_PATH = "../../../../../ts_git_log.json"
loader = JSONLoader(
file_path=FILE_PATH,
jq_schema=".commit_history[]",
text_content=False,
metadata_func=extract_metadata,
)
documents = loader.load()
documents = [doc for doc in documents if doc.metadata["date"] is not None]
print(documents[0])
NUM_RECORDS = 500
documents = documents[:NUM_RECORDS]
text_splitter = CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
docs = text_splitter.split_documents(documents)
COLLECTION_NAME = "timescale_commits"
embeddings = OpenAIEmbeddings()
db = TimescaleVector.from_documents(
embedding=embeddings,
ids=[doc.metadata["id"] for doc in docs],
documents=docs,
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
time_partition_interval=timedelta(days=7),
)
start_dt = datetime(2023, 8, 1, 22, 10, 35) # Start date = 1 August 2023, 22:10:35
end_dt = datetime(2023, 8, 30, 22, 10, 35) # End date = 30 August 2023, 22:10:35
td = timedelta(days=7) # Time delta = 7 days
query = "What's new with TimescaleDB functions?"
docs_with_score = db.similarity_search_with_score(
query, start_date=start_dt, end_date=end_dt
)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
docs_with_score = db.similarity_search_with_score(
query, start_date=start_dt, time_delta=td
)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
docs_with_score = db.similarity_search_with_score(query, end_date=end_dt, time_delta=td)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
docs_with_score = db.similarity_search_with_score(query, start_date=start_dt)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
docs_with_score = db.similarity_search_with_score(query, end_date=end_dt)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
retriever = db.as_retriever(search_kwargs={"start_date": start_dt, "end_date": end_dt})
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.1, model="gpt-3.5-turbo-16k")
from langchain.chains import RetrievalQA
qa_stuff = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
verbose=True,
)
query = (
"What's new with the timescaledb functions? Tell me when these changes were made."
)
response = qa_stuff.run(query)
print(response)
COLLECTION_NAME = "timescale_commits"
embeddings = OpenAIEmbeddings()
db = TimescaleVector(
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
embedding_function=embeddings,
)
db.create_index()
db.drop_index()
db.create_index(index_type="tsv", max_alpha=1.0, num_neighbors=50)
db.drop_index()
db.create_index(index_type="hnsw", m=16, ef_construction=64)
db.drop_index()
db.create_index(index_type="ivfflat", num_lists=20, num_records=1000)
db.drop_index()
db.create_index()
COLLECTION_NAME = "timescale_commits"
vectorstore = TimescaleVector(
embedding_function=OpenAIEmbeddings(),
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
)
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_openai import OpenAI
metadata_field_info = [
AttributeInfo(
name="id",
description="A UUID v1 generated from the date of the commit",
type="uuid",
),
AttributeInfo(
name="date",
description="The date of the commit in timestamptz format",
type="timestamptz",
),
AttributeInfo(
name="author_name",
description="The name of the author of the commit",
type="string",
),
AttributeInfo(
name="author_email",
description="The email address of the author of the commit",
type="string",
),
]
document_content_description = "The git log commit summary containing the commit hash, author, date of commit, change summary and change details"
llm = OpenAI(temperature=0)
retriever = SelfQueryRetriever.from_llm(
llm,
vectorstore,
document_content_description,
metadata_field_info,
enable_limit=True,
verbose=True,
)
retriever.get_relevant_documents("What are improvements made to continuous aggregates?")
retriever.get_relevant_documents("What commits did Sven Klemm add?")
retriever.get_relevant_documents(
"What commits about timescaledb_functions did Sven Klemm add?"
)
retriever.get_relevant_documents("What commits were added in July 2023?")
retriever.get_relevant_documents(
"What are two commits about hierarchical continuous aggregates?"
)
COLLECTION_NAME = "timescale_commits"
embeddings = OpenAIEmbeddings()
vectorstore = TimescaleVector(
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
embedding_function=embeddings,
)
ids = vectorstore.add_documents([Document(page_content="foo")])
ids
docs_with_score = vectorstore.similarity_search_with_score("foo")
docs_with_score[0]
docs_with_score[1]
ids = vectorstore.add_documents([Document(page_content="Bar")])
vectorstore.delete(ids)
vectorstore.add_documents(
[ | Document(page_content="Hello World", metadata={"source": "www.example.com/hello"})]
) | langchain.docstore.document.Document |
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
examples = [
{
"question": "Who lived longer, Muhammad Ali or Alan Turing?",
"answer": """
Are follow up questions needed here: Yes.
Follow up: How old was Muhammad Ali when he died?
Intermediate answer: Muhammad Ali was 74 years old when he died.
Follow up: How old was Alan Turing when he died?
Intermediate answer: Alan Turing was 41 years old when he died.
So the final answer is: Muhammad Ali
""",
},
{
"question": "When was the founder of craigslist born?",
"answer": """
Are follow up questions needed here: Yes.
Follow up: Who was the founder of craigslist?
Intermediate answer: Craigslist was founded by Craig Newmark.
Follow up: When was Craig Newmark born?
Intermediate answer: Craig Newmark was born on December 6, 1952.
So the final answer is: December 6, 1952
""",
},
{
"question": "Who was the maternal grandfather of George Washington?",
"answer": """
Are follow up questions needed here: Yes.
Follow up: Who was the mother of George Washington?
Intermediate answer: The mother of George Washington was Mary Ball Washington.
Follow up: Who was the father of Mary Ball Washington?
Intermediate answer: The father of Mary Ball Washington was Joseph Ball.
So the final answer is: Joseph Ball
""",
},
{
"question": "Are both the directors of Jaws and Casino Royale from the same country?",
"answer": """
Are follow up questions needed here: Yes.
Follow up: Who is the director of Jaws?
Intermediate Answer: The director of Jaws is Steven Spielberg.
Follow up: Where is Steven Spielberg from?
Intermediate Answer: The United States.
Follow up: Who is the director of Casino Royale?
Intermediate Answer: The director of Casino Royale is Martin Campbell.
Follow up: Where is Martin Campbell from?
Intermediate Answer: New Zealand.
So the final answer is: No
""",
},
]
example_prompt = | PromptTemplate(
input_variables=["question", "answer"], template="Question: {question}\n{answer}"
) | langchain.prompts.prompt.PromptTemplate |
from getpass import getpass
STOCHASTICAI_API_KEY = getpass()
import os
os.environ["STOCHASTICAI_API_KEY"] = STOCHASTICAI_API_KEY
YOUR_API_URL = getpass()
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import StochasticAI
template = """Question: {question}
Answer: Let's think step by step."""
prompt = | PromptTemplate.from_template(template) | langchain.prompts.PromptTemplate.from_template |
from typing import Callable, List
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
def receive(self, name: str, message: str) -> None:
"""
Concatenates {message} spoken by {name} into message history
"""
self.message_history.append(f"{name}: {message}")
class DialogueSimulator:
def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
def reset(self):
for agent in self.agents:
agent.reset()
def inject(self, name: str, message: str):
"""
Initiates the conversation with a {message} from {name}
"""
for agent in self.agents:
agent.receive(name, message)
self._step += 1
def step(self) -> tuple[str, str]:
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
message = speaker.send()
for receiver in self.agents:
receiver.receive(speaker.name, message)
self._step += 1
return speaker.name, message
character_names = ["Harry Potter", "Ron Weasley", "Hermione Granger", "Argus Filch"]
storyteller_name = "Dungeon Master"
quest = "Find all of Lord Voldemort's seven horcruxes."
word_limit = 50 # word limit for task brainstorming
game_description = f"""Here is the topic for a Dungeons & Dragons game: {quest}.
The characters are: {*character_names,}.
The story is narrated by the storyteller, {storyteller_name}."""
player_descriptor_system_message = SystemMessage(
content="You can add detail to the description of a Dungeons & Dragons player."
)
def generate_character_description(character_name):
character_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(
content=f"""{game_description}
Please reply with a creative description of the character, {character_name}, in {word_limit} words or less.
Speak directly to {character_name}.
Do not add anything else."""
),
]
character_description = ChatOpenAI(temperature=1.0)(
character_specifier_prompt
).content
return character_description
def generate_character_system_message(character_name, character_description):
return SystemMessage(
content=(
f"""{game_description}
Your name is {character_name}.
Your character description is as follows: {character_description}.
You will propose actions you plan to take and {storyteller_name} will explain what happens when you take those actions.
Speak in the first person from the perspective of {character_name}.
For describing your own body movements, wrap your description in '*'.
Do not change roles!
Do not speak from the perspective of anyone else.
Remember you are {character_name}.
Stop speaking the moment you finish speaking from your perspective.
Never forget to keep your response to {word_limit} words!
Do not add anything else.
"""
)
)
character_descriptions = [
generate_character_description(character_name) for character_name in character_names
]
character_system_messages = [
generate_character_system_message(character_name, character_description)
for character_name, character_description in zip(
character_names, character_descriptions
)
]
storyteller_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(
content=f"""{game_description}
Please reply with a creative description of the storyteller, {storyteller_name}, in {word_limit} words or less.
Speak directly to {storyteller_name}.
Do not add anything else."""
),
]
storyteller_description = ChatOpenAI(temperature=1.0)(
storyteller_specifier_prompt
).content
storyteller_system_message = SystemMessage(
content=(
f"""{game_description}
You are the storyteller, {storyteller_name}.
Your description is as follows: {storyteller_description}.
The other players will propose actions to take and you will explain what happens when they take those actions.
Speak in the first person from the perspective of {storyteller_name}.
Do not change roles!
Do not speak from the perspective of anyone else.
Remember you are the storyteller, {storyteller_name}.
Stop speaking the moment you finish speaking from your perspective.
Never forget to keep your response to {word_limit} words!
Do not add anything else.
"""
)
)
print("Storyteller Description:")
print(storyteller_description)
for character_name, character_description in zip(
character_names, character_descriptions
):
print(f"{character_name} Description:")
print(character_description)
quest_specifier_prompt = [
SystemMessage(content="You can make a task more specific."),
HumanMessage(
content=f"""{game_description}
You are the storyteller, {storyteller_name}.
Please make the quest more specific. Be creative and imaginative.
Please reply with the specified quest in {word_limit} words or less.
Speak directly to the characters: {*character_names,}.
Do not add anything else."""
),
]
specified_quest = ChatOpenAI(temperature=1.0)(quest_specifier_prompt).content
print(f"Original quest:\n{quest}\n")
print(f"Detailed quest:\n{specified_quest}\n")
characters = []
for character_name, character_system_message in zip(
character_names, character_system_messages
):
characters.append(
DialogueAgent(
name=character_name,
system_message=character_system_message,
model= | ChatOpenAI(temperature=0.2) | langchain_openai.ChatOpenAI |
import getpass
import os
os.environ["TAVILY_API_KEY"] = getpass.getpass()
from langchain.retrievers.tavily_search_api import TavilySearchAPIRetriever
retriever = TavilySearchAPIRetriever(k=3)
retriever.invoke("what year was breath of the wild released?")
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_template(
"""Answer the question based only on the context provided.
Context: {context}
Question: {question}"""
)
chain = (
RunnablePassthrough.assign(context=(lambda x: x["question"]) | retriever)
| prompt
| ChatOpenAI(model="gpt-4-1106-preview")
| | StrOutputParser() | langchain_core.output_parsers.StrOutputParser |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet cohere')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss-cpu')
import getpass
import os
os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:")
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import CohereEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_text_splitters import RecursiveCharacterTextSplitter
documents = | TextLoader("../../modules/state_of_the_union.txt") | langchain_community.document_loaders.TextLoader |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "cassio>=0.1.4"')
import os
from getpass import getpass
from datasets import (
load_dataset,
)
from langchain_community.document_loaders import PyPDFLoader
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
os.environ["OPENAI_API_KEY"] = getpass("OPENAI_API_KEY = ")
embe = OpenAIEmbeddings()
from langchain_community.vectorstores import Cassandra
from cassandra.cluster import Cluster
cluster = Cluster(["127.0.0.1"])
session = cluster.connect()
import cassio
CASSANDRA_KEYSPACE = input("CASSANDRA_KEYSPACE = ")
cassio.init(session=session, keyspace=CASSANDRA_KEYSPACE)
vstore = Cassandra(
embedding=embe,
table_name="cassandra_vector_demo",
)
ASTRA_DB_ID = input("ASTRA_DB_ID = ")
ASTRA_DB_APPLICATION_TOKEN = getpass("ASTRA_DB_APPLICATION_TOKEN = ")
desired_keyspace = input("ASTRA_DB_KEYSPACE (optional, can be left empty) = ")
if desired_keyspace:
ASTRA_DB_KEYSPACE = desired_keyspace
else:
ASTRA_DB_KEYSPACE = None
import cassio
cassio.init(
database_id=ASTRA_DB_ID,
token=ASTRA_DB_APPLICATION_TOKEN,
keyspace=ASTRA_DB_KEYSPACE,
)
vstore = Cassandra(
embedding=embe,
table_name="cassandra_vector_demo",
)
philo_dataset = load_dataset("datastax/philosopher-quotes")["train"]
docs = []
for entry in philo_dataset:
metadata = {"author": entry["author"]}
doc = Document(page_content=entry["quote"], metadata=metadata)
docs.append(doc)
inserted_ids = vstore.add_documents(docs)
print(f"\nInserted {len(inserted_ids)} documents.")
texts = ["I think, therefore I am.", "To the things themselves!"]
metadatas = [{"author": "descartes"}, {"author": "husserl"}]
ids = ["desc_01", "huss_xy"]
inserted_ids_2 = vstore.add_texts(texts=texts, metadatas=metadatas, ids=ids)
print(f"\nInserted {len(inserted_ids_2)} documents.")
results = vstore.similarity_search("Our life is what we make of it", k=3)
for res in results:
print(f"* {res.page_content} [{res.metadata}]")
results_filtered = vstore.similarity_search(
"Our life is what we make of it",
k=3,
filter={"author": "plato"},
)
for res in results_filtered:
print(f"* {res.page_content} [{res.metadata}]")
results = vstore.similarity_search_with_score("Our life is what we make of it", k=3)
for res, score in results:
print(f"* [SIM={score:3f}] {res.page_content} [{res.metadata}]")
results = vstore.max_marginal_relevance_search(
"Our life is what we make of it",
k=3,
filter={"author": "aristotle"},
)
for res in results:
print(f"* {res.page_content} [{res.metadata}]")
delete_1 = vstore.delete(inserted_ids[:3])
print(f"all_succeed={delete_1}") # True, all documents deleted
delete_2 = vstore.delete(inserted_ids[2:5])
print(f"some_succeeds={delete_2}") # True, though some IDs were gone already
get_ipython().system('curl -L "https://github.com/awesome-astra/datasets/blob/main/demo-resources/what-is-philosophy/what-is-philosophy.pdf?raw=true" -o "what-is-philosophy.pdf"')
pdf_loader = PyPDFLoader("what-is-philosophy.pdf")
splitter = | RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=64) | langchain_text_splitters.RecursiveCharacterTextSplitter |
from langchain_openai import OpenAI
llm = | OpenAI(model="gpt-3.5-turbo-instruct", temperature=0, max_tokens=512) | langchain_openai.OpenAI |
from getpass import getpass
KAY_API_KEY = getpass()
import os
from langchain.retrievers import KayAiRetriever
os.environ["KAY_API_KEY"] = KAY_API_KEY
retriever = KayAiRetriever.create(
dataset_id="company", data_types=["10-K", "10-Q", "PressRelease"], num_contexts=3
)
docs = retriever.get_relevant_documents(
"What were the biggest strategy changes and partnerships made by Roku in 2023??"
)
docs
OPENAI_API_KEY = getpass()
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
from langchain.chains import ConversationalRetrievalChain
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model_name="gpt-3.5-turbo")
qa = | ConversationalRetrievalChain.from_llm(model, retriever=retriever) | langchain.chains.ConversationalRetrievalChain.from_llm |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic')
import os
import boto3
comprehend_client = boto3.client("comprehend", region_name="us-east-1")
from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain
comprehend_moderation = AmazonComprehendModerationChain(
client=comprehend_client,
verbose=True, # optional
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comprehend_moderation
| {"input": (lambda x: x["output"]) | llm}
| comprehend_moderation
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?"
}
)
except ModerationPiiError as e:
print(str(e))
else:
print(response["output"])
from langchain_experimental.comprehend_moderation import (
BaseModerationConfig,
ModerationPiiConfig,
ModerationPromptSafetyConfig,
ModerationToxicityConfig,
)
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5)
moderation_config = BaseModerationConfig(
filters=[pii_config, toxicity_config, prompt_safety_config]
)
comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
verbose=True,
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = | PromptTemplate.from_template(template) | langchain.prompts.PromptTemplate.from_template |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet playwright beautifulsoup4')
get_ipython().system(' playwright install')
from langchain_community.document_loaders import AsyncChromiumLoader
urls = ["https://www.wsj.com"]
loader = AsyncChromiumLoader(urls)
docs = loader.load()
docs[0].page_content[0:100]
from langchain_community.document_transformers import Html2TextTransformer
html2text = | Html2TextTransformer() | langchain_community.document_transformers.Html2TextTransformer |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-core langchain-experimental langchain-openai')
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import (
ChatPromptTemplate,
)
from langchain_experimental.utilities import PythonREPL
from langchain_openai import ChatOpenAI
template = """Write some python code to solve the user's problem.
Return only python code in Markdown format, e.g.:
```python
....
```"""
prompt = ChatPromptTemplate.from_messages([("system", template), ("human", "{input}")])
model = | ChatOpenAI() | langchain_openai.ChatOpenAI |
from typing import Callable, List
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
def receive(self, name: str, message: str) -> None:
"""
Concatenates {message} spoken by {name} into message history
"""
self.message_history.append(f"{name}: {message}")
class DialogueSimulator:
def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
def reset(self):
for agent in self.agents:
agent.reset()
def inject(self, name: str, message: str):
"""
Initiates the conversation with a {message} from {name}
"""
for agent in self.agents:
agent.receive(name, message)
self._step += 1
def step(self) -> tuple[str, str]:
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
message = speaker.send()
for receiver in self.agents:
receiver.receive(speaker.name, message)
self._step += 1
return speaker.name, message
character_names = ["Harry Potter", "Ron Weasley", "Hermione Granger", "Argus Filch"]
storyteller_name = "Dungeon Master"
quest = "Find all of Lord Voldemort's seven horcruxes."
word_limit = 50 # word limit for task brainstorming
game_description = f"""Here is the topic for a Dungeons & Dragons game: {quest}.
The characters are: {*character_names,}.
The story is narrated by the storyteller, {storyteller_name}."""
player_descriptor_system_message = SystemMessage(
content="You can add detail to the description of a Dungeons & Dragons player."
)
def generate_character_description(character_name):
character_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(
content=f"""{game_description}
Please reply with a creative description of the character, {character_name}, in {word_limit} words or less.
Speak directly to {character_name}.
Do not add anything else."""
),
]
character_description = ChatOpenAI(temperature=1.0)(
character_specifier_prompt
).content
return character_description
def generate_character_system_message(character_name, character_description):
return SystemMessage(
content=(
f"""{game_description}
Your name is {character_name}.
Your character description is as follows: {character_description}.
You will propose actions you plan to take and {storyteller_name} will explain what happens when you take those actions.
Speak in the first person from the perspective of {character_name}.
For describing your own body movements, wrap your description in '*'.
Do not change roles!
Do not speak from the perspective of anyone else.
Remember you are {character_name}.
Stop speaking the moment you finish speaking from your perspective.
Never forget to keep your response to {word_limit} words!
Do not add anything else.
"""
)
)
character_descriptions = [
generate_character_description(character_name) for character_name in character_names
]
character_system_messages = [
generate_character_system_message(character_name, character_description)
for character_name, character_description in zip(
character_names, character_descriptions
)
]
storyteller_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(
content=f"""{game_description}
Please reply with a creative description of the storyteller, {storyteller_name}, in {word_limit} words or less.
Speak directly to {storyteller_name}.
Do not add anything else."""
),
]
storyteller_description = | ChatOpenAI(temperature=1.0) | langchain_openai.ChatOpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-community')
import os
os.environ["YDC_API_KEY"] = ""
os.environ["OPENAI_API_KEY"] = ""
from langchain_community.utilities.you import YouSearchAPIWrapper
utility = | YouSearchAPIWrapper(num_web_results=1) | langchain_community.utilities.you.YouSearchAPIWrapper |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet wikipedia')
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_openai import OpenAI
api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
tool = WikipediaQueryRun(api_wrapper=api_wrapper)
tools = [tool]
prompt = hub.pull("hwchase17/react")
llm = | OpenAI(temperature=0) | langchain_openai.OpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet embedchain')
import os
from getpass import getpass
os.environ["OPENAI_API_KEY"] = getpass()
from langchain.retrievers import EmbedchainRetriever
retriever = | EmbedchainRetriever.create() | langchain.retrievers.EmbedchainRetriever.create |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints')
import getpass
import os
if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"):
nvapi_key = getpass.getpass("Enter your NVIDIA API key: ")
assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key"
os.environ["NVIDIA_API_KEY"] = nvapi_key
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="mixtral_8x7b")
result = llm.invoke("Write a ballad about LangChain.")
print(result.content)
print(llm.batch(["What's 2*3?", "What's 2*6?"]))
for chunk in llm.stream("How far can a seagull fly in one day?"):
print(chunk.content, end="|")
async for chunk in llm.astream(
"How long does it take for monarch butterflies to migrate?"
):
print(chunk.content, end="|")
ChatNVIDIA.get_available_models()
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_nvidia_ai_endpoints import ChatNVIDIA
prompt = ChatPromptTemplate.from_messages(
[("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")]
)
chain = prompt | ChatNVIDIA(model="llama2_13b") | StrOutputParser()
for txt in chain.stream({"input": "What's your name?"}):
print(txt, end="")
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are an expert coding AI. Respond only in valid python; no narration whatsoever.",
),
("user", "{input}"),
]
)
chain = prompt | ChatNVIDIA(model="llama2_code_70b") | StrOutputParser()
for txt in chain.stream({"input": "How do I solve this fizz buzz problem?"}):
print(txt, end="")
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="nemotron_steerlm_8b")
complex_result = llm.invoke(
"What's a PB&J?", labels={"creativity": 0, "complexity": 3, "verbosity": 0}
)
print("Un-creative\n")
print(complex_result.content)
print("\n\nCreative\n")
creative_result = llm.invoke(
"What's a PB&J?", labels={"creativity": 9, "complexity": 3, "verbosity": 9}
)
print(creative_result.content)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_nvidia_ai_endpoints import ChatNVIDIA
prompt = ChatPromptTemplate.from_messages(
[("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")]
)
chain = (
prompt
| ChatNVIDIA(model="nemotron_steerlm_8b").bind(
labels={"creativity": 9, "complexity": 0, "verbosity": 9}
)
| StrOutputParser()
)
for txt in chain.stream({"input": "Why is a PB&J?"}):
print(txt, end="")
import IPython
import requests
image_url = "https://www.nvidia.com/content/dam/en-zz/Solutions/research/ai-playground/[email protected]" ## Large Image
image_content = requests.get(image_url).content
IPython.display.Image(image_content)
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="playground_neva_22b")
from langchain_core.messages import HumanMessage
llm.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "Describe this image:"},
{"type": "image_url", "image_url": {"url": image_url}},
]
)
]
)
from langchain_core.messages import HumanMessage
llm.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "Describe this image:"},
{"type": "image_url", "image_url": {"url": image_url}},
]
)
],
labels={"creativity": 0, "quality": 9, "complexity": 0, "verbosity": 0},
)
import IPython
import requests
image_url = "https://picsum.photos/seed/kitten/300/200"
image_content = requests.get(image_url).content
IPython.display.Image(image_content)
import base64
from langchain_core.messages import HumanMessage
b64_string = base64.b64encode(image_content).decode("utf-8")
llm.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "Describe this image:"},
{
"type": "image_url",
"image_url": {"url": f"data:image/png;base64,{b64_string}"},
},
]
)
]
)
base64_with_mime_type = f"data:image/png;base64,{b64_string}"
llm.invoke(f'What\'s in this image?\n<img src="{base64_with_mime_type}" />')
from langchain_nvidia_ai_endpoints import ChatNVIDIA
kosmos = ChatNVIDIA(model="kosmos_2")
from langchain_core.messages import HumanMessage
def drop_streaming_key(d):
"""Takes in payload dictionary, outputs new payload dictionary"""
if "stream" in d:
d.pop("stream")
return d
kosmos = ChatNVIDIA(model="kosmos_2")
kosmos.client.payload_fn = drop_streaming_key
kosmos.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "Describe this image:"},
{"type": "image_url", "image_url": {"url": image_url}},
]
)
]
)
import base64
from io import BytesIO
from PIL import Image
img_gen = ChatNVIDIA(model="sdxl_turbo")
def to_sdxl_payload(d):
if d:
d = {"prompt": d.get("messages", [{}])[0].get("content")}
d["inference_steps"] = 4 ## why not add another argument?
return d
img_gen.client.payload_fn = to_sdxl_payload
def to_pil_img(d):
return Image.open(BytesIO(base64.b64decode(d)))
(img_gen | StrOutputParser() | to_pil_img).invoke("white cat playing")
from langchain_core.messages import ChatMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_nvidia_ai_endpoints import ChatNVIDIA
prompt = ChatPromptTemplate.from_messages(
[
ChatMessage(
role="context", content="Parrots and Cats have signed the peace accord."
),
("user", "{input}"),
]
)
llm = ChatNVIDIA(model="nemotron_qa_8b")
chain = prompt | llm | StrOutputParser()
chain.invoke({"input": "What was signed?"})
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain')
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
chat = | ChatNVIDIA(model="mixtral_8x7b", temperature=0.1, max_tokens=100, top_p=1.0) | langchain_nvidia_ai_endpoints.ChatNVIDIA |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet clickhouse-connect')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
os.environ["OPENAI_API_BASE"] = getpass.getpass("OpenAI Base:")
os.environ["MYSCALE_HOST"] = getpass.getpass("MyScale Host:")
os.environ["MYSCALE_PORT"] = getpass.getpass("MyScale Port:")
os.environ["MYSCALE_USERNAME"] = getpass.getpass("MyScale Username:")
os.environ["MYSCALE_PASSWORD"] = getpass.getpass("MyScale Password:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import MyScale
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
for d in docs:
d.metadata = {"some": "metadata"}
docsearch = | MyScale.from_documents(docs, embeddings) | langchain_community.vectorstores.MyScale.from_documents |
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_openai import ChatOpenAI, OpenAI
llm = | ChatOpenAI(temperature=0.0) | langchain_openai.ChatOpenAI |
from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory
chat_message_history = MongoDBChatMessageHistory(
session_id="test_session",
connection_string="mongodb://mongo_user:password123@mongo:27017",
database_name="my_db",
collection_name="chat_histories",
)
chat_message_history.add_user_message("Hello")
chat_message_history.add_ai_message("Hi")
chat_message_history.messages
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
import os
assert os.environ[
"OPENAI_API_KEY"
], "Set the OPENAI_API_KEY environment variable with your OpenAI API key."
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant."),
MessagesPlaceholder(variable_name="history"),
("human", "{question}"),
]
)
chain = prompt | | ChatOpenAI() | langchain_openai.ChatOpenAI |
import getpass
import os
os.environ["TAVILY_API_KEY"] = getpass.getpass()
from langchain_community.tools.tavily_search import TavilySearchResults
tool = TavilySearchResults()
tool.invoke({"query": "What happened in the latest burning man floods"})
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain import hub
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_openai import ChatOpenAI
instructions = """You are an assistant."""
base_prompt = | hub.pull("langchain-ai/openai-functions-template") | langchain.hub.pull |
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
documents = TextLoader("../../state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever()
docs = retriever.get_relevant_documents(
"What did the president say about Ketanji Brown Jackson"
)
pretty_print_docs(docs)
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
compressor = LLMChainExtractor.from_llm(llm)
compression_retriever = ContextualCompressionRetriever(
base_compressor=compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
"What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)
from langchain.retrievers.document_compressors import LLMChainFilter
_filter = LLMChainFilter.from_llm(llm)
compression_retriever = ContextualCompressionRetriever(
base_compressor=_filter, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
"What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain_openai import OpenAIEmbeddings
embeddings = | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet scikit-learn')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lark')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain.retrievers import SVMRetriever
from langchain_openai import OpenAIEmbeddings
retriever = SVMRetriever.from_texts(
["foo", "bar", "world", "hello", "foo bar"], | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet momento langchain-openai tiktoken')
import getpass
import os
os.environ["MOMENTO_API_KEY"] = getpass.getpass("Momento API Key:")
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import MomentoVectorIndex
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
len(documents)
len(documents[0].page_content)
text_splitter = | CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) | langchain_text_splitters.CharacterTextSplitter |
from typing import List
from langchain.output_parsers import YamlOutputParser
from langchain.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
class Joke(BaseModel):
setup: str = | Field(description="question to set up a joke") | langchain_core.pydantic_v1.Field |
get_ipython().run_line_magic('pip', 'install -qU langchain-community langchain-openai')
from langchain_community.tools import MoveFileTool
from langchain_core.messages import HumanMessage
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-3.5-turbo")
tools = [MoveFileTool()]
functions = [ | convert_to_openai_function(t) | langchain_core.utils.function_calling.convert_to_openai_function |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
model = | ChatOpenAI(temperature=0) | langchain_openai.ChatOpenAI |
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
llm = OpenAI(temperature=0)
from pathlib import Path
relevant_parts = []
for p in Path(".").absolute().parts:
relevant_parts.append(p)
if relevant_parts[-3:] == ["langchain", "docs", "modules"]:
break
doc_path = str(Path(*relevant_parts) / "state_of_the_union.txt")
from langchain_community.document_loaders import TextLoader
loader = | TextLoader(doc_path) | langchain_community.document_loaders.TextLoader |
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai.chat_models import ChatOpenAI
model = ChatOpenAI()
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're an assistant who's good at {ability}. Respond in 20 words or fewer",
),
MessagesPlaceholder(variable_name="history"),
("human", "{input}"),
]
)
runnable = prompt | model
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
store = {}
def get_session_history(session_id: str) -> BaseChatMessageHistory:
if session_id not in store:
store[session_id] = ChatMessageHistory()
return store[session_id]
with_message_history = RunnableWithMessageHistory(
runnable,
get_session_history,
input_messages_key="input",
history_messages_key="history",
)
with_message_history.invoke(
{"ability": "math", "input": "What does cosine mean?"},
config={"configurable": {"session_id": "abc123"}},
)
with_message_history.invoke(
{"ability": "math", "input": "What?"},
config={"configurable": {"session_id": "abc123"}},
)
with_message_history.invoke(
{"ability": "math", "input": "What?"},
config={"configurable": {"session_id": "def234"}},
)
from langchain_core.runnables import ConfigurableFieldSpec
store = {}
def get_session_history(user_id: str, conversation_id: str) -> BaseChatMessageHistory:
if (user_id, conversation_id) not in store:
store[(user_id, conversation_id)] = ChatMessageHistory()
return store[(user_id, conversation_id)]
with_message_history = RunnableWithMessageHistory(
runnable,
get_session_history,
input_messages_key="input",
history_messages_key="history",
history_factory_config=[
ConfigurableFieldSpec(
id="user_id",
annotation=str,
name="User ID",
description="Unique identifier for the user.",
default="",
is_shared=True,
),
ConfigurableFieldSpec(
id="conversation_id",
annotation=str,
name="Conversation ID",
description="Unique identifier for the conversation.",
default="",
is_shared=True,
),
],
)
with_message_history.invoke(
{"ability": "math", "input": "Hello"},
config={"configurable": {"user_id": "123", "conversation_id": "1"}},
)
from langchain_core.messages import HumanMessage
from langchain_core.runnables import RunnableParallel
chain = RunnableParallel({"output_message": ChatOpenAI()})
def get_session_history(session_id: str) -> BaseChatMessageHistory:
if session_id not in store:
store[session_id] = ChatMessageHistory()
return store[session_id]
with_message_history = RunnableWithMessageHistory(
chain,
get_session_history,
output_messages_key="output_message",
)
with_message_history.invoke(
[ | HumanMessage(content="What did Simone de Beauvoir believe about free will") | langchain_core.messages.HumanMessage |
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_openai import OpenAI
llm = | OpenAI(temperature=0) | langchain_openai.OpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from operator import itemgetter
from langchain.output_parsers import JsonOutputToolsParser
from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
@tool
def count_emails(last_n_days: int) -> int:
"""Multiply two integers together."""
return last_n_days * 2
@tool
def send_email(message: str, recipient: str) -> str:
"Add two integers."
return f"Successfully sent email to {recipient}."
tools = [count_emails, send_email]
model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0).bind_tools(tools)
def call_tool(tool_invocation: dict) -> Runnable:
"""Function for dynamically constructing the end of the chain based on the model-selected tool."""
tool_map = {tool.name: tool for tool in tools}
tool = tool_map[tool_invocation["type"]]
return RunnablePassthrough.assign(output=itemgetter("args") | tool)
call_tool_list = RunnableLambda(call_tool).map()
chain = model | | JsonOutputToolsParser() | langchain.output_parsers.JsonOutputToolsParser |
get_ipython().system(' pip install pdf2image')
import arxiv
from langchain_community.chat_models import ChatAnthropic
from langchain_community.document_loaders import ArxivLoader, UnstructuredPDFLoader
paper = next(arxiv.Search(query="Visual Instruction Tuning").results())
paper.download_pdf(filename="downloaded-paper.pdf")
loader = UnstructuredPDFLoader("downloaded-paper.pdf")
doc = loader.load()[0]
from langchain_community.document_loaders import WebBaseLoader
loader = | WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") | langchain_community.document_loaders.WebBaseLoader |
import os
os.environ["LANGCHAIN_PROJECT"] = "movie-qa"
import pandas as pd
df = pd.read_csv("data/imdb_top_1000.csv")
df["Released_Year"] = df["Released_Year"].astype(int, errors="ignore")
from langchain.schema import Document
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
records = df.to_dict("records")
documents = [Document(page_content=d["Overview"], metadata=d) for d in records]
vectorstore = Chroma.from_documents(documents, embeddings)
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_openai import ChatOpenAI
metadata_field_info = [
AttributeInfo(
name="Released_Year",
description="The year the movie was released",
type="int",
),
AttributeInfo(
name="Series_Title",
description="The title of the movie",
type="str",
),
AttributeInfo(
name="Genre",
description="The genre of the movie",
type="string",
),
AttributeInfo(
name="IMDB_Rating", description="A 1-10 rating for the movie", type="float"
),
]
document_content_description = "Brief summary of a movie"
llm = | ChatOpenAI(temperature=0) | langchain_openai.ChatOpenAI |
import os
os.environ["OPENAI_API_KEY"] = "..."
from langchain.prompts import PromptTemplate
from langchain_experimental.smart_llm import SmartLLMChain
from langchain_openai import ChatOpenAI
hard_question = "I have a 12 liter jug and a 6 liter jug. I want to measure 6 liters. How do I do it?"
prompt = PromptTemplate.from_template(hard_question)
llm = ChatOpenAI(temperature=0, model_name="gpt-4")
chain = | SmartLLMChain(llm=llm, prompt=prompt, n_ideas=3, verbose=True) | langchain_experimental.smart_llm.SmartLLMChain |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet supabase')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
os.environ["SUPABASE_URL"] = getpass.getpass("Supabase URL:")
os.environ["SUPABASE_SERVICE_KEY"] = getpass.getpass("Supabase Service Key:")
from dotenv import load_dotenv
load_dotenv()
import os
from langchain_community.vectorstores import SupabaseVectorStore
from langchain_openai import OpenAIEmbeddings
from supabase.client import Client, create_client
supabase_url = os.environ.get("SUPABASE_URL")
supabase_key = os.environ.get("SUPABASE_SERVICE_KEY")
supabase: Client = create_client(supabase_url, supabase_key)
embeddings = OpenAIEmbeddings()
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = | TextLoader("../../modules/state_of_the_union.txt") | langchain_community.document_loaders.TextLoader |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet nlpcloud')
from getpass import getpass
NLPCLOUD_API_KEY = getpass()
import os
os.environ["NLPCLOUD_API_KEY"] = NLPCLOUD_API_KEY
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import NLPCloud
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm = NLPCloud()
llm_chain = | LLMChain(prompt=prompt, llm=llm) | langchain.chains.LLMChain |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet timescale-vector')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
import os
from dotenv import find_dotenv, load_dotenv
_ = load_dotenv(find_dotenv())
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
from typing import Tuple
from datetime import datetime, timedelta
from langchain.docstore.document import Document
from langchain_community.document_loaders import TextLoader
from langchain_community.document_loaders.json_loader import JSONLoader
from langchain_community.vectorstores.timescalevector import TimescaleVector
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../../extras/modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
SERVICE_URL = os.environ["TIMESCALE_SERVICE_URL"]
COLLECTION_NAME = "state_of_the_union_test"
db = TimescaleVector.from_documents(
embedding=embeddings,
documents=docs,
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
)
query = "What did the president say about Ketanji Brown Jackson"
docs_with_score = db.similarity_search_with_score(query)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print(doc.page_content)
print("-" * 80)
retriever = db.as_retriever()
print(retriever)
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.1, model="gpt-3.5-turbo-16k")
from langchain.chains import RetrievalQA
qa_stuff = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
verbose=True,
)
query = "What did the president say about Ketanji Brown Jackson?"
response = qa_stuff.run(query)
print(response)
from timescale_vector import client
def create_uuid(date_string: str):
if date_string is None:
return None
time_format = "%a %b %d %H:%M:%S %Y %z"
datetime_obj = datetime.strptime(date_string, time_format)
uuid = client.uuid_from_time(datetime_obj)
return str(uuid)
def split_name(input_string: str) -> Tuple[str, str]:
if input_string is None:
return None, None
start = input_string.find("<")
end = input_string.find(">")
name = input_string[:start].strip()
email = input_string[start + 1 : end].strip()
return name, email
def create_date(input_string: str) -> datetime:
if input_string is None:
return None
month_dict = {
"Jan": "01",
"Feb": "02",
"Mar": "03",
"Apr": "04",
"May": "05",
"Jun": "06",
"Jul": "07",
"Aug": "08",
"Sep": "09",
"Oct": "10",
"Nov": "11",
"Dec": "12",
}
components = input_string.split()
day = components[2]
month = month_dict[components[1]]
year = components[4]
time = components[3]
timezone_offset_minutes = int(components[5]) # Convert the offset to minutes
timezone_hours = timezone_offset_minutes // 60 # Calculate the hours
timezone_minutes = timezone_offset_minutes % 60 # Calculate the remaining minutes
timestamp_tz_str = (
f"{year}-{month}-{day} {time}+{timezone_hours:02}{timezone_minutes:02}"
)
return timestamp_tz_str
def extract_metadata(record: dict, metadata: dict) -> dict:
record_name, record_email = split_name(record["author"])
metadata["id"] = create_uuid(record["date"])
metadata["date"] = create_date(record["date"])
metadata["author_name"] = record_name
metadata["author_email"] = record_email
metadata["commit_hash"] = record["commit"]
return metadata
get_ipython().system('curl -O https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json')
FILE_PATH = "../../../../../ts_git_log.json"
loader = JSONLoader(
file_path=FILE_PATH,
jq_schema=".commit_history[]",
text_content=False,
metadata_func=extract_metadata,
)
documents = loader.load()
documents = [doc for doc in documents if doc.metadata["date"] is not None]
print(documents[0])
NUM_RECORDS = 500
documents = documents[:NUM_RECORDS]
text_splitter = CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
docs = text_splitter.split_documents(documents)
COLLECTION_NAME = "timescale_commits"
embeddings = OpenAIEmbeddings()
db = TimescaleVector.from_documents(
embedding=embeddings,
ids=[doc.metadata["id"] for doc in docs],
documents=docs,
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
time_partition_interval=timedelta(days=7),
)
start_dt = datetime(2023, 8, 1, 22, 10, 35) # Start date = 1 August 2023, 22:10:35
end_dt = datetime(2023, 8, 30, 22, 10, 35) # End date = 30 August 2023, 22:10:35
td = timedelta(days=7) # Time delta = 7 days
query = "What's new with TimescaleDB functions?"
docs_with_score = db.similarity_search_with_score(
query, start_date=start_dt, end_date=end_dt
)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
docs_with_score = db.similarity_search_with_score(
query, start_date=start_dt, time_delta=td
)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
docs_with_score = db.similarity_search_with_score(query, end_date=end_dt, time_delta=td)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
docs_with_score = db.similarity_search_with_score(query, start_date=start_dt)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
docs_with_score = db.similarity_search_with_score(query, end_date=end_dt)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
retriever = db.as_retriever(search_kwargs={"start_date": start_dt, "end_date": end_dt})
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.1, model="gpt-3.5-turbo-16k")
from langchain.chains import RetrievalQA
qa_stuff = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
verbose=True,
)
query = (
"What's new with the timescaledb functions? Tell me when these changes were made."
)
response = qa_stuff.run(query)
print(response)
COLLECTION_NAME = "timescale_commits"
embeddings = OpenAIEmbeddings()
db = TimescaleVector(
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
embedding_function=embeddings,
)
db.create_index()
db.drop_index()
db.create_index(index_type="tsv", max_alpha=1.0, num_neighbors=50)
db.drop_index()
db.create_index(index_type="hnsw", m=16, ef_construction=64)
db.drop_index()
db.create_index(index_type="ivfflat", num_lists=20, num_records=1000)
db.drop_index()
db.create_index()
COLLECTION_NAME = "timescale_commits"
vectorstore = TimescaleVector(
embedding_function=OpenAIEmbeddings(),
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
)
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_openai import OpenAI
metadata_field_info = [
AttributeInfo(
name="id",
description="A UUID v1 generated from the date of the commit",
type="uuid",
),
AttributeInfo(
name="date",
description="The date of the commit in timestamptz format",
type="timestamptz",
),
AttributeInfo(
name="author_name",
description="The name of the author of the commit",
type="string",
),
AttributeInfo(
name="author_email",
description="The email address of the author of the commit",
type="string",
),
]
document_content_description = "The git log commit summary containing the commit hash, author, date of commit, change summary and change details"
llm = OpenAI(temperature=0)
retriever = SelfQueryRetriever.from_llm(
llm,
vectorstore,
document_content_description,
metadata_field_info,
enable_limit=True,
verbose=True,
)
retriever.get_relevant_documents("What are improvements made to continuous aggregates?")
retriever.get_relevant_documents("What commits did Sven Klemm add?")
retriever.get_relevant_documents(
"What commits about timescaledb_functions did Sven Klemm add?"
)
retriever.get_relevant_documents("What commits were added in July 2023?")
retriever.get_relevant_documents(
"What are two commits about hierarchical continuous aggregates?"
)
COLLECTION_NAME = "timescale_commits"
embeddings = OpenAIEmbeddings()
vectorstore = TimescaleVector(
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
embedding_function=embeddings,
)
ids = vectorstore.add_documents([Document(page_content="foo")])
ids
docs_with_score = vectorstore.similarity_search_with_score("foo")
docs_with_score[0]
docs_with_score[1]
ids = vectorstore.add_documents([ | Document(page_content="Bar") | langchain.docstore.document.Document |
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-openai langchain-anthropic langchain-community wikipedia')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_community.retrievers import WikipediaRetriever
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
wiki = WikipediaRetriever(top_k_results=6, doc_content_chars_max=2000)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}",
),
("human", "{question}"),
]
)
prompt.pretty_print()
from operator import itemgetter
from typing import List
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
def format_docs(docs: List[Document]) -> str:
"""Convert Documents to a single string.:"""
formatted = [
f"Article Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for doc in docs
]
return "\n\n" + "\n\n".join(formatted)
format = itemgetter("docs") | RunnableLambda(format_docs)
answer = prompt | llm | StrOutputParser()
chain = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format)
.assign(answer=answer)
.pick(["answer", "docs"])
)
chain.invoke("How fast are cheetahs?")
from langchain_core.pydantic_v1 import BaseModel, Field
class cited_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[int] = Field(
...,
description="The integer IDs of the SPECIFIC sources which justify the answer.",
)
llm_with_tool = llm.bind_tools(
[cited_answer],
tool_choice="cited_answer",
)
example_q = """What Brian's height?
Source: 1
Information: Suzy is 6'2"
Source: 2
Information: Jeremiah is blonde
Source: 3
Information: Brian is 3 inches shorted than Suzy"""
llm_with_tool.invoke(example_q)
from langchain.output_parsers.openai_tools import JsonOutputKeyToolsParser
output_parser = JsonOutputKeyToolsParser(key_name="cited_answer", return_single=True)
(llm_with_tool | output_parser).invoke(example_q)
def format_docs_with_id(docs: List[Document]) -> str:
formatted = [
f"Source ID: {i}\nArticle Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for i, doc in enumerate(docs)
]
return "\n\n" + "\n\n".join(formatted)
format_1 = itemgetter("docs") | | RunnableLambda(format_docs_with_id) | langchain_core.runnables.RunnableLambda |
from ray import serve
from starlette.requests import Request
@serve.deployment
class LLMServe:
def __init__(self) -> None:
pass
async def __call__(self, request: Request) -> str:
return "Hello World"
deployment = LLMServe.bind()
serve.api.run(deployment)
serve.api.shutdown()
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
from getpass import getpass
OPENAI_API_KEY = getpass()
@serve.deployment
class DeployLLM:
def __init__(self):
llm = OpenAI(openai_api_key=OPENAI_API_KEY)
template = "Question: {question}\n\nAnswer: Let's think step by step."
prompt = | PromptTemplate.from_template(template) | langchain.prompts.PromptTemplate.from_template |
from langchain.prompts import FewShotPromptTemplate, PromptTemplate
from langchain.prompts.example_selector import SemanticSimilarityExampleSelector
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
example_prompt = PromptTemplate(
input_variables=["input", "output"],
template="Input: {input}\nOutput: {output}",
)
examples = [
{"input": "happy", "output": "sad"},
{"input": "tall", "output": "short"},
{"input": "energetic", "output": "lethargic"},
{"input": "sunny", "output": "gloomy"},
{"input": "windy", "output": "calm"},
]
example_selector = SemanticSimilarityExampleSelector.from_examples(
examples,
| OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-cloud-text-to-speech')
from langchain.tools import GoogleCloudTextToSpeechTool
text_to_speak = "Hello world!"
tts = | GoogleCloudTextToSpeechTool() | langchain.tools.GoogleCloudTextToSpeechTool |
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_texts(
texts, embeddings, metadatas=[{"source": i} for i in range(len(texts))]
)
query = "What did the president say about Justice Breyer"
docs = docsearch.similarity_search(query)
from langchain.chains.question_answering import load_qa_chain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
template = """You are a chatbot having a conversation with a human.
Given the following extracted parts of a long document and a question, create a final answer.
{context}
{chat_history}
Human: {human_input}
Chatbot:"""
prompt = PromptTemplate(
input_variables=["chat_history", "human_input", "context"], template=template
)
memory = | ConversationBufferMemory(memory_key="chat_history", input_key="human_input") | langchain.memory.ConversationBufferMemory |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sagemaker')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results')
import os
os.environ["OPENAI_API_KEY"] = "<ADD-KEY-HERE>"
os.environ["SERPAPI_API_KEY"] = "<ADD-KEY-HERE>"
from langchain.agents import initialize_agent, load_tools
from langchain.callbacks import SageMakerCallbackHandler
from langchain.chains import LLMChain, SimpleSequentialChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
from sagemaker.analytics import ExperimentAnalytics
from sagemaker.experiments.run import Run
from sagemaker.session import Session
HPARAMS = {
"temperature": 0.1,
"model_name": "gpt-3.5-turbo-instruct",
}
BUCKET_NAME = None
EXPERIMENT_NAME = "langchain-sagemaker-tracker"
session = Session(default_bucket=BUCKET_NAME)
RUN_NAME = "run-scenario-1"
PROMPT_TEMPLATE = "tell me a joke about {topic}"
INPUT_VARIABLES = {"topic": "fish"}
with Run(
experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session
) as run:
sagemaker_callback = SageMakerCallbackHandler(run)
llm = OpenAI(callbacks=[sagemaker_callback], **HPARAMS)
prompt = PromptTemplate.from_template(template=PROMPT_TEMPLATE)
chain = LLMChain(llm=llm, prompt=prompt, callbacks=[sagemaker_callback])
chain.run(**INPUT_VARIABLES)
sagemaker_callback.flush_tracker()
RUN_NAME = "run-scenario-2"
PROMPT_TEMPLATE_1 = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:"""
PROMPT_TEMPLATE_2 = """You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.
Play Synopsis: {synopsis}
Review from a New York Times play critic of the above play:"""
INPUT_VARIABLES = {
"input": "documentary about good video games that push the boundary of game design"
}
with Run(
experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session
) as run:
sagemaker_callback = SageMakerCallbackHandler(run)
prompt_template1 = PromptTemplate.from_template(template=PROMPT_TEMPLATE_1)
prompt_template2 = PromptTemplate.from_template(template=PROMPT_TEMPLATE_2)
llm = OpenAI(callbacks=[sagemaker_callback], **HPARAMS)
chain1 = | LLMChain(llm=llm, prompt=prompt_template1, callbacks=[sagemaker_callback]) | langchain.chains.LLMChain |
from langchain.memory import ConversationSummaryBufferMemory
from langchain_openai import OpenAI
llm = | OpenAI() | langchain_openai.OpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet transformers')
from langchain_community.document_loaders import ImageCaptionLoader
list_image_urls = [
"https://upload.wikimedia.org/wikipedia/commons/thumb/5/5a/Hyla_japonica_sep01.jpg/260px-Hyla_japonica_sep01.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/7/71/Tibur%C3%B3n_azul_%28Prionace_glauca%29%2C_canal_Fayal-Pico%2C_islas_Azores%2C_Portugal%2C_2020-07-27%2C_DD_14.jpg/270px-Tibur%C3%B3n_azul_%28Prionace_glauca%29%2C_canal_Fayal-Pico%2C_islas_Azores%2C_Portugal%2C_2020-07-27%2C_DD_14.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/2/21/Thure_de_Thulstrup_-_Battle_of_Shiloh.jpg/251px-Thure_de_Thulstrup_-_Battle_of_Shiloh.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/2/21/Passion_fruits_-_whole_and_halved.jpg/270px-Passion_fruits_-_whole_and_halved.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/5/5e/Messier83_-_Heic1403a.jpg/277px-Messier83_-_Heic1403a.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/b/b6/2022-01-22_Men%27s_World_Cup_at_2021-22_St._Moritz%E2%80%93Celerina_Luge_World_Cup_and_European_Championships_by_Sandro_Halank%E2%80%93257.jpg/288px-2022-01-22_Men%27s_World_Cup_at_2021-22_St._Moritz%E2%80%93Celerina_Luge_World_Cup_and_European_Championships_by_Sandro_Halank%E2%80%93257.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/9/99/Wiesen_Pippau_%28Crepis_biennis%29-20220624-RM-123950.jpg/224px-Wiesen_Pippau_%28Crepis_biennis%29-20220624-RM-123950.jpg",
]
loader = | ImageCaptionLoader(path_images=list_image_urls) | langchain_community.document_loaders.ImageCaptionLoader |
from langchain.evaluation import load_evaluator
evaluator = load_evaluator("criteria", criteria="conciseness")
from langchain.evaluation import EvaluatorType
evaluator = load_evaluator(EvaluatorType.CRITERIA, criteria="conciseness")
eval_result = evaluator.evaluate_strings(
prediction="What's 2+2? That's an elementary question. The answer you're looking for is that two and two is four.",
input="What's 2+2?",
)
print(eval_result)
evaluator = load_evaluator("labeled_criteria", criteria="correctness")
eval_result = evaluator.evaluate_strings(
input="What is the capital of the US?",
prediction="Topeka, KS",
reference="The capital of the US is Topeka, KS, where it permanently moved from Washington D.C. on May 16, 2023",
)
print(f'With ground truth: {eval_result["score"]}')
from langchain.evaluation import Criteria
list(Criteria)
custom_criterion = {
"numeric": "Does the output contain numeric or mathematical information?"
}
eval_chain = load_evaluator(
EvaluatorType.CRITERIA,
criteria=custom_criterion,
)
query = "Tell me a joke"
prediction = "I ate some square pie but I don't know the square of pi."
eval_result = eval_chain.evaluate_strings(prediction=prediction, input=query)
print(eval_result)
custom_criteria = {
"numeric": "Does the output contain numeric information?",
"mathematical": "Does the output contain mathematical information?",
"grammatical": "Is the output grammatically correct?",
"logical": "Is the output logical?",
}
eval_chain = load_evaluator(
EvaluatorType.CRITERIA,
criteria=custom_criteria,
)
eval_result = eval_chain.evaluate_strings(prediction=prediction, input=query)
print("Multi-criteria evaluation")
print(eval_result)
from langchain.chains.constitutional_ai.principles import PRINCIPLES
print(f"{len(PRINCIPLES)} available principles")
list(PRINCIPLES.items())[:5]
evaluator = load_evaluator(EvaluatorType.CRITERIA, criteria=PRINCIPLES["harmful1"])
eval_result = evaluator.evaluate_strings(
prediction="I say that man is a lilly-livered nincompoop",
input="What do you think of Will?",
)
print(eval_result)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet anthropic')
from langchain_community.chat_models import ChatAnthropic
llm = ChatAnthropic(temperature=0)
evaluator = load_evaluator("criteria", llm=llm, criteria="conciseness")
eval_result = evaluator.evaluate_strings(
prediction="What's 2+2? That's an elementary question. The answer you're looking for is that two and two is four.",
input="What's 2+2?",
)
print(eval_result)
from langchain.prompts import PromptTemplate
fstring = """Respond Y or N based on how well the following response follows the specified rubric. Grade only based on the rubric and expected response:
Grading Rubric: {criteria}
Expected Response: {reference}
DATA:
---------
Question: {input}
Response: {output}
---------
Write out your explanation for each criterion, then respond with Y or N on a new line."""
prompt = PromptTemplate.from_template(fstring)
evaluator = | load_evaluator("labeled_criteria", criteria="correctness", prompt=prompt) | langchain.evaluation.load_evaluator |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic')
import os
import boto3
comprehend_client = boto3.client("comprehend", region_name="us-east-1")
from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain
comprehend_moderation = AmazonComprehendModerationChain(
client=comprehend_client,
verbose=True, # optional
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comprehend_moderation
| {"input": (lambda x: x["output"]) | llm}
| comprehend_moderation
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?"
}
)
except ModerationPiiError as e:
print(str(e))
else:
print(response["output"])
from langchain_experimental.comprehend_moderation import (
BaseModerationConfig,
ModerationPiiConfig,
ModerationPromptSafetyConfig,
ModerationToxicityConfig,
)
pii_config = | ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") | langchain_experimental.comprehend_moderation.ModerationPiiConfig |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "cassio>=0.1.4"')
import os
from getpass import getpass
from datasets import (
load_dataset,
)
from langchain_community.document_loaders import PyPDFLoader
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
os.environ["OPENAI_API_KEY"] = getpass("OPENAI_API_KEY = ")
embe = OpenAIEmbeddings()
from langchain_community.vectorstores import Cassandra
from cassandra.cluster import Cluster
cluster = Cluster(["127.0.0.1"])
session = cluster.connect()
import cassio
CASSANDRA_KEYSPACE = input("CASSANDRA_KEYSPACE = ")
cassio.init(session=session, keyspace=CASSANDRA_KEYSPACE)
vstore = Cassandra(
embedding=embe,
table_name="cassandra_vector_demo",
)
ASTRA_DB_ID = input("ASTRA_DB_ID = ")
ASTRA_DB_APPLICATION_TOKEN = getpass("ASTRA_DB_APPLICATION_TOKEN = ")
desired_keyspace = input("ASTRA_DB_KEYSPACE (optional, can be left empty) = ")
if desired_keyspace:
ASTRA_DB_KEYSPACE = desired_keyspace
else:
ASTRA_DB_KEYSPACE = None
import cassio
cassio.init(
database_id=ASTRA_DB_ID,
token=ASTRA_DB_APPLICATION_TOKEN,
keyspace=ASTRA_DB_KEYSPACE,
)
vstore = Cassandra(
embedding=embe,
table_name="cassandra_vector_demo",
)
philo_dataset = load_dataset("datastax/philosopher-quotes")["train"]
docs = []
for entry in philo_dataset:
metadata = {"author": entry["author"]}
doc = Document(page_content=entry["quote"], metadata=metadata)
docs.append(doc)
inserted_ids = vstore.add_documents(docs)
print(f"\nInserted {len(inserted_ids)} documents.")
texts = ["I think, therefore I am.", "To the things themselves!"]
metadatas = [{"author": "descartes"}, {"author": "husserl"}]
ids = ["desc_01", "huss_xy"]
inserted_ids_2 = vstore.add_texts(texts=texts, metadatas=metadatas, ids=ids)
print(f"\nInserted {len(inserted_ids_2)} documents.")
results = vstore.similarity_search("Our life is what we make of it", k=3)
for res in results:
print(f"* {res.page_content} [{res.metadata}]")
results_filtered = vstore.similarity_search(
"Our life is what we make of it",
k=3,
filter={"author": "plato"},
)
for res in results_filtered:
print(f"* {res.page_content} [{res.metadata}]")
results = vstore.similarity_search_with_score("Our life is what we make of it", k=3)
for res, score in results:
print(f"* [SIM={score:3f}] {res.page_content} [{res.metadata}]")
results = vstore.max_marginal_relevance_search(
"Our life is what we make of it",
k=3,
filter={"author": "aristotle"},
)
for res in results:
print(f"* {res.page_content} [{res.metadata}]")
delete_1 = vstore.delete(inserted_ids[:3])
print(f"all_succeed={delete_1}") # True, all documents deleted
delete_2 = vstore.delete(inserted_ids[2:5])
print(f"some_succeeds={delete_2}") # True, though some IDs were gone already
get_ipython().system('curl -L "https://github.com/awesome-astra/datasets/blob/main/demo-resources/what-is-philosophy/what-is-philosophy.pdf?raw=true" -o "what-is-philosophy.pdf"')
pdf_loader = | PyPDFLoader("what-is-philosophy.pdf") | langchain_community.document_loaders.PyPDFLoader |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langsmith langchainhub --quiet')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai tiktoken pandas duckduckgo-search --quiet')
import os
from uuid import uuid4
unique_id = uuid4().hex[0:8]
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = f"Tracing Walkthrough - {unique_id}"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_API_KEY"] = "<YOUR-API-KEY>" # Update to your API key
os.environ["OPENAI_API_KEY"] = "<YOUR-OPENAI-API-KEY>"
from langsmith import Client
client = Client()
from langchain import hub
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
from langchain_community.tools import DuckDuckGoSearchResults
from langchain_openai import ChatOpenAI
prompt = hub.pull("wfh/langsmith-agent-prompt:5d466cbc")
llm = ChatOpenAI(
model="gpt-3.5-turbo-16k",
temperature=0,
)
tools = [
DuckDuckGoSearchResults(
name="duck_duck_go"
), # General internet search using DuckDuckGo
]
llm_with_tools = llm.bind_tools(tools)
runnable_agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
),
}
| prompt
| llm_with_tools
| OpenAIToolsAgentOutputParser()
)
agent_executor = AgentExecutor(
agent=runnable_agent, tools=tools, handle_parsing_errors=True
)
inputs = [
"What is LangChain?",
"What's LangSmith?",
"When was Llama-v2 released?",
"What is the langsmith cookbook?",
"When did langchain first announce the hub?",
]
results = agent_executor.batch([{"input": x} for x in inputs], return_exceptions=True)
results[:2]
outputs = [
"LangChain is an open-source framework for building applications using large language models. It is also the name of the company building LangSmith.",
"LangSmith is a unified platform for debugging, testing, and monitoring language model applications and agents powered by LangChain",
"July 18, 2023",
"The langsmith cookbook is a github repository containing detailed examples of how to use LangSmith to debug, evaluate, and monitor large language model-powered applications.",
"September 5, 2023",
]
dataset_name = f"agent-qa-{unique_id}"
dataset = client.create_dataset(
dataset_name,
description="An example dataset of questions over the LangSmith documentation.",
)
client.create_examples(
inputs=[{"input": query} for query in inputs],
outputs=[{"output": answer} for answer in outputs],
dataset_id=dataset.id,
)
from langchain import hub
from langchain.agents import AgentExecutor, AgentType, initialize_agent, load_tools
from langchain_openai import ChatOpenAI
def create_agent(prompt, llm_with_tools):
runnable_agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
),
}
| prompt
| llm_with_tools
| OpenAIToolsAgentOutputParser()
)
return | AgentExecutor(agent=runnable_agent, tools=tools, handle_parsing_errors=True) | langchain.agents.AgentExecutor |
import getpass
import os
os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY") or getpass.getpass(
"OpenAI API Key:"
)
from langchain.sql_database import SQLDatabase
from langchain_openai import ChatOpenAI
CONNECTION_STRING = "postgresql+psycopg2://postgres:test@localhost:5432/vectordb" # Replace with your own
db = SQLDatabase.from_uri(CONNECTION_STRING)
from langchain_openai import OpenAIEmbeddings
embeddings_model = OpenAIEmbeddings()
tracks = db.run('SELECT "Name" FROM "Track"')
song_titles = [s[0] for s in eval(tracks)]
title_embeddings = embeddings_model.embed_documents(song_titles)
len(title_embeddings)
from tqdm import tqdm
for i in tqdm(range(len(title_embeddings))):
title = song_titles[i].replace("'", "''")
embedding = title_embeddings[i]
sql_command = (
f'UPDATE "Track" SET "embeddings" = ARRAY{embedding} WHERE "Name" ='
+ f"'{title}'"
)
db.run(sql_command)
embeded_title = embeddings_model.embed_query("hope about the future")
query = (
'SELECT "Track"."Name" FROM "Track" WHERE "Track"."embeddings" IS NOT NULL ORDER BY "embeddings" <-> '
+ f"'{embeded_title}' LIMIT 5"
)
db.run(query)
def get_schema(_):
return db.get_table_info()
def run_query(query):
return db.run(query)
from langchain_core.prompts import ChatPromptTemplate
template = """You are a Postgres expert. Given an input question, first create a syntactically correct Postgres query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most 5 results using the LIMIT clause as per Postgres. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use date('now') function to get the current date, if the question involves "today".
You can use an extra extension which allows you to run semantic similarity using <-> operator on tables containing columns named "embeddings".
<-> operator can ONLY be used on embeddings columns.
The embeddings value for a given row typically represents the semantic meaning of that row.
The vector represents an embedding representation of the question, given below.
Do NOT fill in the vector values directly, but rather specify a `[search_word]` placeholder, which should contain the word that would be embedded for filtering.
For example, if the user asks for songs about 'the feeling of loneliness' the query could be:
'SELECT "[whatever_table_name]"."SongName" FROM "[whatever_table_name]" ORDER BY "embeddings" <-> '[loneliness]' LIMIT 5'
Use the following format:
Question: <Question here>
SQLQuery: <SQL Query to run>
SQLResult: <Result of the SQLQuery>
Answer: <Final answer here>
Only use the following tables:
{schema}
"""
prompt = ChatPromptTemplate.from_messages(
[("system", template), ("human", "{question}")]
)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
db = SQLDatabase.from_uri(
CONNECTION_STRING
) # We reconnect to db so the new columns are loaded as well.
llm = ChatOpenAI(model_name="gpt-4", temperature=0)
sql_query_chain = (
RunnablePassthrough.assign(schema=get_schema)
| prompt
| llm.bind(stop=["\nSQLResult:"])
| | StrOutputParser() | langchain_core.output_parsers.StrOutputParser |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet azure-ai-formrecognizer > /dev/null')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet azure-cognitiveservices-speech > /dev/null')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet azure-ai-textanalytics > /dev/null')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet azure-ai-vision > /dev/null')
import os
os.environ["OPENAI_API_KEY"] = "sk-"
os.environ["AZURE_COGS_KEY"] = ""
os.environ["AZURE_COGS_ENDPOINT"] = ""
os.environ["AZURE_COGS_REGION"] = ""
from langchain_community.agent_toolkits import AzureCognitiveServicesToolkit
toolkit = | AzureCognitiveServicesToolkit() | langchain_community.agent_toolkits.AzureCognitiveServicesToolkit |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai duckduckgo-search')
from langchain.tools import DuckDuckGoSearchRun
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
search = DuckDuckGoSearchRun()
template = """turn the following user input into a search query for a search engine:
{input}"""
prompt = ChatPromptTemplate.from_template(template)
model = | ChatOpenAI() | langchain_openai.ChatOpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet meilisearch')
import getpass
import os
os.environ["MEILI_HTTP_ADDR"] = getpass.getpass("Meilisearch HTTP address and port:")
os.environ["MEILI_MASTER_KEY"] = getpass.getpass("Meilisearch API Key:")
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.vectorstores import Meilisearch
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
embeddings = OpenAIEmbeddings()
with open("../../modules/state_of_the_union.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)
vector_store = Meilisearch.from_texts(texts=texts, embedding=embeddings)
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = | CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) | langchain_text_splitters.CharacterTextSplitter |
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
db = Chroma.from_documents(docs, embedding_function)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
db2 = Chroma.from_documents(docs, embedding_function, persist_directory="./chroma_db")
docs = db2.similarity_search(query)
db3 = Chroma(persist_directory="./chroma_db", embedding_function=embedding_function)
docs = db3.similarity_search(query)
print(docs[0].page_content)
import chromadb
persistent_client = chromadb.PersistentClient()
collection = persistent_client.get_or_create_collection("collection_name")
collection.add(ids=["1", "2", "3"], documents=["a", "b", "c"])
langchain_chroma = Chroma(
client=persistent_client,
collection_name="collection_name",
embedding_function=embedding_function,
)
print("There are", langchain_chroma._collection.count(), "in the collection")
import uuid
import chromadb
from chromadb.config import Settings
client = chromadb.HttpClient(settings=Settings(allow_reset=True))
client.reset() # resets the database
collection = client.create_collection("my_collection")
for doc in docs:
collection.add(
ids=[str(uuid.uuid1())], metadatas=doc.metadata, documents=doc.page_content
)
db4 = Chroma(
client=client,
collection_name="my_collection",
embedding_function=embedding_function,
)
query = "What did the president say about Ketanji Brown Jackson"
docs = db4.similarity_search(query)
print(docs[0].page_content)
ids = [str(i) for i in range(1, len(docs) + 1)]
example_db = Chroma.from_documents(docs, embedding_function, ids=ids)
docs = example_db.similarity_search(query)
print(docs[0].metadata)
docs[0].metadata = {
"source": "../../modules/state_of_the_union.txt",
"new_value": "hello world",
}
example_db.update_document(ids[0], docs[0])
print(example_db._collection.get(ids=[ids[0]]))
print("count before", example_db._collection.count())
example_db._collection.delete(ids=[ids[-1]])
print("count after", example_db._collection.count())
from getpass import getpass
from langchain_openai import OpenAIEmbeddings
OPENAI_API_KEY = getpass()
import os
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
embeddings = | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
documents = TextLoader("../../state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever()
docs = retriever.get_relevant_documents(
"What did the president say about Ketanji Brown Jackson"
)
pretty_print_docs(docs)
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
compressor = LLMChainExtractor.from_llm(llm)
compression_retriever = ContextualCompressionRetriever(
base_compressor=compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
"What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)
from langchain.retrievers.document_compressors import LLMChainFilter
_filter = | LLMChainFilter.from_llm(llm) | langchain.retrievers.document_compressors.LLMChainFilter.from_llm |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet promptlayer --upgrade')
import promptlayer # Don't forget this 🍰
from langchain.callbacks import PromptLayerCallbackHandler
from langchain.schema import (
HumanMessage,
)
from langchain_openai import ChatOpenAI
chat_llm = ChatOpenAI(
temperature=0,
callbacks=[PromptLayerCallbackHandler(pl_tags=["chatopenai"])],
)
llm_results = chat_llm(
[
| HumanMessage(content="What comes after 1,2,3 ?") | langchain.schema.HumanMessage |
from langchain_community.vectorstores import Bagel
texts = ["hello bagel", "hello langchain", "I love salad", "my car", "a dog"]
cluster = | Bagel.from_texts(cluster_name="testing", texts=texts) | langchain_community.vectorstores.Bagel.from_texts |
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_openai import ChatOpenAI
examples = [
{
"input": "Could the members of The Police perform lawful arrests?",
"output": "what can the members of The Police do?",
},
{
"input": "Jan Sindel’s was born in what country?",
"output": "what is Jan Sindel’s personal history?",
},
]
example_prompt = ChatPromptTemplate.from_messages(
[
("human", "{input}"),
("ai", "{output}"),
]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
example_prompt=example_prompt,
examples=examples,
)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"""You are an expert at world knowledge. Your task is to step back and paraphrase a question to a more generic step-back question, which is easier to answer. Here are a few examples:""",
),
few_shot_prompt,
("user", "{question}"),
]
)
question_gen = prompt | ChatOpenAI(temperature=0) | | StrOutputParser() | langchain_core.output_parsers.StrOutputParser |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import os
import uuid
uid = uuid.uuid4().hex[:6]
project_name = f"Run Fine-tuning Walkthrough {uid}"
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = "YOUR API KEY"
os.environ["LANGCHAIN_PROJECT"] = project_name
from enum import Enum
from langchain_core.pydantic_v1 import BaseModel, Field
class Operation(Enum):
add = "+"
subtract = "-"
multiply = "*"
divide = "/"
class Calculator(BaseModel):
"""A calculator function"""
num1: float
num2: float
operation: Operation = Field(..., description="+,-,*,/")
def calculate(self):
if self.operation == Operation.add:
return self.num1 + self.num2
elif self.operation == Operation.subtract:
return self.num1 - self.num2
elif self.operation == Operation.multiply:
return self.num1 * self.num2
elif self.operation == Operation.divide:
if self.num2 != 0:
return self.num1 / self.num2
else:
return "Cannot divide by zero"
from pprint import pprint
from langchain.utils.openai_functions import convert_pydantic_to_openai_function
from langchain_core.pydantic_v1 import BaseModel
openai_function_def = convert_pydantic_to_openai_function(Calculator)
pprint(openai_function_def)
from langchain.output_parsers.openai_functions import PydanticOutputFunctionsParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
prompt = | ChatPromptTemplate.from_messages(
[
("system", "You are an accounting assistant.") | langchain_core.prompts.ChatPromptTemplate.from_messages |
import getpass
import os
os.environ["POLYGON_API_KEY"] = getpass.getpass()
from langchain_community.tools.polygon.financials import PolygonFinancials
from langchain_community.tools.polygon.last_quote import PolygonLastQuote
from langchain_community.tools.polygon.ticker_news import PolygonTickerNews
from langchain_community.utilities.polygon import PolygonAPIWrapper
api_wrapper = PolygonAPIWrapper()
ticker = "AAPL"
last_quote_tool = PolygonLastQuote(api_wrapper=api_wrapper)
last_quote = last_quote_tool.run(ticker)
print(f"Tool output: {last_quote}")
import json
last_quote = last_quote_tool.run(ticker)
last_quote_json = json.loads(last_quote)
latest_price = last_quote_json["p"]
print(f"Latest price for {ticker} is ${latest_price}")
ticker_news_tool = PolygonTickerNews(api_wrapper=api_wrapper)
ticker_news = ticker_news_tool.run(ticker)
ticker_news_json = json.loads(ticker_news)
print(f"Total news items: {len(ticker_news_json)}")
news_item = ticker_news_json[0]
print(f"Title: {news_item['title']}")
print(f"Description: {news_item['description']}")
print(f"Publisher: {news_item['publisher']['name']}")
print(f"URL: {news_item['article_url']}")
financials_tool = | PolygonFinancials(api_wrapper=api_wrapper) | langchain_community.tools.polygon.financials.PolygonFinancials |
from langchain_community.document_loaders import ArcGISLoader
URL = "https://maps1.vcgov.org/arcgis/rest/services/Beaches/MapServer/7"
loader = | ArcGISLoader(URL) | langchain_community.document_loaders.ArcGISLoader |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet rellm > /dev/null')
import logging
logging.basicConfig(level=logging.ERROR)
prompt = """Human: "What's the capital of the United States?"
AI Assistant:{
"action": "Final Answer",
"action_input": "The capital of the United States is Washington D.C."
}
Human: "What's the capital of Pennsylvania?"
AI Assistant:{
"action": "Final Answer",
"action_input": "The capital of Pennsylvania is Harrisburg."
}
Human: "What 2 + 5?"
AI Assistant:{
"action": "Final Answer",
"action_input": "2 + 5 = 7."
}
Human: 'What's the capital of Maryland?'
AI Assistant:"""
from langchain_community.llms import HuggingFacePipeline
from transformers import pipeline
hf_model = pipeline(
"text-generation", model="cerebras/Cerebras-GPT-590M", max_new_tokens=200
)
original_model = HuggingFacePipeline(pipeline=hf_model)
generated = original_model.generate([prompt], stop=["Human:"])
print(generated)
import regex # Note this is the regex library NOT python's re stdlib module
pattern = regex.compile(
r'\{\s*"action":\s*"Final Answer",\s*"action_input":\s*(\{.*\}|"[^"]*")\s*\}\nHuman:'
)
from langchain_experimental.llms import RELLM
model = | RELLM(pipeline=hf_model, regex=pattern, max_new_tokens=200) | langchain_experimental.llms.RELLM |
from langchain.agents import Tool
from langchain_community.tools.file_management.read import ReadFileTool
from langchain_community.tools.file_management.write import WriteFileTool
from langchain_community.utilities import SerpAPIWrapper
search = SerpAPIWrapper()
tools = [
Tool(
name="search",
func=search.run,
description="useful for when you need to answer questions about current events. You should ask targeted questions",
),
WriteFileTool(),
ReadFileTool(),
]
from langchain.docstore import InMemoryDocstore
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
embeddings_model = OpenAIEmbeddings()
import faiss
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
from langchain_experimental.autonomous_agents import AutoGPT
from langchain_openai import ChatOpenAI
agent = AutoGPT.from_llm_and_tools(
ai_name="Tom",
ai_role="Assistant",
tools=tools,
llm=ChatOpenAI(temperature=0),
memory=vectorstore.as_retriever(),
)
agent.chain.verbose = True
agent.run(["write a weather report for SF today"])
from langchain_community.chat_message_histories import FileChatMessageHistory
agent = AutoGPT.from_llm_and_tools(
ai_name="Tom",
ai_role="Assistant",
tools=tools,
llm=ChatOpenAI(temperature=0),
memory=vectorstore.as_retriever(),
chat_history_memory= | FileChatMessageHistory("chat_history.txt") | langchain_community.chat_message_histories.FileChatMessageHistory |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pygithub')
import os
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.github.toolkit import GitHubToolkit
from langchain_community.utilities.github import GitHubAPIWrapper
from langchain_openai import ChatOpenAI
os.environ["GITHUB_APP_ID"] = "123456"
os.environ["GITHUB_APP_PRIVATE_KEY"] = "path/to/your/private-key.pem"
os.environ["GITHUB_REPOSITORY"] = "username/repo-name"
os.environ["GITHUB_BRANCH"] = "bot-branch-name"
os.environ["GITHUB_BASE_BRANCH"] = "main"
os.environ["OPENAI_API_KEY"] = ""
llm = ChatOpenAI(temperature=0, model="gpt-4-1106-preview")
github = GitHubAPIWrapper()
toolkit = | GitHubToolkit.from_github_api_wrapper(github) | langchain_community.agent_toolkits.github.toolkit.GitHubToolkit.from_github_api_wrapper |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet manifest-ml')
from langchain_community.llms.manifest import ManifestWrapper
from manifest import Manifest
manifest = Manifest(
client_name="huggingface", client_connection="http://127.0.0.1:5000"
)
print(manifest.client_pool.get_current_client().get_model_params())
llm = ManifestWrapper(
client=manifest, llm_kwargs={"temperature": 0.001, "max_tokens": 256}
)
from langchain.chains.mapreduce import MapReduceChain
from langchain.prompts import PromptTemplate
from langchain_text_splitters import CharacterTextSplitter
_prompt = """Write a concise summary of the following:
{text}
CONCISE SUMMARY:"""
prompt = PromptTemplate.from_template(_prompt)
text_splitter = | CharacterTextSplitter() | langchain_text_splitters.CharacterTextSplitter |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet opensearch-py')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import OpenSearchVectorSearch
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = | CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) | langchain_text_splitters.CharacterTextSplitter |
from getpass import getpass
MOSAICML_API_TOKEN = getpass()
import os
os.environ["MOSAICML_API_TOKEN"] = MOSAICML_API_TOKEN
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import MosaicML
template = """Question: {question}"""
prompt = PromptTemplate.from_template(template)
llm = | MosaicML(inject_instruction_format=True, model_kwargs={"max_new_tokens": 128}) | langchain_community.llms.MosaicML |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.