get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
import tiktoken
from llama_index.core.callbacks import CallbackManager, TokenCountingHandler
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode
)
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.2)
Settings.callback_manager = CallbackManager([token_counter])
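# Hedged usage sketch (not part of the original snippet): once the callback
# manager is registered, TokenCountingHandler keeps running counts that can be
# read after an LLM call; the sample prompt below is only illustrative.
response = Settings.llm.complete("What is LlamaIndex?")
print("LLM prompt tokens:", token_counter.prompt_llm_token_count)
print("LLM completion tokens:", token_counter.completion_llm_token_count)
print("Total LLM tokens:", token_counter.total_llm_token_count)
token_counter.reset_counts()  # clear the counters before the next measurement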
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-langchain')
get_ipython().system('pip install llama-index')
from langchain.chat_models import ChatAnyscale, ChatOpenAI
from llama_index.llms.langchain import LangChainLLM
from llama_index.core import PromptTemplate
llm = LangChainLLM(ChatOpenAI())
stream = await llm.astream(PromptTemplate("Hi, write a short story"))
async for token in stream:
print(token, end="")
llm = LangChainLLM(ChatAnyscale())
stream = llm.stream(
| PromptTemplate("Hi, Which NFL team have most Super Bowl wins") | llama_index.core.PromptTemplate |
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader(
input_files=["./data/paul_graham/paul_graham_essay.txt"]
)
docs = reader.load_data()
text = docs[0].text
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.types import BaseModel
from typing import List
class Biography(BaseModel):
"""Data model for a biography."""
name: str
best_known_for: List[str]
extra_info: str
summarizer = TreeSummarize(verbose=True, output_cls=Biography)
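# Hedged usage sketch: with output_cls=Biography, get_response should return a
# Biography instance synthesized over the loaded essay text; the query and the
# printed fields are only illustrative.
response = summarizer.get_response("Who is Paul Graham?", [text])
print(response.name)
print(response.best_known_for)
print(response.extra_info)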
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm')
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.readers.file import FlatReader
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
from llama_index.core.ingestion import IngestionPipeline
from pathlib import Path
import nest_asyncio
nest_asyncio.apply()
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
pipeline = IngestionPipeline(
documents=docs,
transformations=[
        HTMLNodeParser.from_defaults(),
    ],
)
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index import SimpleDirectoryReader, VectorStoreIndex
import requests
response = requests.get(
"https://www.dropbox.com/s/f6bmb19xdg0xedm/paul_graham_essay.txt?dl=1"
)
essay_txt = response.text
with open("pg_essay.txt", "w") as fp:
fp.write(essay_txt)
documents = SimpleDirectoryReader(input_files=["pg_essay.txt"]).load_data()
index = VectorStoreIndex.from_documents(documents)
get_ipython().system('pip install llama-index')
from llama_index.core.chat_engine import SimpleChatEngine
chat_engine = SimpleChatEngine.from_defaults()
response = chat_engine.chat(
"Say something profound and romantic about fourth of July"
)
print(response)
from llama_index.core.chat_engine import SimpleChatEngine
from llama_index.core.prompts.system import SHAKESPEARE_WRITING_ASSISTANT
chat_engine = SimpleChatEngine.from_defaults(
system_prompt=SHAKESPEARE_WRITING_ASSISTANT
)
response = chat_engine.chat(
"Say something profound and romantic about fourth of July"
)
print(response)
from llama_index.core.chat_engine import SimpleChatEngine
from llama_index.core.prompts.system import MARKETING_WRITING_ASSISTANT
chat_engine = SimpleChatEngine.from_defaults(
system_prompt=MARKETING_WRITING_ASSISTANT
)
response = chat_engine.chat(
"Say something profound and romantic about fourth of July"
)
print(response)
from llama_index.core.chat_engine import SimpleChatEngine
from llama_index.core.prompts.system import IRS_TAX_CHATBOT
chat_engine = SimpleChatEngine.from_defaults(system_prompt=IRS_TAX_CHATBOT)
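# Mirrors the calls above (added for completeness): exercise the IRS-prompted
# engine with the same message as the other system-prompt variants.
response = chat_engine.chat(
    "Say something profound and romantic about fourth of July"
)
print(response)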
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.postprocessor import (
FixedRecencyPostprocessor,
EmbeddingRecencyPostprocessor,
)
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import StorageContext
def get_file_metadata(file_name: str):
"""Get file metadata."""
if "v1" in file_name:
return {"date": "2020-01-01"}
elif "v2" in file_name:
return {"date": "2020-02-03"}
elif "v3" in file_name:
return {"date": "2022-04-12"}
else:
raise ValueError("invalid file")
documents = SimpleDirectoryReader(
input_files=[
"test_versioned_data/paul_graham_essay_v1.txt",
"test_versioned_data/paul_graham_essay_v2.txt",
"test_versioned_data/paul_graham_essay_v3.txt",
],
file_metadata=get_file_metadata,
).load_data()
from llama_index.core import Settings
Settings.text_splitter = SentenceSplitter(chunk_size=512)
nodes = Settings.text_splitter.get_nodes_from_documents(documents)
docstore = SimpleDocumentStore()
docstore.add_documents(nodes)
storage_context = StorageContext.from_defaults(docstore=docstore)
print(documents[2].get_text())
index = VectorStoreIndex(nodes, storage_context=storage_context)
node_postprocessor = FixedRecencyPostprocessor()
node_postprocessor_emb = EmbeddingRecencyPostprocessor()
query_engine = index.as_query_engine(
similarity_top_k=3,
)
response = query_engine.query(
"How much did the author raise in seed funding from Idelle's husband"
" (Julian) for Viaweb?",
)
query_engine = index.as_query_engine(
similarity_top_k=3, node_postprocessors=[node_postprocessor]
)
response = query_engine.query(
"How much did the author raise in seed funding from Idelle's husband"
" (Julian) for Viaweb?",
)
query_engine = index.as_query_engine(
similarity_top_k=3, node_postprocessors=[node_postprocessor_emb]
)
response = query_engine.query(
"How much did the author raise in seed funding from Idelle's husband"
" (Julian) for Viaweb?",
)
from llama_index.core import SummaryIndex
query_str = (
"How much did the author raise in seed funding from Idelle's husband"
" (Julian) for Viaweb?"
)
query_engine = index.as_query_engine(
similarity_top_k=3, response_mode="no_text"
)
init_response = query_engine.query(
query_str,
)
resp_nodes = [n.node for n in init_response.source_nodes]
summary_index = SummaryIndex(resp_nodes)
get_ipython().system('pip install llama-index')
import logging
import sys
from IPython.display import Markdown, display
import pandas as pd
from llama_index.core.query_engine import PandasQueryEngine
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
df = pd.DataFrame(
{
"city": ["Toronto", "Tokyo", "Berlin"],
"population": [2930000, 13960000, 3645000],
}
)
query_engine = PandasQueryEngine(df=df, verbose=True)
response = query_engine.query(
"What is the city with the highest population?",
)
display(Markdown(f"<b>{response}</b>"))
print(response.metadata["pandas_instruction_str"])
query_engine = PandasQueryEngine(df=df, verbose=True, synthesize_response=True)
response = query_engine.query(
"What is the city with the highest population? Give both the city and population",
)
print(str(response))
get_ipython().system("wget 'https://raw.githubusercontent.com/jerryjliu/llama_index/main/docs/examples/data/csv/titanic_train.csv' -O 'titanic_train.csv'")
df = pd.read_csv("./titanic_train.csv")
query_engine = PandasQueryEngine(df=df, verbose=True)
response = query_engine.query(
"What is the correlation between survival and age?",
)
display(Markdown(f"<b>{response}</b>"))
print(response.metadata["pandas_instruction_str"])
from llama_index.core import PromptTemplate
query_engine = PandasQueryEngine(df=df, verbose=True)
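# Hedged sketch: PandasQueryEngine exposes its prompts like the other query
# engines in this file, so they can be listed and later overridden with a
# custom PromptTemplate; the exact prompt keys depend on the installed version.
prompts = query_engine.get_prompts()
for prompt_key, prompt in prompts.items():
    print(prompt_key)
    print(prompt.get_template())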
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-tables-chain-of-table-base')
get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip')
get_ipython().system('unzip data.zip')
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/200-csv/3.csv")
df
from llama_index.packs.tables.chain_of_table.base import (
ChainOfTableQueryEngine,
serialize_table,
)
from llama_index.core.llama_pack import download_llama_pack
download_llama_pack(
"ChainOfTablePack",
"./chain_of_table_pack",
skip_load=True,
)
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4-1106-preview")
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
import pandas as pd
df = pd.read_csv("~/Downloads/WikiTableQuestions/csv/200-csv/11.csv")
df
query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True)
response = query_engine.query("Who won best Director in the 1972 Academy Awards?")
str(response.response)
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/200-csv/42.csv")
df
query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True)
response = query_engine.query("What was the precipitation in inches during June?")
str(response)
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline
prompt_str = """\
Here's a serialized table.
{serialized_table}
Given this table please answer the question: {question}
Answer: """
prompt = PromptTemplate(prompt_str)
prompt_c = prompt.as_query_component(partial={"serialized_table": serialize_table(df)})
qp = QueryPipeline(chain=[prompt_c, llm])
response = qp.run("What was the precipitation in inches during June?")
print(str(response))
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/203-csv/114.csv")
df
query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True)
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import camelot
from llama_index.core import VectorStoreIndex
from llama_index.core.query_engine import PandasQueryEngine
from llama_index.core.schema import IndexNode
from llama_index.llms.openai import OpenAI
from llama_index.readers.file import PyMuPDFReader
from typing import List
import os
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
file_path = "billionaires_page.pdf"
reader = PyMuPDFReader()
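# Hedged sketch: load the PDF into Document objects; assumes load_data accepts
# the file path directly, as with the other file readers used in this file.
docs = reader.load_data(file_path)
print(f"Loaded {len(docs)} document(s) from {file_path}")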
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index llama-hub')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
domain = "docs.llamaindex.ai"
docs_url = "https://docs.llamaindex.ai/en/latest/"
get_ipython().system('wget -e robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links --restrict-file-names=windows --domains {domain} --no-parent {docs_url}')
from llama_index.readers.file import UnstructuredReader
reader = UnstructuredReader()
from pathlib import Path
all_files_gen = Path("./docs.llamaindex.ai/").rglob("*")
all_files = [f.resolve() for f in all_files_gen]
all_html_files = [f for f in all_files if f.suffix.lower() == ".html"]
len(all_html_files)
from llama_index.core import Document
doc_limit = 100
docs = []
for idx, f in enumerate(all_html_files):
if idx > doc_limit:
break
print(f"Idx {idx}/{len(all_html_files)}")
loaded_docs = reader.load_data(file=f, split_documents=True)
start_idx = 72
loaded_doc = Document(
text="\n\n".join([d.get_content() for d in loaded_docs[72:]]),
metadata={"path": str(f)},
)
print(loaded_doc.metadata["path"])
docs.append(loaded_doc)
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
import nest_asyncio
nest_asyncio.apply()
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.agent.openai import OpenAIAgent
from llama_index.core import (
load_index_from_storage,
StorageContext,
VectorStoreIndex,
)
from llama_index.core import SummaryIndex
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.node_parser import SentenceSplitter
import os
from tqdm.notebook import tqdm
import pickle
async def build_agent_per_doc(nodes, file_base):
print(file_base)
vi_out_path = f"./data/llamaindex_docs/{file_base}"
summary_out_path = f"./data/llamaindex_docs/{file_base}_summary.pkl"
if not os.path.exists(vi_out_path):
Path("./data/llamaindex_docs/").mkdir(parents=True, exist_ok=True)
vector_index = VectorStoreIndex(nodes)
vector_index.storage_context.persist(persist_dir=vi_out_path)
else:
vector_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=vi_out_path),
)
summary_index = SummaryIndex(nodes)
vector_query_engine = vector_index.as_query_engine(llm=llm)
summary_query_engine = summary_index.as_query_engine(
response_mode="tree_summarize", llm=llm
)
if not os.path.exists(summary_out_path):
Path(summary_out_path).parent.mkdir(parents=True, exist_ok=True)
summary = str(
await summary_query_engine.aquery(
"Extract a concise 1-2 line summary of this document"
)
)
pickle.dump(summary, open(summary_out_path, "wb"))
else:
summary = pickle.load(open(summary_out_path, "rb"))
query_engine_tools = [
QueryEngineTool(
query_engine=vector_query_engine,
metadata=ToolMetadata(
name=f"vector_tool_{file_base}",
description=f"Useful for questions related to specific facts",
),
),
QueryEngineTool(
query_engine=summary_query_engine,
metadata=ToolMetadata(
name=f"summary_tool_{file_base}",
description=f"Useful for summarization questions",
),
),
]
function_llm = OpenAI(model="gpt-4")
agent = OpenAIAgent.from_tools(
query_engine_tools,
llm=function_llm,
verbose=True,
system_prompt=f"""\
You are a specialized agent designed to answer queries about the `{file_base}.html` part of the LlamaIndex docs.
You must ALWAYS use at least one of the tools provided when answering a question; do NOT rely on prior knowledge.\
""",
)
return agent, summary
async def build_agents(docs):
node_parser = SentenceSplitter()
agents_dict = {}
extra_info_dict = {}
for idx, doc in enumerate(tqdm(docs)):
nodes = node_parser.get_nodes_from_documents([doc])
file_path = Path(doc.metadata["path"])
file_base = str(file_path.parent.stem) + "_" + str(file_path.stem)
agent, summary = await build_agent_per_doc(nodes, file_base)
agents_dict[file_base] = agent
extra_info_dict[file_base] = {"summary": summary, "nodes": nodes}
return agents_dict, extra_info_dict
agents_dict, extra_info_dict = await build_agents(docs)
all_tools = []
for file_base, agent in agents_dict.items():
summary = extra_info_dict[file_base]["summary"]
doc_tool = QueryEngineTool(
query_engine=agent,
metadata=ToolMetadata(
name=f"tool_{file_base}",
description=summary,
),
)
all_tools.append(doc_tool)
print(all_tools[0].metadata)
from llama_index.core import VectorStoreIndex
from llama_index.core.objects import (
ObjectIndex,
SimpleToolNodeMapping,
ObjectRetriever,
)
from llama_index.core.retrievers import BaseRetriever
from llama_index.postprocessor.cohere_rerank import CohereRerank
from llama_index.core.query_engine import SubQuestionQueryEngine
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4-0613")
tool_mapping = SimpleToolNodeMapping.from_objects(all_tools)
obj_index = ObjectIndex.from_objects(
all_tools,
tool_mapping,
VectorStoreIndex,
)
vector_node_retriever = obj_index.as_node_retriever(similarity_top_k=10)
class CustomRetriever(BaseRetriever):
def __init__(self, vector_retriever, postprocessor=None):
self._vector_retriever = vector_retriever
self._postprocessor = postprocessor or CohereRerank(top_n=5)
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('mkdir -p data')
get_ipython().system('echo "This is a test file: one!" > data/test1.txt')
get_ipython().system('echo "This is a test file: two!" > data/test2.txt')
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data", filename_as_id=True).load_data()
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.storage.docstore.redis import RedisDocumentStore
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.core.node_parser import SentenceSplitter
pipeline = IngestionPipeline(
transformations=[
SentenceSplitter(),
HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5"),
],
    docstore=SimpleDocumentStore(),
)
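# Hedged sketch: running the pipeline parses, embeds, and returns the nodes
# while the attached docstore tracks document hashes for de-duplication on
# re-runs (same pattern as the pipeline.run call later in this file).
nodes = pipeline.run(documents=documents)
print(f"Ingested {len(nodes)} nodes")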
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm')
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.readers.file import FlatReader
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
from llama_index.core.ingestion import IngestionPipeline
from pathlib import Path
import nest_asyncio
nest_asyncio.apply()
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
pipeline = IngestionPipeline(
documents=docs,
transformations=[
HTMLNodeParser.from_defaults(),
SentenceSplitter(chunk_size=1024, chunk_overlap=200),
OpenAIEmbedding(),
],
)
eval_nodes = pipeline.run(documents=docs)
eval_llm = OpenAI(model="gpt-3.5-turbo")
dataset_generator = DatasetGenerator(
eval_nodes[:100],
llm=eval_llm,
show_progress=True,
num_questions_per_chunk=3,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=100)
len(eval_dataset.qr_pairs)
eval_dataset.save_json("data/tesla10k_eval_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/tesla10k_eval_dataset.json"
)
eval_qs = eval_dataset.questions
qr_pairs = eval_dataset.qr_pairs
ref_response_strs = [r for (_, r) in qr_pairs]
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
)
from llama_index.core.evaluation.eval_utils import (
get_responses,
get_results_df,
)
from llama_index.core.evaluation import BatchEvalRunner
evaluator_c = CorrectnessEvaluator(llm=eval_llm)
evaluator_s = SemanticSimilarityEvaluator(llm=eval_llm)
evaluator_dict = {
"correctness": evaluator_c,
"semantic_similarity": evaluator_s,
}
batch_eval_runner = BatchEvalRunner(
evaluator_dict, workers=2, show_progress=True
)
from llama_index.core import VectorStoreIndex
async def run_evals(
pipeline, batch_eval_runner, docs, eval_qs, eval_responses_ref
):
nodes = pipeline.run(documents=docs)
vector_index = VectorStoreIndex(nodes)
query_engine = vector_index.as_query_engine()
pred_responses = get_responses(eval_qs, query_engine, show_progress=True)
eval_results = await batch_eval_runner.aevaluate_responses(
eval_qs, responses=pred_responses, reference=eval_responses_ref
)
return eval_results
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
sent_parser_o0 = SentenceSplitter(chunk_size=1024, chunk_overlap=0)
sent_parser_o200 = SentenceSplitter(chunk_size=1024, chunk_overlap=200)
sent_parser_o500 = SentenceSplitter(chunk_size=1024, chunk_overlap=500)
html_parser = HTMLNodeParser.from_defaults()
parser_dict = {
"sent_parser_o0": sent_parser_o0,
"sent_parser_o200": sent_parser_o200,
"sent_parser_o500": sent_parser_o500,
}
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.ingestion import IngestionPipeline
pipeline_dict = {}
for k, parser in parser_dict.items():
pipeline = IngestionPipeline(
documents=docs,
transformations=[
html_parser,
parser,
OpenAIEmbedding(),
],
)
pipeline_dict[k] = pipeline
eval_results_dict = {}
for k, pipeline in pipeline_dict.items():
eval_results = await run_evals(
pipeline, batch_eval_runner, docs, eval_qs, ref_response_strs
)
eval_results_dict[k] = eval_results
import pickle
pickle.dump(eval_results_dict, open("eval_results_1.pkl", "wb"))
eval_results_list = list(eval_results_dict.items())
results_df = get_results_df(
[v for _, v in eval_results_list],
[k for k, _ in eval_results_list],
["correctness", "semantic_similarity"],
)
display(results_df)
for k, pipeline in pipeline_dict.items():
pipeline.cache.persist(f"./cache/{k}.json")
from llama_index.core.extractors import (
TitleExtractor,
QuestionsAnsweredExtractor,
SummaryExtractor,
)
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
extractor_dict = {
"summary": SummaryExtractor(in_place=False),
"qa": QuestionsAnsweredExtractor(in_place=False),
"default": None,
}
html_parser = HTMLNodeParser.from_defaults()
sent_parser_o200 = SentenceSplitter(chunk_size=1024, chunk_overlap=200)
pipeline_dict = {}
html_parser = HTMLNodeParser.from_defaults()
from llama_index.core import SQLDatabase
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///chinook.db")
sql_database = SQLDatabase(engine)
from llama_index.core.query_pipeline import QueryPipeline
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -o ./chinook.zip')
get_ipython().system('unzip ./chinook.zip')
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
callback_manager = CallbackManager()
get_ipython().run_line_magic('pip', 'install llama-index-llms-litellm')
get_ipython().system('pip install llama-index')
import os
from llama_index.llms.litellm import LiteLLM
from llama_index.core.llms import ChatMessage
os.environ["OPENAI_API_KEY"] = "your-api-key"
os.environ["COHERE_API_KEY"] = "your-api-key"
message = ChatMessage(role="user", content="Hey! how's it going?")
llm = LiteLLM("gpt-3.5-turbo")
chat_response = llm.chat([message])
llm = LiteLLM("command-nightly")
chat_response = llm.chat([message])
from llama_index.core.llms import ChatMessage
from llama_index.llms.litellm import LiteLLM
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="Tell me a story"),
]
resp = LiteLLM("gpt-3.5-turbo").chat(messages)
print(resp)
from llama_index.llms.litellm import LiteLLM
llm = LiteLLM("gpt-3.5-turbo")
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
print(r.delta, end="")
from llama_index.llms.litellm import LiteLLM
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="Tell me a story"),
]
llm = LiteLLM("gpt-3.5-turbo")
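# Hedged sketch completing the streaming-chat variant: stream the pirate story
# token by token, mirroring the stream_complete example above.
resp = llm.stream_chat(messages)
for r in resp:
    print(r.delta, end="")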
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import BaseTool, FunctionTool
def multiply(a: int, b: int) -> int:
"""Multiple two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
llm = OpenAI(model="gpt-3.5-turbo-1106")
agent = OpenAIAgent.from_tools(
[multiply_tool, add_tool], llm=llm, verbose=True
)
response = agent.chat("What is (121 * 3) + 42?")
print(str(response))
response = agent.stream_chat("What is (121 * 3) + 42?")
import nest_asyncio
nest_asyncio.apply()
response = await agent.achat("What is (121 * 3) + 42?")
print(str(response))
response = await agent.astream_chat("What is (121 * 3) + 42?")
response_gen = response.response_gen
async for token in response.async_response_gen():
print(token, end="")
import json
def get_current_weather(location, unit="fahrenheit"):
"""Get the current weather in a given location"""
if "tokyo" in location.lower():
return json.dumps(
{"location": location, "temperature": "10", "unit": "celsius"}
)
elif "san francisco" in location.lower():
return json.dumps(
{"location": location, "temperature": "72", "unit": "fahrenheit"}
)
else:
return json.dumps(
{"location": location, "temperature": "22", "unit": "celsius"}
)
weather_tool = FunctionTool.from_defaults(fn=get_current_weather)
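# Hedged sketch: register the weather tool on an agent, mirroring the
# multiply/add agent above, so the model can call get_current_weather itself.
agent = OpenAIAgent.from_tools([weather_tool], llm=llm, verbose=True)
response = agent.chat("What's the weather like in San Francisco?")
print(str(response))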
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.node_parser import SentenceWindowNodeParser
from llama_index.core.node_parser import SentenceSplitter
node_parser = SentenceWindowNodeParser.from_defaults(
window_size=3,
window_metadata_key="window",
original_text_metadata_key="original_text",
)
text_splitter = SentenceSplitter()
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)
embed_model = HuggingFaceEmbedding(
model_name="sentence-transformers/all-mpnet-base-v2", max_length=512
)
from llama_index.core import Settings
Settings.llm = llm
Settings.embed_model = embed_model
Settings.text_splitter = text_splitter
get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf')
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader(
input_files=["./IPCC_AR6_WGII_Chapter03.pdf"]
).load_data()
nodes = node_parser.get_nodes_from_documents(documents)
base_nodes = text_splitter.get_nodes_from_documents(documents)
from llama_index.core import VectorStoreIndex
sentence_index = VectorStoreIndex(nodes)
base_index = VectorStoreIndex(base_nodes)
from llama_index.core.postprocessor import MetadataReplacementPostProcessor
query_engine = sentence_index.as_query_engine(
similarity_top_k=2,
node_postprocessors=[
MetadataReplacementPostProcessor(target_metadata_key="window")
],
)
window_response = query_engine.query(
"What are the concerns surrounding the AMOC?"
)
print(window_response)
window = window_response.source_nodes[0].node.metadata["window"]
sentence = window_response.source_nodes[0].node.metadata["original_text"]
print(f"Window: {window}")
print("------------------")
print(f"Original Sentence: {sentence}")
query_engine = base_index.as_query_engine(similarity_top_k=2)
vector_response = query_engine.query(
"What are the concerns surrounding the AMOC?"
)
print(vector_response)
query_engine = base_index.as_query_engine(similarity_top_k=5)
vector_response = query_engine.query(
"What are the concerns surrounding the AMOC?"
)
print(vector_response)
for source_node in window_response.source_nodes:
print(source_node.node.metadata["original_text"])
print("--------")
for node in vector_response.source_nodes:
print("AMOC mentioned?", "AMOC" in node.node.text)
print("--------")
print(vector_response.source_nodes[2].node.text)
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
import nest_asyncio
import random
nest_asyncio.apply()
len(base_nodes)
num_nodes_eval = 30
sample_eval_nodes = random.sample(base_nodes[:200], num_nodes_eval)
dataset_generator = DatasetGenerator(
sample_eval_nodes,
llm=OpenAI(model="gpt-4"),
show_progress=True,
num_questions_per_chunk=2,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes()
eval_dataset.save_json("data/ipcc_eval_qr_dataset.json")
eval_dataset = QueryResponseDataset.from_json("data/ipcc_eval_qr_dataset.json")
import asyncio
import nest_asyncio
nest_asyncio.apply()
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
RelevancyEvaluator,
FaithfulnessEvaluator,
PairwiseComparisonEvaluator,
)
from collections import defaultdict
import pandas as pd
evaluator_c = CorrectnessEvaluator(llm=OpenAI(model="gpt-4"))
evaluator_s = SemanticSimilarityEvaluator()
evaluator_r = RelevancyEvaluator(llm=OpenAI(model="gpt-4"))
evaluator_f = FaithfulnessEvaluator(llm=OpenAI(model="gpt-4"))
from llama_index.core.evaluation.eval_utils import (
get_responses,
get_results_df,
)
from llama_index.core.evaluation import BatchEvalRunner
max_samples = 30
eval_qs = eval_dataset.questions
ref_response_strs = [r for (_, r) in eval_dataset.qr_pairs]
base_query_engine = base_index.as_query_engine(similarity_top_k=2)
query_engine = sentence_index.as_query_engine(
similarity_top_k=2,
node_postprocessors=[
MetadataReplacementPostProcessor(target_metadata_key="window")
],
)
import numpy as np
base_pred_responses = get_responses(
eval_qs[:max_samples], base_query_engine, show_progress=True
)
pred_responses = get_responses(
eval_qs[:max_samples], query_engine, show_progress=True
)
pred_response_strs = [str(p) for p in pred_responses]
base_pred_response_strs = [str(p) for p in base_pred_responses]
evaluator_dict = {
"correctness": evaluator_c,
"faithfulness": evaluator_f,
"relevancy": evaluator_r,
"semantic_similarity": evaluator_s,
}
batch_runner = BatchEvalRunner(evaluator_dict, workers=2, show_progress=True)
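# Hedged sketch: score the sentence-window and base predictions against the
# reference answers, following the aevaluate_responses pattern used earlier in
# this file.
eval_results = await batch_runner.aevaluate_responses(
    queries=eval_qs[:max_samples],
    responses=pred_responses,
    reference=ref_response_strs[:max_samples],
)
base_eval_results = await batch_runner.aevaluate_responses(
    queries=eval_qs[:max_samples],
    responses=base_pred_responses,
    reference=ref_response_strs[:max_samples],
)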
get_ipython().run_line_magic('pip', 'install llama-index-llms-fireworks')
get_ipython().run_line_magic('pip', 'install llama-index')
from llama_index.llms.fireworks import Fireworks
resp = Fireworks().complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.fireworks import Fireworks
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="What is your name"),
]
resp = Fireworks().chat(messages)
print(resp)
from llama_index.llms.fireworks import Fireworks
llm = Fireworks()
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
print(r.delta, end="")
from llama_index.llms.fireworks import Fireworks
from llama_index.core.llms import ChatMessage
llm = Fireworks()
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-flag-embedding-reranker')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install git+https://github.com/FlagOpen/FlagEmbedding.git')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
OPENAI_API_TOKEN = "sk-"
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = HuggingFaceEmbedding(
model_name="BAAI/bge-small-en-v1.5"
)
index = VectorStoreIndex.from_documents(documents=documents)
from llama_index.postprocessor.flag_embedding_reranker import (
FlagEmbeddingReranker,
)
rerank = FlagEmbeddingReranker(model="BAAI/bge-reranker-large", top_n=5)
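# Hedged sketch: attach the reranker as a node postprocessor, mirroring the
# SentenceTransformerRerank usage later in this file; the query is illustrative.
query_engine = index.as_query_engine(
    similarity_top_k=10,
    node_postprocessors=[rerank],
)
response = query_engine.query("What did the author do growing up?")
print(response)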
get_ipython().system('pip install llama-index-llms-ollama')
get_ipython().system('pip install llama-index')
from llama_index.llms.ollama import Ollama
gemma_2b = Ollama(model="gemma:2b", request_timeout=30.0)
gemma_7b = Ollama(model="gemma:7b", request_timeout=30.0)
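# Hedged sketch: both local Gemma variants expose the same completion API as
# the other LLMs in this file; the prompt is illustrative.
resp = gemma_2b.complete("Who is Paul Graham?")
print(resp)
resp = gemma_7b.complete("Who is Paul Graham?")
print(resp)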
get_ipython().run_line_magic('pip', 'install llama-index-readers-dashvector')
get_ipython().system('pip install llama-index')
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
api_key = os.environ["DASHVECTOR_API_KEY"]
from llama_index.readers.dashvector import DashVectorReader
reader = DashVectorReader(api_key=api_key)
import numpy as np
id_to_text_map = {
"id1": "text blob 1",
"id2": "text blob 2",
}
query_vector = [n1, n2, n3, ...]  # placeholder: replace with your query embedding values
documents = reader.load_data(
collection_name="quickstart",
id_to_text_map=id_to_text_map,
top_k=3,
vector=query_vector,
filter="key = 'value'",
)
from llama_index.core import ListIndex
from IPython.display import Markdown, display
index = ListIndex.from_documents(documents)
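# Hedged sketch: query the list index and render the answer with the IPython
# helpers imported above ("<query_text>" is a placeholder).
query_engine = index.as_query_engine()
response = query_engine.query("<query_text>")
display(Markdown(f"<b>{response}</b>"))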
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-replicate')
get_ipython().run_line_magic('pip', 'install replicate')
import os
REPLICATE_API_TOKEN = "" # Your Relicate API token here
os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN
from PIL import Image
import requests
from io import BytesIO
from llama_index.core.multi_modal_llms.generic_utils import load_image_urls
from llama_index.core.schema import ImageDocument
if not os.path.exists("test_images"):
os.makedirs("test_images")
image_urls = [
"https://www.sportsnet.ca/wp-content/uploads/2023/11/CP1688996471-1040x572.jpg",
"https://res.cloudinary.com/hello-tickets/image/upload/c_limit,f_auto,q_auto,w_1920/v1640835927/o3pfl41q7m5bj8jardk0.jpg",
"https://www.cleverfiles.com/howto/wp-content/uploads/2018/03/minion.jpg",
]
for idx, image_url in enumerate(image_urls):
response = requests.get(image_url)
img = Image.open(BytesIO(response.content))
img.save(f"test_images/{idx}.png")
image_documents = [
ImageDocument(image_path=f"test_images/{idx}.png")
for idx in range(len(image_urls))
]
import matplotlib.pyplot as plt
from llama_index.core.response.notebook_utils import display_image_uris
image_paths = [str(img_doc.image_path) for img_doc in image_documents]
display_image_uris(image_paths)
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
import os
os.environ["OPENAI_API_KEY"] = "INSERT OPENAI KEY"
get_ipython().system('pip install llama-index')
from llama_index.core import download_loader
from llama_index.readers.wikipedia import WikipediaReader
loader = WikipediaReader()
documents = loader.load_data(pages=["Berlin"])
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents)
import time
from llama_index.core import VectorStoreIndex
from llama_index.core.postprocessor import SentenceEmbeddingOptimizer
print("Without optimization")
start_time = time.time()
query_engine = index.as_query_engine()
res = query_engine.query("What is the population of Berlin?")
end_time = time.time()
print("Total time elapsed: {}".format(end_time - start_time))
print("Answer: {}".format(res))
print("With optimization")
start_time = time.time()
query_engine = index.as_query_engine(
node_postprocessors=[SentenceEmbeddingOptimizer(percentile_cutoff=0.5)]
)
res = query_engine.query("What is the population of Berlin?")
end_time = time.time()
print("Total time elapsed: {}".format(end_time - start_time))
print("Answer: {}".format(res))
print("Alternate optimization cutoff")
start_time = time.time()
query_engine = index.as_query_engine(
    node_postprocessors=[SentenceEmbeddingOptimizer(threshold_cutoff=0.7)]
)
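# Mirrors the two timed runs above for the threshold-cutoff variant.
res = query_engine.query("What is the population of Berlin?")
end_time = time.time()
print("Total time elapsed: {}".format(end_time - start_time))
print("Answer: {}".format(res))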
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
)
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(response_mode="tree_summarize")
def display_prompt_dict(prompts_dict):
for k, p in prompts_dict.items():
text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>"
display(Markdown(text_md))
print(p.get_template())
display(Markdown("<br><br>"))
prompts_dict = query_engine.get_prompts()
display_prompt_dict(prompts_dict)
prompts_dict = query_engine.response_synthesizer.get_prompts()
display_prompt_dict(prompts_dict)
query_engine = index.as_query_engine(response_mode="compact")
prompts_dict = query_engine.get_prompts()
display_prompt_dict(prompts_dict)
response = query_engine.query("What did the author do growing up?")
print(str(response))
from llama_index.core import PromptTemplate
query_engine = index.as_query_engine(response_mode="tree_summarize")
new_summary_tmpl_str = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the query in the style of a Shakespeare play.\n"
"Query: {query_str}\n"
"Answer: "
)
new_summary_tmpl = PromptTemplate(new_summary_tmpl_str)
query_engine.update_prompts(
{"response_synthesizer:summary_template": new_summary_tmpl}
)
prompts_dict = query_engine.get_prompts()
display_prompt_dict(prompts_dict)
response = query_engine.query("What did the author do growing up?")
print(str(response))
from llama_index.core.query_engine import (
RouterQueryEngine,
FLAREInstructQueryEngine,
)
from llama_index.core.selectors import LLMMultiSelector
from llama_index.core.evaluation import FaithfulnessEvaluator, DatasetGenerator
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.tools import QueryEngineTool
query_tool = QueryEngineTool.from_defaults(
query_engine=query_engine, description="test description"
)
router_query_engine = RouterQueryEngine.from_defaults([query_tool])
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
splitter = SentenceSplitter(chunk_size=256)
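# Hedged sketch (names assumed from llama-index-retrievers-bm25): split the
# essay into nodes and build both a vector retriever and a BM25 retriever over
# the same nodes.
from llama_index.retrievers.bm25 import BM25Retriever

nodes = splitter.get_nodes_from_documents(documents)
index = VectorStoreIndex(nodes)
vector_retriever = index.as_retriever(similarity_top_k=2)
bm25_retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)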
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.core.evaluation.benchmarks import HotpotQAEvaluator
from llama_index.core import VectorStoreIndex
from llama_index.core import Document
from llama_index.llms.openai import OpenAI
from llama_index.core.embeddings import resolve_embed_model
llm = OpenAI(model="gpt-3.5-turbo")
embed_model = resolve_embed_model(
"local:sentence-transformers/all-MiniLM-L6-v2"
)
index = VectorStoreIndex.from_documents(
[Document.example()], embed_model=embed_model, show_progress=True
)
engine = index.as_query_engine(llm=llm)
HotpotQAEvaluator().run(engine, queries=5, show_result=True)
from llama_index.core.postprocessor import SentenceTransformerRerank
rerank = SentenceTransformerRerank(top_n=3)
engine = index.as_query_engine(
llm=llm,
node_postprocessors=[rerank],
)
HotpotQAEvaluator().run(engine, queries=5, show_result=True)
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
from pydantic import BaseModel
from unstructured.partition.html import partition_html
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm')
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
docs_2021 = reader.load_data(Path("tesla_2021_10k.htm"))
docs_2020 = reader.load_data(Path("tesla_2020_10k.htm"))
from llama_index.core.node_parser import UnstructuredElementNodeParser
node_parser = UnstructuredElementNodeParser()
get_ipython().run_line_magic('pip', 'install llama-index-readers-mongodb')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
get_ipython().system('pip install llama-index pymongo')
from llama_index.core import SummaryIndex
from llama_index.readers.mongodb import SimpleMongoReader
from IPython.display import Markdown, display
import os
host = "<host>"
port = "<port>"
db_name = "<db_name>"
collection_name = "<collection_name>"
query_dict = {}
field_names = ["text"]
reader = SimpleMongoReader(host, port)
documents = reader.load_data(
db_name, collection_name, field_names, query_dict=query_dict
)
index = SummaryIndex.from_documents(documents)
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
from llama_index.llms.anthropic import Anthropic
llm = OpenAI()
data = SimpleDirectoryReader(input_dir="./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(data)
chat_engine = index.as_chat_engine(chat_mode="react", llm=llm, verbose=True)
response = chat_engine.chat(
"Use the tool to answer what did Paul Graham do in the summer of 1995?"
)
print(response)
llm = Anthropic()
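# Hedged sketch: rebuild the ReAct chat engine on the Anthropic LLM and repeat
# the same question for comparison with the OpenAI run above.
chat_engine = index.as_chat_engine(chat_mode="react", llm=llm, verbose=True)
response = chat_engine.chat(
    "Use the tool to answer what did Paul Graham do in the summer of 1995?"
)
print(response)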
get_ipython().run_line_magic('pip', 'install llama-index-llms-friendli')
get_ipython().system('pip install llama-index')
get_ipython().run_line_magic('env', 'FRIENDLI_TOKEN=...')
from llama_index.llms.friendli import Friendli
llm = Friendli()
from llama_index.core.llms import ChatMessage, MessageRole
message = ChatMessage(role=MessageRole.USER, content="Tell me a joke.")
resp = llm.chat([message])
print(resp)
resp = llm.stream_chat([message])
for r in resp:
print(r.delta, end="")
resp = await llm.achat([message])
print(resp)
resp = await llm.astream_chat([message])
async for r in resp:
print(r.delta, end="")
prompt = "Draft a cover letter for a role in software engineering."
resp = llm.complete(prompt)
print(resp)
resp = llm.stream_complete(prompt)
for r in resp:
print(r.delta, end="")
resp = await llm.acomplete(prompt)
print(resp)
resp = await llm.astream_complete(prompt)
async for r in resp:
print(r.delta, end="")
from llama_index.llms.friendli import Friendli
llm = Friendli(model="llama-2-70b-chat")
from llama_index.agent import OpenAIAgent
import openai
openai.api_key = "sk-api-key"
from llama_index.tools.gmail.base import GmailToolSpec
from llama_index.tools.google_calendar.base import GoogleCalendarToolSpec
from llama_index.tools.google_search.base import GoogleSearchToolSpec
gmail_tools = GmailToolSpec().to_tool_list()
gcal_tools = GoogleCalendarToolSpec().to_tool_list()
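# Hedged sketch: combine the tool lists into a single OpenAI agent (the Google
# search spec imported above would additionally need its API key and engine id).
agent = OpenAIAgent.from_tools(gmail_tools + gcal_tools, verbose=True)
response = agent.chat("What do I have on my calendar today?")
print(response)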
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
qa_prompt_str = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the question: {query_str}\n"
)
refine_prompt_str = (
"We have the opportunity to refine the original answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"Given the new context, refine the original answer to better "
"answer the question: {query_str}. "
"If the context isn't useful, output the original answer again.\n"
"Original Answer: {existing_answer}"
)
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core import ChatPromptTemplate
chat_text_qa_msgs = [
ChatMessage(
role=MessageRole.SYSTEM,
content=(
"Always answer the question, even if the context isn't helpful."
),
),
ChatMessage(role=MessageRole.USER, content=qa_prompt_str),
]
text_qa_template = ChatPromptTemplate(chat_text_qa_msgs)
chat_refine_msgs = [
ChatMessage(
role=MessageRole.SYSTEM,
content=(
"Always answer the question, even if the context isn't helpful."
),
),
ChatMessage(role=MessageRole.USER, content=refine_prompt_str),
]
refine_template = ChatPromptTemplate(chat_refine_msgs)
from llama_index.core import ChatPromptTemplate
chat_text_qa_msgs = [
(
"system",
"Always answer the question, even if the context isn't helpful.",
),
("user", qa_prompt_str),
]
text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
chat_refine_msgs = [
(
"system",
"Always answer the question, even if the context isn't helpful.",
),
("user", refine_prompt_str),
]
refine_template = ChatPromptTemplate.from_messages(chat_refine_msgs)
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
SimpleKeywordTableIndex,
)
from llama_index.core import SummaryIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
llm = OpenAI(model="gpt-4")
splitter = SentenceSplitter(chunk_size=1024)
nodes = splitter.get_nodes_from_documents(documents)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context)
list_retriever = summary_index.as_retriever()
vector_retriever = vector_index.as_retriever()
keyword_retriever = keyword_index.as_retriever()
from llama_index.core.tools import RetrieverTool
list_tool = RetrieverTool.from_defaults(
retriever=list_retriever,
description=(
"Will retrieve all context from Paul Graham's essay on What I Worked"
" On. Don't use if the question only requires more specific context."
),
)
vector_tool = RetrieverTool.from_defaults(
retriever=vector_retriever,
description=(
"Useful for retrieving specific context from Paul Graham essay on What"
" I Worked On."
),
)
keyword_tool = RetrieverTool.from_defaults(
retriever=keyword_retriever,
description=(
"Useful for retrieving specific context from Paul Graham essay on What"
" I Worked On (using entities mentioned in query)"
),
)
from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector
from llama_index.core.selectors import (
PydanticMultiSelector,
PydanticSingleSelector,
)
from llama_index.core.retrievers import RouterRetriever
from llama_index.core.response.notebook_utils import display_source_node
retriever = RouterRetriever(
selector=PydanticSingleSelector.from_defaults(llm=llm),
retriever_tools=[
list_tool,
vector_tool,
],
)
nodes = retriever.retrieve(
"Can you give me all the context regarding the author's life?"
)
for node in nodes:
    display_source_node(node)
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm')
get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git')
get_ipython().run_line_magic('pip', 'install torch torchvision')
get_ipython().run_line_magic('pip', 'install matplotlib scikit-image')
get_ipython().run_line_magic('pip', 'install -U qdrant_client')
import os
OPENAI_API_TOKEN = ""
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
from pathlib import Path
input_image_path = Path("input_images")
if not input_image_path.exists():
Path.mkdir(input_image_path)
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1nUhsBRiSWxcVQv8t8Cvvro8HJZ88LCzj" -O ./input_images/long_range_spec.png')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=19pLwx0nVqsop7lo0ubUSYTzQfMtKJJtJ" -O ./input_images/model_y.png')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1utu3iD9XEgR5Sb7PrbtMf1qw8T1WdNmF" -O ./input_images/performance_spec.png')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1dpUakWMqaXR4Jjn1kHuZfB0pAXvjn2-i" -O ./input_images/price.png')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1qNeT201QAesnAP5va1ty0Ky5Q_jKkguV" -O ./input_images/real_wheel_spec.png')
from PIL import Image
import matplotlib.pyplot as plt
import os
image_paths = []
for img_path in os.listdir("./input_images"):
image_paths.append(str(os.path.join("./input_images", img_path)))
def plot_images(image_paths):
images_shown = 0
plt.figure(figsize=(16, 9))
for img_path in image_paths:
if os.path.isfile(img_path):
image = Image.open(img_path)
plt.subplot(2, 3, images_shown + 1)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
images_shown += 1
if images_shown >= 9:
break
plot_images(image_paths)
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core import SimpleDirectoryReader
image_documents = SimpleDirectoryReader("./input_images").load_data()
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.2)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("../data/paul_graham").load_data()
from llama_index.core import Settings
Settings.chunk_size = 1024
nodes = Settings.node_parser.get_nodes_from_documents(documents)
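# Hedged sketch: index the 1024-token chunks and issue a sample query.
from llama_index.core import VectorStoreIndex

index = VectorStoreIndex(nodes)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response)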
get_ipython().run_line_magic('pip', 'install llama-index-readers-elasticsearch')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-opensearch')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-ollama')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from os import getenv
from llama_index.core import SimpleDirectoryReader
from llama_index.vector_stores.opensearch import (
OpensearchVectorStore,
OpensearchVectorClient,
)
from llama_index.core import VectorStoreIndex, StorageContext
endpoint = getenv("OPENSEARCH_ENDPOINT", "http://localhost:9200")
idx = getenv("OPENSEARCH_INDEX", "gpt-index-demo")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
text_field = "content"
embedding_field = "embedding"
client = OpensearchVectorClient(
endpoint, idx, 1536, embedding_field=embedding_field, text_field=text_field
)
vector_store = OpensearchVectorStore(client)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents=documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
res = query_engine.query("What did the author do growing up?")
res.response
from llama_index.core import Document
from llama_index.core.vector_stores import MetadataFilters, ExactMatchFilter
import regex as re
text_chunks = documents[0].text.split("\n\n")
footnotes = [
Document(
text=chunk,
id=documents[0].doc_id,
metadata={"is_footnote": bool(re.search(r"^\s*\[\d+\]\s*", chunk))},
)
for chunk in text_chunks
if bool(re.search(r"^\s*\[\d+\]\s*", chunk))
]
for f in footnotes:
index.insert(f)
footnote_query_engine = index.as_query_engine(
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="term", value='{"metadata.is_footnote": "true"}'
),
ExactMatchFilter(
key="query_string",
value='{"query": "content: space AND content: lisp"}',
),
]
)
)
res = footnote_query_engine.query(
"What did the author about space aliens and lisp?"
)
res.response
from llama_index.readers.elasticsearch import ElasticsearchReader
rdr = ElasticsearchReader(endpoint, idx)
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
from llama_index.core import Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai import OpenAI
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = HuggingFaceEmbedding(
model_name="BAAI/bge-small-en-v1.5"
)
index = VectorStoreIndex.from_documents(documents=documents)
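# Sketch (assumed continuation): query the index built with the local BGE embedding model.
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response)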
get_ipython().run_line_magic('pip', 'install llama-index-llms-litellm')
get_ipython().system('pip install llama-index')
import os
from llama_index.llms.litellm import LiteLLM
from llama_index.core.llms import ChatMessage
os.environ["OPENAI_API_KEY"] = "your-api-key"
os.environ["COHERE_API_KEY"] = "your-api-key"
message = ChatMessage(role="user", content="Hey! how's it going?")
llm = LiteLLM("gpt-3.5-turbo")
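# Minimal usage sketch (assumed continuation): send the ChatMessage defined above through LiteLLM.
chat_response = llm.chat([message])
print(chat_response)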
get_ipython().system('pip install llama-index')
get_ipython().system('pip install duckdb')
get_ipython().system('pip install llama-index-vector-stores-duckdb')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.duckdb import DuckDBVectorStore
from llama_index.core import StorageContext
from IPython.display import Markdown, display
import os
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("data/paul_graham/").load_data()
vector_store = DuckDBVectorStore()
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
documents = SimpleDirectoryReader("data/paul_graham/").load_data()
vector_store = DuckDBVectorStore("pg.duckdb", persist_dir="./persist/")
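# Sketch (assumed continuation): rebuild the index on top of the persistent DuckDB store.
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)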
get_ipython().run_line_magic('pip', 'install llama-index-readers-notion')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
get_ipython().system('pip install llama-index')
from llama_index.core import SummaryIndex
from llama_index.readers.notion import NotionPageReader
from IPython.display import Markdown, display
import os
integration_token = os.getenv("NOTION_INTEGRATION_TOKEN")
page_ids = ["<page_id>"]
documents = NotionPageReader(integration_token=integration_token).load_data(
page_ids=page_ids
)
index = SummaryIndex.from_documents(documents)
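# Sketch (assumed continuation): query the Notion-backed index; the query text is a placeholder.
query_engine = index.as_query_engine()
response = query_engine.query("<query_text>")
display(Markdown(f"<b>{response}</b>"))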
get_ipython().system('pip install llama-index')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.indices.query.query_transform import HyDEQueryTransform
from llama_index.core.query_engine import TransformQueryEngine
from IPython.display import Markdown, display
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_str = "what did paul graham do after going to RISD"
query_engine = index.as_query_engine()
response = query_engine.query(query_str)
display(Markdown(f"<b>{response}</b>"))
hyde = HyDEQueryTransform(include_original=True)
hyde_query_engine = TransformQueryEngine(query_engine, hyde)
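# Sketch (assumed continuation): run the same query through the HyDE-transformed engine.
response = hyde_query_engine.query(query_str)
display(Markdown(f"<b>{response}</b>"))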
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.readers.file import PDFReader
reader = PDFReader()
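# Sketch with a hypothetical file path: load a PDF into Document objects.
from pathlib import Path

docs = reader.load_data(file=Path("./data/example.pdf"))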
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import openai
import os
os.environ["OPENAI_API_KEY"] = "[You API key]"
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import pinecone
import os
api_key = os.environ["PINECONE_API_KEY"]
pinecone.init(api_key=api_key, environment="us-west1-gcp-free")
pinecone_index = pinecone.Index("quickstart")
pinecone_index.delete(deleteAll=True)
from llama_index.core import StorageContext
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core import VectorStoreIndex
vector_store = PineconeVectorStore(
pinecone_index=pinecone_index, namespace="wiki_cities"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
vector_index = VectorStoreIndex([], storage_context=storage_context)
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///:memory:", future=True)
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
metadata_obj.tables.keys()
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2930000, "country": "Canada"},
{"city_name": "Tokyo", "population": 13960000, "country": "Japan"},
{"city_name": "Berlin", "population": 3645000, "country": "Germany"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
with engine.connect() as connection:
cursor = connection.exec_driver_sql("SELECT * FROM city_stats")
print(cursor.fetchall())
get_ipython().system('pip install wikipedia')
from llama_index.readers.wikipedia import WikipediaReader
cities = ["Toronto", "Berlin", "Tokyo"]
wiki_docs = WikipediaReader().load_data(pages=cities)
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.postprocessor import LLMRerank
from llama_index.llms.openai import OpenAI
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'")
from llama_index.core import Settings
Settings.llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
Settings.chunk_overlap = 0
Settings.chunk_size = 128
documents = SimpleDirectoryReader(
input_files=["./data/10k/lyft_2021.pdf"]
).load_data()
index = VectorStoreIndex.from_documents(
documents,
)
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core import QueryBundle
import pandas as pd
from IPython.display import display, HTML
from copy import deepcopy
pd.set_option("display.max_colwidth", None)
def get_retrieved_nodes(
query_str, vector_top_k=10, reranker_top_n=3, with_reranker=False
):
    query_bundle = QueryBundle(query_str)
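    # Sketch of the rest of this helper, assumed from the standard LLMRerank example:
    # retrieve with a vector retriever, then optionally rerank the retrieved nodes.
    retriever = VectorIndexRetriever(
        index=index,
        similarity_top_k=vector_top_k,
    )
    retrieved_nodes = retriever.retrieve(query_bundle)
    if with_reranker:
        reranker = LLMRerank(top_n=reranker_top_n)
        retrieved_nodes = reranker.postprocess_nodes(
            retrieved_nodes, query_bundle
        )
    return retrieved_nodes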
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-tables-chain-of-table-base')
get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip')
get_ipython().system('unzip data.zip')
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/200-csv/3.csv")
df
from llama_index.packs.tables.chain_of_table.base import (
ChainOfTableQueryEngine,
serialize_table,
)
from llama_index.core.llama_pack import download_llama_pack
download_llama_pack(
"ChainOfTablePack",
"./chain_of_table_pack",
skip_load=True,
)
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4-1106-preview")
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
import pandas as pd
df = pd.read_csv("~/Downloads/WikiTableQuestions/csv/200-csv/11.csv")
df
query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True)
response = query_engine.query("Who won best Director in the 1972 Academy Awards?")
str(response.response)
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/200-csv/42.csv")
df
query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True)
response = query_engine.query("What was the precipitation in inches during June?")
str(response)
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline
prompt_str = """\
Here's a serialized table.
{serialized_table}
Given this table please answer the question: {question}
Answer: """
prompt = PromptTemplate(prompt_str)
prompt_c = prompt.as_query_component(partial={"serialized_table": serialize_table(df)})
qp = QueryPipeline(chain=[prompt_c, llm])
response = qp.run("What was the precipitation in inches during June?")
print(str(response))
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/203-csv/114.csv")
df
query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True)
response = query_engine.query("Which televised ABC game had the greatest attendance?")
print(str(response))
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline
prompt_str = """\
Here's a serialized table.
{serialized_table}
Given this table please answer the question: {question}
Answer: """
prompt = PromptTemplate(prompt_str)
prompt_c = prompt.as_query_component(partial={"serialized_table": serialize_table(df)})
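# Sketch (mirroring the earlier pipeline cell): run the prompt-plus-LLM pipeline on the same question.
qp = QueryPipeline(chain=[prompt_c, llm])
response = qp.run("Which televised ABC game had the greatest attendance?")
print(str(response))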
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
import os
os.environ[
"PINECONE_API_KEY"
] = "<Your Pinecone API key, from app.pinecone.io>"
from pinecone import Pinecone
from pinecone import ServerlessSpec
api_key = os.environ["PINECONE_API_KEY"]
pc = Pinecone(api_key=api_key)
try:
pc.create_index(
"quickstart-index",
dimension=1536,
metric="euclidean",
spec=ServerlessSpec(cloud="aws", region="us-west-2"),
)
except Exception as e:
print(e)
pass
pinecone_index = pc.Index("quickstart-index")
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
"theme": "Fiction",
"year": 2010,
},
),
TextNode(
text="To Kill a Mockingbird",
metadata={
"author": "Harper Lee",
"theme": "Fiction",
"year": 1960,
},
),
TextNode(
text="1984",
metadata={
"author": "George Orwell",
"theme": "Totalitarianism",
"year": 1949,
},
),
TextNode(
text="The Great Gatsby",
metadata={
"author": "F. Scott Fitzgerald",
"theme": "The American Dream",
"year": 1925,
},
),
TextNode(
text="Harry Potter and the Sorcerer's Stone",
metadata={
"author": "J.K. Rowling",
"theme": "Fiction",
"year": 1997,
},
),
]
vector_store = PineconeVectorStore(
pinecone_index=pinecone_index,
namespace="test",
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.core.retrievers import VectorIndexAutoRetriever
from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
vector_store_info = VectorStoreInfo(
content_info="famous books and movies",
metadata_info=[
MetadataInfo(
name="director",
type="str",
description=("Name of the director"),
),
MetadataInfo(
name="theme",
type="str",
description=("Theme of the book/movie"),
),
MetadataInfo(
name="year",
type="int",
description=("Year of the book/movie"),
),
],
)
retriever = VectorIndexAutoRetriever(
index,
vector_store_info=vector_store_info,
empty_query_top_k=10,
default_empty_query_vector=[0] * 1536,
verbose=True,
)
nodes = retriever.retrieve(
"Tell me about some books/movies after the year 2000"
)
for node in nodes:
print(node.text)
print(node.metadata)
nodes = retriever.retrieve("Tell me about some books that are Fiction")
for node in nodes:
print(node.text)
print(node.metadata)
from llama_index.core.vector_stores import MetadataFilters
filter_dicts = [{"key": "year", "operator": "==", "value": 1997}]
filters = MetadataFilters.from_dicts(filter_dicts)
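# Sketch (assumed continuation): apply the year filter directly when retrieving.
filter_retriever = index.as_retriever(filters=filters)
nodes = filter_retriever.retrieve("Tell me about some books that came out in 1997")
for node in nodes:
    print(node.text)
    print(node.metadata)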
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-storage-kvstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
from llama_index.storage.kvstore.firestore import FirestoreKVStore
from llama_index.storage.docstore.firestore import FirestoreDocumentStore
from llama_index.storage.index_store.firestore import FirestoreIndexStore
kvstore = FirestoreKVStore()
storage_context = StorageContext.from_defaults(
docstore=FirestoreDocumentStore(kvstore),
index_store=FirestoreIndexStore(kvstore),
)
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
nodes, storage_context=storage_context
)
len(storage_context.docstore.docs)
storage_context.persist()
list_id = summary_index.index_id
vector_id = vector_index.index_id
keyword_id = keyword_table_index.index_id
from llama_index.core import load_index_from_storage
kvstore = FirestoreKVStore()
storage_context = StorageContext.from_defaults(
docstore=FirestoreDocumentStore(kvstore),
    index_store=FirestoreIndexStore(kvstore),
)
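# Sketch (assumed continuation): reload the three indices from Firestore by their IDs.
summary_index = load_index_from_storage(
    storage_context=storage_context, index_id=list_id
)
vector_index = load_index_from_storage(
    storage_context=storage_context, index_id=vector_id
)
keyword_table_index = load_index_from_storage(
    storage_context=storage_context, index_id=keyword_id
)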
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-rankgpt-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-packs-infer-retrieve-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import datasets
dataset = datasets.load_dataset("BioDEX/BioDEX-ICSR")
dataset
from llama_index.core import get_tokenizer
import re
from typing import Set, List
tokenizer = get_tokenizer()
sample_size = 5
def get_reactions_row(raw_target: str) -> List[str]:
"""Get reactions from a single row."""
reaction_pattern = re.compile(r"reactions:\s*(.*)")
reaction_match = reaction_pattern.search(raw_target)
if reaction_match:
reactions = reaction_match.group(1).split(",")
reactions = [r.strip().lower() for r in reactions]
else:
reactions = []
return reactions
def get_reactions_set(dataset) -> Set[str]:
"""Get set of all reactions."""
reactions = set()
for data in dataset["train"]:
reactions.update(set(get_reactions_row(data["target"])))
return reactions
def get_samples(dataset, sample_size: int = 5):
"""Get processed sample.
Contains source text and also the reaction label.
Parse reaction text to specifically extract reactions.
"""
samples = []
for idx, data in enumerate(dataset["train"]):
if idx >= sample_size:
break
text = data["fulltext_processed"]
raw_target = data["target"]
reactions = get_reactions_row(raw_target)
samples.append({"text": text, "reactions": reactions})
return samples
from llama_index.packs.infer_retrieve_rerank import InferRetrieveRerankPack
from llama_index.core.llama_pack import download_llama_pack
InferRetrieveRerankPack = download_llama_pack(
"InferRetrieveRerankPack",
"./irr_pack",
)
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-16k")
pred_context = """\
The output predictions should be a list of comma-separated adverse \
drug reactions. \
"""
reranker_top_n = 10
pack = InferRetrieveRerankPack(
get_reactions_set(dataset),
llm=llm,
pred_context=pred_context,
reranker_top_n=reranker_top_n,
verbose=True,
)
samples = get_samples(dataset, sample_size=5)
pred_reactions = pack.run(inputs=[s["text"] for s in samples])
gt_reactions = [s["reactions"] for s in samples]
pred_reactions[2]
gt_reactions[2]
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.llms import LLM
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.postprocessor.rankgpt_rerank import RankGPTRerank
from llama_index.core.output_parsers import ChainableOutputParser
from typing import List
import random
all_reactions = get_reactions_set(dataset)
random.sample(list(all_reactions), 5)
from llama_index.core.schema import TextNode
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core import VectorStoreIndex
reaction_nodes = [TextNode(text=r) for r in all_reactions]
pipeline = IngestionPipeline(transformations=[OpenAIEmbedding()])
reaction_nodes = await pipeline.arun(documents=reaction_nodes)
index = VectorStoreIndex(reaction_nodes)
reaction_nodes[0].embedding
reaction_retriever = index.as_retriever(similarity_top_k=2)
nodes = reaction_retriever.retrieve("abdominal")
print([n.get_content() for n in nodes])
infer_prompt_str = """\
Your job is to output a list of predictions given context from a given piece of text. The text context,
and information regarding the set of valid predictions is given below.
Return the predictions as a comma-separated list of strings.
Text Context:
{doc_context}
Prediction Info:
{pred_context}
Predictions: """
infer_prompt = PromptTemplate(infer_prompt_str)
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
from llama_index.core.llama_dataset import (
LabelledRagDataExample,
CreatedByType,
CreatedBy,
)
query = "This is a test query, is it not?"
query_by = CreatedBy(type=CreatedByType.AI, model_name="gpt-4")
reference_answer = "Yes it is."
reference_answer_by = CreatedBy(type=CreatedByType.HUMAN)
reference_contexts = ["This is a sample context"]
rag_example = LabelledRagDataExample(
query=query,
query_by=query_by,
reference_contexts=reference_contexts,
reference_answer=reference_answer,
reference_answer_by=reference_answer_by,
)
print(rag_example.json())
LabelledRagDataExample.parse_raw(rag_example.json())
rag_example.dict()
LabelledRagDataExample.parse_obj(rag_example.dict())
query = "This is a test query, is it so?"
reference_answer = "I think yes, it is."
reference_contexts = ["This is a second sample context"]
rag_example_2 = LabelledRagDataExample(
query=query,
query_by=query_by,
reference_contexts=reference_contexts,
reference_answer=reference_answer,
reference_answer_by=reference_answer_by,
)
from llama_index.core.llama_dataset import LabelledRagDataset
rag_dataset = LabelledRagDataset(examples=[rag_example, rag_example_2])
rag_dataset.to_pandas()
rag_dataset.save_json("rag_dataset.json")
reload_rag_dataset = LabelledRagDataset.from_json("rag_dataset.json")
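# Sketch: inspect the reloaded dataset to confirm the JSON round trip.
reload_rag_dataset.to_pandas()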
import warnings
warnings.filterwarnings("ignore")
import os
from llama_index.tools.cogniswitch import CogniswitchToolSpec
from llama_index.agent import ReActAgent
toolspec = CogniswitchToolSpec(cs_token=cs_token, apiKey=oauth_token)
tool_lst = toolspec.to_tool_list()
agent = ReActAgent.from_tools(tool_lst)
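# Sketch with a hypothetical prompt: chat with the CogniSwitch-backed agent.
response = agent.chat("Tell me about the documents stored in CogniSwitch")
print(response)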
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import json
from typing import Sequence, List
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
from llama_index.agent.openai import OpenAIAgent
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
def useless_tool() -> str:
    """This is a useless tool."""
    return "This is a useless output."

useless_tool = FunctionTool.from_defaults(fn=useless_tool)
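# Sketch (assumed continuation): hand both tools to an OpenAI agent and force the add tool.
agent = OpenAIAgent.from_tools(
    [useless_tool, add_tool], llm=OpenAI(model="gpt-3.5-turbo"), verbose=True
)
response = agent.chat("What is 5 + 2?", tool_choice="add")
print(response)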
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
from llama_index.core.storage.docstore import SimpleDocumentStore
docstore = SimpleDocumentStore()
docstore.add_documents(nodes)
from llama_index.core import StorageContext
storage_context = StorageContext.from_defaults(docstore=docstore)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
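# Sketch (mirroring the Firestore example above): build the other two indices over the same docstore.
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
    nodes, storage_context=storage_context
)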
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-langchain')
get_ipython().system('pip install llama-index')
from langchain.embeddings import HuggingFaceEmbeddings
from llama_index.embeddings.langchain import LangchainEmbedding
lc_embed_model = HuggingFaceEmbeddings(
model_name="sentence-transformers/all-mpnet-base-v2"
)
embed_model = LangchainEmbedding(lc_embed_model)
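# Sketch: embed a piece of text with the wrapped LangChain model.
embedding = embed_model.get_text_embedding("Hello, world!")
print(len(embedding))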
get_ipython().run_line_magic('pip', 'install llama-index-readers-pathway')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install pathway')
get_ipython().system('pip install llama-index')
get_ipython().system("mkdir -p 'data/'")
get_ipython().system("wget 'https://gist.githubusercontent.com/janchorowski/dd22a293f3d99d1b726eedc7d46d2fc0/raw/pathway_readme.md' -O 'data/pathway_readme.md'")
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import getpass
import os
if "OPENAI_API_KEY" not in os.environ:
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
import pathway as pw
data_sources = []
data_sources.append(
pw.io.fs.read(
"./data",
format="binary",
mode="streaming",
with_metadata=True,
) # This creates a `pathway` connector that tracks
)
from llama_index.core.retrievers import PathwayVectorServer
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.node_parser import TokenTextSplitter
embed_model = OpenAIEmbedding(embed_batch_size=10)
transformations_example = [
TokenTextSplitter(
chunk_size=150,
chunk_overlap=10,
separator=" ",
),
embed_model,
]
processing_pipeline = PathwayVectorServer(
*data_sources,
transformations=transformations_example,
)
PATHWAY_HOST = "127.0.0.1"
PATHWAY_PORT = 8754
processing_pipeline.run_server(
host=PATHWAY_HOST, port=PATHWAY_PORT, with_cache=False, threaded=True
)
from llama_index.readers.pathway import PathwayReader
reader = PathwayReader(host=PATHWAY_HOST, port=PATHWAY_PORT)
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
from llama_index.core.llama_dataset import (
LabelledRagDataExample,
CreatedByType,
CreatedBy,
)
query = "This is a test query, is it not?"
query_by = CreatedBy(type=CreatedByType.AI, model_name="gpt-4")
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-colbert')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google')
get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google')
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"')
get_ipython().run_line_magic('pip', 'install torch sentence-transformers')
get_ipython().run_line_magic('pip', 'install google-auth-oauthlib')
from google.oauth2 import service_account
from llama_index.indices.managed.google import GoogleIndex
from llama_index.vector_stores.google import set_google_config
credentials = service_account.Credentials.from_service_account_file(
"service_account_key.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
project_name = "TODO-your-project-name" # @param {type:"string"}
email = "[email protected]" # @param {type:"string"}
client_file_name = "client_secret.json"
get_ipython().system('gcloud config set project $project_name')
get_ipython().system('gcloud config set account $email')
get_ipython().system('gcloud auth application-default login --no-browser --client-id-file=$client_file_name --scopes="https://www.googleapis.com/auth/generative-language.retriever,https://www.googleapis.com/auth/cloud-platform"')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
GOOGLE_API_KEY = "" # add your GOOGLE API key here
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
from llama_index.core import SimpleDirectoryReader
from llama_index.indices.managed.google import GoogleIndex
google_index = GoogleIndex.create_corpus(display_name="My first corpus!")
print(f"Newly created corpus ID is {google_index.corpus_id}.")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
google_index.insert_documents(documents)
google_index = GoogleIndex.from_corpus(corpus_id="")
query_engine = google_index.as_query_engine()
response = query_engine.query("which program did this author attend?")
print(response)
from llama_index.core.response.notebook_utils import display_source_node
for r in response.source_nodes:
display_source_node(r, source_length=1000)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
)
query_engine = google_index.as_query_engine(
temperature=0.3,
answer_style=GenerateAnswerRequest.AnswerStyle.VERBOSE,
)
response = query_engine.query("Which program did this author attend?")
print(response)
from llama_index.core.response.notebook_utils import display_source_node
for r in response.source_nodes:
display_source_node(r, source_length=1000)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
)
query_engine = google_index.as_query_engine(
temperature=0.3,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
response = query_engine.query("Which program did this author attend?")
print(response)
from llama_index.core.response.notebook_utils import display_source_node
for r in response.source_nodes:
display_source_node(r, source_length=1000)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
)
query_engine = google_index.as_query_engine(
temperature=0.3,
answer_style=GenerateAnswerRequest.AnswerStyle.EXTRACTIVE,
)
response = query_engine.query("Which program did this author attend?")
print(response)
from llama_index.core.response.notebook_utils import display_source_node
for r in response.source_nodes:
display_source_node(r, source_length=1000)
from llama_index.response_synthesizers.google import GoogleTextSynthesizer
from llama_index.vector_stores.google import GoogleVectorStore
from llama_index.core import VectorStoreIndex
from llama_index.llms.gemini import Gemini
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.embeddings.gemini import GeminiEmbedding
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.7, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE
)
reranker = LLMRerank(
top_n=5,
llm=Gemini(api_key=GOOGLE_API_KEY),
)
retriever = google_index.as_retriever(similarity_top_k=5)
query_engine = RetrieverQueryEngine.from_args(
retriever=retriever,
response_synthesizer=response_synthesizer,
node_postprocessors=[reranker],
)
response = query_engine.query("Which program did this author attend?")
print(response.response)
from llama_index.core.postprocessor import SentenceTransformerRerank
sbert_rerank = SentenceTransformerRerank(
model="cross-encoder/ms-marco-MiniLM-L-2-v2", top_n=5
)
from llama_index.response_synthesizers.google import GoogleTextSynthesizer
from llama_index.vector_stores.google import GoogleVectorStore
from llama_index.core import VectorStoreIndex
from llama_index.llms.gemini import Gemini
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.embeddings.gemini import GeminiEmbedding
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.1, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE
)
retriever = google_index.as_retriever(similarity_top_k=5)
query_engine = RetrieverQueryEngine.from_args(
retriever=retriever,
response_synthesizer=response_synthesizer,
node_postprocessors=[sbert_rerank],
)
response = query_engine.query("Which program did this author attend?")
print(response.response)
import os
OPENAI_API_TOKEN = "sk-"
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core import Settings
import qdrant_client
Settings.chunk_size = 256
client = qdrant_client.QdrantClient(path="qdrant_retrieval_2")
vector_store = QdrantVectorStore(client=client, collection_name="collection")
qdrant_index = VectorStoreIndex.from_documents(documents)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
query_engine = qdrant_index.as_query_engine()
response = query_engine.query("Which program did this author attend?")
print(response)
for r in response.source_nodes:
display_source_node(r, source_length=1000)
query_engine = qdrant_index.as_query_engine()
response = query_engine.query(
"Which universities or schools or programs did this author attend?"
)
print(response)
from llama_index.core import get_response_synthesizer
reranker = LLMRerank(top_n=3)
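# Sketch (assumed continuation): wire the reranker into a retriever query engine over the Qdrant index.
retriever = qdrant_index.as_retriever(similarity_top_k=10)
query_engine = RetrieverQueryEngine.from_args(
    retriever=retriever,
    response_synthesizer=get_response_synthesizer(),
    node_postprocessors=[reranker],
)
response = query_engine.query(
    "Which universities or schools or programs did this author attend?"
)
print(response)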
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-experimental-param-tuner')
get_ipython().system('pip install llama-index llama-hub')
get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
import nest_asyncio
nest_asyncio.apply()
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.readers.file import UnstructuredReader
from llama_index.readers.file import PyMuPDFReader
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.core.schema import IndexNode
get_ipython().system('wget "https://www.dropbox.com/scl/fi/fh9vsmmm8vu0j50l3ss38/llama2_eval_qr_dataset.json?rlkey=kkoaez7aqeb4z25gzc06ak6kb&dl=1" -O data/llama2_eval_qr_dataset.json')
from llama_index.core.evaluation import QueryResponseDataset
eval_dataset = QueryResponseDataset.from_json(
"data/llama2_eval_qr_dataset.json"
)
eval_qs = eval_dataset.questions
ref_response_strs = [r for (_, r) in eval_dataset.qr_pairs]
from llama_index.core import (
VectorStoreIndex,
load_index_from_storage,
StorageContext,
)
from llama_index.experimental.param_tuner import ParamTuner
from llama_index.core.param_tuner.base import TunedResult, RunResult
from llama_index.core.evaluation.eval_utils import (
get_responses,
aget_responses,
)
from llama_index.core.evaluation import (
SemanticSimilarityEvaluator,
BatchEvalRunner,
)
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
import os
import numpy as np
from pathlib import Path
def _build_index(chunk_size, docs):
index_out_path = f"./storage_{chunk_size}"
if not os.path.exists(index_out_path):
Path(index_out_path).mkdir(parents=True, exist_ok=True)
node_parser = SimpleNodeParser.from_defaults(chunk_size=chunk_size)
base_nodes = node_parser.get_nodes_from_documents(docs)
index = VectorStoreIndex(base_nodes)
index.storage_context.persist(index_out_path)
else:
storage_context = StorageContext.from_defaults(
persist_dir=index_out_path
)
index = load_index_from_storage(
storage_context,
)
return index
def _get_eval_batch_runner():
evaluator_s = SemanticSimilarityEvaluator(embed_model=OpenAIEmbedding())
eval_batch_runner = BatchEvalRunner(
{"semantic_similarity": evaluator_s}, workers=2, show_progress=True
)
return eval_batch_runner
def objective_function(params_dict):
chunk_size = params_dict["chunk_size"]
docs = params_dict["docs"]
top_k = params_dict["top_k"]
eval_qs = params_dict["eval_qs"]
ref_response_strs = params_dict["ref_response_strs"]
index = _build_index(chunk_size, docs)
query_engine = index.as_query_engine(similarity_top_k=top_k)
pred_response_objs = get_responses(
eval_qs, query_engine, show_progress=True
)
eval_batch_runner = _get_eval_batch_runner()
eval_results = eval_batch_runner.evaluate_responses(
eval_qs, responses=pred_response_objs, reference=ref_response_strs
)
mean_score = np.array(
[r.score for r in eval_results["semantic_similarity"]]
).mean()
return RunResult(score=mean_score, params=params_dict)
async def aobjective_function(params_dict):
chunk_size = params_dict["chunk_size"]
docs = params_dict["docs"]
top_k = params_dict["top_k"]
eval_qs = params_dict["eval_qs"]
ref_response_strs = params_dict["ref_response_strs"]
index = _build_index(chunk_size, docs)
query_engine = index.as_query_engine(similarity_top_k=top_k)
pred_response_objs = await aget_responses(
eval_qs, query_engine, show_progress=True
)
eval_batch_runner = _get_eval_batch_runner()
eval_results = await eval_batch_runner.aevaluate_responses(
eval_qs, responses=pred_response_objs, reference=ref_response_strs
)
mean_score = np.array(
[r.score for r in eval_results["semantic_similarity"]]
).mean()
return RunResult(score=mean_score, params=params_dict)
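# Sketch (assumed continuation): run the tuner over a small grid of chunk sizes and top-k values.
param_dict = {"chunk_size": [256, 512, 1024], "top_k": [1, 2, 5]}
fixed_param_dict = {
    "docs": docs,
    "eval_qs": eval_qs[:10],
    "ref_response_strs": ref_response_strs[:10],
}
param_tuner = ParamTuner(
    param_fn=objective_function,
    param_dict=param_dict,
    fixed_param_dict=fixed_param_dict,
    show_progress=True,
)
results = param_tuner.tune()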
from llama_hub.semanticscholar.base import SemanticScholarReader
import os
import openai
from llama_index.llms import OpenAI
from llama_index.query_engine import CitationQueryEngine
from llama_index import (
VectorStoreIndex,
StorageContext,
load_index_from_storage,
ServiceContext,
)
from llama_index.response.notebook_utils import display_response
s2reader = SemanticScholarReader()
openai.api_key = os.environ["OPENAI_API_KEY"]
service_context = ServiceContext.from_defaults(
    llm=OpenAI(model="gpt-3.5-turbo", temperature=0)
)
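# Sketch (assumed continuation): fetch papers, build an index, and query with citations.
# The search string and limit below are illustrative.
documents = s2reader.load_data(query="large language models", limit=10)
index = VectorStoreIndex.from_documents(
    documents, service_context=service_context
)
query_engine = CitationQueryEngine.from_args(index, similarity_top_k=3)
response = query_engine.query("What are the limitations of large language models?")
display_response(response, show_source=True)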
get_ipython().system('pip install llama-index')
get_ipython().system('pip install duckdb')
get_ipython().system('pip install llama-index-vector-stores-duckdb')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.duckdb import DuckDBVectorStore
from llama_index.core import StorageContext
from IPython.display import Markdown, display
import os
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("data/paul_graham/").load_data()
vector_store = DuckDBVectorStore()
import os
import sys
import logging
from dotenv import load_dotenv
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
logger = logging.getLogger(__name__)
load_dotenv() # take environment variables from .env.
logger.debug(f"NewRelic application: {os.getenv('NEW_RELIC_APP_NAME')}")
import os
from time import time
from nr_openai_observability import monitor
from llama_index import VectorStoreIndex, download_loader
if os.getenv("NEW_RELIC_APP_NAME") and os.getenv("NEW_RELIC_LICENSE_KEY"):
monitor.initialization(application_name=os.getenv("NEW_RELIC_APP_NAME"))
RayyanReader = download_loader("RayyanReader")
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().system('pip install llama-index qdrant_client')
import qdrant_client
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.qdrant import QdrantVectorStore
client = qdrant_client.QdrantClient(
location=":memory:"
)
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
"theme": "Fiction",
"year": 2010,
},
),
TextNode(
text="To Kill a Mockingbird",
metadata={
"author": "Harper Lee",
"theme": "Mafia",
"year": 1960,
},
),
TextNode(
text="1984",
metadata={
"author": "George Orwell",
"theme": "Totalitarianism",
"year": 1949,
},
),
TextNode(
text="The Great Gatsby",
metadata={
"author": "F. Scott Fitzgerald",
"theme": "The American Dream",
"year": 1925,
},
),
TextNode(
text="Harry Potter and the Sorcerer's Stone",
metadata={
"author": "J.K. Rowling",
"theme": "Fiction",
"year": 1997,
},
),
]
import os
from llama_index.core import StorageContext
os.environ["OPENAI_API_KEY"] = "sk-..."
vector_store = QdrantVectorStore(
client=client, collection_name="test_collection_1"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.core.vector_stores import (
MetadataFilter,
MetadataFilters,
FilterOperator,
)
filters = MetadataFilters(
filters=[
MetadataFilter(key="theme", operator=FilterOperator.EQ, value="Mafia"),
]
)
retriever = index.as_retriever(filters=filters)
retriever.retrieve("What is inception about?")
from llama_index.core.vector_stores import FilterOperator, FilterCondition
filters = MetadataFilters(
filters=[
MetadataFilter(key="theme", value="Fiction"),
        MetadataFilter(key="year", value=1997, operator=FilterOperator.GT),
    ],
    condition=FilterCondition.AND,
)
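# Sketch (mirroring the first filter example): retrieve with both filters applied.
retriever = index.as_retriever(filters=filters)
retriever.retrieve("Harry Potter?")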
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-tables-chain-of-table-base')
get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip')
get_ipython().system('unzip data.zip')
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/200-csv/3.csv")
df
from llama_index.packs.tables.chain_of_table.base import (
ChainOfTableQueryEngine,
serialize_table,
)
from llama_index.core.llama_pack import download_llama_pack
download_llama_pack(
"ChainOfTablePack",
"./chain_of_table_pack",
skip_load=True,
)
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4-1106-preview")
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
import pandas as pd
df = pd.read_csv("~/Downloads/WikiTableQuestions/csv/200-csv/11.csv")
df
query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True)
response = query_engine.query("Who won best Director in the 1972 Academy Awards?")
str(response.response)
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/200-csv/42.csv")
df
query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True)
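# Sketch (mirroring the earlier chain-of-table cell): query the precipitation table.
response = query_engine.query("What was the precipitation in inches during June?")
print(str(response))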
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt' -O pg_essay.txt")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader(input_files=["pg_essay.txt"])
documents = reader.load_data()
from llama_index.core.query_pipeline import (
QueryPipeline,
InputComponent,
ArgPackComponent,
)
from typing import Dict, Any, List, Optional
from llama_index.core.llama_pack import BaseLlamaPack
from llama_index.core.llms import LLM
from llama_index.llms.openai import OpenAI
from llama_index.core import Document, VectorStoreIndex
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.schema import NodeWithScore, TextNode
from llama_index.core.node_parser import SentenceSplitter
llm = OpenAI(model="gpt-3.5-turbo")
chunk_sizes = [128, 256, 512, 1024]
query_engines = {}
for chunk_size in chunk_sizes:
splitter = SentenceSplitter(chunk_size=chunk_size, chunk_overlap=0)
nodes = splitter.get_nodes_from_documents(documents)
vector_index = VectorStoreIndex(nodes)
query_engines[str(chunk_size)] = vector_index.as_query_engine(llm=llm)
p = QueryPipeline(verbose=True)
module_dict = {
**query_engines,
"input": InputComponent(),
"summarizer": | TreeSummarize() | llama_index.core.response_synthesizers.TreeSummarize |
import openai
openai.api_key = "sk-your-key"
import json
from graphql import parse
with open("data/shopify_graphql.txt", "r") as f:
txt = f.read()
ast = parse(txt)
query_root_node = next(
(
defn
for defn in ast.definitions
if defn.kind == "object_type_definition" and defn.name.value == "QueryRoot"
)
)
query_roots = [field.name.value for field in query_root_node.fields]
print(query_roots)
from llama_index.file.sdl.base import SDLReader
from llama_index.tools.ondemand_loader_tool import OnDemandLoaderTool
documentation_tool = OnDemandLoaderTool.from_defaults(
    SDLReader(),
)
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-epsilla')
get_ipython().system('pip install pyepsilla')
get_ipython().system('pip install llama-index')
import logging
import sys
from llama_index.core import SimpleDirectoryReader, Document, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.epsilla import EpsillaVectorStore
import textwrap
import openai
import getpass
OPENAI_API_KEY = getpass.getpass("OpenAI API Key:")
openai.api_key = OPENAI_API_KEY
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(f"Total documents: {len(documents)}")
print(f"First document, id: {documents[0].doc_id}")
print(f"First document, hash: {documents[0].hash}")
from pyepsilla import vectordb
client = vectordb.Client()
vector_store = EpsillaVectorStore(client=client, db_path="/tmp/llamastore")
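# Sketch (assumed continuation): build an index on top of the Epsilla store and query it.
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(textwrap.fill(str(response), 100))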
import openai
openai.api_key = "sk-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools.code_interpreter.base import CodeInterpreterToolSpec
code_spec = CodeInterpreterToolSpec()
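# Sketch (assumed continuation): expose the code interpreter tools to an OpenAI agent.
tools = code_spec.to_tool_list()
agent = OpenAIAgent.from_tools(tools, verbose=True)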
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-milvus')
get_ipython().system(' pip install llama-index')
import logging
import sys
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Document
from llama_index.vector_stores.milvus import MilvusVectorStore
from IPython.display import Markdown, display
import textwrap
import openai
openai.api_key = "sk-"
get_ipython().system(" mkdir -p 'data/paul_graham/'")
get_ipython().system(" wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print("Document ID:", documents[0].doc_id)
from llama_index.core import StorageContext
vector_store = MilvusVectorStore(dim=1536, overwrite=True)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author learn?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("What was a hard moment for the author?")
print(textwrap.fill(str(response), 100))
vector_store = MilvusVectorStore(dim=1536, overwrite=True)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
[Document(text="The number that is being searched for is ten.")],
storage_context,
)
query_engine = index.as_query_engine()
res = query_engine.query("Who is the author?")
print("Res:", res)
del index, vector_store, storage_context, query_engine
vector_store = MilvusVectorStore(overwrite=False)
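# Sketch (assumed continuation): reconnect without overwriting and confirm the old data is still there.
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
res = query_engine.query("What is the number?")
print("Res:", res)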
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere')
get_ipython().system('pip install llama-index')
from llama_index.llms.cohere import Cohere
api_key = "Your api key"
resp = Cohere(api_key=api_key).complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.cohere import Cohere
messages = [
ChatMessage(role="user", content="hello there"),
ChatMessage(
role="assistant", content="Arrrr, matey! How can I help ye today?"
),
ChatMessage(role="user", content="What is your name"),
]
resp = Cohere(api_key=api_key).chat(
messages, preamble_override="You are a pirate with a colorful personality"
)
print(resp)
from llama_index.llms.openai import OpenAI
llm = Cohere(api_key=api_key)
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
print(r.delta, end="")
from llama_index.llms.openai import OpenAI
llm = Cohere(api_key=api_key)
messages = [
ChatMessage(role="user", content="hello there"),
ChatMessage(
role="assistant", content="Arrrr, matey! How can I help ye today?"
),
ChatMessage(role="user", content="What is your name"),
]
resp = llm.stream_chat(
messages, preamble_override="You are a pirate with a colorful personality"
)
for r in resp:
print(r.delta, end="")
from llama_index.llms.cohere import Cohere
llm = Cohere(model="command", api_key=api_key)
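# Sketch: complete a prompt with the explicitly selected "command" model.
resp = llm.complete("Paul Graham is ")
print(resp)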
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
get_ipython().system('pip install llama_hub')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
from llama_index.core import Document
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
docs0 = PyMuPDFReader().load_data(file_path=Path("./data/llama2.pdf"))
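# Sketch (mirroring the earlier llama2 cell): consolidate the pages into a single Document.
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]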
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-graph-stores-kuzu')
import os
os.environ["OPENAI_API_KEY"] = "API_KEY_HERE"
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
import shutil
shutil.rmtree("./test1", ignore_errors=True)
shutil.rmtree("./test2", ignore_errors=True)
shutil.rmtree("./test3", ignore_errors=True)
get_ipython().run_line_magic('pip', 'install kuzu')
import kuzu
db = kuzu.Database("test1")
from llama_index.graph_stores.kuzu import KuzuGraphStore
graph_store = KuzuGraphStore(db)
from llama_index.core import SimpleDirectoryReader, KnowledgeGraphIndex
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
from IPython.display import Markdown, display
import kuzu
documents = SimpleDirectoryReader(
"../../../../examples/paul_graham_essay/data"
).load_data()
llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
Settings.llm = llm
Settings.chunk_size = 512
from llama_index.core import StorageContext
storage_context = StorageContext.from_defaults(graph_store=graph_store)
index = KnowledgeGraphIndex.from_documents(
documents,
max_triplets_per_chunk=2,
storage_context=storage_context,
)
query_engine = index.as_query_engine(
include_text=False, response_mode="tree_summarize"
)
response = query_engine.query(
"Tell me more about Interleaf",
)
display(Markdown(f"<b>{response}</b>"))
query_engine = index.as_query_engine(
include_text=True, response_mode="tree_summarize"
)
response = query_engine.query(
"Tell me more about Interleaf",
)
display(Markdown(f"<b>{response}</b>"))
db = kuzu.Database("test2")
graph_store = KuzuGraphStore(db)
storage_context = StorageContext.from_defaults(graph_store=graph_store)
new_index = KnowledgeGraphIndex.from_documents(
documents,
max_triplets_per_chunk=2,
storage_context=storage_context,
include_embeddings=True,
)
rel_map = graph_store.get_rel_map()
query_engine = index.as_query_engine(
include_text=True,
response_mode="tree_summarize",
embedding_mode="hybrid",
similarity_top_k=5,
)
response = query_engine.query(
"Tell me more about what the author worked on at Interleaf",
)
display(Markdown(f"<b>{response}</b>"))
get_ipython().run_line_magic('pip', 'install pyvis')
from pyvis.network import Network
g = index.get_networkx_graph()
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(g)
net.show("kuzugraph_draw.html")
from llama_index.core.node_parser import SentenceSplitter
node_parser = SentenceSplitter()
nodes = node_parser.get_nodes_from_documents(documents)
db = kuzu.Database("test3")
graph_store = KuzuGraphStore(db)
from llama_index.core import SQLDatabase
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///chinook.db")
sql_database = SQLDatabase(engine)
from llama_index.core.query_pipeline import QueryPipeline
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip')
get_ipython().system('unzip ./chinook.zip')
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
callback_manager = CallbackManager()
Settings.callback_manager = callback_manager
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.tools import QueryEngineTool
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["albums", "tracks", "artists"],
verbose=True,
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=sql_query_engine,
name="sql_tool",
description=(
"Useful for translating a natural language query into a SQL query"
),
)
from llama_index.core.query_pipeline import QueryPipeline as QP
qp = QP(verbose=True)
from llama_index.core.agent.react.types import (
ActionReasoningStep,
ObservationReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.agent import Task, AgentChatResponse
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
CustomAgentComponent,
QueryComponent,
ToolRunnerComponent,
)
from llama_index.core.llms import MessageRole
from typing import Dict, Any, Optional, Tuple, List, cast
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]:
"""Agent input function.
Returns:
A Dictionary of output keys and values. If you are specifying
src_key when defining links between this component and other
components, make sure the src_key matches the specified output_key.
"""
if "current_reasoning" not in state:
state["current_reasoning"] = []
reasoning_step = ObservationReasoningStep(observation=task.input)
state["current_reasoning"].append(reasoning_step)
return {"input": task.input}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core.agent import ReActChatFormatter
from llama_index.core.query_pipeline import InputComponent, Link
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool
def react_prompt_fn(
task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool]
) -> List[ChatMessage]:
chat_formatter = ReActChatFormatter()
return chat_formatter.format(
tools,
chat_history=task.memory.get() + state["memory"].get_all(),
current_reasoning=state["current_reasoning"],
)
react_prompt_component = AgentFnComponent(
fn=react_prompt_fn, partial_dict={"tools": [sql_tool]}
)
from typing import Set, Optional
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.llms import ChatResponse
from llama_index.core.agent.types import Task
def parse_react_output_fn(
task: Task, state: Dict[str, Any], chat_response: ChatResponse
):
"""Parse ReAct output into a reasoning step."""
output_parser = ReActOutputParser()
reasoning_step = output_parser.parse(chat_response.message.content)
return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step}
parse_react_output = AgentFnComponent(fn=parse_react_output_fn)
def run_tool_fn(
task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep
):
"""Run tool and process tool output."""
tool_runner_component = ToolRunnerComponent(
[sql_tool], callback_manager=task.callback_manager
)
tool_output = tool_runner_component.run_component(
tool_name=reasoning_step.action,
tool_input=reasoning_step.action_input,
)
observation_step = ObservationReasoningStep(observation=str(tool_output))
state["current_reasoning"].append(observation_step)
return {"response_str": observation_step.get_content(), "is_done": False}
run_tool = AgentFnComponent(fn=run_tool_fn)
def process_response_fn(
task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep
):
"""Process response."""
state["current_reasoning"].append(response_step)
response_str = response_step.response
state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER))
state["memory"].put(
ChatMessage(content=response_str, role=MessageRole.ASSISTANT)
)
return {"response_str": response_str, "is_done": True}
process_response = AgentFnComponent(fn=process_response_fn)
def process_agent_response_fn(
task: Task, state: Dict[str, Any], response_dict: dict
):
"""Process agent response."""
return (
AgentChatResponse(response_dict["response_str"]),
response_dict["is_done"],
)
process_agent_response = AgentFnComponent(fn=process_agent_response_fn)
from llama_index.core.query_pipeline import QueryPipeline as QP
from llama_index.llms.openai import OpenAI
qp.add_modules(
{
"agent_input": agent_input_component,
"react_prompt": react_prompt_component,
"llm": OpenAI(model="gpt-4-1106-preview"),
"react_output_parser": parse_react_output,
"run_tool": run_tool,
"process_response": process_response,
"process_agent_response": process_agent_response,
}
)
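# Wire the ReAct loop as a DAG: input -> prompt -> LLM -> output parsing, then
# branch on whether the parsed step is a tool call or a final response.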
qp.add_chain(["agent_input", "react_prompt", "llm", "react_output_parser"])
qp.add_link(
"react_output_parser",
"run_tool",
condition_fn=lambda x: not x["done"],
input_fn=lambda x: x["reasoning_step"],
)
qp.add_link(
"react_output_parser",
"process_response",
condition_fn=lambda x: x["done"],
input_fn=lambda x: x["reasoning_step"],
)
qp.add_link("process_response", "process_agent_response")
qp.add_link("run_tool", "process_agent_response")
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(qp.clean_dag)
net.show("agent_dag.html")
from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner
from llama_index.core.callbacks import CallbackManager
agent_worker = QueryPipelineAgentWorker(qp)
agent = AgentRunner(
agent_worker, callback_manager= | CallbackManager([]) | llama_index.core.callbacks.CallbackManager |
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from llama_index.core import SummaryIndex
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
from llama_index.core import Settings
Settings.chunk_size = 1024
nodes = | Settings.node_parser.get_nodes_from_documents(documents) | llama_index.core.Settings.node_parser.get_nodes_from_documents |
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "./llama2.pdf"')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/1706.03762.pdf" -O "./attention.pdf"')
from llama_index.core import download_loader
from llama_index.readers.file import PyMuPDFReader
llama2_docs = PyMuPDFReader().load_data(
file_path="./llama2.pdf", metadata=True
)
attention_docs = PyMuPDFReader().load_data(
file_path="./attention.pdf", metadata=True
)
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.core.node_parser import TokenTextSplitter
nodes = TokenTextSplitter(
chunk_size=1024, chunk_overlap=128
).get_nodes_from_documents(llama2_docs + attention_docs)
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.storage.docstore.redis import RedisDocumentStore
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.storage.docstore.firestore import FirestoreDocumentStore
from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore
docstore = SimpleDocumentStore()
docstore.add_documents(nodes)
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
client = QdrantClient(path="./qdrant_data")
vector_store = QdrantVectorStore("composable", client=client)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes=nodes, storage_context=storage_context)
vector_retriever = index.as_retriever(similarity_top_k=2)
bm25_retriever = BM25Retriever.from_defaults(
docstore=docstore, similarity_top_k=2
)
from llama_index.core.schema import IndexNode
vector_obj = IndexNode(
index_id="vector", obj=vector_retriever, text="Vector Retriever"
)
bm25_obj = IndexNode(
index_id="bm25", obj=bm25_retriever, text="BM25 Retriever"
)
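# Wrapping each retriever in an IndexNode makes it a composable object, so the
# SummaryIndex below can route a query through both retrievers and summarize.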
from llama_index.core import SummaryIndex
summary_index = SummaryIndex(objects=[vector_obj, bm25_obj])
query_engine = summary_index.as_query_engine(
response_mode="tree_summarize", verbose=True
)
response = await query_engine.aquery(
"How does attention work in transformers?"
)
print(str(response))
response = await query_engine.aquery(
"What is the architecture of Llama2 based on?"
)
print(str(response))
response = await query_engine.aquery(
"What was used before attention in transformers?"
)
print(str(response))
docstore.persist("./docstore.json")
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
docstore = SimpleDocumentStore.from_persist_path("./docstore.json")
client = QdrantClient(path="./qdrant_data")
vector_store = | QdrantVectorStore("composable", client=client) | llama_index.vector_stores.qdrant.QdrantVectorStore |
get_ipython().system('pip install llama-index-llms-dashscope')
get_ipython().run_line_magic('env', 'DASHSCOPE_API_KEY=YOUR_DASHSCOPE_API_KEY')
import os
os.environ["DASHSCOPE_API_KEY"] = "YOUR_DASHSCOPE_API_KEY"
from llama_index.llms.dashscope import DashScope, DashScopeGenerationModels
dashscope_llm = DashScope(model_name=DashScopeGenerationModels.QWEN_MAX)
resp = dashscope_llm.complete("How to make cake?")
print(resp)
responses = dashscope_llm.stream_complete("How to make cake?")
for response in responses:
print(response.delta, end="")
from llama_index.core.base.llms.types import MessageRole, ChatMessage
messages = [
ChatMessage(
role=MessageRole.SYSTEM, content="You are a helpful assistant."
),
ChatMessage(role=MessageRole.USER, content="How to make cake?"),
]
resp = dashscope_llm.chat(messages)
print(resp)
responses = dashscope_llm.stream_chat(messages)
for response in responses:
print(response.delta, end="")
messages = [
ChatMessage(
role=MessageRole.SYSTEM, content="You are a helpful assistant."
),
ChatMessage(role=MessageRole.USER, content="How to make cake?"),
]
resp = dashscope_llm.chat(messages)
print(resp)
messages.append(
ChatMessage(role=MessageRole.ASSISTANT, content=resp.message.content)
)
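# Append the assistant's reply to the history, then ask a follow-up question to
# continue the multi-turn conversation.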
messages.append(
| ChatMessage(role=MessageRole.USER, content="How to make it without sugar") | llama_index.core.base.llms.types.ChatMessage |
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().system('pip install llama-index')
from llama_index.core.node_parser import SimpleFileNodeParser
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = | FlatReader() | llama_index.readers.file.FlatReader |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex
from llama_index.core import PromptTemplate
from IPython.display import Markdown, display
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
gpt35_llm = OpenAI(model="gpt-3.5-turbo")
gpt4_llm = OpenAI(model="gpt-4")
index = VectorStoreIndex.from_documents(documents)
query_str = "What are the potential risks associated with the use of Llama 2 as mentioned in the context?"
query_engine = index.as_query_engine(similarity_top_k=2, llm=gpt35_llm)
vector_retriever = index.as_retriever(similarity_top_k=2)
response = query_engine.query(query_str)
print(str(response))
def display_prompt_dict(prompts_dict):
for k, p in prompts_dict.items():
text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>"
display(Markdown(text_md))
print(p.get_template())
display(Markdown("<br><br>"))
prompts_dict = query_engine.get_prompts()
display_prompt_dict(prompts_dict)
from langchain import hub
langchain_prompt = hub.pull("rlm/rag-prompt")
from llama_index.core.prompts import LangchainPromptTemplate
lc_prompt_tmpl = LangchainPromptTemplate(
template=langchain_prompt,
template_var_mappings={"query_str": "question", "context_str": "context"},
)
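# template_var_mappings translates LlamaIndex prompt variables (query_str,
# context_str) into the names the LangChain hub prompt expects (question, context).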
query_engine.update_prompts(
{"response_synthesizer:text_qa_template": lc_prompt_tmpl}
)
prompts_dict = query_engine.get_prompts()
display_prompt_dict(prompts_dict)
response = query_engine.query(query_str)
print(str(response))
from llama_index.core.schema import TextNode
few_shot_nodes = []
for line in open("../llama2_qa_citation_events.jsonl", "r"):
few_shot_nodes.append(TextNode(text=line))
few_shot_index = VectorStoreIndex(few_shot_nodes)
few_shot_retriever = few_shot_index.as_retriever(similarity_top_k=2)
import json
def few_shot_examples_fn(**kwargs):
query_str = kwargs["query_str"]
retrieved_nodes = few_shot_retriever.retrieve(query_str)
result_strs = []
for n in retrieved_nodes:
raw_dict = json.loads(n.get_content())
query = raw_dict["query"]
response_dict = json.loads(raw_dict["response"])
result_str = f"""\
Query: {query}
Response: {response_dict}"""
result_strs.append(result_str)
return "\n\n".join(result_strs)
qa_prompt_tmpl_str = """\
Context information is below.
---------------------
{context_str}
---------------------
Given the context information and not prior knowledge, \
answer the query asking about citations over different topics.
Please provide your answer in the form of a structured JSON format containing \
a list of authors as the citations. Some examples are given below.
{few_shot_examples}
Query: {query_str}
Answer: \
"""
qa_prompt_tmpl = PromptTemplate(
qa_prompt_tmpl_str,
function_mappings={"few_shot_examples": few_shot_examples_fn},
)
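# function_mappings fills {few_shot_examples} at format time by retrieving the
# most similar annotated examples for the incoming query.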
citation_query_str = (
"Which citations are mentioned in the section on Safety RLHF?"
)
print(
qa_prompt_tmpl.format(
query_str=citation_query_str, context_str="test_context"
)
)
query_engine.update_prompts(
{"response_synthesizer:text_qa_template": qa_prompt_tmpl}
)
display_prompt_dict(query_engine.get_prompts())
response = query_engine.query(citation_query_str)
print(str(response))
print(response.source_nodes[1].get_content())
from llama_index.core.postprocessor import (
NERPIINodePostprocessor,
SentenceEmbeddingOptimizer,
)
from llama_index.core import QueryBundle
from llama_index.core.schema import NodeWithScore, TextNode
pii_processor = | NERPIINodePostprocessor(llm=gpt4_llm) | llama_index.core.postprocessor.NERPIINodePostprocessor |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic')
import nest_asyncio
nest_asyncio.apply()
from llama_index.core import SimpleDirectoryReader, Document
from llama_index.core import SummaryIndex
from llama_index.llms.openai import OpenAI
from llama_index.llms.anthropic import Anthropic
from llama_index.core.evaluation import CorrectnessEvaluator
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'")
uber_docs0 = SimpleDirectoryReader(
input_files=["./data/10k/uber_2021.pdf"]
).load_data()
uber_doc = Document(text="\n\n".join([d.get_content() for d in uber_docs0]))
from llama_index.core.utils import globals_helper
num_tokens = len(globals_helper.tokenizer(uber_doc.get_content()))
print(f"NUM TOKENS: {num_tokens}")
context_str = "Jerry's favorite snack is Hot Cheetos."
query_str = "What is Jerry's favorite snack?"
def augment_doc(doc_str, context, position):
"""Augment doc with additional context at a given position."""
doc_str1 = doc_str[:position]
doc_str2 = doc_str[position:]
return f"{doc_str1}...\n\n{context}\n\n...{doc_str2}"
test_str = augment_doc(
uber_doc.get_content(), context_str, int(0.5 * len(uber_doc.get_content()))
)
async def run_experiments(
doc, position_percentiles, context_str, query, llm, response_mode="compact"
):
eval_llm = OpenAI(model="gpt-4-1106-preview")
correctness_evaluator = CorrectnessEvaluator(llm=eval_llm)
eval_scores = {}
for idx, position_percentile in enumerate(position_percentiles):
print(f"Position percentile: {position_percentile}")
position_idx = int(position_percentile * len(uber_doc.get_content()))
new_doc_str = augment_doc(
uber_doc.get_content(), context_str, position_idx
)
new_doc = Document(text=new_doc_str)
index = SummaryIndex.from_documents(
[new_doc],
)
query_engine = index.as_query_engine(
response_mode=response_mode, llm=llm
)
print(f"Query: {query}")
response = query_engine.query(query)
print(f"Response: {str(response)}")
eval_result = correctness_evaluator.evaluate(
query=query, response=str(response), reference=context_str
)
eval_score = eval_result.score
print(f"Eval score: {eval_score}")
eval_scores[position_percentile] = eval_score
return eval_scores
position_percentiles = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
llm = OpenAI(model="gpt-4-1106-preview")
eval_scores_gpt4 = await run_experiments(
[uber_doc],
position_percentiles,
context_str,
query_str,
llm,
response_mode="compact",
)
llm = OpenAI(model="gpt-4-1106-preview")
eval_scores_gpt4_ts = await run_experiments(
[uber_doc],
position_percentiles,
context_str,
query_str,
llm,
response_mode="tree_summarize",
)
llm = | Anthropic(model="claude-2") | llama_index.llms.anthropic.Anthropic |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-web')
get_ipython().run_line_magic('pip', 'install llama-index-tools-google')
from llama_index.readers.web import SimpleWebPageReader
reader = | SimpleWebPageReader(html_to_text=True) | llama_index.readers.web.SimpleWebPageReader |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-txtai')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import txtai
txtai_index = txtai.ann.ANNFactory.create({"backend": "numpy"})
from llama_index.core import (
SimpleDirectoryReader,
load_index_from_storage,
VectorStoreIndex,
StorageContext,
)
from llama_index.vector_stores.txtai import TxtaiVectorStore
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = TxtaiVectorStore(txtai_index=txtai_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
index.storage_context.persist()
vector_store = | TxtaiVectorStore.from_persist_dir("./storage") | llama_index.vector_stores.txtai.TxtaiVectorStore.from_persist_dir |
get_ipython().run_line_magic('pip', 'install llama-index-llms-predibase')
get_ipython().system('pip install llama-index --quiet')
get_ipython().system('pip install predibase --quiet')
get_ipython().system('pip install sentence-transformers --quiet')
import os
os.environ["PREDIBASE_API_TOKEN"] = "{PREDIBASE_API_TOKEN}"
from llama_index.llms.predibase import PredibaseLLM
llm = PredibaseLLM(
model_name="llama-2-13b", temperature=0.3, max_new_tokens=512
)
result = llm.complete("Can you recommend me a nice dry white wine?")
print(result)
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.embeddings import resolve_embed_model
from llama_index.core.node_parser import SentenceSplitter
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
llm = PredibaseLLM(
model_name="llama-2-13b",
temperature=0.3,
max_new_tokens=400,
context_window=1024,
)
embed_model = | resolve_embed_model("local:BAAI/bge-small-en-v1.5") | llama_index.core.embeddings.resolve_embed_model |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-tencentvectordb')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install tcvectordb')
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from llama_index.vector_stores.tencentvectordb import TencentVectorDB
from llama_index.core.vector_stores.tencentvectordb import (
CollectionParams,
FilterField,
)
import tcvectordb
tcvectordb.debug.DebugEnable = False
import openai
import getpass
OPENAI_API_KEY = getpass.getpass("OpenAI API Key:")
openai.api_key = OPENAI_API_KEY
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
print(f"Total documents: {len(documents)}")
print(f"First document, id: {documents[0].doc_id}")
print(f"First document, hash: {documents[0].hash}")
print(
f"First document, text ({len(documents[0].text)} characters):\n{'='*20}\n{documents[0].text[:360]} ..."
)
vector_store = TencentVectorDB(
url="http://10.0.X.X",
key="eC4bLRy2va******************************",
collection_params=CollectionParams(dimension=1536, drop_exists=True),
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("Why did the author choose to work on AI?")
print(response)
query_engine = index.as_query_engine(vector_store_query_mode="mmr")
response = query_engine.query("Why did the author choose to work on AI?")
print(response)
new_vector_store = TencentVectorDB(
url="http://10.0.X.X",
key="eC4bLRy2va******************************",
collection_params=CollectionParams(dimension=1536, drop_exists=False),
)
new_index_instance = VectorStoreIndex.from_vector_store(
vector_store=new_vector_store
)
query_engine = new_index_instance.as_query_engine(similarity_top_k=5)
response = query_engine.query(
"What did the author study prior to working on AI?"
)
print(response)
retriever = new_index_instance.as_retriever(
vector_store_query_mode="mmr",
similarity_top_k=3,
vector_store_kwargs={"mmr_prefetch_factor": 4},
)
nodes_with_scores = retriever.retrieve(
"What did the author study prior to working on AI?"
)
print(f"Found {len(nodes_with_scores)} nodes.")
for idx, node_with_score in enumerate(nodes_with_scores):
print(f" [{idx}] score = {node_with_score.score}")
print(f" id = {node_with_score.node.node_id}")
print(f" text = {node_with_score.node.text[:90]} ...")
print("Nodes' ref_doc_id:")
print("\n".join([nws.node.ref_doc_id for nws in nodes_with_scores]))
new_vector_store.delete(nodes_with_scores[0].node.ref_doc_id)
nodes_with_scores = retriever.retrieve(
"What did the author study prior to working on AI?"
)
print(f"Found {len(nodes_with_scores)} nodes.")
filter_fields = [
FilterField(name="source_type"),
]
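# Declaring filterable fields at collection creation allows metadata filters
# (e.g. on source_type) to be applied at query time.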
md_storage_context = StorageContext.from_defaults(
vector_store=TencentVectorDB(
url="http://10.0.X.X",
key="eC4bLRy2va******************************",
collection_params=CollectionParams(
dimension=1536, drop_exists=True, filter_fields=filter_fields
),
)
)
def my_file_metadata(file_name: str):
"""Depending on the input file name, associate a different metadata."""
if "essay" in file_name:
source_type = "essay"
elif "dinosaur" in file_name:
source_type = "dinos"
else:
source_type = "other"
return {"source_type": source_type}
md_documents = SimpleDirectoryReader(
"../data/paul_graham", file_metadata=my_file_metadata
).load_data()
md_index = VectorStoreIndex.from_documents(
md_documents, storage_context=md_storage_context
)
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters
md_query_engine = md_index.as_query_engine(
filters=MetadataFilters(
filters=[ | ExactMatchFilter(key="source_type", value="essay") | llama_index.core.vector_stores.ExactMatchFilter |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-lancedb')
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-clip')
get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm')
get_ipython().run_line_magic('pip', 'install -U openai-whisper')
get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git')
get_ipython().run_line_magic('pip', 'install torch torchvision')
get_ipython().run_line_magic('pip', 'install matplotlib scikit-image')
get_ipython().run_line_magic('pip', 'install lancedb')
get_ipython().run_line_magic('pip', 'install moviepy')
get_ipython().run_line_magic('pip', 'install pytube')
get_ipython().run_line_magic('pip', 'install pydub')
get_ipython().run_line_magic('pip', 'install SpeechRecognition')
get_ipython().run_line_magic('pip', 'install ffmpeg-python')
get_ipython().run_line_magic('pip', 'install soundfile')
from moviepy.editor import VideoFileClip
from pathlib import Path
import speech_recognition as sr
from pytube import YouTube
from pprint import pprint
import os
OPENAI_API_TOKEN = ""
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
video_url = "https://www.youtube.com/watch?v=d_qvLDhkg00"
output_video_path = "./video_data/"
output_folder = "./mixed_data/"
output_audio_path = "./mixed_data/output_audio.wav"
filepath = output_video_path + "input_vid.mp4"
Path(output_folder).mkdir(parents=True, exist_ok=True)
from PIL import Image
import matplotlib.pyplot as plt
import os
def plot_images(image_paths):
images_shown = 0
plt.figure(figsize=(16, 9))
for img_path in image_paths:
if os.path.isfile(img_path):
image = Image.open(img_path)
plt.subplot(2, 3, images_shown + 1)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
images_shown += 1
            if images_shown >= 6:
break
def download_video(url, output_path):
"""
Download a video from a given url and save it to the output path.
Parameters:
url (str): The url of the video to download.
output_path (str): The path to save the video to.
Returns:
dict: A dictionary containing the metadata of the video.
"""
yt = YouTube(url)
metadata = {"Author": yt.author, "Title": yt.title, "Views": yt.views}
yt.streams.get_highest_resolution().download(
output_path=output_path, filename="input_vid.mp4"
)
return metadata
def video_to_images(video_path, output_folder):
"""
Convert a video to a sequence of images and save them to the output folder.
Parameters:
video_path (str): The path to the video file.
output_folder (str): The path to the folder to save the images to.
"""
clip = VideoFileClip(video_path)
clip.write_images_sequence(
os.path.join(output_folder, "frame%04d.png"), fps=0.2
)
def video_to_audio(video_path, output_audio_path):
"""
Convert a video to audio and save it to the output path.
Parameters:
video_path (str): The path to the video file.
output_audio_path (str): The path to save the audio to.
"""
clip = VideoFileClip(video_path)
audio = clip.audio
audio.write_audiofile(output_audio_path)
def audio_to_text(audio_path):
"""
Convert audio to text using the SpeechRecognition library.
Parameters:
audio_path (str): The path to the audio file.
Returns:
    text (str): The text recognized from the audio.
"""
recognizer = sr.Recognizer()
audio = sr.AudioFile(audio_path)
with audio as source:
audio_data = recognizer.record(source)
try:
text = recognizer.recognize_whisper(audio_data)
except sr.UnknownValueError:
print("Speech recognition could not understand the audio.")
except sr.RequestError as e:
print(f"Could not request results from service; {e}")
return text
try:
metadata_vid = download_video(video_url, output_video_path)
video_to_images(filepath, output_folder)
video_to_audio(filepath, output_audio_path)
text_data = audio_to_text(output_audio_path)
with open(output_folder + "output_text.txt", "w") as file:
file.write(text_data)
print("Text data saved to file")
file.close()
os.remove(output_audio_path)
print("Audio file removed")
except Exception as e:
raise e
from llama_index.core.indices import MultiModalVectorStoreIndex
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.vector_stores.lancedb import LanceDBVectorStore
text_store = | LanceDBVectorStore(uri="lancedb", table_name="text_collection") | llama_index.vector_stores.lancedb.LanceDBVectorStore |
get_ipython().run_line_magic('pip', 'install llama-index-readers-github')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import nest_asyncio
nest_asyncio.apply()
import os
os.environ["GITHUB_TOKEN"] = ""
import os
from llama_index.readers.github import GitHubRepositoryIssuesReader, GitHubIssuesClient
github_client = GitHubIssuesClient()
loader = GitHubRepositoryIssuesReader(
github_client,
owner="run-llama",
repo="llama_index",
verbose=True,
)
orig_docs = loader.load_data()
limit = 100
docs = []
for idx, doc in enumerate(orig_docs):
doc.metadata["index_id"] = doc.id_
if idx >= limit:
break
docs.append(doc)
from copy import deepcopy
import asyncio
from tqdm.asyncio import tqdm_asyncio
from llama_index.core.indices import SummaryIndex
from llama_index.core import Document, ServiceContext
from llama_index.llms.openai import OpenAI
from llama_index.core.async_utils import run_jobs
async def aprocess_doc(doc, include_summary: bool = True):
"""Process doc."""
print(f"Processing {doc.id_}")
metadata = doc.metadata
date_tokens = metadata["created_at"].split("T")[0].split("-")
year = int(date_tokens[0])
month = int(date_tokens[1])
day = int(date_tokens[2])
assignee = "" if "assignee" not in doc.metadata else doc.metadata["assignee"]
size = ""
if len(doc.metadata["labels"]) > 0:
size_arr = [l for l in doc.metadata["labels"] if "size:" in l]
size = size_arr[0].split(":")[1] if len(size_arr) > 0 else ""
new_metadata = {
"state": metadata["state"],
"year": year,
"month": month,
"day": day,
"assignee": assignee,
"size": size,
"index_id": doc.id_,
}
summary_index = | SummaryIndex.from_documents([doc]) | llama_index.core.indices.SummaryIndex.from_documents |
get_ipython().run_line_magic('pip', 'install llama-index-llms-portkey')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install -U llama_index')
get_ipython().system('pip install -U portkey-ai')
from llama_index.llms.portkey import Portkey
from llama_index.core.llms import ChatMessage
import portkey as pk
import os
os.environ["PORTKEY_API_KEY"] = "PORTKEY_API_KEY"
openai_virtual_key_a = ""
openai_virtual_key_b = ""
anthropic_virtual_key_a = ""
anthropic_virtual_key_b = ""
cohere_virtual_key_a = ""
cohere_virtual_key_b = ""
os.environ["OPENAI_API_KEY"] = ""
os.environ["ANTHROPIC_API_KEY"] = ""
portkey_client = Portkey(
mode="single",
)
openai_llm = pk.LLMOptions(
provider="openai",
model="gpt-4",
virtual_key=openai_virtual_key_a,
)
portkey_client.add_llms(openai_llm)
messages = [
ChatMessage(role="system", content="You are a helpful assistant"),
ChatMessage(role="user", content="What can you do?"),
]
print("Testing Portkey Llamaindex integration:")
response = portkey_client.chat(messages)
print(response)
prompt = "Why is the sky blue?"
print("\nTesting Stream Complete:\n")
response = portkey_client.stream_complete(prompt)
for i in response:
print(i.delta, end="", flush=True)
messages = [
ChatMessage(role="system", content="You are a helpful assistant"),
ChatMessage(role="user", content="What can you do?"),
]
print("\nTesting Stream Chat:\n")
response = portkey_client.stream_chat(messages)
for i in response:
print(i.delta, end="", flush=True)
portkey_client = Portkey(mode="fallback")
messages = [
ChatMessage(role="system", content="You are a helpful assistant"),
ChatMessage(role="user", content="What can you do?"),
]
llm1 = pk.LLMOptions(
provider="openai",
model="gpt-4",
retry_settings={"on_status_codes": [429, 500], "attempts": 2},
virtual_key=openai_virtual_key_a,
)
llm2 = pk.LLMOptions(
provider="openai",
model="gpt-3.5-turbo",
virtual_key=openai_virtual_key_b,
)
portkey_client.add_llms(llm_params=[llm1, llm2])
print("Testing Fallback & Retry functionality:")
response = portkey_client.chat(messages)
print(response)
portkey_client = | Portkey(mode="ab_test") | llama_index.llms.portkey.Portkey |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-.."
openai.api_key = os.environ["OPENAI_API_KEY"]
from IPython.display import Markdown, display
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
)
engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
from llama_index.core import SQLDatabase
from llama_index.llms.openai import OpenAI
llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo")
sql_database = SQLDatabase(engine, include_tables=["city_stats"])
sql_database = | SQLDatabase(engine, include_tables=["city_stats"]) | llama_index.core.SQLDatabase |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip')
get_ipython().system('unzip data.zip')
import pandas as pd
from pathlib import Path
data_dir = Path("./WikiTableQuestions/csv/200-csv")
csv_files = sorted([f for f in data_dir.glob("*.csv")])
dfs = []
for csv_file in csv_files:
print(f"processing file: {csv_file}")
try:
df = pd.read_csv(csv_file)
dfs.append(df)
except Exception as e:
print(f"Error parsing {csv_file}: {str(e)}")
tableinfo_dir = "WikiTableQuestions_TableInfo"
get_ipython().system('mkdir {tableinfo_dir}')
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.llms.openai import OpenAI
class TableInfo(BaseModel):
"""Information regarding a structured table."""
table_name: str = Field(
..., description="table name (must be underscores and NO spaces)"
)
table_summary: str = Field(
..., description="short, concise summary/caption of the table"
)
prompt_str = """\
Give me a summary of the table with the following JSON format.
- The table name must be unique to the table and describe it while being concise.
- Do NOT output a generic table name (e.g. table, my_table).
Do NOT make the table name one of the following: {exclude_table_name_list}
Table:
{table_str}
Summary: """
program = LLMTextCompletionProgram.from_defaults(
output_cls=TableInfo,
llm=OpenAI(model="gpt-3.5-turbo"),
prompt_template_str=prompt_str,
)
import json
def _get_tableinfo_with_index(idx: int) -> str:
results_gen = Path(tableinfo_dir).glob(f"{idx}_*")
results_list = list(results_gen)
if len(results_list) == 0:
return None
elif len(results_list) == 1:
path = results_list[0]
return TableInfo.parse_file(path)
else:
raise ValueError(
f"More than one file matching index: {list(results_gen)}"
)
table_names = set()
table_infos = []
for idx, df in enumerate(dfs):
table_info = _get_tableinfo_with_index(idx)
if table_info:
table_infos.append(table_info)
else:
while True:
df_str = df.head(10).to_csv()
table_info = program(
table_str=df_str,
exclude_table_name_list=str(list(table_names)),
)
table_name = table_info.table_name
print(f"Processed table: {table_name}")
if table_name not in table_names:
table_names.add(table_name)
break
else:
print(f"Table name {table_name} already exists, trying again.")
pass
out_file = f"{tableinfo_dir}/{idx}_{table_name}.json"
json.dump(table_info.dict(), open(out_file, "w"))
table_infos.append(table_info)
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
)
import re
def sanitize_column_name(col_name):
return re.sub(r"\W+", "_", col_name)
def create_table_from_dataframe(
df: pd.DataFrame, table_name: str, engine, metadata_obj
):
sanitized_columns = {col: sanitize_column_name(col) for col in df.columns}
df = df.rename(columns=sanitized_columns)
columns = [
Column(col, String if dtype == "object" else Integer)
for col, dtype in zip(df.columns, df.dtypes)
]
table = Table(table_name, metadata_obj, *columns)
metadata_obj.create_all(engine)
with engine.connect() as conn:
for _, row in df.iterrows():
insert_stmt = table.insert().values(**row.to_dict())
conn.execute(insert_stmt)
conn.commit()
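# Load each CSV into an in-memory SQLite database, using the LLM-generated table
# names and sanitized column names.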
engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
for idx, df in enumerate(dfs):
tableinfo = _get_tableinfo_with_index(idx)
print(f"Creating table: {tableinfo.table_name}")
create_table_from_dataframe(df, tableinfo.table_name, engine, metadata_obj)
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.objects import (
SQLTableNodeMapping,
ObjectIndex,
SQLTableSchema,
)
from llama_index.core import SQLDatabase, VectorStoreIndex
sql_database = SQLDatabase(engine)
table_node_mapping = SQLTableNodeMapping(sql_database)
table_schema_objs = [
SQLTableSchema(table_name=t.table_name, context_str=t.table_summary)
for t in table_infos
] # add a SQLTableSchema for each table
obj_index = ObjectIndex.from_objects(
table_schema_objs,
table_node_mapping,
VectorStoreIndex,
)
obj_retriever = obj_index.as_retriever(similarity_top_k=3)
from llama_index.core.retrievers import SQLRetriever
from typing import List
from llama_index.core.query_pipeline import FnComponent
sql_retriever = SQLRetriever(sql_database)
def get_table_context_str(table_schema_objs: List[SQLTableSchema]):
"""Get table context string."""
context_strs = []
for table_schema_obj in table_schema_objs:
table_info = sql_database.get_single_table_info(
table_schema_obj.table_name
)
if table_schema_obj.context_str:
table_opt_context = " The table description is: "
table_opt_context += table_schema_obj.context_str
table_info += table_opt_context
context_strs.append(table_info)
return "\n\n".join(context_strs)
table_parser_component = FnComponent(fn=get_table_context_str)
from llama_index.core.prompts.default_prompts import DEFAULT_TEXT_TO_SQL_PROMPT
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import FnComponent
from llama_index.core.llms import ChatResponse
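# The LLM output may include "SQLQuery:"/"SQLResult:" markers and code fences;
# strip them so only the raw SQL statement reaches the SQL retriever.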
def parse_response_to_sql(response: ChatResponse) -> str:
"""Parse response to SQL."""
response = response.message.content
sql_query_start = response.find("SQLQuery:")
if sql_query_start != -1:
response = response[sql_query_start:]
if response.startswith("SQLQuery:"):
response = response[len("SQLQuery:") :]
sql_result_start = response.find("SQLResult:")
if sql_result_start != -1:
response = response[:sql_result_start]
return response.strip().strip("```").strip()
sql_parser_component = FnComponent(fn=parse_response_to_sql)
text2sql_prompt = DEFAULT_TEXT_TO_SQL_PROMPT.partial_format(
dialect=engine.dialect.name
)
print(text2sql_prompt.template)
response_synthesis_prompt_str = (
"Given an input question, synthesize a response from the query results.\n"
"Query: {query_str}\n"
"SQL: {sql_query}\n"
"SQL Response: {context_str}\n"
"Response: "
)
response_synthesis_prompt = PromptTemplate(
response_synthesis_prompt_str,
)
llm = OpenAI(model="gpt-3.5-turbo")
from llama_index.core.query_pipeline import (
QueryPipeline as QP,
Link,
InputComponent,
CustomQueryComponent,
)
qp = QP(
modules={
"input": InputComponent(),
"table_retriever": obj_retriever,
"table_output_parser": table_parser_component,
"text2sql_prompt": text2sql_prompt,
"text2sql_llm": llm,
"sql_output_parser": sql_parser_component,
"sql_retriever": sql_retriever,
"response_synthesis_prompt": response_synthesis_prompt,
"response_synthesis_llm": llm,
},
verbose=True,
)
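# DAG wiring: retrieve candidate tables -> build schema context -> text-to-SQL
# prompt -> LLM -> parse SQL -> execute via SQLRetriever -> synthesize a response.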
qp.add_chain(["input", "table_retriever", "table_output_parser"])
qp.add_link("input", "text2sql_prompt", dest_key="query_str")
qp.add_link("table_output_parser", "text2sql_prompt", dest_key="schema")
qp.add_chain(
["text2sql_prompt", "text2sql_llm", "sql_output_parser", "sql_retriever"]
)
qp.add_link(
"sql_output_parser", "response_synthesis_prompt", dest_key="sql_query"
)
qp.add_link(
"sql_retriever", "response_synthesis_prompt", dest_key="context_str"
)
qp.add_link("input", "response_synthesis_prompt", dest_key="query_str")
qp.add_link("response_synthesis_prompt", "response_synthesis_llm")
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(qp.dag)
net.show("text2sql_dag.html")
response = qp.run(
query="What was the year that The Notorious B.I.G was signed to Bad Boy?"
)
print(str(response))
response = qp.run(query="Who won best director in the 1972 academy awards")
print(str(response))
response = qp.run(query="What was the term of Pasquale Preziosa?")
print(str(response))
from llama_index.core import VectorStoreIndex, load_index_from_storage
from sqlalchemy import text
from llama_index.core.schema import TextNode
from llama_index.core import StorageContext
import os
from pathlib import Path
from typing import Dict
def index_all_tables(
sql_database: SQLDatabase, table_index_dir: str = "table_index_dir"
) -> Dict[str, VectorStoreIndex]:
"""Index all tables."""
if not Path(table_index_dir).exists():
os.makedirs(table_index_dir)
vector_index_dict = {}
engine = sql_database.engine
for table_name in sql_database.get_usable_table_names():
print(f"Indexing rows in table: {table_name}")
if not os.path.exists(f"{table_index_dir}/{table_name}"):
with engine.connect() as conn:
cursor = conn.execute(text(f'SELECT * FROM "{table_name}"'))
result = cursor.fetchall()
row_tups = []
for row in result:
row_tups.append(tuple(row))
nodes = [TextNode(text=str(t)) for t in row_tups]
index = VectorStoreIndex(nodes)
index.set_index_id("vector_index")
index.storage_context.persist(f"{table_index_dir}/{table_name}")
else:
storage_context = StorageContext.from_defaults(
persist_dir=f"{table_index_dir}/{table_name}"
)
index = load_index_from_storage(
storage_context, index_id="vector_index"
)
vector_index_dict[table_name] = index
return vector_index_dict
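# Build (or reload from disk) one vector index of raw rows per table, so queries
# can later pull example rows to enrich the schema context.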
vector_index_dict = index_all_tables(sql_database)
test_retriever = vector_index_dict["Bad_Boy_Artists"].as_retriever(
similarity_top_k=1
)
nodes = test_retriever.retrieve("P. Diddy")
print(nodes[0].get_content())
from llama_index.core.retrievers import SQLRetriever
from typing import List
from llama_index.core.query_pipeline import FnComponent
sql_retriever = SQLRetriever(sql_database)
def get_table_context_and_rows_str(
query_str: str, table_schema_objs: List[SQLTableSchema]
):
"""Get table context string."""
context_strs = []
for table_schema_obj in table_schema_objs:
table_info = sql_database.get_single_table_info(
table_schema_obj.table_name
)
if table_schema_obj.context_str:
table_opt_context = " The table description is: "
table_opt_context += table_schema_obj.context_str
table_info += table_opt_context
vector_retriever = vector_index_dict[
table_schema_obj.table_name
].as_retriever(similarity_top_k=2)
relevant_nodes = vector_retriever.retrieve(query_str)
if len(relevant_nodes) > 0:
table_row_context = "\nHere are some relevant example rows (values in the same order as columns above)\n"
for node in relevant_nodes:
table_row_context += str(node.get_content()) + "\n"
table_info += table_row_context
context_strs.append(table_info)
return "\n\n".join(context_strs)
table_parser_component = FnComponent(fn=get_table_context_and_rows_str)
from llama_index.core.query_pipeline import (
QueryPipeline as QP,
Link,
InputComponent,
CustomQueryComponent,
)
qp = QP(
modules={
"input": | InputComponent() | llama_index.core.query_pipeline.InputComponent |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openllm')
get_ipython().system('pip install "openllm" # use \'openllm[vllm]\' if you have access to GPU')
get_ipython().system('pip install llama-index')
import os
from typing import List, Optional
from llama_index.llms.openllm import OpenLLM, OpenLLMAPI
from llama_index.core.llms import ChatMessage
os.environ[
"OPENLLM_ENDPOINT"
] = "na" # Change this to a remote server that you might run OpenLLM at.
local_llm = | OpenLLM("HuggingFaceH4/zephyr-7b-alpha") | llama_index.llms.openllm.OpenLLM |
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic')
get_ipython().system('pip install llama-index')
from llama_index.llms.anthropic import Anthropic
from llama_index.core import Settings
tokenizer = Anthropic().tokenizer
Settings.tokenizer = tokenizer
import os
os.environ["ANTHROPIC_API_KEY"] = "YOUR ANTHROPIC API KEY"
from llama_index.llms.anthropic import Anthropic
llm = Anthropic(model="claude-3-opus-20240229")
resp = llm.complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.anthropic import Anthropic
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="Tell me a story"),
]
resp = Anthropic(model="claude-3-opus-20240229").chat(messages)
print(resp)
from llama_index.llms.anthropic import Anthropic
llm = | Anthropic(model="claude-3-opus-20240229", max_tokens=100) | llama_index.llms.anthropic.Anthropic |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere')
get_ipython().system('pip install llama-index')
from llama_index.llms.cohere import Cohere
api_key = "Your api key"
resp = Cohere(api_key=api_key).complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.cohere import Cohere
messages = [
ChatMessage(role="user", content="hello there"),
ChatMessage(
role="assistant", content="Arrrr, matey! How can I help ye today?"
),
ChatMessage(role="user", content="What is your name"),
]
resp = Cohere(api_key=api_key).chat(
messages, preamble_override="You are a pirate with a colorful personality"
)
print(resp)
from llama_index.llms.cohere import Cohere
llm = Cohere(api_key=api_key)
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
print(r.delta, end="")
from llama_index.llms.cohere import Cohere
llm = Cohere(api_key=api_key)
messages = [
ChatMessage(role="user", content="hello there"),
ChatMessage(
role="assistant", content="Arrrr, matey! How can I help ye today?"
),
ChatMessage(role="user", content="What is your name"),
]
resp = llm.stream_chat(
messages, preamble_override="You are a pirate with a colorful personality"
)
for r in resp:
print(r.delta, end="")
from llama_index.llms.cohere import Cohere
llm = Cohere(model="command", api_key=api_key)
resp = llm.complete("Paul Graham is ")
print(resp)
from llama_index.llms.cohere import Cohere
llm = | Cohere(model="command", api_key=api_key) | llama_index.llms.cohere.Cohere |
get_ipython().run_line_magic('pip', 'install -q llama-index-vector-stores-chroma llama-index-llms-fireworks llama-index-embeddings-fireworks==0.1.2')
get_ipython().run_line_magic('pip', 'install -q llama-index')
get_ipython().system('pip install llama-index chromadb --quiet')
get_ipython().system('pip install -q chromadb')
get_ipython().system('pip install -q pydantic==1.10.11')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext
from llama_index.embeddings.fireworks import FireworksEmbedding
from llama_index.llms.fireworks import Fireworks
from IPython.display import Markdown, display
import chromadb
import getpass
fw_api_key = getpass.getpass("Fireworks API Key:")
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.llms.fireworks import Fireworks
from llama_index.embeddings.fireworks import FireworksEmbedding
llm = Fireworks(
temperature=0, model="accounts/fireworks/models/mixtral-8x7b-instruct"
)
chroma_client = chromadb.EphemeralClient()
chroma_collection = chroma_client.create_collection("quickstart")
embed_model = FireworksEmbedding(
model_name="nomic-ai/nomic-embed-text-v1.5",
)
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, embed_model=embed_model
)
query_engine = index.as_query_engine(llm=llm)
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
db = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db.get_or_create_collection("quickstart")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
embed_model = FireworksEmbedding(
model_name="nomic-ai/nomic-embed-text-v1.5",
api_base="https://api.fireworks.ai/inference/v1",
dimensions=128,
)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, embed_model=embed_model
)
db2 = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db2.get_or_create_collection("quickstart")
vector_store = | ChromaVectorStore(chroma_collection=chroma_collection) | llama_index.vector_stores.chroma.ChromaVectorStore |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-graph-stores-kuzu')
import os
os.environ["OPENAI_API_KEY"] = "API_KEY_HERE"
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
import shutil
shutil.rmtree("./test1", ignore_errors=True)
shutil.rmtree("./test2", ignore_errors=True)
shutil.rmtree("./test3", ignore_errors=True)
get_ipython().run_line_magic('pip', 'install kuzu')
import kuzu
db = kuzu.Database("test1")
from llama_index.graph_stores.kuzu import KuzuGraphStore
graph_store = | KuzuGraphStore(db) | llama_index.graph_stores.kuzu.KuzuGraphStore |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-epsilla')
get_ipython().system('pip install pyepsilla')
get_ipython().system('pip install llama-index')
import logging
import sys
from llama_index.core import SimpleDirectoryReader, Document, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.epsilla import EpsillaVectorStore
import textwrap
import openai
import getpass
OPENAI_API_KEY = getpass.getpass("OpenAI API Key:")
openai.api_key = OPENAI_API_KEY
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(f"Total documents: {len(documents)}")
print(f"First document, id: {documents[0].doc_id}")
print(f"First document, hash: {documents[0].hash}")
from pyepsilla import vectordb
client = vectordb.Client()
vector_store = EpsillaVectorStore(client=client, db_path="/tmp/llamastore")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("Who is the author?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("How did the author learn about AI?")
print(textwrap.fill(str(response), 100))
vector_store = EpsillaVectorStore(client=client, overwrite=True)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
single_doc = | Document(text="Epsilla is the vector database we are using.") | llama_index.core.Document |
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-replicate')
get_ipython().run_line_magic('pip', 'install replicate')
import os
REPLICATE_API_TOKEN = "" # Your Relicate API token here
os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN
from PIL import Image
import requests
from io import BytesIO
from llama_index.core.multi_modal_llms.generic_utils import load_image_urls
from llama_index.core.schema import ImageDocument
if not os.path.exists("test_images"):
os.makedirs("test_images")
image_urls = [
"https://www.sportsnet.ca/wp-content/uploads/2023/11/CP1688996471-1040x572.jpg",
"https://res.cloudinary.com/hello-tickets/image/upload/c_limit,f_auto,q_auto,w_1920/v1640835927/o3pfl41q7m5bj8jardk0.jpg",
"https://www.cleverfiles.com/howto/wp-content/uploads/2018/03/minion.jpg",
]
for idx, image_url in enumerate(image_urls):
response = requests.get(image_url)
img = Image.open(BytesIO(response.content))
img.save(f"test_images/{idx}.png")
image_documents = [
| ImageDocument(image_path=f"test_images/{idx}.png") | llama_index.core.schema.ImageDocument |
get_ipython().system('pip install llama-index')
get_ipython().system('pip install wget')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-azureaisearch')
get_ipython().run_line_magic('pip', 'install azure-search-documents==11.4.0')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-azure-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-azure-openai')
import logging
import sys
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient
from azure.search.documents.indexes import SearchIndexClient
from IPython.display import Markdown, display
from llama_index.core import (
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.core.settings import Settings
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
from llama_index.vector_stores.azureaisearch import AzureAISearchVectorStore
from llama_index.vector_stores.azureaisearch import (
IndexManagement,
MetadataIndexFieldType,
)
aoai_api_key = "YOUR_AZURE_OPENAI_API_KEY"
aoai_endpoint = "YOUR_AZURE_OPENAI_ENDPOINT"
aoai_api_version = "2023-05-15"
llm = AzureOpenAI(
model="YOUR_AZURE_OPENAI_COMPLETION_MODEL_NAME",
deployment_name="YOUR_AZURE_OPENAI_COMPLETION_DEPLOYMENT_NAME",
api_key=aoai_api_key,
azure_endpoint=aoai_endpoint,
api_version=aoai_api_version,
)
embed_model = AzureOpenAIEmbedding(
model="YOUR_AZURE_OPENAI_EMBEDDING_MODEL_NAME",
deployment_name="YOUR_AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME",
api_key=aoai_api_key,
azure_endpoint=aoai_endpoint,
api_version=aoai_api_version,
)
search_service_api_key = "YOUR-AZURE-SEARCH-SERVICE-ADMIN-KEY"
search_service_endpoint = "YOUR-AZURE-SEARCH-SERVICE-ENDPOINT"
search_service_api_version = "2023-11-01"
credential = AzureKeyCredential(search_service_api_key)
index_name = "llamaindex-vector-demo"
index_client = SearchIndexClient(
endpoint=search_service_endpoint,
credential=credential,
)
search_client = SearchClient(
endpoint=search_service_endpoint,
index_name=index_name,
credential=credential,
)
metadata_fields = {
"author": "author",
"theme": ("topic", MetadataIndexFieldType.STRING),
"director": "director",
}
vector_store = AzureAISearchVectorStore(
search_or_index_client=index_client,
filterable_metadata_field_keys=metadata_fields,
index_name=index_name,
index_management=IndexManagement.CREATE_IF_NOT_EXISTS,
id_field_key="id",
chunk_field_key="chunk",
embedding_field_key="embedding",
embedding_dimensionality=1536,
metadata_string_field_key="metadata",
doc_id_field_key="doc_id",
language_analyzer="en.lucene",
vector_algorithm_type="exhaustiveKnn",
)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("../data/paul_graham/").load_data()
storage_context = StorageContext.from_defaults(vector_store=vector_store)
Settings.llm = llm
Settings.embed_model = embed_model
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine(similarity_top_k=3)
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
response = query_engine.query(
"What did the author learn?",
)
display(Markdown(f"<b>{response}</b>"))
index_name = "llamaindex-vector-demo"
metadata_fields = {
"author": "author",
"theme": ("topic", MetadataIndexFieldType.STRING),
"director": "director",
}
vector_store = AzureAISearchVectorStore(
search_or_index_client=search_client,
filterable_metadata_field_keys=metadata_fields,
index_management=IndexManagement.VALIDATE_INDEX,
id_field_key="id",
chunk_field_key="chunk",
embedding_field_key="embedding",
embedding_dimensionality=1536,
metadata_string_field_key="metadata",
doc_id_field_key="doc_id",
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
[],
storage_context=storage_context,
)
query_engine = index.as_query_engine()
response = query_engine.query("What was a hard moment for the author?")
display(Markdown(f"<b>{response}</b>"))
response = query_engine.query("Who is the author?")
display(Markdown(f"<b>{response}</b>"))
import time
query_engine = index.as_query_engine(streaming=True)
response = query_engine.query("What happened at interleaf?")
start_time = time.time()
token_count = 0
for token in response.response_gen:
print(token, end="")
token_count += 1
time_elapsed = time.time() - start_time
tokens_per_second = token_count / time_elapsed
print(f"\n\nStreamed output at {tokens_per_second} tokens/s")
response = query_engine.query("What colour is the sky?")
display(Markdown(f"<b>{response}</b>"))
from llama_index.core import Document
index.insert_nodes([ | Document(text="The sky is indigo today") | llama_index.core.Document |
get_ipython().run_line_magic('pip', 'install llama-index-readers-elasticsearch')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-opensearch')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-ollama')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from os import getenv
from llama_index.core import SimpleDirectoryReader
from llama_index.vector_stores.opensearch import (
OpensearchVectorStore,
OpensearchVectorClient,
)
from llama_index.core import VectorStoreIndex, StorageContext
endpoint = getenv("OPENSEARCH_ENDPOINT", "http://localhost:9200")
idx = getenv("OPENSEARCH_INDEX", "gpt-index-demo")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
text_field = "content"
embedding_field = "embedding"
client = OpensearchVectorClient(
endpoint, idx, 1536, embedding_field=embedding_field, text_field=text_field
)
vector_store = | OpensearchVectorStore(client) | llama_index.vector_stores.opensearch.OpensearchVectorStore |
from utils import get_train_str, get_train_and_eval_data, get_eval_preds, train_prompt
import warnings
warnings.filterwarnings("ignore")
warnings.simplefilter("ignore")
train_df, train_labels, eval_df, eval_labels = get_train_and_eval_data("data/train.csv")
print(train_prompt.template)
train_n = 10
eval_n = 40
train_str = get_train_str(train_df, train_labels, train_n=train_n)
print(f"Example datapoints in `train_str`: \n{train_str}")
from sklearn.metrics import accuracy_score
import numpy as np
eval_preds = get_eval_preds(train_prompt, train_str, eval_df, n=eval_n)
eval_label_chunk = eval_labels[:eval_n]
acc = accuracy_score(eval_label_chunk, np.array(eval_preds).round())
print(f"ACCURACY: {acc}")
from sklearn.metrics import accuracy_score
import numpy as np
eval_preds_null = get_eval_preds(train_prompt, "", eval_df, n=eval_n)
eval_label_chunk = eval_labels[:eval_n]
acc_null = accuracy_score(eval_label_chunk, np.array(eval_preds_null).round())
print(f"ACCURACY: {acc_null}")
from llama_index.core import SummaryIndex
from llama_index.core.schema import Document
index = SummaryIndex([])
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-program-evaporate')
get_ipython().system('pip install llama-index')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Houston"]
from pathlib import Path
import requests
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
data_path = Path("data")
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
from llama_index.core import SimpleDirectoryReader
city_docs = {}
for wiki_title in wiki_titles:
city_docs[wiki_title] = SimpleDirectoryReader(
input_files=[f"data/{wiki_title}.txt"]
).load_data()
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
Settings.chunk_size = 512
city_nodes = {}
for wiki_title in wiki_titles:
docs = city_docs[wiki_title]
nodes = Settings.node_parser.get_nodes_from_documents(docs)
city_nodes[wiki_title] = nodes
from llama_index.program.evaporate import DFEvaporateProgram
program = DFEvaporateProgram.from_defaults(
fields_to_extract=["population"],
)
program.fit_fields(city_nodes["Toronto"][:1])
print(program.get_function_str("population"))
seattle_df = program(nodes=city_nodes["Seattle"][:1])
seattle_df
Settings.llm = OpenAI(temperature=0, model="gpt-4")
Settings.chunk_size = 1024
Settings.chunk_overlap = 0
from llama_index.core.data_structs import Node
train_text = """
<table class="wikitable sortable" style="margin-top:0; text-align:center; font-size:90%;">
<tbody><tr>
<th>Team (IOC code)
</th>
<th>No. Summer
</th>
<th>No. Winter
</th>
<th>No. Games
</th></tr>
<tr>
<td align="left"><span id="ALB"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/3/36/Flag_of_Albania.svg/22px-Flag_of_Albania.svg.png" decoding="async" width="22" height="16" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/3/36/Flag_of_Albania.svg/33px-Flag_of_Albania.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/3/36/Flag_of_Albania.svg/44px-Flag_of_Albania.svg.png 2x" data-file-width="980" data-file-height="700" /> <a href="/wiki/Albania_at_the_Olympics" title="Albania at the Olympics">Albania</a> <span style="font-size:90%;">(ALB)</span></span>
</td>
<td style="background:#f2f2ce;">9</td>
<td style="background:#cedff2;">5</td>
<td>14
</td></tr>
<tr>
<td align="left"><span id="ASA"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/8/87/Flag_of_American_Samoa.svg/22px-Flag_of_American_Samoa.svg.png" decoding="async" width="22" height="11" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/8/87/Flag_of_American_Samoa.svg/33px-Flag_of_American_Samoa.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/8/87/Flag_of_American_Samoa.svg/44px-Flag_of_American_Samoa.svg.png 2x" data-file-width="1000" data-file-height="500" /> <a href="/wiki/American_Samoa_at_the_Olympics" title="American Samoa at the Olympics">American Samoa</a> <span style="font-size:90%;">(ASA)</span></span>
</td>
<td style="background:#f2f2ce;">9</td>
<td style="background:#cedff2;">2</td>
<td>11
</td></tr>
<tr>
<td align="left"><span id="AND"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/1/19/Flag_of_Andorra.svg/22px-Flag_of_Andorra.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/1/19/Flag_of_Andorra.svg/33px-Flag_of_Andorra.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/1/19/Flag_of_Andorra.svg/44px-Flag_of_Andorra.svg.png 2x" data-file-width="1000" data-file-height="700" /> <a href="/wiki/Andorra_at_the_Olympics" title="Andorra at the Olympics">Andorra</a> <span style="font-size:90%;">(AND)</span></span>
</td>
<td style="background:#f2f2ce;">12</td>
<td style="background:#cedff2;">13</td>
<td>25
</td></tr>
<tr>
<td align="left"><span id="ANG"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Flag_of_Angola.svg/22px-Flag_of_Angola.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Flag_of_Angola.svg/33px-Flag_of_Angola.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Flag_of_Angola.svg/44px-Flag_of_Angola.svg.png 2x" data-file-width="900" data-file-height="600" /> <a href="/wiki/Angola_at_the_Olympics" title="Angola at the Olympics">Angola</a> <span style="font-size:90%;">(ANG)</span></span>
</td>
<td style="background:#f2f2ce;">10</td>
<td style="background:#cedff2;">0</td>
<td>10
</td></tr>
<tr>
<td align="left"><span id="ANT"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/8/89/Flag_of_Antigua_and_Barbuda.svg/22px-Flag_of_Antigua_and_Barbuda.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/8/89/Flag_of_Antigua_and_Barbuda.svg/33px-Flag_of_Antigua_and_Barbuda.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/8/89/Flag_of_Antigua_and_Barbuda.svg/44px-Flag_of_Antigua_and_Barbuda.svg.png 2x" data-file-width="900" data-file-height="600" /> <a href="/wiki/Antigua_and_Barbuda_at_the_Olympics" title="Antigua and Barbuda at the Olympics">Antigua and Barbuda</a> <span style="font-size:90%;">(ANT)</span></span>
</td>
<td style="background:#f2f2ce;">11</td>
<td style="background:#cedff2;">0</td>
<td>11
</td></tr>
<tr>
<td align="left"><span id="ARU"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/f/f6/Flag_of_Aruba.svg/22px-Flag_of_Aruba.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/f/f6/Flag_of_Aruba.svg/33px-Flag_of_Aruba.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/f/f6/Flag_of_Aruba.svg/44px-Flag_of_Aruba.svg.png 2x" data-file-width="900" data-file-height="600" /> <a href="/wiki/Aruba_at_the_Olympics" title="Aruba at the Olympics">Aruba</a> <span style="font-size:90%;">(ARU)</span></span>
</td>
<td style="background:#f2f2ce;">9</td>
<td style="background:#cedff2;">0</td>
<td>9
</td></tr>
"""
train_nodes = [Node(text=train_text)]
infer_text = """
<td align="left"><span id="BAN"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/f/f9/Flag_of_Bangladesh.svg/22px-Flag_of_Bangladesh.svg.png" decoding="async" width="22" height="13" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/f/f9/Flag_of_Bangladesh.svg/33px-Flag_of_Bangladesh.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/f/f9/Flag_of_Bangladesh.svg/44px-Flag_of_Bangladesh.svg.png 2x" data-file-width="1000" data-file-height="600" /> <a href="/wiki/Bangladesh_at_the_Olympics" title="Bangladesh at the Olympics">Bangladesh</a> <span style="font-size:90%;">(BAN)</span></span>
</td>
<td style="background:#f2f2ce;">10</td>
<td style="background:#cedff2;">0</td>
<td>10
</td></tr>
<tr>
<td align="left"><span id="BIZ"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/e/e7/Flag_of_Belize.svg/22px-Flag_of_Belize.svg.png" decoding="async" width="22" height="13" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/e/e7/Flag_of_Belize.svg/33px-Flag_of_Belize.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/e/e7/Flag_of_Belize.svg/44px-Flag_of_Belize.svg.png 2x" data-file-width="1000" data-file-height="600" /> <a href="/wiki/Belize_at_the_Olympics" title="Belize at the Olympics">Belize</a> <span style="font-size:90%;">(BIZ)</span></span> <sup class="reference" id="ref_BIZBIZ"><a href="#endnote_BIZBIZ">[BIZ]</a></sup>
</td>
<td style="background:#f2f2ce;">13</td>
<td style="background:#cedff2;">0</td>
<td>13
</td></tr>
<tr>
<td align="left"><span id="BEN"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/0/0a/Flag_of_Benin.svg/22px-Flag_of_Benin.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/0/0a/Flag_of_Benin.svg/33px-Flag_of_Benin.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/0/0a/Flag_of_Benin.svg/44px-Flag_of_Benin.svg.png 2x" data-file-width="900" data-file-height="600" /> <a href="/wiki/Benin_at_the_Olympics" title="Benin at the Olympics">Benin</a> <span style="font-size:90%;">(BEN)</span></span> <sup class="reference" id="ref_BENBEN"><a href="#endnote_BENBEN">[BEN]</a></sup>
</td>
<td style="background:#f2f2ce;">12</td>
<td style="background:#cedff2;">0</td>
<td>12
</td></tr>
<tr>
<td align="left"><span id="BHU"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/9/91/Flag_of_Bhutan.svg/22px-Flag_of_Bhutan.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/91/Flag_of_Bhutan.svg/33px-Flag_of_Bhutan.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/9/91/Flag_of_Bhutan.svg/44px-Flag_of_Bhutan.svg.png 2x" data-file-width="900" data-file-height="600" /> <a href="/wiki/Bhutan_at_the_Olympics" title="Bhutan at the Olympics">Bhutan</a> <span style="font-size:90%;">(BHU)</span></span>
</td>
<td style="background:#f2f2ce;">10</td>
<td style="background:#cedff2;">0</td>
<td>10
</td></tr>
<tr>
<td align="left"><span id="BOL"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/4/48/Flag_of_Bolivia.svg/22px-Flag_of_Bolivia.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/4/48/Flag_of_Bolivia.svg/33px-Flag_of_Bolivia.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/4/48/Flag_of_Bolivia.svg/44px-Flag_of_Bolivia.svg.png 2x" data-file-width="1100" data-file-height="750" /> <a href="/wiki/Bolivia_at_the_Olympics" title="Bolivia at the Olympics">Bolivia</a> <span style="font-size:90%;">(BOL)</span></span>
</td>
<td style="background:#f2f2ce;">15</td>
<td style="background:#cedff2;">7</td>
<td>22
</td></tr>
<tr>
<td align="left"><span id="BIH"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/b/bf/Flag_of_Bosnia_and_Herzegovina.svg/22px-Flag_of_Bosnia_and_Herzegovina.svg.png" decoding="async" width="22" height="11" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/b/bf/Flag_of_Bosnia_and_Herzegovina.svg/33px-Flag_of_Bosnia_and_Herzegovina.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/b/bf/Flag_of_Bosnia_and_Herzegovina.svg/44px-Flag_of_Bosnia_and_Herzegovina.svg.png 2x" data-file-width="800" data-file-height="400" /> <a href="/wiki/Bosnia_and_Herzegovina_at_the_Olympics" title="Bosnia and Herzegovina at the Olympics">Bosnia and Herzegovina</a> <span style="font-size:90%;">(BIH)</span></span>
</td>
<td style="background:#f2f2ce;">8</td>
<td style="background:#cedff2;">8</td>
<td>16
</td></tr>
<tr>
<td align="left"><span id="IVB"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/4/42/Flag_of_the_British_Virgin_Islands.svg/22px-Flag_of_the_British_Virgin_Islands.svg.png" decoding="async" width="22" height="11" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/4/42/Flag_of_the_British_Virgin_Islands.svg/33px-Flag_of_the_British_Virgin_Islands.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/4/42/Flag_of_the_British_Virgin_Islands.svg/44px-Flag_of_the_British_Virgin_Islands.svg.png 2x" data-file-width="1200" data-file-height="600" /> <a href="/wiki/British_Virgin_Islands_at_the_Olympics" title="British Virgin Islands at the Olympics">British Virgin Islands</a> <span style="font-size:90%;">(IVB)</span></span>
</td>
<td style="background:#f2f2ce;">10</td>
<td style="background:#cedff2;">2</td>
<td>12
</td></tr>
<tr>
<td align="left"><span id="BRU"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/9/9c/Flag_of_Brunei.svg/22px-Flag_of_Brunei.svg.png" decoding="async" width="22" height="11" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/9c/Flag_of_Brunei.svg/33px-Flag_of_Brunei.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/9/9c/Flag_of_Brunei.svg/44px-Flag_of_Brunei.svg.png 2x" data-file-width="1440" data-file-height="720" /> <a href="/wiki/Brunei_at_the_Olympics" title="Brunei at the Olympics">Brunei</a> <span style="font-size:90%;">(BRU)</span></span> <sup class="reference" id="ref_AA"><a href="#endnote_AA">[A]</a></sup>
</td>
<td style="background:#f2f2ce;">6</td>
<td style="background:#cedff2;">0</td>
<td>6
</td></tr>
<tr>
<td align="left"><span id="CAM"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/8/83/Flag_of_Cambodia.svg/22px-Flag_of_Cambodia.svg.png" decoding="async" width="22" height="14" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/8/83/Flag_of_Cambodia.svg/33px-Flag_of_Cambodia.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/8/83/Flag_of_Cambodia.svg/44px-Flag_of_Cambodia.svg.png 2x" data-file-width="1000" data-file-height="640" /> <a href="/wiki/Cambodia_at_the_Olympics" title="Cambodia at the Olympics">Cambodia</a> <span style="font-size:90%;">(CAM)</span></span>
</td>
<td style="background:#f2f2ce;">10</td>
<td style="background:#cedff2;">0</td>
<td>10
</td></tr>
<tr>
<td align="left"><span id="CPV"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/3/38/Flag_of_Cape_Verde.svg/22px-Flag_of_Cape_Verde.svg.png" decoding="async" width="22" height="13" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/3/38/Flag_of_Cape_Verde.svg/33px-Flag_of_Cape_Verde.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/3/38/Flag_of_Cape_Verde.svg/44px-Flag_of_Cape_Verde.svg.png 2x" data-file-width="1020" data-file-height="600" /> <a href="/wiki/Cape_Verde_at_the_Olympics" title="Cape Verde at the Olympics">Cape Verde</a> <span style="font-size:90%;">(CPV)</span></span>
</td>
<td style="background:#f2f2ce;">7</td>
<td style="background:#cedff2;">0</td>
<td>7
</td></tr>
<tr>
<td align="left"><span id="CAY"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/0/0f/Flag_of_the_Cayman_Islands.svg/22px-Flag_of_the_Cayman_Islands.svg.png" decoding="async" width="22" height="11" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/0/0f/Flag_of_the_Cayman_Islands.svg/33px-Flag_of_the_Cayman_Islands.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/0/0f/Flag_of_the_Cayman_Islands.svg/44px-Flag_of_the_Cayman_Islands.svg.png 2x" data-file-width="1200" data-file-height="600" /> <a href="/wiki/Cayman_Islands_at_the_Olympics" title="Cayman Islands at the Olympics">Cayman Islands</a> <span style="font-size:90%;">(CAY)</span></span>
</td>
<td style="background:#f2f2ce;">11</td>
<td style="background:#cedff2;">2</td>
<td>13
</td></tr>
<tr>
<td align="left"><span id="CAF"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/6/6f/Flag_of_the_Central_African_Republic.svg/22px-Flag_of_the_Central_African_Republic.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/6/6f/Flag_of_the_Central_African_Republic.svg/33px-Flag_of_the_Central_African_Republic.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/6/6f/Flag_of_the_Central_African_Republic.svg/44px-Flag_of_the_Central_African_Republic.svg.png 2x" data-file-width="900" data-file-height="600" /> <a href="/wiki/Central_African_Republic_at_the_Olympics" title="Central African Republic at the Olympics">Central African Republic</a> <span style="font-size:90%;">(CAF)</span></span>
</td>
<td style="background:#f2f2ce;">11</td>
<td style="background:#cedff2;">0</td>
<td>11
</td></tr>
<tr>
<td align="left"><span id="CHA"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/4/4b/Flag_of_Chad.svg/22px-Flag_of_Chad.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/4/4b/Flag_of_Chad.svg/33px-Flag_of_Chad.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/4/4b/Flag_of_Chad.svg/44px-Flag_of_Chad.svg.png 2x" data-file-width="900" data-file-height="600" /> <a href="/wiki/Chad_at_the_Olympics" title="Chad at the Olympics">Chad</a> <span style="font-size:90%;">(CHA)</span></span>
</td>
<td style="background:#f2f2ce;">13</td>
<td style="background:#cedff2;">0</td>
<td>13
</td></tr>
<tr>
<td align="left"><span id="COM"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/9/94/Flag_of_the_Comoros.svg/22px-Flag_of_the_Comoros.svg.png" decoding="async" width="22" height="13" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/94/Flag_of_the_Comoros.svg/33px-Flag_of_the_Comoros.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/9/94/Flag_of_the_Comoros.svg/44px-Flag_of_the_Comoros.svg.png 2x" data-file-width="1000" data-file-height="600" /> <a href="/wiki/Comoros_at_the_Olympics" title="Comoros at the Olympics">Comoros</a> <span style="font-size:90%;">(COM)</span></span>
</td>
<td style="background:#f2f2ce;">7</td>
<td style="background:#cedff2;">0</td>
<td>7
</td></tr>
<tr>
<td align="left"><span id="CGO"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/9/92/Flag_of_the_Republic_of_the_Congo.svg/22px-Flag_of_the_Republic_of_the_Congo.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/92/Flag_of_the_Republic_of_the_Congo.svg/33px-Flag_of_the_Republic_of_the_Congo.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/9/92/Flag_of_the_Republic_of_the_Congo.svg/44px-Flag_of_the_Republic_of_the_Congo.svg.png 2x" data-file-width="900" data-file-height="600" /> <a href="/wiki/Republic_of_the_Congo_at_the_Olympics" title="Republic of the Congo at the Olympics">Republic of the Congo</a> <span style="font-size:90%;">(CGO)</span></span>
</td>
<td style="background:#f2f2ce;">13</td>
<td style="background:#cedff2;">0</td>
<td>13
</td></tr>
<tr>
<td align="left"><span id="COD"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/6/6f/Flag_of_the_Democratic_Republic_of_the_Congo.svg/22px-Flag_of_the_Democratic_Republic_of_the_Congo.svg.png" decoding="async" width="22" height="17" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/6/6f/Flag_of_the_Democratic_Republic_of_the_Congo.svg/33px-Flag_of_the_Democratic_Republic_of_the_Congo.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/6/6f/Flag_of_the_Democratic_Republic_of_the_Congo.svg/44px-Flag_of_the_Democratic_Republic_of_the_Congo.svg.png 2x" data-file-width="800" data-file-height="600" /> <a href="/wiki/Democratic_Republic_of_the_Congo_at_the_Olympics" title="Democratic Republic of the Congo at the Olympics">Democratic Republic of the Congo</a> <span style="font-size:90%;">(COD)</span></span> <sup class="reference" id="ref_CODCOD"><a href="#endnote_CODCOD">[COD]</a></sup>
</td>
<td style="background:#f2f2ce;">11</td>
<td style="background:#cedff2;">0</td>
<td>11
</td></tr>
"""
infer_nodes = [Node(text=infer_text)]
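# Hedged sketch (the field names below are assumptions for this Olympics table; the original
# walkthrough may extract different fields or use a different Evaporate program variant):
# fit a fresh DFEvaporateProgram on the training HTML chunk, then apply it to the held-out rows.
olympics_program = DFEvaporateProgram.from_defaults(
    fields_to_extract=["country", "num_summer_games", "num_winter_games"],
)
olympics_program.fit_fields(train_nodes[:1])
print(olympics_program.get_function_str("country"))
infer_df = olympics_program(nodes=infer_nodes[:1])
infer_df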
from llama_index.core import SQLDatabase
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///chinook.db")
sql_database = SQLDatabase(engine)
from llama_index.core.query_pipeline import QueryPipeline
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip')
get_ipython().system('unzip ./chinook.zip')
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
callback_manager = CallbackManager()
Settings.callback_manager = callback_manager
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.tools import QueryEngineTool
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["albums", "tracks", "artists"],
verbose=True,
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=sql_query_engine,
name="sql_tool",
description=(
"Useful for translating a natural language query into a SQL query"
),
)
from llama_index.core.query_pipeline import QueryPipeline as QP
qp = QP(verbose=True)
from llama_index.core.agent.react.types import (
ActionReasoningStep,
ObservationReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.agent import Task, AgentChatResponse
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
CustomAgentComponent,
QueryComponent,
ToolRunnerComponent,
)
from llama_index.core.llms import MessageRole
from typing import Dict, Any, Optional, Tuple, List, cast
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]:
"""Agent input function.
Returns:
A Dictionary of output keys and values. If you are specifying
src_key when defining links between this component and other
components, make sure the src_key matches the specified output_key.
"""
if "current_reasoning" not in state:
state["current_reasoning"] = []
reasoning_step = ObservationReasoningStep(observation=task.input)
state["current_reasoning"].append(reasoning_step)
return {"input": task.input}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core.agent import ReActChatFormatter
from llama_index.core.query_pipeline import InputComponent, Link
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool
def react_prompt_fn(
task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool]
) -> List[ChatMessage]:
chat_formatter = ReActChatFormatter()
return chat_formatter.format(
tools,
chat_history=task.memory.get() + state["memory"].get_all(),
current_reasoning=state["current_reasoning"],
)
react_prompt_component = AgentFnComponent(
fn=react_prompt_fn, partial_dict={"tools": [sql_tool]}
)
from typing import Set, Optional
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.llms import ChatResponse
from llama_index.core.agent.types import Task
def parse_react_output_fn(
task: Task, state: Dict[str, Any], chat_response: ChatResponse
):
"""Parse ReAct output into a reasoning step."""
output_parser = ReActOutputParser()
reasoning_step = output_parser.parse(chat_response.message.content)
return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step}
parse_react_output = AgentFnComponent(fn=parse_react_output_fn)
def run_tool_fn(
task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep
):
"""Run tool and process tool output."""
tool_runner_component = ToolRunnerComponent(
[sql_tool], callback_manager=task.callback_manager
)
tool_output = tool_runner_component.run_component(
tool_name=reasoning_step.action,
tool_input=reasoning_step.action_input,
)
observation_step = ObservationReasoningStep(observation=str(tool_output))
state["current_reasoning"].append(observation_step)
return {"response_str": observation_step.get_content(), "is_done": False}
run_tool = | AgentFnComponent(fn=run_tool_fn) | llama_index.core.query_pipeline.AgentFnComponent |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-tables-chain-of-table-base')
get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip')
get_ipython().system('unzip data.zip')
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/200-csv/3.csv")
df
from llama_index.packs.tables.chain_of_table.base import (
ChainOfTableQueryEngine,
serialize_table,
)
from llama_index.core.llama_pack import download_llama_pack
download_llama_pack(
"ChainOfTablePack",
"./chain_of_table_pack",
skip_load=True,
)
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4-1106-preview")
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
import pandas as pd
df = pd.read_csv("~/Downloads/WikiTableQuestions/csv/200-csv/11.csv")
df
query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True)
response = query_engine.query("Who won best Director in the 1972 Academy Awards?")
str(response.response)
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/200-csv/42.csv")
df
query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True)
response = query_engine.query("What was the precipitation in inches during June?")
str(response)
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline
prompt_str = """\
Here's a serialized table.
{serialized_table}
Given this table please answer the question: {question}
Answer: """
prompt = PromptTemplate(prompt_str)
prompt_c = prompt.as_query_component(
    partial={"serialized_table": serialize_table(df)}
)
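# Hedged baseline comparison (a minimal sketch using the QueryPipeline imported above):
# feed the fully serialized table straight to the LLM and ask the same question, to
# contrast with the chain-of-table engine's answer.
qp = QueryPipeline(chain=[prompt_c, llm])
response = qp.run(question="What was the precipitation in inches during June?")
print(str(response))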
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import camelot
from llama_index.core import VectorStoreIndex
from llama_index.core.query_engine import PandasQueryEngine
from llama_index.core.schema import IndexNode
from llama_index.llms.openai import OpenAI
from llama_index.readers.file import PyMuPDFReader
from typing import List
import os
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
file_path = "billionaires_page.pdf"
reader = PyMuPDFReader()
docs = reader.load(file_path)
def get_tables(path: str, pages: List[int]):
table_dfs = []
for page in pages:
table_list = camelot.read_pdf(path, pages=str(page))
table_df = table_list[0].df
table_df = (
table_df.rename(columns=table_df.iloc[0])
.drop(table_df.index[0])
.reset_index(drop=True)
)
table_dfs.append(table_df)
return table_dfs
table_dfs = get_tables(file_path, pages=[3, 25])
table_dfs[0]
table_dfs[1]
llm = OpenAI(model="gpt-4")
df_query_engines = [
PandasQueryEngine(table_df, llm=llm) for table_df in table_dfs
]
response = df_query_engines[0].query(
"What's the net worth of the second richest billionaire in 2023?"
)
print(str(response))
response = df_query_engines[1].query(
"How many billionaires were there in 2009?"
)
print(str(response))
from llama_index.core import Settings
doc_nodes = Settings.node_parser.get_nodes_from_documents(docs)
summaries = [
(
"This node provides information about the world's richest billionaires"
" in 2023"
),
(
"This node provides information on the number of billionaires and"
" their combined net worth from 2000 to 2023."
),
]
df_nodes = [
    IndexNode(text=summary, index_id=f"pandas{idx}")
    for idx, summary in enumerate(summaries)
]
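# Hedged sketch of the recursive-retrieval wiring (standard RecursiveRetriever usage; the
# original notebook's exact arguments may differ): index the document nodes together with
# the IndexNode summaries, and route any retrieved IndexNode to its pandas query engine.
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine

df_id_query_engine_mapping = {
    f"pandas{idx}": df_query_engine
    for idx, df_query_engine in enumerate(df_query_engines)
}
vector_index = VectorStoreIndex(doc_nodes + df_nodes)
vector_retriever = vector_index.as_retriever(similarity_top_k=1)
recursive_retriever = RecursiveRetriever(
    "vector",
    retriever_dict={"vector": vector_retriever},
    query_engine_dict=df_id_query_engine_mapping,
    verbose=True,
)
query_engine = RetrieverQueryEngine.from_args(recursive_retriever)
response = query_engine.query(
    "What's the net worth of the second richest billionaire in 2023?"
)
print(str(response))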