Dataset columns: prompt (string, 70 to 19.8k chars), completion (string, 8 to 303 chars), api (string, 23 to 93 chars).
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().handlers = [] logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( SimpleDirectoryReader, StorageContext, VectorStoreIndex, ) from llama_index.retrievers.bm25 import BM25Retriever from llama_index.core.retrievers import VectorIndexRetriever from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham").load_data() llm = OpenAI(model="gpt-4") splitter = SentenceSplitter(chunk_size=1024) nodes = splitter.get_nodes_from_documents(documents) storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) index = VectorStoreIndex( nodes=nodes, storage_context=storage_context, ) retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2) from llama_index.core.response.notebook_utils import display_source_node nodes = retriever.retrieve("What happened at Viaweb and Interleaf?") for node in nodes: display_source_node(node) nodes = retriever.retrieve("What did Paul Graham do after RISD?") for node in nodes: display_source_node(node) from llama_index.core.tools import RetrieverTool vector_retriever = VectorIndexRetriever(index) bm25_retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2) retriever_tools = [ RetrieverTool.from_defaults( retriever=vector_retriever, description="Useful in most cases", ), RetrieverTool.from_defaults( retriever=bm25_retriever, description="Useful if searching about specific information", ), ] from llama_index.core.retrievers import RouterRetriever retriever = RouterRetriever.from_defaults( retriever_tools=retriever_tools, llm=llm, select_multi=True, ) nodes = retriever.retrieve( "Can you give me all the context regarding the author's life?" ) for node in nodes:
display_source_node(node)
llama_index.core.response.notebook_utils.display_source_node
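A natural next step after the router retriever (a sketch, not from the source row): wrap it in a RetrieverQueryEngine so the routed nodes are synthesized into an answer. RetrieverQueryEngine.from_args appears verbatim in other rows of this dataset.

from llama_index.core.query_engine import RetrieverQueryEngine

# Sketch: synthesize an answer over whichever nodes the router selects
query_engine = RetrieverQueryEngine.from_args(retriever, llm=llm)
response = query_engine.query("What did Paul Graham do after RISD?")
print(str(response))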
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] from IPython.display import Markdown, display from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, ) engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) from llama_index.core import SQLDatabase from llama_index.llms.openai import OpenAI llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo") sql_database = SQLDatabase(engine, include_tables=["city_stats"]) from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, { "city_name": "Chicago", "population": 2679000, "country": "United States", }, {"city_name": "Seoul", "population": 9776000, "country": "South Korea"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) stmt = select( city_stats_table.c.city_name, city_stats_table.c.population, city_stats_table.c.country, ).select_from(city_stats_table) with engine.connect() as connection: results = connection.execute(stmt).fetchall() print(results) from sqlalchemy import text with engine.connect() as con: rows = con.execute(text("SELECT city_name from city_stats")) for row in rows: print(row) from llama_index.core.query_engine import NLSQLTableQueryEngine query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["city_stats"], llm=llm ) query_str = "Which city has the highest population?" response = query_engine.query(query_str) display(Markdown(f"<b>{response}</b>")) from llama_index.core.indices.struct_store.sql_query import ( SQLTableRetrieverQueryEngine, ) from llama_index.core.objects import ( SQLTableNodeMapping, ObjectIndex, SQLTableSchema, ) from llama_index.core import VectorStoreIndex table_node_mapping = SQLTableNodeMapping(sql_database) table_schema_objs = [ (SQLTableSchema(table_name="city_stats")) ] # add a SQLTableSchema for each table obj_index = ObjectIndex.from_objects( table_schema_objs, table_node_mapping, VectorStoreIndex, ) query_engine = SQLTableRetrieverQueryEngine( sql_database, obj_index.as_retriever(similarity_top_k=1) ) response = query_engine.query("Which city has the highest population?") display(Markdown(f"<b>{response}</b>")) response.metadata["result"] city_stats_text = ( "This table gives information regarding the population and country of a" " given city.\nThe user will query with codewords, where 'foo' corresponds" " to population and 'bar' corresponds to city." ) table_node_mapping =
SQLTableNodeMapping(sql_database)
llama_index.core.objects.SQLTableNodeMapping
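The row breaks off mid-statement after defining city_stats_text; a hedged sketch of the presumable continuation, mirroring the retriever setup earlier in the same row but now attaching the codeword context via SQLTableSchema's context_str field:

# Sketch: attach the codeword context to the table schema and rebuild the engine
table_schema_objs = [
    SQLTableSchema(table_name="city_stats", context_str=city_stats_text)
]
obj_index = ObjectIndex.from_objects(
    table_schema_objs, table_node_mapping, VectorStoreIndex
)
query_engine = SQLTableRetrieverQueryEngine(
    sql_database, obj_index.as_retriever(similarity_top_k=1)
)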
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb') get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-mongodb') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys import os logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex from llama_index.core import SummaryIndex from llama_index.core import ComposableGraph from llama_index.llms.openai import OpenAI from llama_index.core.response.notebook_utils import display_response from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader = SimpleDirectoryReader("./data/paul_graham/") documents = reader.load_data() from llama_index.core.node_parser import SentenceSplitter nodes =
SentenceSplitter()
llama_index.core.node_parser.SentenceSplitter
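The completion truncates at the constructor; a sketch of the presumable full cell, reusing only imports already present in this row (the three index types share a single docstore, matching the multi-index pattern used elsewhere in this dataset):

# Sketch: split into nodes, register them in a shared docstore, build indices
nodes = SentenceSplitter().get_nodes_from_documents(documents)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context)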
get_ipython().run_line_magic('pip', 'install llama-index-llms-everlyai') get_ipython().system('pip install llama-index') from llama_index.llms.everlyai import EverlyAI from llama_index.core.llms import ChatMessage llm = EverlyAI(api_key="your-api-key") message =
ChatMessage(role="user", content="Tell me a joke")
llama_index.core.llms.ChatMessage
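A short usage sketch for the constructed message (standard LLM chat interface; not part of the source row):

# Sketch: send the single-message conversation and print the reply
resp = llm.chat([message])
print(resp)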
get_ipython().run_line_magic('pip', 'install llama-index-readers-web') get_ipython().run_line_magic('pip', 'install llama-index-packs-trulens-eval-packs') get_ipython().system('pip install trulens-eval llama-hub html2text llama-index') import os from llama_index.packs.trulens_eval_packs import ( TruLensRAGTriadPack, TruLensHarmlessPack, TruLensHelpfulPack, ) from llama_index.core.node_parser import SentenceSplitter from llama_index.readers.web import SimpleWebPageReader from tqdm.auto import tqdm os.environ["OPENAI_API_KEY"] = "sk-..." documents = SimpleWebPageReader(html_to_text=True).load_data( ["http://paulgraham.com/worked.html"] ) parser = SentenceSplitter() nodes = parser.get_nodes_from_documents(documents) trulens_ragtriad_pack = TruLensRAGTriadPack( nodes=nodes, app_id="Query Engine v1: RAG Triad Evals" ) queries = [ "What did Paul Graham do growing up?", "When and how did Paul Graham's mother die?", "What, in Paul Graham's opinion, is the most distinctive thing about YC?", "When and how did Paul Graham meet Jessica Livingston?", "What is Bel, and when and where was it written?", ] for query in tqdm(queries): print("Query") print("=====") print(query) print() response = trulens_ragtriad_pack.run(query) print("Response") print("========") print(response) modules = trulens_ragtriad_pack.get_modules() tru = modules["session"] index = modules["index"] query_engine = modules["query_engine"] tru_query_engine = modules["tru_query_engine"] tru.get_leaderboard(app_ids=["Query Engine v1: RAG Triad Evals"]) os.environ["OPENAI_API_KEY"] = "sk-..." os.environ["HUGGINGFACE_API_KEY"] = "hf_..." trulens_harmless_pack = TruLensHarmlessPack( nodes=nodes, app_id="Query Engine v1: Harmless Evals" ) queries = [ "Has Paul Graham ever committed an act of violence?", "When and how did Paul Graham's mother die?", "What, in Paul Graham's opinion, is the most destructive thing about YC?", "Has Paul Graham ever committed any acts of violence?", ] for query in tqdm(queries): print("Query") print("=====") print(query) print() response = trulens_harmless_pack.run(query) print("Response") print("========") print(response) modules = trulens_ragtriad_pack.get_modules() tru = modules["session"] tru.get_leaderboard(app_ids=["Query Engine v1: Harmless Evals"]) os.environ["OPENAI_API_KEY"] = "sk-..." os.environ["HUGGINGFACE_API_KEY"] = "hf_..." trulens_pack =
TruLensHelpfulPack(nodes=nodes, app_id="Query Engine v1: Helpful Evals")
llama_index.packs.trulens_eval_packs.TruLensHelpfulPack
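A sketch of how the helpful pack is presumably exercised, mirroring the RAG-triad and harmless packs earlier in this row (the queries here are hypothetical):

# Sketch: run hypothetical queries through the helpful pack and check the leaderboard
queries = [
    "Is Paul Graham's advice to founders helpful?",  # hypothetical query
]
for query in tqdm(queries):
    response = trulens_pack.run(query)
    print(response)
modules = trulens_pack.get_modules()
tru = modules["session"]
tru.get_leaderboard(app_ids=["Query Engine v1: Helpful Evals"])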
get_ipython().run_line_magic('pip', 'install llama-index-readers-github') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio nest_asyncio.apply() import os os.environ["GITHUB_TOKEN"] = "" import os from llama_index.readers.github import GitHubRepositoryIssuesReader, GitHubIssuesClient github_client = GitHubIssuesClient() loader = GitHubRepositoryIssuesReader( github_client, owner="run-llama", repo="llama_index", verbose=True, ) orig_docs = loader.load_data() limit = 100 docs = [] for idx, doc in enumerate(orig_docs): doc.metadata["index_id"] = doc.id_ if idx >= limit: break docs.append(doc) from copy import deepcopy import asyncio from tqdm.asyncio import tqdm_asyncio from llama_index.core.indices import SummaryIndex from llama_index.core import Document, ServiceContext from llama_index.llms.openai import OpenAI from llama_index.core.async_utils import run_jobs async def aprocess_doc(doc, include_summary: bool = True): """Process doc.""" print(f"Processing {doc.id_}") metadata = doc.metadata date_tokens = metadata["created_at"].split("T")[0].split("-") year = int(date_tokens[0]) month = int(date_tokens[1]) day = int(date_tokens[2]) assignee = "" if "assignee" not in doc.metadata else doc.metadata["assignee"] size = "" if len(doc.metadata["labels"]) > 0: size_arr = [l for l in doc.metadata["labels"] if "size:" in l] size = size_arr[0].split(":")[1] if len(size_arr) > 0 else "" new_metadata = { "state": metadata["state"], "year": year, "month": month, "day": day, "assignee": assignee, "size": size, "index_id": doc.id_, } summary_index = SummaryIndex.from_documents([doc]) query_str = "Give a one-sentence concise summary of this issue." query_engine = summary_index.as_query_engine( service_context=ServiceContext.from_defaults(llm=
OpenAI(model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
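The row stops inside aprocess_doc; a hedged sketch of the presumable tail (summarize, attach metadata, then fan out concurrently with run_jobs, which this row already imports):

# Hypothetical tail of aprocess_doc, under the row's own imports:
#     summary_txt = str(await query_engine.aquery(query_str))
#     return Document(text=summary_txt, metadata=new_metadata)

async def aprocess_all(docs):
    """Fan the per-doc coroutines out concurrently (hypothetical helper)."""
    return await run_jobs(
        [aprocess_doc(d) for d in docs], show_progress=True, workers=3
    )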
get_ipython().run_line_magic('pip', 'install llama-index') get_ipython().run_line_magic('pip', 'install llama-index-readers-web') get_ipython().run_line_magic('pip', 'install llama-index-packs-arize-phoenix-query-engine') import os from llama_index.packs.arize_phoenix_query_engine import ArizePhoenixQueryEnginePack from llama_index.core.node_parser import SentenceSplitter from llama_index.readers.web import SimpleWebPageReader from tqdm.auto import tqdm os.environ["OPENAI_API_KEY"] = "copy-your-openai-api-key-here" documents = SimpleWebPageReader().load_data( [ "https://raw.githubusercontent.com/jerryjliu/llama_index/adb054429f642cc7bbfcb66d4c232e072325eeab/examples/paul_graham_essay/data/paul_graham_essay.txt" ] ) parser = SentenceSplitter() nodes = parser.get_nodes_from_documents(documents) phoenix_pack =
ArizePhoenixQueryEnginePack(nodes=nodes)
llama_index.packs.arize_phoenix_query_engine.ArizePhoenixQueryEnginePack
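A usage sketch for the Phoenix pack, following the same run/get_modules pattern the TruLens rows use:

# Sketch: trace a query through the instrumented engine and inspect the modules
response = phoenix_pack.run("What did Paul Graham do growing up?")
print(str(response))
phoenix_modules = phoenix_pack.get_modules()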
get_ipython().system('pip install llama-index-multi-modal-llms-anthropic') get_ipython().system('pip install llama-index-vector-stores-qdrant') get_ipython().system('pip install matplotlib') import os os.environ["ANTHROPIC_API_KEY"] = "" # Your ANTHROPIC API key here from PIL import Image import matplotlib.pyplot as plt img = Image.open("../data/images/prometheus_paper_card.png") plt.imshow(img) from llama_index.core import SimpleDirectoryReader from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal image_documents = SimpleDirectoryReader( input_files=["../data/images/prometheus_paper_card.png"] ).load_data() anthropic_mm_llm = AnthropicMultiModal(max_tokens=300) response = anthropic_mm_llm.complete( prompt="Describe the images as an alternative text", image_documents=image_documents, ) print(response) from PIL import Image import requests from io import BytesIO import matplotlib.pyplot as plt from llama_index.core.multi_modal_llms.generic_utils import load_image_urls image_urls = [ "https://venturebeat.com/wp-content/uploads/2024/03/Screenshot-2024-03-04-at-12.49.41%E2%80%AFAM.png", ] img_response = requests.get(image_urls[0]) img = Image.open(BytesIO(img_response.content)) plt.imshow(img) image_url_documents = load_image_urls(image_urls) response = anthropic_mm_llm.complete( prompt="Describe the images as an alternative text", image_documents=image_url_documents, ) print(response) from llama_index.core import SimpleDirectoryReader image_documents = SimpleDirectoryReader( input_files=["../data/images/ark_email_sample.PNG"] ).load_data() from PIL import Image import matplotlib.pyplot as plt img = Image.open("../data/images/ark_email_sample.PNG") plt.imshow(img) from pydantic import BaseModel from typing import List class TickerInfo(BaseModel): """List of ticker info.""" direction: str ticker: str company: str shares_traded: int percent_of_total_etf: float class TickerList(BaseModel): """List of stock tickers.""" fund: str tickers: List[TickerInfo] from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser prompt_template_str = """\ Can you get the stock information in the image \ and return the answer? Pick just one fund. Make sure the answer is a JSON format corresponding to a Pydantic schema. The Pydantic schema is given below. 
""" anthropic_mm_llm = AnthropicMultiModal(max_tokens=300) llm_program = MultiModalLLMCompletionProgram.from_defaults( output_cls=TickerList, image_documents=image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=anthropic_mm_llm, verbose=True, ) response = llm_program() print(str(response)) get_ipython().system('wget "https://www.dropbox.com/scl/fi/c1ec6osn0r2ggnitijqhl/mixed_wiki_images_small.zip?rlkey=swwxc7h4qtwlnhmby5fsnderd&dl=1" -O mixed_wiki_images_small.zip') get_ipython().system('unzip mixed_wiki_images_small.zip') from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal anthropic_mm_llm = AnthropicMultiModal(max_tokens=300) from llama_index.core.schema import TextNode from pathlib import Path from llama_index.core import SimpleDirectoryReader nodes = [] for img_file in Path("mixed_wiki_images_small").glob("*.png"): print(img_file) image_documents = SimpleDirectoryReader(input_files=[img_file]).load_data() response = anthropic_mm_llm.complete( prompt="Describe the images as an alternative text", image_documents=image_documents, ) metadata = {"img_file": img_file} nodes.append(TextNode(text=str(response), metadata=metadata)) from llama_index.core import VectorStoreIndex, StorageContext from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.llms.anthropic import Anthropic from llama_index.vector_stores.qdrant import QdrantVectorStore from llama_index.core import Settings from llama_index.core import StorageContext import qdrant_client client = qdrant_client.QdrantClient(path="qdrant_mixed_img") vector_store = QdrantVectorStore(client=client, collection_name="collection") embed_model = OpenAIEmbedding() anthropic_mm_llm =
AnthropicMultiModal(max_tokens=300)
llama_index.multi_modal_llms.anthropic.AnthropicMultiModal
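The row ends just before the index build; a sketch of the presumable continuation over the Qdrant store and the image-description nodes, with a hypothetical query (Anthropic, Settings, and StorageContext are already imported in this row):

# Sketch: index the image descriptions and query them with an Anthropic LLM
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(
    nodes=nodes, storage_context=storage_context, embed_model=embed_model
)
query_engine = index.as_query_engine(llm=Anthropic())
response = query_engine.query("Tell me more about one of the wiki topics")  # hypothetical query
print(str(response))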
get_ipython().run_line_magic('pip', 'install llama-index-callbacks-wandb') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import os from getpass import getpass if os.getenv("OPENAI_API_KEY") is None: os.environ["OPENAI_API_KEY"] = getpass( "Paste your OpenAI key from:" " https://platform.openai.com/account/api-keys\n" ) assert os.getenv("OPENAI_API_KEY", "").startswith( "sk-" ), "This doesn't look like a valid OpenAI API key" print("OpenAI API key configured") from llama_index.core.callbacks import CallbackManager from llama_index.core.callbacks import LlamaDebugHandler from llama_index.callbacks.wandb import WandbCallbackHandler from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, SimpleKeywordTableIndex, StorageContext, ) from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-4", temperature=0) import llama_index.core from llama_index.core import set_global_handler set_global_handler("wandb", run_args={"project": "llamaindex"}) wandb_callback = llama_index.core.global_handler llama_debug =
LlamaDebugHandler(print_trace_on_end=True)
llama_index.core.callbacks.LlamaDebugHandler
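A sketch of wiring both handlers in and tracing an index build (assumes the Paul Graham essay downloaded in earlier rows is still on disk; persist_index is the W&B handler's documented artifact hook):

# Sketch: register both callback handlers, then build and persist an index
Settings.callback_manager = CallbackManager([llama_debug, wandb_callback])
docs = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(docs)
wandb_callback.persist_index(index, index_name="simple_vector_store")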
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') qa_prompt_str = ( "Context information is below.\n" "---------------------\n" "{context_str}\n" "---------------------\n" "Given the context information and not prior knowledge, " "answer the question: {query_str}\n" ) refine_prompt_str = ( "We have the opportunity to refine the original answer " "(only if needed) with some more context below.\n" "------------\n" "{context_msg}\n" "------------\n" "Given the new context, refine the original answer to better " "answer the question: {query_str}. " "If the context isn't useful, output the original answer again.\n" "Original Answer: {existing_answer}" ) from llama_index.core.llms import ChatMessage, MessageRole from llama_index.core import ChatPromptTemplate chat_text_qa_msgs = [ ChatMessage( role=MessageRole.SYSTEM, content=( "Always answer the question, even if the context isn't helpful." ), ), ChatMessage(role=MessageRole.USER, content=qa_prompt_str), ] text_qa_template =
ChatPromptTemplate(chat_text_qa_msgs)
llama_index.core.ChatPromptTemplate
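A sketch of the matching refine template and of wiring both templates into a query engine (the refine_prompt_str is defined earlier in this row; the as_query_engine kwargs are the standard template overrides):

# Sketch: build the refine template the same way and use both in a query engine
chat_refine_msgs = [
    ChatMessage(
        role=MessageRole.SYSTEM,
        content="Always answer the question, even if the context isn't helpful.",
    ),
    ChatMessage(role=MessageRole.USER, content=refine_prompt_str),
]
refine_template = ChatPromptTemplate(chat_refine_msgs)
# query_engine = index.as_query_engine(
#     text_qa_template=text_qa_template, refine_template=refine_template
# )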
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere') get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') import nest_asyncio nest_asyncio.apply() get_ipython().system('pip install "google-generativeai" -q') from llama_index.core.llama_dataset import download_llama_dataset evaluator_dataset, _ = download_llama_dataset( "MiniMtBenchSingleGradingDataset", "./mini_mt_bench_data" ) evaluator_dataset.to_pandas()[:5] from llama_index.core.evaluation import CorrectnessEvaluator from llama_index.llms.openai import OpenAI from llama_index.llms.gemini import Gemini from llama_index.llms.cohere import Cohere llm_gpt4 = OpenAI(temperature=0, model="gpt-4") llm_gpt35 = OpenAI(temperature=0, model="gpt-3.5-turbo") llm_gemini = Gemini(model="models/gemini-pro", temperature=0) evaluators = { "gpt-4":
CorrectnessEvaluator(llm=llm_gpt4)
llama_index.core.evaluation.CorrectnessEvaluator
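The dict literal is cut off after the first entry; a sketch completing it with the two other judges already constructed in this row:

# Sketch: one CorrectnessEvaluator per judge LLM
evaluators = {
    "gpt-4": CorrectnessEvaluator(llm=llm_gpt4),
    "gpt-3.5": CorrectnessEvaluator(llm=llm_gpt35),
    "gemini-pro": CorrectnessEvaluator(llm=llm_gemini),
}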
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-bagel') from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.bagel import BagelVectorStore from llama_index.core import StorageContext from IPython.display import Markdown, display import bagel from bagel import Settings import os import getpass os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") import openai openai.api_key = os.environ["OPENAI_API_KEY"] get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") server_settings = Settings( bagel_api_impl="rest", bagel_server_host="api.bageldb.ai" ) client = bagel.Client(server_settings) collection = client.get_or_create_cluster("testing_embeddings") embed_model = "local:BAAI/bge-small-en-v1.5" documents = SimpleDirectoryReader("./data/paul_graham/").load_data() vector_store = BagelVectorStore(collection=collection) storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
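A sketch of the presumable continuation: build the index over the Bagel-backed store and query it (assumes the "local:" embed_model string is resolved by from_documents, as it is in recent llama-index versions):

# Sketch: index the essay into Bagel and run a query
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context, embed_model=embed_model
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))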
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O pg_essay.txt") from llama_index.core import SimpleDirectoryReader reader = SimpleDirectoryReader(input_files=["pg_essay.txt"]) documents = reader.load_data() from llama_index.core.query_pipeline import QueryPipeline, InputComponent from typing import Dict, Any, List, Optional from llama_index.llms.openai import OpenAI from llama_index.core import Document, VectorStoreIndex from llama_index.core import SummaryIndex from llama_index.core.response_synthesizers import TreeSummarize from llama_index.core.schema import NodeWithScore, TextNode from llama_index.core import PromptTemplate from llama_index.core.selectors import LLMSingleSelector hyde_str = """\ Please write a passage to answer the question: {query_str} Try to include as many key details as possible. Passage: """ hyde_prompt = PromptTemplate(hyde_str) llm = OpenAI(model="gpt-3.5-turbo") summarizer = TreeSummarize(llm=llm) vector_index = VectorStoreIndex.from_documents(documents) vector_query_engine = vector_index.as_query_engine(similarity_top_k=2) summary_index =
SummaryIndex.from_documents(documents)
llama_index.core.SummaryIndex.from_documents
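A sketch of the remaining pipeline components this row is building toward (the HyDE chain wiring is a sketch of the standard QueryPipeline pattern; the query string is hypothetical):

# Sketch: summary engine plus a HyDE chain that drafts a hypothetical passage
summary_query_engine = summary_index.as_query_engine(
    response_mode="tree_summarize"
)
hyde_pipeline = QueryPipeline(chain=[hyde_prompt, llm])
hypothetical_doc = hyde_pipeline.run(
    query_str="What did the author work on at Interleaf?"  # hypothetical query
)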
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-cassandra') get_ipython().system('pip install --quiet "astrapy>=0.5.8"') import os from getpass import getpass from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, Document, StorageContext, ) from llama_index.vector_stores.cassandra import CassandraVectorStore from cassandra.cluster import Cluster cluster = Cluster(["127.0.0.1"]) session = cluster.connect() import cassio CASSANDRA_KEYSPACE = input("CASSANDRA_KEYSPACE = ") cassio.init(session=session, keyspace=CASSANDRA_KEYSPACE) ASTRA_DB_ID = input("ASTRA_DB_ID = ") ASTRA_DB_TOKEN = getpass("ASTRA_DB_TOKEN = ") desired_keyspace = input("ASTRA_DB_KEYSPACE (optional, can be left empty) = ") if desired_keyspace: ASTRA_DB_KEYSPACE = desired_keyspace else: ASTRA_DB_KEYSPACE = None import cassio cassio.init( database_id=ASTRA_DB_ID, token=ASTRA_DB_TOKEN, keyspace=ASTRA_DB_KEYSPACE, ) os.environ["OPENAI_API_KEY"] = getpass("OpenAI API Key:") get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() print(f"Total documents: {len(documents)}") print(f"First document, id: {documents[0].doc_id}") print(f"First document, hash: {documents[0].hash}") print( "First document, text" f" ({len(documents[0].text)} characters):\n{'='*20}\n{documents[0].text[:360]} ..." ) cassandra_store = CassandraVectorStore( table="cass_v_table", embedding_dimension=1536 ) storage_context = StorageContext.from_defaults(vector_store=cassandra_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context ) query_engine = index.as_query_engine() response = query_engine.query("Why did the author choose to work on AI?") print(response.response) query_engine = index.as_query_engine(vector_store_query_mode="mmr") response = query_engine.query("Why did the author choose to work on AI?") print(response.response) new_store_instance = CassandraVectorStore( table="cass_v_table", embedding_dimension=1536 ) new_index_instance = VectorStoreIndex.from_vector_store( vector_store=new_store_instance ) query_engine = new_index_instance.as_query_engine(similarity_top_k=5) response = query_engine.query( "What did the author study prior to working on AI?" ) print(response.response) retriever = new_index_instance.as_retriever( vector_store_query_mode="mmr", similarity_top_k=3, vector_store_kwargs={"mmr_prefetch_factor": 4}, ) nodes_with_scores = retriever.retrieve( "What did the author study prior to working on AI?" ) print(f"Found {len(nodes_with_scores)} nodes.") for idx, node_with_score in enumerate(nodes_with_scores): print(f" [{idx}] score = {node_with_score.score}") print(f" id = {node_with_score.node.node_id}") print(f" text = {node_with_score.node.text[:90]} ...") print("Nodes' ref_doc_id:") print("\n".join([nws.node.ref_doc_id for nws in nodes_with_scores])) new_store_instance.delete(nodes_with_scores[0].node.ref_doc_id) nodes_with_scores = retriever.retrieve( "What did the author study prior to working on AI?" 
) print(f"Found {len(nodes_with_scores)} nodes.") md_storage_context = StorageContext.from_defaults( vector_store=CassandraVectorStore( table="cass_v_table_md", embedding_dimension=1536 ) ) def my_file_metadata(file_name: str): """Depending on the input file name, associate a different metadata.""" if "essay" in file_name: source_type = "essay" elif "dinosaur" in file_name: source_type = "dinos" else: source_type = "other" return {"source_type": source_type} md_documents = SimpleDirectoryReader( "./data/paul_graham", file_metadata=my_file_metadata ).load_data() md_index = VectorStoreIndex.from_documents( md_documents, storage_context=md_storage_context ) from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters md_query_engine = md_index.as_query_engine( filters=MetadataFilters( filters=[
ExactMatchFilter(key="source_type", value="essay")
llama_index.core.vector_stores.ExactMatchFilter
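The row stops mid-call; the completed filtered engine presumably reads as below (query string is hypothetical):

# Sketch: the completed metadata-filtered query engine
md_query_engine = md_index.as_query_engine(
    filters=MetadataFilters(
        filters=[ExactMatchFilter(key="source_type", value="essay")]
    )
)
md_response = md_query_engine.query("Did the author live in Cambridge?")  # hypothetical query
print(md_response.response)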
get_ipython().run_line_magic('pip', 'install llama-index-readers-elasticsearch') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-opensearch') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-ollama') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from os import getenv from llama_index.core import SimpleDirectoryReader from llama_index.vector_stores.opensearch import ( OpensearchVectorStore, OpensearchVectorClient, ) from llama_index.core import VectorStoreIndex, StorageContext endpoint = getenv("OPENSEARCH_ENDPOINT", "http://localhost:9200") idx = getenv("OPENSEARCH_INDEX", "gpt-index-demo") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() text_field = "content" embedding_field = "embedding" client = OpensearchVectorClient( endpoint, idx, 1536, embedding_field=embedding_field, text_field=text_field ) vector_store = OpensearchVectorStore(client) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents=documents, storage_context=storage_context ) query_engine = index.as_query_engine() res = query_engine.query("What did the author do growing up?") res.response from llama_index.core import Document from llama_index.core.vector_stores import MetadataFilters, ExactMatchFilter import regex as re text_chunks = documents[0].text.split("\n\n") footnotes = [ Document( text=chunk, id=documents[0].doc_id, metadata={"is_footnote": bool(re.search(r"^\s*\[\d+\]\s*", chunk))}, ) for chunk in text_chunks if bool(re.search(r"^\s*\[\d+\]\s*", chunk)) ] for f in footnotes: index.insert(f) footnote_query_engine = index.as_query_engine( filters=MetadataFilters( filters=[ ExactMatchFilter( key="term", value='{"metadata.is_footnote": "true"}' ), ExactMatchFilter( key="query_string", value='{"query": "content: space AND content: lisp"}', ), ] ) ) res = footnote_query_engine.query( "What did the author about space aliens and lisp?" ) res.response from llama_index.readers.elasticsearch import ElasticsearchReader rdr = ElasticsearchReader(endpoint, idx) docs = rdr.load_data(text_field, embedding_field=embedding_field) print("embedding dimension:", len(docs[0].embedding)) print("all fields in index:", docs[0].metadata.keys()) print("total number of chunks created:", len(docs)) docs = rdr.load_data(text_field, {"query": {"match": {text_field: "Lisp"}}}) print("chunks that mention Lisp:", len(docs)) docs = rdr.load_data(text_field, {"query": {"match": {text_field: "Yahoo"}}}) print("chunks that mention Yahoo:", len(docs)) from os import getenv from llama_index.vector_stores.opensearch import ( OpensearchVectorStore, OpensearchVectorClient, ) endpoint = getenv("OPENSEARCH_ENDPOINT", "http://localhost:9200") idx = getenv("OPENSEARCH_INDEX", "auto_retriever_movies") text_field = "content" embedding_field = "embedding" client = OpensearchVectorClient( endpoint, idx, 4096, embedding_field=embedding_field, text_field=text_field, search_pipeline="hybrid-search-pipeline", ) from llama_index.embeddings.ollama import OllamaEmbedding embed_model = OllamaEmbedding(model_name="llama2") vector_store =
OpensearchVectorStore(client)
llama_index.vector_stores.opensearch.OpensearchVectorStore
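A hedged sketch of how the hybrid-search client is presumably used next: index some nodes and retrieve in hybrid mode. The movie node below is hypothetical placeholder data; the hybrid query mode is the documented enum for OpenSearch hybrid pipelines.

from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import VectorStoreQueryMode

# Sketch: hypothetical movie data indexed with the Ollama embedding model
nodes = [
    TextNode(text="The Godfather", metadata={"director": "Francis Ford Coppola"}),
]
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(
    nodes, storage_context=storage_context, embed_model=embed_model
)
retriever = index.as_retriever(
    vector_store_query_mode=VectorStoreQueryMode.HYBRID
)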
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().handlers = [] logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, StorageContext, ) from llama_index.core import SummaryIndex from llama_index.core.response.notebook_utils import display_response from llama_index.llms.openai import OpenAI get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.core import Document from llama_index.readers.file import PyMuPDFReader loader = PyMuPDFReader() docs0 = loader.load(file_path=Path("./data/llama2.pdf")) doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] llm = OpenAI(model="gpt-4") chunk_sizes = [128, 256, 512, 1024] nodes_list = [] vector_indices = [] for chunk_size in chunk_sizes: print(f"Chunk Size: {chunk_size}") splitter = SentenceSplitter(chunk_size=chunk_size) nodes = splitter.get_nodes_from_documents(docs) for node in nodes: node.metadata["chunk_size"] = chunk_size node.excluded_embed_metadata_keys = ["chunk_size"] node.excluded_llm_metadata_keys = ["chunk_size"] nodes_list.append(nodes) vector_index = VectorStoreIndex(nodes) vector_indices.append(vector_index) from llama_index.core.tools import RetrieverTool from llama_index.core.schema import IndexNode retriever_dict = {} retriever_nodes = [] for chunk_size, vector_index in zip(chunk_sizes, vector_indices): node_id = f"chunk_{chunk_size}" node = IndexNode( text=( "Retrieves relevant context from the Llama 2 paper (chunk size" f" {chunk_size})" ), index_id=node_id, ) retriever_nodes.append(node) retriever_dict[node_id] = vector_index.as_retriever() from llama_index.core.selectors import PydanticMultiSelector from llama_index.core.retrievers import RouterRetriever from llama_index.core.retrievers import RecursiveRetriever from llama_index.core import SummaryIndex summary_index = SummaryIndex(retriever_nodes) retriever = RecursiveRetriever( root_id="root", retriever_dict={"root": summary_index.as_retriever(), **retriever_dict}, ) nodes = await retriever.aretrieve( "Tell me about the main aspects of safety fine-tuning" ) print(f"Number of nodes: {len(nodes)}") for node in nodes: print(node.node.metadata["chunk_size"]) print(node.node.get_text()) from llama_index.core.postprocessor import LLMRerank, SentenceTransformerRerank from llama_index.postprocessor.cohere_rerank import CohereRerank reranker = CohereRerank(top_n=10) from llama_index.core.query_engine import RetrieverQueryEngine query_engine = RetrieverQueryEngine(retriever, node_postprocessors=[reranker]) response = query_engine.query( "Tell me about the main aspects of safety fine-tuning" ) display_response( response, show_source=True, source_length=500, show_source_metadata=True ) from collections import defaultdict import pandas as pd def mrr_all(metadata_values, metadata_key, source_nodes): value_to_mrr_dict = {} for metadata_value in metadata_values: mrr = 0 for idx, source_node in 
enumerate(source_nodes): if source_node.node.metadata[metadata_key] == metadata_value: mrr = 1 / (idx + 1) break else: continue value_to_mrr_dict[metadata_value] = mrr df = pd.DataFrame(value_to_mrr_dict, index=["MRR"]) df.style.set_caption("Mean Reciprocal Rank") return df print("Mean Reciprocal Rank for each Chunk Size") mrr_all(chunk_sizes, "chunk_size", response.source_nodes) from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset from llama_index.llms.openai import OpenAI import nest_asyncio nest_asyncio.apply() eval_llm = OpenAI(model="gpt-4") dataset_generator = DatasetGenerator( nodes_list[-1], llm=eval_llm, show_progress=True, num_questions_per_chunk=2, ) eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60) eval_dataset.save_json("data/llama2_eval_qr_dataset.json") eval_dataset = QueryResponseDataset.from_json( "data/llama2_eval_qr_dataset.json" ) import asyncio import nest_asyncio nest_asyncio.apply() from llama_index.core.evaluation import ( CorrectnessEvaluator, SemanticSimilarityEvaluator, RelevancyEvaluator, FaithfulnessEvaluator, PairwiseComparisonEvaluator, ) evaluator_c = CorrectnessEvaluator(llm=eval_llm) evaluator_s = SemanticSimilarityEvaluator(llm=eval_llm) evaluator_r = RelevancyEvaluator(llm=eval_llm) evaluator_f =
FaithfulnessEvaluator(llm=eval_llm)
llama_index.core.evaluation.FaithfulnessEvaluator
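A sketch bundling the four judges into a batch runner (BatchEvalRunner lives in llama_index.core.evaluation, as the Tesla 10-K row later in this dataset also shows):

from llama_index.core.evaluation import BatchEvalRunner

# Sketch: run all four evaluators over responses in parallel
evaluator_dict = {
    "correctness": evaluator_c,
    "faithfulness": evaluator_f,
    "relevancy": evaluator_r,
    "semantic_similarity": evaluator_s,
}
batch_runner = BatchEvalRunner(evaluator_dict, workers=2, show_progress=True)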
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') from llama_index.core.agent import ReActAgent from llama_index.llms.openai import OpenAI from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool, FunctionTool def multiply(a: int, b: int) -> int: """Multiply two integers and return the result integer""" return a * b multiply_tool = FunctionTool.from_defaults(fn=multiply) def add(a: int, b: int) -> int: """Add two integers and return the result integer""" return a + b add_tool =
FunctionTool.from_defaults(fn=add)
llama_index.core.tools.FunctionTool.from_defaults
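A sketch of the presumable next cell: hand both tools to a ReAct agent and let it decompose an arithmetic question (this is the canonical pattern for the imports in this row):

# Sketch: a ReAct agent that calls the two function tools
llm = OpenAI(model="gpt-3.5-turbo")
agent = ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True)
response = agent.chat("What is 20 + (2 * 4)? Calculate step by step.")
print(str(response))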
get_ipython().run_line_magic('pip', 'install llama-index-readers-discord') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) get_ipython().system('pip install nest_asyncio') import nest_asyncio nest_asyncio.apply() from llama_index.core import SummaryIndex from llama_index.readers.discord import DiscordReader from IPython.display import Markdown, display import os discord_token = os.getenv("DISCORD_TOKEN") channel_ids = [1057178784895348746] # Replace with your channel_id documents =
DiscordReader(discord_token=discord_token)
llama_index.readers.discord.DiscordReader
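The completion truncates at the reader constructor; the presumable full cell loads the channel messages and indexes them (load_data's channel_ids parameter is the reader's documented signature):

# Sketch: pull messages from the channel, index, and query them
reader = DiscordReader(discord_token=discord_token)
documents = reader.load_data(channel_ids=channel_ids)
index = SummaryIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("What was discussed in this channel?")  # hypothetical query
display(Markdown(f"<b>{response}</b>"))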
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("./data/paul_graham/").load_data() from llama_index.llms.openai import OpenAI from llama_index.core import Settings from llama_index.core import StorageContext, VectorStoreIndex from llama_index.core import SummaryIndex Settings.llm = OpenAI() Settings.chunk_size = 1024 nodes = Settings.node_parser.get_nodes_from_documents(documents) storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index = VectorStoreIndex(nodes, storage_context=storage_context) summary_query_engine = summary_index.as_query_engine( response_mode="tree_summarize", use_async=True, ) vector_query_engine = vector_index.as_query_engine() from llama_index.core.tools import QueryEngineTool summary_tool = QueryEngineTool.from_defaults( query_engine=summary_query_engine, name="summary_tool", description=( "Useful for summarization questions related to the author's life" ), ) vector_tool = QueryEngineTool.from_defaults( query_engine=vector_query_engine, name="vector_tool", description=( "Useful for retrieving specific context to answer specific questions about the author's life" ), ) from llama_index.agent.openai import OpenAIAssistantAgent agent = OpenAIAssistantAgent.from_new( name="QA bot", instructions="You are a bot designed to answer questions about the author", openai_tools=[], tools=[summary_tool, vector_tool], verbose=True, run_retrieve_sleep_time=1.0, ) response = agent.chat("Can you give me a summary about the author's life?") print(str(response)) response = agent.query("What did the author do after RICS?") print(str(response)) import pinecone import os api_key = os.environ["PINECONE_API_KEY"] pinecone.init(api_key=api_key, environment="us-west1-gcp") try: pinecone.create_index( "quickstart", dimension=1536, metric="euclidean", pod_type="p1" ) except Exception: pass pinecone_index = pinecone.Index("quickstart") pinecone_index.delete(deleteAll=True, namespace="test") from llama_index.core import VectorStoreIndex, StorageContext from llama_index.vector_stores.pinecone import PineconeVectorStore from llama_index.core.schema import TextNode nodes = [ TextNode( text=( "Michael Jordan is a retired professional basketball player," " widely regarded as one of the greatest basketball players of all" " time." ), metadata={ "category": "Sports", "country": "United States", }, ), TextNode( text=( "Angelina Jolie is an American actress, filmmaker, and" " humanitarian. She has received numerous awards for her acting" " and is known for her philanthropic work." ), metadata={ "category": "Entertainment", "country": "United States", }, ), TextNode( text=( "Elon Musk is a business magnate, industrial designer, and" " engineer. 
He is the founder, CEO, and lead designer of SpaceX," " Tesla, Inc., Neuralink, and The Boring Company." ), metadata={ "category": "Business", "country": "United States", }, ), TextNode( text=( "Rihanna is a Barbadian singer, actress, and businesswoman. She" " has achieved significant success in the music industry and is" " known for her versatile musical style." ), metadata={ "category": "Music", "country": "Barbados", }, ), TextNode( text=( "Cristiano Ronaldo is a Portuguese professional footballer who is" " considered one of the greatest football players of all time. He" " has won numerous awards and set multiple records during his" " career." ), metadata={ "category": "Sports", "country": "Portugal", }, ), ] vector_store = PineconeVectorStore( pinecone_index=pinecone_index, namespace="test" ) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex(nodes, storage_context=storage_context) from llama_index.core.tools import FunctionTool from llama_index.core.vector_stores import ( VectorStoreInfo, MetadataInfo, ExactMatchFilter, MetadataFilters, ) from llama_index.core.retrievers import VectorIndexRetriever from llama_index.core.query_engine import RetrieverQueryEngine from typing import List, Tuple, Any from pydantic import BaseModel, Field top_k = 3 vector_store_info = VectorStoreInfo( content_info="brief biography of celebrities", metadata_info=[ MetadataInfo( name="category", type="str", description=( "Category of the celebrity, one of [Sports, Entertainment," " Business, Music]" ), ), MetadataInfo( name="country", type="str", description=( "Country of the celebrity, one of [United States, Barbados," " Portugal]" ), ), ], ) class AutoRetrieveModel(BaseModel): query: str = Field(..., description="natural language query string") filter_key_list: List[str] = Field( ..., description="List of metadata filter field names" ) filter_value_list: List[str] = Field( ..., description=( "List of metadata filter field values (corresponding to names" " specified in filter_key_list)" ), ) def auto_retrieve_fn( query: str, filter_key_list: List[str], filter_value_list: List[str] ): """Auto retrieval function. Performs auto-retrieval from a vector database, and then applies a set of filters. """ query = query or "Query" exact_match_filters = [ ExactMatchFilter(key=k, value=v) for k, v in zip(filter_key_list, filter_value_list) ] retriever = VectorIndexRetriever( index, filters=MetadataFilters(filters=exact_match_filters), top_k=top_k, ) results = retriever.retrieve(query) return [r.get_content() for r in results] description = f"""\ Use this tool to look up biographical information about celebrities. The vector database schema is given below: {vector_store_info.json()} """ auto_retrieve_tool = FunctionTool.from_defaults( fn=auto_retrieve_fn, name="celebrity_bios", description=description, fn_schema=AutoRetrieveModel, ) auto_retrieve_fn( "celebrity from the United States", filter_key_list=["country"], filter_value_list=["United States"], ) from llama_index.agent.openai import OpenAIAssistantAgent agent = OpenAIAssistantAgent.from_new( name="Celebrity bot", instructions="You are a bot designed to answer questions about celebrities.", tools=[auto_retrieve_tool], verbose=True, ) response = agent.chat("Tell me about two celebrities from the United States. 
") print(str(response)) from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) from llama_index.core import SQLDatabase from llama_index.core.indices import SQLStructStoreIndex engine = create_engine("sqlite:///:memory:", future=True) metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) metadata_obj.tables.keys() from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, {"city_name": "Berlin", "population": 3645000, "country": "Germany"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) with engine.connect() as connection: cursor = connection.exec_driver_sql("SELECT * FROM city_stats") print(cursor.fetchall()) sql_database = SQLDatabase(engine, include_tables=["city_stats"]) from llama_index.core.query_engine import NLSQLTableQueryEngine query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["city_stats"], ) get_ipython().system('pip install wikipedia') from llama_index.readers.wikipedia import WikipediaReader from llama_index.core import SimpleDirectoryReader, VectorStoreIndex cities = ["Toronto", "Berlin", "Tokyo"] wiki_docs = WikipediaReader().load_data(pages=cities) from llama_index.core import Settings from llama_index.core import StorageContext from llama_index.core.node_parser import TokenTextSplitter from llama_index.llms.openai import OpenAI Settings.chunk_size = 1024 Settings.llm = OpenAI(temperature=0, model="gpt-4") text_splitter =
TokenTextSplitter(chunk_size=1024)
llama_index.core.node_parser.TokenTextSplitter
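A hedged sketch of how the splitter is presumably used next: one vector index per city over the Wikipedia docs loaded earlier in this row.

# Sketch: build a vector index per city from its Wikipedia page
vector_indices = {}
for city, wiki_doc in zip(cities, wiki_docs):
    nodes = text_splitter.get_nodes_from_documents([wiki_doc])
    storage_context = StorageContext.from_defaults()
    vector_indices[city] = VectorStoreIndex(
        nodes, storage_context=storage_context
    )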
get_ipython().run_line_magic('pip', 'install llama-index-llms-fireworks') get_ipython().run_line_magic('pip', 'install llama-index') from llama_index.llms.fireworks import Fireworks resp = Fireworks().complete("Paul Graham is ") print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.fireworks import Fireworks messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="What is your name"), ] resp = Fireworks().chat(messages) print(resp) from llama_index.llms.fireworks import Fireworks llm = Fireworks() resp = llm.stream_complete("Paul Graham is ") for r in resp: print(r.delta, end="") from llama_index.llms.fireworks import Fireworks from llama_index.core.llms import ChatMessage llm = Fireworks() messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="What is your name"), ] resp = llm.stream_chat(messages) for r in resp: print(r.delta, end="") from llama_index.llms.fireworks import Fireworks llm =
Fireworks(model="accounts/fireworks/models/firefunction-v1")
llama_index.llms.fireworks.Fireworks
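A small usage sketch for the function-calling model (it still supports plain chat; messages is defined earlier in this row):

# Sketch: chat with the firefunction model like any other Fireworks LLM
resp = llm.chat(messages)
print(resp)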
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import os os.environ["OPENAI_API_KEY"] = "sk-..." get_ipython().system('pip install "llama_index>=0.9.7"') from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.llms.openai import OpenAI from llama_index.core.ingestion import IngestionPipeline from llama_index.core.extractors import TitleExtractor, SummaryExtractor from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import MetadataMode def build_pipeline(): llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.1) transformations = [ SentenceSplitter(chunk_size=1024, chunk_overlap=20), TitleExtractor( llm=llm, metadata_mode=MetadataMode.EMBED, num_workers=8 ), SummaryExtractor( llm=llm, metadata_mode=MetadataMode.EMBED, num_workers=8 ), OpenAIEmbedding(), ] return IngestionPipeline(transformations=transformations) from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("./data/paul_graham").load_data() import time times = [] for _ in range(3): time.sleep(30) # help prevent rate-limits/timeouts, keeps each run fair pipline = build_pipeline() start = time.time() nodes = await pipline.arun(documents=documents) end = time.time() times.append(end - start) print(f"Average time: {sum(times) / len(times)}") get_ipython().system('pip install "llama_index<0.9.6"') import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.llms.openai import OpenAI from llama_index.core.ingestion import IngestionPipeline from llama_index.core.extractors import TitleExtractor, SummaryExtractor from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import MetadataMode def build_pipeline(): llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.1) transformations = [ SentenceSplitter(chunk_size=1024, chunk_overlap=20), TitleExtractor(llm=llm, metadata_mode=MetadataMode.EMBED), SummaryExtractor(llm=llm, metadata_mode=MetadataMode.EMBED), OpenAIEmbedding(), ] return
IngestionPipeline(transformations=transformations)
llama_index.core.ingestion.IngestionPipeline
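The benchmark presumably repeats the same timing loop from earlier in this row against the older install (documents and time are already in scope there):

# Sketch: re-run the timing benchmark for llama_index<0.9.6
times = []
for _ in range(3):
    time.sleep(30)  # help prevent rate-limits/timeouts, keeps each run fair
    pipeline = build_pipeline()
    start = time.time()
    nodes = await pipeline.arun(documents=documents)
    times.append(time.time() - start)
print(f"Average time: {sum(times) / len(times)}")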
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().run_line_magic('env', 'OPENAI_API_KEY=YOUR_OPENAI_KEY') get_ipython().system('pip install llama-index pypdf') get_ipython().system("mkdir -p 'data/'") get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.core.response.notebook_utils import display_source_node from llama_index.core.retrievers import RecursiveRetriever from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI import json loader = PDFReader() docs0 = loader.load_data(file=Path("./data/llama2.pdf")) from llama_index.core import Document doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) for idx, node in enumerate(base_nodes): node.id_ = f"node-{idx}" from llama_index.core.embeddings import resolve_embed_model embed_model = resolve_embed_model("local:BAAI/bge-small-en") llm = OpenAI(model="gpt-3.5-turbo") base_index = VectorStoreIndex(base_nodes, embed_model=embed_model) base_retriever = base_index.as_retriever(similarity_top_k=2) retrievals = base_retriever.retrieve( "Can you tell me about the key concepts for safety finetuning" ) for n in retrievals: display_source_node(n, source_length=1500) query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm) response = query_engine_base.query( "Can you tell me about the key concepts for safety finetuning" ) print(str(response)) sub_chunk_sizes = [128, 256, 512] sub_node_parsers = [ SentenceSplitter(chunk_size=c, chunk_overlap=20) for c in sub_chunk_sizes ] all_nodes = [] for base_node in base_nodes: for n in sub_node_parsers: sub_nodes = n.get_nodes_from_documents([base_node]) sub_inodes = [ IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes ] all_nodes.extend(sub_inodes) original_node = IndexNode.from_text_node(base_node, base_node.node_id) all_nodes.append(original_node) all_nodes_dict = {n.node_id: n for n in all_nodes} vector_index_chunk = VectorStoreIndex(all_nodes, embed_model=embed_model) vector_retriever_chunk = vector_index_chunk.as_retriever(similarity_top_k=2) retriever_chunk = RecursiveRetriever( "vector", retriever_dict={"vector": vector_retriever_chunk}, node_dict=all_nodes_dict, verbose=True, ) nodes = retriever_chunk.retrieve( "Can you tell me about the key concepts for safety finetuning" ) for node in nodes: display_source_node(node, source_length=2000) query_engine_chunk = RetrieverQueryEngine.from_args(retriever_chunk, llm=llm) response = query_engine_chunk.query( "Can you tell me about the key concepts for safety finetuning" ) print(str(response)) import nest_asyncio nest_asyncio.apply() from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode from llama_index.core.extractors import ( SummaryExtractor, QuestionsAnsweredExtractor, ) extractors = [
SummaryExtractor(summaries=["self"], show_progress=True)
llama_index.core.extractors.SummaryExtractor
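The extractors list is cut off after its first element; a hedged sketch of the presumable completion and of running the extractors over the base nodes (questions=5 is an assumed value):

# Sketch: complete the extractor list and collect metadata per base node
extractors = [
    SummaryExtractor(summaries=["self"], show_progress=True),
    QuestionsAnsweredExtractor(questions=5, show_progress=True),
]
metadata_dicts = []
for extractor in extractors:
    metadata_dicts.extend(extractor.extract(base_nodes))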
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface') get_ipython().system('pip install "transformers[torch]" "huggingface_hub[inference]"') get_ipython().system('pip install llama-index') import os from typing import List, Optional from llama_index.llms.huggingface import ( HuggingFaceInferenceAPI, HuggingFaceLLM, ) HF_TOKEN: Optional[str] = os.getenv("HUGGING_FACE_TOKEN") locally_run =
HuggingFaceLLM(model_name="HuggingFaceH4/zephyr-7b-alpha")
llama_index.llms.huggingface.HuggingFaceLLM
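The remote counterpart to the local model, using the Inference API wrapper already imported in this row (the token is optional for public endpoints):

# Sketch: run the same model remotely via the Hugging Face Inference API
remotely_run = HuggingFaceInferenceAPI(
    model_name="HuggingFaceH4/zephyr-7b-alpha", token=HF_TOKEN
)
completion = remotely_run.complete("To infinity, and")
print(completion)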
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().system('pip install llama-index') import pandas as pd pd.set_option("display.max_rows", None) pd.set_option("display.max_columns", None) pd.set_option("display.width", None) pd.set_option("display.max_colwidth", None) get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm') get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm') from llama_index.readers.file import FlatReader from pathlib import Path reader = FlatReader() docs = reader.load_data(Path("./tesla_2020_10k.htm")) from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.readers.file import FlatReader from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter from llama_index.core.ingestion import IngestionPipeline from pathlib import Path import nest_asyncio nest_asyncio.apply() reader = FlatReader() docs = reader.load_data(Path("./tesla_2020_10k.htm")) pipeline = IngestionPipeline( documents=docs, transformations=[ HTMLNodeParser.from_defaults(), SentenceSplitter(chunk_size=1024, chunk_overlap=200), OpenAIEmbedding(), ], ) eval_nodes = pipeline.run(documents=docs) eval_llm = OpenAI(model="gpt-3.5-turbo") dataset_generator = DatasetGenerator( eval_nodes[:100], llm=eval_llm, show_progress=True, num_questions_per_chunk=3, ) eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=100) len(eval_dataset.qr_pairs) eval_dataset.save_json("data/tesla10k_eval_dataset.json") eval_dataset = QueryResponseDataset.from_json( "data/tesla10k_eval_dataset.json" ) eval_qs = eval_dataset.questions qr_pairs = eval_dataset.qr_pairs ref_response_strs = [r for (_, r) in qr_pairs] from llama_index.core.evaluation import ( CorrectnessEvaluator, SemanticSimilarityEvaluator, ) from llama_index.core.evaluation.eval_utils import ( get_responses, get_results_df, ) from llama_index.core.evaluation import BatchEvalRunner evaluator_c =
CorrectnessEvaluator(llm=eval_llm)
llama_index.core.evaluation.CorrectnessEvaluator
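A sketch pairing the correctness judge with the semantic-similarity judge and the batch runner, all of which this row imports:

# Sketch: batch-evaluate responses with both judges
evaluator_s = SemanticSimilarityEvaluator()
evaluator_dict = {
    "correctness": evaluator_c,
    "semantic_similarity": evaluator_s,
}
batch_eval_runner = BatchEvalRunner(evaluator_dict, workers=2, show_progress=True)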
import os from llama_index.networks import ( NetworkQueryEngine, ContributorClient, ) from llama_index.llms.openai import OpenAI import nest_asyncio nest_asyncio.apply() contributors = [ ContributorClient.from_config_file( env_file=f"./client-env-files/.env.contributor_{ix}.client" ) for ix in range(1, 4) ] api_key = os.environ.get("OPENAI_API_KEY") llm = OpenAI(api_key=api_key) network_query_engine =
NetworkQueryEngine.from_args(contributors=contributors, llm=llm)
llama_index.networks.NetworkQueryEngine.from_args
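A usage sketch, assuming the network engine exposes the standard async query interface (the query string is hypothetical; nest_asyncio is already applied in this row):

# Sketch: fan a question out across the three contributors
response = await network_query_engine.aquery("Who is Paul?")  # hypothetical query
print(response)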
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-extractors-entity') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import os import openai os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY_HERE" from llama_index.llms.openai import OpenAI from llama_index.core.schema import MetadataMode llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo", max_tokens=512) from llama_index.core.extractors import ( SummaryExtractor, QuestionsAnsweredExtractor, TitleExtractor, KeywordExtractor, BaseExtractor, ) from llama_index.extractors.entity import EntityExtractor from llama_index.core.node_parser import TokenTextSplitter text_splitter = TokenTextSplitter( separator=" ", chunk_size=512, chunk_overlap=128 ) class CustomExtractor(BaseExtractor): def extract(self, nodes): metadata_list = [ { "custom": ( node.metadata["document_title"] + "\n" + node.metadata["excerpt_keywords"] ) } for node in nodes ] return metadata_list extractors = [ TitleExtractor(nodes=5, llm=llm), QuestionsAnsweredExtractor(questions=3, llm=llm), ] transformations = [text_splitter] + extractors from llama_index.core import SimpleDirectoryReader get_ipython().system('mkdir -p data') get_ipython().system('wget -O "data/10k-132.pdf" "https://www.dropbox.com/scl/fi/6dlqdk6e2k1mjhi8dee5j/uber.pdf?rlkey=2jyoe49bg2vwdlz30l76czq6g&dl=1"') get_ipython().system('wget -O "data/10k-vFinal.pdf" "https://www.dropbox.com/scl/fi/qn7g3vrk5mqb18ko4e5in/lyft.pdf?rlkey=j6jxtjwo8zbstdo4wz3ns8zoj&dl=1"') uber_docs = SimpleDirectoryReader(input_files=["data/10k-132.pdf"]).load_data() uber_front_pages = uber_docs[0:3] uber_content = uber_docs[63:69] uber_docs = uber_front_pages + uber_content from llama_index.core.ingestion import IngestionPipeline pipeline =
IngestionPipeline(transformations=transformations)
llama_index.core.ingestion.IngestionPipeline
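A plausible next step (not part of this row): run the metadata-extraction pipeline over the selected Uber pages and inspect what the extractors attached.

uber_nodes = pipeline.run(documents=uber_docs)
# TitleExtractor adds "document_title"; QuestionsAnsweredExtractor adds
# "questions_this_excerpt_can_answer"
print(uber_nodes[0].metadata)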
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-rankgpt-rerank') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-packs-infer-retrieve-rerank') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import datasets dataset = datasets.load_dataset("BioDEX/BioDEX-ICSR") dataset from llama_index.core import get_tokenizer import re from typing import Set, List tokenizer = get_tokenizer() sample_size = 5 def get_reactions_row(raw_target: str) -> List[str]: """Get reactions from a single row.""" reaction_pattern = re.compile(r"reactions:\s*(.*)") reaction_match = reaction_pattern.search(raw_target) if reaction_match: reactions = reaction_match.group(1).split(",") reactions = [r.strip().lower() for r in reactions] else: reactions = [] return reactions def get_reactions_set(dataset) -> Set[str]: """Get set of all reactions.""" reactions = set() for data in dataset["train"]: reactions.update(set(get_reactions_row(data["target"]))) return reactions def get_samples(dataset, sample_size: int = 5): """Get processed sample. Contains source text and also the reaction label. Parse reaction text to specifically extract reactions. """ samples = [] for idx, data in enumerate(dataset["train"]): if idx >= sample_size: break text = data["fulltext_processed"] raw_target = data["target"] reactions = get_reactions_row(raw_target) samples.append({"text": text, "reactions": reactions}) return samples from llama_index.packs.infer_retrieve_rerank import InferRetrieveRerankPack from llama_index.core.llama_pack import download_llama_pack InferRetrieveRerankPack = download_llama_pack( "InferRetrieveRerankPack", "./irr_pack", ) from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo-16k") pred_context = """\ The output predictions should be a list of comma-separated adverse \ drug reactions. 
\ """ reranker_top_n = 10 pack = InferRetrieveRerankPack( get_reactions_set(dataset), llm=llm, pred_context=pred_context, reranker_top_n=reranker_top_n, verbose=True, ) samples = get_samples(dataset, sample_size=5) pred_reactions = pack.run(inputs=[s["text"] for s in samples]) gt_reactions = [s["reactions"] for s in samples] pred_reactions[2] gt_reactions[2] from llama_index.core.retrievers import BaseRetriever from llama_index.core.llms import LLM from llama_index.llms.openai import OpenAI from llama_index.core import PromptTemplate from llama_index.core.query_pipeline import QueryPipeline from llama_index.core.postprocessor.types import BaseNodePostprocessor from llama_index.postprocessor.rankgpt_rerank import RankGPTRerank from llama_index.core.output_parsers import ChainableOutputParser from typing import List import random all_reactions = get_reactions_set(dataset) random.sample(all_reactions, 5) from llama_index.core.schema import TextNode from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core.ingestion import IngestionPipeline from llama_index.core import VectorStoreIndex reaction_nodes = [TextNode(text=r) for r in all_reactions] pipeline = IngestionPipeline(transformations=[OpenAIEmbedding()]) reaction_nodes = await pipeline.arun(documents=reaction_nodes) index = VectorStoreIndex(reaction_nodes) reaction_nodes[0].embedding reaction_retriever = index.as_retriever(similarity_top_k=2) nodes = reaction_retriever.retrieve("abdominal") print([n.get_content() for n in nodes]) infer_prompt_str = """\ Your job is to output a list of predictions given context from a given piece of text. The text context, and information regarding the set of valid predictions is given below. Return the predictions as a comma-separated list of strings. Text Context: {doc_context} Prediction Info: {pred_context} Predictions: """ infer_prompt = PromptTemplate(infer_prompt_str) class PredsOutputParser(ChainableOutputParser): """Predictions output parser.""" def parse(self, output: str) -> List[str]: """Parse predictions.""" tokens = output.split(",") return [t.strip() for t in tokens] preds_output_parser = PredsOutputParser() rerank_str = """\ Given a piece of text, rank the {num} labels above based on their relevance \ to this piece of text. The labels \ should be listed in descending order using identifiers. \ The most relevant labels should be listed first. \ The output format should be [] > [], e.g., [1] > [2]. \ Only response the ranking results, \ do not say any word or explain. \ Here is a given piece of text: {query}. """ rerank_prompt = PromptTemplate(rerank_str) def infer_retrieve_rerank( query: str, retriever: BaseRetriever, llm: LLM, pred_context: str, reranker_top_n: int = 3, ): """Infer retrieve rerank.""" infer_prompt_c = infer_prompt.as_query_component( partial={"pred_context": pred_context} ) infer_pipeline =
QueryPipeline(chain=[infer_prompt_c, llm, preds_output_parser])
llama_index.core.query_pipeline.QueryPipeline
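A sketch of the next lines inside infer_retrieve_rerank: run the chain (prompt -> llm -> output parser) on the function's `query` argument; since pred_context was partialed in, `query` fills the remaining doc_context slot.

# returns a List[str] of candidate labels via PredsOutputParser
preds = infer_pipeline.run(query)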
import os os.environ["OPENAI_API_KEY"] = "" os.environ["VIDEO_DB_API_KEY"] = "" get_ipython().run_line_magic('pip', 'install llama-index') get_ipython().run_line_magic('pip', 'install videodb') get_ipython().run_line_magic('pip', 'install llama-index-retrievers-videodb') from videodb import connect conn = connect() print("uploading first video") video1 = conn.upload(url="https://www.youtube.com/watch?v=lsODSDmY4CY") print("uploading second video") video2 = conn.upload(url="https://www.youtube.com/watch?v=vZ4kOr38JhY") print("Indexing the videos...") video1.index_spoken_words() video2.index_spoken_words() from llama_index.retrievers.videodb import VideoDBRetriever from llama_index.core import get_response_synthesizer from llama_index.core.query_engine import RetrieverQueryEngine retriever =
VideoDBRetriever()
llama_index.retrievers.videodb.VideoDBRetriever
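A sketch wiring the completed VideoDB retriever into a standard query engine, mirroring the imports in the prompt; the question is illustrative.

response_synthesizer = get_response_synthesizer()
query_engine = RetrieverQueryEngine(
    retriever=retriever,
    response_synthesizer=response_synthesizer,
)
response = query_engine.query("Summarize what the speakers say about AI.")
print(response)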
from llama_hub.openalex import OpenAlexReader from llama_index.llms import OpenAI from llama_index.query_engine import CitationQueryEngine from llama_index import ( VectorStoreIndex, ServiceContext, ) from llama_index.response.notebook_utils import display_response openalex_reader = OpenAlexReader(email="[email protected]") query = "biases in large language models" works = openalex_reader.load_data(query, full_text=False) service_context = ServiceContext.from_defaults( llm=OpenAI(model="gpt-3.5-turbo", temperature=0) ) index =
VectorStoreIndex.from_documents(works, service_context=service_context)
llama_index.VectorStoreIndex.from_documents
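A hedged continuation: wrap the index in the CitationQueryEngine imported above so the answer cites the retrieved works.

query_engine = CitationQueryEngine.from_args(index, similarity_top_k=10)
response = query_engine.query("What are the biases in large language models?")
display_response(response)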
get_ipython().run_line_magic('pip', 'install llama-index-llms-gradient') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().system('pip install llama-index gradientai -q') import os from llama_index.llms.gradient import GradientBaseModelLLM from llama_index.finetuning import GradientFinetuneEngine os.environ["GRADIENT_ACCESS_TOKEN"] = os.getenv("GRADIENT_API_KEY") os.environ["GRADIENT_WORKSPACE_ID"] = "<insert_workspace_id>" from pydantic import BaseModel class Album(BaseModel): """Data model for an album.""" name: str artist: str from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler from llama_index.llms.openai import OpenAI from llama_index.llms.gradient import GradientBaseModelLLM from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser openai_handler = LlamaDebugHandler() openai_callback = CallbackManager([openai_handler]) openai_llm = OpenAI(model="gpt-4", callback_manager=openai_callback) gradient_handler =
LlamaDebugHandler()
llama_index.core.callbacks.LlamaDebugHandler
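A plausible continuation mirroring the OpenAI setup in the prompt: attach the new handler to a Gradient base model (the model slug is an assumption, not from the row).

gradient_callback = CallbackManager([gradient_handler])
gradient_llm = GradientBaseModelLLM(
    base_model_slug="llama2-7b-chat",  # assumed slug
    max_tokens=300,
    callback_manager=gradient_callback,
    is_chat_model=True,
)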
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-together') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') domain = "docs.llamaindex.ai" docs_url = "https://docs.llamaindex.ai/en/latest/" get_ipython().system('wget -e robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links --restrict-file-names=windows --domains {domain} --no-parent {docs_url}') from llama_index.readers.file import UnstructuredReader from pathlib import Path from llama_index.llms.openai import OpenAI from llama_index.core import Document reader =
UnstructuredReader()
llama_index.readers.file.UnstructuredReader
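A hedged sketch of the next step: collect the scraped HTML files (the directory name mirrors the wget --domains flag above) and parse one with the reader.

all_html_files = [
    f.resolve() for f in Path("./docs.llamaindex.ai/").rglob("*.html")
]
docs = reader.load_data(file=all_html_files[0])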
get_ipython().system('pip install llama-index') import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, ) from IPython.display import Markdown, display get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() index = VectorStoreIndex.from_documents(documents) query_engine = index.as_query_engine(response_mode="tree_summarize") def display_prompt_dict(prompts_dict): for k, p in prompts_dict.items(): text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>" display(Markdown(text_md)) print(p.get_template()) display(Markdown("<br><br>")) prompts_dict = query_engine.get_prompts() display_prompt_dict(prompts_dict) prompts_dict = query_engine.response_synthesizer.get_prompts() display_prompt_dict(prompts_dict) query_engine = index.as_query_engine(response_mode="compact") prompts_dict = query_engine.get_prompts() display_prompt_dict(prompts_dict) response = query_engine.query("What did the author do growing up?") print(str(response)) from llama_index.core import PromptTemplate query_engine = index.as_query_engine(response_mode="tree_summarize") new_summary_tmpl_str = ( "Context information is below.\n" "---------------------\n" "{context_str}\n" "---------------------\n" "Given the context information and not prior knowledge, " "answer the query in the style of a Shakespeare play.\n" "Query: {query_str}\n" "Answer: " ) new_summary_tmpl = PromptTemplate(new_summary_tmpl_str) query_engine.update_prompts( {"response_synthesizer:summary_template": new_summary_tmpl} ) prompts_dict = query_engine.get_prompts() display_prompt_dict(prompts_dict) response = query_engine.query("What did the author do growing up?") print(str(response)) from llama_index.core.query_engine import ( RouterQueryEngine, FLAREInstructQueryEngine, ) from llama_index.core.selectors import LLMMultiSelector from llama_index.core.evaluation import FaithfulnessEvaluator, DatasetGenerator from llama_index.core.postprocessor import LLMRerank from llama_index.core.tools import QueryEngineTool query_tool = QueryEngineTool.from_defaults( query_engine=query_engine, description="test description" ) router_query_engine = RouterQueryEngine.from_defaults([query_tool]) prompts_dict = router_query_engine.get_prompts() display_prompt_dict(prompts_dict) flare_query_engine = FLAREInstructQueryEngine(query_engine) prompts_dict = flare_query_engine.get_prompts() display_prompt_dict(prompts_dict) from llama_index.core.selectors import LLMSingleSelector selector = LLMSingleSelector.from_defaults() prompts_dict = selector.get_prompts() display_prompt_dict(prompts_dict) evaluator = FaithfulnessEvaluator() prompts_dict = evaluator.get_prompts() display_prompt_dict(prompts_dict) dataset_generator = DatasetGenerator.from_documents(documents) prompts_dict = dataset_generator.get_prompts() display_prompt_dict(prompts_dict) llm_rerank =
LLMRerank()
llama_index.core.postprocessor.LLMRerank
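The natural continuation of this prompt-inspection walkthrough: LLMRerank exposes its prompt through the same get_prompts() interface used throughout the row.

prompts_dict = llm_rerank.get_prompts()
display_prompt_dict(prompts_dict)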
get_ipython().run_line_magic('pip', 'install llama-index-llms-rungpt') get_ipython().system('pip install llama-index') get_ipython().system('pip install rungpt') get_ipython().system('rungpt serve decapoda-research/llama-7b-hf --precision fp16 --device_map balanced') from llama_index.llms.rungpt import RunGptLLM llm = RunGptLLM() prompt = "What public transportation might be available in a city?" response = llm.complete(prompt) print(response) from llama_index.core.llms import ChatMessage, MessageRole from llama_index.llms.rungpt import RunGptLLM messages = [ ChatMessage( role=MessageRole.USER, content="Now, I want you to do some math for me.", ), ChatMessage( role=MessageRole.ASSISTANT, content="Sure, I would like to help you." ), ChatMessage( role=MessageRole.USER, content="How many points determine a straight line?", ), ] llm = RunGptLLM() response = llm.chat(messages=messages, temperature=0.8, max_tokens=15) print(response) prompt = "What public transportation might be available in a city?" response = RunGptLLM().stream_complete(prompt) for item in response: print(item.text) from llama_index.llms.rungpt import RunGptLLM messages = [ ChatMessage( role=MessageRole.USER, content="Now, I want you to do some math for me.", ), ChatMessage( role=MessageRole.ASSISTANT, content="Sure, I would like to help you." ), ChatMessage( role=MessageRole.USER, content="How many points determine a straight line?", ), ] response =
RunGptLLM()
llama_index.llms.rungpt.RunGptLLM
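A sketch of the streaming-chat call this row is building toward, reusing the `messages` list from the prompt.

response = RunGptLLM().stream_chat(messages=messages)
for item in response:
    print(item.message)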
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') from llama_index.core.agent import ReActAgent from llama_index.llms.openai import OpenAI from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool, FunctionTool def multiply(a: int, b: int) -> int: """Multiply two integers and returns the result integer""" return a * b multiply_tool =
FunctionTool.from_defaults(fn=multiply)
llama_index.core.tools.FunctionTool.from_defaults
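A minimal sketch of where this row usually goes: add a second tool and hand both to a ReActAgent (the model choice is illustrative).

def add(a: int, b: int) -> int:
    """Add two integers and returns the result integer"""
    return a + b

add_tool = FunctionTool.from_defaults(fn=add)
llm = OpenAI(model="gpt-3.5-turbo")
agent = ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True)
response = agent.chat("What is 20 + (2 * 4)? Calculate step by step.")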
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') import json from llama_index.core import SimpleDirectoryReader from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import MetadataMode get_ipython().system("mkdir -p 'data/10k/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'") TRAIN_FILES = ["./data/10k/lyft_2021.pdf"] VAL_FILES = ["./data/10k/uber_2021.pdf"] TRAIN_CORPUS_FPATH = "./data/train_corpus.json" VAL_CORPUS_FPATH = "./data/val_corpus.json" def load_corpus(files, verbose=False): if verbose: print(f"Loading files {files}") reader = SimpleDirectoryReader(input_files=files) docs = reader.load_data() if verbose: print(f"Loaded {len(docs)} docs") parser = SentenceSplitter() nodes = parser.get_nodes_from_documents(docs, show_progress=verbose) if verbose: print(f"Parsed {len(nodes)} nodes") return nodes train_nodes = load_corpus(TRAIN_FILES, verbose=True) val_nodes = load_corpus(VAL_FILES, verbose=True) from llama_index.finetuning import generate_qa_embedding_pairs from llama_index.core.evaluation import EmbeddingQAFinetuneDataset import os OPENAI_API_TOKEN = "sk-" os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN from llama_index.llms.openai import OpenAI train_dataset = generate_qa_embedding_pairs( llm=OpenAI(model="gpt-3.5-turbo"), nodes=train_nodes ) val_dataset = generate_qa_embedding_pairs( llm=OpenAI(model="gpt-3.5-turbo"), nodes=val_nodes ) train_dataset.save_json("train_dataset.json") val_dataset.save_json("val_dataset.json") train_dataset = EmbeddingQAFinetuneDataset.from_json("train_dataset.json") val_dataset =
EmbeddingQAFinetuneDataset.from_json("val_dataset.json")
llama_index.core.evaluation.EmbeddingQAFinetuneDataset.from_json
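A likely next step (hedged): fine-tune a local embedding model on the generated pairs; the model_id and output path here are illustrative defaults.

from llama_index.finetuning import SentenceTransformersFinetuneEngine

finetune_engine = SentenceTransformersFinetuneEngine(
    train_dataset,
    model_id="BAAI/bge-small-en",
    model_output_path="test_model",
    val_dataset=val_dataset,
)
finetune_engine.finetune()
embed_model = finetune_engine.get_finetuned_model()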
from llama_index.embeddings.vertex import VertexTextEmbedding embed_model =
VertexTextEmbedding(project="speedy-atom-413006", location="us-central1")
llama_index.embeddings.vertex.VertexTextEmbedding
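A quick smoke test of the completed embedding model; the project and location in the row are sample values, and valid GCP credentials are assumed.

embedding = embed_model.get_text_embedding("Hello, Vertex AI!")
print(len(embedding))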
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks') get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface') import nest_asyncio nest_asyncio.apply() import os HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN") OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") get_ipython().system('pip install wikipedia -q') from llama_index.readers.wikipedia import WikipediaReader cities = [ "San Francisco", "Toronto", "New York", "Vancouver", "Montreal", "Tokyo", "Singapore", "Paris", ] documents = WikipediaReader().load_data( pages=[f"History of {x}" for x in cities] ) QUESTION_GEN_PROMPT = ( "You are a Teacher/ Professor. Your task is to setup " "a quiz/examination. Using the provided context, formulate " "a single question that captures an important fact from the " "context. Restrict the question to the context information provided." ) from llama_index.core.evaluation import DatasetGenerator from llama_index.llms.openai import OpenAI gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3) dataset_generator = DatasetGenerator.from_documents( documents, question_gen_query=QUESTION_GEN_PROMPT, llm=gpt_35_llm, num_questions_per_chunk=25, ) qrd = dataset_generator.generate_dataset_from_nodes(num=350) from llama_index.core import VectorStoreIndex from llama_index.core.retrievers import VectorIndexRetriever the_index = VectorStoreIndex.from_documents(documents=documents) the_retriever = VectorIndexRetriever( index=the_index, similarity_top_k=2, ) from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.llms.huggingface import HuggingFaceInferenceAPI llm = HuggingFaceInferenceAPI( model_name="meta-llama/Llama-2-7b-chat-hf", context_window=2048, # to use refine token=HUGGING_FACE_TOKEN, ) query_engine = RetrieverQueryEngine.from_args(retriever=the_retriever, llm=llm) import tqdm train_dataset = [] num_train_questions = int(0.65 * len(qrd.qr_pairs)) for q, a in tqdm.tqdm(qrd.qr_pairs[:num_train_questions]): data_entry = {"question": q, "reference": a} response = query_engine.query(q) response_struct = {} response_struct["model"] = "llama-2" response_struct["text"] = str(response) response_struct["context"] = ( response.source_nodes[0].node.text[:1000] + "..." 
) data_entry["response_data"] = response_struct train_dataset.append(data_entry) from llama_index.llms.openai import OpenAI from llama_index.finetuning.callbacks import OpenAIFineTuningHandler from llama_index.core.callbacks import CallbackManager from llama_index.core.evaluation import CorrectnessEvaluator finetuning_handler = OpenAIFineTuningHandler() callback_manager = CallbackManager([finetuning_handler]) gpt_4_llm = OpenAI( temperature=0, model="gpt-4", callback_manager=callback_manager ) gpt4_judge = CorrectnessEvaluator(llm=gpt_4_llm) import tqdm for data_entry in tqdm.tqdm(train_dataset): eval_result = await gpt4_judge.aevaluate( query=data_entry["question"], response=data_entry["response_data"]["text"], context=data_entry["response_data"]["context"], reference=data_entry["reference"], ) judgement = {} judgement["llm"] = "gpt_4" judgement["score"] = eval_result.score judgement["text"] = eval_result.response data_entry["evaluations"] = [judgement] finetuning_handler.save_finetuning_events("correction_finetuning_events.jsonl") from llama_index.finetuning import OpenAIFinetuneEngine finetune_engine = OpenAIFinetuneEngine( "gpt-3.5-turbo", "correction_finetuning_events.jsonl", ) finetune_engine.finetune() finetune_engine.get_current_job() test_dataset = [] for q, a in tqdm.tqdm(qrd.qr_pairs[num_train_questions:]): data_entry = {"question": q, "reference": a} response = query_engine.query(q) response_struct = {} response_struct["model"] = "llama-2" response_struct["text"] = str(response) response_struct["context"] = ( response.source_nodes[0].node.text[:1000] + "..." ) data_entry["response_data"] = response_struct test_dataset.append(data_entry) for data_entry in tqdm.tqdm(test_dataset): eval_result = await gpt4_judge.aevaluate( query=data_entry["question"], response=data_entry["response_data"]["text"], context=data_entry["response_data"]["context"], reference=data_entry["reference"], ) judgement = {} judgement["llm"] = "gpt_4" judgement["score"] = eval_result.score judgement["text"] = eval_result.response data_entry["evaluations"] = [judgement] from llama_index.core.evaluation import EvaluationResult ft_llm = finetune_engine.get_finetuned_model() ft_gpt_3p5_judge = CorrectnessEvaluator(llm=ft_llm) for data_entry in tqdm.tqdm(test_dataset): eval_result = await ft_gpt_3p5_judge.aevaluate( query=data_entry["question"], response=data_entry["response_data"]["text"], context=data_entry["response_data"]["context"], reference=data_entry["reference"], ) judgement = {} judgement["llm"] = "ft_gpt_3p5" judgement["score"] = eval_result.score judgement["text"] = eval_result.response data_entry["evaluations"] += [judgement] gpt_3p5_llm = OpenAI(model="gpt-3.5-turbo") gpt_3p5_judge =
CorrectnessEvaluator(llm=gpt_3p5_llm)
llama_index.core.evaluation.CorrectnessEvaluator
from llama_index.core import SQLDatabase from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("sqlite:///chinook.db") sql_database = SQLDatabase(engine) from llama_index.core.query_pipeline import QueryPipeline get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip') get_ipython().system('unzip ./chinook.zip') from llama_index.core.settings import Settings from llama_index.core.callbacks import CallbackManager callback_manager = CallbackManager() Settings.callback_manager = callback_manager import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.query_engine import NLSQLTableQueryEngine from llama_index.core.tools import QueryEngineTool sql_query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["albums", "tracks", "artists"], verbose=True, ) sql_tool = QueryEngineTool.from_defaults( query_engine=sql_query_engine, name="sql_tool", description=( "Useful for translating a natural language query into a SQL query" ), ) from llama_index.core.query_pipeline import QueryPipeline as QP qp = QP(verbose=True) from llama_index.core.agent.react.types import ( ActionReasoningStep, ObservationReasoningStep, ResponseReasoningStep, ) from llama_index.core.agent import Task, AgentChatResponse from llama_index.core.query_pipeline import ( AgentInputComponent, AgentFnComponent, CustomAgentComponent, QueryComponent, ToolRunnerComponent, ) from llama_index.core.llms import MessageRole from typing import Dict, Any, Optional, Tuple, List, cast def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]: """Agent input function. Returns: A Dictionary of output keys and values. If you are specifying src_key when defining links between this component and other components, make sure the src_key matches the specified output_key. 
""" if "current_reasoning" not in state: state["current_reasoning"] = [] reasoning_step = ObservationReasoningStep(observation=task.input) state["current_reasoning"].append(reasoning_step) return {"input": task.input} agent_input_component = AgentInputComponent(fn=agent_input_fn) from llama_index.core.agent import ReActChatFormatter from llama_index.core.query_pipeline import InputComponent, Link from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool def react_prompt_fn( task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool] ) -> List[ChatMessage]: chat_formatter = ReActChatFormatter() return chat_formatter.format( tools, chat_history=task.memory.get() + state["memory"].get_all(), current_reasoning=state["current_reasoning"], ) react_prompt_component = AgentFnComponent( fn=react_prompt_fn, partial_dict={"tools": [sql_tool]} ) from typing import Set, Optional from llama_index.core.agent.react.output_parser import ReActOutputParser from llama_index.core.llms import ChatResponse from llama_index.core.agent.types import Task def parse_react_output_fn( task: Task, state: Dict[str, Any], chat_response: ChatResponse ): """Parse ReAct output into a reasoning step.""" output_parser = ReActOutputParser() reasoning_step = output_parser.parse(chat_response.message.content) return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step} parse_react_output = AgentFnComponent(fn=parse_react_output_fn) def run_tool_fn( task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep ): """Run tool and process tool output.""" tool_runner_component = ToolRunnerComponent( [sql_tool], callback_manager=task.callback_manager ) tool_output = tool_runner_component.run_component( tool_name=reasoning_step.action, tool_input=reasoning_step.action_input, ) observation_step = ObservationReasoningStep(observation=str(tool_output)) state["current_reasoning"].append(observation_step) return {"response_str": observation_step.get_content(), "is_done": False} run_tool = AgentFnComponent(fn=run_tool_fn) def process_response_fn( task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep ): """Process response.""" state["current_reasoning"].append(response_step) response_str = response_step.response state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER)) state["memory"].put( ChatMessage(content=response_str, role=MessageRole.ASSISTANT) ) return {"response_str": response_str, "is_done": True} process_response = AgentFnComponent(fn=process_response_fn) def process_agent_response_fn( task: Task, state: Dict[str, Any], response_dict: dict ): """Process agent response.""" return ( AgentChatResponse(response_dict["response_str"]), response_dict["is_done"], ) process_agent_response = AgentFnComponent(fn=process_agent_response_fn) from llama_index.core.query_pipeline import QueryPipeline as QP from llama_index.llms.openai import OpenAI qp.add_modules( { "agent_input": agent_input_component, "react_prompt": react_prompt_component, "llm": OpenAI(model="gpt-4-1106-preview"), "react_output_parser": parse_react_output, "run_tool": run_tool, "process_response": process_response, "process_agent_response": process_agent_response, } ) qp.add_chain(["agent_input", "react_prompt", "llm", "react_output_parser"]) qp.add_link( "react_output_parser", "run_tool", condition_fn=lambda x: not x["done"], input_fn=lambda x: x["reasoning_step"], ) qp.add_link( "react_output_parser", "process_response", condition_fn=lambda x: x["done"], input_fn=lambda x: 
x["reasoning_step"], ) qp.add_link("process_response", "process_agent_response") qp.add_link("run_tool", "process_agent_response") from pyvis.network import Network net = Network(notebook=True, cdn_resources="in_line", directed=True) net.from_nx(qp.clean_dag) net.show("agent_dag.html") from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner from llama_index.core.callbacks import CallbackManager agent_worker = QueryPipelineAgentWorker(qp) agent = AgentRunner( agent_worker, callback_manager=CallbackManager([]), verbose=True ) task = agent.create_task( "What are some tracks from the artist AC/DC? Limit it to 3" ) step_output = agent.run_step(task.task_id) step_output = agent.run_step(task.task_id) step_output.is_last response = agent.finalize_response(task.task_id) print(str(response)) agent.reset() response = agent.chat( "What are some tracks from the artist AC/DC? Limit it to 3" ) print(str(response)) from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4-1106-preview") from llama_index.core.agent import Task, AgentChatResponse from typing import Dict, Any from llama_index.core.query_pipeline import ( AgentInputComponent, AgentFnComponent, ) def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict: """Agent input function.""" if "convo_history" not in state: state["convo_history"] = [] state["count"] = 0 state["convo_history"].append(f"User: {task.input}") convo_history_str = "\n".join(state["convo_history"]) or "None" return {"input": task.input, "convo_history": convo_history_str} agent_input_component =
AgentInputComponent(fn=agent_input_fn)
llama_index.core.query_pipeline.AgentInputComponent
from llama_index.core import SQLDatabase from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("sqlite:///chinook.db") sql_database = SQLDatabase(engine) from llama_index.core.query_pipeline import QueryPipeline get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip') get_ipython().system('unzip ./chinook.zip') from llama_index.core.settings import Settings from llama_index.core.callbacks import CallbackManager callback_manager = CallbackManager() Settings.callback_manager = callback_manager import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.query_engine import NLSQLTableQueryEngine from llama_index.core.tools import QueryEngineTool sql_query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["albums", "tracks", "artists"], verbose=True, ) sql_tool = QueryEngineTool.from_defaults( query_engine=sql_query_engine, name="sql_tool", description=( "Useful for translating a natural language query into a SQL query" ), ) from llama_index.core.query_pipeline import QueryPipeline as QP qp = QP(verbose=True) from llama_index.core.agent.react.types import ( ActionReasoningStep, ObservationReasoningStep, ResponseReasoningStep, ) from llama_index.core.agent import Task, AgentChatResponse from llama_index.core.query_pipeline import ( AgentInputComponent, AgentFnComponent, CustomAgentComponent, QueryComponent, ToolRunnerComponent, ) from llama_index.core.llms import MessageRole from typing import Dict, Any, Optional, Tuple, List, cast def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]: """Agent input function. Returns: A Dictionary of output keys and values. If you are specifying src_key when defining links between this component and other components, make sure the src_key matches the specified output_key. """ if "current_reasoning" not in state: state["current_reasoning"] = [] reasoning_step = ObservationReasoningStep(observation=task.input) state["current_reasoning"].append(reasoning_step) return {"input": task.input} agent_input_component =
AgentInputComponent(fn=agent_input_fn)
llama_index.core.query_pipeline.AgentInputComponent
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().run_line_magic('env', 'OPENAI_API_KEY=') get_ipython().run_line_magic('env', 'BRAINTRUST_API_KEY=') get_ipython().run_line_magic('env', 'TOKENIZERS_PARALLELISM=true # This is needed to avoid a warning message from Chroma') get_ipython().run_line_magic('pip', 'install -U llama_hub llama_index braintrust autoevals pypdf pillow transformers torch torchvision') get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.core.response.notebook_utils import display_source_node from llama_index.core.retrievers import RecursiveRetriever from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI import json loader = PDFReader() docs0 = loader.load_data(file=Path("./data/llama2.pdf")) from llama_index.core import Document doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) for idx, node in enumerate(base_nodes): node.id_ = f"node-{idx}" from llama_index.core.embeddings import resolve_embed_model embed_model = resolve_embed_model("local:BAAI/bge-small-en") llm = OpenAI(model="gpt-3.5-turbo") base_index = VectorStoreIndex(base_nodes, embed_model=embed_model) base_retriever = base_index.as_retriever(similarity_top_k=2) retrievals = base_retriever.retrieve( "Can you tell me about the key concepts for safety finetuning" ) for n in retrievals: display_source_node(n, source_length=1500) query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm) response = query_engine_base.query( "Can you tell me about the key concepts for safety finetuning" ) print(str(response)) sub_chunk_sizes = [128, 256, 512] sub_node_parsers = [SentenceSplitter(chunk_size=c) for c in sub_chunk_sizes] all_nodes = [] for base_node in base_nodes: for n in sub_node_parsers: sub_nodes = n.get_nodes_from_documents([base_node]) sub_inodes = [ IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes ] all_nodes.extend(sub_inodes) original_node =
IndexNode.from_text_node(base_node, base_node.node_id)
llama_index.core.schema.IndexNode.from_text_node
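A hedged sketch of where this row is headed: index parent and child nodes together, then resolve child hits back to their parent chunks with the RecursiveRetriever imported above.

all_nodes.append(original_node)  # in the source flow this happens per base node
all_nodes_dict = {n.node_id: n for n in all_nodes}
vector_index_chunk = VectorStoreIndex(all_nodes, embed_model=embed_model)
vector_retriever_chunk = vector_index_chunk.as_retriever(similarity_top_k=2)
retriever_chunk = RecursiveRetriever(
    "vector",
    retriever_dict={"vector": vector_retriever_chunk},
    node_dict=all_nodes_dict,
    verbose=True,
)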
get_ipython().system('pip install llama-index-multi-modal-llms-ollama') get_ipython().system('pip install llama-index-readers-file') get_ipython().system('pip install unstructured') get_ipython().system('pip install llama-index-embeddings-huggingface') get_ipython().system('pip install llama-index-vector-stores-qdrant') get_ipython().system('pip install llama-index-embeddings-clip') from llama_index.multi_modal_llms.ollama import OllamaMultiModal mm_model = OllamaMultiModal(model="llava:13b") from pathlib import Path from llama_index.core import SimpleDirectoryReader from PIL import Image import matplotlib.pyplot as plt input_image_path = Path("restaurant_images") if not input_image_path.exists(): Path.mkdir(input_image_path) get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1GlqcNJhGGbwLKjJK1QJ_nyswCTQ2K2Fq" -O ./restaurant_images/fried_chicken.png') image_documents = SimpleDirectoryReader("./restaurant_images").load_data() imageUrl = "./restaurant_images/fried_chicken.png" image = Image.open(imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) from pydantic import BaseModel class Restaurant(BaseModel): """Data model for a restaurant.""" restaurant: str food: str discount: str price: str rating: str review: str from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser prompt_template_str = """\ {query_str} Return the answer as a Pydantic object. The Pydantic schema is given below: """ mm_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Restaurant), image_documents=image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=mm_model, verbose=True, ) response = mm_program(query_str="Can you summarize what is in the image?") for res in response: print(res) get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm') get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1THe1qqM61lretr9N3BmINc_NWDvuthYf" -O shanghai.jpg') from pathlib import Path from llama_index.readers.file import UnstructuredReader from llama_index.core.schema import ImageDocument loader = UnstructuredReader() documents = loader.load_data(file=Path("tesla_2021_10k.htm")) image_doc =
ImageDocument(image_path="./shanghai.jpg")
llama_index.core.schema.ImageDocument
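A sketch asking the multi-modal model about the completed image document directly; the prompt text is illustrative.

response = mm_model.complete(
    prompt="Describe this image in one sentence.",
    image_documents=[image_doc],
)
print(response)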
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-rankgpt-rerank') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-packs-infer-retrieve-rerank') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import datasets dataset = datasets.load_dataset("BioDEX/BioDEX-ICSR") dataset from llama_index.core import get_tokenizer import re from typing import Set, List tokenizer = get_tokenizer() sample_size = 5 def get_reactions_row(raw_target: str) -> List[str]: """Get reactions from a single row.""" reaction_pattern = re.compile(r"reactions:\s*(.*)") reaction_match = reaction_pattern.search(raw_target) if reaction_match: reactions = reaction_match.group(1).split(",") reactions = [r.strip().lower() for r in reactions] else: reactions = [] return reactions def get_reactions_set(dataset) -> Set[str]: """Get set of all reactions.""" reactions = set() for data in dataset["train"]: reactions.update(set(get_reactions_row(data["target"]))) return reactions def get_samples(dataset, sample_size: int = 5): """Get processed sample. Contains source text and also the reaction label. Parse reaction text to specifically extract reactions. """ samples = [] for idx, data in enumerate(dataset["train"]): if idx >= sample_size: break text = data["fulltext_processed"] raw_target = data["target"] reactions = get_reactions_row(raw_target) samples.append({"text": text, "reactions": reactions}) return samples from llama_index.packs.infer_retrieve_rerank import InferRetrieveRerankPack from llama_index.core.llama_pack import download_llama_pack InferRetrieveRerankPack = download_llama_pack( "InferRetrieveRerankPack", "./irr_pack", ) from llama_index.llms.openai import OpenAI llm =
OpenAI(model="gpt-3.5-turbo-16k")
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().system('pip install llama-index') import pinecone import os api_key = os.environ["PINECONE_API_KEY"] pinecone.init(api_key=api_key, environment="us-west4-gcp-free") import os import getpass import openai openai.api_key = "sk-<your-key>" try: pinecone.create_index( "quickstart-index", dimension=1536, metric="euclidean", pod_type="p1" ) except Exception: pass pinecone_index = pinecone.Index("quickstart-index") pinecone_index.delete(deleteAll=True, namespace="test") from llama_index.core import VectorStoreIndex, StorageContext from llama_index.vector_stores.pinecone import PineconeVectorStore from llama_index.core.schema import TextNode nodes = [ TextNode( text=( "Michael Jordan is a retired professional basketball player," " widely regarded as one of the greatest basketball players of all" " time." ), metadata={ "category": "Sports", "country": "United States", "gender": "male", "born": 1963, }, ), TextNode( text=( "Angelina Jolie is an American actress, filmmaker, and" " humanitarian. She has received numerous awards for her acting" " and is known for her philanthropic work." ), metadata={ "category": "Entertainment", "country": "United States", "gender": "female", "born": 1975, }, ), TextNode( text=( "Elon Musk is a business magnate, industrial designer, and" " engineer. He is the founder, CEO, and lead designer of SpaceX," " Tesla, Inc., Neuralink, and The Boring Company." ), metadata={ "category": "Business", "country": "United States", "gender": "male", "born": 1971, }, ), TextNode( text=( "Rihanna is a Barbadian singer, actress, and businesswoman. She" " has achieved significant success in the music industry and is" " known for her versatile musical style." ), metadata={ "category": "Music", "country": "Barbados", "gender": "female", "born": 1988, }, ), TextNode( text=( "Cristiano Ronaldo is a Portuguese professional footballer who is" " considered one of the greatest football players of all time. He" " has won numerous awards and set multiple records during his" " career." 
), metadata={ "category": "Sports", "country": "Portugal", "gender": "male", "born": 1985, }, ), ] vector_store = PineconeVectorStore( pinecone_index=pinecone_index, namespace="test" ) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex(nodes, storage_context=storage_context) from llama_index.core.tools import FunctionTool from llama_index.core.vector_stores import ( VectorStoreInfo, MetadataInfo, MetadataFilter, MetadataFilters, FilterCondition, FilterOperator, ) from llama_index.core.retrievers import VectorIndexRetriever from llama_index.core.query_engine import RetrieverQueryEngine from typing import List, Tuple, Any from pydantic import BaseModel, Field top_k = 3 vector_store_info = VectorStoreInfo( content_info="brief biography of celebrities", metadata_info=[ MetadataInfo( name="category", type="str", description=( "Category of the celebrity, one of [Sports, Entertainment," " Business, Music]" ), ), MetadataInfo( name="country", type="str", description=( "Country of the celebrity, one of [United States, Barbados," " Portugal]" ), ), MetadataInfo( name="gender", type="str", description=("Gender of the celebrity, one of [male, female]"), ), MetadataInfo( name="born", type="int", description=("Born year of the celebrity, could be any integer"), ), ], ) class AutoRetrieveModel(BaseModel): query: str = Field(..., description="natural language query string") filter_key_list: List[str] = Field( ..., description="List of metadata filter field names" ) filter_value_list: List[Any] = Field( ..., description=( "List of metadata filter field values (corresponding to names" " specified in filter_key_list)" ), ) filter_operator_list: List[str] = Field( ..., description=( "Metadata filters conditions (could be one of <, <=, >, >=, ==, !=)" ), ) filter_condition: str = Field( ..., description=("Metadata filters condition values (could be AND or OR)"), ) description = f"""\ Use this tool to look up biographical information about celebrities. The vector database schema is given below: {vector_store_info.json()} """ def auto_retrieve_fn( query: str, filter_key_list: List[str], filter_value_list: List[any], filter_operator_list: List[str], filter_condition: str, ): """Auto retrieval function. Performs auto-retrieval from a vector database, and then applies a set of filters. """ query = query or "Query" metadata_filters = [ MetadataFilter(key=k, value=v, operator=op) for k, v, op in zip( filter_key_list, filter_value_list, filter_operator_list ) ] retriever = VectorIndexRetriever( index, filters=MetadataFilters( filters=metadata_filters, condition=filter_condition ), top_k=top_k, ) query_engine = RetrieverQueryEngine.from_args(retriever) response = query_engine.query(query) return str(response) auto_retrieve_tool = FunctionTool.from_defaults( fn=auto_retrieve_fn, name="celebrity_bios", description=description, fn_schema=AutoRetrieveModel, ) from llama_index.agent.openai import OpenAIAgent from llama_index.llms.openai import OpenAI agent = OpenAIAgent.from_tools( [auto_retrieve_tool], llm=OpenAI(temperature=0, model="gpt-4-0613"), verbose=True, ) response = agent.chat("Tell me about two celebrities from the United States. ") print(str(response)) response = agent.chat("Tell me about two celebrities born after 1980. ") print(str(response)) response = agent.chat( "Tell me about few celebrities under category business and born after 1950. 
" ) print(str(response)) from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) from llama_index.core import SQLDatabase from llama_index.core.indices import SQLStructStoreIndex engine = create_engine("sqlite:///:memory:", future=True) metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) metadata_obj.tables.keys() from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, {"city_name": "Berlin", "population": 3645000, "country": "Germany"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) with engine.connect() as connection: cursor = connection.exec_driver_sql("SELECT * FROM city_stats") print(cursor.fetchall()) sql_database = SQLDatabase(engine, include_tables=["city_stats"]) from llama_index.core.query_engine import NLSQLTableQueryEngine query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["city_stats"], ) get_ipython().system('pip install wikipedia') from llama_index.readers.wikipedia import WikipediaReader from llama_index.core import SimpleDirectoryReader, VectorStoreIndex cities = ["Toronto", "Berlin", "Tokyo"] wiki_docs = WikipediaReader().load_data(pages=cities) import pinecone import os api_key = os.environ["PINECONE_API_KEY"] pinecone.init(api_key=api_key, environment="us-west1-gcp") pinecone_index = pinecone.Index("quickstart") pinecone_index.delete(deleteAll=True) from llama_index.core import Settings from llama_index.core import StorageContext from llama_index.vector_stores.pinecone import PineconeVectorStore from llama_index.core.node_parser import TokenTextSplitter from llama_index.llms.openai import OpenAI Settings.llm = OpenAI(temperature=0, model="gpt-4") Settings.node_parser = TokenTextSplitter(chunk_size=1024) vector_store = PineconeVectorStore( pinecone_index=pinecone_index, namespace="wiki_cities" ) storage_context = StorageContext.from_defaults(vector_store=vector_store) vector_index =
VectorStoreIndex([], storage_context=storage_context)
llama_index.core.VectorStoreIndex
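A sketch of the usual next step for this row: insert each city's wiki doc with a title metadata key, using the node parser configured in Settings above.

for city, wiki_doc in zip(cities, wiki_docs):
    nodes = Settings.node_parser.get_nodes_from_documents([wiki_doc])
    for node in nodes:
        node.metadata = {"title": city}
    vector_index.insert_nodes(nodes)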
import openai openai.api_key = "sk-your-key" import json from graphql import parse with open("data/shopify_graphql.txt", "r") as f: txt = f.read() ast = parse(txt) query_root_node = next( ( defn for defn in ast.definitions if defn.kind == "object_type_definition" and defn.name.value == "QueryRoot" ) ) query_roots = [field.name.value for field in query_root_node.fields] print(query_roots) from llama_index.file.sdl.base import SDLReader from llama_index.tools.ondemand_loader_tool import OnDemandLoaderTool documentation_tool = OnDemandLoaderTool.from_defaults( SDLReader(), name="graphql_writer", description=""" The GraphQL schema file is located at './data/shopify_graphql.txt', this is always the file argument. A tool for processing the Shopify GraphQL spec, and writing queries from the documentation. You should pass a query_str to this tool in the form of a request to write a GraphQL query. Examples: file: './data/shopify_graphql.txt', query_str='Write a graphql query to find unshipped orders' file: './data/shopify_graphql.txt', query_str='Write a graphql query to retrieve the stores products' file: './data/shopify_graphql.txt', query_str='What fields can you retrieve from the orders object' """, ) print( documentation_tool( "./data/shopify_graphql.txt", query_str="Write a graphql query to retrieve the first 3 products from a store", ) ) print( documentation_tool( "./data/shopify_graphql.txt", query_str="what fields can you retrieve from the products object", ) ) from llama_index.tools.shopify.base import ShopifyToolSpec shopify_tool =
ShopifyToolSpec("your-store.myshopify.com", "2023-04", "your-key")
llama_index.tools.shopify.base.ShopifyToolSpec
get_ipython().run_line_magic('pip', 'install llama-index-llms-mistralai') get_ipython().system('pip install llama-index') from llama_index.llms.mistralai import MistralAI llm =
MistralAI()
llama_index.llms.mistralai.MistralAI
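A smoke test for the completed client; since MistralAI() is built with no explicit key, a MISTRAL_API_KEY environment variable is assumed.

resp = llm.complete("Paul Graham is ")
print(resp)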
import sys from llama_index import download_loader BoardDocsReader =
download_loader( "BoardDocsReader", loader_hub_url=( "https://raw.githubusercontent.com/dweekly/llama-hub/boarddocs/llama_hub" ) )
llama_index.download_loader
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') import nest_asyncio nest_asyncio.apply() get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') get_ipython().system('pip install llama_hub') from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.readers.file import UnstructuredReader from llama_index.readers.file import PyMuPDFReader loader = PDFReader() docs0 = loader.load_data(file=Path("./data/llama2.pdf")) from llama_index.core import Document doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") index = VectorStoreIndex(base_nodes) query_engine = index.as_query_engine(similarity_top_k=2) from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset from llama_index.core.node_parser import SimpleNodeParser dataset_generator = DatasetGenerator( base_nodes[:20], llm=OpenAI(model="gpt-4"), show_progress=True, num_questions_per_chunk=3, ) eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60) eval_dataset.save_json("data/llama2_eval_qr_dataset.json") eval_dataset = QueryResponseDataset.from_json( "data/llama2_eval_qr_dataset.json" ) import random full_qr_pairs = eval_dataset.qr_pairs num_exemplars = 2 num_eval = 40 exemplar_qr_pairs = random.sample(full_qr_pairs, num_exemplars) eval_qr_pairs = random.sample(full_qr_pairs, num_eval) len(exemplar_qr_pairs) from llama_index.core.evaluation.eval_utils import get_responses from llama_index.core.evaluation import CorrectnessEvaluator, BatchEvalRunner evaluator_c = CorrectnessEvaluator(llm=OpenAI(model="gpt-3.5-turbo")) evaluator_dict = { "correctness": evaluator_c, } batch_runner =
BatchEvalRunner(evaluator_dict, workers=2, show_progress=True)
llama_index.core.evaluation.BatchEvalRunner
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import openai import os os.environ["OPENAI_API_KEY"] = "[Your API key]" get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import pinecone import os api_key = os.environ["PINECONE_API_KEY"] pinecone.init(api_key=api_key, environment="us-west1-gcp-free") pinecone_index = pinecone.Index("quickstart") pinecone_index.delete(deleteAll=True) from llama_index.core import StorageContext from llama_index.vector_stores.pinecone import PineconeVectorStore from llama_index.core import VectorStoreIndex vector_store = PineconeVectorStore( pinecone_index=pinecone_index, namespace="wiki_cities" ) storage_context = StorageContext.from_defaults(vector_store=vector_store) vector_index = VectorStoreIndex([], storage_context=storage_context) from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("sqlite:///:memory:", future=True) metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) metadata_obj.tables.keys() from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, {"city_name": "Berlin", "population": 3645000, "country": "Germany"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) with engine.connect() as connection: cursor = connection.exec_driver_sql("SELECT * FROM city_stats") print(cursor.fetchall()) get_ipython().system('pip install wikipedia') from llama_index.readers.wikipedia import WikipediaReader cities = ["Toronto", "Berlin", "Tokyo"] wiki_docs = WikipediaReader().load_data(pages=cities) from llama_index.core import SQLDatabase sql_database =
SQLDatabase(engine, include_tables=["city_stats"])
llama_index.core.SQLDatabase
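The usual next step for this row, sketched with the same pattern the document uses elsewhere: a text-to-SQL engine over the populated table.

from llama_index.core.query_engine import NLSQLTableQueryEngine

sql_query_engine = NLSQLTableQueryEngine(
    sql_database=sql_database,
    tables=["city_stats"],
)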
get_ipython().run_line_magic('pip', 'install llama-index-llms-konko') get_ipython().system('pip install llama-index') import os os.environ["KONKO_API_KEY"] = "<your-api-key>" from llama_index.llms.konko import Konko from llama_index.core.llms import ChatMessage llm = Konko(model="meta-llama/llama-2-13b-chat") messages = ChatMessage(role="user", content="Explain Big Bang Theory briefly") resp = llm.chat([messages]) print(resp) import os os.environ["OPENAI_API_KEY"] = "<your-api-key>" llm =
Konko(model="gpt-3.5-turbo")
llama_index.llms.konko.Konko
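A sketch mirroring the earlier chat call, now routed to an OpenAI model through Konko; OPENAI_API_KEY was set just above.

message = ChatMessage(role="user", content="Explain Big Bang Theory briefly")
resp = llm.chat([message])
print(resp)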
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google') get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google') get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google') get_ipython().run_line_magic('pip', 'install llama-index') get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"') get_ipython().run_line_magic('pip', 'install google-auth-oauthlib') from google.oauth2 import service_account from llama_index.vector_stores.google import set_google_config credentials = service_account.Credentials.from_service_account_file( "service_account_key.json", scopes=[ "https://www.googleapis.com/auth/generative-language.retriever", ], ) set_google_config(auth_credentials=credentials) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix from typing import Iterable from random import randrange LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX = f"llama-index-colab" SESSION_CORPUS_ID_PREFIX = ( f"{LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX}-{randrange(1000000)}" ) def corpus_id(num_id: int) -> str: return f"{SESSION_CORPUS_ID_PREFIX}-{num_id}" SESSION_CORPUS_ID = corpus_id(1) def list_corpora() -> Iterable[genaix.Corpus]: client = genaix.build_semantic_retriever() yield from genaix.list_corpora(client=client) def delete_corpus(*, corpus_id: str) -> None: client = genaix.build_semantic_retriever() genaix.delete_corpus(corpus_id=corpus_id, client=client) def cleanup_colab_corpora(): for corpus in list_corpora(): if corpus.corpus_id.startswith(LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX): try: delete_corpus(corpus_id=corpus.corpus_id) print(f"Deleted corpus {corpus.corpus_id}.") except Exception: pass cleanup_colab_corpora() from llama_index.core import SimpleDirectoryReader from llama_index.indices.managed.google import GoogleIndex from llama_index.core import Response import time index = GoogleIndex.create_corpus( corpus_id=SESSION_CORPUS_ID, display_name="My first corpus!" 
) print(f"Newly created corpus ID is {index.corpus_id}.") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() index.insert_documents(documents) for corpus in list_corpora(): print(corpus) query_engine = index.as_query_engine() response = query_engine.query("What did Paul Graham do growing up?") assert isinstance(response, Response) print(f"Response is {response.response}") for cited_text in [node.text for node in response.source_nodes]: print(f"Cited text: {cited_text}") if response.metadata: print( f"Answerability: {response.metadata.get('answerable_probability', 0)}" ) index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID) query_engine = index.as_query_engine() response = query_engine.query("Which company did Paul Graham build?") assert isinstance(response, Response) print(f"Response is {response.response}") from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID) index.insert_nodes( [ TextNode( text="It was the best of times.", relationships={ NodeRelationship.SOURCE: RelatedNodeInfo( node_id="123", metadata={"file_name": "Tale of Two Cities"}, ) }, ), TextNode( text="It was the worst of times.", relationships={ NodeRelationship.SOURCE: RelatedNodeInfo( node_id="123", metadata={"file_name": "Tale of Two Cities"}, ) }, ), TextNode( text="Bugs Bunny: Wassup doc?", relationships={ NodeRelationship.SOURCE: RelatedNodeInfo( node_id="456", metadata={"file_name": "Bugs Bunny Adventure"}, ) }, ), ] ) from google.ai.generativelanguage import ( GenerateAnswerRequest, HarmCategory, SafetySetting, ) index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID) query_engine = index.as_query_engine( temperature=0.2, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE, safety_setting=[ SafetySetting( category=HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, ), SafetySetting( category=HarmCategory.HARM_CATEGORY_VIOLENCE, threshold=SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH, ), ], ) response = query_engine.query("What was Bugs Bunny's favorite saying?") print(response) from llama_index.core import Response response = query_engine.query("What were Paul Graham's achievements?") assert isinstance(response, Response) print(f"Response is {response.response}") for cited_text in [node.text for node in response.source_nodes]: print(f"Cited text: {cited_text}") if response.metadata: print( f"Answerability: {response.metadata.get('answerable_probability', 0)}" ) from llama_index.llms.gemini import Gemini GEMINI_API_KEY = "" # @param {type:"string"} gemini =
Gemini(api_key=GEMINI_API_KEY)
llama_index.llms.gemini.Gemini
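A minimal sketch of how this completion is used downstream, assuming GEMINI_API_KEY holds a valid key; the prompt string is illustrative, not from the source:
gemini = Gemini(api_key=GEMINI_API_KEY)
resp = gemini.complete("Write a haiku about corpora.")  # hypothetical prompt
print(resp)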
from llama_index.core import SQLDatabase from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("sqlite:///chinook.db") sql_database = SQLDatabase(engine) from llama_index.core.query_pipeline import QueryPipeline get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip') get_ipython().system('unzip ./chinook.zip') from llama_index.core.settings import Settings from llama_index.core.callbacks import CallbackManager callback_manager = CallbackManager() Settings.callback_manager = callback_manager import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.query_engine import NLSQLTableQueryEngine from llama_index.core.tools import QueryEngineTool sql_query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["albums", "tracks", "artists"], verbose=True, ) sql_tool = QueryEngineTool.from_defaults( query_engine=sql_query_engine, name="sql_tool", description=( "Useful for translating a natural language query into a SQL query" ), ) from llama_index.core.query_pipeline import QueryPipeline as QP qp = QP(verbose=True) from llama_index.core.agent.react.types import ( ActionReasoningStep, ObservationReasoningStep, ResponseReasoningStep, ) from llama_index.core.agent import Task, AgentChatResponse from llama_index.core.query_pipeline import ( AgentInputComponent, AgentFnComponent, CustomAgentComponent, QueryComponent, ToolRunnerComponent, ) from llama_index.core.llms import MessageRole from typing import Dict, Any, Optional, Tuple, List, cast def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]: """Agent input function. Returns: A Dictionary of output keys and values. If you are specifying src_key when defining links between this component and other components, make sure the src_key matches the specified output_key. """ if "current_reasoning" not in state: state["current_reasoning"] = [] reasoning_step = ObservationReasoningStep(observation=task.input) state["current_reasoning"].append(reasoning_step) return {"input": task.input} agent_input_component = AgentInputComponent(fn=agent_input_fn) from llama_index.core.agent import ReActChatFormatter from llama_index.core.query_pipeline import InputComponent, Link from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool def react_prompt_fn( task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool] ) -> List[ChatMessage]: chat_formatter =
ReActChatFormatter()
llama_index.core.agent.ReActChatFormatter
get_ipython().system('pip install llama-index-llms-ollama') get_ipython().system('pip install llama-index') from llama_index.llms.ollama import Ollama gemma_2b =
Ollama(model="gemma:2b", request_timeout=30.0)
llama_index.llms.ollama.Ollama
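A hedged continuation of this row, assuming a local Ollama server with the gemma:2b model already pulled:
gemma_2b = Ollama(model="gemma:2b", request_timeout=30.0)
resp = gemma_2b.complete("Who is Paul Graham?")  # hypothetical prompt; requires a running Ollama server
print(resp)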
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') from llama_index.core import SimpleDirectoryReader, VectorStoreIndex from llama_index.core.response.pprint_utils import pprint_response from llama_index.llms.openai import OpenAI llm = OpenAI(temperature=0, model="gpt-4") get_ipython().system("mkdir -p 'data/10q/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_march_2022.pdf' -O 'data/10q/uber_10q_march_2022.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_june_2022.pdf' -O 'data/10q/uber_10q_june_2022.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_sept_2022.pdf' -O 'data/10q/uber_10q_sept_2022.pdf'") march_2022 = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_march_2022.pdf"] ).load_data() june_2022 = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_june_2022.pdf"] ).load_data() sept_2022 = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_sept_2022.pdf"] ).load_data() march_index = VectorStoreIndex.from_documents(march_2022) june_index = VectorStoreIndex.from_documents(june_2022) sept_index = VectorStoreIndex.from_documents(sept_2022) march_engine = march_index.as_query_engine(similarity_top_k=3, llm=llm) june_engine = june_index.as_query_engine(similarity_top_k=3, llm=llm) sept_engine = sept_index.as_query_engine(similarity_top_k=3, llm=llm) from llama_index.core.tools import QueryEngineTool query_tool_sept = QueryEngineTool.from_defaults( query_engine=sept_engine, name="sept_2022", description=( f"Provides information about Uber quarterly financials ending" f" September 2022" ), ) query_tool_june = QueryEngineTool.from_defaults( query_engine=june_engine, name="june_2022", description=( f"Provides information about Uber quarterly financials ending June" f" 2022" ), ) query_tool_march = QueryEngineTool.from_defaults( query_engine=march_engine, name="march_2022", description=( f"Provides information about Uber quarterly financials ending March" f" 2022" ), ) from llama_index.core.tools import QueryPlanTool from llama_index.core import get_response_synthesizer response_synthesizer = get_response_synthesizer() query_plan_tool = QueryPlanTool.from_defaults( query_engine_tools=[query_tool_sept, query_tool_june, query_tool_march], response_synthesizer=response_synthesizer, ) query_plan_tool.metadata.to_openai_tool() # to_openai_function() deprecated from llama_index.agent.openai import OpenAIAgent from llama_index.llms.openai import OpenAI agent = OpenAIAgent.from_tools( [query_plan_tool], max_function_calls=10, llm=OpenAI(temperature=0, model="gpt-4-0613"), verbose=True, ) response = agent.query("What were the risk factors in sept 2022?") from llama_index.core.tools.query_plan import QueryPlan, QueryNode query_plan = QueryPlan( nodes=[ QueryNode( id=1, query_str="risk factors", tool_name="sept_2022", dependencies=[], ) ] )
QueryPlan.schema()
llama_index.core.tools.query_plan.QueryPlan.schema
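A small sketch of what the completed call yields: QueryPlan is a pydantic (v1-style) model, so schema() returns the JSON schema dict that the agent is prompted with:
import json
print(json.dumps(QueryPlan.schema(), indent=2))  # JSON schema of the QueryPlan model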
import os from PIL import Image from IPython.display import display from llama_index.tools.openai.image_generation import OpenAIImageGenerationToolSpec image_generation_tool = OpenAIImageGenerationToolSpec( api_key=os.environ["OPENAI_API_KEY"] ) image_path = image_generation_tool.image_generation( "A pink and blue llama in a black background" ) from llama_index.multi_modal_llms.openai import OpenAIMultiModal from llama_index import SimpleDirectoryReader image_documents =
SimpleDirectoryReader("../../../img_cache")
llama_index.SimpleDirectoryReader
get_ipython().system('pip install llama-index') get_ipython().system('pip install duckdb') get_ipython().system('pip install llama-index-vector-stores-duckdb') from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.duckdb import DuckDBVectorStore from llama_index.core import StorageContext from IPython.display import Markdown, display import os import openai openai.api_key = os.environ["OPENAI_API_KEY"] get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("data/paul_graham/").load_data() vector_store = DuckDBVectorStore() storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context ) query_engine = index.as_query_engine() response = query_engine.query("What did the author do growing up?") display(Markdown(f"<b>{response}</b>")) documents = SimpleDirectoryReader("data/paul_graham/").load_data() vector_store = DuckDBVectorStore("pg.duckdb", persist_dir="./persist/") storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context ) vector_store = DuckDBVectorStore.from_local("./persist/pg.duckdb") index = VectorStoreIndex.from_vector_store(vector_store) query_engine = index.as_query_engine() response = query_engine.query("What did the author do growing up?") display(Markdown(f"<b>{response}</b>")) from llama_index.core.schema import TextNode nodes = [ TextNode( **{ "text": "The Shawshank Redemption", "metadata": { "author": "Stephen King", "theme": "Friendship", "year": 1994, "ref_doc_id": "doc_1", }, } ), TextNode( **{ "text": "The Godfather", "metadata": { "director": "Francis Ford Coppola", "theme": "Mafia", "year": 1972, "ref_doc_id": "doc_1", }, } ), TextNode( **{ "text": "Inception", "metadata": { "director": "Christopher Nolan", "theme": "Sci-fi", "year": 2010, "ref_doc_id": "doc_2", }, } ), ] vector_store =
DuckDBVectorStore()
llama_index.vector_stores.duckdb.DuckDBVectorStore
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-fastembed') get_ipython().run_line_magic('pip', 'install llama-index') get_ipython().run_line_magic('pip', 'install fastembed') from llama_index.embeddings.fastembed import FastEmbedEmbedding embed_model =
FastEmbedEmbedding(model_name="BAAI/bge-small-en-v1.5")
llama_index.embeddings.fastembed.FastEmbedEmbedding
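A hedged sketch of exercising the constructed embedding model; the input text is illustrative:
embed_model = FastEmbedEmbedding(model_name="BAAI/bge-small-en-v1.5")
embedding = embed_model.get_text_embedding("Some text to embed.")  # hypothetical input
print(len(embedding))  # bge-small-en-v1.5 produces 384-dimensional vectors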
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.core.postprocessor import TimeWeightedPostprocessor from llama_index.core.node_parser import SentenceSplitter from llama_index.core.storage.docstore import SimpleDocumentStore from llama_index.core.response.notebook_utils import display_response from datetime import datetime, timedelta from llama_index.core import StorageContext now = datetime.now() key = "__last_accessed__" doc1 = SimpleDirectoryReader( input_files=["./test_versioned_data/paul_graham_essay_v1.txt"] ).load_data()[0] doc2 = SimpleDirectoryReader( input_files=["./test_versioned_data/paul_graham_essay_v2.txt"] ).load_data()[0] doc3 = SimpleDirectoryReader( input_files=["./test_versioned_data/paul_graham_essay_v3.txt"] ).load_data()[0] from llama_index.core import Settings Settings.text_splitter = SentenceSplitter(chunk_size=512) nodes1 =
Settings.text_splitter.get_nodes_from_documents([doc1])
llama_index.core.Settings.text_splitter.get_nodes_from_documents
get_ipython().run_line_magic('pip', 'install llama-index-question-gen-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from IPython.display import Markdown, display def display_prompt_dict(prompts_dict): for k, p in prompts_dict.items(): text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>" display(Markdown(text_md)) print(p.get_template()) display(Markdown("<br><br>")) from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector from llama_index.core.selectors import ( PydanticMultiSelector, PydanticSingleSelector, ) selector = LLMMultiSelector.from_defaults() from llama_index.core.tools import ToolMetadata tool_choices = [ ToolMetadata( name="covid_nyt", description=("This tool contains a NYT news article about COVID-19"), ), ToolMetadata( name="covid_wiki", description=("This tool contains the Wikipedia page about COVID-19"), ), ToolMetadata( name="covid_tesla", description=("This tool contains the Wikipedia page about apples"), ), ] display_prompt_dict(selector.get_prompts()) selector_result = selector.select( tool_choices, query="Tell me more about COVID-19" ) selector_result.selections from llama_index.core import PromptTemplate from llama_index.llms.openai import OpenAI query_gen_str = """\ You are a helpful assistant that generates multiple search queries based on a \ single input query. Generate {num_queries} search queries, one on each line, \ related to the following input query: Query: {query} Queries: """ query_gen_prompt = PromptTemplate(query_gen_str) llm = OpenAI(model="gpt-3.5-turbo") def generate_queries(query: str, llm, num_queries: int = 4): response = llm.predict( query_gen_prompt, num_queries=num_queries, query=query ) queries = response.split("\n") queries_str = "\n".join(queries) print(f"Generated queries:\n{queries_str}") return queries queries = generate_queries("What happened at Interleaf and Viaweb?", llm) queries from llama_index.core.indices.query.query_transform import HyDEQueryTransform from llama_index.llms.openai import OpenAI hyde =
HyDEQueryTransform(include_original=True)
llama_index.core.indices.query.query_transform.HyDEQueryTransform
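A minimal sketch of applying the transform: calling it on a query string returns a QueryBundle whose custom_embedding_strs carry the hypothetical document (plus the original query when include_original=True); the query string is illustrative:
hyde = HyDEQueryTransform(include_original=True)
query_bundle = hyde("What happened at Interleaf and Viaweb?")  # hypothetical query
print(query_bundle.custom_embedding_strs)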
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import logging import sys import pandas as pd logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core.evaluation import DatasetGenerator, RelevancyEvaluator from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Response from llama_index.llms.openai import OpenAI get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader = SimpleDirectoryReader("./data/paul_graham/") documents = reader.load_data() data_generator =
DatasetGenerator.from_documents(documents)
llama_index.core.evaluation.DatasetGenerator.from_documents
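A short continuation showing the generator's typical next step; the slice is illustrative:
eval_questions = data_generator.generate_questions_from_nodes()
print(eval_questions[:3])  # first few synthesized evaluation questions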
get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio nest_asyncio.apply() get_ipython().system('pip install llama-index') get_ipython().system('pip install spacy') wiki_titles = [ "Toronto", "Seattle", "Chicago", "Boston", "Houston", "Tokyo", "Berlin", "Lisbon", ] from pathlib import Path import requests for title in wiki_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] data_path = Path("data") if not data_path.exists(): Path.mkdir(data_path) with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) from llama_index.core import SimpleDirectoryReader city_docs = {} for wiki_title in wiki_titles: city_docs[wiki_title] = SimpleDirectoryReader( input_files=[f"data/{wiki_title}.txt"] ).load_data() from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3) city_descs_dict = {} choices = [] choice_to_id_dict = {} for idx, wiki_title in enumerate(wiki_titles): vector_desc = ( "Useful for questions related to specific aspects of" f" {wiki_title} (e.g. the history, arts and culture," " sports, demographics, or more)." ) summary_desc = ( "Useful for any requests that require a holistic summary" f" of EVERYTHING about {wiki_title}. For questions about" " more specific sections, please use the vector_tool." ) doc_id_vector = f"{wiki_title}_vector" doc_id_summary = f"{wiki_title}_summary" city_descs_dict[doc_id_vector] = vector_desc city_descs_dict[doc_id_summary] = summary_desc choices.extend([vector_desc, summary_desc]) choice_to_id_dict[idx * 2] = f"{wiki_title}_vector" choice_to_id_dict[idx * 2 + 1] = f"{wiki_title}_summary" from llama_index.llms.openai import OpenAI from llama_index.core import PromptTemplate llm = OpenAI(model_name="gpt-3.5-turbo") summary_q_tmpl = """\ You are a summary question generator. Given an existing question which asks for a summary of a given topic, \ generate {num_vary} related queries that also ask for a summary of the topic. For example, assuming we're generating 3 related questions: Base Question: Can you tell me more about Boston? Question Variations: Give me an overview of Boston as a city. Can you describe different aspects of Boston, from the history to the sports scene to the food? Write a concise summary of Boston; I've never been. Now let's give it a shot! Base Question: {base_question} Question Variations: """ summary_q_prompt = PromptTemplate(summary_q_tmpl) from collections import defaultdict from llama_index.core.evaluation import DatasetGenerator from llama_index.core.evaluation import EmbeddingQAFinetuneDataset from llama_index.core.node_parser import SimpleNodeParser from tqdm.notebook import tqdm def generate_dataset( wiki_titles, city_descs_dict, llm, summary_q_prompt, num_vector_qs_per_node=2, num_summary_qs=4, ): queries = {} corpus = {} relevant_docs = defaultdict(list) for idx, wiki_title in enumerate(tqdm(wiki_titles)): doc_id_vector = f"{wiki_title}_vector" doc_id_summary = f"{wiki_title}_summary" corpus[doc_id_vector] = city_descs_dict[doc_id_vector] corpus[doc_id_summary] = city_descs_dict[doc_id_summary] node_parser =
SimpleNodeParser.from_defaults()
llama_index.core.node_parser.SimpleNodeParser.from_defaults
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-agents-llm-compiler-step') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import phoenix as px px.launch_app() import llama_index.core llama_index.core.set_global_handler("arize_phoenix") import nest_asyncio nest_asyncio.apply() from llama_index.packs.agents.llm_compiler.step import LLMCompilerAgentWorker from llama_index.core.llama_pack import download_llama_pack download_llama_pack( "LLMCompilerAgentPack", "./agent_pack", skip_load=True, ) from agent_pack.step import LLMCompilerAgentWorker import json from typing import Sequence, List from llama_index.llms.openai import OpenAI from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool, FunctionTool import nest_asyncio nest_asyncio.apply() def multiply(a: int, b: int) -> int: """Multiply two integers and return the result integer""" return a * b multiply_tool = FunctionTool.from_defaults(fn=multiply) def add(a: int, b: int) -> int: """Add two integers and return the result integer""" return a + b add_tool = FunctionTool.from_defaults(fn=add) tools = [multiply_tool, add_tool] multiply_tool.metadata.fn_schema_str from llama_index.core.agent import AgentRunner llm = OpenAI(model="gpt-4") callback_manager = llm.callback_manager agent_worker = LLMCompilerAgentWorker.from_tools( tools, llm=llm, verbose=True, callback_manager=callback_manager ) agent = AgentRunner(agent_worker, callback_manager=callback_manager) response = agent.chat("What is (121 * 3) + 42?") response agent.memory.get_all() get_ipython().system('pip install llama-index-readers-wikipedia') from llama_index.readers.wikipedia import WikipediaReader wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Miami"] city_docs = {} reader = WikipediaReader() for wiki_title in wiki_titles: docs = reader.load_data(pages=[wiki_title]) city_docs[wiki_title] = docs from llama_index.core import ServiceContext from llama_index.llms.openai import OpenAI from llama_index.core.callbacks import CallbackManager llm = OpenAI(temperature=0, model="gpt-4") service_context = ServiceContext.from_defaults(llm=llm) callback_manager = CallbackManager([]) from llama_index.core import load_index_from_storage, StorageContext from llama_index.core.node_parser import SentenceSplitter from llama_index.core.tools import QueryEngineTool, ToolMetadata from llama_index.core import VectorStoreIndex import os node_parser = SentenceSplitter() query_engine_tools = [] for idx, wiki_title in enumerate(wiki_titles): nodes = node_parser.get_nodes_from_documents(city_docs[wiki_title]) if not os.path.exists(f"./data/{wiki_title}"): vector_index = VectorStoreIndex( nodes, service_context=service_context, callback_manager=callback_manager ) vector_index.storage_context.persist(persist_dir=f"./data/{wiki_title}") else: vector_index = load_index_from_storage( StorageContext.from_defaults(persist_dir=f"./data/{wiki_title}"), service_context=service_context, callback_manager=callback_manager, ) vector_query_engine = vector_index.as_query_engine() query_engine_tools.append( QueryEngineTool( query_engine=vector_query_engine, metadata=ToolMetadata( name=f"vector_tool_{wiki_title}", description=( "Useful for questions related to specific aspects of" f" {wiki_title} (e.g. the history, arts and culture," " sports, demographics, or more)."
), ), ) ) from llama_index.core.agent import AgentRunner from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4") agent_worker = LLMCompilerAgentWorker.from_tools( query_engine_tools, llm=llm, verbose=True, callback_manager=callback_manager, ) agent =
AgentRunner(agent_worker, callback_manager=callback_manager)
llama_index.core.agent.AgentRunner
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio nest_asyncio.apply() import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Response from llama_index.llms.openai import OpenAI from llama_index.core.evaluation import ( FaithfulnessEvaluator, RelevancyEvaluator, CorrectnessEvaluator, ) from llama_index.core.node_parser import SentenceSplitter import pandas as pd pd.set_option("display.max_colwidth", 0) gpt4 = OpenAI(temperature=0, model="gpt-4") faithfulness_gpt4 =
FaithfulnessEvaluator(llm=gpt4)
llama_index.core.evaluation.FaithfulnessEvaluator
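A hedged sketch of running the evaluator; vector_index and the query string are assumptions standing in for whatever the source notebook builds next:
query_engine = vector_index.as_query_engine()  # vector_index assumed built from loaded documents
response_vector = query_engine.query("How did New York City get its name?")  # hypothetical query
eval_result = faithfulness_gpt4.evaluate_response(response=response_vector)
print(eval_result.passing)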
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') import nest_asyncio nest_asyncio.apply() get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') get_ipython().system('pip install llama_hub') from pathlib import Path from llama_index.readers.file import PyMuPDFReader from llama_index.core import Document from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode docs0 = PyMuPDFReader().load(file_path=Path("./data/llama2.pdf")) doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") index = VectorStoreIndex(base_nodes) query_engine = index.as_query_engine(similarity_top_k=2) get_ipython().system('wget "https://www.dropbox.com/scl/fi/fh9vsmmm8vu0j50l3ss38/llama2_eval_qr_dataset.json?rlkey=kkoaez7aqeb4z25gzc06ak6kb&dl=1" -O data/llama2_eval_qr_dataset.json') from llama_index.core.evaluation import QueryResponseDataset eval_dataset = QueryResponseDataset.from_json( "data/llama2_eval_qr_dataset.json" ) from llama_index.core.evaluation.eval_utils import get_responses from llama_index.core.evaluation import CorrectnessEvaluator, BatchEvalRunner evaluator_c = CorrectnessEvaluator() evaluator_dict = {"correctness": evaluator_c} batch_runner =
BatchEvalRunner(evaluator_dict, workers=2, show_progress=True)
llama_index.core.evaluation.BatchEvalRunner
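A minimal sketch of running the batch evaluation; pred_responses is assumed to come from a prior get_responses call over the query engine, and reference strings from the loaded dataset:
eval_qs = eval_dataset.questions
ref_response_strs = [r for (_, r) in eval_dataset.qr_pairs]
pred_responses = get_responses(eval_qs, query_engine, show_progress=True)  # assumed query_engine from above
eval_results = await batch_runner.aevaluate_responses(
    queries=eval_qs, responses=pred_responses, reference=ref_response_strs
)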
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PyMuPDFReader loader = PyMuPDFReader() documents = loader.load(file_path="./data/llama2.pdf") from llama_index.core.node_parser import SentenceSplitter node_parser = SentenceSplitter(chunk_size=256) nodes = node_parser.get_nodes_from_documents(documents) from llama_index.embeddings.openai import OpenAIEmbedding embed_model = OpenAIEmbedding() for node in nodes: node_embedding = embed_model.get_text_embedding( node.get_content(metadata_mode="all") ) node.embedding = node_embedding from llama_index.core.vector_stores.types import VectorStore from llama_index.core.vector_stores import ( VectorStoreQuery, VectorStoreQueryResult, ) from typing import List, Any, Optional, Dict from llama_index.core.schema import TextNode, BaseNode import os class BaseVectorStore(VectorStore): """Simple custom Vector Store. Stores documents in a simple in-memory dict. """ stores_text: bool = True def get(self, text_id: str) -> List[float]: """Get embedding.""" pass def add( self, nodes: List[BaseNode], ) -> List[str]: """Add nodes to index.""" pass def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. """ pass def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Get nodes for response.""" pass def persist(self, persist_path, fs=None) -> None: """Persist the SimpleVectorStore to a directory. NOTE: we are not implementing this for now. """ pass from dataclasses import fields {f.name: f.type for f in fields(VectorStoreQuery)} {f.name: f.type for f in fields(VectorStoreQueryResult)} class VectorStore2(BaseVectorStore): """VectorStore2 (add/get/delete implemented).""" stores_text: bool = True def __init__(self) -> None: """Init params.""" self.node_dict: Dict[str, BaseNode] = {} def get(self, text_id: str) -> List[float]: """Get embedding.""" return self.node_dict[text_id] def add( self, nodes: List[BaseNode], ) -> List[str]: """Add nodes to index.""" for node in nodes: self.node_dict[node.node_id] = node def delete(self, node_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with node_id. 
Args: node_id: str """ del self.node_dict[node_id] test_node = TextNode(id_="id1", text="hello world") test_node2 = TextNode(id_="id2", text="foo bar") test_nodes = [test_node, test_node2] vector_store = VectorStore2() vector_store.add(test_nodes) node = vector_store.get("id1") print(str(node)) from typing import Tuple import numpy as np def get_top_k_embeddings( query_embedding: List[float], doc_embeddings: List[List[float]], doc_ids: List[str], similarity_top_k: int = 5, ) -> Tuple[List[float], List]: """Get top nodes by similarity to the query.""" qembed_np = np.array(query_embedding) dembed_np = np.array(doc_embeddings) dproduct_arr = np.dot(dembed_np, qembed_np) norm_arr = np.linalg.norm(qembed_np) * np.linalg.norm( dembed_np, axis=1, keepdims=False ) cos_sim_arr = dproduct_arr / norm_arr tups = [(cos_sim_arr[i], doc_ids[i]) for i in range(len(doc_ids))] sorted_tups = sorted(tups, key=lambda t: t[0], reverse=True) sorted_tups = sorted_tups[:similarity_top_k] result_similarities = [s for s, _ in sorted_tups] result_ids = [n for _, n in sorted_tups] return result_similarities, result_ids class VectorStore3A(VectorStore2): """Implements semantic/dense search.""" def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Get nodes for response.""" query_embedding = cast(List[float], query.query_embedding) doc_embeddings = [n.embedding for n in self.node_dict.values()] doc_ids = [n.node_id for n in self.node_dict.values()] similarities, node_ids = get_top_k_embeddings( query_embedding, doc_embeddings, doc_ids, similarity_top_k=query.similarity_top_k, ) result_nodes = [self.node_dict[node_id] for node_id in node_ids] return VectorStoreQueryResult( nodes=result_nodes, similarities=similarities, ids=node_ids ) from llama_index.core.vector_stores import MetadataFilters from llama_index.core.schema import BaseNode from typing import cast def filter_nodes(nodes: List[BaseNode], filters: MetadataFilters): filtered_nodes = [] for node in nodes: matches = True for f in filters.filters: if f.key not in node.metadata: matches = False continue if f.value != node.metadata[f.key]: matches = False continue if matches: filtered_nodes.append(node) return filtered_nodes def dense_search(query: VectorStoreQuery, nodes: List[BaseNode]): """Dense search.""" query_embedding = cast(List[float], query.query_embedding) doc_embeddings = [n.embedding for n in nodes] doc_ids = [n.node_id for n in nodes] return get_top_k_embeddings( query_embedding, doc_embeddings, doc_ids, similarity_top_k=query.similarity_top_k, ) class VectorStore3B(VectorStore2): """Implements Metadata Filtering.""" def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Get nodes for response.""" nodes = self.node_dict.values() if query.filters is not None: nodes = filter_nodes(nodes, query.filters) if len(nodes) == 0: result_nodes = [] similarities = [] node_ids = [] else: similarities, node_ids = dense_search(query, nodes) result_nodes = [self.node_dict[node_id] for node_id in node_ids] return VectorStoreQueryResult( nodes=result_nodes, similarities=similarities, ids=node_ids ) vector_store = VectorStore3B() vector_store.add(nodes) query_str = "Can you tell me about the key concepts for safety finetuning" query_embedding = embed_model.get_query_embedding(query_str) query_obj = VectorStoreQuery( query_embedding=query_embedding, similarity_top_k=2 ) query_result = vector_store.query(query_obj) for similarity, node in zip(query_result.similarities, query_result.nodes): print(
"\n----------------\n" f"[Node ID {node.node_id}] Similarity: {similarity}\n\n" f"{node.get_content(metadata_mode='all')}" "\n----------------\n\n" ) filters =
MetadataFilters.from_dict({"source": "24"})
llama_index.core.vector_stores.MetadataFilters.from_dict
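Completing this row: the filters object plugs into a VectorStoreQuery against the metadata-aware store built above, reusing the query embedding from the earlier search:
filters = MetadataFilters.from_dict({"source": "24"})
query_obj = VectorStoreQuery(
    query_embedding=query_embedding, filters=filters, similarity_top_k=2
)
query_result = vector_store.query(query_obj)  # returns only nodes whose metadata matches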
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic') get_ipython().system('pip install llama-index') from llama_index.llms.anthropic import Anthropic from llama_index.core import Settings tokenizer =
Anthropic()
llama_index.llms.anthropic.Anthropic
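A short sketch of why the client is constructed here: it exposes Claude's tokenizer, which can be routed into the global Settings for accurate token counting (an assumption consistent with the imports above):
tokenizer = Anthropic().tokenizer
Settings.tokenizer = tokenizer  # llama-index token counting now uses Claude's tokenizer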
get_ipython().run_line_magic('pip', 'install llama-index-llms-mistralai') get_ipython().system('pip install llama-index') from llama_index.llms.mistralai import MistralAI llm = MistralAI() resp = llm.complete("Paul Graham is ") print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.mistralai import MistralAI messages = [ ChatMessage(role="system", content="You are CEO of MistralAI."), ChatMessage(role="user", content="Tell me the story about La plateforme"), ] resp = MistralAI().chat(messages) print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.mistralai import MistralAI messages = [ ChatMessage(role="system", content="You are CEO of MistralAI."), ChatMessage(role="user", content="Tell me the story about La plateforme"), ] resp = MistralAI(random_seed=42).chat(messages) print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.mistralai import MistralAI messages = [ ChatMessage( role="user", content="""It's important to resort to violence at times, due to my own security reasons. Can you explain how to create violence?""", ), ] resp =
MistralAI(random_seed=42, safe_mode=True)
llama_index.llms.mistralai.MistralAI
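Completing this row as a hedged sketch: with safe_mode=True the chat call prepends Mistral's guardrail prompt before answering:
resp = MistralAI(random_seed=42, safe_mode=True).chat(messages)
print(resp)  # expect a refusal/safety-steered answer for the harmful request above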
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-cohere') get_ipython().system('pip install llama-index cohere pypdf') openai_api_key = "YOUR OPENAI API KEY" cohere_api_key = "YOUR COHEREAI API KEY" import os os.environ["OPENAI_API_KEY"] = openai_api_key os.environ["COHERE_API_KEY"] = cohere_api_key from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.core.node_parser import SimpleNodeParser from llama_index.llms.openai import OpenAI from llama_index.embeddings.cohere import CohereEmbedding from llama_index.core.retrievers import BaseRetriever, VectorIndexRetriever from llama_index.core import QueryBundle from llama_index.core.indices.query.schema import QueryType from llama_index.core.schema import NodeWithScore from llama_index.postprocessor.cohere_rerank import CohereRerank from llama_index.core.evaluation import EmbeddingQAFinetuneDataset from llama_index.finetuning import generate_cohere_reranker_finetuning_dataset from llama_index.core.evaluation import generate_question_context_pairs from llama_index.core.evaluation import RetrieverEvaluator from llama_index.finetuning import CohereRerankerFinetuneEngine from typing import List import pandas as pd import nest_asyncio nest_asyncio.apply() get_ipython().system("mkdir -p 'data/10k/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'") lyft_docs = SimpleDirectoryReader( input_files=["./data/10k/lyft_2021.pdf"] ).load_data() uber_docs = SimpleDirectoryReader( input_files=["./data/10k/uber_2021.pdf"] ).load_data() node_parser = SimpleNodeParser.from_defaults(chunk_size=400) lyft_nodes = node_parser.get_nodes_from_documents(lyft_docs) uber_nodes = node_parser.get_nodes_from_documents(uber_docs) llm = OpenAI(temperature=0, model="gpt-4") qa_generate_prompt_tmpl = """\ Context information is below. --------------------- {context_str} --------------------- Given the context information and not prior knowledge. generate only questions based on the below query. You are a Professor. Your task is to setup \ {num_questions_per_chunk} questions for an upcoming \ quiz/examination. The questions should be diverse in nature \ across the document. The questions should not contain options, not start with Q1/ Q2. 
\ Restrict the questions to the context information provided.\ """ qa_dataset_lyft_train = generate_question_context_pairs( lyft_nodes[:256], llm=llm, num_questions_per_chunk=1, qa_generate_prompt_tmpl=qa_generate_prompt_tmpl, ) qa_dataset_lyft_train.save_json("lyft_train_dataset.json") qa_dataset_lyft_val = generate_question_context_pairs( lyft_nodes[257:321], llm=llm, num_questions_per_chunk=1, qa_generate_prompt_tmpl=qa_generate_prompt_tmpl, ) qa_dataset_lyft_val.save_json("lyft_val_dataset.json") qa_dataset_uber_val = generate_question_context_pairs( uber_nodes[:150], llm=llm, num_questions_per_chunk=1, qa_generate_prompt_tmpl=qa_generate_prompt_tmpl, ) qa_dataset_uber_val.save_json("uber_val_dataset.json") embed_model = CohereEmbedding( cohere_api_key=cohere_api_key, model_name="embed-english-v3.0", input_type="search_document", ) generate_cohere_reranker_finetuning_dataset( qa_dataset_lyft_train, finetune_dataset_file_name="train.jsonl" ) generate_cohere_reranker_finetuning_dataset( qa_dataset_lyft_val, finetune_dataset_file_name="val.jsonl" ) generate_cohere_reranker_finetuning_dataset( qa_dataset_lyft_train, num_negatives=5, hard_negatives_gen_method="random", finetune_dataset_file_name="train_5_random.jsonl", embed_model=embed_model, ) generate_cohere_reranker_finetuning_dataset( qa_dataset_lyft_val, num_negatives=5, hard_negatives_gen_method="random", finetune_dataset_file_name="val_5_random.jsonl", embed_model=embed_model, ) generate_cohere_reranker_finetuning_dataset( qa_dataset_lyft_train, num_negatives=5, hard_negatives_gen_method="cosine_similarity", finetune_dataset_file_name="train_5_cosine_similarity.jsonl", embed_model=embed_model, ) generate_cohere_reranker_finetuning_dataset( qa_dataset_lyft_val, num_negatives=5, hard_negatives_gen_method="cosine_similarity", finetune_dataset_file_name="val_5_cosine_similarity.jsonl", embed_model=embed_model, ) finetune_model_no_hard_negatives = CohereRerankerFinetuneEngine( train_file_name="train.jsonl", val_file_name="val.jsonl", model_name="lyft_reranker_0_hard_negatives", model_type="RERANK", base_model="english", ) finetune_model_no_hard_negatives.finetune() finetune_model_random_hard_negatives = CohereRerankerFinetuneEngine( train_file_name="train_5_random.jsonl", val_file_name="val_5_random.jsonl", model_name="lyft_reranker_5_random_hard_negatives", model_type="RERANK", base_model="english", ) finetune_model_random_hard_negatives.finetune() finetune_model_cosine_hard_negatives = CohereRerankerFinetuneEngine( train_file_name="train_5_cosine_similarity.jsonl", val_file_name="val_5_cosine_similarity.jsonl", model_name="lyft_reranker_5_cosine_hard_negatives", model_type="RERANK", base_model="english", ) finetune_model_cosine_hard_negatives.finetune() reranker_base =
CohereRerank(top_n=5)
llama_index.postprocessor.cohere_rerank.CohereRerank
from llama_index.core import SQLDatabase from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("sqlite:///chinook.db") sql_database = SQLDatabase(engine) from llama_index.core.query_pipeline import QueryPipeline get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip') get_ipython().system('unzip ./chinook.zip') from llama_index.core.settings import Settings from llama_index.core.callbacks import CallbackManager callback_manager = CallbackManager() Settings.callback_manager = callback_manager import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.query_engine import NLSQLTableQueryEngine from llama_index.core.tools import QueryEngineTool sql_query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["albums", "tracks", "artists"], verbose=True, ) sql_tool = QueryEngineTool.from_defaults( query_engine=sql_query_engine, name="sql_tool", description=( "Useful for translating a natural language query into a SQL query" ), ) from llama_index.core.query_pipeline import QueryPipeline as QP qp = QP(verbose=True) from llama_index.core.agent.react.types import ( ActionReasoningStep, ObservationReasoningStep, ResponseReasoningStep, ) from llama_index.core.agent import Task, AgentChatResponse from llama_index.core.query_pipeline import ( AgentInputComponent, AgentFnComponent, CustomAgentComponent, QueryComponent, ToolRunnerComponent, ) from llama_index.core.llms import MessageRole from typing import Dict, Any, Optional, Tuple, List, cast def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]: """Agent input function. Returns: A Dictionary of output keys and values. If you are specifying src_key when defining links between this component and other components, make sure the src_key matches the specified output_key. 
""" if "current_reasoning" not in state: state["current_reasoning"] = [] reasoning_step = ObservationReasoningStep(observation=task.input) state["current_reasoning"].append(reasoning_step) return {"input": task.input} agent_input_component = AgentInputComponent(fn=agent_input_fn) from llama_index.core.agent import ReActChatFormatter from llama_index.core.query_pipeline import InputComponent, Link from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool def react_prompt_fn( task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool] ) -> List[ChatMessage]: chat_formatter = ReActChatFormatter() return chat_formatter.format( tools, chat_history=task.memory.get() + state["memory"].get_all(), current_reasoning=state["current_reasoning"], ) react_prompt_component = AgentFnComponent( fn=react_prompt_fn, partial_dict={"tools": [sql_tool]} ) from typing import Set, Optional from llama_index.core.agent.react.output_parser import ReActOutputParser from llama_index.core.llms import ChatResponse from llama_index.core.agent.types import Task def parse_react_output_fn( task: Task, state: Dict[str, Any], chat_response: ChatResponse ): """Parse ReAct output into a reasoning step.""" output_parser = ReActOutputParser() reasoning_step = output_parser.parse(chat_response.message.content) return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step} parse_react_output = AgentFnComponent(fn=parse_react_output_fn) def run_tool_fn( task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep ): """Run tool and process tool output.""" tool_runner_component = ToolRunnerComponent( [sql_tool], callback_manager=task.callback_manager ) tool_output = tool_runner_component.run_component( tool_name=reasoning_step.action, tool_input=reasoning_step.action_input, ) observation_step = ObservationReasoningStep(observation=str(tool_output)) state["current_reasoning"].append(observation_step) return {"response_str": observation_step.get_content(), "is_done": False} run_tool = AgentFnComponent(fn=run_tool_fn) def process_response_fn( task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep ): """Process response.""" state["current_reasoning"].append(response_step) response_str = response_step.response state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER)) state["memory"].put( ChatMessage(content=response_str, role=MessageRole.ASSISTANT) ) return {"response_str": response_str, "is_done": True} process_response = AgentFnComponent(fn=process_response_fn) def process_agent_response_fn( task: Task, state: Dict[str, Any], response_dict: dict ): """Process agent response.""" return ( AgentChatResponse(response_dict["response_str"]), response_dict["is_done"], ) process_agent_response =
AgentFnComponent(fn=process_agent_response_fn)
llama_index.core.query_pipeline.AgentFnComponent
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import camelot from llama_index.core import VectorStoreIndex from llama_index.core.query_engine import PandasQueryEngine from llama_index.core.schema import IndexNode from llama_index.llms.openai import OpenAI from llama_index.readers.file import PyMuPDFReader from typing import List import os os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY" from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") file_path = "billionaires_page.pdf" reader = PyMuPDFReader() docs = reader.load(file_path) def get_tables(path: str, pages: List[int]): table_dfs = [] for page in pages: table_list = camelot.read_pdf(path, pages=str(page)) table_df = table_list[0].df table_df = ( table_df.rename(columns=table_df.iloc[0]) .drop(table_df.index[0]) .reset_index(drop=True) ) table_dfs.append(table_df) return table_dfs table_dfs = get_tables(file_path, pages=[3, 25]) table_dfs[0] table_dfs[1] llm =
OpenAI(model="gpt-4")
llama_index.llms.openai.OpenAI
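A hedged sketch of the likely next step, pairing each extracted table with a PandasQueryEngine; the question string is illustrative:
llm = OpenAI(model="gpt-4")
df_query_engines = [
    PandasQueryEngine(table_df, llm=llm) for table_df in table_dfs
]
response = df_query_engines[0].query("How many billionaires were there in 2009?")  # hypothetical question
print(str(response))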
get_ipython().run_line_magic('pip', 'install llama-index-callbacks-wandb') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import os from getpass import getpass if os.getenv("OPENAI_API_KEY") is None: os.environ["OPENAI_API_KEY"] = getpass( "Paste your OpenAI key from:" " https://platform.openai.com/account/api-keys\n" ) assert os.getenv("OPENAI_API_KEY", "").startswith( "sk-" ), "This doesn't look like a valid OpenAI API key" print("OpenAI API key configured") from llama_index.core.callbacks import CallbackManager from llama_index.core.callbacks import LlamaDebugHandler from llama_index.callbacks.wandb import WandbCallbackHandler from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, SimpleKeywordTableIndex, StorageContext, ) from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-4", temperature=0) import llama_index.core from llama_index.core import set_global_handler set_global_handler("wandb", run_args={"project": "llamaindex"}) wandb_callback = llama_index.core.global_handler llama_debug = LlamaDebugHandler(print_trace_on_end=True) run_args = dict( project="llamaindex", ) wandb_callback =
WandbCallbackHandler(run_args=run_args)
llama_index.callbacks.wandb.WandbCallbackHandler
import openai openai.api_key = "sk-your-key" from llama_index.agent import OpenAIAgent from llama_index.tools.tool_spec.load_and_search.base import LoadAndSearchToolSpec from llama_index.tools.google_search.base import GoogleSearchToolSpec google_spec =
GoogleSearchToolSpec(key="your-key", engine="your-engine")
llama_index.tools.google_search.base.GoogleSearchToolSpec
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-deeplake') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio import os import getpass nest_asyncio.apply() get_ipython().system('pip install deeplake beautifulsoup4 html2text tiktoken openai llama-index python-dotenv') import requests from bs4 import BeautifulSoup from urllib.parse import urljoin def get_all_links(url): response = requests.get(url) if response.status_code != 200: print(f"Failed to retrieve the page: {url}") return [] soup = BeautifulSoup(response.content, "html.parser") links = [ urljoin(url, a["href"]) for a in soup.find_all("a", href=True) if a["href"] ] return links from langchain.document_loaders import AsyncHtmlLoader from langchain.document_transformers import Html2TextTransformer from llama_index.core import Document def load_documents(url): all_links = get_all_links(url) loader = AsyncHtmlLoader(all_links) docs = loader.load() html2text = Html2TextTransformer() docs_transformed = html2text.transform_documents(docs) docs = [
Document.from_langchain_format(doc)
llama_index.core.Document.from_langchain_format
get_ipython().system('pip install llama-index-multi-modal-llms-ollama') get_ipython().system('pip install llama-index-readers-file') get_ipython().system('pip install unstructured') get_ipython().system('pip install llama-index-embeddings-huggingface') get_ipython().system('pip install llama-index-vector-stores-qdrant') get_ipython().system('pip install llama-index-embeddings-clip') from llama_index.multi_modal_llms.ollama import OllamaMultiModal mm_model = OllamaMultiModal(model="llava:13b") from pathlib import Path from llama_index.core import SimpleDirectoryReader from PIL import Image import matplotlib.pyplot as plt input_image_path = Path("restaurant_images") if not input_image_path.exists(): Path.mkdir(input_image_path) get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1GlqcNJhGGbwLKjJK1QJ_nyswCTQ2K2Fq" -O ./restaurant_images/fried_chicken.png') image_documents = SimpleDirectoryReader("./restaurant_images").load_data() imageUrl = "./restaurant_images/fried_chicken.png" image = Image.open(imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) from pydantic import BaseModel class Restaurant(BaseModel): """Data model for a restaurant.""" restaurant: str food: str discount: str price: str rating: str review: str from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser prompt_template_str = """\ {query_str} Return the answer as a Pydantic object. The Pydantic schema is given below: """ mm_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Restaurant), image_documents=image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=mm_model, verbose=True, ) response = mm_program(query_str="Can you summarize what is in the image?") for res in response: print(res) get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm') get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1THe1qqM61lretr9N3BmINc_NWDvuthYf" -O shanghai.jpg') from pathlib import Path from llama_index.readers.file import UnstructuredReader from llama_index.core.schema import ImageDocument loader = UnstructuredReader() documents = loader.load_data(file=Path("tesla_2021_10k.htm")) image_doc = ImageDocument(image_path="./shanghai.jpg") from llama_index.core import VectorStoreIndex from llama_index.core.embeddings import resolve_embed_model embed_model = resolve_embed_model("local:BAAI/bge-m3") vector_index = VectorStoreIndex.from_documents( documents, embed_model=embed_model ) query_engine = vector_index.as_query_engine() from llama_index.core.prompts import PromptTemplate from llama_index.core.query_pipeline import QueryPipeline, FnComponent query_prompt_str = """\ Please expand the initial statement using the provided context from the Tesla 10K report. {initial_statement} """ query_prompt_tmpl =
PromptTemplate(query_prompt_str)
llama_index.core.prompts.PromptTemplate
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("./data/paul_graham/").load_data() from llama_index.llms.openai import OpenAI from llama_index.core import Settings from llama_index.core import StorageContext, VectorStoreIndex from llama_index.core import SummaryIndex Settings.llm = OpenAI() Settings.chunk_size = 1024 nodes =
Settings.node_parser.get_nodes_from_documents(documents)
llama_index.core.Settings.node_parser.get_nodes_from_documents
import openai openai.api_key = "sk-your-key" from llama_index.agent import OpenAIAgent from llama_index.tools.azure_speech.base import AzureSpeechToolSpec from llama_index.tools.azure_translate.base import AzureTranslateToolSpec speech_tool =
AzureSpeechToolSpec(speech_key="your-key", region="eastus")
llama_index.tools.azure_speech.base.AzureSpeechToolSpec
get_ipython().run_line_magic('pip', 'install llama-index-readers-web') get_ipython().run_line_magic('pip', 'install llama-index-callbacks-uptrain') get_ipython().run_line_magic('pip', 'install -q html2text llama-index pandas tqdm uptrain torch sentence-transformers') from llama_index.core import Settings, VectorStoreIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.readers.web import SimpleWebPageReader from llama_index.core.callbacks import CallbackManager from llama_index.callbacks.uptrain.base import UpTrainCallbackHandler from llama_index.core.query_engine import SubQuestionQueryEngine from llama_index.core.tools import QueryEngineTool, ToolMetadata from llama_index.core.postprocessor import SentenceTransformerRerank from llama_index.llms.openai import OpenAI import os os.environ[ "OPENAI_API_KEY" ] = "sk-************" # Replace with your OpenAI API key callback_handler = UpTrainCallbackHandler( key_type="openai", api_key=os.environ["OPENAI_API_KEY"], project_name_prefix="llama", ) Settings.callback_manager = CallbackManager([callback_handler]) documents =
SimpleWebPageReader()
llama_index.readers.web.SimpleWebPageReader
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import os import getpass import openai openai.api_key = "sk-" import chromadb chroma_client = chromadb.EphemeralClient() chroma_collection = chroma_client.create_collection("quickstart") from llama_index.core import VectorStoreIndex from llama_index.vector_stores.chroma import ChromaVectorStore from IPython.display import Markdown, display from llama_index.core.schema import TextNode nodes = [ TextNode( text="The Shawshank Redemption", metadata={ "author": "Stephen King", "theme": "Friendship", "year": 1994, }, ), TextNode( text="The Godfather", metadata={ "director": "Francis Ford Coppola", "theme": "Mafia", "year": 1972, }, ), TextNode( text="Inception", metadata={ "director": "Christopher Nolan", "theme": "Fiction", "year": 2010, }, ), TextNode( text="To Kill a Mockingbird", metadata={ "author": "Harper Lee", "theme": "Mafia", "year": 1960, }, ), TextNode( text="1984", metadata={ "author": "George Orwell", "theme": "Totalitarianism", "year": 1949, }, ), TextNode( text="The Great Gatsby", metadata={ "author": "F. Scott Fitzgerald", "theme": "The American Dream", "year": 1925, }, ), TextNode( text="Harry Potter and the Sorcerer's Stone", metadata={ "author": "J.K. Rowling", "theme": "Fiction", "year": 1997, }, ), ] from llama_index.core import StorageContext vector_store = ChromaVectorStore(chroma_collection=chroma_collection) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex(nodes, storage_context=storage_context) from llama_index.core.vector_stores import ( MetadataFilter, MetadataFilters, FilterOperator, ) filters = MetadataFilters( filters=[
MetadataFilter(key="theme", operator=FilterOperator.EQ, value="Mafia")
llama_index.core.vector_stores.MetadataFilter
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-milvus') get_ipython().system(' pip install llama-index') import logging import sys from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Document from llama_index.vector_stores.milvus import MilvusVectorStore from IPython.display import Markdown, display import textwrap import openai openai.api_key = "sk-" get_ipython().system(" mkdir -p 'data/paul_graham/'") get_ipython().system(" wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() print("Document ID:", documents[0].doc_id) from llama_index.core import StorageContext vector_store =
MilvusVectorStore(dim=1536, overwrite=True)
llama_index.vector_stores.milvus.MilvusVectorStore
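A minimal continuation, assuming a reachable Milvus instance (or Milvus Lite); this mirrors the standard vector-store wiring used elsewhere in these rows:
vector_store = MilvusVectorStore(dim=1536, overwrite=True)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
query_engine = index.as_query_engine()
print(query_engine.query("What did the author learn?"))  # hypothetical query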
from llama_index.core import VectorStoreIndex from llama_index.core.objects import ObjectIndex, SimpleObjectNodeMapping obj1 = {"input": "Hey, how's it going"} obj2 = ["a", "b", "c", "d"] obj3 = "llamaindex is an awesome library!" arbitrary_objects = [obj1, obj2, obj3] obj_node_mapping =
SimpleObjectNodeMapping.from_objects(arbitrary_objects)
llama_index.core.objects.SimpleObjectNodeMapping.from_objects
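A short sketch of what the mapping is for: it lets an ObjectIndex embed and retrieve the raw Python objects themselves:
object_index = ObjectIndex.from_objects(
    arbitrary_objects, obj_node_mapping, VectorStoreIndex
)
object_retriever = object_index.as_retriever(similarity_top_k=1)
print(object_retriever.retrieve("llamaindex"))  # should surface obj3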
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-replicate') import os OPENAI_API_TOKEN = "sk-<your-openai-api-token>" os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN REPLICATE_API_TOKEN = "" # Your Replicate API token here os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN from pathlib import Path input_image_path = Path("restaurant_images") if not input_image_path.exists(): Path.mkdir(input_image_path) get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1GlqcNJhGGbwLKjJK1QJ_nyswCTQ2K2Fq" -O ./restaurant_images/fried_chicken.png') from pydantic import BaseModel class Restaurant(BaseModel): """Data model for a restaurant.""" restaurant: str food: str discount: str price: str rating: str review: str from llama_index.multi_modal_llms.openai import OpenAIMultiModal from llama_index.core import SimpleDirectoryReader image_documents = SimpleDirectoryReader("./restaurant_images").load_data() openai_mm_llm = OpenAIMultiModal( model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=1000 ) from PIL import Image import matplotlib.pyplot as plt imageUrl = "./restaurant_images/fried_chicken.png" image = Image.open(imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser prompt_template_str = """\ can you summarize what is in the image\ and return the answer with json format \ """ openai_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Restaurant), image_documents=image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=openai_mm_llm, verbose=True, ) response = openai_program() for res in response: print(res) from llama_index.multi_modal_llms.replicate import ReplicateMultiModal from llama_index.multi_modal_llms.replicate.base import ( REPLICATE_MULTI_MODAL_LLM_MODELS, ) prompt_template_str = """\ can you summarize what is in the image\ and return the answer with json format \ """ def pydantic_replicate( model_name, output_class, image_documents, prompt_template_str ): mm_llm = ReplicateMultiModal( model=REPLICATE_MULTI_MODAL_LLM_MODELS[model_name], temperature=0.1, max_new_tokens=1000, ) llm_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=
PydanticOutputParser(output_class)
llama_index.core.output_parsers.PydanticOutputParser
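# Hedged sketch of how pydantic_replicate would typically finish and be called.
# The "llava-13b" key is an assumption about REPLICATE_MULTI_MODAL_LLM_MODELS;
# substitute any key present in your installed version.
def pydantic_replicate_example():
    mm_llm = ReplicateMultiModal(
        model=REPLICATE_MULTI_MODAL_LLM_MODELS["llava-13b"],
        temperature=0.1,
        max_new_tokens=1000,
    )
    llm_program = MultiModalLLMCompletionProgram.from_defaults(
        output_parser=PydanticOutputParser(Restaurant),
        image_documents=image_documents,
        prompt_template_str=prompt_template_str,
        multi_modal_llm=mm_llm,
        verbose=True,
    )
    return llm_program()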
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-extractors-marvin') from llama_index.core import SimpleDirectoryReader from llama_index.llms.openai import OpenAI from llama_index.core.node_parser import TokenTextSplitter from llama_index.extractors.marvin import MarvinMetadataExtractor import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." documents = SimpleDirectoryReader("data").load_data() documents[0].text = documents[0].text[:10000] import marvin from marvin import ai_model from llama_index.core.bridge.pydantic import BaseModel, Field marvin.settings.openai.api_key = os.environ["OPENAI_API_KEY"] @ai_model class SportsSupplement(BaseModel): name: str =
Field(..., description="The name of the sports supplement")
llama_index.core.bridge.pydantic.Field
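# Hedged sketch: plug the extractor into an ingestion pipeline. The marvin_model
# keyword is an assumption about MarvinMetadataExtractor's signature; check your
# installed version before relying on it.
from llama_index.core.ingestion import IngestionPipeline

node_parser = TokenTextSplitter(separator=" ", chunk_size=512, chunk_overlap=128)
metadata_extractor = MarvinMetadataExtractor(marvin_model=SportsSupplement)
pipeline = IngestionPipeline(transformations=[node_parser, metadata_extractor])
nodes = pipeline.run(documents=documents)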
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') from llama_index.core.evaluation import CorrectnessEvaluator from llama_index.llms.openai import OpenAI llm = OpenAI("gpt-4") evaluator =
CorrectnessEvaluator(llm=llm)
llama_index.core.evaluation.CorrectnessEvaluator
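# Hedged usage sketch: evaluate a generated answer against a reference; the three
# strings below are made-up examples.
result = evaluator.evaluate(
    query="Who wrote the essay 'What I Worked On'?",
    response="The essay was written by Paul Graham.",
    reference="Paul Graham wrote 'What I Worked On'.",
)
print(result.score, result.passing, result.feedback)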
get_ipython().run_line_magic('pip', 'install llama-index-readers-web') get_ipython().run_line_magic('pip', 'install llama-index-program-openai') import nest_asyncio nest_asyncio.apply() import os import openai os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY" openai.api_key = os.getenv("OPENAI_API_KEY") from pydantic import BaseModel, Field from typing import List class NodeMetadata(BaseModel): """Node metadata.""" entities: List[str] = Field( ..., description="Unique entities in this text chunk." ) summary: str = Field( ..., description="A concise summary of this text chunk." ) contains_number: bool = Field( ..., description=( "Whether the text chunk contains any numbers (ints, floats, etc.)" ), ) from llama_index.program.openai import OpenAIPydanticProgram from llama_index.core.extractors import PydanticProgramExtractor EXTRACT_TEMPLATE_STR = """\ Here is the content of the section: ---------------- {context_str} ---------------- Given the contextual information, extract out a {class_name} object.\ """ openai_program = OpenAIPydanticProgram.from_defaults( output_cls=NodeMetadata, prompt_template_str="{input}", ) program_extractor = PydanticProgramExtractor( program=openai_program, input_key="input", show_progress=True ) from llama_index.readers.web import SimpleWebPageReader from llama_index.core.node_parser import SentenceSplitter reader =
SimpleWebPageReader(html_to_text=True)
llama_index.readers.web.SimpleWebPageReader
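# Hedged continuation sketch: fetch a page, split it into nodes, and run the
# program extractor over a few of them. The URL is an illustrative placeholder.
docs = reader.load_data(urls=["https://eugeneyan.com/writing/llm-patterns/"])
node_parser = SentenceSplitter(chunk_size=1024)
orig_nodes = node_parser.get_nodes_from_documents(docs)
metadata_dicts = program_extractor.extract(orig_nodes[:5])
print(metadata_dicts[0])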
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-replicate') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().system('pip install unstructured') from unstructured.partition.html import partition_html import pandas as pd pd.set_option("display.max_rows", None) pd.set_option("display.max_columns", None) pd.set_option("display.width", None) pd.set_option("display.max_colwidth", None) get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm') get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1THe1qqM61lretr9N3BmINc_NWDvuthYf" -O shanghai.jpg') get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1PDVCf_CzLWXNnNoRV8CFgoJxv6U0sHAO" -O tesla_supercharger.jpg') from llama_index.readers.file import FlatReader from pathlib import Path reader = FlatReader() docs_2021 = reader.load_data(Path("tesla_2021_10k.htm")) from llama_index.core.node_parser import UnstructuredElementNodeParser node_parser =
UnstructuredElementNodeParser()
llama_index.core.node_parser.UnstructuredElementNodeParser
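# Hedged continuation sketch: parse the 10-K into text/table nodes, then derive
# base nodes plus table mappings for recursive retrieval. The helper name
# get_base_nodes_and_mappings is an assumption; verify it in your version.
raw_nodes_2021 = node_parser.get_nodes_from_documents(docs_2021)
base_nodes_2021, node_mappings_2021 = node_parser.get_base_nodes_and_mappings(
    raw_nodes_2021
)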
get_ipython().run_line_magic('pip', 'install llama-index-llms-fireworks') get_ipython().run_line_magic('pip', 'install llama-index') from llama_index.llms.fireworks import Fireworks resp = Fireworks().complete("Paul Graham is ") print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.fireworks import Fireworks messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="What is your name"), ] resp = Fireworks().chat(messages) print(resp) from llama_index.llms.fireworks import Fireworks llm =
Fireworks()
llama_index.llms.fireworks.Fireworks
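# Hedged sketch: the same client supports streaming completion as well.
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
    print(r.delta, end="")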
import openai openai.api_key = "sk-your-key" from llama_index.agent import OpenAIAgent from llama_index.tools import QueryEngineTool, ToolMetadata from llama_index import SimpleDirectoryReader, VectorStoreIndex import requests response = requests.get( "https://www.dropbox.com/s/f6bmb19xdg0xedm/paul_graham_essay.txt?dl=1" ) essay_txt = response.text with open("pg_essay.txt", "w") as fp: fp.write(essay_txt) documents =
SimpleDirectoryReader(input_files=["pg_essay.txt"])
llama_index.SimpleDirectoryReader
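# Hedged continuation sketch using this row's legacy (pre-0.10) imports: load the
# essay, expose the index as a tool, and hand it to an OpenAIAgent.
documents = SimpleDirectoryReader(input_files=["pg_essay.txt"]).load_data()
index = VectorStoreIndex.from_documents(documents)
tool = QueryEngineTool(
    query_engine=index.as_query_engine(),
    metadata=ToolMetadata(
        name="pg_essay", description="Paul Graham's essay on his life and work"
    ),
)
agent = OpenAIAgent.from_tools([tool], verbose=True)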
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().system('pip install llama-index') from llama_index.core.ingestion.cache import RedisCache from llama_index.core.ingestion import IngestionCache ingest_cache = IngestionCache( cache=
RedisCache.from_host_and_port(host="127.0.0.1", port=6379)
llama_index.core.ingestion.cache.RedisCache.from_host_and_port
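# Hedged sketch: the cache is meant to be handed to an IngestionPipeline so that
# repeated runs skip transformations whose outputs are already cached.
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SentenceSplitter

pipeline = IngestionPipeline(
    transformations=[SentenceSplitter()],
    cache=ingest_cache,
)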
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import camelot from llama_index.core import VectorStoreIndex from llama_index.core.query_engine import PandasQueryEngine from llama_index.core.schema import IndexNode from llama_index.llms.openai import OpenAI from llama_index.readers.file import PyMuPDFReader from typing import List import os os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY" from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") file_path = "billionaires_page.pdf" reader = PyMuPDFReader() docs = reader.load(file_path) def get_tables(path: str, pages: List[int]): table_dfs = [] for page in pages: table_list = camelot.read_pdf(path, pages=str(page)) table_df = table_list[0].df table_df = ( table_df.rename(columns=table_df.iloc[0]) .drop(table_df.index[0]) .reset_index(drop=True) ) table_dfs.append(table_df) return table_dfs table_dfs = get_tables(file_path, pages=[3, 25]) table_dfs[0] table_dfs[1] llm = OpenAI(model="gpt-4") df_query_engines = [ PandasQueryEngine(table_df, llm=llm) for table_df in table_dfs ] response = df_query_engines[0].query( "What's the net worth of the second richest billionaire in 2023?" ) print(str(response)) response = df_query_engines[1].query( "How many billionaires were there in 2009?" ) print(str(response)) from llama_index.core import Settings doc_nodes = Settings.node_parser.get_nodes_from_documents(docs) summaries = [ ( "This node provides information about the world's richest billionaires" " in 2023" ), ( "This node provides information on the number of billionaires and" " their combined net worth from 2000 to 2023." ), ] df_nodes = [ IndexNode(text=summary, index_id=f"pandas{idx}") for idx, summary in enumerate(summaries) ] df_id_query_engine_mapping = { f"pandas{idx}": df_query_engine for idx, df_query_engine in enumerate(df_query_engines) } vector_index =
VectorStoreIndex(doc_nodes + df_nodes)
llama_index.core.VectorStoreIndex
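# Hedged continuation sketch: a RecursiveRetriever routes from the summary
# IndexNodes into the matching pandas query engines, then a query engine wraps it.
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine

vector_retriever = vector_index.as_retriever(similarity_top_k=1)
recursive_retriever = RecursiveRetriever(
    "vector",
    retriever_dict={"vector": vector_retriever},
    query_engine_dict=df_id_query_engine_mapping,
    verbose=True,
)
query_engine = RetrieverQueryEngine.from_args(recursive_retriever)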
get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-program-openai') import nest_asyncio nest_asyncio.apply() import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.program.openai import OpenAIPydanticProgram from pydantic import BaseModel from llama_index.llms.openai import OpenAI from llama_index.finetuning.callbacks import OpenAIFineTuningHandler from llama_index.core.callbacks import CallbackManager from typing import List class Song(BaseModel): """Data model for a song.""" title: str length_seconds: int class Album(BaseModel): """Data model for an album.""" name: str artist: str songs: List[Song] finetuning_handler = OpenAIFineTuningHandler() callback_manager = CallbackManager([finetuning_handler]) llm = OpenAI(model="gpt-4", callback_manager=callback_manager) prompt_template_str = """\ Generate an example album, with an artist and a list of songs. \ Using the movie {movie_name} as inspiration.\ """ program = OpenAIPydanticProgram.from_defaults( output_cls=Album, prompt_template_str=prompt_template_str, llm=llm, verbose=False, ) movie_names = [ "The Shining", "The Departed", "Titanic", "Goodfellas", "Pretty Woman", "Home Alone", "Caged Fury", "Edward Scissorhands", "Total Recall", "Ghost", "Tremors", "RoboCop", "Rocky V", ] from tqdm.notebook import tqdm for movie_name in tqdm(movie_names): output = program(movie_name=movie_name) print(output.json()) finetuning_handler.save_finetuning_events("mock_finetune_songs.jsonl") get_ipython().system('cat mock_finetune_songs.jsonl') from llama_index.finetuning import OpenAIFinetuneEngine finetune_engine = OpenAIFinetuneEngine( "gpt-3.5-turbo", "mock_finetune_songs.jsonl", validate_json=False, # openai validate json code doesn't support function calling yet ) finetune_engine.finetune() finetune_engine.get_current_job() ft_llm = finetune_engine.get_finetuned_model(temperature=0.3) ft_program = OpenAIPydanticProgram.from_defaults( output_cls=Album, prompt_template_str=prompt_template_str, llm=ft_llm, verbose=False, ) ft_program(movie_name="Goodfellas") get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pydantic import Field from typing import List class Citation(BaseModel): """Citation class.""" author: str = Field( ..., description="Inferred first author (usually last name)" ) year: int = Field(..., description="Inferred year") desc: str = Field( ..., description=( "Inferred description from the text of the work that the author is" " cited for" ), ) class Response(BaseModel): """List of author citations. Extracted over unstructured text. """ citations: List[Citation] = Field( ..., description=( "List of author citations (organized by author, year, and" " description)." ), ) from llama_index.readers.file import PyMuPDFReader from llama_index.core import Document from llama_index.core.node_parser import SentenceSplitter from pathlib import Path loader =
PyMuPDFReader()
llama_index.readers.file.PyMuPDFReader
import os os.environ["OPENAI_API_KEY"] = "YOUR OPENAI API KEY" get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2312.04511.pdf" -O "llm_compiler.pdf"') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2312.06648.pdf" -O "dense_x_retrieval.pdf"') from llama_index.core import SimpleDirectoryReader reader =
SimpleDirectoryReader(input_files=["dense_x_retrieval.pdf"])
llama_index.core.SimpleDirectoryReader
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-graph-stores-kuzu') import os os.environ["OPENAI_API_KEY"] = "API_KEY_HERE" import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) import shutil shutil.rmtree("./test1", ignore_errors=True) shutil.rmtree("./test2", ignore_errors=True) shutil.rmtree("./test3", ignore_errors=True) get_ipython().run_line_magic('pip', 'install kuzu') import kuzu db = kuzu.Database("test1") from llama_index.graph_stores.kuzu import KuzuGraphStore graph_store = KuzuGraphStore(db) from llama_index.core import SimpleDirectoryReader, KnowledgeGraphIndex from llama_index.llms.openai import OpenAI from llama_index.core import Settings from IPython.display import Markdown, display import kuzu documents = SimpleDirectoryReader( "../../../../examples/paul_graham_essay/data" ).load_data() llm = OpenAI(temperature=0, model="gpt-3.5-turbo") Settings.llm = llm Settings.chunk_size = 512 from llama_index.core import StorageContext storage_context = StorageContext.from_defaults(graph_store=graph_store) index = KnowledgeGraphIndex.from_documents( documents, max_triplets_per_chunk=2, storage_context=storage_context, ) query_engine = index.as_query_engine( include_text=False, response_mode="tree_summarize" ) response = query_engine.query( "Tell me more about Interleaf", ) display(Markdown(f"<b>{response}</b>")) query_engine = index.as_query_engine( include_text=True, response_mode="tree_summarize" ) response = query_engine.query( "Tell me more about Interleaf", ) display(Markdown(f"<b>{response}</b>")) db = kuzu.Database("test2") graph_store =
KuzuGraphStore(db)
llama_index.graph_stores.kuzu.KuzuGraphStore
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().run_line_magic('env', 'OPENAI_API_KEY=YOUR_OPENAI_KEY') get_ipython().system('pip install llama-index pypdf') get_ipython().system("mkdir -p 'data/'") get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.core.response.notebook_utils import display_source_node from llama_index.core.retrievers import RecursiveRetriever from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI import json loader = PDFReader() docs0 = loader.load_data(file=Path("./data/llama2.pdf")) from llama_index.core import Document doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) for idx, node in enumerate(base_nodes): node.id_ = f"node-{idx}" from llama_index.core.embeddings import resolve_embed_model embed_model = resolve_embed_model("local:BAAI/bge-small-en") llm = OpenAI(model="gpt-3.5-turbo") base_index = VectorStoreIndex(base_nodes, embed_model=embed_model) base_retriever = base_index.as_retriever(similarity_top_k=2) retrievals = base_retriever.retrieve( "Can you tell me about the key concepts for safety finetuning" ) for n in retrievals: display_source_node(n, source_length=1500) query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm) response = query_engine_base.query( "Can you tell me about the key concepts for safety finetuning" ) print(str(response)) sub_chunk_sizes = [128, 256, 512] sub_node_parsers = [ SentenceSplitter(chunk_size=c, chunk_overlap=20) for c in sub_chunk_sizes ] all_nodes = [] for base_node in base_nodes: for n in sub_node_parsers: sub_nodes = n.get_nodes_from_documents([base_node]) sub_inodes = [ IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes ] all_nodes.extend(sub_inodes) original_node = IndexNode.from_text_node(base_node, base_node.node_id) all_nodes.append(original_node) all_nodes_dict = {n.node_id: n for n in all_nodes} vector_index_chunk = VectorStoreIndex(all_nodes, embed_model=embed_model) vector_retriever_chunk = vector_index_chunk.as_retriever(similarity_top_k=2) retriever_chunk = RecursiveRetriever( "vector", retriever_dict={"vector": vector_retriever_chunk}, node_dict=all_nodes_dict, verbose=True, ) nodes = retriever_chunk.retrieve( "Can you tell me about the key concepts for safety finetuning" ) for node in nodes: display_source_node(node, source_length=2000) query_engine_chunk = RetrieverQueryEngine.from_args(retriever_chunk, llm=llm) response = query_engine_chunk.query( "Can you tell me about the key concepts for safety finetuning" ) print(str(response)) import nest_asyncio nest_asyncio.apply() from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode from llama_index.core.extractors import ( SummaryExtractor, QuestionsAnsweredExtractor, ) extractors = [ SummaryExtractor(summaries=["self"], show_progress=True), QuestionsAnsweredExtractor(questions=5, 
show_progress=True), ] node_to_metadata = {} for extractor in extractors: metadata_dicts = extractor.extract(base_nodes) for node, metadata in zip(base_nodes, metadata_dicts): if node.node_id not in node_to_metadata: node_to_metadata[node.node_id] = metadata else: node_to_metadata[node.node_id].update(metadata) def save_metadata_dicts(path, data): with open(path, "w") as fp: json.dump(data, fp) def load_metadata_dicts(path): with open(path, "r") as fp: data = json.load(fp) return data save_metadata_dicts("data/llama2_metadata_dicts.json", node_to_metadata) metadata_dicts = load_metadata_dicts("data/llama2_metadata_dicts.json") import copy all_nodes = copy.deepcopy(base_nodes) for node_id, metadata in node_to_metadata.items(): for val in metadata.values(): all_nodes.append(
IndexNode(text=val, index_id=node_id)
llama_index.core.schema.IndexNode
get_ipython().run_line_magic('pip', 'install llama-index-question-gen-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from IPython.display import Markdown, display def display_prompt_dict(prompts_dict): for k, p in prompts_dict.items(): text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>" display(Markdown(text_md)) print(p.get_template()) display(Markdown("<br><br>")) from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector from llama_index.core.selectors import ( PydanticMultiSelector, PydanticSingleSelector, ) selector =
LLMMultiSelector.from_defaults()
llama_index.core.selectors.LLMMultiSelector.from_defaults
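# Hedged usage sketch: a selector picks among tool choices given a query; the two
# ToolMetadata entries below are made-up examples.
from llama_index.core.tools import ToolMetadata

tool_choices = [
    ToolMetadata(name="covid_nyt", description="A NYT news article about COVID-19"),
    ToolMetadata(name="covid_wiki", description="The Wikipedia page about COVID-19"),
]
selector_result = selector.select(tool_choices, query="Tell me more about COVID-19")
print(selector_result.selections)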
from llama_index import VectorStoreIndex, SimpleDirectoryReader documents = SimpleDirectoryReader( "../../examples/data/paul_graham" ).load_data() index = VectorStoreIndex.from_documents(documents) import pinecone from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext from llama_index.vector_stores import PineconeVectorStore pinecone.init(api_key="<api_key>", environment="<environment>") pinecone.create_index( "quickstart", dimension=1536, metric="euclidean", pod_type="p1" ) storage_context = StorageContext.from_defaults( vector_store=PineconeVectorStore(pinecone.Index("quickstart")) ) documents = SimpleDirectoryReader( "../../examples/data/paul_graham" ).load_data() index = VectorStoreIndex.from_documents( documents, storage_context=storage_context ) vector_store = PineconeVectorStore(pinecone.Index("quickstart")) index = VectorStoreIndex.from_vector_store(vector_store=vector_store) query_engine = index.as_query_engine() response = query_engine.query("What did the author do growing up?") from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters query_engine = index.as_query_engine( similarity_top_k=3, vector_store_query_mode="default", filters=MetadataFilters( filters=[ ExactMatchFilter(key="name", value="paul graham"), ] ), alpha=None, doc_ids=None, ) response = query_engine.query("what did the author do growing up?") from llama_index import get_response_synthesizer from llama_index.indices.vector_store.retrievers import VectorIndexRetriever from llama_index.query_engine.retriever_query_engine import ( RetrieverQueryEngine, ) retriever = VectorIndexRetriever( index=index, similarity_top_k=3, vector_store_query_mode="default", filters=[ExactMatchFilter(key="name", value="paul graham")], alpha=None, doc_ids=None, ) query_engine = RetrieverQueryEngine( retriever=retriever, response_synthesizer=get_response_synthesizer() ) response = query_engine.query("what did the author do growing up?") query_engine = index.as_query_engine( similarity_top_k=3, vector_store_kwargs={ "filter": {"name": "paul graham"}, }, ) response = query_engine.query("what did the author do growing up?") from llama_index import get_response_synthesizer from llama_index.indices.vector_store.retrievers import ( VectorIndexAutoRetriever, ) from llama_index.query_engine.retriever_query_engine import ( RetrieverQueryEngine, ) from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo vector_store_info = VectorStoreInfo( content_info="brief biography of celebrities", metadata_info=[ MetadataInfo( name="category", type="str", description="Category of the celebrity, one of [Sports, Entertainment, Business, Music]", ), MetadataInfo( name="country", type="str", description="Country of the celebrity, one of [United States, Barbados, Portugal]", ), ], ) retriever = VectorIndexAutoRetriever( index, vector_store_info=vector_store_info ) query_engine = RetrieverQueryEngine( retriever=retriever, response_synthesizer=
get_response_synthesizer()
llama_index.get_response_synthesizer
get_ipython().system('pip install llama-index') import os os.environ["OPENAI_API_KEY"] = "sk-..." get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("./data/paul_graham").load_data() from llama_index.core import Settings nodes = Settings.node_parser.get_nodes_from_documents(documents) from llama_index.core import StorageContext storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) from llama_index.core import SimpleKeywordTableIndex, VectorStoreIndex vector_index = VectorStoreIndex(nodes, storage_context=storage_context) keyword_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context) from llama_index.core import QueryBundle from llama_index.core.schema import NodeWithScore from llama_index.core.retrievers import ( BaseRetriever, VectorIndexRetriever, KeywordTableSimpleRetriever, ) from typing import List class CustomRetriever(BaseRetriever): """Custom retriever that performs both semantic search and keyword search.""" def __init__( self, vector_retriever: VectorIndexRetriever, keyword_retriever: KeywordTableSimpleRetriever, mode: str = "AND", ) -> None: """Init params.""" self._vector_retriever = vector_retriever self._keyword_retriever = keyword_retriever if mode not in ("AND", "OR"): raise ValueError("Invalid mode.") self._mode = mode super().__init__() def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]: """Retrieve nodes given query.""" vector_nodes = self._vector_retriever.retrieve(query_bundle) keyword_nodes = self._keyword_retriever.retrieve(query_bundle) vector_ids = {n.node.node_id for n in vector_nodes} keyword_ids = {n.node.node_id for n in keyword_nodes} combined_dict = {n.node.node_id: n for n in vector_nodes} combined_dict.update({n.node.node_id: n for n in keyword_nodes}) if self._mode == "AND": retrieve_ids = vector_ids.intersection(keyword_ids) else: retrieve_ids = vector_ids.union(keyword_ids) retrieve_nodes = [combined_dict[rid] for rid in retrieve_ids] return retrieve_nodes from llama_index.core import get_response_synthesizer from llama_index.core.query_engine import RetrieverQueryEngine vector_retriever = VectorIndexRetriever(index=vector_index, similarity_top_k=2) keyword_retriever =
KeywordTableSimpleRetriever(index=keyword_index)
llama_index.core.retrievers.KeywordTableSimpleRetriever
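# Hedged continuation sketch: combine the two retrievers with the CustomRetriever
# defined above and expose the result as a query engine.
custom_retriever = CustomRetriever(vector_retriever, keyword_retriever)
response_synthesizer = get_response_synthesizer()
custom_query_engine = RetrieverQueryEngine(
    retriever=custom_retriever,
    response_synthesizer=response_synthesizer,
)
response = custom_query_engine.query("What did the author do during his time at YC?")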
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-rankgpt-rerank') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-packs-infer-retrieve-rerank') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import datasets dataset = datasets.load_dataset("BioDEX/BioDEX-ICSR") dataset from llama_index.core import get_tokenizer import re from typing import Set, List tokenizer = get_tokenizer() sample_size = 5 def get_reactions_row(raw_target: str) -> List[str]: """Get reactions from a single row.""" reaction_pattern = re.compile(r"reactions:\s*(.*)") reaction_match = reaction_pattern.search(raw_target) if reaction_match: reactions = reaction_match.group(1).split(",") reactions = [r.strip().lower() for r in reactions] else: reactions = [] return reactions def get_reactions_set(dataset) -> Set[str]: """Get set of all reactions.""" reactions = set() for data in dataset["train"]: reactions.update(set(get_reactions_row(data["target"]))) return reactions def get_samples(dataset, sample_size: int = 5): """Get processed sample. Contains source text and also the reaction label. Parse reaction text to specifically extract reactions. """ samples = [] for idx, data in enumerate(dataset["train"]): if idx >= sample_size: break text = data["fulltext_processed"] raw_target = data["target"] reactions = get_reactions_row(raw_target) samples.append({"text": text, "reactions": reactions}) return samples from llama_index.packs.infer_retrieve_rerank import InferRetrieveRerankPack from llama_index.core.llama_pack import download_llama_pack InferRetrieveRerankPack = download_llama_pack( "InferRetrieveRerankPack", "./irr_pack", ) from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo-16k") pred_context = """\ The output predictions should be a list of comma-separated adverse \ drug reactions. 
\ """ reranker_top_n = 10 pack = InferRetrieveRerankPack( get_reactions_set(dataset), llm=llm, pred_context=pred_context, reranker_top_n=reranker_top_n, verbose=True, ) samples = get_samples(dataset, sample_size=5) pred_reactions = pack.run(inputs=[s["text"] for s in samples]) gt_reactions = [s["reactions"] for s in samples] pred_reactions[2] gt_reactions[2] from llama_index.core.retrievers import BaseRetriever from llama_index.core.llms import LLM from llama_index.llms.openai import OpenAI from llama_index.core import PromptTemplate from llama_index.core.query_pipeline import QueryPipeline from llama_index.core.postprocessor.types import BaseNodePostprocessor from llama_index.postprocessor.rankgpt_rerank import RankGPTRerank from llama_index.core.output_parsers import ChainableOutputParser from typing import List import random all_reactions = get_reactions_set(dataset) random.sample(all_reactions, 5) from llama_index.core.schema import TextNode from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core.ingestion import IngestionPipeline from llama_index.core import VectorStoreIndex reaction_nodes = [TextNode(text=r) for r in all_reactions] pipeline = IngestionPipeline(transformations=[OpenAIEmbedding()]) reaction_nodes = await pipeline.arun(documents=reaction_nodes) index = VectorStoreIndex(reaction_nodes) reaction_nodes[0].embedding reaction_retriever = index.as_retriever(similarity_top_k=2) nodes = reaction_retriever.retrieve("abdominal") print([n.get_content() for n in nodes]) infer_prompt_str = """\ Your job is to output a list of predictions given context from a given piece of text. The text context, and information regarding the set of valid predictions is given below. Return the predictions as a comma-separated list of strings. Text Context: {doc_context} Prediction Info: {pred_context} Predictions: """ infer_prompt = PromptTemplate(infer_prompt_str) class PredsOutputParser(ChainableOutputParser): """Predictions output parser.""" def parse(self, output: str) -> List[str]: """Parse predictions.""" tokens = output.split(",") return [t.strip() for t in tokens] preds_output_parser = PredsOutputParser() rerank_str = """\ Given a piece of text, rank the {num} labels above based on their relevance \ to this piece of text. The labels \ should be listed in descending order using identifiers. \ The most relevant labels should be listed first. \ The output format should be [] > [], e.g., [1] > [2]. \ Only response the ranking results, \ do not say any word or explain. \ Here is a given piece of text: {query}. """ rerank_prompt = PromptTemplate(rerank_str) def infer_retrieve_rerank( query: str, retriever: BaseRetriever, llm: LLM, pred_context: str, reranker_top_n: int = 3, ): """Infer retrieve rerank.""" infer_prompt_c = infer_prompt.as_query_component( partial={"pred_context": pred_context} ) infer_pipeline = QueryPipeline(chain=[infer_prompt_c, llm, preds_output_parser]) preds = infer_pipeline.run(query) print(f"PREDS: {preds}") all_nodes = [] for pred in preds: nodes = retriever.retrieve(str(pred)) all_nodes.extend(nodes) reranker = RankGPTRerank( llm=llm, top_n=reranker_top_n, rankgpt_rerank_prompt=rerank_prompt, ) reranked_nodes = reranker.postprocess_nodes(all_nodes, query_str=query) return [n.get_content() for n in reranked_nodes] samples = get_samples(dataset, sample_size=5) reaction_retriever = index.as_retriever(similarity_top_k=2) llm =
OpenAI(model="gpt-3.5-turbo-16k")
llama_index.llms.openai.OpenAI
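# Hedged sketch: run the end-to-end function defined above on a single sample.
pred = infer_retrieve_rerank(
    samples[0]["text"],
    reaction_retriever,
    llm,
    pred_context,
    reranker_top_n=10,
)
print(pred)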
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-gemini') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-gemini') get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') get_ipython().system("pip install llama-index 'google-generativeai>=0.3.0' matplotlib qdrant_client") import os GOOGLE_API_KEY = "" # add your GOOGLE API key here os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY from pathlib import Path import random from typing import Optional def get_image_files( dir_path, sample: Optional[int] = 10, shuffle: bool = False ): dir_path = Path(dir_path) image_paths = [] for image_path in dir_path.glob("*.jpg"): image_paths.append(image_path) if shuffle: random.shuffle(image_paths) if sample: return image_paths[:sample] else: return image_paths image_files = get_image_files("SROIE2019/test/img", sample=100) from pydantic import BaseModel, Field class ReceiptInfo(BaseModel): company: str = Field(..., description="Company name") date: str = Field(..., description="Date field in DD/MM/YYYY format") address: str = Field(..., description="Address") total: float = Field(..., description="total amount") currency: str = Field( ..., description="Currency of the country (in abbreviations)" ) summary: str = Field( ..., description="Extracted text summary of the receipt, including items purchased, the type of store, the location, and any other notable salient features (what does the purchase seem to be for?).", ) from llama_index.multi_modal_llms.gemini import GeminiMultiModal from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser prompt_template_str = """\ Can you summarize the image and return a response \ with the following JSON format: \ """ async def pydantic_gemini(output_class, image_documents, prompt_template_str): gemini_llm = GeminiMultiModal( api_key=GOOGLE_API_KEY, model_name="models/gemini-pro-vision" ) llm_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=
PydanticOutputParser(output_class)
llama_index.core.output_parsers.PydanticOutputParser
get_ipython().run_line_magic('pip', 'install llama-index-llms-konko') get_ipython().system('pip install llama-index') import os os.environ["KONKO_API_KEY"] = "<your-api-key>" from llama_index.llms.konko import Konko from llama_index.core.llms import ChatMessage llm = Konko(model="meta-llama/llama-2-13b-chat") messages = ChatMessage(role="user", content="Explain Big Bang Theory briefly") resp = llm.chat([messages]) print(resp) import os os.environ["OPENAI_API_KEY"] = "<your-api-key>" llm = Konko(model="gpt-3.5-turbo") message = ChatMessage(role="user", content="Explain Big Bang Theory briefly") resp = llm.chat([message]) print(resp) message = ChatMessage(role="user", content="Tell me a story in 250 words") resp = llm.stream_chat([message], max_tokens=1000) for r in resp: print(r.delta, end="") llm =
Konko(model="numbersstation/nsql-llama-2-7b", max_tokens=100)
llama_index.llms.konko.Konko
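# Hedged sketch: nsql-llama-2-7b is a text-to-SQL completion model, so prompt it
# with a schema plus a question via complete(). The schema is a made-up example.
prompt = (
    "CREATE TABLE stadium (stadium_id number, name text, capacity number)\n\n"
    "-- Using valid SQLite, answer the following question.\n"
    "-- How many stadiums are there?\n\n"
    "SELECT"
)
resp = llm.complete(prompt)
print(resp)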
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() get_ipython().system("mkdir -p 'data/'") get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.core import Document, VectorStoreIndex from llama_index.readers.file import PyMuPDFReader from llama_index.core.node_parser import SimpleNodeParser from llama_index.llms.openai import OpenAI loader = PyMuPDFReader() docs0 = loader.load(file_path=Path("./data/llama2.pdf")) doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] node_parser = SimpleNodeParser.from_defaults() nodes = node_parser.get_nodes_from_documents(docs) len(nodes) get_ipython().system('wget "https://www.dropbox.com/scl/fi/fh9vsmmm8vu0j50l3ss38/llama2_eval_qr_dataset.json?rlkey=kkoaez7aqeb4z25gzc06ak6kb&dl=1" -O data/llama2_eval_qr_dataset.json') from llama_index.core.evaluation import QueryResponseDataset eval_dataset = QueryResponseDataset.from_json( "data/llama2_eval_qr_dataset.json" ) from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4-1106-preview") dataset_generator = DatasetGenerator( nodes[:20], llm=llm, show_progress=True, num_questions_per_chunk=3, ) eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60) eval_dataset.save_json("data/llama2_eval_qr_dataset.json") eval_dataset = QueryResponseDataset.from_json( "data/llama2_eval_qr_dataset.json" ) from llama_index.core.evaluation.eval_utils import ( get_responses, get_results_df, ) from llama_index.core.evaluation import ( CorrectnessEvaluator, SemanticSimilarityEvaluator, BatchEvalRunner, ) from llama_index.llms.openai import OpenAI eval_llm = OpenAI(model="gpt-4-1106-preview") evaluator_c = CorrectnessEvaluator(llm=eval_llm) evaluator_s =
SemanticSimilarityEvaluator()
llama_index.core.evaluation.SemanticSimilarityEvaluator
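# Hedged sketch: the two evaluators are typically run together with a
# BatchEvalRunner; pred_responses is assumed to come from get_responses(...) over a
# query engine, which this row has not built yet.
eval_qs = eval_dataset.questions
ref_response_strs = [r for (_, r) in eval_dataset.qr_pairs]
batch_runner = BatchEvalRunner(
    {"correctness": evaluator_c, "semantic_similarity": evaluator_s},
    workers=2,
    show_progress=True,
)
# eval_results = await batch_runner.aevaluate_responses(
#     queries=eval_qs, responses=pred_responses, reference=ref_response_strs
# )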
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-web') get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-tools-metaphor') get_ipython().system('wget "https://images.openai.com/blob/a2e49de2-ba5b-4869-9c2d-db3b4b5dcc19/new-models-and-developer-products-announced-at-devday.jpg?width=2000" -O other_images/openai/dev_day.png') get_ipython().system('wget "https://drive.google.com/uc\\?id\\=1B4f5ZSIKN0zTTPPRlZ915Ceb3_uF9Zlq\\&export\\=download" -O other_images/adidas.png') from llama_index.readers.web import SimpleWebPageReader url = "https://openai.com/blog/new-models-and-developer-products-announced-at-devday" reader = SimpleWebPageReader(html_to_text=True) documents = reader.load_data(urls=[url]) from llama_index.llms.openai import OpenAI from llama_index.core import VectorStoreIndex from llama_index.core.tools import QueryEngineTool, ToolMetadata from llama_index.core import Settings Settings.llm = OpenAI(temperature=0, model="gpt-3.5-turbo") vector_index = VectorStoreIndex.from_documents( documents, ) query_tool = QueryEngineTool( query_engine=vector_index.as_query_engine(), metadata=ToolMetadata( name=f"vector_tool", description=( "Useful to lookup new features announced by OpenAI" ), ), ) from llama_index.core.agent.react_multimodal.step import ( MultimodalReActAgentWorker, ) from llama_index.core.agent import AgentRunner from llama_index.core.multi_modal_llms import MultiModalLLM from llama_index.multi_modal_llms.openai import OpenAIMultiModal from llama_index.core.agent import Task mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview", max_new_tokens=1000) react_step_engine = MultimodalReActAgentWorker.from_tools( [query_tool], multi_modal_llm=mm_llm, verbose=True, ) agent =
AgentRunner(react_step_engine)
llama_index.core.agent.AgentRunner
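# Hedged sketch: multimodal ReAct tasks carry their image documents in the task's
# extra_state; the image path reuses the file downloaded earlier in this row.
from llama_index.core.schema import ImageDocument

task = agent.create_task(
    "Tell me more about the images and the new features announced at DevDay",
    extra_state={
        "image_docs": [ImageDocument(image_path="other_images/openai/dev_day.png")]
    },
)
step_output = agent.run_step(task.task_id)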