| code (stringlengths 161–233k) | apis (sequencelengths 1–24) | extract_api (stringlengths 162–68.5k) |
---|---|---|
from fastapi import FastAPI
from pydantic import BaseModel
import asyncio
import whisper
from llama_index.embeddings import OpenAIEmbedding
from llama_index.llms import OpenAI
from llama_index.ingestion import IngestionPipeline
from llama_index.extractors import TitleExtractor, SummaryExtractor
from llama_index.text_splitter import SentenceSplitter
from llama_index.schema import MetadataMode
# from langchain.embeddings.huggingface import HuggingFaceBgeEmbeddings
# from llama_index.embeddings import *
from llama_index.embeddings import HuggingFaceEmbedding
from llm import LLMClient
from llama_index.llms import Ollama
from llama_index import ServiceContext
from llama_index.vector_stores import AstraDBVectorStore
from llama_index import Document
from llama_index.text_splitter import TokenTextSplitter
from llama_index import set_global_service_context
from llama_index.llms import LangChainLLM
from langchain_nvidia_ai_endpoints import ChatNVIDIA
import os
import asyncio
token = os.environ['token']
api_endpoint = os.environ['api_endpoint']
def create_pipeline_astra_db(llm_type='nvidia',embed_model='local',collection_name='video_transcript'):
print("Loading Pipeline")
if embed_model=='local':
print("embed_model local")
embed_model = "BAAI/bge-base-en"
embed_model_dim = 768
embed_model = HuggingFaceEmbedding(model_name=embed_model)
    elif embed_model=='nvidia':
        print("embed_model nvidia")
        embed_model_dim = 1024
        embed_model = HuggingFaceEmbedding(model_name=embed_model)  # note: model_name is still the literal string 'nvidia' at this point
    else:
        print("embed_model else")
        embed_model_dim = 768  # assumed default; must match the dimension of the chosen embedding model
        embed_model = HuggingFaceEmbedding(model_name=embed_model)
if llm_type=='nvidia':
print('llm nvidia')
nvai_llm = ChatNVIDIA(model='llama2_70b')
llm = LangChainLLM(llm=nvai_llm)
elif llm_type=='ollama':
print('llm_ollama')
llm = Ollama(model='stablelm2', temperature=0.1)
else:
print('llm else')
llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.1)
service_context = ServiceContext.from_defaults(embed_model=embed_model, llm=llm)
set_global_service_context(service_context)
astra_db_store = AstraDBVectorStore(
token=token,
api_endpoint=api_endpoint,
collection_name=collection_name,
embedding_dimension=embed_model_dim,
)
transformations = [
SentenceSplitter(chunk_size=1024, chunk_overlap=100),
# TitleExtractor(llm=llm, metadata_mode=MetadataMode.EMBED, num_workers=8),
# SummaryExtractor(llm=llm, metadata_mode=MetadataMode.EMBED, num_workers=8),
embed_model,
]
# text_splitter = TokenTextSplitter(chunk_size=512)
return IngestionPipeline(transformations=transformations,vector_store=astra_db_store)
class AudioData(BaseModel):
audio_file_path: str
pipeline = create_pipeline_astra_db(llm_type='nvidia',collection_name="video_test_collection")
app = FastAPI()
@app.get("/status")
def read_status():
return {"status": "running"}
@app.post("/transcribe")
async def transcribe(audio_data:AudioData):
return {"transcription": whisper.transcribe(audio_data.audio_file_path)["text"]}
@app.post("/transcribe_v2")
async def transcribe_v2(audio_data:AudioData):
return {"transcription": whisper.transcribe(audio_data.audio_file_path,path_or_hf_repo="mlx-community/whisper-medium-mlx")["text"]}
class ingest_data(BaseModel):
text: str
metadata:dict
@app.post("/ingest")
def ingest(ingest_data:ingest_data):
doc = [Document(text=ingest_data.text,metadata=ingest_data.metadata)]
pipeline.run(documents=doc,num_workers=4)
return {"Done"} | [
"llama_index.vector_stores.AstraDBVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.llms.LangChainLLM",
"llama_index.ingestion.IngestionPipeline",
"llama_index.set_global_service_context",
"llama_index.text_splitter.SentenceSplitter",
"llama_index.embeddings.HuggingFaceEmbedding",
"llama_index.llms.Ollama",
"llama_index.Document"
] | [((2944, 2953), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (2951, 2953), False, 'from fastapi import FastAPI\n'), ((2058, 2120), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model', 'llm': 'llm'}), '(embed_model=embed_model, llm=llm)\n', (2086, 2120), False, 'from llama_index import ServiceContext\n'), ((2125, 2168), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (2151, 2168), False, 'from llama_index import set_global_service_context\n'), ((2191, 2324), 'llama_index.vector_stores.AstraDBVectorStore', 'AstraDBVectorStore', ([], {'token': 'token', 'api_endpoint': 'api_endpoint', 'collection_name': 'collection_name', 'embedding_dimension': 'embed_model_dim'}), '(token=token, api_endpoint=api_endpoint, collection_name=\n collection_name, embedding_dimension=embed_model_dim)\n', (2209, 2324), False, 'from llama_index.vector_stores import AstraDBVectorStore\n'), ((2710, 2789), 'llama_index.ingestion.IngestionPipeline', 'IngestionPipeline', ([], {'transformations': 'transformations', 'vector_store': 'astra_db_store'}), '(transformations=transformations, vector_store=astra_db_store)\n', (2727, 2789), False, 'from llama_index.ingestion import IngestionPipeline\n'), ((1345, 1389), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'embed_model'}), '(model_name=embed_model)\n', (1365, 1389), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((1746, 1776), 'langchain_nvidia_ai_endpoints.ChatNVIDIA', 'ChatNVIDIA', ([], {'model': '"""llama2_70b"""'}), "(model='llama2_70b')\n", (1756, 1776), False, 'from langchain_nvidia_ai_endpoints import ChatNVIDIA\n'), ((1791, 1817), 'llama_index.llms.LangChainLLM', 'LangChainLLM', ([], {'llm': 'nvai_llm'}), '(llm=nvai_llm)\n', (1803, 1817), False, 'from llama_index.llms import LangChainLLM\n'), ((2392, 2444), 'llama_index.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': '(1024)', 'chunk_overlap': '(100)'}), '(chunk_size=1024, chunk_overlap=100)\n', (2408, 2444), False, 'from llama_index.text_splitter import SentenceSplitter\n'), ((3529, 3591), 'llama_index.Document', 'Document', ([], {'text': 'ingest_data.text', 'metadata': 'ingest_data.metadata'}), '(text=ingest_data.text, metadata=ingest_data.metadata)\n', (3537, 3591), False, 'from llama_index import Document\n'), ((1511, 1555), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'embed_model'}), '(model_name=embed_model)\n', (1531, 1555), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((1622, 1666), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'embed_model'}), '(model_name=embed_model)\n', (1642, 1666), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((1889, 1931), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': '"""stablelm2"""', 'temperature': '(0.1)'}), "(model='stablelm2', temperature=0.1)\n", (1895, 1931), False, 'from llama_index.llms import Ollama\n'), ((1982, 2033), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo-1106"""', 'temperature': '(0.1)'}), "(model='gpt-3.5-turbo-1106', temperature=0.1)\n", (1988, 2033), False, 'from llama_index.llms import OpenAI\n'), ((3129, 3175), 'whisper.transcribe', 'whisper.transcribe', (['audio_data.audio_file_path'], {}), '(audio_data.audio_file_path)\n', (3147, 3175), False, 'import whisper\n'), 
((3289, 3392), 'whisper.transcribe', 'whisper.transcribe', (['audio_data.audio_file_path'], {'path_or_hf_repo': '"""mlx-community/whisper-medium-mlx"""'}), "(audio_data.audio_file_path, path_or_hf_repo=\n 'mlx-community/whisper-medium-mlx')\n", (3307, 3392), False, 'import whisper\n')] |
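A minimal client sketch for the service above, assuming it is served with uvicorn at http://localhost:8000; the host, port, audio path, and ingest payload below are illustrative placeholders, not values from the source.

import requests

BASE_URL = "http://localhost:8000"

# Health check
print(requests.get(f"{BASE_URL}/status").json())

# Transcribe a local audio file (the path is a placeholder)
resp = requests.post(f"{BASE_URL}/transcribe", json={"audio_file_path": "sample.wav"})
print(resp.json()["transcription"])

# Ingest a text snippet plus metadata through the Astra DB-backed pipeline
resp = requests.post(
    f"{BASE_URL}/ingest",
    json={"text": "Example transcript text.", "metadata": {"source": "demo"}},
)
print(resp.status_code)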
from llama_index.core.postprocessor import KeywordNodePostprocessor
from llama_index.core.schema import TextNode, NodeWithScore
nodes = [
TextNode(
text="Entry no: 1, <SECRET> - Attack at Dawn"
),
TextNode(
text="Entry no: 2, <RESTRICTED> - Go to point Bravo"
),
TextNode(
text="Entry no: 3, <PUBLIC> - Roses are Red"
),
]
node_with_score_list = [
NodeWithScore(node=node) for node in nodes
]
pp = KeywordNodePostprocessor(
exclude_keywords=["SECRET", "RESTRICTED"]
)
remaining_nodes = pp.postprocess_nodes(
node_with_score_list
)
print('Remaining nodes:')
for node_with_score in remaining_nodes:
node = node_with_score.node
print(f"Text: {node.text}")
| [
"llama_index.core.schema.NodeWithScore",
"llama_index.core.schema.TextNode",
"llama_index.core.postprocessor.KeywordNodePostprocessor"
] | [((452, 519), 'llama_index.core.postprocessor.KeywordNodePostprocessor', 'KeywordNodePostprocessor', ([], {'exclude_keywords': "['SECRET', 'RESTRICTED']"}), "(exclude_keywords=['SECRET', 'RESTRICTED'])\n", (476, 519), False, 'from llama_index.core.postprocessor import KeywordNodePostprocessor\n'), ((143, 198), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': '"""Entry no: 1, <SECRET> - Attack at Dawn"""'}), "(text='Entry no: 1, <SECRET> - Attack at Dawn')\n", (151, 198), False, 'from llama_index.core.schema import TextNode, NodeWithScore\n'), ((218, 280), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': '"""Entry no: 2, <RESTRICTED> - Go to point Bravo"""'}), "(text='Entry no: 2, <RESTRICTED> - Go to point Bravo')\n", (226, 280), False, 'from llama_index.core.schema import TextNode, NodeWithScore\n'), ((300, 354), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': '"""Entry no: 3, <PUBLIC> - Roses are Red"""'}), "(text='Entry no: 3, <PUBLIC> - Roses are Red')\n", (308, 354), False, 'from llama_index.core.schema import TextNode, NodeWithScore\n'), ((402, 426), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'node'}), '(node=node)\n', (415, 426), False, 'from llama_index.core.schema import TextNode, NodeWithScore\n')] |
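The same postprocessor can also act as an allow-list; a short sketch using its complementary required_keywords argument on the node_with_score_list defined above.

# Keep only nodes that mention PUBLIC (the inverse of the exclusion example above).
pp_required = KeywordNodePostprocessor(required_keywords=["PUBLIC"])
public_only = pp_required.postprocess_nodes(node_with_score_list)
for node_with_score in public_only:
    print(f"Text: {node_with_score.node.text}")  # expected: only entry no. 3 remains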
from llama_index import VectorStoreIndex, ServiceContext
from llama_index.embeddings import LangchainEmbedding
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.llms import Ollama
from llama_index.vector_stores import WeaviateVectorStore
import weaviate
import box
import yaml
def load_embedding_model(model_name):
embeddings = LangchainEmbedding(
HuggingFaceEmbeddings(model_name=model_name)
)
return embeddings
def load_index(chunk_size, llm, embed_model, weaviate_client, index_name):
service_context = ServiceContext.from_defaults(
chunk_size=chunk_size,
llm=llm,
embed_model=embed_model
)
vector_store = WeaviateVectorStore(weaviate_client=weaviate_client, index_name=index_name)
index = VectorStoreIndex.from_vector_store(
vector_store, service_context=service_context
)
return index
def build_rag_pipeline():
"""
Constructs and configures a RAG pipeline for retrieval-augmented generation tasks.
This function performs the following steps to set up the RAG pipeline:
1. **Configuration Loading:**
- Reads configuration variables from a specified YAML file (`config.yml`).
- Stores the loaded configuration as a `box.Box` object for convenient access.
2. **Weaviate Connection:**
- Establishes a connection to the Weaviate server using the provided URL in the configuration.
- Creates a Weaviate client object for interacting with the Weaviate database.
3. **Ollama Model Loading:**
- Loads the specified Ollama language model based on the `LLM` key in the configuration.
- Sets the model temperature to 0 for a more deterministic response generation.
4. **Embedding Model Loading:**
- Utilizes the `load_embedding_model` function to retrieve a pre-trained Hugging Face model configured for Langchain.
- This model will be used to embed documents and queries for efficient search and retrieval.
5. **Vector Store Index Loading:**
- Fetches the pre-built Weaviate Vector Store index named in the configuration (`INDEX_NAME`).
- Connects the index to the Weaviate client and embeds relevant context using the selected service context.
6. **Query Engine Construction:**
- Converts the loaded Vector Store index into a dedicated query engine for efficient retrieval.
- Sets the `streaming` flag to `False` to return the final response after the entire query is processed.
7. **Pipeline Return:**
- Returns the fully constructed and configured RAG pipeline represented by the `query_engine` object.
Notes:
- This function relies on a separate `config.yml` file for storing configuration values.
- Ensure that the configuration file contains valid values for all required keys.
"""
# Import configuration specified in config.yml
with open('config.yml', 'r', encoding='utf8') as ymlfile:
cfg = box.Box(yaml.safe_load(ymlfile))
print("Connecting to Weaviate")
client = weaviate.Client(cfg.WEAVIATE_URL)
print("Loading Ollama...")
llm = Ollama(model=cfg.LLM, temperature=0)
print("Loading embedding model...")
embeddings = load_embedding_model(model_name=cfg.EMBEDDINGS)
print("Loading index...")
index = load_index(cfg.CHUNK_SIZE, llm, embeddings, client, cfg.INDEX_NAME)
print("Constructing query engine...")
query_engine = index.as_query_engine(streaming=False)
return query_engine
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.vector_stores.WeaviateVectorStore",
"llama_index.llms.Ollama"
] | [((568, 658), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': 'chunk_size', 'llm': 'llm', 'embed_model': 'embed_model'}), '(chunk_size=chunk_size, llm=llm, embed_model=\n embed_model)\n', (596, 658), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((704, 779), 'llama_index.vector_stores.WeaviateVectorStore', 'WeaviateVectorStore', ([], {'weaviate_client': 'weaviate_client', 'index_name': 'index_name'}), '(weaviate_client=weaviate_client, index_name=index_name)\n', (723, 779), False, 'from llama_index.vector_stores import WeaviateVectorStore\n'), ((793, 879), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {'service_context': 'service_context'}), '(vector_store, service_context=\n service_context)\n', (827, 879), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((3087, 3120), 'weaviate.Client', 'weaviate.Client', (['cfg.WEAVIATE_URL'], {}), '(cfg.WEAVIATE_URL)\n', (3102, 3120), False, 'import weaviate\n'), ((3163, 3199), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': 'cfg.LLM', 'temperature': '(0)'}), '(model=cfg.LLM, temperature=0)\n', (3169, 3199), False, 'from llama_index.llms import Ollama\n'), ((396, 440), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (417, 440), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((3012, 3035), 'yaml.safe_load', 'yaml.safe_load', (['ymlfile'], {}), '(ymlfile)\n', (3026, 3035), False, 'import yaml\n')] |
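For reference, build_rag_pipeline() reads five keys from config.yml (WEAVIATE_URL, LLM, EMBEDDINGS, CHUNK_SIZE, INDEX_NAME); below is a brief driver sketch in which the commented key values and the query string are illustrative placeholders rather than values from the source.

# Illustrative config.yml contents (placeholder values):
#   WEAVIATE_URL: http://localhost:8080
#   LLM: mistral
#   EMBEDDINGS: sentence-transformers/all-MiniLM-L6-v2
#   CHUNK_SIZE: 1024
#   INDEX_NAME: Documents
query_engine = build_rag_pipeline()
response = query_engine.query("What does the indexed documentation say about installation?")
print(response)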
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.extractors import KeywordExtractor
reader = SimpleDirectoryReader('files')
documents = reader.load_data()
parser = SentenceSplitter(include_prev_next_rel=True)
nodes = parser.get_nodes_from_documents(documents)
key_extractor = KeywordExtractor(keywords=3)
metadata_list = key_extractor.extract(nodes)
print(metadata_list)
| [
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.extractors.KeywordExtractor",
"llama_index.core.node_parser.SentenceSplitter"
] | [((176, 206), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""files"""'], {}), "('files')\n", (197, 206), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((247, 291), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'include_prev_next_rel': '(True)'}), '(include_prev_next_rel=True)\n', (263, 291), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((360, 388), 'llama_index.core.extractors.KeywordExtractor', 'KeywordExtractor', ([], {'keywords': '(3)'}), '(keywords=3)\n', (376, 388), False, 'from llama_index.core.extractors import KeywordExtractor\n')] |
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.extractors import SummaryExtractor
reader = SimpleDirectoryReader('files')
documents = reader.load_data()
parser = SentenceSplitter(include_prev_next_rel=True)
nodes = parser.get_nodes_from_documents(documents)
summary_extractor = SummaryExtractor(
summaries=["prev", "self", "next"]
)
metadata_list = summary_extractor.extract(nodes)
print(metadata_list) | [
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.extractors.SummaryExtractor"
] | [((176, 206), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""files"""'], {}), "('files')\n", (197, 206), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((247, 291), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'include_prev_next_rel': '(True)'}), '(include_prev_next_rel=True)\n', (263, 291), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((364, 416), 'llama_index.core.extractors.SummaryExtractor', 'SummaryExtractor', ([], {'summaries': "['prev', 'self', 'next']"}), "(summaries=['prev', 'self', 'next'])\n", (380, 416), False, 'from llama_index.core.extractors import SummaryExtractor\n')] |
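With either extractor above, extract() returns one metadata dict per node, in the same order as nodes; a small sketch of merging that metadata back onto the nodes.

# Merge the extracted metadata back into each node's metadata dict.
for node, metadata in zip(nodes, metadata_list):
    node.metadata.update(metadata)
print(nodes[0].metadata)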
import argparse
from typing import Optional, Any
from mlx_lm import load, generate
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings
from llama_index.core.llms.callbacks import llm_completion_callback
from llama_index.core.llms import CustomLLM, CompletionResponse, CompletionResponseGen, LLMMetadata
from pydantic import BaseModel
class OurLLM(CustomLLM, BaseModel):
model: Optional[Any] = None
tokenizer: Optional[Any] = None
def __init__(self, model_name: str, **data):
super().__init__(**data) # Initialize BaseModel part with data
# Directly load the model and tokenizer
self.model, self.tokenizer = load(model_name)
context_window: int = 2096
max_tokens : int = 100
model_name: str = "custom"
@property
def metadata(self) -> LLMMetadata:
"""Get LLM metadata."""
return LLMMetadata(
context_window = self.context_window,
model_name=self.model_name,
max_tokens=self.max_tokens
)
def process_generated_text(self, text: str) -> str:
token_pos = text.find("\n\n")
if token_pos != -1:
# Truncate text at the first occurrence of two new lines
return text[:token_pos]
return text
@llm_completion_callback()
def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
# Remove 'formatted' argument if present
kwargs.pop('formatted', None)
generated_text = generate(self.model, self.tokenizer, prompt=prompt, verbose=False, **kwargs)
processed_text = self.process_generated_text(generated_text)
return CompletionResponse(text=processed_text)
@llm_completion_callback()
def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
generated_text = generate(self.model, self.tokenizer, prompt=prompt, verbose=False, **kwargs)
processed_text = self.process_generated_text(generated_text)
for char in processed_text:
yield CompletionResponse(text=char, delta=char)
def main():
parser = argparse.ArgumentParser(description="Query a document collection with an LLM.")
parser.add_argument("--model_name", type=str, required=True, help="Model name to use for the LLM.")
parser.add_argument("--directory", type=str, required=True, help="Directory containing the documents to index.")
parser.add_argument("--embed_model", type=str, required=True, help="Embed model to use for vectorizing documents.")
parser.add_argument("--query", type=str, required=True, help="Query to perform on the document collection.")
args = parser.parse_args()
# Convert input from the user into strings
model_name = str(args.model_name)
directory = str(args.directory)
embed_model = str(args.embed_model)
query_str = str(args.query)
# Setup the LLM and embed model
Settings.llm = OurLLM(model_name=model_name)
Settings.embed_model = embed_model
# Load documents and create index
documents = SimpleDirectoryReader(directory).load_data()
index = VectorStoreIndex.from_documents(documents, show_progress=True)
# Perform query and print response
query_engine = index.as_query_engine()
response = query_engine.query(query_str)
print(response)
if __name__ == "__main__":
main()
#Usage: python mlx_rag.py --model_name "mlx-community/Mistral-7B-v0.1-hf-4bit-mlx" --directory "data" --embed_model "local:BAAI/bge-base-en-v1.5" --query "Complete the sentence: In all criminal prosecutions, the accused shall enjoy"
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.llms.CompletionResponse",
"llama_index.core.llms.callbacks.llm_completion_callback",
"llama_index.core.llms.LLMMetadata",
"llama_index.core.SimpleDirectoryReader"
] | [((1291, 1316), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1314, 1316), False, 'from llama_index.core.llms.callbacks import llm_completion_callback\n'), ((1715, 1740), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1738, 1740), False, 'from llama_index.core.llms.callbacks import llm_completion_callback\n'), ((2121, 2200), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Query a document collection with an LLM."""'}), "(description='Query a document collection with an LLM.')\n", (2144, 2200), False, 'import argparse\n'), ((3118, 3180), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'show_progress': '(True)'}), '(documents, show_progress=True)\n', (3149, 3180), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings\n'), ((674, 690), 'mlx_lm.load', 'load', (['model_name'], {}), '(model_name)\n', (678, 690), False, 'from mlx_lm import load, generate\n'), ((885, 992), 'llama_index.core.llms.LLMMetadata', 'LLMMetadata', ([], {'context_window': 'self.context_window', 'model_name': 'self.model_name', 'max_tokens': 'self.max_tokens'}), '(context_window=self.context_window, model_name=self.model_name,\n max_tokens=self.max_tokens)\n', (896, 992), False, 'from llama_index.core.llms import CustomLLM, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((1507, 1583), 'mlx_lm.generate', 'generate', (['self.model', 'self.tokenizer'], {'prompt': 'prompt', 'verbose': '(False)'}), '(self.model, self.tokenizer, prompt=prompt, verbose=False, **kwargs)\n', (1515, 1583), False, 'from mlx_lm import load, generate\n'), ((1668, 1707), 'llama_index.core.llms.CompletionResponse', 'CompletionResponse', ([], {'text': 'processed_text'}), '(text=processed_text)\n', (1686, 1707), False, 'from llama_index.core.llms import CustomLLM, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((1850, 1926), 'mlx_lm.generate', 'generate', (['self.model', 'self.tokenizer'], {'prompt': 'prompt', 'verbose': '(False)'}), '(self.model, self.tokenizer, prompt=prompt, verbose=False, **kwargs)\n', (1858, 1926), False, 'from mlx_lm import load, generate\n'), ((3061, 3093), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['directory'], {}), '(directory)\n', (3082, 3093), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings\n'), ((2052, 2093), 'llama_index.core.llms.CompletionResponse', 'CompletionResponse', ([], {'text': 'char', 'delta': 'char'}), '(text=char, delta=char)\n', (2070, 2093), False, 'from llama_index.core.llms import CustomLLM, CompletionResponse, CompletionResponseGen, LLMMetadata\n')] |
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
document = reader.load_data(Path("files/sample_document1.txt"))
print(f"Metadata: {document[0].metadata}")
print(f"Text: {document[0].text}")
| [
"llama_index.readers.file.FlatReader"
] | [((83, 95), 'llama_index.readers.file.FlatReader', 'FlatReader', ([], {}), '()\n', (93, 95), False, 'from llama_index.readers.file import FlatReader\n'), ((124, 158), 'pathlib.Path', 'Path', (['"""files/sample_document1.txt"""'], {}), "('files/sample_document1.txt')\n", (128, 158), False, 'from pathlib import Path\n')] |
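A follow-on sketch: chunk the loaded document into nodes with the same SentenceSplitter used in the earlier samples; the chunk_size value is an illustrative choice.

from llama_index.core.node_parser import SentenceSplitter

splitter = SentenceSplitter(chunk_size=512)
nodes = splitter.get_nodes_from_documents(document)
print(f"Produced {len(nodes)} nodes")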
import logging
import os
import sys
from llama_index.core import (
StorageContext,
SummaryIndex,
load_index_from_storage,
)
from llama_index.readers.github import GithubRepositoryReader
from uglychain.llm.llama_index import LlamaIndexLLM
from uglychain import Model
from uglygpt.utils.config import config
import nest_asyncio
nest_asyncio.apply()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# load the documents and create the index
owner = "rjmacarthy"
repo = "twinny"
branch = "master"
# check if storage already exists
PERSIST_DIR = "./data/github/" + owner + "/" + repo
if not os.path.exists(PERSIST_DIR):
documents = GithubRepositoryReader(
github_token=config.github_token,
owner=owner,
repo=repo,
use_parser=False,
verbose=False,
ignore_directories=["examples"],
).load_data(branch=branch)
index = SummaryIndex.from_documents(documents, show_progress=True, build_tree=True)
# store it for later
index.storage_context.persist(persist_dir=PERSIST_DIR)
else:
# load the existing index
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
index = load_index_from_storage(storage_context)
query_engine = index.as_query_engine(
llm=LlamaIndexLLM(model=Model.YI),
retriever_mode="all_leaf",
response_mode="tree_summarize",
)
response = query_engine.query("How does this project implement the completion feature? Show me the specific code.")
print(response)
| [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.SummaryIndex.from_documents",
"llama_index.readers.github.GithubRepositoryReader"
] | [((340, 360), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (358, 360), False, 'import nest_asyncio\n'), ((361, 419), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (380, 419), False, 'import logging\n'), ((451, 491), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (472, 491), False, 'import logging\n'), ((685, 712), 'os.path.exists', 'os.path.exists', (['PERSIST_DIR'], {}), '(PERSIST_DIR)\n', (699, 712), False, 'import os\n'), ((969, 1044), 'llama_index.core.SummaryIndex.from_documents', 'SummaryIndex.from_documents', (['documents'], {'show_progress': '(True)', 'build_tree': '(True)'}), '(documents, show_progress=True, build_tree=True)\n', (996, 1044), False, 'from llama_index.core import StorageContext, SummaryIndex, load_index_from_storage\n'), ((1187, 1240), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'PERSIST_DIR'}), '(persist_dir=PERSIST_DIR)\n', (1215, 1240), False, 'from llama_index.core import StorageContext, SummaryIndex, load_index_from_storage\n'), ((1253, 1293), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1276, 1293), False, 'from llama_index.core import StorageContext, SummaryIndex, load_index_from_storage\n'), ((420, 439), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (437, 439), False, 'import logging\n'), ((1341, 1370), 'uglychain.llm.llama_index.LlamaIndexLLM', 'LlamaIndexLLM', ([], {'model': 'Model.YI'}), '(model=Model.YI)\n', (1354, 1370), False, 'from uglychain.llm.llama_index import LlamaIndexLLM\n'), ((730, 881), 'llama_index.readers.github.GithubRepositoryReader', 'GithubRepositoryReader', ([], {'github_token': 'config.github_token', 'owner': 'owner', 'repo': 'repo', 'use_parser': '(False)', 'verbose': '(False)', 'ignore_directories': "['examples']"}), "(github_token=config.github_token, owner=owner, repo=\n repo, use_parser=False, verbose=False, ignore_directories=['examples'])\n", (752, 881), False, 'from llama_index.readers.github import GithubRepositoryReader\n')] |
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader('./llama_data').load_data()
index = GPTVectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
print(query_engine.query("Is the Vision Pro an AR device?"))
# Save the index
index.storage_context.persist() | [
"llama_index.SimpleDirectoryReader",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((138, 183), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (172, 183), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader\n'), ((80, 117), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./llama_data"""'], {}), "('./llama_data')\n", (101, 117), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader\n')] |
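The persisted index can be reloaded in a later session; a minimal sketch using the same legacy llama_index API as above and the default ./storage directory written by persist().

from llama_index import StorageContext, load_index_from_storage

storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context)
query_engine = index.as_query_engine()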
from llama_index import SimpleDirectoryReader
import logging
import sys
from pathlib import Path
from llama_index import download_loader
PandasExcelReader = download_loader("PandasExcelReader")
loader = PandasExcelReader(pandas_config={"header": 0})
documents = loader.load_data(file=Path('src/data/train_dataset.xlsx'))
print(f'Loaded {len(documents)} docs')
print(type(documents[0]))
'''
db = DatabaseReader(
scheme="postgresql", # Database Scheme
host="localhost", # Database Host
port="5432", # Database Port
user="postgres", # Database User
password="FakeExamplePassword", # Database Password
dbname="postgres", # Database Name
)
print(type(db))
# DatabaseReader available method:
print(type(db.load_data))
### SQLDatabase class ###
# db.sql is an instance of SQLDatabase:
print(type(db.sql_database))
# SQLDatabase available methods:
print(type(db.sql_database.from_uri))
print(type(db.sql_database.get_single_table_info))
print(type(db.sql_database.get_table_columns))
print(type(db.sql_database.get_table_info))
print(type(db.sql_database.get_table_names))
print(type(db.sql_database.insert_into_table))
print(type(db.sql_database.run))
print(type(db.sql_database.run_sql))
# SQLDatabase available properties:
print(type(db.sql_database.dialect))
print(type(db.sql_database.engine))
print(type(db.sql_database.table_info))
query = f"""
SELECT
CONCAT(name, ' is ', age, ' years old.') AS text
FROM public.users
WHERE age >= 18
"""
documents = db.load_data(query=query)
# Display type(documents) and documents
# type(documents) must return <class 'list'>
print(type(documents))
# Documents must return a list of Document objects
print(documents)
documents = SimpleDirectoryReader('./data').load_data()
index = VectorStoreIndex.from_documents(documents)
''' | [
"llama_index.download_loader"
] | [((164, 200), 'llama_index.download_loader', 'download_loader', (['"""PandasExcelReader"""'], {}), "('PandasExcelReader')\n", (179, 200), False, 'from llama_index import download_loader\n'), ((295, 330), 'pathlib.Path', 'Path', (['"""src/data/train_dataset.xlsx"""'], {}), "('src/data/train_dataset.xlsx')\n", (299, 330), False, 'from pathlib import Path\n')] |
"""
# The core idea of a Multi-Document Agent
The core idea of a Multi-Document Agent is to simulate a knowledgeable assistant that can draw upon information from
multiple separate documents to provide informed, accurate answers to user queries. Unlike a traditional, single-document
agent that can only access and understand information from one source, a multi-document agent has the capability to
consider a broader range of information sources, similar to how a human expert would consult various references to
answer complex questions.
## How a multi-document agent works
Here's an outline of how a multi-document agent works:
1. **Document Agents**: Each document (or set of related documents) is paired with a document agent. This agent is
responsible for understanding the content within its assigned document(s). It has capabilities such as semantic search,
to find relevant snippets within the document, and summarization, to distill the document's content into a concise form.
2. **Top-Level Agent**: A top-level agent oversees the document agents. When a user asks a question, the top-level
agent determines which document agents might have relevant information and directs the query appropriately.
3. **Tool Retrieval**: The top-level agent uses a retriever mechanism to identify which tools (i.e., query engines of
the individual document agents) are most relevant for the given query.
4. **Reranking**: Retrieved documents or relevant snippets are reranked (possibly by an external system like Cohere)
to refine the set of candidate responses, ensuring that the most relevant information is considered.
5. **Query Planning Tool**: This tool is dynamically created based on the retrieved tools to plan out an effective
strategy for leveraging the selected documents to answer the user's query.
6. **Answering Queries**: To answer a query, the top-level agent orchestrates the use of the retrieved and reranked
tools, conducting a "chain of thought" process over the set of relevant documents to formulate a comprehensive response.
7. **Integration**: The responses from the document agents are integrated into a single coherent answer for the user.
This setup is particularly powerful for complex queries that require cross-referencing information, understanding
nuanced details, or comparing statements from multiple sources. The multi-document agent is designed to mimic a human
expert's approach to research and analysis, using a rich set of data and tools for a more robust and informed response.
"""
# System imports
from contextlib import asynccontextmanager
from dataclasses import dataclass
import json
from pathlib import Path
import os
import pickle
from urllib.parse import urlparse
from typing import Sequence
# Third-party imports
from dotenv import load_dotenv
from llama_index import download_loader
from llama_index.llms import OpenAI
from llama_index import ServiceContext
from llama_index import Document
from llama_index import VectorStoreIndex, SummaryIndex
from llama_index.agent import OpenAIAgent
from llama_index import load_index_from_storage, StorageContext
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index.node_parser import SentenceSplitter
from llama_index import VectorStoreIndex
from llama_index.objects import (
ObjectIndex,
SimpleToolNodeMapping,
ObjectRetriever,
)
from llama_index.retrievers import BaseRetriever
from llama_index.postprocessor import CohereRerank
from llama_index.tools import QueryPlanTool
from llama_index.query_engine import SubQuestionQueryEngine
from llama_index.llms import OpenAI
from llama_index.agent import FnRetrieverOpenAIAgent, ReActAgent
from tqdm import tqdm
from llama_index.schema import BaseNode
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
#
load_dotenv()
#
class CustomRetriever(BaseRetriever):
def __init__(self, vector_retriever, postprocessor=None) -> None:
self._vector_retriever = vector_retriever
self._postprocessor = postprocessor or CohereRerank(
top_n=5, api_key=os.getenv("COHERE_API_KEY")
)
super().__init__()
def _retrieve(self, query_bundle):
retrieved_nodes = self._vector_retriever.retrieve(query_bundle)
filtered_nodes = self._postprocessor.postprocess_nodes(
retrieved_nodes, query_bundle=query_bundle
)
return filtered_nodes
class CustomObjectRetriever(ObjectRetriever):
def __init__(self, retriever, object_node_mapping, all_tools, llm=None) -> None:
self._retriever = retriever
self._object_node_mapping = object_node_mapping
self._llm = llm or OpenAI(
"gpt-4-0613",
api_key=os.getenv("OPENAI_API_KEY"),
)
def retrieve(self, query_bundle):
nodes = self._retriever.retrieve(query_bundle)
tools = [self._object_node_mapping.from_node(n.node) for n in nodes]
sub_question_sc = ServiceContext.from_defaults(llm=self._llm)
sub_question_engine = SubQuestionQueryEngine.from_defaults(
query_engine_tools=tools, service_context=sub_question_sc
)
sub_question_description = f"""\
Useful for any queries that involve comparing multiple documents. ALWAYS use this tool for comparison queries - make sure to call this \
tool with the original query. Do NOT use the other tools for any queries involving multiple documents.
"""
sub_question_tool = QueryEngineTool(
query_engine=sub_question_engine,
metadata=ToolMetadata(
name="compare_tool", description=sub_question_description
),
)
return tools + [sub_question_tool]
def download_website(
url: str,
corpus_name: str,
data_base_path: str = "./data",
) -> str:
domain = urlparse(url).netloc
corpus_path = os.path.join(data_base_path, corpus_name)
domain_path = os.path.join(corpus_path, domain)
if not os.path.exists(domain_path):
os.system(
f"wget -e robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links --restrict-file-names=windows --domains {domain} --no-parent -P {corpus_path} {url}"
)
return domain_path
def load_documents_from_directory(
directory_path: str,
suffix_filter: str | None,
limit: int | None = None,
) -> list[Document]:
UnstructuredReader = download_loader("UnstructuredReader")
reader = UnstructuredReader()
all_files_gen = Path(directory_path).rglob("*")
all_files = [f.resolve() for f in all_files_gen]
if suffix_filter is not None:
all_files = [f for f in all_files if f.suffix.lower() == suffix_filter]
if limit is not None:
all_files = all_files[:limit]
docs = []
for idx, f in tqdm(enumerate(all_files), desc="Loading documents"):
loaded_docs = reader.load_data(file=f, split_documents=True)
loaded_doc = Document(
text="\n\n".join([d.get_content() for d in loaded_docs]),
metadata={"path": str(f)},
)
docs.append(loaded_doc)
return docs
def get_agents_base_path(
data_base_path: str,
corpus_name: str,
) -> str:
return os.path.join(data_base_path, corpus_name, "agents")
def get_agent_path(
data_base_path: str,
corpus_name: str,
file_base: str,
) -> str:
return os.path.join(get_agents_base_path(data_base_path, corpus_name), file_base)
@dataclass
class AgentPaths:
data_base_path: str
corpus_name: str
file_base: str
agent_path: str
vector_idx_path: str
summary_idx_path: str
summary_file_path: str
def get_agent_paths(
data_base_path: str,
corpus_name: str,
file_base: str,
) -> AgentPaths:
agent_path = get_agent_path(data_base_path, corpus_name, file_base)
vector_idx_path = os.path.join(agent_path, "vector_idx")
summary_idx_path = os.path.join(agent_path, "summary_idx")
summary_file_path = os.path.join(agent_path, "summary.pkl")
paths = AgentPaths(
data_base_path=data_base_path,
corpus_name=corpus_name,
file_base=file_base,
agent_path=agent_path,
vector_idx_path=vector_idx_path,
summary_idx_path=summary_idx_path,
summary_file_path=summary_file_path,
)
return paths
async def load_document_agent(
agent_paths: AgentPaths,
service_context: ServiceContext,
) -> tuple[OpenAIAgent, str]:
vector_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=agent_paths.vector_idx_path),
service_context=service_context,
)
summary_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=agent_paths.summary_idx_path),
service_context=service_context,
)
summary = pickle.load(open(agent_paths.summary_file_path, "rb"))
# Create query engines for the vector and summary indices
vector_query_engine = vector_index.as_query_engine()
summary_query_engine = summary_index.as_query_engine(response_mode="tree_summarize")
query_engine_tools = [
QueryEngineTool(
query_engine=vector_query_engine,
metadata=ToolMetadata(
name=f"vector_tool_{agent_paths.file_base}",
description="Useful for questions related to specific facts",
),
),
QueryEngineTool(
query_engine=summary_query_engine,
metadata=ToolMetadata(
name=f"summary_tool_{agent_paths.file_base}",
description="Useful for summarization questions",
),
),
]
function_llm = OpenAI(
model="gpt-4",
api_key=os.getenv("OPENAI_API_KEY"),
)
agent = OpenAIAgent.from_tools(
query_engine_tools,
llm=function_llm,
verbose=True,
system_prompt=f"""\
You are a specialized agent designed to answer queries about the `{agent_paths.file_base}` part of the {agent_paths.corpus_name} docs.
You must ALWAYS use at least one of the tools provided when answering a question; do NOT rely on prior knowledge.\
""",
)
return agent, summary
async def create_document_agent(
nodes: Sequence[BaseNode],
agent_paths: AgentPaths,
service_context: ServiceContext,
) -> tuple[OpenAIAgent, str]:
# Create the vector index
Path(agent_paths.vector_idx_path).mkdir(parents=True, exist_ok=True)
vector_index = VectorStoreIndex(nodes, service_context=service_context)
vector_index.storage_context.persist(persist_dir=agent_paths.vector_idx_path)
# Create the summary index
Path(agent_paths.summary_idx_path).mkdir(parents=True, exist_ok=True)
summary_index = SummaryIndex(nodes, service_context=service_context)
summary_index.storage_context.persist(persist_dir=agent_paths.summary_idx_path)
# Create the summary
summary_query_engine = summary_index.as_query_engine(response_mode="tree_summarize")
summary = str(
await summary_query_engine.aquery(
"Extract a concise 1-2 line summary of this document"
)
)
pickle.dump(summary, open(agent_paths.summary_file_path, "wb"))
return await load_document_agent(
agent_paths=agent_paths,
service_context=service_context,
)
async def create_document_agents(
docs: list[Document],
data_base_path: str,
corpus_name: str,
service_context: ServiceContext,
) -> tuple[dict, dict]:
node_parser = SentenceSplitter()
document_agents = {}
extra_info = {}
for doc in tqdm(docs, desc="Creating document agents"):
# ID will be base + parent
file_path = Path(doc.metadata["path"])
file_base = str(file_path.parent.stem) + "_" + str(file_path.stem)
file_base = file_base.replace(".", "_")
agent_paths = get_agent_paths(
data_base_path=data_base_path,
corpus_name=corpus_name,
file_base=file_base,
)
        nodes = node_parser.get_nodes_from_documents([doc])  # parse nodes up front so both branches below can use them
        if os.path.exists(agent_paths.agent_path):
agent, summary = await load_document_agent(
agent_paths=agent_paths,
service_context=service_context,
)
        else:
agent, summary = await create_document_agent(
nodes=nodes,
agent_paths=agent_paths,
service_context=service_context,
)
document_agents[file_base] = agent
extra_info[file_base] = {"summary": summary, "nodes": nodes}
return document_agents, extra_info
async def create_multidoc_agent(
url: str,
corpus_name: str,
) -> None:
data_base_path = os.getenv("DATA_BASE_PATH", "./data")
llm = OpenAI(
model_name="gpt-4-0613",
api_key=os.getenv("OPENAI_API_KEY"),
)
service_context = ServiceContext.from_defaults(llm=llm)
#
corpus_path = download_website(
url=url,
corpus_name=corpus_name,
data_base_path=data_base_path,
)
docs = load_documents_from_directory(
directory_path=corpus_path,
suffix_filter=".html",
# limit=10,
)
document_agents, extra_info = await create_document_agents(
docs=docs,
data_base_path=data_base_path,
corpus_name=corpus_name,
service_context=service_context,
)
all_tools = []
for file_base, agent in document_agents.items():
summary = extra_info[file_base]["summary"]
doc_tool = QueryEngineTool(
query_engine=agent,
metadata=ToolMetadata(
name=f"tool_{file_base}",
description=summary,
),
)
all_tools.append(doc_tool)
tool_mapping = SimpleToolNodeMapping.from_objects(all_tools)
obj_index = ObjectIndex.from_objects(
all_tools,
tool_mapping,
VectorStoreIndex,
)
vector_node_retriever = obj_index.as_node_retriever(similarity_top_k=10)
custom_node_retriever = CustomRetriever(vector_node_retriever)
custom_obj_retriever = CustomObjectRetriever(
custom_node_retriever,
tool_mapping,
all_tools,
llm=llm,
)
top_agent = FnRetrieverOpenAIAgent.from_retriever(
custom_obj_retriever,
system_prompt=""" \
You are an agent designed to answer queries about the documentation.
Please always use the tools provided to answer a question. Do not rely on prior knowledge.\
""",
llm=llm,
verbose=True,
)
return top_agent
#
# Store agents in a dictionary
agents = {}
# Load agents from the JSON file when the server starts
async def load_agents():
try:
data_base_path = os.getenv("DATA_BASE_PATH", "./data")
agent_registry_file = os.path.join(data_base_path, "agents.json")
with open(agent_registry_file, "r") as f:
agents_registry = json.load(f)
for corpus_name, url in agents_registry.items():
agents[corpus_name] = await create_multidoc_agent(url, corpus_name)
except FileNotFoundError:
pass # It's okay if the file doesn't exist
def save_agent(
corpus_name: str,
url: str,
):
data_base_path = os.getenv("DATA_BASE_PATH", "./data")
agent_registry = os.path.join(data_base_path, "agents.json")
try:
with open(agent_registry, "r") as f:
data = json.load(f)
except FileNotFoundError:
data = {}
data.update({corpus_name: url})
with open(agent_registry, "w") as f:
json.dump(data, f)
#
@asynccontextmanager
async def lifespan(app: FastAPI):
await load_agents() # Call the function when the server starts
yield
app = FastAPI(lifespan=lifespan)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"], # Replace "*" with the appropriate origins
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
#
@app.post("/agent/")
async def create_agent(
url: str,
corpus_name: str,
) -> Response:
agent = await create_multidoc_agent(url, corpus_name)
agents[corpus_name] = agent
save_agent(corpus_name=corpus_name, url=url)
response = Response(status_code=201)
response.headers["Location"] = f"/agent/{corpus_name}"
return response
@app.post("/agent/{corpus_name}/query")
async def query_agent(
corpus_name: str,
query: str,
) -> str:
agent = agents.get(corpus_name)
if agent is None:
return Response(status_code=404)
response = await agent.aquery(query)
return response.response
#
# url = "https://docs.llamaindex.ai/en/latest/"
# corpus_name = "llamaindex_docs"
| [
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.tools.ToolMetadata",
"llama_index.StorageContext.from_defaults",
"llama_index.agent.FnRetrieverOpenAIAgent.from_retriever",
"llama_index.objects.ObjectIndex.from_objects",
"llama_index.objects.SimpleToolNodeMapping.from_objects",
"llama_index.agent.OpenAIAgent.from_tools",
"llama_index.VectorStoreIndex",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults",
"llama_index.SummaryIndex",
"llama_index.node_parser.SentenceSplitter"
] | [((3825, 3838), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (3836, 3838), False, 'from dotenv import load_dotenv\n'), ((15763, 15789), 'fastapi.FastAPI', 'FastAPI', ([], {'lifespan': 'lifespan'}), '(lifespan=lifespan)\n', (15770, 15789), False, 'from fastapi import FastAPI, Response\n'), ((5877, 5918), 'os.path.join', 'os.path.join', (['data_base_path', 'corpus_name'], {}), '(data_base_path, corpus_name)\n', (5889, 5918), False, 'import os\n'), ((5937, 5970), 'os.path.join', 'os.path.join', (['corpus_path', 'domain'], {}), '(corpus_path, domain)\n', (5949, 5970), False, 'import os\n'), ((6426, 6463), 'llama_index.download_loader', 'download_loader', (['"""UnstructuredReader"""'], {}), "('UnstructuredReader')\n", (6441, 6463), False, 'from llama_index import download_loader\n'), ((7232, 7283), 'os.path.join', 'os.path.join', (['data_base_path', 'corpus_name', '"""agents"""'], {}), "(data_base_path, corpus_name, 'agents')\n", (7244, 7283), False, 'import os\n'), ((7863, 7901), 'os.path.join', 'os.path.join', (['agent_path', '"""vector_idx"""'], {}), "(agent_path, 'vector_idx')\n", (7875, 7901), False, 'import os\n'), ((7925, 7964), 'os.path.join', 'os.path.join', (['agent_path', '"""summary_idx"""'], {}), "(agent_path, 'summary_idx')\n", (7937, 7964), False, 'import os\n'), ((7989, 8028), 'os.path.join', 'os.path.join', (['agent_path', '"""summary.pkl"""'], {}), "(agent_path, 'summary.pkl')\n", (8001, 8028), False, 'import os\n'), ((9767, 10103), 'llama_index.agent.OpenAIAgent.from_tools', 'OpenAIAgent.from_tools', (['query_engine_tools'], {'llm': 'function_llm', 'verbose': '(True)', 'system_prompt': 'f"""\nYou are a specialized agent designed to answer queries about the `{file_base}` part of the {corpus_name} docs.\nYou must ALWAYS use at least one of the tools provided when answering a question; do NOT rely on prior knowledge."""'}), '(query_engine_tools, llm=function_llm, verbose=True,\n system_prompt=\n f"""\nYou are a specialized agent designed to answer queries about the `{file_base}` part of the {corpus_name} docs.\nYou must ALWAYS use at least one of the tools provided when answering a question; do NOT rely on prior knowledge."""\n )\n', (9789, 10103), False, 'from llama_index.agent import OpenAIAgent\n'), ((10443, 10499), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'service_context': 'service_context'}), '(nodes, service_context=service_context)\n', (10459, 10499), False, 'from llama_index import VectorStoreIndex\n'), ((10708, 10760), 'llama_index.SummaryIndex', 'SummaryIndex', (['nodes'], {'service_context': 'service_context'}), '(nodes, service_context=service_context)\n', (10720, 10760), False, 'from llama_index import VectorStoreIndex, SummaryIndex\n'), ((11479, 11497), 'llama_index.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (11495, 11497), False, 'from llama_index.node_parser import SentenceSplitter\n'), ((11560, 11603), 'tqdm.tqdm', 'tqdm', (['docs'], {'desc': '"""Creating document agents"""'}), "(docs, desc='Creating document agents')\n", (11564, 11603), False, 'from tqdm import tqdm\n'), ((12710, 12747), 'os.getenv', 'os.getenv', (['"""DATA_BASE_PATH"""', '"""./data"""'], {}), "('DATA_BASE_PATH', './data')\n", (12719, 12747), False, 'import os\n'), ((12873, 12910), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (12901, 12910), False, 'from llama_index import ServiceContext\n'), ((13775, 13820), 
'llama_index.objects.SimpleToolNodeMapping.from_objects', 'SimpleToolNodeMapping.from_objects', (['all_tools'], {}), '(all_tools)\n', (13809, 13820), False, 'from llama_index.objects import ObjectIndex, SimpleToolNodeMapping, ObjectRetriever\n'), ((13837, 13904), 'llama_index.objects.ObjectIndex.from_objects', 'ObjectIndex.from_objects', (['all_tools', 'tool_mapping', 'VectorStoreIndex'], {}), '(all_tools, tool_mapping, VectorStoreIndex)\n', (13861, 13904), False, 'from llama_index.objects import ObjectIndex, SimpleToolNodeMapping, ObjectRetriever\n'), ((14245, 14544), 'llama_index.agent.FnRetrieverOpenAIAgent.from_retriever', 'FnRetrieverOpenAIAgent.from_retriever', (['custom_obj_retriever'], {'system_prompt': '""" You are an agent designed to answer queries about the documentation.\n Please always use the tools provided to answer a question. Do not rely on prior knowledge.\n """', 'llm': 'llm', 'verbose': '(True)'}), '(custom_obj_retriever, system_prompt=\n """ You are an agent designed to answer queries about the documentation.\n Please always use the tools provided to answer a question. Do not rely on prior knowledge.\n """\n , llm=llm, verbose=True)\n', (14282, 14544), False, 'from llama_index.agent import FnRetrieverOpenAIAgent, ReActAgent\n'), ((15275, 15312), 'os.getenv', 'os.getenv', (['"""DATA_BASE_PATH"""', '"""./data"""'], {}), "('DATA_BASE_PATH', './data')\n", (15284, 15312), False, 'import os\n'), ((15334, 15377), 'os.path.join', 'os.path.join', (['data_base_path', '"""agents.json"""'], {}), "(data_base_path, 'agents.json')\n", (15346, 15377), False, 'import os\n'), ((16234, 16259), 'fastapi.Response', 'Response', ([], {'status_code': '(201)'}), '(status_code=201)\n', (16242, 16259), False, 'from fastapi import FastAPI, Response\n'), ((4972, 5015), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'self._llm'}), '(llm=self._llm)\n', (5000, 5015), False, 'from llama_index import ServiceContext\n'), ((5046, 5145), 'llama_index.query_engine.SubQuestionQueryEngine.from_defaults', 'SubQuestionQueryEngine.from_defaults', ([], {'query_engine_tools': 'tools', 'service_context': 'sub_question_sc'}), '(query_engine_tools=tools,\n service_context=sub_question_sc)\n', (5082, 5145), False, 'from llama_index.query_engine import SubQuestionQueryEngine\n'), ((5838, 5851), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (5846, 5851), False, 'from urllib.parse import urlparse\n'), ((5982, 6009), 'os.path.exists', 'os.path.exists', (['domain_path'], {}), '(domain_path)\n', (5996, 6009), False, 'import os\n'), ((6019, 6221), 'os.system', 'os.system', (['f"""wget -e robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links --restrict-file-names=windows --domains {domain} --no-parent -P {corpus_path} {url}"""'], {}), "(\n f'wget -e robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links --restrict-file-names=windows --domains {domain} --no-parent -P {corpus_path} {url}'\n )\n", (6028, 6221), False, 'import os\n'), ((8518, 8587), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'agent_paths.vector_idx_path'}), '(persist_dir=agent_paths.vector_idx_path)\n', (8546, 8587), False, 'from llama_index import load_index_from_storage, StorageContext\n'), ((8690, 8760), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'agent_paths.summary_idx_path'}), '(persist_dir=agent_paths.summary_idx_path)\n', 
(8718, 8760), False, 'from llama_index import load_index_from_storage, StorageContext\n'), ((11660, 11686), 'pathlib.Path', 'Path', (["doc.metadata['path']"], {}), "(doc.metadata['path'])\n", (11664, 11686), False, 'from pathlib import Path\n'), ((11985, 12023), 'os.path.exists', 'os.path.exists', (['agent_paths.agent_path'], {}), '(agent_paths.agent_path)\n', (11999, 12023), False, 'import os\n'), ((14765, 14802), 'os.getenv', 'os.getenv', (['"""DATA_BASE_PATH"""', '"""./data"""'], {}), "('DATA_BASE_PATH', './data')\n", (14774, 14802), False, 'import os\n'), ((14833, 14876), 'os.path.join', 'os.path.join', (['data_base_path', '"""agents.json"""'], {}), "(data_base_path, 'agents.json')\n", (14845, 14876), False, 'import os\n'), ((15597, 15615), 'json.dump', 'json.dump', (['data', 'f'], {}), '(data, f)\n', (15606, 15615), False, 'import json\n'), ((16525, 16550), 'fastapi.Response', 'Response', ([], {'status_code': '(404)'}), '(status_code=404)\n', (16533, 16550), False, 'from fastapi import FastAPI, Response\n'), ((6518, 6538), 'pathlib.Path', 'Path', (['directory_path'], {}), '(directory_path)\n', (6522, 6538), False, 'from pathlib import Path\n'), ((9720, 9747), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (9729, 9747), False, 'import os\n'), ((10355, 10388), 'pathlib.Path', 'Path', (['agent_paths.vector_idx_path'], {}), '(agent_paths.vector_idx_path)\n', (10359, 10388), False, 'from pathlib import Path\n'), ((10618, 10652), 'pathlib.Path', 'Path', (['agent_paths.summary_idx_path'], {}), '(agent_paths.summary_idx_path)\n', (10622, 10652), False, 'from pathlib import Path\n'), ((12816, 12843), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (12825, 12843), False, 'import os\n'), ((14957, 14969), 'json.load', 'json.load', (['f'], {}), '(f)\n', (14966, 14969), False, 'import json\n'), ((15451, 15463), 'json.load', 'json.load', (['f'], {}), '(f)\n', (15460, 15463), False, 'import json\n'), ((5562, 5633), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""compare_tool"""', 'description': 'sub_question_description'}), "(name='compare_tool', description=sub_question_description)\n", (5574, 5633), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n'), ((9207, 9331), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': 'f"""vector_tool_{agent_paths.file_base}"""', 'description': '"""Useful for questions related to specific facts"""'}), "(name=f'vector_tool_{agent_paths.file_base}', description=\n 'Useful for questions related to specific facts')\n", (9219, 9331), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n'), ((9479, 9592), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': 'f"""summary_tool_{agent_paths.file_base}"""', 'description': '"""Useful for summarization questions"""'}), "(name=f'summary_tool_{agent_paths.file_base}', description=\n 'Useful for summarization questions')\n", (9491, 9592), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n'), ((13602, 13661), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': 'f"""tool_{file_base}"""', 'description': 'summary'}), "(name=f'tool_{file_base}', description=summary)\n", (13614, 13661), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n'), ((4092, 4119), 'os.getenv', 'os.getenv', (['"""COHERE_API_KEY"""'], {}), "('COHERE_API_KEY')\n", (4101, 4119), False, 'import os\n'), ((4735, 4762), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), 
"('OPENAI_API_KEY')\n", (4744, 4762), False, 'import os\n')] |
from typing import Dict, List
from pathlib import Path
from llama_index import download_loader
from llama_index import Document
# An OpenAI API key must be set in the environment before running this script.
import os
if "OPENAI_API_KEY" not in os.environ:
raise RuntimeError("Please add the OPENAI_API_KEY environment variable to run this script. Run the following in your terminal `export OPENAI_API_KEY=...`")
# Step 1: Logic for loading and parsing the files into llama_index documents.
UnstructuredReader = download_loader("UnstructuredReader")
loader = UnstructuredReader()
def load_and_parse_files(file_row: Dict[str, Path]) -> List[Dict[str, Document]]:
documents = []
file = file_row["path"]
if file.is_dir():
return []
# Skip all non-html files like png, jpg, etc.
if file.suffix.lower() == ".html":
loaded_doc = loader.load_data(file=file, split_documents=False)
loaded_doc[0].extra_info = {"path": str(file)}
documents.extend(loaded_doc)
return [{"doc": doc} for doc in documents]
# Step 2: Convert the loaded documents into llama_index Nodes. This will split the documents into chunks.
from llama_index.node_parser import SimpleNodeParser
from llama_index.data_structs import Node
def convert_documents_into_nodes(documents: Dict[str, Document]) -> List[Dict[str, Node]]:
parser = SimpleNodeParser()
document = documents["doc"]
nodes = parser.get_nodes_from_documents([document])
return [{"node": node} for node in nodes]
# Step 3: Embed each node using a local embedding model.
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
class EmbedNodes:
def __init__(self):
self.embedding_model = HuggingFaceEmbeddings(
# Use all-mpnet-base-v2 Sentence_transformer.
# This is the default embedding model for LlamaIndex/Langchain.
model_name="sentence-transformers/all-mpnet-base-v2",
model_kwargs={"device": "cuda"},
# Use GPU for embedding and specify a large enough batch size to maximize GPU utilization.
# Remove the "device": "cuda" to use CPU instead.
encode_kwargs={"device": "cuda", "batch_size": 100}
)
def __call__(self, node_batch: Dict[str, List[Node]]) -> Dict[str, List[Node]]:
nodes = node_batch["node"]
text = [node.text for node in nodes]
embeddings = self.embedding_model.embed_documents(text)
assert len(nodes) == len(embeddings)
for node, embedding in zip(nodes, embeddings):
node.embedding = embedding
return {"embedded_nodes": nodes}
# Step 4: Stitch together all of the above into a Ray Data pipeline.
import ray
from ray.data import ActorPoolStrategy
# First, download the Ray documentation locally
# wget -e robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links --restrict-file-names=windows --domains docs.ray.io --no-parent https://docs.ray.io/en/master/
# Get the paths for the locally downloaded documentation.
all_docs_gen = Path("./docs.ray.io/").rglob("*")
all_docs = [{"path": doc.resolve()} for doc in all_docs_gen]
# Create the Ray Dataset pipeline
ds = ray.data.from_items(all_docs)
# Use `flat_map` since there is a 1:N relationship. Each filepath returns multiple documents.
loaded_docs = ds.flat_map(load_and_parse_files)
# Use `flat_map` since there is a 1:N relationship. Each document returns multiple nodes.
nodes = loaded_docs.flat_map(convert_documents_into_nodes)
# Use `map_batches` to specify a batch size to maximize GPU utilization.
# We define `EmbedNodes` as a class instead of a function so we only initialize the embedding model once.
# This state can be reused for multiple batches.
embedded_nodes = nodes.map_batches(
EmbedNodes,
batch_size=100,
# Use 1 GPU per actor.
num_gpus=1,
# There are 4 GPUs in the cluster. Each actor uses 1 GPU. So we want 4 total actors.
# Set the size of the ActorPool to the number of GPUs in the cluster.
compute=ActorPoolStrategy(size=4),
)
# Step 5: Trigger execution and collect all the embedded nodes.
ray_docs_nodes = []
for row in embedded_nodes.iter_rows():
node = row["embedded_nodes"]
assert node.embedding is not None
ray_docs_nodes.append(node)
# Step 6: Store the embedded nodes in a local vector store, and persist to disk.
print("Storing Ray Documentation embeddings in vector index.")
from llama_index import GPTVectorStoreIndex
ray_docs_index = GPTVectorStoreIndex(nodes=ray_docs_nodes)
ray_docs_index.storage_context.persist(persist_dir="/tmp/ray_docs_index")
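# The persisted index can later be reloaded without re-embedding (sketch, using the same legacy llama_index API):
# from llama_index import StorageContext, load_index_from_storage
# storage_context = StorageContext.from_defaults(persist_dir="/tmp/ray_docs_index")
# ray_docs_index = load_index_from_storage(storage_context)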
# Repeat the same steps for the Anyscale blogs
# Download the Anyscale blogs locally
# wget -e robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links --restrict-file-names=windows --domains anyscale.com --no-parent https://www.anyscale.com/blog
all_blogs_gen = Path("./www.anyscale.com/blog/").rglob("*")
all_blogs = [{"path": blog.resolve()} for blog in all_blogs_gen]
ds = ray.data.from_items(all_blogs)
loaded_docs = ds.flat_map(load_and_parse_files)
nodes = loaded_docs.flat_map(convert_documents_into_nodes)
embedded_nodes = nodes.map_batches(
EmbedNodes,
batch_size=100,
compute=ActorPoolStrategy(size=4),
num_gpus=1)
ray_blogs_nodes = []
for row in embedded_nodes.iter_rows():
node = row["embedded_nodes"]
assert node.embedding is not None
ray_blogs_nodes.append(node)
print("Storing Anyscale blog post embeddings in vector index.")
ray_blogs_index = GPTVectorStoreIndex(nodes=ray_blogs_nodes)
ray_blogs_index.storage_context.persist(persist_dir="/tmp/ray_blogs_index") | [
"llama_index.GPTVectorStoreIndex",
"llama_index.node_parser.SimpleNodeParser",
"llama_index.download_loader"
] | [((497, 534), 'llama_index.download_loader', 'download_loader', (['"""UnstructuredReader"""'], {}), "('UnstructuredReader')\n", (512, 534), False, 'from llama_index import download_loader\n'), ((3192, 3221), 'ray.data.from_items', 'ray.data.from_items', (['all_docs'], {}), '(all_docs)\n', (3211, 3221), False, 'import ray\n'), ((4503, 4544), 'llama_index.GPTVectorStoreIndex', 'GPTVectorStoreIndex', ([], {'nodes': 'ray_docs_nodes'}), '(nodes=ray_docs_nodes)\n', (4522, 4544), False, 'from llama_index import GPTVectorStoreIndex\n'), ((5028, 5058), 'ray.data.from_items', 'ray.data.from_items', (['all_blogs'], {}), '(all_blogs)\n', (5047, 5058), False, 'import ray\n'), ((5544, 5586), 'llama_index.GPTVectorStoreIndex', 'GPTVectorStoreIndex', ([], {'nodes': 'ray_blogs_nodes'}), '(nodes=ray_blogs_nodes)\n', (5563, 5586), False, 'from llama_index import GPTVectorStoreIndex\n'), ((1343, 1361), 'llama_index.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {}), '()\n', (1359, 1361), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((1696, 1865), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/all-mpnet-base-v2"""', 'model_kwargs': "{'device': 'cuda'}", 'encode_kwargs': "{'device': 'cuda', 'batch_size': 100}"}), "(model_name='sentence-transformers/all-mpnet-base-v2',\n model_kwargs={'device': 'cuda'}, encode_kwargs={'device': 'cuda',\n 'batch_size': 100})\n", (1717, 1865), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((3057, 3079), 'pathlib.Path', 'Path', (['"""./docs.ray.io/"""'], {}), "('./docs.ray.io/')\n", (3061, 3079), False, 'from pathlib import Path\n'), ((4036, 4061), 'ray.data.ActorPoolStrategy', 'ActorPoolStrategy', ([], {'size': '(4)'}), '(size=4)\n', (4053, 4061), False, 'from ray.data import ActorPoolStrategy\n'), ((4913, 4945), 'pathlib.Path', 'Path', (['"""./www.anyscale.com/blog/"""'], {}), "('./www.anyscale.com/blog/')\n", (4917, 4945), False, 'from pathlib import Path\n'), ((5252, 5277), 'ray.data.ActorPoolStrategy', 'ActorPoolStrategy', ([], {'size': '(4)'}), '(size=4)\n', (5269, 5277), False, 'from ray.data import ActorPoolStrategy\n')] |
import os
import sys
import openai
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, load_index_from_storage
from llama_index.vector_stores import ChromaVectorStore, FaissVectorStore
from llama_index.storage.storage_context import StorageContext
from llama_index.embeddings import HuggingFaceEmbedding
import chromadb
sys.path.append(os.getcwd())
from constant import VECTORINDEX_PATH, VECTORINDEX_GPT_PATH
def vector_retriever(similarity_top_k: int = 5, gpt: bool = True):
    if gpt:
vector_path = VECTORINDEX_GPT_PATH
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.api_base = os.getenv('OPENAI_ENDPOINT')
service_context = ServiceContext.from_defaults(llm=None)
else:
vector_path = VECTORINDEX_PATH
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-zh-v1.5")
service_context = ServiceContext.from_defaults(embed_model=embed_model,llm=None)
db = chromadb.PersistentClient(path=vector_path)
chroma_collection = db.get_or_create_collection("quickstart")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
index_rebuild = VectorStoreIndex.from_vector_store(vector_store,service_context=service_context)
retriever = index_rebuild.as_retriever(similarity_top_k=similarity_top_k)
return retriever
if __name__ == "__main__" :
retriever = vector_retriever(5)
    result = retriever.retrieve("如何加速pod启动")  # query: "How to speed up pod startup?"
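    # Note: retrieve() returns a list of NodeWithScore objects (matched text chunks plus similarity scores).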
print(result)
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.embeddings.HuggingFaceEmbedding",
"llama_index.vector_stores.ChromaVectorStore"
] | [((367, 378), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (376, 378), False, 'import os\n'), ((962, 1005), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'vector_path'}), '(path=vector_path)\n', (987, 1005), False, 'import chromadb\n'), ((1091, 1145), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (1108, 1145), False, 'from llama_index.vector_stores import ChromaVectorStore, FaissVectorStore\n'), ((1166, 1252), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {'service_context': 'service_context'}), '(vector_store, service_context=\n service_context)\n', (1200, 1252), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, load_index_from_storage\n'), ((588, 615), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (597, 615), False, 'import os\n'), ((642, 670), 'os.getenv', 'os.getenv', (['"""OPENAI_ENDPOINT"""'], {}), "('OPENAI_ENDPOINT')\n", (651, 670), False, 'import os\n'), ((697, 735), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'None'}), '(llm=None)\n', (725, 735), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, load_index_from_storage\n'), ((807, 863), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""BAAI/bge-base-zh-v1.5"""'}), "(model_name='BAAI/bge-base-zh-v1.5')\n", (827, 863), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((890, 953), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model', 'llm': 'None'}), '(embed_model=embed_model, llm=None)\n', (918, 953), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, load_index_from_storage\n')] |
import os
from dotenv import load_dotenv
load_dotenv()
openai_key = os.getenv("OPENAI_API_KEY")
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
ServiceContext,
set_global_service_context
)
from llama_index.llms import OpenAI
from llama_index.prompts import PromptTemplate
llm = OpenAI(
system_prompt="Always respond in croatian language"
)
service_context = ServiceContext.from_defaults(
llm=llm
)
set_global_service_context(service_context)
documents = SimpleDirectoryReader(
input_files=["./whitepapers/bitcoin.pdf"]
).load_data()
index = VectorStoreIndex.from_documents(
documents
)
# To retrieve the relevant nodes and get accurate answers you need a good prompt;
# ideally the LLM is given the exact steps to carry out, which is done with custom prompts.
# By default the query_engine uses "text_qa_template" for every query, and
# if the retrieved context is too long for a single LLM call it also uses
# "refine_template"
query_engine = index.as_query_engine()
prompts_dict = query_engine.get_prompts()
for k, v in prompts_dict.items():
print("Prompt key -> ", k)
print(v.get_template())
print(f"\n\n")
# # When changing the prompt template, the variables "context_str" (for the retrieved context) and
# # "query_str" (for the query) must always be present
# custom_qa_prompt = PromptTemplate(
# "Context information is below.\n"
# "---------------------\n"
# "{context_str}\n"
# "---------------------\n"
# "Given the context information and not prior knowledge, "
# "answer the query in the style of a Shakespeare play.\n"
# "Query: {query_str}\n"
# "Answer: "
# )
# query_engine = index.as_query_engine(
# text_qa_template=custom_qa_prompt
# )
# prompts_dict = query_engine.get_prompts()
# for k, v in prompts_dict.items():
# print("Prompt key -> ", k)
# print(v.get_template())
# print(f"\n\n")
# # Sometimes the prompt needs to be changed dynamically
# qa_prompt_tmpl_str = """\
# Context information is below.
# ---------------------
# {context_str}
# ---------------------
# Given the context information and not prior knowledge, answer the query.
# Please write the answer in the style of {tone_name}
# Query: {query_str}
# Answer: \
# """
# prompt_tmpl = PromptTemplate(qa_prompt_tmpl_str)
# partial_prompt_tmpl = prompt_tmpl.partial_format(tone_name="Shakespeare")
# query_engine = index.as_query_engine(
# text_qa_template=partial_prompt_tmpl
# )
# # prompts_dict = query_engine.get_prompts()
# # for k, v in prompts_dict.items():
# # print("Prompt key -> ", k)
# # print(v.get_template())
# # print(f"\n\n")
# response = query_engine.query("o cemu se radi")  # Croatian for "what is it about?"
# print(response)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.set_global_service_context"
] | [((42, 55), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (53, 55), False, 'from dotenv import load_dotenv\n'), ((69, 96), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (78, 96), False, 'import os\n'), ((316, 375), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'system_prompt': '"""Always respond in croatian language"""'}), "(system_prompt='Always respond in croatian language')\n", (322, 375), False, 'from llama_index.llms import OpenAI\n'), ((398, 435), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (426, 435), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, set_global_service_context\n'), ((442, 485), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (468, 485), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, set_global_service_context\n'), ((591, 633), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (622, 633), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, set_global_service_context\n'), ((499, 563), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['./whitepapers/bitcoin.pdf']"}), "(input_files=['./whitepapers/bitcoin.pdf'])\n", (520, 563), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, set_global_service_context\n')] |
import os
from llama_index import StorageContext, load_index_from_storage
from dotenv import load_dotenv
from llama_index import VectorStoreIndex, SimpleDirectoryReader
load_dotenv()
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
LIBRARY_DIRECTORY = os.getenv('LIBRARY_DIRECTORY')
documents = SimpleDirectoryReader(LIBRARY_DIRECTORY).load_data()
index = VectorStoreIndex.from_documents(documents)
index.storage_context.persist()
# rebuild storage context
# storage_context = StorageContext.from_defaults(persist_dir="./storage")
# load index
# index = load_index_from_storage(storage_context)
# query_engine = index.as_query_engine()
# query_engine.query("YOUR_QUESTION")
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader"
] | [((170, 183), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (181, 183), False, 'from dotenv import load_dotenv\n'), ((217, 244), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (226, 244), False, 'import os\n'), ((265, 295), 'os.getenv', 'os.getenv', (['"""LIBRARY_DIRECTORY"""'], {}), "('LIBRARY_DIRECTORY')\n", (274, 295), False, 'import os\n'), ((371, 413), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (402, 413), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((310, 350), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['LIBRARY_DIRECTORY'], {}), '(LIBRARY_DIRECTORY)\n', (331, 350), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n')] |
import os
import json
from llmsherpa.readers import LayoutPDFReader
from llmsherpa.readers.layout_reader import LayoutReader
from llama_index.core import Document
from pinecone import Pinecone, ServerlessSpec
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import Settings
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core.node_parser import HierarchicalNodeParser, SentenceSplitter
from llama_index.core import StorageContext
from langchain_openai import ChatOpenAI
from spacy.tokens import Doc
from spacy_llm.registry import registry
from spacy_llm.util import assemble
import logging
import spacy_llm
spacy_llm.logger.addHandler(logging.StreamHandler())
spacy_llm.logger.setLevel(logging.DEBUG)
nlp = assemble("config.cfg")
api_key = os.environ.get("PINECONE_API_KEY","")
pc = Pinecone(api_key=api_key)
pinecone_index = pc.Index("replace_with_index")
model_name = "voyage-02"
voyage_api_key = os.environ.get("VOYAGE_API_KEY", "")
embed_model = HuggingFaceEmbedding(model_name="Drewskidang/ANTI_BERT")
def get_extra_info():
print("\nPlease enter the following information for each document:")
print("Book/Case information, Relevant Rules, Case Name, Related Cases")
law_subject_area = input("Law subject area: ")
relevant_rules = input("Relevant Rules: ")
case_name = input("Case Name: ")
related_cases = input("Related Cases: ")
return {
"law_subject_area": law_subject_area,
"relevant_rules": relevant_rules,
"extra_data": case_name,
"related_cases": related_cases
}
inference_server_url="http://localhost:8000/v1"
llm=ChatOpenAI(
openai_api_key="EMPTY",
openai_api_base=inference_server_url,
model_name="Drewskidang/Marlin-AWQ",
max_tokens=512
)
Settings.llm = llm
Settings.embed_model = embed_model
def process_pdfs(pdf_directory):
parser_api_url = "http://localhost:5010/api/parseDocument?renderFormat=all&applyOcr=yes"
pdf_reader = LayoutPDFReader(parser_api_url)
data = []
for filename in os.listdir(pdf_directory):
if filename.endswith(".pdf"):
pdf_path = os.path.join(pdf_directory, filename)
print(f"\nProcessing document: {filename}") # Display the name of the document
            docs = pdf_reader.read_pdf(pdf_path)
            extra_info = get_extra_info()  # prompt the user once per document; reused for every chunk below
            for chunk in docs.chunks():
                chunk_text = chunk.to_context_text()
                spacy_doc = nlp(chunk_text)  # run the spaCy/LLM pipeline over the chunk text (renamed so it no longer shadows `docs`)
                document = Document(
                    text=chunk.to_text(include_children=True, recurse=True),
                    extra_info=extra_info,  # the same extra_info is attached to all chunks of this document
                    docs=spacy_doc,
                )
                data.append(document)
return data
def convert_nodes(data):
    namespace = 'criminal'
    # NOTE: PineconeVectorStore expects the `namespace` keyword (not `name_space`).
    vector_store = PineconeVectorStore(pinecone_index=pinecone_index, namespace=namespace)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
data, storage_context=storage_context
)
pdf_directory = "data" # Replace with your actual directory path
processed_data = process_pdfs(pdf_directory) # Call the function once and store its result
convert_nodes(processed_data) # Pass the result to the second function
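# Querying the freshly built index (sketch; assumes convert_nodes is changed to return `index`):
# index = convert_nodes(processed_data)
# query_engine = index.as_query_engine(similarity_top_k=3)
# print(query_engine.query("Which rules are relevant to this case?"))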
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.vector_stores.pinecone.PineconeVectorStore",
"llama_index.core.StorageContext.from_defaults"
] | [((808, 848), 'spacy_llm.logger.setLevel', 'spacy_llm.logger.setLevel', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (833, 848), False, 'import spacy_llm\n'), ((855, 877), 'spacy_llm.util.assemble', 'assemble', (['"""config.cfg"""'], {}), "('config.cfg')\n", (863, 877), False, 'from spacy_llm.util import assemble\n'), ((889, 927), 'os.environ.get', 'os.environ.get', (['"""PINECONE_API_KEY"""', '""""""'], {}), "('PINECONE_API_KEY', '')\n", (903, 927), False, 'import os\n'), ((932, 957), 'pinecone.Pinecone', 'Pinecone', ([], {'api_key': 'api_key'}), '(api_key=api_key)\n', (940, 957), False, 'from pinecone import Pinecone, ServerlessSpec\n'), ((1048, 1084), 'os.environ.get', 'os.environ.get', (['"""VOYAGE_API_KEY"""', '""""""'], {}), "('VOYAGE_API_KEY', '')\n", (1062, 1084), False, 'import os\n'), ((1100, 1156), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""Drewskidang/ANTI_BERT"""'}), "(model_name='Drewskidang/ANTI_BERT')\n", (1120, 1156), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((1747, 1876), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': '"""EMPTY"""', 'openai_api_base': 'inference_server_url', 'model_name': '"""Drewskidang/Marlin-AWQ"""', 'max_tokens': '(512)'}), "(openai_api_key='EMPTY', openai_api_base=inference_server_url,\n model_name='Drewskidang/Marlin-AWQ', max_tokens=512)\n", (1757, 1876), False, 'from langchain_openai import ChatOpenAI\n'), ((783, 806), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (804, 806), False, 'import logging\n'), ((2110, 2141), 'llmsherpa.readers.LayoutPDFReader', 'LayoutPDFReader', (['parser_api_url'], {}), '(parser_api_url)\n', (2125, 2141), False, 'from llmsherpa.readers import LayoutPDFReader\n'), ((2177, 2202), 'os.listdir', 'os.listdir', (['pdf_directory'], {}), '(pdf_directory)\n', (2187, 2202), False, 'import os\n'), ((3190, 3263), 'llama_index.vector_stores.pinecone.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'pinecone_index', 'name_space': 'name_space'}), '(pinecone_index=pinecone_index, name_space=name_space)\n', (3209, 3263), False, 'from llama_index.vector_stores.pinecone import PineconeVectorStore\n'), ((3286, 3341), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (3314, 3341), False, 'from llama_index.core import StorageContext\n'), ((3354, 3424), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['data'], {'storage_context': 'storage_context'}), '(data, storage_context=storage_context)\n', (3385, 3424), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n'), ((2265, 2302), 'os.path.join', 'os.path.join', (['pdf_directory', 'filename'], {}), '(pdf_directory, filename)\n', (2277, 2302), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import os.path
import re
import sys
import gin
import shutil
import logging
import tempfile
import requests
import subprocess
from pathlib import Path
from urllib.parse import urlparse
from llama_index import ServiceContext, StorageContext
from llama_index import VectorStoreIndex, SimpleDirectoryReader, SimpleKeywordTableIndex
from llama_index import set_global_service_context
from processors.markdown import MarkdownReader
from processors.embedding import get_embedding
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
sdr_exclude = [
"*.rst", "*.ipynb", "*.py", "*.bat", "*.txt", "*.png", "*.jpg", "*.jpeg",
"*.csv", "*.html", "*.js", "*.css", "*.pdf", "*.json"
]
re_github = r"https://(?P<token>.*?)github\.com/(?P<org>[^/]+)/(?P<repo>[^/\s]+)/?(?P<type>(tree|blob)/?(?P<version>[^/\s]+)/?(?P<path>.+)?)?"
def file_reader(file_path: str):
return MarkdownReader().load_data(Path(file_path))
def dir_reader(dir_path: str):
return SimpleDirectoryReader(
input_dir=dir_path,
exclude=sdr_exclude,
file_extractor={
".md": MarkdownReader()
},
recursive=True,
).load_data()
def url_reader(url: str):
logging.info(f"{url} start")
resp = requests.get(url, timeout=300)
if "text/" in resp.headers.get('content-type', ""):
f = tempfile.NamedTemporaryFile(suffix=".md", delete=False)
f.write(resp.content)
f.close()
docs = file_reader(f.name)
        os.remove(f.name)  # f.name is a file, not a directory; shutil.rmtree(ignore_errors=True) would silently leave it behind
return docs
return []
def github_reader(urlParse: re.Match):
urlReGroups = urlParse.groups()
token = urlReGroups[0]
org = urlReGroups[1]
repo = urlReGroups[2]
version = urlReGroups[4] # None|tree|blob
branch = urlReGroups[5] # tag_name|branch_name|commit_id
# version == tree, path is dir; version == blob, path is file
sub_path = "" if urlReGroups[6] == None else urlReGroups[6]
if version == "blob":
url = f'https://{token}raw.githubusercontent.com/{org}/{repo}/{branch}/{sub_path}'
return url_reader(url)
if version not in [None, "tree"]:
return []
url = f'https://{token}github.com/{org}/{repo}'
if branch:
args = ["git", "clone", "--depth", "1", "--branch", branch, url, "."]
else:
args = ["git", "clone", "--depth", "1", url, "."]
del_not_md = '''find . -type f ! -name "*.md" | xargs rm -rf'''
logging.info(f"{args} start")
with tempfile.TemporaryDirectory() as tmpdirname:
subprocess.run(args, check=True, timeout=300, cwd=tmpdirname)
subprocess.run(del_not_md, shell=True, timeout=300, cwd=tmpdirname)
logging.info(f"{args} ended")
docs = dir_reader(os.path.join(tmpdirname, sub_path))
return docs
map_func = {
"dir": dir_reader,
"file": file_reader,
"github": github_reader,
"url": url_reader,
}
# python rag-index index_persist_path collection_path...
# collection_path
# data/
# /data
# https://abc.com/xyz.md
# https://<token>@github.com/<org>/<repo>
# https://<token>@github.com/<org>/<repo>/tree/<tag_name|branch_name>/<sub_dir>
# https://<token>@github.com/<org>/<repo>/blob/<tag_name|branch_name|commit_id>/<sub_dir>/<file_name>.md
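# Example invocations (illustrative paths only):
#   python rag-index ./persist ./data
#   python rag-index ./persist https://github.com/<org>/<repo>/tree/main/docs https://abc.com/xyz.md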
if __name__ == "__main__":
    if len(sys.argv) < 3:
        print("usage: rag-index <index_persist_path> <collection_path> [<collection_path> ...]")
        sys.exit(1)
# We assume that there output directory is the first argument, and the rest is input directory
output = sys.argv[1]
gin.parse_config_file('index.gin')
# init download hugging fact model
service_context = ServiceContext.from_defaults(
llm=None,
llm_predictor=None,
embed_model=get_embedding(),
)
storage_context = StorageContext.from_defaults()
set_global_service_context(service_context)
documents = []
for file_path in sys.argv[2:]:
        if os.path.isfile(file_path) and file_path.endswith(".md"):
            logging.info(f"loading file {file_path}")
            documents.extend(map_func["file"](file_path))
elif os.path.isdir(file_path):
documents.extend(map_func["dir"](file_path))
else:
match_github = re.search(re_github, file_path)
if match_github:
documents.extend(map_func["github"](match_github))
continue
match_url = urlparse(file_path)
if match_url.scheme and match_url.netloc:
documents.extend(map_func["url"](file_path))
continue
# exclude these things from considerations.
for doc in documents:
doc.excluded_llm_metadata_keys = ["file_name", "content_type"]
doc.excluded_embed_metadata_keys = ["file_name", "content_type"]
try:
embedding_index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context)
keyword_index = SimpleKeywordTableIndex(
documents, storage_context=storage_context)
embedding_index.set_index_id("embedding")
embedding_index.storage_context.persist(persist_dir=output)
keyword_index.set_index_id("keyword")
keyword_index.storage_context.persist(persist_dir=output)
except Exception as e:
print(str(e))
shutil.rmtree(output, ignore_errors=True)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.set_global_service_context",
"llama_index.StorageContext.from_defaults",
"llama_index.SimpleKeywordTableIndex"
] | [((535, 594), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (554, 594), False, 'import logging\n'), ((626, 666), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (647, 666), False, 'import logging\n'), ((1327, 1355), 'logging.info', 'logging.info', (['f"""{url} start"""'], {}), "(f'{url} start')\n", (1339, 1355), False, 'import logging\n'), ((1367, 1397), 'requests.get', 'requests.get', (['url'], {'timeout': '(300)'}), '(url, timeout=300)\n', (1379, 1397), False, 'import requests\n'), ((2577, 2606), 'logging.info', 'logging.info', (['f"""{args} start"""'], {}), "(f'{args} start')\n", (2589, 2606), False, 'import logging\n'), ((3614, 3648), 'gin.parse_config_file', 'gin.parse_config_file', (['"""index.gin"""'], {}), "('index.gin')\n", (3635, 3648), False, 'import gin\n'), ((3853, 3883), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (3881, 3883), False, 'from llama_index import ServiceContext, StorageContext\n'), ((3889, 3932), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (3915, 3932), False, 'from llama_index import set_global_service_context\n'), ((595, 614), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (612, 614), False, 'import logging\n'), ((1040, 1055), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (1044, 1055), False, 'from pathlib import Path\n'), ((1466, 1521), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".md"""', 'delete': '(False)'}), "(suffix='.md', delete=False)\n", (1493, 1521), False, 'import tempfile\n'), ((1613, 1654), 'shutil.rmtree', 'shutil.rmtree', (['f.name'], {'ignore_errors': '(True)'}), '(f.name, ignore_errors=True)\n', (1626, 1654), False, 'import shutil\n'), ((2616, 2645), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2643, 2645), False, 'import tempfile\n'), ((2669, 2730), 'subprocess.run', 'subprocess.run', (['args'], {'check': '(True)', 'timeout': '(300)', 'cwd': 'tmpdirname'}), '(args, check=True, timeout=300, cwd=tmpdirname)\n', (2683, 2730), False, 'import subprocess\n'), ((2739, 2806), 'subprocess.run', 'subprocess.run', (['del_not_md'], {'shell': '(True)', 'timeout': '(300)', 'cwd': 'tmpdirname'}), '(del_not_md, shell=True, timeout=300, cwd=tmpdirname)\n', (2753, 2806), False, 'import subprocess\n'), ((2815, 2844), 'logging.info', 'logging.info', (['f"""{args} ended"""'], {}), "(f'{args} ended')\n", (2827, 2844), False, 'import logging\n'), ((3473, 3484), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3481, 3484), False, 'import sys\n'), ((4880, 4955), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context'}), '(documents, storage_context=storage_context)\n', (4911, 4955), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, SimpleKeywordTableIndex\n'), ((4993, 5060), 'llama_index.SimpleKeywordTableIndex', 'SimpleKeywordTableIndex', (['documents'], {'storage_context': 'storage_context'}), '(documents, storage_context=storage_context)\n', (5016, 5060), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, SimpleKeywordTableIndex\n'), ((1013, 1029), 'processors.markdown.MarkdownReader', 'MarkdownReader', ([], {}), '()\n', (1027, 1029), False, 'from 
processors.markdown import MarkdownReader\n'), ((2871, 2905), 'os.path.join', 'os.path.join', (['tmpdirname', 'sub_path'], {}), '(tmpdirname, sub_path)\n', (2883, 2905), False, 'import os\n'), ((3807, 3822), 'processors.embedding.get_embedding', 'get_embedding', ([], {}), '()\n', (3820, 3822), False, 'from processors.embedding import get_embedding\n'), ((3999, 4024), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (4013, 4024), False, 'import os\n'), ((4163, 4187), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (4176, 4187), False, 'import os\n'), ((5362, 5403), 'shutil.rmtree', 'shutil.rmtree', (['output'], {'ignore_errors': '(True)'}), '(output, ignore_errors=True)\n', (5375, 5403), False, 'import shutil\n'), ((4287, 4318), 're.search', 're.search', (['re_github', 'file_path'], {}), '(re_github, file_path)\n', (4296, 4318), False, 'import re\n'), ((4465, 4484), 'urllib.parse.urlparse', 'urlparse', (['file_path'], {}), '(file_path)\n', (4473, 4484), False, 'from urllib.parse import urlparse\n'), ((1225, 1241), 'processors.markdown.MarkdownReader', 'MarkdownReader', ([], {}), '()\n', (1239, 1241), False, 'from processors.markdown import MarkdownReader\n')] |
from llama_index.core.node_parser import HierarchicalNodeParser
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
documents = reader.load_data(Path("files/sample_document1.txt"))  # load_data returns a list of Document objects
hierarchical_parser = HierarchicalNodeParser.from_defaults(
chunk_sizes=[128, 64, 32],
chunk_overlap=0,
)
nodes = hierarchical_parser.get_nodes_from_documents(documents)
for node in nodes:
print(f"Metadata: {node.metadata} \nText: {node.text}")
| [
"llama_index.readers.file.FlatReader",
"llama_index.core.node_parser.HierarchicalNodeParser.from_defaults"
] | [((147, 159), 'llama_index.readers.file.FlatReader', 'FlatReader', ([], {}), '()\n', (157, 159), False, 'from llama_index.readers.file import FlatReader\n'), ((247, 332), 'llama_index.core.node_parser.HierarchicalNodeParser.from_defaults', 'HierarchicalNodeParser.from_defaults', ([], {'chunk_sizes': '[128, 64, 32]', 'chunk_overlap': '(0)'}), '(chunk_sizes=[128, 64, 32], chunk_overlap=0\n )\n', (283, 332), False, 'from llama_index.core.node_parser import HierarchicalNodeParser\n'), ((188, 222), 'pathlib.Path', 'Path', (['"""files/sample_document1.txt"""'], {}), "('files/sample_document1.txt')\n", (192, 222), False, 'from pathlib import Path\n')] |
from typing_extensions import override
import os
from llama_index.indices.query.query_transform.base import BaseQueryTransform
from llama_index.llms import ChatMessage, MessageRole
from llama_index.llms.base import BaseLLM
from llama_index.llms.llm import MessagesToPromptType
from llama_index.postprocessor.types import BaseNodePostprocessor
from llama_index.prompts import PromptTemplate
from llama_index.query_engine.custom import CustomQueryEngine, STR_OR_RESPONSE_TYPE
from llama_index.retrievers import BaseRetriever
from llama_index.schema import BaseNode, NodeWithScore, QueryBundle
from pydantic import Field
import llama_index.node_parser.text.sentence_window as sentence_window
class QueryEngine(CustomQueryEngine):
"""
A retrieval-augmented query engine.
"""
query_transform: BaseQueryTransform
context_entry_template: str = "{content}"
"""
Template parameters:
- `source`: the source name (the file name without extension)
- `content`: the relevant source fragment
"""
augmented_query_template1: PromptTemplate
"""
Template parameters:
- `context`: the combined context entries
- `query`: the original query
"""
# TODO: Consider supporting multiple refinement iterations.
augmented_query_template2: PromptTemplate
"""
A refinement template. Parameters:
- `context`: the combined context entries
- `response`: the initial response
- `query`: the original query
"""
messages_to_prompt: MessagesToPromptType = Field(exclude=True)
retriever: BaseRetriever
reranker: BaseNodePostprocessor | None
llm: BaseLLM
messages: list[ChatMessage] = []
@override
def custom_query(self, query: str) -> STR_OR_RESPONSE_TYPE:
query_bundle1 = self.query_transform.run(query)
context_nodes = self.retriever.retrieve(query_bundle1)
if self.reranker:
context_nodes = self.reranker.postprocess_nodes(nodes=context_nodes, query_bundle=query_bundle1)
context = self._format_context_nodes(context_nodes)
augmented_query = self.augmented_query_template1.format(context=context, query=query)
prompt = self.messages_to_prompt(
self.messages + [ChatMessage(role=MessageRole.USER, content=augmented_query)]
)
response = str(self.llm.complete(prompt)).strip()
# Refining the response
query_bundle2 = QueryBundle(
query_str=query,
custom_embedding_strs=(query_bundle1.custom_embedding_strs or []) + split_expert_group_response(response),
)
context_nodes = self.retriever.retrieve(query_bundle2)
if self.reranker:
context_nodes = self.reranker.postprocess_nodes(nodes=context_nodes, query_bundle=query_bundle2)
context = self._format_context_nodes(context_nodes)
augmented_query = self.augmented_query_template2.format(context=context, response=response, query=query)
prompt = self.messages_to_prompt(
self.messages + [ChatMessage(role=MessageRole.USER, content=augmented_query)]
)
response = str(self.llm.complete(prompt)).strip()
return response
def _format_context_nodes(self, nodes: list[NodeWithScore]) -> str:
# Put the most relevant entries in the end (of the prompt), where they may have more impact on the generation.
return "\n\n".join([self._format_context_node(node_with_score.node) for node_with_score in reversed(nodes)])
def _format_context_node(self, node: BaseNode):
source_node = node.source_node or node
title = source_node.metadata.get("title") or self._file_name_without_ext(source_node.metadata.get("file_name")) or "Unknown"
return self.context_entry_template.format(
source=title,
content= node.metadata.get(sentence_window.DEFAULT_WINDOW_METADATA_KEY) or node.get_content()
)
def _file_name_without_ext(self, path: str | None) -> str | None:
if path is not None:
file_name = os.path.basename(path)
return os.path.splitext(file_name)[0]
else:
return None
import re
EXPERT_MARKER = re.compile(r"^.*?Expert[^:]*:[\s_*]*|\n\n", flags=re.MULTILINE | re.IGNORECASE)
def split_expert_group_response(response: str) -> list[str]:
return [s for s in [s.strip() for s in EXPERT_MARKER.split(response)] if s]
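# Example (sketch) of the splitting behaviour:
#   split_expert_group_response("Expert 1: Use an index.\n\nExpert 2: Cache results.")
#   -> ["Use an index.", "Cache results."]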
| [
"llama_index.llms.ChatMessage"
] | [((3960, 4046), 're.compile', 're.compile', (['"""^.*?Expert[^:]*:[\\\\s_*]*|\\\\n\\\\n"""'], {'flags': '(re.MULTILINE | re.IGNORECASE)'}), "('^.*?Expert[^:]*:[\\\\s_*]*|\\\\n\\\\n', flags=re.MULTILINE | re.\n IGNORECASE)\n", (3970, 4046), False, 'import re\n'), ((1480, 1499), 'pydantic.Field', 'Field', ([], {'exclude': '(True)'}), '(exclude=True)\n', (1485, 1499), False, 'from pydantic import Field\n'), ((3837, 3859), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (3853, 3859), False, 'import os\n'), ((3873, 3900), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (3889, 3900), False, 'import os\n'), ((2143, 2202), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'augmented_query'}), '(role=MessageRole.USER, content=augmented_query)\n', (2154, 2202), False, 'from llama_index.llms import ChatMessage, MessageRole\n'), ((2884, 2943), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'augmented_query'}), '(role=MessageRole.USER, content=augmented_query)\n', (2895, 2943), False, 'from llama_index.llms import ChatMessage, MessageRole\n')] |
import logging
from typing import Any, Literal
from llama_index.bridge.pydantic import Field, PrivateAttr
from llama_index.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
Embedding,
)
from llama_index.embeddings.multi_modal_base import MultiModalEmbedding
from llama_index.schema import ImageType
logger = logging.getLogger(__name__)
AVAILABLE_CLIP_CPP_MODELS = (
"CLIP-ViT-B-32-laion2B-s34B-b79K",
"CLIP-ViT-H-14-laion2B-s32B-b79K",
"CLIP-ViT-L-14-laion2B-s32B-b82K",
"clip-vit-base-patch32",
"clip-vit-large-patch14",
)
DEFAULT_CLIP_CPP_MODEL = "CLIP-ViT-B-32-laion2B-s34B-b79K"
class ClipCppEmbedding(MultiModalEmbedding):
"""CLIP embedding models for encoding text and image for Multi-Modal purpose.
This class provides an interface to generate embeddings using a model
deployed in clip_cpp. At the initialization it requires a model name
of clip_cpp.
Note:
Requires `clip_cpp` package to be available in the PYTHONPATH. It can be installed with
`pip install clip_cpp`.
"""
embed_batch_size: int = Field(default=DEFAULT_EMBED_BATCH_SIZE, gt=0)
_model: Any = PrivateAttr()
@classmethod
def class_name(cls) -> str:
return "ClipCppEmbedding"
def __init__(
self,
*,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
model_name: str = DEFAULT_CLIP_CPP_MODEL,
float_type: Literal["fp32", "fp16"] = "fp16",
verbosity: int = 0,
**kwargs,
):
"""Initializes the ClipCppEmbedding class.
During the initialization the `clip_cpp` package is imported.
        Args:
            embed_batch_size (int, optional): The batch size for embedding generation. Defaults to 10,
                must be > 0.
            model_name (str): The model name of the Clip model.
        Raises:
            ImportError: If the `clip_cpp` package is not available in the PYTHONPATH.
            ValueError: If the model cannot be fetched from Hugging Face, or if the embed_batch_size
                is not positive.
        """
if embed_batch_size <= 0:
raise ValueError(f"Embed batch size {embed_batch_size} must be > 0.")
repo_id = f"mys/ggml_{model_name}"
model_file = f"{model_name}_ggml-model-{float_type}.gguf"
try:
from clip_cpp import Clip
except ImportError:
raise ImportError("ClipCppEmbedding requires `pip install clip_cpp`.")
super().__init__(
embed_batch_size=embed_batch_size, model_name=model_name, **kwargs
)
try:
if self.model_name not in AVAILABLE_CLIP_CPP_MODELS:
raise ValueError(
f"Model name {self.model_name} is not available in clip_cpp."
)
self._model = Clip(
model_path_or_repo_id=repo_id,
model_file=model_file,
verbosity=verbosity,
)
except Exception as e:
logger.error(f"Error while loading clip_cpp model.")
raise ValueError("Unable to fetch the requested embeddings model") from e
# TEXT EMBEDDINGS
async def _aget_query_embedding(self, query: str) -> Embedding:
return self._get_query_embedding(query)
def _get_text_embedding(self, text: str) -> Embedding:
return self._get_text_embeddings([text])[0]
def _get_text_embeddings(self, texts: list[str]) -> list[Embedding]:
return [self._model.encode_text(self._model.tokenize(text)) for text in texts]
def _get_query_embedding(self, query: str) -> Embedding:
return self._get_text_embedding(query)
# IMAGE EMBEDDINGS
async def _aget_image_embedding(self, img_file_path: ImageType) -> Embedding:
return self._get_image_embedding(img_file_path)
def _get_image_embedding(self, img_file_path: ImageType) -> Embedding:
return self._model.load_preprocess_encode_image(img_file_path)
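# Example usage (illustrative; assumes the GGUF weights can be fetched from the "mys/ggml_<model>" repos):
# embedding = ClipCppEmbedding(model_name=DEFAULT_CLIP_CPP_MODEL)
# text_vector = embedding.get_text_embedding("a photo of a cat")
# image_vector = embedding.get_image_embedding("cat.jpg")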
| [
"llama_index.bridge.pydantic.Field",
"llama_index.bridge.pydantic.PrivateAttr"
] | [((320, 347), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (337, 347), False, 'import logging\n'), ((1086, 1131), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_EMBED_BATCH_SIZE', 'gt': '(0)'}), '(default=DEFAULT_EMBED_BATCH_SIZE, gt=0)\n', (1091, 1131), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((1151, 1164), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1162, 1164), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2853, 2932), 'clip_cpp.Clip', 'Clip', ([], {'model_path_or_repo_id': 'repo_id', 'model_file': 'model_file', 'verbosity': 'verbosity'}), '(model_path_or_repo_id=repo_id, model_file=model_file, verbosity=verbosity)\n', (2857, 2932), False, 'from clip_cpp import Clip\n')] |
import json
from pydantic import create_model
from .utility import CreateOutputModel
"""
The MIT License
Copyright (c) Jerry Liu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from llama_index.core.response_synthesizers import TreeSummarize
# JSON Composer Tool
class LLMJsonComposer:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"llm_model": ("LLM_MODEL",),
"text_input": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Data..."}),
"classifier_list": ("STRING", {"multiline": False, "dynamicPrompts": False}),
},
"optional": {
"extra_directions": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Extra directions for the LLM to follow..."}),
}
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("json_output",)
FUNCTION = "compose_json"
CATEGORY = "SALT/Llama-Index/Tools/JSON"
def compose_json(self, llm_model, text_input, classifier_list, extra_directions=""):
classifier_list = [item.strip() for item in classifier_list.split(",") if item.strip()]
prompt = f"{text_input}\n\n###\n\nGiven the above text, create a valid JSON object utilizing *all* of the data; using the following classifiers: {classifier_list}.\n\n{extra_directions}\n\nPlease ensure the JSON output is properly formatted, and does not omit any data."
response = llm_model.complete(prompt)
return (response.text,)
class LLMJsonRepair:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"llm_model": ("LLM_MODEL",),
"text_input": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Malformed JSON..."}),
},
"optional": {
"extra_directions": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Extra directions for the LLM to follow..."}),
}
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("json_output",)
FUNCTION = "compose_json"
CATEGORY = "SALT/Llama-Index/Tools/JSON"
def compose_json(self, llm_model, text_input, extra_directions=""):
prompt = (
f"{text_input}\n\n###\n\n"
"Given the above malformed JSON, please inspect it and repair it so that it's valid JSON, without changing or loosing any data if possible."
f"{extra_directions}\n\n"
"Please ensure the JSON output is properly formatted, and does not omit any data."
)
response = llm_model.complete(prompt)
return (response.text,)
class LLMYamlComposer:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"llm_model": ("LLM_MODEL",),
"text_input": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Data..."}),
"classifier_list": ("STRING", {"multiline": False, "dynamicPrompts": False}),
},
"optional": {
"extra_directions": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Extra directions for the LLM to follow..."}),
}
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("yaml_output",)
FUNCTION = "compose_yaml"
CATEGORY = "SALT/Llama-Index/Tools/YAML"
def compose_yaml(self, llm_model, text_input, classifier_list, extra_directions=""):
classifier_list = [item.strip() for item in classifier_list.split(",") if item.strip()]
prompt = (
f"{text_input}\n\n###\n\n"
"Given the above text, create a valid YAML document utilizing *all* of the data; "
f"using the following classifiers: {classifier_list}.\n\n"
f"{extra_directions}\n\n"
"Please ensure the YAML output is properly formatted, and does not omit any data."
)
response = llm_model.complete(prompt)
return (response.text,)
class LLMYamlRepair:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"llm_model": ("LLM_MODEL",),
"text_input": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Malformed YAML..."}),
},
"optional": {
"extra_directions": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Extra directions for the LLM to follow..."}),
}
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("yaml_output",)
FUNCTION = "repair_yaml"
CATEGORY = "SALT/Llama-Index/Tools/YAML"
def repair_yaml(self, llm_model, text_input, extra_directions=""):
prompt = (
f"{text_input}\n\n###\n\n"
"Given the above malformed YAML, please inspect it and repair it so that it's valid YAML, without changing or losing any data if possible."
f"{extra_directions}\n\n"
"Please ensure the YAML output is properly formatted, and does not omit any data."
)
response = llm_model.complete(prompt)
return (response.text,)
class LLMMarkdownComposer:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"llm_model": ("LLM_MODEL",),
"text_input": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Data..."}),
"classifier_list": ("STRING", {"multiline": False, "dynamicPrompts": False}),
},
"optional": {
"extra_directions": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Extra directions for the LLM to follow..."}),
}
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("markdown_output",)
FUNCTION = "compose_markdown"
CATEGORY = "SALT/Llama-Index/Tools/Markdown"
def compose_markdown(self, llm_model, text_input, classifier_list, extra_directions=""):
classifier_list = [item.strip() for item in classifier_list.split(",") if item.strip()]
prompt = (
f"{text_input}\n\n###\n\n"
"Given the above text, create a valid Markdown document utilizing *all* of the data; "
f"using the following classifiers: {classifier_list}.\n\n"
f"{extra_directions}\n\n"
"Please ensure the Markdown output is well-structured, and does not omit any data."
)
response = llm_model.complete(prompt)
return (response.text,)
class LLMMarkdownRepair:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"llm_model": ("LLM_MODEL",),
"text_input": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Malformed Markdown..."}),
},
"optional": {
"extra_directions": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Extra directions for the LLM to follow..."}),
}
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("markdown_output",)
FUNCTION = "repair_markdown"
CATEGORY = "SALT/Llama-Index/Tools/Markdown"
def repair_markdown(self, llm_model, text_input, extra_directions=""):
prompt = (
f"{text_input}\n\n###\n\n"
"Given the above malformed Markdown, please inspect it and repair it so that it's valid Markdown, without changing or losing any data if possible."
f"{extra_directions}\n\n"
"Please ensure the Markdown output is well-structured, and does not omit any data."
)
response = llm_model.complete(prompt)
return (response.text,)
class LLMHtmlComposer:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"llm_model": ("LLM_MODEL",),
"text_input": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Data..."}),
"classifier_list": ("STRING", {"multiline": False, "dynamicPrompts": False}),
},
"optional": {
"extra_directions": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Extra directions for the LLM to follow..."}),
"composer_mode": (["full_markup", "blocked_markup"],)
}
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("html_output",)
FUNCTION = "compose_html"
CATEGORY = "SALT/Llama-Index/Tools/HTML"
def compose_html(self, llm_model, text_input, classifier_list, extra_directions="", composer_mode="full_markup"):
classifier_list = [item.strip() for item in classifier_list.split(",") if item.strip()]
markup_style = "full HTML page document" if composer_mode == "full_markup" else "HTML snippet (without html, head, body or any extraneous containers)"
prompt = (
f"{text_input}\n\n###\n\n"
f"Given the above text, create a valid {markup_style} utilizing *all* of the data, intact; "
f"using the following classifiers: {classifier_list}.\n\n"
f"{extra_directions}\n\n"
"Please ensure the HTML output is well-structured, valid, and does not omit any data."
)
response = llm_model.complete(prompt)
return (response.text,)
class LLMHtmlRepair:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"llm_model": ("LLM_MODEL",),
"text_input": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Malformed HTML..."}),
},
"optional": {
"extra_directions": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Extra directions for the LLM to follow..."}),
}
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("html_output",)
FUNCTION = "repair_html"
CATEGORY = "SALT/Llama-Index/Tools/HTML"
def repair_html(self, llm_model, text_input, extra_directions=""):
prompt = (
f"{text_input}\n\n###\n\n"
"Given the above malformed HTML, please inspect it and repair it so that it's valid HTML, without changing or losing any data if possible."
f"{extra_directions}\n\n"
"Please ensure the HTML output is well-structured, valid,, and does not omit any data."
)
response = llm_model.complete(prompt)
return (response.text,)
class LLMRegexCreator:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"llm_model": ("LLM_MODEL",),
"description": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Describe regex pattern to create, optionally provide example"}),
},
"optional": {
"extra_directions": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Extra directions for the LLM to follow..."}),
}
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("regex_pattern",)
FUNCTION = "create_regex"
CATEGORY = "SALT/Llama-Index/Tools/Regex"
def create_regex(self, llm_model, description, extra_directions=""):
prompt = (
f"Create only a well formed regex pattern based on the following description:\n\n"
f"{description}\n\n"
f"{extra_directions}\n\n"
"Please ensure the regex pattern is concise and accurately matches the described criteria."
)
response = llm_model.complete(prompt)
return (response.text,)
class LLMRegexRepair:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"llm_model": ("LLM_MODEL",),
"text_input": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Enter the malformed regex pattern here"}),
"description": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Describe what the regex pattern does wrong, and what it should do."}),
},
"optional": {
"extra_directions": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "Extra directions for the LLM to follow, such as specific constraints or formats"}),
}
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("repaired_regex_pattern",)
FUNCTION = "repair_regex"
CATEGORY = "SALT/Llama-Index/Tools/Regex"
def repair_regex(self, llm_model, text_input, description, extra_directions=""):
prompt = (
f"Given the potentially malformed or incorrect regex pattern:\n\n{text_input}\n\n"
f"and the following description of what the pattern should match:\n\n{description}\n\n"
f"{extra_directions}\n\n"
"Please repair the regex pattern so it is well-formed and accurately matches the described criteria."
)
response = llm_model.complete(prompt)
return (response.text,)
# Formatting
class LLMPydanticOutput:
def __init__(self):
pass
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"llm_model": ("LLM_MODEL",),
"llm_documents": ("LLM_DOCUMENTS",),
"output_model_name": ("STRING", {"default": "OutputModel"}),
"output_model": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": '''{
"name": "",
"age": 0,
"best_known_for": [""],
"extra_info": "",
"dictionary_example": {}
}'''}),
},
"optional": {
"summary_query": ("STRING", {"default": "Summarize"})
}
}
RETURN_TYPES = ("STRING", "LIST")
RETURN_NAMES = ("string_responses", "response_objects_list")
FUNCTION = "generate_summary"
CATEGORY = "SALT/Llama-Index/Summarization"
def generate_summary(self, llm_model, llm_documents, output_model_name, output_model, summary_query="Summarize"):
output_model_json = json.loads(output_model)
OutputModel = CreateOutputModel.create(output_model_json, output_model_name)
summarizer = TreeSummarize(verbose=True, output_cls=OutputModel, llm=llm_model)
        from pprint import pprint
        responses = []
        for doc in llm_documents:
            response = summarizer.get_response(summary_query, doc.text)
            pprint(response)  # debug output of the structured response object
            responses.append(response)
string_response = repr(responses)
return (string_response, responses)
NODE_CLASS_MAPPINGS = {
"LLMJsonComposer": LLMJsonComposer,
"LLMJsonRepair": LLMJsonRepair,
"LLMYamlComposer": LLMYamlComposer,
"LLMYamlRepair": LLMYamlRepair,
"LLMMarkdownComposer": LLMMarkdownComposer,
"LLMMarkdownRepair": LLMMarkdownRepair,
"LLMHtmlComposer": LLMHtmlComposer,
"LLMHtmlRepair": LLMHtmlRepair,
"LLMRegexCreator": LLMRegexCreator,
"LLMRegexRepair": LLMRegexRepair,
}
NODE_DISPLAY_NAME_MAPPINGS = {
"LLMJsonComposer": "LLM JSON Composer",
"LLMJsonRepair": "LLM JSON Repair",
"LLMYamlComposer": "LLM YAML Composer",
"LLMYamlRepair": "LLM YAML Repair",
"LLMMarkdownComposer": "LLM Markdown Composer",
"LLMMarkdownRepair": "LLM Markdown Repair",
"LLMHtmlComposer": "LLM HTML Composer",
"LLMHtmlRepair": "LLM HTML Repair",
"LLMRegexCreator": "LLM Regex Creator",
"LLMRegexRepair": "LLM Regex Repair"
}
| [
"llama_index.core.response_synthesizers.TreeSummarize"
] | [((15271, 15295), 'json.loads', 'json.loads', (['output_model'], {}), '(output_model)\n', (15281, 15295), False, 'import json\n'), ((15402, 15468), 'llama_index.core.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'verbose': '(True)', 'output_cls': 'OutputModel', 'llm': 'llm_model'}), '(verbose=True, output_cls=OutputModel, llm=llm_model)\n', (15415, 15468), False, 'from llama_index.core.response_synthesizers import TreeSummarize\n'), ((15649, 15665), 'pprint.pprint', 'pprint', (['response'], {}), '(response)\n', (15655, 15665), False, 'from pprint import pprint\n')] |
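A minimal sketch (not from the file above) of exercising one of these nodes outside ComfyUI: StubLLM is a hypothetical stand-in for any object exposing complete(prompt) and returning a response with a .text attribute, which is all the node classes above rely on.

# Hypothetical stub model; a real LLM_MODEL would generate the pattern.
class StubLLM:
    class _Response:
        def __init__(self, text):
            self.text = text

    def complete(self, prompt):
        return self._Response(r"^\d{4}-\d{2}-\d{2}$")

node = LLMRegexCreator()
(pattern,) = node.create_regex(StubLLM(), "Match an ISO-8601 date such as 2024-01-31")
print(pattern)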
import logging
import os
import sys
from shutil import rmtree
import openai
from llama_index import ServiceContext, SimpleDirectoryReader, TreeIndex
from llama_index.llms.openai import OpenAI
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
openai.api_key = "OPENAI_KEY"
service_context = ServiceContext.from_defaults(llm=OpenAI())
def build_index(data_dir: str, knowledge_base_dir: str) -> None:
"""Build the vector index from the markdown files in the directory."""
print("Building vector index...")
documents = SimpleDirectoryReader(data_dir).load_data()
index = TreeIndex.from_documents(documents, service_context=service_context)
index.storage_context.persist(persist_dir=knowledge_base_dir)
print("Done.")
def main() -> None:
"""Build the vector index from the markdown files in the directory."""
base_dir = os.path.dirname(os.path.abspath(__file__))
knowledge_base_dir = os.path.join(base_dir, "kb")
# Delete Storage Directory
if os.path.exists(knowledge_base_dir):
rmtree(knowledge_base_dir)
data_dir = os.path.join(base_dir, "content", "blogs")
build_index(data_dir, knowledge_base_dir)
if __name__ == "__main__":
main()
| [
"llama_index.SimpleDirectoryReader",
"llama_index.TreeIndex.from_documents",
"llama_index.llms.openai.OpenAI"
] | [((194, 252), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (213, 252), False, 'import logging\n'), ((284, 324), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (305, 324), False, 'import logging\n'), ((673, 741), 'llama_index.TreeIndex.from_documents', 'TreeIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (697, 741), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, TreeIndex\n'), ((1011, 1039), 'os.path.join', 'os.path.join', (['base_dir', '"""kb"""'], {}), "(base_dir, 'kb')\n", (1023, 1039), False, 'import os\n'), ((1078, 1112), 'os.path.exists', 'os.path.exists', (['knowledge_base_dir'], {}), '(knowledge_base_dir)\n', (1092, 1112), False, 'import os\n'), ((1164, 1206), 'os.path.join', 'os.path.join', (['base_dir', '"""content"""', '"""blogs"""'], {}), "(base_dir, 'content', 'blogs')\n", (1176, 1206), False, 'import os\n'), ((253, 272), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (270, 272), False, 'import logging\n'), ((410, 418), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {}), '()\n', (416, 418), False, 'from llama_index.llms.openai import OpenAI\n'), ((959, 984), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (974, 984), False, 'import os\n'), ((1122, 1148), 'shutil.rmtree', 'rmtree', (['knowledge_base_dir'], {}), '(knowledge_base_dir)\n', (1128, 1148), False, 'from shutil import rmtree\n'), ((616, 647), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['data_dir'], {}), '(data_dir)\n', (637, 647), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, TreeIndex\n')] |
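As a hedged follow-up sketch (not part of the script above): the persisted index can be read back for querying with the same legacy llama_index APIs; "kb" matches the knowledge_base_dir written by main(), and service_context refers to the one defined above.

from llama_index import StorageContext, load_index_from_storage

# Rebuild the TreeIndex from the persisted storage directory and run a query.
storage_context = StorageContext.from_defaults(persist_dir="kb")
index = load_index_from_storage(storage_context, service_context=service_context)
query_engine = index.as_query_engine()
print(query_engine.query("What topics do these blog posts cover?"))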
from llama_index.core.tools import FunctionTool
import os
note_file = os.path.join('data', 'notes.txt')
def save_notes(note):
if not os.path.exists(note_file):
open(note_file, 'w')
with open(note_file, 'a') as f:
f.writelines([note + '\n'])
return "Note saved"
note_engine = FunctionTool.from_defaults(
fn=save_notes,
name="note_saver",
description="Save text based note to a file for the user"
) | [
"llama_index.core.tools.FunctionTool.from_defaults"
] | [((70, 103), 'os.path.join', 'os.path.join', (['"""data"""', '"""notes.txt"""'], {}), "('data', 'notes.txt')\n", (82, 103), False, 'import os\n'), ((308, 432), 'llama_index.core.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'save_notes', 'name': '"""note_saver"""', 'description': '"""Save text based note to a file for the user"""'}), "(fn=save_notes, name='note_saver', description=\n 'Save text based note to a file for the user')\n", (334, 432), False, 'from llama_index.core.tools import FunctionTool\n'), ((138, 163), 'os.path.exists', 'os.path.exists', (['note_file'], {}), '(note_file)\n', (152, 163), False, 'import os\n')] |
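A hedged illustration (not part of the file above) of how such a tool is usually consumed: hand it to an agent so the LLM decides when to call it. This assumes the llama-index OpenAI integration package is installed and OPENAI_API_KEY is set.

from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI

# The agent can now call note_saver whenever the conversation asks to record something.
agent = ReActAgent.from_tools([note_engine], llm=OpenAI(model="gpt-3.5-turbo"), verbose=True)
print(agent.chat("Please save a note that the demo starts at 3pm."))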
import boto3
import os
import json
# from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
from langchain_community.llms import Bedrock
from llama_index.node_parser import SimpleNodeParser
from llama_index.embeddings import LangchainEmbedding
from langchain_community.embeddings import BedrockEmbeddings
from llama_index import ServiceContext, OpenAIEmbedding, PromptHelper
from llama_index import VectorStoreIndex
from llama_index import download_loader
from llama_index.postprocessor.cohere_rerank import CohereRerank
from credentials import cohere_api_key , openai_api_key
import pandas as pd
from pandasai import SmartDataframe
from pandasai.llm import OpenAI
from llama_index.llms import OpenAI as llama_openai
from model import *
from questions import *
import random
import logging
from collections import defaultdict
import shutil
import numpy as np
from llama_index.node_parser import SentenceWindowNodeParser, SimpleNodeParser
logging.basicConfig(filename='rag.log', encoding='utf-8', level=logging.INFO)
class rag_on_s3:
def __init__(self):
session = boto3.Session()
self.bedrock_runtime = boto3.client("bedrock-runtime")
self.s3 = session.client('s3')
self.cohere_api_key = cohere_api_key
self.S3Reader = download_loader("S3Reader")
self.root_bucket = "demo-industry-specific"
os.environ['OPENAI_API_KEY'] = openai_api_key
self.model_configuration("OpenAI")
def get_model_name(self):
model_names = list(models_ids.keys())
return model_names
def get_industry_names(self):
return list(industry_specific_bucket.keys())
def model_configuration(self,model_name):
model_id = models_ids.get(model_name)
model_kwargs = model_parameters.get(model_name)
if model_id == "OpenAI":
llm = llama_openai(model='gpt-3.5-turbo', temperature=0, max_tokens=256)
embed_model = OpenAIEmbedding()
else:
llm = Bedrock(
client=self.bedrock_runtime,
model_id=model_id,
model_kwargs=model_kwargs
)
# bedrock_embedding = BedrockEmbeddings(
# client=self.bedrock_runtime,
# model_id="amazon.titan-embed-text-v1",
# )
# embed_model = LangchainEmbedding(bedrock_embedding)
embed_model = OpenAIEmbedding()
self.service_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt="You are an AI assistant answering questions"
)
response = f"{model_name} successfully loaded"
return response
def get_indexes(self, industry_name):
self.industry_name = industry_specific_bucket[industry_name]
logging.info("getting indexes")
folder_path = "/home/ubuntu/RAG_API/"+self.industry_name+"-index"
logging.info(folder_path)
if os.path.exists(folder_path):
logging.info("folder exists")
if os.path.isdir(folder_path):
logging.info("removing dir")
shutil.rmtree(folder_path, ignore_errors=True)
logging.info("creting indexes")
loader = self.S3Reader(bucket=self.industry_name)
docs = loader.load_data()
sentence_node_parser = SentenceWindowNodeParser.from_defaults(
window_size=3,
window_metadata_key="window",
original_text_metadata_key="original_text")
# base_node_parser = SimpleNodeParser()
nodes = sentence_node_parser.get_nodes_from_documents(docs)
# base_nodes = base_node_parser.get_nodes_from_documents(docs)
self.service_context = ServiceContext.from_service_context(
service_context = self.service_context,
node_parser = nodes
)
index = VectorStoreIndex(
nodes,
service_context=self.service_context)
index.storage_context.persist(folder_path)
logging.info("indexing successful")
response = "Index successfully created"
return response
# to handle text data eg : .txt , .pdf, .docx
def get_docs(self, industry_name):
self.industry_name = industry_specific_bucket[industry_name]
folder_path = "/home/ubuntu/RAG_API/"+self.industry_name+"-index"
if (os.path.exists(folder_path)==False):
self.get_indexes(industry_name)
filenames = []
files = self.s3.list_objects_v2(Bucket=self.industry_name, Delimiter='/')
for item in files['Contents']:
f = item['Key']
filenames.append(f)
logging.info(filenames)
response = str(len(filenames)) + " Documents sucessfully indexed"
ques = question_list[industry_name]
ques_list = random.sample(ques, 4)
total_docs = len(filenames)
filetypes = defaultdict()
for fn in filenames:
ftype = fn.split('.')[-1]
if ftype not in filetypes:
filetypes[ftype] = []
filetypes[ftype].append(fn)
logging.info(filetypes)
filetype_response = "{total_docs} documents loaded. ".format(total_docs=total_docs)
for key in filetypes:
l = len(filetypes[key])
filetype_response += "{l} {key} files ".format(l=l, key=key)
filetypes["All"] = filenames
logging.info(filetype_response)
return filetype_response, ques_list, filetypes
def open_file(self, filename):
response = self.s3.generate_presigned_url('get_object',
Params = {'Bucket': self.industry_name,
'Key':filename},
ExpiresIn=3000)
return response
def query_text(self,query,index,model_name):
cohere_rerank = CohereRerank(api_key=self.cohere_api_key, top_n=2)
query_engine = index.as_query_engine(
similarity_top_k=5,
node_postprocessors=[cohere_rerank],
service_context=self.service_context
)
if model_name == "Anthropic":
response = query_engine.query(f"\n\nHuman:{query}\n\nAssistant:")
else:
response = query_engine.query(query)
file_names = [details['file_name'] for key, details in response.metadata.items()]
score = [node_with_score.score for node_with_score in response.source_nodes]
logging.info(response.metadata)
source = set()
if(score[0] < 0.8 and score[0] > 0.5):
source.add(file_names[0])
else:
for i in range(len(score)):
if score[i] >= 0.8 :
source.add(file_names[i])
print(score)
source = list(source)
if len(source) == 0:
final_source = "Not Found"
final_response = "Sorry, I couldn't find the relevant information to the given query !!"
else :
final_source = source
final_response = response.response
return final_response,final_source
# to handle .csv data
def display_csv_files(self):
response = self.s3.list_objects_v2(Bucket="berrywise-database")
object_keys = [obj['Key'] for obj in response['Contents']]
return object_keys
def get_csv_file(self,csv_file_key):
directory_path = "/home/ubuntu/RAG_API/csv_file"
files_in_directory = os.listdir(directory_path)
for filename in files_in_directory:
file_path = os.path.join(directory_path, filename) # Construct full path
os.remove(file_path)
self.s3.download_file('berrywise-database', csv_file_key, "/home/ubuntu/RAG_API/csv_file/"+csv_file_key)
file_name= os.listdir(directory_path)
df = pd.read_csv(f"/home/ubuntu/RAG_API/csv_file/{file_name[0]}")
data = df.head(5)
columns = df.columns.tolist()
# logging.info(data)
json_output = data.to_json(orient='records', force_ascii=False)
parsed_json = json.loads(json_output)
output = json.dumps(parsed_json)
logging.info(output)
print(output)
return output, columns
def csv_query(self,query):
folder_path = "/home/ubuntu/RAG_API/csv_file"
file_name= os.listdir(folder_path)
df = pd.read_csv(f"/home/ubuntu/RAG_API/csv_file/{file_name[0]}")
llm = OpenAI(api_token=openai_api_key)
df = SmartDataframe(df, config={"llm": llm})
result = df.chat(query)
if(type(result) == np.integer):
return int(result)
if (type(result) == int or type(result) == np.float64):
json_output = json.dumps(result, default=str)
return json_output
if(type(result) == SmartDataframe):
result = result.to_dict(orient='records')
return result
return result
# if (type(result) == int or type(result) == np.float64):
# json_output = json.dumps(result)
# return json_output
# elif type(result) == str :
# json_output = json.dumps(result)
# return json_output
# else:
# json_output = json.dumps(result.to_dict())
# # json_output = json.dumps(result)
# return json_output
# # llm = OpenAI(api_token=openai_api_key)
# agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df, verbose=True)
# prompt = {
# "tool_names" : ["pandas"],
# "tools" : {
# "pandas" : {
# "df":df.to_dict()
# }
# },
# "input":query
# }
# result = agent.run(prompt)
| [
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.OpenAIEmbedding",
"llama_index.VectorStoreIndex",
"llama_index.postprocessor.cohere_rerank.CohereRerank",
"llama_index.ServiceContext.from_service_context"
] | [((974, 1051), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""rag.log"""', 'encoding': '"""utf-8"""', 'level': 'logging.INFO'}), "(filename='rag.log', encoding='utf-8', level=logging.INFO)\n", (993, 1051), False, 'import logging\n'), ((1113, 1128), 'boto3.Session', 'boto3.Session', ([], {}), '()\n', (1126, 1128), False, 'import boto3\n'), ((1160, 1191), 'boto3.client', 'boto3.client', (['"""bedrock-runtime"""'], {}), "('bedrock-runtime')\n", (1172, 1191), False, 'import boto3\n'), ((1300, 1327), 'llama_index.download_loader', 'download_loader', (['"""S3Reader"""'], {}), "('S3Reader')\n", (1315, 1327), False, 'from llama_index import download_loader\n'), ((2497, 2624), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': '"""You are an AI assistant answering questions"""'}), "(llm=llm, embed_model=embed_model,\n system_prompt='You are an AI assistant answering questions')\n", (2525, 2624), False, 'from llama_index import ServiceContext, OpenAIEmbedding, PromptHelper\n'), ((2857, 2888), 'logging.info', 'logging.info', (['"""getting indexes"""'], {}), "('getting indexes')\n", (2869, 2888), False, 'import logging\n'), ((2971, 2996), 'logging.info', 'logging.info', (['folder_path'], {}), '(folder_path)\n', (2983, 2996), False, 'import logging\n'), ((3008, 3035), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (3022, 3035), False, 'import os\n'), ((3247, 3278), 'logging.info', 'logging.info', (['"""creting indexes"""'], {}), "('creting indexes')\n", (3259, 3278), False, 'import logging\n'), ((3403, 3535), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': '(3)', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""'}), "(window_size=3, window_metadata_key=\n 'window', original_text_metadata_key='original_text')\n", (3441, 3535), False, 'from llama_index.node_parser import SentenceWindowNodeParser, SimpleNodeParser\n'), ((3793, 3889), 'llama_index.ServiceContext.from_service_context', 'ServiceContext.from_service_context', ([], {'service_context': 'self.service_context', 'node_parser': 'nodes'}), '(service_context=self.service_context,\n node_parser=nodes)\n', (3828, 3889), False, 'from llama_index import ServiceContext, OpenAIEmbedding, PromptHelper\n'), ((3946, 4007), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'service_context': 'self.service_context'}), '(nodes, service_context=self.service_context)\n', (3962, 4007), False, 'from llama_index import VectorStoreIndex\n'), ((4093, 4128), 'logging.info', 'logging.info', (['"""indexing successful"""'], {}), "('indexing successful')\n", (4105, 4128), False, 'import logging\n'), ((4751, 4774), 'logging.info', 'logging.info', (['filenames'], {}), '(filenames)\n', (4763, 4774), False, 'import logging\n'), ((4923, 4945), 'random.sample', 'random.sample', (['ques', '(4)'], {}), '(ques, 4)\n', (4936, 4945), False, 'import random\n'), ((5003, 5016), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (5014, 5016), False, 'from collections import defaultdict\n'), ((5210, 5233), 'logging.info', 'logging.info', (['filetypes'], {}), '(filetypes)\n', (5222, 5233), False, 'import logging\n'), ((5511, 5542), 'logging.info', 'logging.info', (['filetype_response'], {}), '(filetype_response)\n', (5523, 5542), False, 'import logging\n'), ((6040, 6090), 
'llama_index.postprocessor.cohere_rerank.CohereRerank', 'CohereRerank', ([], {'api_key': 'self.cohere_api_key', 'top_n': '(2)'}), '(api_key=self.cohere_api_key, top_n=2)\n', (6052, 6090), False, 'from llama_index.postprocessor.cohere_rerank import CohereRerank\n'), ((6650, 6681), 'logging.info', 'logging.info', (['response.metadata'], {}), '(response.metadata)\n', (6662, 6681), False, 'import logging\n'), ((7642, 7668), 'os.listdir', 'os.listdir', (['directory_path'], {}), '(directory_path)\n', (7652, 7668), False, 'import os\n'), ((7965, 7991), 'os.listdir', 'os.listdir', (['directory_path'], {}), '(directory_path)\n', (7975, 7991), False, 'import os\n'), ((8005, 8065), 'pandas.read_csv', 'pd.read_csv', (['f"""/home/ubuntu/RAG_API/csv_file/{file_name[0]}"""'], {}), "(f'/home/ubuntu/RAG_API/csv_file/{file_name[0]}')\n", (8016, 8065), True, 'import pandas as pd\n'), ((8271, 8294), 'json.loads', 'json.loads', (['json_output'], {}), '(json_output)\n', (8281, 8294), False, 'import json\n'), ((8312, 8335), 'json.dumps', 'json.dumps', (['parsed_json'], {}), '(parsed_json)\n', (8322, 8335), False, 'import json\n'), ((8358, 8378), 'logging.info', 'logging.info', (['output'], {}), '(output)\n', (8370, 8378), False, 'import logging\n'), ((8546, 8569), 'os.listdir', 'os.listdir', (['folder_path'], {}), '(folder_path)\n', (8556, 8569), False, 'import os\n'), ((8583, 8643), 'pandas.read_csv', 'pd.read_csv', (['f"""/home/ubuntu/RAG_API/csv_file/{file_name[0]}"""'], {}), "(f'/home/ubuntu/RAG_API/csv_file/{file_name[0]}')\n", (8594, 8643), True, 'import pandas as pd\n'), ((8658, 8690), 'pandasai.llm.OpenAI', 'OpenAI', ([], {'api_token': 'openai_api_key'}), '(api_token=openai_api_key)\n', (8664, 8690), False, 'from pandasai.llm import OpenAI\n'), ((8704, 8743), 'pandasai.SmartDataframe', 'SmartDataframe', (['df'], {'config': "{'llm': llm}"}), "(df, config={'llm': llm})\n", (8718, 8743), False, 'from pandasai import SmartDataframe\n'), ((1881, 1947), 'llama_index.llms.OpenAI', 'llama_openai', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'max_tokens': '(256)'}), "(model='gpt-3.5-turbo', temperature=0, max_tokens=256)\n", (1893, 1947), True, 'from llama_index.llms import OpenAI as llama_openai\n'), ((1974, 1991), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (1989, 1991), False, 'from llama_index import ServiceContext, OpenAIEmbedding, PromptHelper\n'), ((2037, 2124), 'langchain_community.llms.Bedrock', 'Bedrock', ([], {'client': 'self.bedrock_runtime', 'model_id': 'model_id', 'model_kwargs': 'model_kwargs'}), '(client=self.bedrock_runtime, model_id=model_id, model_kwargs=\n model_kwargs)\n', (2044, 2124), False, 'from langchain_community.llms import Bedrock\n'), ((2447, 2464), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (2462, 2464), False, 'from llama_index import ServiceContext, OpenAIEmbedding, PromptHelper\n'), ((3049, 3078), 'logging.info', 'logging.info', (['"""folder exists"""'], {}), "('folder exists')\n", (3061, 3078), False, 'import logging\n'), ((3094, 3120), 'os.path.isdir', 'os.path.isdir', (['folder_path'], {}), '(folder_path)\n', (3107, 3120), False, 'import os\n'), ((4457, 4484), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (4471, 4484), False, 'import os\n'), ((7738, 7776), 'os.path.join', 'os.path.join', (['directory_path', 'filename'], {}), '(directory_path, filename)\n', (7750, 7776), False, 'import os\n'), ((7812, 7832), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (7821, 
7832), False, 'import os\n'), ((8940, 8971), 'json.dumps', 'json.dumps', (['result'], {'default': 'str'}), '(result, default=str)\n', (8950, 8971), False, 'import json\n'), ((3138, 3166), 'logging.info', 'logging.info', (['"""removing dir"""'], {}), "('removing dir')\n", (3150, 3166), False, 'import logging\n'), ((3183, 3229), 'shutil.rmtree', 'shutil.rmtree', (['folder_path'], {'ignore_errors': '(True)'}), '(folder_path, ignore_errors=True)\n', (3196, 3229), False, 'import shutil\n')] |
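A hedged usage sketch for the class above; "Healthcare" is a placeholder industry name, since the accepted keys live in model.py and credentials.py (not shown here), and running it requires valid AWS and OpenAI credentials.

# Hypothetical driver; industry_specific_bucket in model.py defines the accepted industry names.
rag = rag_on_s3()
print(rag.model_configuration("OpenAI"))
summary, sample_questions, files_by_type = rag.get_docs("Healthcare")
print(summary)
print(sample_questions)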
from llama_index.tools import FunctionTool
import os
note_file = os.path.join("data", "notes.txt")
def save_note(note):
if not os.path.exists(note_file):
open(note_file, "w")
with open(note_file, "a") as f:
f.writelines([note + "\n"])
return "note saved"
note_engine = FunctionTool.from_defaults(
fn=save_note,
name="note_saver",
description="this tool can save a text based note to a file for the user",
) | [
"llama_index.tools.FunctionTool.from_defaults"
] | [((66, 99), 'os.path.join', 'os.path.join', (['"""data"""', '"""notes.txt"""'], {}), "('data', 'notes.txt')\n", (78, 99), False, 'import os\n'), ((304, 443), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'save_note', 'name': '"""note_saver"""', 'description': '"""this tool can save a text based note to a file for the user"""'}), "(fn=save_note, name='note_saver', description=\n 'this tool can save a text based note to a file for the user')\n", (330, 443), False, 'from llama_index.tools import FunctionTool\n'), ((134, 159), 'os.path.exists', 'os.path.exists', (['note_file'], {}), '(note_file)\n', (148, 159), False, 'import os\n')] |
import asyncio
import json
from typing import Any, Tuple, List
from langchain.base_language import BaseLanguageModel
from langchain.tools import DuckDuckGoSearchResults, BaseTool
from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext
from llama_index.response_synthesizers import TreeSummarize
class WebSearchTool(DuckDuckGoSearchResults):
name: str = "web_search"
description: str = \
"Useful for when you need to search answer in the internet. " \
"Input should be a search query (like you would google it). " \
"If relevant, include location and date to get more accurate results. " \
"You will get a list of urls and a short snippet of the page. "
async def _arun(self, *args: Any, **kwargs: Any) -> Any:
return self._run(*args, **kwargs)
class AskPagesTool(BaseTool):
llm: BaseLanguageModel
_page_loader = download_loader("SimpleWebPageReader")(html_to_text=True) # noqa
name: str = "ask_urls"
description: str = \
"You can ask a question about a URL. " \
"That smart tool will parse URL content and answer your question. " \
"Provide provide urls and questions in json format. " \
"urls is a list of urls to ask corresponding question from questions list" \
'Example: {"urls": ["https://en.wikipedia.org/wiki/Cat", "https://en.wikipedia.org/wiki/Dog"], ' \
'"questions": ["How many cats in the world?", "How many dogs in the world?"]}'
def _get_page_index(self, page: Document) -> GPTListIndex:
llm_predictor_chatgpt = LLMPredictor(self.llm)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor_chatgpt, chunk_size=1024)
doc_summary_index = GPTListIndex.from_documents(
[page],
service_context=service_context,
response_synthesizer=TreeSummarize(service_context=service_context)
)
return doc_summary_index
def _get_url_index(self, url: str) -> GPTListIndex:
page = self._page_loader.load_data(urls=[url])[0]
return self._get_page_index(page)
@staticmethod
def _parse_args(*args, **kwargs) -> List[Tuple[str, str]]:
if len(args) == 1:
urls_and_questions_dict = json.loads(args[0])
urls = urls_and_questions_dict["urls"]
questions = urls_and_questions_dict["questions"]
else:
urls = kwargs["urls"]
questions = kwargs["questions"]
if len(urls) > 1 and len(questions) == 1:
questions = questions * len(urls)
if len(questions) > 1 and len(urls) == 1:
urls = urls * len(questions)
if len(urls) != len(questions):
raise ValueError("Number of urls and questions should be equal")
return list(zip(urls, questions))
def _run_single(self, url: str, question: str) -> str:
page_index = self._get_url_index(url)
llm_predictor_chatgpt = LLMPredictor(self.llm)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor_chatgpt, chunk_size=1024)
query_engine = page_index.as_query_engine(
response_synthesizer=TreeSummarize(service_context=service_context), use_async=False)
response = query_engine.query(question)
return response.response
async def _arun_single(self, url: str, question: str) -> str:
page_index = self._get_url_index(url)
llm_predictor_chatgpt = LLMPredictor(self.llm)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor_chatgpt, chunk_size=1024)
query_engine = page_index.as_query_engine(
response_synthesizer=TreeSummarize(service_context=service_context), use_async=False)
response = await query_engine.aquery(question)
return response.response
def _run(self, *args, **kwargs) -> Any:
try:
urls_with_questions = self._parse_args(*args, **kwargs)
full_response = ""
for url, question in urls_with_questions:
answer = self._run_single(url, question)
full_response += f"Question: {question} to {url}\nAnswer: {answer}\n"
except Exception as e:
full_response = f"Error: {e}"
return full_response
async def _arun(self, *args, **kwargs) -> Any:
try:
urls_with_questions = self._parse_args(*args, **kwargs)
tasks = []
for url, question in urls_with_questions:
tasks.append(self._arun_single(url, question))
answers = await asyncio.gather(*tasks)
full_response = ""
for i in range(len(urls_with_questions)):
url, question = urls_with_questions[i]
answer = answers[i]
full_response += f"Question: {question} to {url}\nAnswer: {answer}\n"
except Exception as e:
full_response = f"Error: {e}"
return full_response
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.response_synthesizers.TreeSummarize",
"llama_index.download_loader",
"llama_index.LLMPredictor"
] | [((916, 954), 'llama_index.download_loader', 'download_loader', (['"""SimpleWebPageReader"""'], {}), "('SimpleWebPageReader')\n", (931, 954), False, 'from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext\n'), ((1600, 1622), 'llama_index.LLMPredictor', 'LLMPredictor', (['self.llm'], {}), '(self.llm)\n', (1612, 1622), False, 'from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext\n'), ((1649, 1735), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor_chatgpt', 'chunk_size': '(1024)'}), '(llm_predictor=llm_predictor_chatgpt,\n chunk_size=1024)\n', (1677, 1735), False, 'from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext\n'), ((2989, 3011), 'llama_index.LLMPredictor', 'LLMPredictor', (['self.llm'], {}), '(self.llm)\n', (3001, 3011), False, 'from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext\n'), ((3038, 3124), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor_chatgpt', 'chunk_size': '(1024)'}), '(llm_predictor=llm_predictor_chatgpt,\n chunk_size=1024)\n', (3066, 3124), False, 'from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext\n'), ((3496, 3518), 'llama_index.LLMPredictor', 'LLMPredictor', (['self.llm'], {}), '(self.llm)\n', (3508, 3518), False, 'from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext\n'), ((3545, 3631), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor_chatgpt', 'chunk_size': '(1024)'}), '(llm_predictor=llm_predictor_chatgpt,\n chunk_size=1024)\n', (3573, 3631), False, 'from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext\n'), ((2281, 2300), 'json.loads', 'json.loads', (['args[0]'], {}), '(args[0])\n', (2291, 2300), False, 'import json\n'), ((1887, 1933), 'llama_index.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'service_context': 'service_context'}), '(service_context=service_context)\n', (1900, 1933), False, 'from llama_index.response_synthesizers import TreeSummarize\n'), ((3205, 3251), 'llama_index.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'service_context': 'service_context'}), '(service_context=service_context)\n', (3218, 3251), False, 'from llama_index.response_synthesizers import TreeSummarize\n'), ((3712, 3758), 'llama_index.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'service_context': 'service_context'}), '(service_context=service_context)\n', (3725, 3758), False, 'from llama_index.response_synthesizers import TreeSummarize\n'), ((4622, 4644), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (4636, 4644), False, 'import asyncio\n')] |
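A minimal, hedged example (not from the file above) of calling the page-question tool with the JSON contract its description spells out; ChatOpenAI is just one possible BaseLanguageModel and is assumed to be importable from the same langchain version used above.

from langchain.chat_models import ChatOpenAI

# One URL, one question, passed as the JSON string format described in the tool description.
pages_tool = AskPagesTool(llm=ChatOpenAI(temperature=0))
answer = pages_tool._run(
    '{"urls": ["https://en.wikipedia.org/wiki/Cat"], "questions": ["How long do domestic cats usually live?"]}'
)
print(answer)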
"""
This script demonstrates how to use the llama_index library to create and query a vector store index.
It loads documents from a directory, creates an index, and allows querying the index.
usage: python hello_persist.py --query "What is the author's name and job now?"
"""
import os
import sys
import argparse
import logging
from dotenv import load_dotenv
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
load_index_from_storage,
Settings,
)
from llama_index.embeddings.openai import OpenAIEmbedding
def main(query):
try:
# Load environment variables
load_dotenv()
# Configure logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# Configure embedding model
Settings.embed_model = OpenAIEmbedding(model_name="text-embedding-3-small")
# Set up storage directory
storage_directory = "./storage"
if not os.path.exists(storage_directory):
logging.info("Creating new index...")
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
index.storage_context.persist(persist_dir=storage_directory)
else:
logging.info("Loading existing index...")
storage_context = StorageContext.from_defaults(persist_dir=storage_directory)
index = load_index_from_storage(storage_context)
# Query the index
query_engine = index.as_query_engine()
response = query_engine.query(query)
print(response)
except Exception as e:
logging.error(f"An error occurred: {str(e)}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Query a vector store index.")
parser.add_argument("--query", default="What is the author's name and job now?", help="The query to ask the index.")
args = parser.parse_args()
main(args.query) | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.SimpleDirectoryReader",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((1804, 1870), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Query a vector store index."""'}), "(description='Query a vector store index.')\n", (1827, 1870), False, 'import argparse\n'), ((628, 641), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (639, 641), False, 'from dotenv import load_dotenv\n'), ((679, 737), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (698, 737), False, 'import logging\n'), ((887, 939), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model_name': '"""text-embedding-3-small"""'}), "(model_name='text-embedding-3-small')\n", (902, 939), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((777, 817), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (798, 817), False, 'import logging\n'), ((1032, 1065), 'os.path.exists', 'os.path.exists', (['storage_directory'], {}), '(storage_directory)\n', (1046, 1065), False, 'import os\n'), ((1079, 1116), 'logging.info', 'logging.info', (['"""Creating new index..."""'], {}), "('Creating new index...')\n", (1091, 1116), False, 'import logging\n'), ((1203, 1245), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1234, 1245), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage, Settings\n'), ((1345, 1386), 'logging.info', 'logging.info', (['"""Loading existing index..."""'], {}), "('Loading existing index...')\n", (1357, 1386), False, 'import logging\n'), ((1417, 1476), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'storage_directory'}), '(persist_dir=storage_directory)\n', (1445, 1476), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage, Settings\n'), ((1497, 1537), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1520, 1537), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage, Settings\n'), ((746, 765), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (763, 765), False, 'import logging\n'), ((1141, 1170), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (1162, 1170), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage, Settings\n')] |
import streamlit as st
from llama_hub.youtube_transcript import YoutubeTranscriptReader
from llama_hub.youtube_transcript import is_youtube_video
from llama_index import (
VectorStoreIndex,
StorageContext,
load_index_from_storage,
)
from llama_index.prompts import ChatMessage, MessageRole
from llama_index.tools import QueryEngineTool, ToolMetadata
import os
# import openai
from llama_hub.tools.wikipedia import WikipediaToolSpec
from llama_index.agent import OpenAIAgent
from fetch_yt_metadata import fetch_youtube_metadata
video_url = None
with st.sidebar:
openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
if openai_api_key:
os.environ["OPENAI_API_KEY"] = openai_api_key
"[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
video_url = st.text_input("'Enter your video url here:", key="video_url")
if video_url:
st.video(video_url)
if is_youtube_video(video_url):
metadata = fetch_youtube_metadata(video_url)
st.session_state["metadata"] = metadata
st.header("Metadata:")
for k, v in metadata.items():
if k == "video_description":
st.text_area("Description:", height=200, value=v, disabled=True)
else:
st.write(f"{k}: {v}")
st.text_area("Transcript:", height=200, value=st.session_state.get("transcript", ""))
if st.session_state.get("video_url"):
url = st.session_state.get("video_url")
st.write(f"Chat with {url}")
if "counter" not in st.session_state:
st.session_state.counter = 0
st.session_state.counter += 1
st.header(f"This page has run {st.session_state.counter} times.")
st.button("Run it again")
query_engine = None
transcript = None
if video_url:
video_id = video_url.split('=')[1].split('&')[0]
# check if storage already exists
PERSIST_DIR = f"./storage/{video_id}"
if not os.path.exists(PERSIST_DIR):
# load the documents and create the index
# documents = SimpleDirectoryReader("data").load_data()
loader = YoutubeTranscriptReader()
documents = loader.load_data(ytlinks=[url])
# save the documents to disk using the video_id.sbt
index = VectorStoreIndex.from_documents(documents)
# store it for later
index.storage_context.persist(persist_dir=PERSIST_DIR)
with open(f"{PERSIST_DIR}/transcript.txt", "w") as f:
for doc in documents:
f.write(doc.text)
else:
# load the existing index
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
index = load_index_from_storage(storage_context)
# either way we can now query the index
query_engine = index.as_query_engine()
if not st.session_state.get("summary"):
summary = query_engine.query("What's the video about?").response
st.session_state["summary"] = summary
if not st.session_state.get("transcript"):
transcript = open(f"{PERSIST_DIR}/transcript.txt").read()
st.session_state["transcript"] = transcript
st.title('💬 Talk2YouTube')
st.write(st.session_state.get("summary",'Load a youtube video and chat with it'))
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input():
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
vector_tool = QueryEngineTool(
query_engine=query_engine,
metadata=ToolMetadata(
name=f"VideoTranscript",
description=f"useful for when you want to answer queries about the content of the video.",
),
)
wiki_tool_spec = WikipediaToolSpec()
tools = wiki_tool_spec.to_tool_list() #+ query_engine_tools
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
agent = OpenAIAgent.from_tools([vector_tool], verbose=True, openai_api_key=st.session_state.get("chatbot_api_key"))
chat_history = [ChatMessage(role=MessageRole.USER if x.get("role","assistant") == "user" else "assistant", content=x.get("content","")) for x in st.session_state.messages]
response = agent.chat(prompt, chat_history=chat_history)
msg = {"role":"assistant", "content":response.response}
st.session_state.messages.append(msg)
st.chat_message("assistant").write(msg.get("content"))
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.load_index_from_storage",
"llama_index.tools.ToolMetadata",
"llama_index.StorageContext.from_defaults"
] | [((1466, 1499), 'streamlit.session_state.get', 'st.session_state.get', (['"""video_url"""'], {}), "('video_url')\n", (1486, 1499), True, 'import streamlit as st\n'), ((1684, 1749), 'streamlit.header', 'st.header', (['f"""This page has run {st.session_state.counter} times."""'], {}), "(f'This page has run {st.session_state.counter} times.')\n", (1693, 1749), True, 'import streamlit as st\n'), ((1750, 1775), 'streamlit.button', 'st.button', (['"""Run it again"""'], {}), "('Run it again')\n", (1759, 1775), True, 'import streamlit as st\n'), ((3161, 3187), 'streamlit.title', 'st.title', (['"""💬 Talk2YouTube"""'], {}), "('💬 Talk2YouTube')\n", (3169, 3187), True, 'import streamlit as st\n'), ((599, 670), 'streamlit.text_input', 'st.text_input', (['"""OpenAI API Key"""'], {'key': '"""chatbot_api_key"""', 'type': '"""password"""'}), "('OpenAI API Key', key='chatbot_api_key', type='password')\n", (612, 670), True, 'import streamlit as st\n'), ((840, 901), 'streamlit.text_input', 'st.text_input', (['"""\'Enter your video url here:"""'], {'key': '"""video_url"""'}), '("\'Enter your video url here:", key=\'video_url\')\n', (853, 901), True, 'import streamlit as st\n'), ((1511, 1544), 'streamlit.session_state.get', 'st.session_state.get', (['"""video_url"""'], {}), "('video_url')\n", (1531, 1544), True, 'import streamlit as st\n'), ((1549, 1577), 'streamlit.write', 'st.write', (['f"""Chat with {url}"""'], {}), "(f'Chat with {url}')\n", (1557, 1577), True, 'import streamlit as st\n'), ((3197, 3269), 'streamlit.session_state.get', 'st.session_state.get', (['"""summary"""', '"""Load a youtube video and chat with it"""'], {}), "('summary', 'Load a youtube video and chat with it')\n", (3217, 3269), True, 'import streamlit as st\n'), ((3511, 3526), 'streamlit.chat_input', 'st.chat_input', ([], {}), '()\n', (3524, 3526), True, 'import streamlit as st\n'), ((3926, 3945), 'llama_hub.tools.wikipedia.WikipediaToolSpec', 'WikipediaToolSpec', ([], {}), '()\n', (3943, 3945), False, 'from llama_hub.tools.wikipedia import WikipediaToolSpec\n'), ((4014, 4083), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (4046, 4083), True, 'import streamlit as st\n'), ((4547, 4584), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['msg'], {}), '(msg)\n', (4579, 4584), True, 'import streamlit as st\n'), ((928, 947), 'streamlit.video', 'st.video', (['video_url'], {}), '(video_url)\n', (936, 947), True, 'import streamlit as st\n'), ((959, 986), 'llama_hub.youtube_transcript.is_youtube_video', 'is_youtube_video', (['video_url'], {}), '(video_url)\n', (975, 986), False, 'from llama_hub.youtube_transcript import is_youtube_video\n'), ((1973, 2000), 'os.path.exists', 'os.path.exists', (['PERSIST_DIR'], {}), '(PERSIST_DIR)\n', (1987, 2000), False, 'import os\n'), ((2133, 2158), 'llama_hub.youtube_transcript.YoutubeTranscriptReader', 'YoutubeTranscriptReader', ([], {}), '()\n', (2156, 2158), False, 'from llama_hub.youtube_transcript import YoutubeTranscriptReader\n'), ((2288, 2330), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2319, 2330), False, 'from llama_index import VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((2627, 2680), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'PERSIST_DIR'}), '(persist_dir=PERSIST_DIR)\n', (2655, 2680), False, 
'from llama_index import VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((2697, 2737), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (2720, 2737), False, 'from llama_index import VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((2836, 2867), 'streamlit.session_state.get', 'st.session_state.get', (['"""summary"""'], {}), "('summary')\n", (2856, 2867), True, 'import streamlit as st\n'), ((2999, 3033), 'streamlit.session_state.get', 'st.session_state.get', (['"""transcript"""'], {}), "('transcript')\n", (3019, 3033), True, 'import streamlit as st\n'), ((3563, 3617), 'streamlit.info', 'st.info', (['"""Please add your OpenAI API key to continue."""'], {}), "('Please add your OpenAI API key to continue.')\n", (3570, 3617), True, 'import streamlit as st\n'), ((3626, 3635), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (3633, 3635), True, 'import streamlit as st\n'), ((1011, 1044), 'fetch_yt_metadata.fetch_youtube_metadata', 'fetch_youtube_metadata', (['video_url'], {}), '(video_url)\n', (1033, 1044), False, 'from fetch_yt_metadata import fetch_youtube_metadata\n'), ((1109, 1131), 'streamlit.header', 'st.header', (['"""Metadata:"""'], {}), "('Metadata:')\n", (1118, 1131), True, 'import streamlit as st\n'), ((3446, 3474), 'streamlit.chat_message', 'st.chat_message', (["msg['role']"], {}), "(msg['role'])\n", (3461, 3474), True, 'import streamlit as st\n'), ((3729, 3867), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': 'f"""VideoTranscript"""', 'description': 'f"""useful for when you want to answer queries about the content of the video."""'}), "(name=f'VideoTranscript', description=\n f'useful for when you want to answer queries about the content of the video.'\n )\n", (3741, 3867), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n'), ((4088, 4111), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (4103, 4111), True, 'import streamlit as st\n'), ((4205, 4244), 'streamlit.session_state.get', 'st.session_state.get', (['"""chatbot_api_key"""'], {}), "('chatbot_api_key')\n", (4225, 4244), True, 'import streamlit as st\n'), ((4589, 4617), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (4604, 4617), True, 'import streamlit as st\n'), ((1422, 1460), 'streamlit.session_state.get', 'st.session_state.get', (['"""transcript"""', '""""""'], {}), "('transcript', '')\n", (1442, 1460), True, 'import streamlit as st\n'), ((1239, 1303), 'streamlit.text_area', 'st.text_area', (['"""Description:"""'], {'height': '(200)', 'value': 'v', 'disabled': '(True)'}), "('Description:', height=200, value=v, disabled=True)\n", (1251, 1303), True, 'import streamlit as st\n'), ((1346, 1367), 'streamlit.write', 'st.write', (['f"""{k}: {v}"""'], {}), "(f'{k}: {v}')\n", (1354, 1367), True, 'import streamlit as st\n')] |
import os
from typing import Any, Optional
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
DEFAULT_TOKEN_JSON_PATH = 'token.json'
DEFAULT_SERVICE_ACCOUNT_JSON_PATH = 'service_account.json'
DEFAULT_CREDENTIALS_JSON_PATH = 'credentials.json'
HEADING_STYLE_TEMPLATE = 'HEADING_{}'
DEFAULT_QUESTION_HEADING_STYLE_NUM = 3
EXCLUDED_LLM_METADATA_KEYS = ['source', 'title', 'section_name']
EXCLUDED_EMBED_METADATA_KEYS = ['source', 'title']
SCOPES = ["https://www.googleapis.com/auth/documents.readonly"]
class FAQGoogleDocsReader(BasePydanticReader):
token_json_path: str = DEFAULT_TOKEN_JSON_PATH
service_account_json_path: str = DEFAULT_SERVICE_ACCOUNT_JSON_PATH
credentials_json_path: str = DEFAULT_CREDENTIALS_JSON_PATH
question_heading_style_num: int = DEFAULT_QUESTION_HEADING_STYLE_NUM
is_remote: bool = True
def __init__(self,
token_json_path: Optional[str] = DEFAULT_TOKEN_JSON_PATH,
service_account_json_path: Optional[str] = DEFAULT_SERVICE_ACCOUNT_JSON_PATH,
credentials_json_path: Optional[str] = DEFAULT_CREDENTIALS_JSON_PATH,
question_heading_style_num: Optional[int] = DEFAULT_QUESTION_HEADING_STYLE_NUM
) -> None:
"""Initialize with parameters."""
try:
import google # noqa
import google_auth_oauthlib # noqa
import googleapiclient # noqa
except ImportError as e:
raise ImportError(
'`google_auth_oauthlib`, `googleapiclient` and `google` '
'must be installed to use the GoogleDocsReader.\n'
'Please run `pip install --upgrade google-api-python-client '
'google-auth-httplib2 google-auth-oauthlib`.'
) from e
super().__init__(token_json_path=token_json_path,
service_account_json_path=service_account_json_path,
credentials_json_path=credentials_json_path,
question_heading_style_num=question_heading_style_num)
@classmethod
def class_name(cls) -> str:
return 'CustomGoogleDocsReader'
def load_data(self, document_ids: [str]) -> [Document]:
"""Load data from the input directory.
Args:
document_ids (List[str]): a list of document ids.
"""
if document_ids is None:
raise ValueError('Must specify a "document_ids" in `load_kwargs`.')
results = []
for document_id in document_ids:
docs = self._load_docs(document_id)
results.extend(docs)
return results
def _load_docs(self, document_id: str) -> [Document]:
"""Load a document from Google Docs.
Args:
document_id: the document id.
Returns:
The document text.
"""
import googleapiclient.discovery as discovery
credentials = self._get_credentials()
docs_service = discovery.build('docs', 'v1', credentials=credentials)
doc = docs_service.documents().get(documentId=document_id).execute()
doc_content = doc.get('body').get('content')
doc_source = f'https://docs.google.com/document/d/{document_id}/edit#heading='
return self._structural_elements_to_docs(doc_content, doc_source)
def _get_credentials(self) -> Any:
"""Get valid user credentials from storage.
The file token.json stores the user's access and refresh tokens, and is
created automatically when the authorization flow completes for the first
time.
Returns:
Credentials, the obtained credential.
"""
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
creds = None
if os.path.exists(self.token_json_path):
creds = Credentials.from_authorized_user_file(self.token_json_path, SCOPES)
elif os.path.exists(self.service_account_json_path):
return service_account.Credentials.from_service_account_file(
self.service_account_json_path, scopes=SCOPES
)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
self.credentials_json_path, SCOPES
)
creds = flow.run_local_server(port=8080)
# Save the credentials for the next run
with open(self.token_json_path, 'w') as token:
token.write(creds.to_json())
return creds
@staticmethod
def _read_paragraph_element(element: Any) -> Any:
"""Return the text in the given ParagraphElement.
Args:
element: a ParagraphElement from a Google Doc.
"""
text_run = element.get('textRun')
return text_run.get('content') if text_run else ''
@staticmethod
def _get_text_from_paragraph_elements(elements: [Any]) -> Any:
return ''.join(FAQGoogleDocsReader._read_paragraph_element(elem) for elem in elements)
def _structural_elements_to_docs(self,
doc_elements: [Any],
doc_source: str) -> [Document]:
"""Recurse through a list of Structural Elements.
Read a document's text where text may be in nested elements.
Args:
doc_elements: a list of Structural Elements.
"""
docs = []
text = ''
heading_id = ''
section_name = ''
question_heading_style = HEADING_STYLE_TEMPLATE.format(self.question_heading_style_num)
section_heading_style = HEADING_STYLE_TEMPLATE.format(self.question_heading_style_num - 1)
for value in doc_elements:
if 'paragraph' in value:
paragraph = value['paragraph']
elements = paragraph.get('elements')
paragraph_text = FAQGoogleDocsReader._get_text_from_paragraph_elements(elements)
if 'paragraphStyle' in paragraph and 'headingId' in paragraph['paragraphStyle']:
named_style_type = paragraph['paragraphStyle']['namedStyleType']
if named_style_type in [
question_heading_style,
section_heading_style,
]:
# create previous document checking if it's not empty
if text != '':
node_metadata = {
'source': doc_source + heading_id,
'section_name': section_name,
'title': 'FAQ'
}
prev_doc = Document(text=text,
metadata=node_metadata,
excluded_embed_metadata_keys=EXCLUDED_EMBED_METADATA_KEYS,
excluded_llm_metadata_keys=EXCLUDED_LLM_METADATA_KEYS)
docs.append(prev_doc)
if named_style_type == question_heading_style:
heading_id = paragraph['paragraphStyle']['headingId']
text = paragraph_text
else:
section_name = paragraph_text
text = ''
else:
text += paragraph_text
return docs
if __name__ == '__main__':
reader = FAQGoogleDocsReader(service_account_json_path='../keys/service_account_key.json')
docs = reader.load_data(['1LpPanc33QJJ6BSsyxVg-pWNMplal84TdZtq10naIhD8'])
print(docs)
| [
"llama_index.core.schema.Document"
] | [((3044, 3098), 'googleapiclient.discovery.build', 'discovery.build', (['"""docs"""', '"""v1"""'], {'credentials': 'credentials'}), "('docs', 'v1', credentials=credentials)\n", (3059, 3098), True, 'import googleapiclient.discovery as discovery\n'), ((4002, 4038), 'os.path.exists', 'os.path.exists', (['self.token_json_path'], {}), '(self.token_json_path)\n', (4016, 4038), False, 'import os\n'), ((4060, 4127), 'google.oauth2.credentials.Credentials.from_authorized_user_file', 'Credentials.from_authorized_user_file', (['self.token_json_path', 'SCOPES'], {}), '(self.token_json_path, SCOPES)\n', (4097, 4127), False, 'from google.oauth2.credentials import Credentials\n'), ((4141, 4187), 'os.path.exists', 'os.path.exists', (['self.service_account_json_path'], {}), '(self.service_account_json_path)\n', (4155, 4187), False, 'import os\n'), ((4208, 4313), 'google.oauth2.service_account.Credentials.from_service_account_file', 'service_account.Credentials.from_service_account_file', (['self.service_account_json_path'], {'scopes': 'SCOPES'}), '(self.\n service_account_json_path, scopes=SCOPES)\n', (4261, 4313), False, 'from google.oauth2 import service_account\n'), ((4604, 4681), 'google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file', 'InstalledAppFlow.from_client_secrets_file', (['self.credentials_json_path', 'SCOPES'], {}), '(self.credentials_json_path, SCOPES)\n', (4645, 4681), False, 'from google_auth_oauthlib.flow import InstalledAppFlow\n'), ((4552, 4561), 'google.auth.transport.requests.Request', 'Request', ([], {}), '()\n', (4559, 4561), False, 'from google.auth.transport.requests import Request\n'), ((7140, 7307), 'llama_index.core.schema.Document', 'Document', ([], {'text': 'text', 'metadata': 'node_metadata', 'excluded_embed_metadata_keys': 'EXCLUDED_EMBED_METADATA_KEYS', 'excluded_llm_metadata_keys': 'EXCLUDED_LLM_METADATA_KEYS'}), '(text=text, metadata=node_metadata, excluded_embed_metadata_keys=\n EXCLUDED_EMBED_METADATA_KEYS, excluded_llm_metadata_keys=\n EXCLUDED_LLM_METADATA_KEYS)\n', (7148, 7307), False, 'from llama_index.core.schema import Document\n')] |
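The documents this reader returns are typically fed straight into an index; the sketch below is a hedged example that assumes the llama-index core package and an OpenAI key are configured, and the Google Doc id is a placeholder.

from llama_index.core import VectorStoreIndex

# Index the FAQ docs so each node keeps its source/section_name metadata for citations.
faq_reader = FAQGoogleDocsReader(service_account_json_path='../keys/service_account_key.json')
faq_docs = faq_reader.load_data(['<your-google-doc-id>'])
faq_index = VectorStoreIndex.from_documents(faq_docs)
print(faq_index.as_query_engine().query('How do I get access to the FAQ document?'))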
import utils
import os
import openai
import sys
from dotenv import load_dotenv
load_dotenv()
api_key = os.getenv("API_KEY")
openai.api_key = api_key
os.environ['OPENAI_API_KEY'] = api_key
#
# examples
# https://github.com/kevintsai/Building-and-Evaluating-Advanced-RAG-Applications
#
# SimpleDirectoryReader is a class that reads all the files in a directory and returns a list of documents
# It will select the best file reader based on the file extensions
# https://docs.llamaindex.ai/en/stable/examples/data_connectors/simple_directory_reader.html
#
# Load all (top-level) files from directory
# ,input_dir="/"
# ,input=files="/asdf.pdf"
# ,required_exts=[".pdf", ".txt", ".md"] <- extensions to read
# ,recursive=True
# docs = reader.load_data()
# print(f"Loaded {len(docs)} docs")
#
# llamaindex
from llama_index import SimpleDirectoryReader,VectorStoreIndex,ServiceContext,Document
from llama_index.llms import OpenAI
#from langchain_community.llms import OpenAI
documents = SimpleDirectoryReader(
input_files=["data/Analisis_Decreto_de_Necesidad_y_Urgencia_Bases_para_la_Reconstrucción.pdf"],
).load_data()
print(type(documents), "\n")
print(len(documents), "\n")
print(type(documents[0]))
print(documents[0])
print(f"Loaded {len(documents)} pages docs") # pages
# basic RAG pipeline
# Document is a class that represents a document
document = Document(text="\n\n".join([doc.text for doc in documents]))
# llm declare
# bge-small-en-v1.5 is a model that was trained on the BGE dataset
# https://huggingface.co/BAAI/bge-small-en-v1.5
# FlagEmbedding can map any text to a low-dimensional dense vector which can be used for tasks like retrieval, classification, clustering, or semantic search. And it also can be used in vector databases for LLMs.
llm = OpenAI(model="gpt-4-1106-preview", temperature=0.0)
service_context = ServiceContext.from_defaults(
llm=llm, embed_model="local:BAAI/bge-small-en-v1.5"
)
index = VectorStoreIndex.from_documents([document],
service_context=service_context)
query_engine = index.as_query_engine()
# query
response = query_engine.query(
"""
Contexto: Eres el mejor analista de documentos de leyes con un IQ de 150. Tienes que ser minucioso y necesito que revises la totalidad de las paginas del documento, sin dejar nada por fuera. Se experto en el tema y no me falles.
Pregunta: devolver en forma de items la totalidad de los temas que trata el documento presentado de forma minuciosa.
Forma de respuesta: El texto suministrado es en español y la respuesta la necesito en español.
"""
)
print(str(response))
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.SimpleDirectoryReader"
] | [((82, 95), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (93, 95), False, 'from dotenv import load_dotenv\n'), ((106, 126), 'os.getenv', 'os.getenv', (['"""API_KEY"""'], {}), "('API_KEY')\n", (115, 126), False, 'import os\n'), ((1774, 1825), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""', 'temperature': '(0.0)'}), "(model='gpt-4-1106-preview', temperature=0.0)\n", (1780, 1825), False, 'from llama_index.llms import OpenAI\n'), ((1844, 1930), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local:BAAI/bge-small-en-v1.5"""'}), "(llm=llm, embed_model=\n 'local:BAAI/bge-small-en-v1.5')\n", (1872, 1930), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, Document\n'), ((1941, 2017), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['[document]'], {'service_context': 'service_context'}), '([document], service_context=service_context)\n', (1972, 2017), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, Document\n'), ((977, 1105), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['data/Analisis_Decreto_de_Necesidad_y_Urgencia_Bases_para_la_Reconstrucción.pdf'\n ]"}), "(input_files=[\n 'data/Analisis_Decreto_de_Necesidad_y_Urgencia_Bases_para_la_Reconstrucción.pdf'\n ])\n", (998, 1105), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, Document\n')] |
from collections import ChainMap
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Protocol,
Sequence,
get_args,
runtime_checkable,
)
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponseAsyncGen,
CompletionResponseGen,
MessageRole,
)
from llama_index.core.base.query_pipeline.query import (
InputKeys,
OutputKeys,
QueryComponent,
StringableInput,
validate_and_convert_stringable,
)
from llama_index.core.bridge.pydantic import (
BaseModel,
Field,
root_validator,
validator,
)
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.generic_utils import (
messages_to_prompt as generic_messages_to_prompt,
)
from llama_index.core.base.llms.generic_utils import (
prompt_to_messages,
)
from llama_index.core.prompts import BasePromptTemplate, PromptTemplate
from llama_index.core.types import (
BaseOutputParser,
PydanticProgramMode,
TokenAsyncGen,
TokenGen,
)
# NOTE: These two protocols are needed to appease mypy
@runtime_checkable
class MessagesToPromptType(Protocol):
def __call__(self, messages: Sequence[ChatMessage]) -> str:
pass
@runtime_checkable
class CompletionToPromptType(Protocol):
def __call__(self, prompt: str) -> str:
pass
def stream_completion_response_to_tokens(
completion_response_gen: CompletionResponseGen,
) -> TokenGen:
"""Convert a stream completion response to a stream of tokens."""
def gen() -> TokenGen:
for response in completion_response_gen:
yield response.delta or ""
return gen()
def stream_chat_response_to_tokens(
chat_response_gen: ChatResponseGen,
) -> TokenGen:
"""Convert a stream completion response to a stream of tokens."""
def gen() -> TokenGen:
for response in chat_response_gen:
yield response.delta or ""
return gen()
async def astream_completion_response_to_tokens(
completion_response_gen: CompletionResponseAsyncGen,
) -> TokenAsyncGen:
"""Convert a stream completion response to a stream of tokens."""
async def gen() -> TokenAsyncGen:
async for response in completion_response_gen:
yield response.delta or ""
return gen()
async def astream_chat_response_to_tokens(
chat_response_gen: ChatResponseAsyncGen,
) -> TokenAsyncGen:
"""Convert a stream completion response to a stream of tokens."""
async def gen() -> TokenAsyncGen:
async for response in chat_response_gen:
yield response.delta or ""
return gen()
def default_completion_to_prompt(prompt: str) -> str:
return prompt
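# LLM extends BaseLLM with prompt/message formatting hooks, an optional system prompt and output parser, sync and async predict/stream helpers, and adapters for use as a query-pipeline component.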
class LLM(BaseLLM):
system_prompt: Optional[str] = Field(
default=None, description="System prompt for LLM calls."
)
messages_to_prompt: Callable = Field(
description="Function to convert a list of messages to an LLM prompt.",
default=None,
exclude=True,
)
completion_to_prompt: Callable = Field(
description="Function to convert a completion to an LLM prompt.",
default=None,
exclude=True,
)
output_parser: Optional[BaseOutputParser] = Field(
description="Output parser to parse, validate, and correct errors programmatically.",
default=None,
exclude=True,
)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT
# deprecated
query_wrapper_prompt: Optional[BasePromptTemplate] = Field(
description="Query wrapper prompt for LLM calls.",
default=None,
exclude=True,
)
@validator("messages_to_prompt", pre=True)
def set_messages_to_prompt(
cls, messages_to_prompt: Optional[MessagesToPromptType]
) -> MessagesToPromptType:
return messages_to_prompt or generic_messages_to_prompt
@validator("completion_to_prompt", pre=True)
def set_completion_to_prompt(
cls, completion_to_prompt: Optional[CompletionToPromptType]
) -> CompletionToPromptType:
return completion_to_prompt or default_completion_to_prompt
@root_validator
def check_prompts(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if values.get("completion_to_prompt") is None:
values["completion_to_prompt"] = default_completion_to_prompt
if values.get("messages_to_prompt") is None:
values["messages_to_prompt"] = generic_messages_to_prompt
return values
def _log_template_data(
self, prompt: BasePromptTemplate, **prompt_args: Any
) -> None:
template_vars = {
k: v
for k, v in ChainMap(prompt.kwargs, prompt_args).items()
if k in prompt.template_vars
}
with self.callback_manager.event(
CBEventType.TEMPLATING,
payload={
EventPayload.TEMPLATE: prompt.get_template(llm=self),
EventPayload.TEMPLATE_VARS: template_vars,
EventPayload.SYSTEM_PROMPT: self.system_prompt,
EventPayload.QUERY_WRAPPER_PROMPT: self.query_wrapper_prompt,
},
):
pass
def _get_prompt(self, prompt: BasePromptTemplate, **prompt_args: Any) -> str:
formatted_prompt = prompt.format(
llm=self,
messages_to_prompt=self.messages_to_prompt,
completion_to_prompt=self.completion_to_prompt,
**prompt_args,
)
if self.output_parser is not None:
formatted_prompt = self.output_parser.format(formatted_prompt)
return self._extend_prompt(formatted_prompt)
def _get_messages(
self, prompt: BasePromptTemplate, **prompt_args: Any
) -> List[ChatMessage]:
messages = prompt.format_messages(llm=self, **prompt_args)
if self.output_parser is not None:
messages = self.output_parser.format_messages(messages)
return self._extend_messages(messages)
def structured_predict(
self,
output_cls: BaseModel,
prompt: PromptTemplate,
**prompt_args: Any,
) -> BaseModel:
from llama_index.core.program.utils import get_program_for_llm
program = get_program_for_llm(
output_cls,
prompt,
self,
pydantic_program_mode=self.pydantic_program_mode,
)
return program(**prompt_args)
async def astructured_predict(
self,
output_cls: BaseModel,
prompt: PromptTemplate,
**prompt_args: Any,
) -> BaseModel:
from llama_index.core.program.utils import get_program_for_llm
program = get_program_for_llm(
output_cls,
prompt,
self,
pydantic_program_mode=self.pydantic_program_mode,
)
return await program.acall(**prompt_args)
def _parse_output(self, output: str) -> str:
if self.output_parser is not None:
return str(self.output_parser.parse(output))
return output
def predict(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> str:
"""Predict."""
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = self.chat(messages)
output = chat_response.message.content or ""
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
response = self.complete(formatted_prompt, formatted=True)
output = response.text
return self._parse_output(output)
def stream(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> TokenGen:
"""Stream."""
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = self.stream_chat(messages)
stream_tokens = stream_chat_response_to_tokens(chat_response)
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
stream_response = self.stream_complete(formatted_prompt, formatted=True)
stream_tokens = stream_completion_response_to_tokens(stream_response)
if prompt.output_parser is not None or self.output_parser is not None:
raise NotImplementedError("Output parser is not supported for streaming.")
return stream_tokens
async def apredict(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> str:
"""Async predict."""
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = await self.achat(messages)
output = chat_response.message.content or ""
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
response = await self.acomplete(formatted_prompt, formatted=True)
output = response.text
return self._parse_output(output)
async def astream(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> TokenAsyncGen:
"""Async stream."""
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = await self.astream_chat(messages)
stream_tokens = await astream_chat_response_to_tokens(chat_response)
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
stream_response = await self.astream_complete(
formatted_prompt, formatted=True
)
stream_tokens = await astream_completion_response_to_tokens(stream_response)
if prompt.output_parser is not None or self.output_parser is not None:
raise NotImplementedError("Output parser is not supported for streaming.")
return stream_tokens
def _extend_prompt(
self,
formatted_prompt: str,
) -> str:
"""Add system and query wrapper prompts to base prompt."""
extended_prompt = formatted_prompt
if self.system_prompt:
extended_prompt = self.system_prompt + "\n\n" + extended_prompt
if self.query_wrapper_prompt:
extended_prompt = self.query_wrapper_prompt.format(
query_str=extended_prompt
)
return extended_prompt
def _extend_messages(self, messages: List[ChatMessage]) -> List[ChatMessage]:
"""Add system prompt to chat message list."""
if self.system_prompt:
messages = [
ChatMessage(role=MessageRole.SYSTEM, content=self.system_prompt),
*messages,
]
return messages
def _as_query_component(self, **kwargs: Any) -> QueryComponent:
"""Return query component."""
if self.metadata.is_chat_model:
return LLMChatComponent(llm=self, **kwargs)
else:
return LLMCompleteComponent(llm=self, **kwargs)
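# Query-pipeline components: these wrap an LLM so it can run as a step in a query pipeline, in completion or chat mode.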
class BaseLLMComponent(QueryComponent):
"""Base LLM component."""
llm: LLM = Field(..., description="LLM")
streaming: bool = Field(default=False, description="Streaming mode")
class Config:
arbitrary_types_allowed = True
def set_callback_manager(self, callback_manager: Any) -> None:
"""Set callback manager."""
self.llm.callback_manager = callback_manager
class LLMCompleteComponent(BaseLLMComponent):
"""LLM completion component."""
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
if "prompt" not in input:
raise ValueError("Prompt must be in input dict.")
# do special check to see if prompt is a list of chat messages
if isinstance(input["prompt"], get_args(List[ChatMessage])):
input["prompt"] = self.llm.messages_to_prompt(input["prompt"])
input["prompt"] = validate_and_convert_stringable(input["prompt"])
else:
input["prompt"] = validate_and_convert_stringable(input["prompt"])
input["prompt"] = self.llm.completion_to_prompt(input["prompt"])
return input
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
# TODO: support only complete for now
# non-trivial to figure how to support chat/complete/etc.
prompt = kwargs["prompt"]
# ignore all other kwargs for now
if self.streaming:
response = self.llm.stream_complete(prompt, formatted=True)
else:
response = self.llm.complete(prompt, formatted=True)
return {"output": response}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component."""
# TODO: support only complete for now
# non-trivial to figure how to support chat/complete/etc.
prompt = kwargs["prompt"]
# ignore all other kwargs for now
response = await self.llm.acomplete(prompt, formatted=True)
return {"output": response}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
# TODO: support only complete for now
return InputKeys.from_keys({"prompt"})
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
class LLMChatComponent(BaseLLMComponent):
"""LLM chat component."""
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
if "messages" not in input:
raise ValueError("Messages must be in input dict.")
# if `messages` is a string, convert to a list of chat message
if isinstance(input["messages"], get_args(StringableInput)):
input["messages"] = validate_and_convert_stringable(input["messages"])
input["messages"] = prompt_to_messages(str(input["messages"]))
for message in input["messages"]:
if not isinstance(message, ChatMessage):
raise ValueError("Messages must be a list of ChatMessage")
return input
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
# TODO: support only complete for now
# non-trivial to figure how to support chat/complete/etc.
messages = kwargs["messages"]
if self.streaming:
response = self.llm.stream_chat(messages)
else:
response = self.llm.chat(messages)
return {"output": response}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component."""
# TODO: support only complete for now
# non-trivial to figure how to support chat/complete/etc.
messages = kwargs["messages"]
if self.streaming:
response = await self.llm.astream_chat(messages)
else:
response = await self.llm.achat(messages)
return {"output": response}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
# TODO: support only complete for now
return InputKeys.from_keys({"messages"})
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
| [
"llama_index.core.bridge.pydantic.validator",
"llama_index.core.base.query_pipeline.query.InputKeys.from_keys",
"llama_index.core.base.query_pipeline.query.OutputKeys.from_keys",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.program.utils.get_program_for_llm",
"llama_index.core.base.llms.types.ChatMessage",
"llama_index.core.base.query_pipeline.query.validate_and_convert_stringable"
] | [((2866, 2929), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""System prompt for LLM calls."""'}), "(default=None, description='System prompt for LLM calls.')\n", (2871, 2929), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((2979, 3094), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Function to convert a list of messages to an LLM prompt."""', 'default': 'None', 'exclude': '(True)'}), "(description=\n 'Function to convert a list of messages to an LLM prompt.', default=\n None, exclude=True)\n", (2984, 3094), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((3153, 3256), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Function to convert a completion to an LLM prompt."""', 'default': 'None', 'exclude': '(True)'}), "(description='Function to convert a completion to an LLM prompt.',\n default=None, exclude=True)\n", (3158, 3256), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((3332, 3460), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Output parser to parse, validate, and correct errors programmatically."""', 'default': 'None', 'exclude': '(True)'}), "(description=\n 'Output parser to parse, validate, and correct errors programmatically.',\n default=None, exclude=True)\n", (3337, 3460), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((3635, 3723), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Query wrapper prompt for LLM calls."""', 'default': 'None', 'exclude': '(True)'}), "(description='Query wrapper prompt for LLM calls.', default=None,\n exclude=True)\n", (3640, 3723), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((3757, 3798), 'llama_index.core.bridge.pydantic.validator', 'validator', (['"""messages_to_prompt"""'], {'pre': '(True)'}), "('messages_to_prompt', pre=True)\n", (3766, 3798), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((3996, 4039), 'llama_index.core.bridge.pydantic.validator', 'validator', (['"""completion_to_prompt"""'], {'pre': '(True)'}), "('completion_to_prompt', pre=True)\n", (4005, 4039), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((11484, 11513), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""LLM"""'}), "(..., description='LLM')\n", (11489, 11513), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((11536, 11586), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Streaming mode"""'}), "(default=False, description='Streaming mode')\n", (11541, 11586), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((6343, 6443), 'llama_index.core.program.utils.get_program_for_llm', 'get_program_for_llm', (['output_cls', 'prompt', 'self'], {'pydantic_program_mode': 'self.pydantic_program_mode'}), '(output_cls, prompt, self, pydantic_program_mode=self.\n pydantic_program_mode)\n', (6362, 6443), False, 'from llama_index.core.program.utils import get_program_for_llm\n'), ((6788, 6888), 'llama_index.core.program.utils.get_program_for_llm', 
'get_program_for_llm', (['output_cls', 'prompt', 'self'], {'pydantic_program_mode': 'self.pydantic_program_mode'}), '(output_cls, prompt, self, pydantic_program_mode=self.\n pydantic_program_mode)\n', (6807, 6888), False, 'from llama_index.core.program.utils import get_program_for_llm\n'), ((13621, 13652), 'llama_index.core.base.query_pipeline.query.InputKeys.from_keys', 'InputKeys.from_keys', (["{'prompt'}"], {}), "({'prompt'})\n", (13640, 13652), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((13751, 13783), 'llama_index.core.base.query_pipeline.query.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (13771, 13783), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((15577, 15610), 'llama_index.core.base.query_pipeline.query.InputKeys.from_keys', 'InputKeys.from_keys', (["{'messages'}"], {}), "({'messages'})\n", (15596, 15610), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((15709, 15741), 'llama_index.core.base.query_pipeline.query.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (15729, 15741), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((12239, 12266), 'typing.get_args', 'get_args', (['List[ChatMessage]'], {}), '(List[ChatMessage])\n', (12247, 12266), False, 'from typing import Any, Callable, Dict, List, Optional, Protocol, Sequence, get_args, runtime_checkable\n'), ((12374, 12422), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['prompt']"], {}), "(input['prompt'])\n", (12405, 12422), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((12467, 12515), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['prompt']"], {}), "(input['prompt'])\n", (12498, 12515), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((14217, 14242), 'typing.get_args', 'get_args', (['StringableInput'], {}), '(StringableInput)\n', (14225, 14242), False, 'from typing import Any, Callable, Dict, List, Optional, Protocol, Sequence, get_args, runtime_checkable\n'), ((14277, 14327), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['messages']"], {}), "(input['messages'])\n", (14308, 14327), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((10988, 11052), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.SYSTEM', 'content': 'self.system_prompt'}), '(role=MessageRole.SYSTEM, content=self.system_prompt)\n', (10999, 11052), False, 'from llama_index.core.base.llms.types import ChatMessage, ChatResponseAsyncGen, ChatResponseGen, CompletionResponseAsyncGen, CompletionResponseGen, MessageRole\n'), ((4780, 4816), 'collections.ChainMap', 'ChainMap', 
(['prompt.kwargs', 'prompt_args'], {}), '(prompt.kwargs, prompt_args)\n', (4788, 4816), False, 'from collections import ChainMap\n')] |
import streamlit as st
import pandas as pd
from PIL import Image
from utils import *
import os
os.environ['OPENAI_API_KEY'] = st.secrets['openai_key']
#from llama_index.llms.openai import OpenAI
#from llama_index.core.query_engine import PandasQueryEngine
from llama_index.core import Document
from llama_index.core import VectorStoreIndex
from llama_index.core import Settings
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
from llama_index.core.response_synthesizers import TreeSummarize
Settings.llm = OpenAI(temperature=0.5, model="gpt-4")
import re
from utils import data_munging, add_cumulative_stats
import streamlit as st
from google.oauth2 import service_account
from googleapiclient.discovery import build
# Set up credentials
credentials = service_account.Credentials.from_service_account_info(
st.secrets["gcp_service_account"],
scopes=[
"https://www.googleapis.com/auth/drive.readonly",
"https://www.googleapis.com/auth/spreadsheets.readonly"
]
)
# Use the Drive API
service = build('drive', 'v3', credentials=credentials)
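# Helpers below load coach-notes markdown either from a local file or from Google Drive, keeping only the content after the 'SKIPABOVE' marker.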
def return_markdown(filepath):
with open(filepath, 'r') as file:
content = file.read()
# Split the content on 'SKIPABOVE' and get everything below it
_, content_to_display = content.split('SKIPABOVE', 1)
# Display it in Streamlit
return content_to_display
def return_google_markdown(file_id):
request = service.files().get_media(fileId=file_id)
content = request.execute()
_, content_to_display = content.decode("utf-8").split('SKIPABOVE', 1)
return content_to_display
@st.cache_resource()  # the index is a non-serializable resource, so cache_resource (not cache_data) is the appropriate cache
def make_doc_index():
users = ['Beep', 'Tyler', 'LambertDBB', 'Grace', 'Ben', 'Forrest','Spangler',
'Sweet', 'Cody', 'Niko', 'Dan', 'Renzo', 'Sean', 'Shack', 'Frank',
'Blake', 'Todd', 'Taylor', 'Connor']
emails = [st.secrets[x]['email'] for x in users]
fileIds = [st.secrets[x]['fileid'] for x in users]
emails_ids = dict(zip(users, zip(emails, fileIds)))
    return_google_markdown(emails_ids['Beep'][1])  # NOTE: result is unused; this looks like a leftover sanity-check call
documents = []
for k in emails_ids.keys():
text = return_google_markdown(emails_ids[k][1])
cleaned_text = re.sub(r'\n+', ' ', text) # Remove multiple consecutive newlines
cleaned_text = re.sub(r'#+\s*', '', cleaned_text) # Remove hash symbols and spaces at the beginning of the line
document = Document(
text=cleaned_text,
metadata={"filename": emails_ids[k][1], "category": "coachnotes", "person": k}
)
documents.append(document)
extras = { 'battingPhilosophy': 'assets/docs/Batting Order Philosophy.md',
'battingJustification': 'assets/docs/Batting Order Justification.md',
'fieldingPhilosophy': 'assets/docs/Fielding Philosophy.md',
'fieldingJustification': 'assets/docs/Fielding Justification.md',
'baserunningPhilosophy': 'assets/docs/Baserunning Philosophy.md'
}
for k in extras.keys():
text = return_markdown(extras[k])
cleaned_text = re.sub(r'\n+', ' ', text) # Remove multiple consecutive newlines
cleaned_text = re.sub(r'#+\s*', '', cleaned_text) # Remove hash symbols and spaces at the beginning of the line
document = Document(
text=cleaned_text,
metadata={"filename": k, "category": "philosophy", "name": k}
)
documents.append(document)
vector_index = VectorStoreIndex.from_documents(documents)
return vector_index
@st.cache_data()
def get_dadimage_1():
image = Image.open('assets/dadbod_3_9_23.jpg')
return image
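# chat(): builds (and caches) the document index, wraps it in a query engine with a Pat Riley persona prompt, and runs the Streamlit chat loop.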
def chat():
st.markdown("# Chat")
#df_full, df_full_nums = data_munging()
#df_agg = df_full.groupby(['id', 'name'])[['atbats', 'run', 'rbi', 'walks', 'single', 'double', 'triple', 'homerun', 'games_played']].sum().reset_index()
#df = add_cumulative_stats(df_agg)
vector_index = make_doc_index()
# NOTE: we add an extra tone_name variable here
qa_prompt_tmpl = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the query.\n"
"You are Pat Riley having a bad day. \n"
"You are receiving notes from the head coach of a mens softball team."
"Your task is to analyze the notes and provide feedback to the query. Give long analysis.\n"
"Always try to focus on sacrifice for the team and playing hard.\n"
"Query: {query_str}\n"
"Answer: "
)
qa_prompt = PromptTemplate(qa_prompt_tmpl)
coachnotes = vector_index.as_query_engine(text_qa_template=qa_prompt)
with st.sidebar: get_sideBar('Team Stats')
#llm = OpenAI(api_key= st.secrets.openai_key)
if "messages" not in st.session_state.keys():
st.session_state.messages = [
{"role":"assistant", "content":"Somehow you managed to get me, HOF GM and Coach Pat Riley, to answer question about the DBB. I'm having a bad day, so don't expect me to go easy on you..."}
]
#query_engine = PandasQueryEngine(df=df, verbose=True)
if prompt:= st.chat_input("Ask a question"):
st.session_state.messages.append({"role":"user", "content": prompt})
for message in st.session_state.messages:
with st.chat_message(message['role']):
st.write(message['content'])
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = coachnotes.query(prompt)
st.write(response.response)
message = {"role":"assistant", "content": response.response}
st.session_state.messages.append(message)
# response = query_engine.query(
# "What is Josh's batting average?",
# )
#st.write(response)
if __name__ == "__main__":
chat()
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.llms.openai.OpenAI",
"llama_index.core.Document",
"llama_index.core.PromptTemplate"
] | [((549, 587), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0.5)', 'model': '"""gpt-4"""'}), "(temperature=0.5, model='gpt-4')\n", (555, 587), False, 'from llama_index.llms.openai import OpenAI\n'), ((801, 1019), 'google.oauth2.service_account.Credentials.from_service_account_info', 'service_account.Credentials.from_service_account_info', (["st.secrets['gcp_service_account']"], {'scopes': "['https://www.googleapis.com/auth/drive.readonly',\n 'https://www.googleapis.com/auth/spreadsheets.readonly']"}), "(st.secrets[\n 'gcp_service_account'], scopes=[\n 'https://www.googleapis.com/auth/drive.readonly',\n 'https://www.googleapis.com/auth/spreadsheets.readonly'])\n", (854, 1019), False, 'from google.oauth2 import service_account\n'), ((1069, 1114), 'googleapiclient.discovery.build', 'build', (['"""drive"""', '"""v3"""'], {'credentials': 'credentials'}), "('drive', 'v3', credentials=credentials)\n", (1074, 1114), False, 'from googleapiclient.discovery import build\n'), ((1633, 1648), 'streamlit.cache_data', 'st.cache_data', ([], {}), '()\n', (1646, 1648), True, 'import streamlit as st\n'), ((3597, 3612), 'streamlit.cache_data', 'st.cache_data', ([], {}), '()\n', (3610, 3612), True, 'import streamlit as st\n'), ((3528, 3570), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (3559, 3570), False, 'from llama_index.core import VectorStoreIndex\n'), ((3647, 3685), 'PIL.Image.open', 'Image.open', (['"""assets/dadbod_3_9_23.jpg"""'], {}), "('assets/dadbod_3_9_23.jpg')\n", (3657, 3685), False, 'from PIL import Image\n'), ((3721, 3742), 'streamlit.markdown', 'st.markdown', (['"""# Chat"""'], {}), "('# Chat')\n", (3732, 3742), True, 'import streamlit as st\n'), ((4711, 4741), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['qa_prompt_tmpl'], {}), '(qa_prompt_tmpl)\n', (4725, 4741), False, 'from llama_index.core import PromptTemplate\n'), ((2247, 2272), 're.sub', 're.sub', (['"""\\\\n+"""', '""" """', 'text'], {}), "('\\\\n+', ' ', text)\n", (2253, 2272), False, 'import re\n'), ((2336, 2370), 're.sub', 're.sub', (['"""#+\\\\s*"""', '""""""', 'cleaned_text'], {}), "('#+\\\\s*', '', cleaned_text)\n", (2342, 2370), False, 'import re\n'), ((2454, 2565), 'llama_index.core.Document', 'Document', ([], {'text': 'cleaned_text', 'metadata': "{'filename': emails_ids[k][1], 'category': 'coachnotes', 'person': k}"}), "(text=cleaned_text, metadata={'filename': emails_ids[k][1],\n 'category': 'coachnotes', 'person': k})\n", (2462, 2565), False, 'from llama_index.core import Document\n'), ((3142, 3167), 're.sub', 're.sub', (['"""\\\\n+"""', '""" """', 'text'], {}), "('\\\\n+', ' ', text)\n", (3148, 3167), False, 'import re\n'), ((3231, 3265), 're.sub', 're.sub', (['"""#+\\\\s*"""', '""""""', 'cleaned_text'], {}), "('#+\\\\s*', '', cleaned_text)\n", (3237, 3265), False, 'import re\n'), ((3349, 3443), 'llama_index.core.Document', 'Document', ([], {'text': 'cleaned_text', 'metadata': "{'filename': k, 'category': 'philosophy', 'name': k}"}), "(text=cleaned_text, metadata={'filename': k, 'category':\n 'philosophy', 'name': k})\n", (3357, 3443), False, 'from llama_index.core import Document\n'), ((4941, 4964), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4962, 4964), True, 'import streamlit as st\n'), ((5291, 5322), 'streamlit.chat_input', 'st.chat_input', (['"""Ask a question"""'], {}), "('Ask a question')\n", (5304, 5322), True, 'import streamlit as st\n'), ((5332, 5401), 
'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (5364, 5401), True, 'import streamlit as st\n'), ((5461, 5493), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (5476, 5493), True, 'import streamlit as st\n'), ((5507, 5535), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (5515, 5535), True, 'import streamlit as st\n'), ((5615, 5643), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (5630, 5643), True, 'import streamlit as st\n'), ((5662, 5687), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (5672, 5687), True, 'import streamlit as st\n'), ((5757, 5784), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (5765, 5784), True, 'import streamlit as st\n'), ((5878, 5919), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (5910, 5919), True, 'import streamlit as st\n')] |
# Import the necessary libraries
import random
import time
from llama_index.llms import OpenAI
import streamlit as st
from llama_index import VectorStoreIndex, ServiceContext, StorageContext, set_global_service_context
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.embeddings import LangchainEmbedding
import chromadb
from llama_index.vector_stores import ChromaVectorStore
from llama_index.storage.storage_context import StorageContext
from llama_index.node_parser import SentenceSplitter
from llama_index.indices.prompt_helper import PromptHelper
import re
from llama_index.chat_engine import CondensePlusContextChatEngine
from llama_index.indices.vector_store.retrievers import VectorIndexRetriever
from langchain_openai import ChatOpenAI
from llama_index.postprocessor import RankGPTRerank
# Streamlit interface
st.title('🦜🔗 Tourism Assistant Chatbot')
#First run, initialize the context and the chat engine
if "init" not in st.session_state:
st.session_state.init = True
system_prompt = (
'''
#### Task Instructions:
You are a friendly and knowledgeable tourism assistant, helping users with their queries related to tourism, travel, dining, events, and any related questions. Your goal is to provide accurate and useful information. If there's information you don't know, respond truthfully. Add a touch of personality and humor to engage users.
End every response by asking the user if there's anything else you can help with.
#### Personalization & Tone:
Maintain an upbeat and helpful tone, embodying the role of a helpful travel assistant. Inject personality and humor into responses to make interactions more enjoyable.
#### Context for User Input:
Always consider the user's input in the context of tourism, travel, and related topics. If a question is outside this scope, respond with a friendly reminder of your expertise and limitations.
If a question is outside the travel domain or anything related to it, please kindly remind the user that the question is not in your scope of expertise (cf. "Tell me a joke!" example below).
#### Creativity & Style Guidance:
Craft responses that are not only informative but also creative. Avoid short and plain answers; instead, provide engaging and well-elaborated responses.
#### External Knowledge & Data:
Base your responses on the dataset of events and places, ensuring accuracy in facts. If the dataset doesn't have information, clearly state that you don't have the specific data.
#### Handling Non-Travel Related Questions:
If a user asks a question outside the scope of travel, respond creatively but firmly, reminding the user of the bot's expertise in the travel domain. Redirect the conversation back to travel-related topics or provide a gentle refusal.
#### Rules & Guardrails:
Adhere to ethical standards. If a user request involves prohibited content or actions, respond appropriately and within the bounds of ethical guidelines.
#### Output Verification Standards:
Maintain a commitment to accuracy. If there's uncertainty in information, it's better to express that you're not sure rather than providing potentially inaccurate details.
#### Benefits of System Prompts:
1. **Character Maintenance:** Engage users with a consistent and friendly persona for longer conversations.
2. **Creativity:** Exhibit creative and natural behavior to enhance user experience.
3. **Rule Adherence:** Follow instructions carefully to avoid prohibited tasks or text.
### Example User Interactions:
**User: Recommend a trendy restaurant in Paris.**
> "Ah, Paris - the city of love and incredible cuisine! 🥖 How about checking out 'La Mode Bistro'? It's not just a restaurant; it's a fashion show for your taste buds! 😋"
**User: What's the best way to explore Tokyo on a budget?**
> "Exploring Tokyo without breaking the bank? 🏮 How about hopping on the efficient and cost-friendly metro, grabbing some street food in Harajuku, and exploring the free admission areas of beautiful parks like Ueno! 🌸"
**User: Any upcoming events in New York City?**
> "NYC, the city that never sleeps! 🗽 Let me check my event database for you. One moment... 🕵️♂️ Ah, there's a fantastic art festival in Chelsea this weekend! 🎨"
**User: Tell me a joke!**
> "While I'm better at recommending travel spots, here's a quick one for you: Why don't scientists trust atoms? Because they make up everything! 😄 Now, anything travel-related you'd like to know?"
**User: What's the capital of France?**
> "Ah, testing my geography knowledge, are we? 😄 The capital of France is Paris! 🇫🇷 Now, if you have any travel-related questions, I'm your go-to guide!"
**User: Can you help me with my math homework?**
> "Ah, numbers are a bit outside my travel-savvy brain! 😅 If you have any questions about amazing destinations or travel tips, though, I'm all ears!"
''')
#temperature adjustable at will
st.session_state.service_context = ServiceContext.from_defaults(llm=ChatOpenAI(model="gpt-3.5-turbo", temperature=0.9),
prompt_helper = PromptHelper(),
embed_model= LangchainEmbedding(HuggingFaceEmbeddings(model_name='dangvantuan/sentence-camembert-large')), #in case of new embeddings, possibility to add "model_kwargs = {'device': 'cuda:0'}" to the HuggingFaceEmbeddings call to use GPU
node_parser=SentenceSplitter(),
system_prompt=system_prompt,
)
set_global_service_context(st.session_state.service_context)
# create or get a chroma collection
st.session_state.chroma_collection = chromadb.PersistentClient(path="./chroma_db").get_or_create_collection("tourism_db")
# assign chroma as the vector_store to the context
st.session_state.storage_context = StorageContext.from_defaults(vector_store=ChromaVectorStore(chroma_collection=st.session_state.chroma_collection))
#get the index
st.session_state.index = VectorStoreIndex.from_vector_store(ChromaVectorStore(chroma_collection=st.session_state.chroma_collection),
storage_context=st.session_state.storage_context, service_context=st.session_state.service_context)
#example of context and condense prompt adjustability
#context_prompt= "Base the reply to the user question mainly on the Description field of the context "
#condense_prompt = " "
st.session_state.retriever=VectorIndexRetriever(st.session_state.index, similarity_top_k=10) #or index.as_retriever(service_context=service_context, search_kwargs={"k": 10})
#I chose to use the RankGPTRerank postprocessor to rerank the top 4 results from the retriever over other rerankers like LLMRerank that wasn't working as expected
reranker = RankGPTRerank(
llm=OpenAI(
model="gpt-3.5-turbo",
temperature=0.0),
top_n=4,
verbose=True,
)
st.session_state.chat_engine = CondensePlusContextChatEngine.from_defaults(
retriever=st.session_state.retriever,
query_engine=st.session_state.index.as_query_engine(service_context=st.session_state.service_context,
retriever=st.session_state.retriever),
service_context=st.session_state.service_context,
system_prompt=system_prompt,
node_postprocessors=[reranker],
#condense_prompt=DEFAULT_CONDENSE_PROMPT_TEMPLATE,
#context_prompt=DEFAULT_CONTEXT_PROMPT_TEMPLATE,
verbose=True,
)
#initialize the chat history
st.session_state.messages = []
#initialize the assistant with a random greeting
assistant_response = random.choice(
[
"Hello there! How can I assist you today?",
"Good day human! I'm here to answer questions about travel. What do you need help with?",
"Hello! My name is Minotour2.0. Please feel free to ask me any questions about trips, destinations or planning.",
"Welcome! I'm an AI assistant focused on travel. How may I assist you in finding your next adventure?",
"Greetings! What are your travel plans or questions? I'm happy to provide any information I can.",
"Hi there, traveler! I'm your virtual travel guide - where would you like to go or what do you need help planning?",
"What brings you here today? I'm your assistant for all things related to getting away - what destination interests you?",
"Salutations! Let me know if you need advice on flights, hotels or activities for an upcoming journey.",
"Hello friend, I'm here to help with travel queries. What questions can I answer for you?",
"Welcome, I'm your assistant available to help with transportation, lodging or other travel logistics. How can I assist you?",
]
)
st.session_state.messages.append({"role": "assistant", "content": assistant_response})
# Display chat messages from history on app rerun
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
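# handle_chat: "reset" clears the chat engine memory and the message history; other questions go to the chat engine and role prefixes are stripped from the reply.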
def handle_chat(question):
if question.lower() == "reset":
st.session_state.chat_engine.reset()
st.session_state.messages = []
return "The conversation has been reset."
else:
response = st.session_state.chat_engine.chat(question)
cleaned_response = re.sub(r"(AI: |AI Assistant: |assistant: )", "", re.sub(r"^user: .*$", "", str(response), flags=re.MULTILINE))
return cleaned_response
if user_input := st.chat_input("Please enter your question:"):
if user_input.lower() == "exit":
st.warning('Goodbye')
st.stop()
else:
with st.chat_message("user"):
st.markdown(user_input)
# Add user message to chat history
st.session_state.messages.append({"role": "user", "content": user_input})
# Handle chat and get the response
response = handle_chat(user_input)
# Display assistant response in chat message container
with st.chat_message("assistant"):
full_response = ""
message_placeholder = st.empty()
for chunk in response.split():
full_response += chunk + " "
time.sleep(0.05)
# Add a blinking cursor to simulate typing
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
# Add assistant response to chat history
st.session_state.messages.append({"role": "assistant", "content": full_response}) | [
"llama_index.indices.vector_store.retrievers.VectorIndexRetriever",
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.llms.OpenAI",
"llama_index.indices.prompt_helper.PromptHelper",
"llama_index.set_global_service_context",
"llama_index.node_parser.SentenceSplitter"
] | [((855, 895), 'streamlit.title', 'st.title', (['"""🦜🔗 Tourism Assistant Chatbot"""'], {}), "('🦜🔗 Tourism Assistant Chatbot')\n", (863, 895), True, 'import streamlit as st\n'), ((5721, 5781), 'llama_index.set_global_service_context', 'set_global_service_context', (['st.session_state.service_context'], {}), '(st.session_state.service_context)\n', (5747, 5781), False, 'from llama_index import VectorStoreIndex, ServiceContext, StorageContext, set_global_service_context\n'), ((6706, 6771), 'llama_index.indices.vector_store.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', (['st.session_state.index'], {'similarity_top_k': '(10)'}), '(st.session_state.index, similarity_top_k=10)\n', (6726, 6771), False, 'from llama_index.indices.vector_store.retrievers import VectorIndexRetriever\n'), ((8706, 9817), 'random.choice', 'random.choice', (['[\'Hello there! How can I assist you today?\',\n "Good day human! I\'m here to answer questions about travel. What do you need help with?"\n ,\n \'Hello! My name is Minotour2.0. Please feel free to ask me any questions about trips, destinations or planning.\'\n ,\n "Welcome! I\'m an AI assistant focused on travel. How may I assist you in finding your next adventure?"\n ,\n "Greetings! What are your travel plans or questions? I\'m happy to provide any information I can."\n ,\n "Hi there, traveler! I\'m your virtual travel guide - where would you like to go or what do you need help planning?"\n ,\n "What brings you here today? I\'m your assistant for all things related to getting away - what destination interests you?"\n ,\n \'Salutations! Let me know if you need advice on flights, hotels or activities for an upcoming journey.\'\n ,\n "Hello friend, I\'m here to help with travel queries. What questions can I answer for you?"\n ,\n "Welcome, I\'m your assistant available to help with transportation, lodging or other travel logistics. How can I assist you?"\n ]'], {}), '([\'Hello there! How can I assist you today?\',\n "Good day human! I\'m here to answer questions about travel. What do you need help with?"\n ,\n \'Hello! My name is Minotour2.0. Please feel free to ask me any questions about trips, destinations or planning.\'\n ,\n "Welcome! I\'m an AI assistant focused on travel. How may I assist you in finding your next adventure?"\n ,\n "Greetings! What are your travel plans or questions? I\'m happy to provide any information I can."\n ,\n "Hi there, traveler! I\'m your virtual travel guide - where would you like to go or what do you need help planning?"\n ,\n "What brings you here today? I\'m your assistant for all things related to getting away - what destination interests you?"\n ,\n \'Salutations! Let me know if you need advice on flights, hotels or activities for an upcoming journey.\'\n ,\n "Hello friend, I\'m here to help with travel queries. What questions can I answer for you?"\n ,\n "Welcome, I\'m your assistant available to help with transportation, lodging or other travel logistics. 
How can I assist you?"\n ])\n', (8719, 9817), False, 'import random\n'), ((9979, 10069), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': assistant_response}"], {}), "({'role': 'assistant', 'content':\n assistant_response})\n", (10011, 10069), True, 'import streamlit as st\n'), ((10705, 10749), 'streamlit.chat_input', 'st.chat_input', (['"""Please enter your question:"""'], {}), "('Please enter your question:')\n", (10718, 10749), True, 'import streamlit as st\n'), ((6243, 6314), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'st.session_state.chroma_collection'}), '(chroma_collection=st.session_state.chroma_collection)\n', (6260, 6314), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((10169, 10201), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (10184, 10201), True, 'import streamlit as st\n'), ((10211, 10242), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (10222, 10242), True, 'import streamlit as st\n'), ((10315, 10351), 'streamlit.session_state.chat_engine.reset', 'st.session_state.chat_engine.reset', ([], {}), '()\n', (10349, 10351), True, 'import streamlit as st\n'), ((10470, 10513), 'streamlit.session_state.chat_engine.chat', 'st.session_state.chat_engine.chat', (['question'], {}), '(question)\n', (10503, 10513), True, 'import streamlit as st\n'), ((10796, 10817), 'streamlit.warning', 'st.warning', (['"""Goodbye"""'], {}), "('Goodbye')\n", (10806, 10817), True, 'import streamlit as st\n'), ((10826, 10835), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (10833, 10835), True, 'import streamlit as st\n'), ((10984, 11057), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': user_input}"], {}), "({'role': 'user', 'content': user_input})\n", (11016, 11057), True, 'import streamlit as st\n'), ((11715, 11800), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content':\n full_response})\n", (11747, 11800), True, 'import streamlit as st\n'), ((4991, 5041), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.9)'}), "(model='gpt-3.5-turbo', temperature=0.9)\n", (5001, 5041), False, 'from langchain_openai import ChatOpenAI\n'), ((5128, 5142), 'llama_index.indices.prompt_helper.PromptHelper', 'PromptHelper', ([], {}), '()\n', (5140, 5142), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((5529, 5547), 'llama_index.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (5545, 5547), False, 'from llama_index.node_parser import SentenceSplitter\n'), ((5864, 5909), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (5889, 5909), False, 'import chromadb\n'), ((6086, 6157), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'st.session_state.chroma_collection'}), '(chroma_collection=st.session_state.chroma_collection)\n', (6103, 6157), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((7071, 7117), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.0)'}), "(model='gpt-3.5-turbo', temperature=0.0)\n", (7077, 7117), False, 'from llama_index.llms 
import OpenAI\n'), ((7506, 7637), 'streamlit.session_state.index.as_query_engine', 'st.session_state.index.as_query_engine', ([], {'service_context': 'st.session_state.service_context', 'retriever': 'st.session_state.retriever'}), '(service_context=st.session_state.\n service_context, retriever=st.session_state.retriever)\n', (7544, 7637), True, 'import streamlit as st\n'), ((10859, 10882), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (10874, 10882), True, 'import streamlit as st\n'), ((10896, 10919), 'streamlit.markdown', 'st.markdown', (['user_input'], {}), '(user_input)\n', (10907, 10919), True, 'import streamlit as st\n'), ((11250, 11278), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (11265, 11278), True, 'import streamlit as st\n'), ((11345, 11355), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (11353, 11355), True, 'import streamlit as st\n'), ((5244, 5316), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""dangvantuan/sentence-camembert-large"""'}), "(model_name='dangvantuan/sentence-camembert-large')\n", (5265, 5316), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((11460, 11476), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (11470, 11476), False, 'import time\n')] |
import os
import json
import logging
import sys
import requests
from dotenv import load_dotenv
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from llama_index.core import VectorStoreIndex, Document
from llama_index.tools.brave_search import BraveSearchToolSpec
from llama_index.readers.web import SimpleWebPageReader
# Constants
USER_AGENT = 'Mozilla/5.0 (compatible; YourBot/1.0; +http://yourwebsite.com/bot.html)'
HEADERS = {'User-Agent': USER_AGENT}
RETRIES = Retry(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])
def setup_logging():
"""
Initialize logging configuration to output logs to stdout.
"""
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
def load_environment_variables():
"""
Load environment variables from the .env file.
:return: The Brave API key.
"""
load_dotenv()
return os.getenv('BRAVE_API_KEY')
def perform_search(query, api_key):
"""
Perform a search using the Brave Search API.
:param query: The search query.
:param api_key: The Brave API key.
:return: The search response.
"""
tool_spec = BraveSearchToolSpec(api_key=api_key)
return tool_spec.brave_search(query=query)
def extract_search_results(response):
"""
Extract search results from the Brave Search API response.
:param response: The search response.
:return: A list of search results.
"""
documents = [doc.text for doc in response]
search_results = []
for document in documents:
response_data = json.loads(document)
search_results.extend(response_data.get('web', {}).get('results', []))
return search_results
def scrape_web_pages(search_results):
"""
Scrape web pages from the URLs obtained from the search results.
:param search_results: The list of search results.
:return: A list of scraped documents.
"""
session = requests.Session()
session.mount('http://', HTTPAdapter(max_retries=RETRIES))
session.mount('https://', HTTPAdapter(max_retries=RETRIES))
all_documents = []
for result in search_results:
url = result.get('url')
try:
response = session.get(url, headers=HEADERS, timeout=10)
response.raise_for_status()
            doc = Document(text=response.text, metadata={"url": url})  # keep the source URL as node metadata
all_documents.append(doc)
except requests.exceptions.RequestException as e:
logging.error(f"Failed to scrape {url}: {e}")
return all_documents
def main():
"""
Main function to orchestrate the search, scraping, and querying process.
"""
setup_logging()
api_key = load_environment_variables()
my_query = "What is the latest news about llamaindex?"
response = perform_search(my_query, api_key)
search_results = extract_search_results(response)
all_documents = scrape_web_pages(search_results)
# Load all the scraped documents into the vector store
index = VectorStoreIndex.from_documents(all_documents)
# Use the index to query with the language model
query_engine = index.as_query_engine()
response = query_engine.query(my_query)
print(response)
if __name__ == "__main__":
main() | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.Document",
"llama_index.tools.brave_search.BraveSearchToolSpec"
] | [((496, 569), 'urllib3.util.retry.Retry', 'Retry', ([], {'total': '(5)', 'backoff_factor': '(0.1)', 'status_forcelist': '[500, 502, 503, 504]'}), '(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])\n', (501, 569), False, 'from urllib3.util.retry import Retry\n'), ((675, 733), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (694, 733), False, 'import logging\n'), ((949, 962), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (960, 962), False, 'from dotenv import load_dotenv\n'), ((974, 1000), 'os.getenv', 'os.getenv', (['"""BRAVE_API_KEY"""'], {}), "('BRAVE_API_KEY')\n", (983, 1000), False, 'import os\n'), ((1228, 1264), 'llama_index.tools.brave_search.BraveSearchToolSpec', 'BraveSearchToolSpec', ([], {'api_key': 'api_key'}), '(api_key=api_key)\n', (1247, 1264), False, 'from llama_index.tools.brave_search import BraveSearchToolSpec\n'), ((1998, 2016), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2014, 2016), False, 'import requests\n'), ((3052, 3098), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['all_documents'], {}), '(all_documents)\n', (3083, 3098), False, 'from llama_index.core import VectorStoreIndex, Document\n'), ((769, 809), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (790, 809), False, 'import logging\n'), ((1637, 1657), 'json.loads', 'json.loads', (['document'], {}), '(document)\n', (1647, 1657), False, 'import json\n'), ((2046, 2078), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'RETRIES'}), '(max_retries=RETRIES)\n', (2057, 2078), False, 'from requests.adapters import HTTPAdapter\n'), ((2110, 2142), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'RETRIES'}), '(max_retries=RETRIES)\n', (2121, 2142), False, 'from requests.adapters import HTTPAdapter\n'), ((738, 757), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (755, 757), False, 'import logging\n'), ((2374, 2411), 'llama_index.core.Document', 'Document', ([], {'text': 'response.text', 'url': 'url'}), '(text=response.text, url=url)\n', (2382, 2411), False, 'from llama_index.core import VectorStoreIndex, Document\n'), ((2520, 2565), 'logging.error', 'logging.error', (['f"""Failed to scrape {url}: {e}"""'], {}), "(f'Failed to scrape {url}: {e}')\n", (2533, 2565), False, 'import logging\n')] |
import qdrant_client
from llama_index.llms import Ollama
from llama_index import (
VectorStoreIndex,
ServiceContext,
)
from llama_index.vector_stores.qdrant import QdrantVectorStore
# re-initialize the vector store
client = qdrant_client.QdrantClient(
path="./qdrant_data"
)
vector_store = QdrantVectorStore(client=client, collection_name="tweets")
# get the LLM again
llm = Ollama(model="mistral")
service_context = ServiceContext.from_defaults(llm=llm,embed_model="local")
# load the index from the vector store
index = VectorStoreIndex.from_vector_store(vector_store=vector_store,service_context=service_context)
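# Query the Qdrant-backed index with the Mistral model served by Ollama; similarity_top_k=20 sets how many stored chunks are retrieved as context.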
def rag_pipline(query):
    if query is not None:
        query_engine = index.as_query_engine(similarity_top_k=20)
        response = query_engine.query(query)
        return response
    else:
        return "I am sorry, I cannot answer this due to an error in the data."
return "i am sorry. i cannot answer you for this due to some error in data" | [
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.llms.Ollama"
] | [((233, 281), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'path': '"""./qdrant_data"""'}), "(path='./qdrant_data')\n", (259, 281), False, 'import qdrant_client\n'), ((303, 361), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'client', 'collection_name': '"""tweets"""'}), "(client=client, collection_name='tweets')\n", (320, 361), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((389, 412), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': '"""mistral"""'}), "(model='mistral')\n", (395, 412), False, 'from llama_index.llms import Ollama\n'), ((431, 489), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (459, 489), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((536, 634), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (570, 634), False, 'from llama_index import VectorStoreIndex, ServiceContext\n')] |
import os
from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage
from llama_index.readers.file import PDFReader
# def get_index(data, index_name):
# index = None
# if not os.path.exists(index_name):
# print('Building index', index_name)
# index = VectorStoreIndex.from_documents(data, show_progress=True)
# index.storage_context.persist(persist_dir=index_name)
# else :
# index = load_index_from_storage(
# StorageContext.from_defaults(persist_dir=index_name)
# )
# return index
# pdf_path = os.path.join('data', 'Malaysia.pdf')
# malaysia_pdf = PDFReader().load_data(file=pdf_path)
# malaysia_index = get_index(malaysia_pdf, 'malaysia')
# malaysia_engine = malaysia_index.as_query_engine()
# malaysia_engine.query()
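# Build a vector index over one or more PDF files, persisting it to disk and reloading it on later runs.
# Hypothetical usage: get_index([os.path.join('data', 'Malaysia.pdf')], 'malaysia_index')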
def get_index(data_files, index_name):
index = None
data = []
for file_path in data_files:
reader = PDFReader()
data.extend(reader.load_data(file=file_path))
if not os.path.exists(index_name):
print(f'Building index {index_name}')
index = VectorStoreIndex.from_documents(data, show_progress=True)
index.storage_context.persist(persist_dir=index_name)
else:
index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=index_name)
)
return index
file_paths = [
os.path.join('data', 'Malaysia.pdf'),
# Add more file paths here
]
combined_index = get_index(file_paths, 'combined_index')
combined_engine = combined_index.as_query_engine() | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.StorageContext.from_defaults",
"llama_index.readers.file.PDFReader"
] | [((1404, 1440), 'os.path.join', 'os.path.join', (['"""data"""', '"""Malaysia.pdf"""'], {}), "('data', 'Malaysia.pdf')\n", (1416, 1440), False, 'import os\n'), ((952, 963), 'llama_index.readers.file.PDFReader', 'PDFReader', ([], {}), '()\n', (961, 963), False, 'from llama_index.readers.file import PDFReader\n'), ((1030, 1056), 'os.path.exists', 'os.path.exists', (['index_name'], {}), '(index_name)\n', (1044, 1056), False, 'import os\n'), ((1120, 1177), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['data'], {'show_progress': '(True)'}), '(data, show_progress=True)\n', (1151, 1177), False, 'from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((1303, 1355), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'index_name'}), '(persist_dir=index_name)\n', (1331, 1355), False, 'from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage\n')] |
from llama_index.retrievers import BaseRetriever
from llama_index import QueryBundle
from llama_index.schema import NodeWithScore
from llama_index.vector_stores import VectorStoreQuery
from typing import List, Sequence, Any
from llama_index.tools import BaseTool, adapt_to_async_tool
from llama_index import Document, VectorStoreIndex
class ToolRetriever(BaseRetriever):
def __init__(
self,
tools: Sequence[BaseTool],
sql_tools: Sequence[BaseTool],
embed_model: Any,
index: VectorStoreIndex = None,
message: str = "",
append_sql: bool = True,
similarity_top_k: int = 8,
logger=None,
) -> None:
self._message = message
self._tools = tools
self._index = index
self._sql_tools = sql_tools
self._append_sql = append_sql
self._similarity_top_k = similarity_top_k
self._embed_model = embed_model
self._logger = logger
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve."""
from llama_index.retrievers import VectorIndexRetriever
retriever = VectorIndexRetriever(
index=self._index,
similarity_top_k=self._similarity_top_k,
)
response = retriever.retrieve(query_bundle)
tools_ = []
for n in response:
tools_.append(self._tools[n.metadata["idx"]])
if self._append_sql:
tools_.append(self._sql_tools)
# tools_.append(self._tools[-1]) # add SQL tool
self._logger.debug(f"Tools before: {self._tools}")
_tmp = set(adapt_to_async_tool(t) for t in tools_)
self._logger.debug(f"Tools after: {list(_tmp)}")
return list(_tmp)
# return [adapt_to_async_tool(t) for t in tools_]
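    # Build a vector index over each tool's name and description so tools can be retrieved semantically by query.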
def create_vector_index_from_tools(self):
from llama_index.tools import adapt_to_async_tool
get_tools = lambda _: self._tools
tools = [adapt_to_async_tool(t) for t in get_tools("")]
docs = [
str(
"idx: "
+ str(idx)
+ ", name: "
+ str(t.metadata.name)
+ ", description: "
+ str(t.metadata.description)
)
for idx, t in enumerate(tools)
]
documents = [
Document(text=t, metadata={"idx": idx}) for idx, t in enumerate(docs)
]
self._index = VectorStoreIndex.from_documents(
documents, embed_model=self._embed_model
)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.tools.adapt_to_async_tool",
"llama_index.Document"
] | [((1143, 1228), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'self._index', 'similarity_top_k': 'self._similarity_top_k'}), '(index=self._index, similarity_top_k=self._similarity_top_k\n )\n', (1163, 1228), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((2469, 2542), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'embed_model': 'self._embed_model'}), '(documents, embed_model=self._embed_model)\n', (2500, 2542), False, 'from llama_index import Document, VectorStoreIndex\n'), ((1982, 2004), 'llama_index.tools.adapt_to_async_tool', 'adapt_to_async_tool', (['t'], {}), '(t)\n', (2001, 2004), False, 'from llama_index.tools import adapt_to_async_tool\n'), ((2366, 2405), 'llama_index.Document', 'Document', ([], {'text': 't', 'metadata': "{'idx': idx}"}), "(text=t, metadata={'idx': idx})\n", (2374, 2405), False, 'from llama_index import Document, VectorStoreIndex\n'), ((1635, 1657), 'llama_index.tools.adapt_to_async_tool', 'adapt_to_async_tool', (['t'], {}), '(t)\n', (1654, 1657), False, 'from llama_index.tools import adapt_to_async_tool\n')] |
from typing import List
from fastapi.responses import StreamingResponse
from app.utils.json import json_to_model
from app.utils.index import get_index
from fastapi import APIRouter, Depends, HTTPException, Request, status
from llama_index import VectorStoreIndex
from llama_index.llms.base import MessageRole, ChatMessage
from pydantic import BaseModel
chat_router = r = APIRouter()
class _Message(BaseModel):
role: MessageRole
content: str
class _ChatData(BaseModel):
messages: List[_Message]
@r.post("")
async def chat(
request: Request,
# Note: To support clients sending a JSON object using content-type "text/plain",
# we need to use Depends(json_to_model(_ChatData)) here
data: _ChatData = Depends(json_to_model(_ChatData)),
index: VectorStoreIndex = Depends(get_index),
):
# check preconditions and get last message
if len(data.messages) == 0:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="No messages provided",
)
lastMessage = data.messages.pop()
if lastMessage.role != MessageRole.USER:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Last message must be from user",
)
# convert messages coming from the request to type ChatMessage
messages = [
ChatMessage(
role=m.role,
content=m.content,
)
for m in data.messages
]
# query chat engine
chat_engine = index.as_chat_engine(
chat_mode="context",
sparse_top_k=12,
vector_store_query_mode="hybrid",
similarity_top_k=2,
system_prompt=(
"You are a chatbot, able to have normal interactions, as well as talk"
" about an Grade 3 Unit Tests, Holidays and Dairy of the School."
),
verbose=False,
)
response = chat_engine.stream_chat(lastMessage.content, messages)
# stream response
async def event_generator():
for token in response.response_gen:
# If client closes connection, stop sending events
if await request.is_disconnected():
break
yield token
return StreamingResponse(event_generator(), media_type="text/plain") | [
"llama_index.llms.base.ChatMessage"
] | [((374, 385), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (383, 385), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((798, 816), 'fastapi.Depends', 'Depends', (['get_index'], {}), '(get_index)\n', (805, 816), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((741, 765), 'app.utils.json.json_to_model', 'json_to_model', (['_ChatData'], {}), '(_ChatData)\n', (754, 765), False, 'from app.utils.json import json_to_model\n'), ((914, 1004), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""No messages provided"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'No messages provided')\n", (927, 1004), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((1132, 1232), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""Last message must be from user"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'Last message must be from user')\n", (1145, 1232), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((1355, 1398), 'llama_index.llms.base.ChatMessage', 'ChatMessage', ([], {'role': 'm.role', 'content': 'm.content'}), '(role=m.role, content=m.content)\n', (1366, 1398), False, 'from llama_index.llms.base import MessageRole, ChatMessage\n')] |
import os
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
load_index_from_storage,
)
BOT_NAME = os.environ["BOT_NAME"]
def construct_index(directory_data, directory_index, force_reload=False):
# check if storage already exists
if not os.path.exists(directory_index) or force_reload:
print(f'Creating new index using {directory_data}')
# load the documents and create the index
documents = SimpleDirectoryReader(directory_data).load_data()
index = VectorStoreIndex.from_documents(documents)
# store it for later
index.storage_context.persist(persist_dir=directory_index)
print(f'Storing new index to {directory_index}')
else:
# load the existing index
print(f'Loading existing index from {directory_index}')
storage_context = StorageContext.from_defaults(persist_dir=directory_index)
index = load_index_from_storage(storage_context)
return index
def query(question, index):
query_engine = index.as_query_engine()
response = query_engine.query(question)
return response
def ask(bot_name):
index = construct_index(directory_data=f'data/{bot_name}', directory_index=f'storage/{bot_name}')
while True:
question = input("What do you want to know?")
response = query(question=question, index=index)
print(f"{bot_name} says: {response}")
if __name__ == '__main__':
ask(BOT_NAME) | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.load_index_from_storage",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults"
] | [((541, 583), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (572, 583), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((871, 928), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'directory_index'}), '(persist_dir=directory_index)\n', (899, 928), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((945, 985), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (968, 985), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((296, 327), 'os.path.exists', 'os.path.exists', (['directory_index'], {}), '(directory_index)\n', (310, 327), False, 'import os\n'), ((475, 512), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['directory_data'], {}), '(directory_data)\n', (496, 512), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n')] |
from llama_index import SimpleDirectoryReader, ServiceContext, VectorStoreIndex
from llama_index.llms import OpenAI, ChatMessage, MessageRole
from llama_index.chat_engine.condense_plus_context import CondensePlusContextChatEngine
from dotenv import load_dotenv
import os
load_dotenv()
vector_index = None
history = []
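# Build the vector index from ./data/context (if present) using the system prompt stored in ./data/prompt.txt.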
def initializeService():
global vector_index
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.5)
promptFile = open('./data/prompt.txt')
prompt = promptFile.read()
#print("Using the following system prompt: ", prompt, sep='\n')
service_context = ServiceContext.from_defaults(
llm=llm, system_prompt=prompt)
try:
reader = SimpleDirectoryReader(
input_dir='./data/context', recursive=False)
docs = reader.load_data()
except ValueError:
        print("Context directory is empty, using only the prompt")
docs = []
vector_index = VectorStoreIndex.from_documents(
docs, service_context=service_context)
def loadChat():
global vector_index
global history
query_engine = vector_index.as_query_engine()
    chat_history = [
        ChatMessage(role=item['source'], content=item['message'])
        for item in history
    ]
chat_engine = CondensePlusContextChatEngine.from_defaults(
query_engine,
chat_history=chat_history
)
return chat_engine
def chat(message):
global history
history.append({'source': MessageRole.USER, 'message': message})
chat_engine = loadChat()
response = chat_engine.chat(message)
history.append({'source': MessageRole.SYSTEM, 'message': response.response})
return response.response
if __name__ == "__main__":
initializeService()
question = input("Ask me anything: ")
while question != "exit":
print(chat(question))
question = input("Ask me anything: ") | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.llms.ChatMessage",
"llama_index.chat_engine.condense_plus_context.CondensePlusContextChatEngine.from_defaults"
] | [((271, 284), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (282, 284), False, 'from dotenv import load_dotenv\n'), ((379, 425), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.5)'}), "(model='gpt-3.5-turbo', temperature=0.5)\n", (385, 425), False, 'from llama_index.llms import OpenAI, ChatMessage, MessageRole\n'), ((592, 651), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'system_prompt': 'prompt'}), '(llm=llm, system_prompt=prompt)\n', (620, 651), False, 'from llama_index import SimpleDirectoryReader, ServiceContext, VectorStoreIndex\n'), ((939, 1009), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'service_context': 'service_context'}), '(docs, service_context=service_context)\n', (970, 1009), False, 'from llama_index import SimpleDirectoryReader, ServiceContext, VectorStoreIndex\n'), ((1282, 1371), 'llama_index.chat_engine.condense_plus_context.CondensePlusContextChatEngine.from_defaults', 'CondensePlusContextChatEngine.from_defaults', (['query_engine'], {'chat_history': 'chat_history'}), '(query_engine, chat_history=\n chat_history)\n', (1325, 1371), False, 'from llama_index.chat_engine.condense_plus_context import CondensePlusContextChatEngine\n'), ((687, 753), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '"""./data/context"""', 'recursive': '(False)'}), "(input_dir='./data/context', recursive=False)\n", (708, 753), False, 'from llama_index import SimpleDirectoryReader, ServiceContext, VectorStoreIndex\n'), ((1172, 1229), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': "item['source']", 'content': "item['message']"}), "(role=item['source'], content=item['message'])\n", (1183, 1229), False, 'from llama_index.llms import OpenAI, ChatMessage, MessageRole\n')] |
import streamlit as st
import openai
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores import FaissVectorStore
from llama_index.storage.index_store import SimpleIndexStore
from llama_index import load_index_from_storage
from llama_index.storage.storage_context import StorageContext
from llama_index.query_engine import CitationQueryEngine
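# Load the persisted docstore/Faiss/index store from "persist_new" and cache a citation query engine across reruns.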
@st.cache_resource
def preprocess_preliminary():
storage_context = StorageContext.from_defaults(docstore = SimpleDocumentStore.from_persist_dir(persist_dir = "persist_new"),
vector_store = FaissVectorStore.from_persist_dir(persist_dir = "persist_new"),
index_store = SimpleIndexStore.from_persist_dir(persist_dir = "persist_new"))
index = load_index_from_storage(storage_context = storage_context)
query_engine = CitationQueryEngine.from_args(index, similarity_top_k = 3, citation_chunk_size = 1024)
return query_engine
openai.api_key = st.secrets['OPENAI_API_KEY']
st.set_page_config(layout = 'wide', page_title = 'Precedents Database')
st.title('Query Precedents')
q_e = preprocess_preliminary()
query = st.text_area(label = 'Enter your query involving Indian Legal Precedents.')
# model = st.selectbox(label = 'Select a model', options = ['gpt-3.5-turbo', 'gpt-4'])
start = st.button(label = 'Start')
base_append = ""
if start:
st.subheader('Query Response -')
database_answer = q_e.query(query + base_append)
st.write(database_answer.response)
st.subheader('Actual Sources -')
for i in range(len(database_answer.source_nodes)):
st.write(database_answer.source_nodes[i].node.get_text())
st.write(f'Case Name - {database_answer.source_nodes[i].node.extra_info["file_name"]}') | [
"llama_index.storage.index_store.SimpleIndexStore.from_persist_dir",
"llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir",
"llama_index.query_engine.CitationQueryEngine.from_args",
"llama_index.vector_stores.FaissVectorStore.from_persist_dir",
"llama_index.load_index_from_storage"
] | [((983, 1050), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""', 'page_title': '"""Precedents Database"""'}), "(layout='wide', page_title='Precedents Database')\n", (1001, 1050), True, 'import streamlit as st\n'), ((1056, 1084), 'streamlit.title', 'st.title', (['"""Query Precedents"""'], {}), "('Query Precedents')\n", (1064, 1084), True, 'import streamlit as st\n'), ((1125, 1198), 'streamlit.text_area', 'st.text_area', ([], {'label': '"""Enter your query involving Indian Legal Precedents."""'}), "(label='Enter your query involving Indian Legal Precedents.')\n", (1137, 1198), True, 'import streamlit as st\n'), ((1297, 1321), 'streamlit.button', 'st.button', ([], {'label': '"""Start"""'}), "(label='Start')\n", (1306, 1321), True, 'import streamlit as st\n'), ((746, 802), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'storage_context': 'storage_context'}), '(storage_context=storage_context)\n', (769, 802), False, 'from llama_index import load_index_from_storage\n'), ((824, 910), 'llama_index.query_engine.CitationQueryEngine.from_args', 'CitationQueryEngine.from_args', (['index'], {'similarity_top_k': '(3)', 'citation_chunk_size': '(1024)'}), '(index, similarity_top_k=3,\n citation_chunk_size=1024)\n', (853, 910), False, 'from llama_index.query_engine import CitationQueryEngine\n'), ((1357, 1389), 'streamlit.subheader', 'st.subheader', (['"""Query Response -"""'], {}), "('Query Response -')\n", (1369, 1389), True, 'import streamlit as st\n'), ((1447, 1481), 'streamlit.write', 'st.write', (['database_answer.response'], {}), '(database_answer.response)\n', (1455, 1481), True, 'import streamlit as st\n'), ((1486, 1518), 'streamlit.subheader', 'st.subheader', (['"""Actual Sources -"""'], {}), "('Actual Sources -')\n", (1498, 1518), True, 'import streamlit as st\n'), ((1648, 1745), 'streamlit.write', 'st.write', (['f"""Case Name - {database_answer.source_nodes[i].node.extra_info[\'file_name\']}"""'], {}), '(\n f"Case Name - {database_answer.source_nodes[i].node.extra_info[\'file_name\']}"\n )\n', (1656, 1745), True, 'import streamlit as st\n'), ((494, 557), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': '"""persist_new"""'}), "(persist_dir='persist_new')\n", (530, 557), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((584, 644), 'llama_index.vector_stores.FaissVectorStore.from_persist_dir', 'FaissVectorStore.from_persist_dir', ([], {'persist_dir': '"""persist_new"""'}), "(persist_dir='persist_new')\n", (617, 644), False, 'from llama_index.vector_stores import FaissVectorStore\n'), ((670, 730), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': '"""persist_new"""'}), "(persist_dir='persist_new')\n", (703, 730), False, 'from llama_index.storage.index_store import SimpleIndexStore\n')] |
import streamlit as st
from dotenv import load_dotenv
load_dotenv()
import os
import tempfile
from llama_index import SimpleDirectoryReader, StorageContext, LLMPredictor
from llama_index import VectorStoreIndex
from llama_index import ServiceContext
from llama_index.embeddings.langchain import LangchainEmbedding
from langchain.chat_models import ChatOpenAI
import tiktoken
from langchain.embeddings import CohereEmbeddings
import openai
os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
openai.api_key = st.secrets["OPENAI_API_KEY"]
os.environ["COHERE_API_KEY"] = st.secrets["COHERE_API_KEY"]
llm_predictor = LLMPredictor(llm = ChatOpenAI(temperature = 0, model_name = 'gpt-3.5-turbo', max_tokens = -1, openai_api_key = openai.api_key))
embed_model = LangchainEmbedding(CohereEmbeddings(model = "embed-english-light-v2.0"))
storage_context = StorageContext.from_defaults()
service_context = ServiceContext.from_defaults(llm_predictor = llm_predictor, embed_model = embed_model)
def num_tokens_from_string(string: str, encoding_name: str) -> int:
encoding = tiktoken.encoding_for_model(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
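# If the uploaded statement is at most 4,000 tokens, return its raw text for a direct chat completion;
# otherwise build a vector index and return a retrieval query engine (top 3 chunks).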
@st.cache_resource
def preprocessing(uploaded_file):
if uploaded_file:
temp_dir = tempfile.TemporaryDirectory()
file_path = os.path.join(temp_dir.name, uploaded_file.name)
with open(file_path, "wb") as f:
f.write(uploaded_file.read())
document = SimpleDirectoryReader(input_files = [file_path]).load_data()
tokens = num_tokens_from_string(document[0].text, 'gpt-3.5-turbo')
global context
context = document[0].text
if tokens <= 4000:
print('Case - A')
return context
else:
print('Case - B')
index = VectorStoreIndex.from_documents(document, service_context = service_context, storage_context = storage_context)
global engine
engine = index.as_query_engine(similarity_top_k = 3)
return engine
@st.cache_resource
def run(_query_engine, query):
if type(_query_engine) == str:
print('Executing Case - A')
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant who answers questions given context."},
{"role": "user", "content": f"The question is - {query}\nThe provided context is - {_query_engine}\nAnswer the question to the best of your abilities."},
]
)
st.write(response['choices'][0]['message']['content'])
else:
print('Executing Case - B')
        st.write(_query_engine.query(query).response)
return True
st.set_page_config(layout = "wide")
st.title("Document Querying")
uploaded_file = st.file_uploader('Upload your file')
query_engine = preprocessing(uploaded_file)
if query_engine:
query = st.text_input('Enter your Query.', key = 'query_input')
if query:
run(query_engine, query) | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults"
] | [((55, 68), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (66, 68), False, 'from dotenv import load_dotenv\n'), ((860, 890), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (888, 890), False, 'from llama_index import SimpleDirectoryReader, StorageContext, LLMPredictor\n'), ((909, 996), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model'}), '(llm_predictor=llm_predictor, embed_model=\n embed_model)\n', (937, 996), False, 'from llama_index import ServiceContext\n'), ((2820, 2853), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""'}), "(layout='wide')\n", (2838, 2853), True, 'import streamlit as st\n'), ((2857, 2886), 'streamlit.title', 'st.title', (['"""Document Querying"""'], {}), "('Document Querying')\n", (2865, 2886), True, 'import streamlit as st\n'), ((2904, 2940), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload your file"""'], {}), "('Upload your file')\n", (2920, 2940), True, 'import streamlit as st\n'), ((787, 837), 'langchain.embeddings.CohereEmbeddings', 'CohereEmbeddings', ([], {'model': '"""embed-english-light-v2.0"""'}), "(model='embed-english-light-v2.0')\n", (803, 837), False, 'from langchain.embeddings import CohereEmbeddings\n'), ((1080, 1122), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['encoding_name'], {}), '(encoding_name)\n', (1107, 1122), False, 'import tiktoken\n'), ((3016, 3069), 'streamlit.text_input', 'st.text_input', (['"""Enter your Query."""'], {'key': '"""query_input"""'}), "('Enter your Query.', key='query_input')\n", (3029, 3069), True, 'import streamlit as st\n'), ((644, 747), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': '(-1)', 'openai_api_key': 'openai.api_key'}), "(temperature=0, model_name='gpt-3.5-turbo', max_tokens=-1,\n openai_api_key=openai.api_key)\n", (654, 747), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1286, 1315), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1313, 1315), False, 'import tempfile\n'), ((1336, 1383), 'os.path.join', 'os.path.join', (['temp_dir.name', 'uploaded_file.name'], {}), '(temp_dir.name, uploaded_file.name)\n', (1348, 1383), False, 'import os\n'), ((2198, 2537), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': '"""gpt-3.5-turbo"""', 'messages': '[{\'role\': \'system\', \'content\':\n \'You are a helpful assistant who answers questions given context.\'}, {\n \'role\': \'user\', \'content\':\n f"""The question is - {query}\nThe provided context is - {_query_engine}\nAnswer the question to the best of your abilities."""\n }]'}), '(model=\'gpt-3.5-turbo\', messages=[{\'role\':\n \'system\', \'content\':\n \'You are a helpful assistant who answers questions given context.\'}, {\n \'role\': \'user\', \'content\':\n f"""The question is - {query}\nThe provided context is - {_query_engine}\nAnswer the question to the best of your abilities."""\n }])\n', (2226, 2537), False, 'import openai\n'), ((2645, 2699), 'streamlit.write', 'st.write', (["response['choices'][0]['message']['content']"], {}), "(response['choices'][0]['message']['content'])\n", (2653, 2699), True, 'import streamlit as st\n'), ((1828, 1939), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['document'], {'service_context': 'service_context', 'storage_context': 
'storage_context'}), '(document, service_context=service_context,\n storage_context=storage_context)\n', (1859, 1939), False, 'from llama_index import VectorStoreIndex\n'), ((1486, 1532), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[file_path]'}), '(input_files=[file_path])\n', (1507, 1532), False, 'from llama_index import SimpleDirectoryReader, StorageContext, LLMPredictor\n')] |
from dotenv import load_dotenv
import os
import streamlit as st
import pandas as pd
from llama_index.core.query_engine import PandasQueryEngine
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from prompts import new_prompt, instruction_str, context
from note_engine import note_engine
from pdf import combined_engine
from pdf import get_index as pdf_get_index
load_dotenv()
population_path = os.path.join("data", "population.csv")
population_df = pd.read_csv(population_path)
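# Expose the population dataframe through a PandasQueryEngine so the agent can answer demographic questions with generated pandas code.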
population_query_engine = PandasQueryEngine(
df=population_df, verbose=True, instruction_str=instruction_str
)
population_query_engine.update_prompts({"pandas_prompt" : new_prompt})
tools = [
note_engine,
QueryEngineTool(
query_engine=population_query_engine,
metadata=ToolMetadata(
name="population_query_engine",
description="This gives information at the world population and demographic",
),
),
# QueryEngineTool(
# query_engine=malaysia_engine,
# metadata=ToolMetadata(
# name="malaysia_data",
# description="This gives details information about Malaysia country",
# ),
# ),
QueryEngineTool(
query_engine=combined_engine,
metadata=ToolMetadata(
name="combined_data",
description="This gives information from multiple files",
),
),
]
llm = OpenAI(model="gpt-3.5-turbo-0613")
agent = ReActAgent.from_tools(tools, llm=llm, verbose=True, context=context)
# while (prompt := input("Enter a prompt (q to quit): ")) != "q":
# result = agent.query(prompt)
# print(result)
file_paths = []
# File uploader in the sidebar
with st.sidebar:
st.header("Upload PDF file")
file = st.file_uploader("", type=["pdf"])
if file:
file_path = os.path.join("data", file.name)
# Save the uploaded file
with open(file_path, "wb") as f:
f.write(file.getvalue())
# Display confirmation message
st.success(f"File uploaded successfully: {file.name}")
# Add the uploaded file path to the list of file paths
file_paths.append(file_path)
# Check if there are any uploaded files
if file_paths:
# Get combined index for uploaded files
combined_index = pdf_get_index(file_paths, 'combined_index')
combined_engine = combined_index.as_query_engine()
# Add QueryEngineTool for combined data
tools.append(
QueryEngineTool(
query_engine=combined_engine,
metadata=ToolMetadata(
name="combined_data",
description="This gives information from multiple files",
),
)
)
st.title("AgentAI - RAG")
user_input = st.text_input("Enter a prompt:")
if user_input:
if user_input.lower() == 'q':
st.stop()
else:
result = agent.query(user_input)
st.text_area("Response:", value=result, height=100, disabled=False)
| [
"llama_index.core.tools.ToolMetadata",
"llama_index.llms.openai.OpenAI",
"llama_index.core.query_engine.PandasQueryEngine",
"llama_index.core.agent.ReActAgent.from_tools"
] | [((468, 481), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (479, 481), False, 'from dotenv import load_dotenv\n'), ((501, 539), 'os.path.join', 'os.path.join', (['"""data"""', '"""population.csv"""'], {}), "('data', 'population.csv')\n", (513, 539), False, 'import os\n'), ((556, 584), 'pandas.read_csv', 'pd.read_csv', (['population_path'], {}), '(population_path)\n', (567, 584), True, 'import pandas as pd\n'), ((612, 699), 'llama_index.core.query_engine.PandasQueryEngine', 'PandasQueryEngine', ([], {'df': 'population_df', 'verbose': '(True)', 'instruction_str': 'instruction_str'}), '(df=population_df, verbose=True, instruction_str=\n instruction_str)\n', (629, 699), False, 'from llama_index.core.query_engine import PandasQueryEngine\n'), ((1517, 1551), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo-0613"""'}), "(model='gpt-3.5-turbo-0613')\n", (1523, 1551), False, 'from llama_index.llms.openai import OpenAI\n'), ((1560, 1628), 'llama_index.core.agent.ReActAgent.from_tools', 'ReActAgent.from_tools', (['tools'], {'llm': 'llm', 'verbose': '(True)', 'context': 'context'}), '(tools, llm=llm, verbose=True, context=context)\n', (1581, 1628), False, 'from llama_index.core.agent import ReActAgent\n'), ((2824, 2849), 'streamlit.title', 'st.title', (['"""AgentAI - RAG"""'], {}), "('AgentAI - RAG')\n", (2832, 2849), True, 'import streamlit as st\n'), ((2864, 2896), 'streamlit.text_input', 'st.text_input', (['"""Enter a prompt:"""'], {}), "('Enter a prompt:')\n", (2877, 2896), True, 'import streamlit as st\n'), ((1821, 1849), 'streamlit.header', 'st.header', (['"""Upload PDF file"""'], {}), "('Upload PDF file')\n", (1830, 1849), True, 'import streamlit as st\n'), ((1861, 1895), 'streamlit.file_uploader', 'st.file_uploader', (['""""""'], {'type': "['pdf']"}), "('', type=['pdf'])\n", (1877, 1895), True, 'import streamlit as st\n'), ((2415, 2458), 'pdf.get_index', 'pdf_get_index', (['file_paths', '"""combined_index"""'], {}), "(file_paths, 'combined_index')\n", (2428, 2458), True, 'from pdf import get_index as pdf_get_index\n'), ((1930, 1961), 'os.path.join', 'os.path.join', (['"""data"""', 'file.name'], {}), "('data', file.name)\n", (1942, 1961), False, 'import os\n'), ((2138, 2192), 'streamlit.success', 'st.success', (['f"""File uploaded successfully: {file.name}"""'], {}), "(f'File uploaded successfully: {file.name}')\n", (2148, 2192), True, 'import streamlit as st\n'), ((2954, 2963), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (2961, 2963), True, 'import streamlit as st\n'), ((3023, 3090), 'streamlit.text_area', 'st.text_area', (['"""Response:"""'], {'value': 'result', 'height': '(100)', 'disabled': '(False)'}), "('Response:', value=result, height=100, disabled=False)\n", (3035, 3090), True, 'import streamlit as st\n'), ((888, 1015), 'llama_index.core.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""population_query_engine"""', 'description': '"""This gives information at the world population and demographic"""'}), "(name='population_query_engine', description=\n 'This gives information at the world population and demographic')\n", (900, 1015), False, 'from llama_index.core.tools import QueryEngineTool, ToolMetadata\n'), ((1371, 1468), 'llama_index.core.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""combined_data"""', 'description': '"""This gives information from multiple files"""'}), "(name='combined_data', description=\n 'This gives information from multiple files')\n", (1383, 1468), False, 'from llama_index.core.tools import 
QueryEngineTool, ToolMetadata\n'), ((2665, 2762), 'llama_index.core.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""combined_data"""', 'description': '"""This gives information from multiple files"""'}), "(name='combined_data', description=\n 'This gives information from multiple files')\n", (2677, 2762), False, 'from llama_index.core.tools import QueryEngineTool, ToolMetadata\n')] |
# general imports
from constants import *
# streamlit imports
import streamlit as st
from utils import *
from streamlit_lottie import st_lottie
# llama index imports
import openai
from llama_index import (
VectorStoreIndex,
download_loader,
ServiceContext,
set_global_service_context,
)
from llama_index.llms import OpenAI
from llama_index.embeddings import LangchainEmbedding
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
openai.api_key = OpenAI_key # from constants.py
system_prompt = """
[INST] <>
You are a helpful bank loan officer. You are going to be given a bank statement
to analyse and you must provide accurate insights about its contents.
If a question doesn't make any sense, or is not factually coherent, explain what is wrong with
the question instead of answering something incorrect. If you don't know the answer, don't share
inaccurate information.
Your goal is to provide insightful answers about the financial background of an individual.
<>
"""
llm = OpenAI(model="gpt-4-1106-preview", system_prompt=system_prompt)
embeddings = LangchainEmbedding(HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2"))
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embeddings)
set_global_service_context(service_context)
# import lottie
lottie_file = load_lottieurl() # animation url
st.set_page_config(page_title="loan_gpt")
st_lottie(lottie_file, height=175, quality="medium")
st.title("**Loan Check: Business Loan Analysis**")
if "uploaded" not in st.session_state:
st.session_state["uploaded"] = False
st.session_state["filename"] = None
st.session_state["initial_response"] = None
if "query_engine" not in st.session_state:
st.session_state["query_engine"] = None
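# Clear the cached upload, summary, and query engine so a new statement can be analyzed.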
def reset():
st.session_state["uploaded"] = False
st.session_state["filename"] = None
st.session_state["initial_response"] = None
st.session_state["query_engine"] = None
if not st.session_state["uploaded"]:
st.write("Upload a bank statement and analyze loan worthiness.")
input_file = st.file_uploader("Choose a file")
if input_file and does_file_have_pdf_extension(input_file):
path = store_pdf_file(input_file, dir) # default dir is ./statements/
scs = st.success("File successfully uploaded")
filename = input_file.name
with st.spinner("Analyzing document..."):
PyMuPDFReader = download_loader("PyMuPDFReader")
loader = PyMuPDFReader()
documents = loader.load(file_path=path, metadata=True)
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
st.session_state["query_engine"] = query_engine
scs.empty()
st.session_state["uploaded"] = True
st.session_state["filename"] = filename
st.rerun()
if st.session_state["uploaded"]:
st.write(
f"Here is a financial summary of the account holder for the uploaded statement:"
)
st.button("Upload New PDF", on_click=reset)
initial_prompt = """
I want to analyze the financial health of the individual based solely on the given statement. Here are some details I want information on:
1. Total monthly deposits (with months and amounts)
2. Total monthly withdrawals (with months and amounts)
3. Any recurring payments (such as rent, utilities, loan repayments - with descriptions, dates, and amounts)
4. Any other noticeable spending habits (with amounts)
Make sure your output is well formatted and is plain-text.
I want to determine if this individual should be awarded a business loan based on the above.
Give me a potential yes, potential no or cannot say answer and evidence your response from details from above. Be sure to highlight any noticeable red-flags or positive signs.
"""
query_engine = st.session_state["query_engine"]
if not st.session_state["initial_response"]:
with st.spinner("Generating initial analysis..."):
response = query_engine.query(initial_prompt)
st.session_state["initial_response"] = response.response
st.write(st.session_state["initial_response"])
prompt = st.text_input("Type any additional queries query")
if prompt:
with st.spinner("Generating response..."):
response = query_engine.query(prompt)
st.write(response.response)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.set_global_service_context"
] | [((1017, 1080), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""', 'system_prompt': 'system_prompt'}), "(model='gpt-4-1106-preview', system_prompt=system_prompt)\n", (1023, 1080), False, 'from llama_index.llms import OpenAI\n'), ((1187, 1248), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embeddings'}), '(llm=llm, embed_model=embeddings)\n', (1215, 1248), False, 'from llama_index import VectorStoreIndex, download_loader, ServiceContext, set_global_service_context\n'), ((1249, 1292), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (1275, 1292), False, 'from llama_index import VectorStoreIndex, download_loader, ServiceContext, set_global_service_context\n'), ((1359, 1400), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""loan_gpt"""'}), "(page_title='loan_gpt')\n", (1377, 1400), True, 'import streamlit as st\n'), ((1401, 1453), 'streamlit_lottie.st_lottie', 'st_lottie', (['lottie_file'], {'height': '(175)', 'quality': '"""medium"""'}), "(lottie_file, height=175, quality='medium')\n", (1410, 1453), False, 'from streamlit_lottie import st_lottie\n'), ((1455, 1505), 'streamlit.title', 'st.title', (['"""**Loan Check: Business Loan Analysis**"""'], {}), "('**Loan Check: Business Loan Analysis**')\n", (1463, 1505), True, 'import streamlit as st\n'), ((1114, 1166), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""all-MiniLM-L6-v2"""'}), "(model_name='all-MiniLM-L6-v2')\n", (1135, 1166), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((1994, 2058), 'streamlit.write', 'st.write', (['"""Upload a bank statement and analyze loan worthiness."""'], {}), "('Upload a bank statement and analyze loan worthiness.')\n", (2002, 2058), True, 'import streamlit as st\n'), ((2076, 2109), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose a file"""'], {}), "('Choose a file')\n", (2092, 2109), True, 'import streamlit as st\n'), ((2905, 3005), 'streamlit.write', 'st.write', (['f"""Here is a financial summary of the account holder for the uploaded statement:"""'], {}), "(\n f'Here is a financial summary of the account holder for the uploaded statement:'\n )\n", (2913, 3005), True, 'import streamlit as st\n'), ((3014, 3057), 'streamlit.button', 'st.button', (['"""Upload New PDF"""'], {'on_click': 'reset'}), "('Upload New PDF', on_click=reset)\n", (3023, 3057), True, 'import streamlit as st\n'), ((4160, 4206), 'streamlit.write', 'st.write', (["st.session_state['initial_response']"], {}), "(st.session_state['initial_response'])\n", (4168, 4206), True, 'import streamlit as st\n'), ((4220, 4270), 'streamlit.text_input', 'st.text_input', (['"""Type any additional queries query"""'], {}), "('Type any additional queries query')\n", (4233, 4270), True, 'import streamlit as st\n'), ((2268, 2308), 'streamlit.success', 'st.success', (['"""File successfully uploaded"""'], {}), "('File successfully uploaded')\n", (2278, 2308), True, 'import streamlit as st\n'), ((2856, 2866), 'streamlit.rerun', 'st.rerun', ([], {}), '()\n', (2864, 2866), True, 'import streamlit as st\n'), ((2358, 2393), 'streamlit.spinner', 'st.spinner', (['"""Analyzing document..."""'], {}), "('Analyzing document...')\n", (2368, 2393), True, 'import streamlit as st\n'), ((2423, 2455), 'llama_index.download_loader', 'download_loader', (['"""PyMuPDFReader"""'], {}), 
"('PyMuPDFReader')\n", (2438, 2455), False, 'from llama_index import VectorStoreIndex, download_loader, ServiceContext, set_global_service_context\n'), ((2580, 2622), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2611, 2622), False, 'from llama_index import VectorStoreIndex, download_loader, ServiceContext, set_global_service_context\n'), ((3983, 4027), 'streamlit.spinner', 'st.spinner', (['"""Generating initial analysis..."""'], {}), "('Generating initial analysis...')\n", (3993, 4027), True, 'import streamlit as st\n'), ((4299, 4335), 'streamlit.spinner', 'st.spinner', (['"""Generating response..."""'], {}), "('Generating response...')\n", (4309, 4335), True, 'import streamlit as st\n'), ((4399, 4426), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (4407, 4426), True, 'import streamlit as st\n')] |
import pathlib
import tempfile
from io import BytesIO
import openai
import streamlit as st
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.chat_engine import ContextChatEngine
from llama_index.llms.openai import OpenAI
from sidebar import sidebar_params
st.set_page_config(page_title="Chat with Documents", layout="wide", page_icon="🔥")
st.title("Chat with Documents")
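# Write the upload to a temporary directory, index it, and return a context chat engine backed by gpt-3.5-turbo.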
@st.cache_resource(show_spinner=False)
def build_chat_engine(file: BytesIO, temperature: float) -> ContextChatEngine:
with st.spinner("Loading and indexing the document..."):
with tempfile.TemporaryDirectory() as temp_dir:
temp_file_path = pathlib.Path(temp_dir) / file.name
with open(temp_file_path, "wb") as f:
f.write(file.getbuffer())
reader = SimpleDirectoryReader(input_files=[temp_file_path])
documents = reader.load_data()
llm = OpenAI(model="gpt-3.5-turbo", temperature=temperature)
index = VectorStoreIndex.from_documents(documents)
return index.as_chat_engine(chat_mode="context", llm=llm, verbose=True)
def add_message(role: str, content: str):
st.session_state.messages.append({"role": role, "content": content})
openai_api_key, temperature = sidebar_params()
uploaded_file = st.file_uploader(
"Upload a pdf, docx, or txt file",
type=["pdf", "docx", "txt"],
)
if not openai_api_key or not uploaded_file:
st.stop()
openai.api_key = openai_api_key
chat_engine = build_chat_engine(uploaded_file, temperature)
if "messages" not in st.session_state or st.sidebar.button("Clear message history"):
st.session_state.messages = [
{
"role": "assistant",
"content": "Ask me questions about the uploaded document!",
},
]
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.write(message["content"])
if user_query := st.chat_input("Ask questions about the document..."):
with st.chat_message("user"):
st.write(user_query)
add_message("user", user_query)
with st.chat_message("assistant"), st.spinner("Generating response..."):
response = chat_engine.chat(user_query).response
st.write(response)
add_message("assistant", response)
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.openai.OpenAI"
] | [((300, 386), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with Documents"""', 'layout': '"""wide"""', 'page_icon': '"""🔥"""'}), "(page_title='Chat with Documents', layout='wide',\n page_icon='🔥')\n", (318, 386), True, 'import streamlit as st\n'), ((383, 414), 'streamlit.title', 'st.title', (['"""Chat with Documents"""'], {}), "('Chat with Documents')\n", (391, 414), True, 'import streamlit as st\n'), ((418, 455), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (435, 455), True, 'import streamlit as st\n'), ((1282, 1298), 'sidebar.sidebar_params', 'sidebar_params', ([], {}), '()\n', (1296, 1298), False, 'from sidebar import sidebar_params\n'), ((1316, 1401), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload a pdf, docx, or txt file"""'], {'type': "['pdf', 'docx', 'txt']"}), "('Upload a pdf, docx, or txt file', type=['pdf', 'docx', 'txt']\n )\n", (1332, 1401), True, 'import streamlit as st\n'), ((1181, 1249), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': role, 'content': content}"], {}), "({'role': role, 'content': content})\n", (1213, 1249), True, 'import streamlit as st\n'), ((1457, 1466), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (1464, 1466), True, 'import streamlit as st\n'), ((1602, 1644), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Clear message history"""'], {}), "('Clear message history')\n", (1619, 1644), True, 'import streamlit as st\n'), ((1953, 2005), 'streamlit.chat_input', 'st.chat_input', (['"""Ask questions about the document..."""'], {}), "('Ask questions about the document...')\n", (1966, 2005), True, 'import streamlit as st\n'), ((544, 594), 'streamlit.spinner', 'st.spinner', (['"""Loading and indexing the document..."""'], {}), "('Loading and indexing the document...')\n", (554, 594), True, 'import streamlit as st\n'), ((939, 993), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': 'temperature'}), "(model='gpt-3.5-turbo', temperature=temperature)\n", (945, 993), False, 'from llama_index.llms.openai import OpenAI\n'), ((1010, 1052), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1041, 1052), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((1864, 1896), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (1879, 1896), True, 'import streamlit as st\n'), ((1906, 1934), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (1914, 1934), True, 'import streamlit as st\n'), ((2016, 2039), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (2031, 2039), True, 'import streamlit as st\n'), ((2049, 2069), 'streamlit.write', 'st.write', (['user_query'], {}), '(user_query)\n', (2057, 2069), True, 'import streamlit as st\n'), ((2116, 2144), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2131, 2144), True, 'import streamlit as st\n'), ((2146, 2182), 'streamlit.spinner', 'st.spinner', (['"""Generating response..."""'], {}), "('Generating response...')\n", (2156, 2182), True, 'import streamlit as st\n'), ((2249, 2267), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (2257, 2267), True, 'import streamlit as st\n'), ((609, 638), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], 
{}), '()\n', (636, 638), False, 'import tempfile\n'), ((829, 880), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[temp_file_path]'}), '(input_files=[temp_file_path])\n', (850, 880), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((681, 703), 'pathlib.Path', 'pathlib.Path', (['temp_dir'], {}), '(temp_dir)\n', (693, 703), False, 'import pathlib\n')] |
import logging
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.vector_stores import ChromaVectorStore
from llama_index.storage.storage_context import StorageContext
# from IPython.display import Markdown, display
from llama_index.node_parser import SentenceSplitter
from embedding_manager import Embeddings
import chromadb
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class DataLoader:
def __init__(self, file_paths):
self.file_paths = file_paths
def read_data(self):
logger.info("Reading data from files: %s", self.file_paths)
# automatically selects the best file reader based on the file extensions
data_loader = SimpleDirectoryReader(input_files=self.file_paths)
return data_loader.load_data()
def chunk_data(self, data, chunk_size=500, chunk_overlap=50):
logger.info("Parsing data")
node_parser = SentenceSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
separator=" ",
paragraph_separator="\n\n\n",
secondary_chunking_regex="[^,.;。]+[,.;。]?"
)
return node_parser.get_nodes_from_documents(data)
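# Thin wrapper around a persistent Chroma client that returns (or creates) the configured collection.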
class DatabaseManager:
def __init__(self, db_path, collection_name):
self.db_path = db_path
self.collection_name = collection_name
def get_db(self):
logger.info("Initializing the database at path: %s", self.db_path)
db = chromadb.PersistentClient(path=self.db_path)
collection = db.get_or_create_collection(self.collection_name)
return collection
# class VectorIndexer:
# def __init__(self, nodes, vector_store, embedding_model, llm_model, indexid, index_path):
# self.nodes = nodes
# self.vector_store = vector_store
# self.embedding_model = embedding_model
# self.llm_model = llm_model
# self.indexid = indexid
# self.index_path = index_path
# # self.service_context
# def get_index(self):
# try:
# logger.info(f"Load {self.indexid} from local path {self.index_path}")
# storage_context = StorageContext.from_defaults(vector_store=self.vector_store,
# persist_dir=self.index_path)
# index = load_index_from_storage(storage_context=storage_context, index_id=indexid)
# except Exception as e:
# logger.info("Creating the vector index")
# storage_context = StorageContext.from_defaults(vector_store=self.vector_store)
# service_context = ServiceContext.from_defaults(embed_model=self.embedding_model, llm=self.llm_model)
# index = VectorStoreIndex(
# self.nodes, storage_context=storage_context, service_context=service_context
# )
# index.set_index_id(self.indexid)
# index.storage_context.persist(persist_dir=self.index_path)
# return index
| [
"llama_index.SimpleDirectoryReader",
"llama_index.node_parser.SentenceSplitter"
] | [((373, 412), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (392, 412), False, 'import logging\n'), ((422, 449), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (439, 449), False, 'import logging\n'), ((740, 790), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'self.file_paths'}), '(input_files=self.file_paths)\n', (761, 790), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((955, 1121), 'llama_index.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'separator': '""" """', 'paragraph_separator': '"""\n\n\n"""', 'secondary_chunking_regex': '"""[^,.;。]+[,.;。]?"""'}), "(chunk_size=chunk_size, chunk_overlap=chunk_overlap,\n separator=' ', paragraph_separator='\\n\\n\\n', secondary_chunking_regex=\n '[^,.;。]+[,.;。]?')\n", (971, 1121), False, 'from llama_index.node_parser import SentenceSplitter\n'), ((1505, 1549), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'self.db_path'}), '(path=self.db_path)\n', (1530, 1549), False, 'import chromadb\n')] |
from dotenv import load_dotenv
import os
from typing import List
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.core.settings import Settings
from llama_index.llms.openai import OpenAI
from llama_index.core.embeddings import resolve_embed_model
from llama_index.core import VectorStoreIndex
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.response.notebook_utils import display_source_node
os.environ["TOKENIZERS_PARALLELISM"] = "false"
class RAGCreator():
def __init__(self):
self.documents = []
self.nodes = None
self.retriever = None
self.query_engine = None
self.rag_info = {}
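    # Record the arguments of each setup step (callables are stored by name) so the final configuration can be inspected via get_rag_info().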
def _update_rag_info(self, params:dict):
        del params["self"]
callable_obj_keys = [k for k,v in params.items() if callable(v)]
for k in callable_obj_keys:
params[k] = params[k].__name__
self.rag_info.update(params)
def load_documents(self, data_loader, data_loader_kwargs:dict):
self._update_rag_info(locals())
try:
docs = data_loader().load_data(**data_loader_kwargs)
except Exception as e:
raise TypeError(f"Error loading documents: {e}.")
self.documents = docs
def parse_docs_to_nodes(self, node_parser=SimpleNodeParser, chunk_size=1024):
self._update_rag_info(locals())
node_parser = node_parser.from_defaults(chunk_size=chunk_size)
nodes = node_parser.get_nodes_from_documents(self.documents)
for idx, node in enumerate(nodes):
node.id_ = f"node-{idx}"
self.nodes = nodes
def set_model_settings(self, open_ai_model="gpt-3.5-turbo", embed_model="local:BAAI/bge-small-en"):
self._update_rag_info(locals())
load_dotenv()
Settings.llm = OpenAI(model=open_ai_model)
Settings.embed_model = resolve_embed_model(embed_model)
def create_retriever(self, vector_store_impl=VectorStoreIndex, similarity_top_k=2):
self._update_rag_info(locals())
index = vector_store_impl(self.nodes)
self.retriever = index.as_retriever(similarity_top_k=similarity_top_k)
def create_query_engine(self, query_engine=RetrieverQueryEngine):
self._update_rag_info(locals())
self.query_engine = query_engine.from_args(self.retriever)
def setup_and_deploy_RAG(self, data_loader, data_loader_kwargs,
node_parser=SimpleNodeParser, chunk_size=1024,
open_ai_model="gpt-3.5-turbo", embed_model="local:BAAI/bge-small-en",
vector_store_impl=VectorStoreIndex, similarity_top_k=2,
query_engine=RetrieverQueryEngine):
self.load_documents(data_loader, data_loader_kwargs)
self.parse_docs_to_nodes(node_parser, chunk_size)
self.set_model_settings(open_ai_model, embed_model)
self.create_retriever(vector_store_impl, similarity_top_k)
self.create_query_engine(query_engine)
return self
def query(self, query:str) -> str:
if self.query_engine is not None:
response = self.query_engine.query(query)
return str(response)
else:
raise ValueError("You must set up your RAG and its query engine before submitting queries.")
def query_multiple(self, queries:List[str]) -> List[str]:
if self.query_engine is not None:
responses = []
for query in queries:
response = self.query_engine.query(query)
responses.append(str(response))
return responses
else:
raise ValueError("You must set up your RAG and its query engine before submitting queries.")
def fetch_relevant_info(self, query:str) -> List[str]:
if self.retriever is not None:
retrievals = self.retriever.retrieve(query)
return retrievals
else:
raise ValueError("You must set up your RAG and its retriever before fetching relevant information.")
def display_relevant_info(self, query:str, source_length=1500):
retrievals = self.fetch_relevant_info(query=query)
for retrieval in retrievals:
display_source_node(retrieval, source_length=source_length)
def get_rag_info(self):
return self.rag_info | [
"llama_index.llms.openai.OpenAI",
"llama_index.core.embeddings.resolve_embed_model",
"llama_index.core.response.notebook_utils.display_source_node"
] | [((1816, 1829), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1827, 1829), False, 'from dotenv import load_dotenv\n'), ((1862, 1889), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': 'open_ai_model'}), '(model=open_ai_model)\n', (1868, 1889), False, 'from llama_index.llms.openai import OpenAI\n'), ((1921, 1953), 'llama_index.core.embeddings.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (1940, 1953), False, 'from llama_index.core.embeddings import resolve_embed_model\n'), ((4320, 4379), 'llama_index.core.response.notebook_utils.display_source_node', 'display_source_node', (['retrieval'], {'source_length': 'source_length'}), '(retrieval, source_length=source_length)\n', (4339, 4379), False, 'from llama_index.core.response.notebook_utils import display_source_node\n')] |
from fastapi import FastAPI, File, UploadFile, HTTPException
import openai
from dotenv import load_dotenv
import os
import json
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SimpleFileNodeParser
from llama_index.vector_stores.weaviate import WeaviateVectorStore
from llama_index.core import VectorStoreIndex, StorageContext
import weaviate
import uvicorn
load_dotenv()
app = FastAPI()
api_key = os.environ.get('OPENAI_API_KEY')
openai.api_key = api_key
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
WEAVIATE_API_KEY = os.getenv("WEAVIATE_API_KEY")
WEAVIATE_URL = os.getenv("WEAVIATE_URL")
auth_config = weaviate.AuthApiKey(api_key=WEAVIATE_API_KEY)
client = weaviate.Client(
url=WEAVIATE_URL,
auth_client_secret=auth_config
)
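# Embed every document under ./Data and store the vectors in the "DCPR" Weaviate index.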
def search_and_query():
blogs = SimpleDirectoryReader("./Data").load_data()
vector_store = WeaviateVectorStore(weaviate_client=client, index_name="DCPR")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
VectorStoreIndex.from_documents(blogs, storage_context=storage_context)
return "Done Embeddings"
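# Answer a question against the persisted "DCPR" index.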
def Quert(ask):
vector_store = WeaviateVectorStore(weaviate_client=client, index_name="DCPR")
loaded_index = VectorStoreIndex.from_vector_store(vector_store)
query_engine = loaded_index.as_query_engine()
response = query_engine.query(ask)
return response
def contract_analysis_w_fact_checking(text):
if not text:
raise HTTPException(
status_code=400, detail="Text field is required in the input data.")
print("done 1")
    # Perform contract analysis by querying the Weaviate-backed index via Quert
quert_instance = Quert(text)
# Extract relevant information from the Quert response
if quert_instance.response:
contract_results = [{
"LLM Response": quert_instance.response,
"Source_node": [{
"Page_number": key_point.node.metadata.get('page_label', ''),
"File_Name": key_point.node.metadata.get('file_name', ''),
"Text": key_point.node.text,
"Start_Char": key_point.node.start_char_idx,
"End_Char": key_point.node.end_char_idx,
"Score_Matching": key_point.score
} for key_point in quert_instance.source_nodes]
}]
else:
contract_results = []
# Return a standardized response
return {"status": "success", "message": "Contract analysis successful", "model_response": contract_results}
@app.post("/embedd")
async def predict():
try:
dor = search_and_query()
return {"user_content": dor}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.post("/predict")
async def predict(data: dict):
try:
messages = data.get("messages", [])
user_message = next((msg["content"] for msg in messages if msg["role"] == "user"), None)
out = contract_analysis_w_fact_checking(user_message)
if user_message:
return {"user_content": out}
else:
raise HTTPException(
status_code=400, detail="User message not found in input.")
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.get("/")
def read_root():
return {"Hello": "World"}
| [
"llama_index.vector_stores.weaviate.WeaviateVectorStore",
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.VectorStoreIndex.from_vector_store",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.SimpleDirectoryReader"
] | [((402, 415), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (413, 415), False, 'from dotenv import load_dotenv\n'), ((423, 432), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (430, 432), False, 'from fastapi import FastAPI, File, UploadFile, HTTPException\n'), ((444, 476), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (458, 476), False, 'import os\n'), ((519, 546), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (528, 546), False, 'import os\n'), ((612, 641), 'os.getenv', 'os.getenv', (['"""WEAVIATE_API_KEY"""'], {}), "('WEAVIATE_API_KEY')\n", (621, 641), False, 'import os\n'), ((657, 682), 'os.getenv', 'os.getenv', (['"""WEAVIATE_URL"""'], {}), "('WEAVIATE_URL')\n", (666, 682), False, 'import os\n'), ((697, 742), 'weaviate.AuthApiKey', 'weaviate.AuthApiKey', ([], {'api_key': 'WEAVIATE_API_KEY'}), '(api_key=WEAVIATE_API_KEY)\n', (716, 742), False, 'import weaviate\n'), ((753, 818), 'weaviate.Client', 'weaviate.Client', ([], {'url': 'WEAVIATE_URL', 'auth_client_secret': 'auth_config'}), '(url=WEAVIATE_URL, auth_client_secret=auth_config)\n', (768, 818), False, 'import weaviate\n'), ((926, 988), 'llama_index.vector_stores.weaviate.WeaviateVectorStore', 'WeaviateVectorStore', ([], {'weaviate_client': 'client', 'index_name': '"""DCPR"""'}), "(weaviate_client=client, index_name='DCPR')\n", (945, 988), False, 'from llama_index.vector_stores.weaviate import WeaviateVectorStore\n'), ((1011, 1066), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1039, 1066), False, 'from llama_index.core import VectorStoreIndex, StorageContext\n'), ((1071, 1142), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['blogs'], {'storage_context': 'storage_context'}), '(blogs, storage_context=storage_context)\n', (1102, 1142), False, 'from llama_index.core import VectorStoreIndex, StorageContext\n'), ((1208, 1270), 'llama_index.vector_stores.weaviate.WeaviateVectorStore', 'WeaviateVectorStore', ([], {'weaviate_client': 'client', 'index_name': '"""DCPR"""'}), "(weaviate_client=client, index_name='DCPR')\n", (1227, 1270), False, 'from llama_index.vector_stores.weaviate import WeaviateVectorStore\n'), ((1290, 1338), 'llama_index.core.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {}), '(vector_store)\n', (1324, 1338), False, 'from llama_index.core import VectorStoreIndex, StorageContext\n'), ((1527, 1614), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""Text field is required in the input data."""'}), "(status_code=400, detail=\n 'Text field is required in the input data.')\n", (1540, 1614), False, 'from fastapi import FastAPI, File, UploadFile, HTTPException\n'), ((863, 894), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./Data"""'], {}), "('./Data')\n", (884, 894), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((3176, 3249), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""User message not found in input."""'}), "(status_code=400, detail='User message not found in input.')\n", (3189, 3249), False, 'from fastapi import FastAPI, File, UploadFile, HTTPException\n')] |
from init import *
from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext
from llama_index.node_parser import SimpleNodeParser
from llama_index import VectorStoreIndex
from llama_index.llms import OpenAI
from llama_index import download_loader
class Index:
def __init__(self, dir="data"):
"""Initialize the index."""
self.loader = download_loader("UnstructuredReader")()
self.docs = self.load(dir)
self.nodes = SimpleNodeParser.from_defaults().get_nodes_from_documents(self.docs)
llm_predictor = LLMPredictor(llm=OpenAI(model="gpt-3.5-turbo", temperature=0))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size=1000)
self.index = VectorStoreIndex(self.nodes, service_context=service_context)
self.query_engine = self.index.as_query_engine(streaming=True)
self.retriever = self.index.as_retriever()
def load(self, dir="data"):
"""Load all documents from a directory."""
print(f"Loading directory: {dir}")
doc_files = []
for path, subdirs, files in os.walk(dir):
for name in files:
doc_files.append(os.path.join(path, name))
docs = []
for f in doc_files:
print(f"Loading file: {f}")
try:
docs += self.loader.load_data(f, split_documents=False)
except Exception as e:
print(e, "Skipping.")
return docs
def query(self, query):
"""Query the index."""
print("Query:", query)
response = self.query_engine.query(query)
print("Response:", response)
return response
if __name__ == "__main__":
index = Index(dir="data")
response = index.query("What is the meaning of life?")
| [
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.node_parser.SimpleNodeParser.from_defaults",
"llama_index.VectorStoreIndex"
] | [((653, 727), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size': '(1000)'}), '(llm_predictor=llm_predictor, chunk_size=1000)\n', (681, 727), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((749, 810), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['self.nodes'], {'service_context': 'service_context'}), '(self.nodes, service_context=service_context)\n', (765, 810), False, 'from llama_index import VectorStoreIndex\n'), ((374, 411), 'llama_index.download_loader', 'download_loader', (['"""UnstructuredReader"""'], {}), "('UnstructuredReader')\n", (389, 411), False, 'from llama_index import download_loader\n'), ((470, 502), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {}), '()\n', (500, 502), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((581, 625), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model='gpt-3.5-turbo', temperature=0)\n", (587, 625), False, 'from llama_index.llms import OpenAI\n')] |
def get_agent(list_filters,openai_key,pinecone_key):
import logging
import sys
import os
import pandas as pd
import pinecone
import openai
from llama_index import VectorStoreIndex
from llama_index.vector_stores import PineconeVectorStore
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine
from llama_index.agent import OpenAIAgent
from llama_index.llms import OpenAI
from llama_index.tools import BaseTool, FunctionTool
from agent_utils import get_rebate,get_tax
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index.llms import ChatMessage
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
#Openai and Pinecone private key
openai.api_key = openai_key
api_key = pinecone_key
#Instantiate pinecone vector store
pinecone.init(api_key=api_key, environment="gcp-starter")
pinecone_index = pinecone.Index("quickstart-index")
vector_store = PineconeVectorStore(
pinecone_index=pinecone_index
)
index = VectorStoreIndex.from_vector_store(vector_store)
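    # Metadata filters always include the shared 'rates' and 'claim' categories.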
if not list_filters:
list_filters = ['rates','claim']
else:
list_filters += ['rates','claim']
#Define retriever
retriever = index.as_retriever(
vector_store_kwargs={"filter": {"category": {"$in":list_filters}}},streaming=True)
# assemble query engine
query_engine = RetrieverQueryEngine(retriever=retriever)
#Get agent tools from agent_utils file and instantiate tools
tax_tool = FunctionTool.from_defaults(fn=get_tax)
relief_tool = FunctionTool.from_defaults(fn=get_rebate)
#Create list of tool for agent
tools = [
QueryEngineTool(
query_engine=query_engine,
metadata=ToolMetadata(
name="tax_relief_retriever",
description=(
"Provides information on reliefs for a given item category and information on how to claim tax reliefs"
"Use a detailed plain text question as input to the tool."
),
),
),
tax_tool,relief_tool]
#Define chat agent
llm = OpenAI(model="gpt-3.5-turbo-0613")
#Set a default chat history to handle cases where information is not provided
str_cat = ','.join(list_filters)
chat_history = [ChatMessage(role= 'user', content=f"Assume I earn an income of RM90,000. If I state my income chat, update it to the stated income. I want to buy an items in category {str_cat}")]
    system_prompt = "You are a tax advisory agent and you must only use information from the tools to answer queries. Queries unrelated to personal income tax that do not use one of the tools should not be answered."
agent = OpenAIAgent.from_tools(tools,system_prompt = system_prompt, chat_history = chat_history, verbose=True)
# chat_engine = CondenseQuestionChatEngine.from_defaults(
# query_engine=query_engine,
# # condense_question_prompt=custom_prompt,
# # chat_history=custom_chat_history,
# verbose=True,
# )
return agent
| [
"llama_index.vector_stores.PineconeVectorStore",
"llama_index.llms.OpenAI",
"llama_index.tools.ToolMetadata",
"llama_index.llms.ChatMessage",
"llama_index.tools.FunctionTool.from_defaults",
"llama_index.agent.OpenAIAgent.from_tools",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.query_engine.RetrieverQueryEngine"
] | [((721, 779), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (740, 779), False, 'import logging\n'), ((1006, 1063), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'api_key', 'environment': '"""gcp-starter"""'}), "(api_key=api_key, environment='gcp-starter')\n", (1019, 1063), False, 'import pinecone\n'), ((1085, 1119), 'pinecone.Index', 'pinecone.Index', (['"""quickstart-index"""'], {}), "('quickstart-index')\n", (1099, 1119), False, 'import pinecone\n'), ((1139, 1189), 'llama_index.vector_stores.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'pinecone_index'}), '(pinecone_index=pinecone_index)\n', (1158, 1189), False, 'from llama_index.vector_stores import PineconeVectorStore\n'), ((1217, 1265), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {}), '(vector_store)\n', (1251, 1265), False, 'from llama_index import VectorStoreIndex\n'), ((1583, 1624), 'llama_index.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever'}), '(retriever=retriever)\n', (1603, 1624), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((1706, 1744), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'get_tax'}), '(fn=get_tax)\n', (1732, 1744), False, 'from llama_index.tools import BaseTool, FunctionTool\n'), ((1763, 1804), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'get_rebate'}), '(fn=get_rebate)\n', (1789, 1804), False, 'from llama_index.tools import BaseTool, FunctionTool\n'), ((2346, 2380), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo-0613"""'}), "(model='gpt-3.5-turbo-0613')\n", (2352, 2380), False, 'from llama_index.llms import OpenAI\n'), ((2929, 3033), 'llama_index.agent.OpenAIAgent.from_tools', 'OpenAIAgent.from_tools', (['tools'], {'system_prompt': 'system_prompt', 'chat_history': 'chat_history', 'verbose': '(True)'}), '(tools, system_prompt=system_prompt, chat_history=\n chat_history, verbose=True)\n', (2951, 3033), False, 'from llama_index.agent import OpenAIAgent\n'), ((815, 855), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (836, 855), False, 'import logging\n'), ((2520, 2707), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': '"""user"""', 'content': 'f"""Assume I earn an income of RM90,000. If I state my income chat, update it to the stated income. I want to buy an items in category {str_cat}"""'}), "(role='user', content=\n f'Assume I earn an income of RM90,000. If I state my income chat, update it to the stated income. 
I want to buy an items in category {str_cat}'\n )\n", (2531, 2707), False, 'from llama_index.llms import ChatMessage\n'), ((784, 803), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (801, 803), False, 'import logging\n'), ((1945, 2169), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""tax_relief_retriever"""', 'description': '"""Provides information on reliefs for a given item category and information on how to claim tax reliefsUse a detailed plain text question as input to the tool."""'}), "(name='tax_relief_retriever', description=\n 'Provides information on reliefs for a given item category and information on how to claim tax reliefsUse a detailed plain text question as input to the tool.'\n )\n", (1957, 2169), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This script allows you to ask questions to the Alice in Wonderland book.
It uses the GPT-3 model to create a vector index of the book, and then
allows you to ask questions to the index.
'''
import os
import yaml
import openai
from llama_index import (
GPTVectorStoreIndex,
StorageContext,
SimpleDirectoryReader,
download_loader,
load_index_from_storage
)
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores import SimpleVectorStore
from llama_index.storage.index_store import SimpleIndexStore
from argparse import ArgumentParser
# script configuration
persist_dir = "./indices/alice/"
pdf_file = "alice.pdf"
openai_config = "projects/infrastructure/charts/secrets/values/integration/openai-configuration/openai.yml"
credentials_path = os.path.join(os.path.expanduser('~'), openai_config)
credentials = yaml.safe_load(open(credentials_path, "r"))
os.environ["OPENAI_API_KEY"] = credentials["access_token"]
os.environ["OPENAI_ORGANIZATION"] = credentials["organization_id"]
# Save the index in .JSON file for repeated use. Saves money on ADA API calls
def create_index_from_pdf(persist_dir):
# This example uses PDF reader, there are many options at https://llamahub.ai/
# Use SimpleDirectoryReader to read all the txt files in a folder
PDFReader = download_loader("PDFReader")
loader = PDFReader()
documents = loader.load_data(file=pdf_file)
# Chunking and Embedding of the chunks.
index = GPTVectorStoreIndex.from_documents(documents)
index.storage_context.persist(persist_dir=persist_dir)
return index
def load_index(persist_dir):
storage_context = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir=persist_dir),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir=persist_dir),
index_store=SimpleIndexStore.from_persist_dir(persist_dir=persist_dir),
)
index = load_index_from_storage(storage_context)
return index
def main(args):
print(args.question)
if args.create_index:
index = create_index_from_pdf(persist_dir)
else:
index = load_index(persist_dir)
    # Retrieval, node postprocessing, response synthesis.
query_engine = index.as_query_engine()
# Run the query engine on a user question.
response = query_engine.query(args.question)
print(response)
if __name__ == '__main__':
parser = ArgumentParser(description=__doc__, prog='alice.py', epilog='Have fun!')
parser.add_argument('-c', '--create-index', help='(re)create the index', action='store_true')
parser.add_argument('question', help='question string to ask the index')
args = parser.parse_args()
main(args)
| [
"llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir",
"llama_index.storage.index_store.SimpleIndexStore.from_persist_dir",
"llama_index.download_loader",
"llama_index.vector_stores.SimpleVectorStore.from_persist_dir",
"llama_index.load_index_from_storage",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((862, 885), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (880, 885), False, 'import os\n'), ((1376, 1404), 'llama_index.download_loader', 'download_loader', (['"""PDFReader"""'], {}), "('PDFReader')\n", (1391, 1404), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, SimpleDirectoryReader, download_loader, load_index_from_storage\n'), ((1535, 1580), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1569, 1580), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, SimpleDirectoryReader, download_loader, load_index_from_storage\n'), ((2000, 2040), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (2023, 2040), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, SimpleDirectoryReader, download_loader, load_index_from_storage\n'), ((2491, 2563), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__', 'prog': '"""alice.py"""', 'epilog': '"""Have fun!"""'}), "(description=__doc__, prog='alice.py', epilog='Have fun!')\n", (2505, 2563), False, 'from argparse import ArgumentParser\n'), ((1756, 1817), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (1792, 1817), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((1840, 1899), 'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (1874, 1899), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((1921, 1979), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (1954, 1979), False, 'from llama_index.storage.index_store import SimpleIndexStore\n')] |
from typing import Protocol
import html2text
from bs4 import BeautifulSoup
from llama_index import Document
from playwright.sync_api import sync_playwright
class ISitemapParser(Protocol):
def get_all_urls(self, sitemap_url: str) -> list[str]:
...
class IWebScraper(Protocol):
def scrape(self, urls: list[str]) -> list[Document]:
...
class PlaywrightWebScraper(IWebScraper):
def __init__(self, sitemap_parser: ISitemapParser) -> None:
self.sitemap_parser = sitemap_parser
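    # Expand sitemap URLs into the concrete page URLs they list; keep regular URLs as-is.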
def _expand_sitemap_urls(self, urls: list[str]) -> list[str]:
processed_urls: list[str] = []
for url in urls:
if "sitemap" not in url:
                processed_urls.append(url)
continue
processed_urls += self.sitemap_parser.get_all_urls(url)
return processed_urls
def scrape(self, urls: list[str]) -> list[Document]:
documents: list[Document] = []
all_urls = self._expand_sitemap_urls(urls)
with sync_playwright() as playwright:
browser = playwright.chromium.launch(headless=True)
page = browser.new_page()
for url in all_urls:
page.goto(url, wait_until="networkidle")
page.wait_for_timeout(3000)
html_content = page.content()
soup = BeautifulSoup(html_content, "html.parser")
h2t = html2text.HTML2Text()
h2t.ignore_links = False
h2t.ignore_images = True
markdown_content = h2t.handle((soup.select_one("main") or soup.select_one("body")).prettify())
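                # Collect Open Graph metadata (title, URL, description) when the tags are present.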
metadata = {}
title_tag = soup.select_one('meta[property="og:title"]')
if title_tag and title_tag.has_attr("content"):
metadata["og:title"] = title_tag["content"]
url_tag = soup.select_one('meta[property="og:url"]')
if url_tag and url_tag.has_attr("content"):
metadata["og:url"] = url_tag["content"]
description_tag = soup.select_one('meta[property="og:description"]')
if description_tag and description_tag.has_attr("content"):
metadata["og:description"] = description_tag["content"]
documents.append(Document(text=markdown_content, metadata=metadata))
return documents
| [
"llama_index.Document"
] | [((1005, 1022), 'playwright.sync_api.sync_playwright', 'sync_playwright', ([], {}), '()\n', (1020, 1022), False, 'from playwright.sync_api import sync_playwright\n'), ((1345, 1387), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_content', '"""html.parser"""'], {}), "(html_content, 'html.parser')\n", (1358, 1387), False, 'from bs4 import BeautifulSoup\n'), ((1411, 1432), 'html2text.HTML2Text', 'html2text.HTML2Text', ([], {}), '()\n', (1430, 1432), False, 'import html2text\n'), ((2321, 2371), 'llama_index.Document', 'Document', ([], {'text': 'markdown_content', 'metadata': 'metadata'}), '(text=markdown_content, metadata=metadata)\n', (2329, 2371), False, 'from llama_index import Document\n')] |
import os
from dotenv import load_dotenv
from IPython.display import Markdown, display
from llama_index.legacy import VectorStoreIndex, ServiceContext
from llama_index.legacy.vector_stores import ChromaVectorStore
from llama_index.legacy.storage.storage_context import StorageContext
from llama_index.legacy.embeddings import HuggingFaceEmbedding
from llama_index.legacy.llms import Gemini
from llama_index.legacy.node_parser import SentenceWindowNodeParser, SimpleNodeParser
from llama_index.legacy import GPTVectorStoreIndex
from llama_index.legacy.readers.web import BeautifulSoupWebReader
import chromadb
import streamlit as st
# Enable Logging
import logging
import sys
#You can set the logging level to DEBUG for more verbose output,
# or use level=logging.INFO for less detailed information.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# Load environment variables from the .env file
load_dotenv()
loader = BeautifulSoupWebReader()
urls = [
'https://www.hsph.harvard.edu/nutritionsource/kids-healthy-eating-plate/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-eating-plate/',
'https://www.hsph.harvard.edu/nutritionsource/what-should-you-eat/',
'https://www.hsph.harvard.edu/nutritionsource/what-should-you-eat/whole-grains/',
'https://www.hsph.harvard.edu/nutritionsource/what-should-you-eat/protein/',
'https://www.hsph.harvard.edu/nutritionsource/what-should-you-eat/vegetables-and-fruits/',
'https://www.hsph.harvard.edu/nutritionsource/what-should-you-eat/fats-and-cholesterol/',
'https://www.hsph.harvard.edu/nutritionsource/what-should-you-eat/fats-and-cholesterol/types-of-fat/',
'https://www.hsph.harvard.edu/nutritionsource/what-should-you-eat/fats-and-cholesterol/cholesterol/',
'https://www.hsph.harvard.edu/nutritionsource/what-should-you-eat/fats-and-cholesterol/dietary-fat-and-disease/',
'https://www.hsph.harvard.edu/nutritionsource/vitamins/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-drinks/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-drinks/other-healthy-beverage-options/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-drinks/drinks-to-consume-in-moderation/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-drinks/sugary-drinks/',
'https://www.hsph.harvard.edu/nutritionsource/sports-drinks/',
'https://www.hsph.harvard.edu/nutritionsource/energy-drinks/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-drinks/beverages-public-health-concerns/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-drinks/artificial-sweeteners/',
'https://www.hsph.harvard.edu/nutritionsource/salt-and-sodium/',
'https://www.hsph.harvard.edu/nutritionsource/salt-and-sodium/take-action-on-salt/',
'https://www.hsph.harvard.edu/nutritionsource/salt-and-sodium/sodium-public-health-concerns/',
'https://www.hsph.harvard.edu/nutritionsource/carbohydrates/',
'https://www.hsph.harvard.edu/nutritionsource/carbohydrates/carbohydrates-and-blood-sugar/',
'https://www.hsph.harvard.edu/nutritionsource/carbohydrates/fiber/',
'https://www.hsph.harvard.edu/nutritionsource/carbohydrates/added-sugar-in-the-diet/',
'https://www.hsph.harvard.edu/nutritionsource/sustainability/',
'https://www.hsph.harvard.edu/nutritionsource/sustainability/plate-and-planet/',
'https://www.hsph.harvard.edu/nutritionsource/sustainability/food-waste/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-weight/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-weight/measuring-fat/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-weight/best-diet-quality-counts/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-weight/healthy-dietary-styles/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-weight/diet-reviews/',
'https://www.hsph.harvard.edu/nutritionsource/staying-active/',
'https://www.hsph.harvard.edu/nutritionsource/staying-active/active-communities/',
'https://www.hsph.harvard.edu/nutritionsource/stress-and-health/',
'https://www.hsph.harvard.edu/nutritionsource/sleep/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-longevity/',
'https://www.hsph.harvard.edu/nutritionsource/disease-prevention/',
'https://www.hsph.harvard.edu/nutritionsource/disease-prevention/cardiovascular-disease/',
'https://www.hsph.harvard.edu/nutritionsource/disease-prevention/cardiovascular-disease/preventing-cvd/',
'https://www.hsph.harvard.edu/nutritionsource/disease-prevention/diabetes-prevention/',
'https://www.hsph.harvard.edu/nutritionsource/disease-prevention/diabetes-prevention/preventing-diabetes-full-story/',
'https://www.hsph.harvard.edu/nutritionsource/cancer/',
'https://www.hsph.harvard.edu/nutritionsource/cancer/preventing-cancer/',
'https://www.hsph.harvard.edu/nutritionsource/oral-health/',
'https://www.hsph.harvard.edu/nutritionsource/precision-nutrition/',
'https://www.hsph.harvard.edu/nutritionsource/nutrition-and-immunity/',
'https://www.hsph.harvard.edu/nutritionsource/recipes-2/',
'https://www.hsph.harvard.edu/nutritionsource/asparagus-with-warm-tarragon-pecan-vinaigrette/',
'https://www.hsph.harvard.edu/nutritionsource/asparagus-spears-with-mandarin-orange/',
'https://www.hsph.harvard.edu/nutritionsource/baby-arugula-and-shaved-fennel-with-lemon-vinaigrette/',
'https://www.hsph.harvard.edu/nutritionsource/braised-cabbage-with-leeks-and-sesame-seeds/',
'https://www.hsph.harvard.edu/nutritionsource/braised-oyster-mushrooms-coconut-macadamia/',
'https://www.hsph.harvard.edu/nutritionsource/butternut-squash-soup-recipe/',
'https://www.hsph.harvard.edu/nutritionsource/caesar-salad/',
'https://www.hsph.harvard.edu/nutritionsource/cardamom-roasted-cauliflower/',
'https://www.hsph.harvard.edu/nutritionsource/carrot-and-coriander-soup/',
'https://www.hsph.harvard.edu/nutritionsource/cauliflower-tomato-soup/',
'https://www.hsph.harvard.edu/nutritionsource/cauliflower-walnut-soup/',
'https://www.hsph.harvard.edu/nutritionsource/endive-salad-with-citrus-walnut-dressing/',
'https://www.hsph.harvard.edu/nutritionsource/customizable-stuffed-peppers/',
'https://www.hsph.harvard.edu/nutritionsource/fresh-spinach-with-sesame-seeds/',
'https://www.hsph.harvard.edu/nutritionsource/garlic-braised-greens/',
'https://www.hsph.harvard.edu/nutritionsource/green-beans-with-dried-cherries/',
'https://www.hsph.harvard.edu/nutritionsource/green-beans-with-chili-garlic-sauce/',
'https://www.hsph.harvard.edu/nutritionsource/green-chutney/',
'https://www.hsph.harvard.edu/nutritionsource/grilled-eggplant-cutlets/',
'https://www.hsph.harvard.edu/nutritionsource/kale-with-caramelized-onions/',
'https://www.hsph.harvard.edu/nutritionsource/marinated-shiitake-mushroom-and-cucumber-salad/',
'https://www.hsph.harvard.edu/nutritionsource/mashed-cauliflower/',
'https://www.hsph.harvard.edu/nutritionsource/mushroom-stroganoff/',
'https://www.hsph.harvard.edu/nutritionsource/pan-roasted-wild-mushrooms-with-coffee-and-hazelnuts/',
'https://www.hsph.harvard.edu/nutritionsource/portabella-steak-sandwich/',
'https://www.hsph.harvard.edu/nutritionsource/provencal-vegetables/',
'https://www.hsph.harvard.edu/nutritionsource/vegetable-stock/',
'https://www.hsph.harvard.edu/nutritionsource/roasted-brussels-sprouts/',
'https://www.hsph.harvard.edu/nutritionsource/brussels-sprouts-with-shallots/',
'https://www.hsph.harvard.edu/nutritionsource/roasted-beets-with-balsamic-vinegar/',
'https://www.hsph.harvard.edu/nutritionsource/roasted-balsamic-vegetables/',
'https://www.hsph.harvard.edu/nutritionsource/roasted-squash-with-pomegranate/',
'https://www.hsph.harvard.edu/nutritionsource/sweet-potatoes-with-pecans/',
'https://www.hsph.harvard.edu/nutritionsource/ruby-chard/',
'https://www.hsph.harvard.edu/nutritionsource/sauted-rainbow-swiss-chard/',
'https://www.hsph.harvard.edu/nutritionsource/simple-celery-date-salad/',
'https://www.hsph.harvard.edu/nutritionsource/southwestern-corn-hash/',
'https://www.hsph.harvard.edu/nutritionsource/spicy-broccolini/',
'https://www.hsph.harvard.edu/nutritionsource/spicy-indian-slaw/',
'https://www.hsph.harvard.edu/nutritionsource/stir-fried-vegetables-tomato-curry/',
'https://www.hsph.harvard.edu/nutritionsource/sugar-snap-peas-with-fresh-mint/',
'https://www.hsph.harvard.edu/nutritionsource/tarragon-succotash/',
'https://www.hsph.harvard.edu/nutritionsource/tunisian-carrot-salad/',
'https://www.hsph.harvard.edu/nutritionsource/vegetable-stock-recipe/',
'https://www.hsph.harvard.edu/nutritionsource/vegetarian-shepherds-pie-recipe/',
'https://www.hsph.harvard.edu/nutritionsource/wild-mushroom-soup-with-soba/',
'https://www.hsph.harvard.edu/nutritionsource/yellow-squash-with-sage/',
'https://www.hsph.harvard.edu/nutritionsource/arugula-watermelon-feta-and-mint-salad-with-balsamic-vinaigrette/',
'https://www.hsph.harvard.edu/nutritionsource/citrus-salad/',
'https://www.hsph.harvard.edu/nutritionsource/almond-coconut-macaroons/',
'https://www.hsph.harvard.edu/nutritionsource/dried-fruit-and-nuts/',
'https://www.hsph.harvard.edu/nutritionsource/watermelon-salad/',
'https://www.hsph.harvard.edu/nutritionsource/fruit-compote-spiced-nuts/',
'https://www.hsph.harvard.edu/nutritionsource/strawberry-rhubarb-crisp/',
'https://www.hsph.harvard.edu/nutritionsource/barley-roasted-portobello-and-fennel-salad/',
'https://www.hsph.harvard.edu/nutritionsource/blueberry-muffins/',
'https://www.hsph.harvard.edu/nutritionsource/brown-rice-pancakes/',
'https://www.hsph.harvard.edu/nutritionsource/bulgur-pilaf/',
'https://www.hsph.harvard.edu/nutritionsource/couscous-minted-with-pine-nuts/',
'https://www.hsph.harvard.edu/nutritionsource/couscous-quinoa-tabouli/',
'https://www.hsph.harvard.edu/nutritionsource/cranberry-orange-muffin/',
'https://www.hsph.harvard.edu/nutritionsource/fantastic-bulgur-dish/',
'https://www.hsph.harvard.edu/nutritionsource/farro-risotto-walnut-pesto/',
'https://www.hsph.harvard.edu/nutritionsource/farro-roasted-confetti-vegetables/',
'https://www.hsph.harvard.edu/nutritionsource/hearty-whole-grain-bread/',
'https://www.hsph.harvard.edu/nutritionsource/irish-brown-bread/',
'https://www.hsph.harvard.edu/nutritionsource/jalapeno-cheddar-corn-muffins/',
'https://www.hsph.harvard.edu/nutritionsource/lemon-chickpea-breakfast-muffins/',
'https://www.hsph.harvard.edu/nutritionsource/mediterranean-rice/',
'https://www.hsph.harvard.edu/nutritionsource/mixed-up-grains/',
'https://www.hsph.harvard.edu/nutritionsource/mushroom-barley-risotto/',
'https://www.hsph.harvard.edu/nutritionsource/oatmeal-roti/',
'https://www.hsph.harvard.edu/nutritionsource/pasta-in-zemino/',
'https://www.hsph.harvard.edu/nutritionsource/rigatoni-fresh-basil-pesto-corn-zucchini/',
'https://www.hsph.harvard.edu/nutritionsource/quinoa-chia-edamame-veggie-burger/',
'https://www.hsph.harvard.edu/nutritionsource/quinoa-enchilada-casserole/',
'https://www.hsph.harvard.edu/nutritionsource/spicy-coconut-rice-with-limes/',
'https://www.hsph.harvard.edu/nutritionsource/three-green-wheat-berry-salad-with-mushroom-bacon-recipe/',
'https://www.hsph.harvard.edu/nutritionsource/wheatberries-and-chives/',
'https://www.hsph.harvard.edu/nutritionsource/whole-wheat-banana-nut-muffins/',
'https://www.hsph.harvard.edu/nutritionsource/whole-wheat-penne-with-pistachio-pesto-and-cherry-tomatoes/',
'https://www.hsph.harvard.edu/nutritionsource/wild-rice-with-cranberries/',
'https://www.hsph.harvard.edu/nutritionsource/greek-skordalia/',
'https://www.hsph.harvard.edu/nutritionsource/green-lentil-hummus-herbs-olives/',
'https://www.hsph.harvard.edu/nutritionsource/guacamole/',
'https://www.hsph.harvard.edu/nutritionsource/hot-pepper-vinaigrette/',
'https://www.hsph.harvard.edu/nutritionsource/hummus/',
'https://www.hsph.harvard.edu/nutritionsource/italian-pesto-alla-trapanese/',
'https://www.hsph.harvard.edu/nutritionsource/mint-vinaigrette/',
'https://www.hsph.harvard.edu/nutritionsource/oregano-garlic-vinaigrette/',
'https://www.hsph.harvard.edu/nutritionsource/spanish-romesco-sauce/',
'https://www.hsph.harvard.edu/nutritionsource/turkish-muhammara/',
'https://www.hsph.harvard.edu/nutritionsource/turkish-tarator/',
'https://www.hsph.harvard.edu/nutritionsource/walnut-pesto/',
'https://www.hsph.harvard.edu/nutritionsource/white-bean-and-kale-hummus/',
'https://www.hsph.harvard.edu/nutritionsource/asian-trail-mix/',
'https://www.hsph.harvard.edu/nutritionsource/cozy-red-lentil-mash/',
'https://www.hsph.harvard.edu/nutritionsource/crunchy-roasted-chickpeas/',
'https://www.hsph.harvard.edu/nutritionsource/curried-red-lentil-soup/',
'https://www.hsph.harvard.edu/nutritionsource/dukkah/',
'https://www.hsph.harvard.edu/nutritionsource/french-style-lentils/',
'https://www.hsph.harvard.edu/nutritionsource/garbanzo-beans-with-spinach-and-tomatoes/',
'https://www.hsph.harvard.edu/nutritionsource/green-beans-with-tofu-and-crushed-peanuts/',
'https://www.hsph.harvard.edu/nutritionsource/mushroom-tofu-veggie-burger/',
'https://www.hsph.harvard.edu/nutritionsource/spicy-lemongrass-tofu-with-asian-basil/',
'https://www.hsph.harvard.edu/nutritionsource/sprouted-lentil-cabbage-celery-slaw/',
'https://www.hsph.harvard.edu/nutritionsource/thai-eggplant-salad-with-coconut-tofu-strips/',
'https://www.hsph.harvard.edu/nutritionsource/tomato-and-white-bean-salad/',
'https://www.hsph.harvard.edu/nutritionsource/whole-wheat-penne-with-pistachio-pesto-and-cherry-tomatoes/',
'https://www.hsph.harvard.edu/nutritionsource/white-beans-wild-rice-and-mushrooms/',
'https://www.hsph.harvard.edu/nutritionsource/vegetarian-refried-beans/',
'https://www.hsph.harvard.edu/nutritionsource/cod-and-littleneck-clams/',
'https://www.hsph.harvard.edu/nutritionsource/crawfish-touffe/',
'https://www.hsph.harvard.edu/nutritionsource/crispy-pan-seared-white-fish-walnut-romesco-pea-shoot-salad/',
'https://www.hsph.harvard.edu/nutritionsource/fish-creole/',
'https://www.hsph.harvard.edu/nutritionsource/miso-marinated-salmon-grilled-alder-wood/',
'https://www.hsph.harvard.edu/nutritionsource/pan-roasted-salmon-with-dill-olive-oil-capers/',
'https://www.hsph.harvard.edu/nutritionsource/pan-roasted-salmon/',
'https://www.hsph.harvard.edu/nutritionsource/shaved-fennel-salad-coriander-crusted-hamachi/',
'https://www.hsph.harvard.edu/nutritionsource/shrimp-and-chicken-gumbo/',
'https://www.hsph.harvard.edu/nutritionsource/shrimp-red-curry-crispy-sprouted-lentils/',
'https://www.hsph.harvard.edu/nutritionsource/wild-salmon-salad/',
'https://www.hsph.harvard.edu/nutritionsource/fish-tacos-with-cilantro-slaw/',
'https://www.hsph.harvard.edu/nutritionsource/chicken-shrimp-and-fruit-salad/',
'https://www.hsph.harvard.edu/nutritionsource/lemongrass-marinated-chicken-breast/',
'https://www.hsph.harvard.edu/nutritionsource/olive-oil-dressing-with-chicken-walnuts-recipe/',
'https://www.hsph.harvard.edu/nutritionsource/rosemary-and-lemon-grilled-chicken-breast/',
'https://www.hsph.harvard.edu/nutritionsource/spicy-chicken-kebabs-with-moorish-flavors/',
'https://www.hsph.harvard.edu/nutritionsource/stir-fried-chicken/',
'https://www.hsph.harvard.edu/nutritionsource/moroccan-chicken-stew-with-apricots/',
'https://www.hsph.harvard.edu/nutritionsource/stir-fried-chicken/',
'https://www.hsph.harvard.edu/nutritionsource/baked-ricotta/',
'https://www.hsph.harvard.edu/nutritionsource/roasted-tomatoes-stuffed-goat-cheese-garlic-basil/',
'https://www.hsph.harvard.edu/nutritionsource/fruit-cooler/',
'https://www.hsph.harvard.edu/nutritionsource/iced-tea-with-lemon-and-mint/'
# Add the rest of the URLs here
]
documents = loader.load_data(urls=urls)
# base Query Engine LLM
llm = Gemini(api_key=os.getenv("google_api_key"),model='gemini-pro')
# fine-tuned Embeddings model
embed_model = HuggingFaceEmbedding(
model_name='Revankumar/fine_tuned_embeddings_for_healthy_recipes'
)
# fine-tuned ServiceContext
ctx = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
)
parser = SimpleNodeParser()
nodes = parser.get_nodes_from_documents(documents)
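# Store the embeddings in a persistent local Chroma collection used as the index's vector store.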
db = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db.get_or_create_collection("quickstart")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
service_context = ServiceContext.from_defaults(embed_model=embed_model,llm=llm)
VectorStoreIndex.from_documents(
documents, storage_context=storage_context, service_context=service_context
)
| [
"llama_index.legacy.embeddings.HuggingFaceEmbedding",
"llama_index.legacy.VectorStoreIndex.from_documents",
"llama_index.legacy.storage.storage_context.StorageContext.from_defaults",
"llama_index.legacy.vector_stores.ChromaVectorStore",
"llama_index.legacy.node_parser.SimpleNodeParser",
"llama_index.legacy.readers.web.BeautifulSoupWebReader",
"llama_index.legacy.ServiceContext.from_defaults"
] | [((846, 905), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (865, 905), False, 'import logging\n'), ((1029, 1042), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1040, 1042), False, 'from dotenv import load_dotenv\n'), ((1053, 1077), 'llama_index.legacy.readers.web.BeautifulSoupWebReader', 'BeautifulSoupWebReader', ([], {}), '()\n', (1075, 1077), False, 'from llama_index.legacy.readers.web import BeautifulSoupWebReader\n'), ((16483, 16575), 'llama_index.legacy.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""Revankumar/fine_tuned_embeddings_for_healthy_recipes"""'}), "(model_name=\n 'Revankumar/fine_tuned_embeddings_for_healthy_recipes')\n", (16503, 16575), False, 'from llama_index.legacy.embeddings import HuggingFaceEmbedding\n'), ((16613, 16675), 'llama_index.legacy.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (16641, 16675), False, 'from llama_index.legacy import VectorStoreIndex, ServiceContext\n'), ((16697, 16715), 'llama_index.legacy.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {}), '()\n', (16713, 16715), False, 'from llama_index.legacy.node_parser import SentenceWindowNodeParser, SimpleNodeParser\n'), ((16775, 16820), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (16800, 16820), False, 'import chromadb\n'), ((16901, 16955), 'llama_index.legacy.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (16918, 16955), False, 'from llama_index.legacy.vector_stores import ChromaVectorStore\n'), ((16975, 17030), 'llama_index.legacy.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (17003, 17030), False, 'from llama_index.legacy.storage.storage_context import StorageContext\n'), ((17050, 17112), 'llama_index.legacy.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model', 'llm': 'llm'}), '(embed_model=embed_model, llm=llm)\n', (17078, 17112), False, 'from llama_index.legacy import VectorStoreIndex, ServiceContext\n'), ((17113, 17225), 'llama_index.legacy.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(documents, storage_context=storage_context,\n service_context=service_context)\n', (17144, 17225), False, 'from llama_index.legacy import VectorStoreIndex, ServiceContext\n'), ((937, 977), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (958, 977), False, 'import logging\n'), ((906, 925), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (923, 925), False, 'import logging\n'), ((16390, 16417), 'os.getenv', 'os.getenv', (['"""google_api_key"""'], {}), "('google_api_key')\n", (16399, 16417), False, 'import os\n')] |
import weaviate
from llama_index import StorageContext, SimpleDirectoryReader, ServiceContext, VectorStoreIndex
from llama_index.vector_stores import WeaviateVectorStore
from llama_index.embeddings import LangchainEmbedding
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
import box
import yaml
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
def load_documents(docs_path):
"""
This function retrieves and returns a list of JPEG documents from a specified directory.
    It utilizes the `SimpleDirectoryReader` class from llama_index to efficiently iterate through all files
with the `.jpg` extension within the provided directory. The function then prints the total number of loaded documents
before returning them as a list.
Args:
docs_path: The absolute or relative path to the directory containing the target JPEG documents.
Returns:
        A list of `Document` objects, each representing an individual JPEG file within the directory.
Raises:
FileNotFoundError: If the specified directory path is invalid or inaccessible.
OSError: If an error occurs during file iteration or processing.
"""
documents = SimpleDirectoryReader(docs_path, required_exts=[".jpg"]).load_data()
print(f"Loaded {len(documents)} documents")
return documents
def load_embedding_model(model_name):
"""
Creates and returns a LangchainEmbedding object based on a specified Hugging Face model name.
Args:
model_name: The string identifier of the desired pre-trained embedding model from Hugging Face.
Returns:
A LangchainEmbedding object configured with the chosen Hugging Face model.
Notes:
This function utilizes the `LangchainEmbedding` wrapper around the underlying `HuggingFaceEmbeddings` class.
"""
embeddings = LangchainEmbedding(
HuggingFaceEmbeddings(model_name=model_name)
)
return embeddings
def build_index(weaviate_client, embed_model, documents, index_name):
"""
Constructs and populates a Weaviate Vector Store index with embedded document representations.
This function takes several inputs and performs the following tasks:
1. **Context Creation:**
- Constructs a `ServiceContext` object using the provided embedding model and sets `llm` to None (assuming no language model involved).
- Creates a `WeaviateVectorStore` object for accessing the specified Weaviate client and index name.
- Builds a `StorageContext` based on the generated vector store.
2. **Index Building:**
- Utilizes the `VectorStoreIndex.from_documents` method to construct an index from the provided list of documents.
- This process involves embedding each document using the injected `embed_model` within the service context.
- The generated vector representations are then stored in the specified Weaviate index through the storage context.
3. **Index Return:**
- Finally, the function returns the constructed `VectorStoreIndex` object, representing the populated Weaviate index.
Args:
weaviate_client: An instance of the Weaviate client for accessing the target server.
embed_model: An object capable of generating vector representations for the documents.
documents: A list of documents to be indexed in Weaviate.
index_name: The name of the Weaviate Vector Store index to populate.
Returns:
A `VectorStoreIndex` object representing the newly created and populated Weaviate index.
Notes:
This function assumes that the `embed_model` and provided documents are compatible for generating suitable vector representations.
Additionally, ensure the Weaviate client has access and appropriate permissions to the specified index.
"""
service_context = ServiceContext.from_defaults(embed_model=embed_model, llm=None)
vector_store = WeaviateVectorStore(weaviate_client=weaviate_client, index_name=index_name)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents,
service_context=service_context,
storage_context=storage_context,
)
return index
if __name__ == "__main__":
# Import configuration specified in config.yml
with open('config.yml', 'r', encoding='utf8') as configuration:
cfg = box.Box(yaml.safe_load(configuration))
print("Connecting to Weaviate")
client = weaviate.Client(cfg.WEAVIATE_URL)
print("Loading documents...")
documents = load_documents(cfg.DATA_PATH)
print("Loading embedding model...")
embeddings = load_embedding_model(model_name=cfg.EMBEDDINGS)
print("Building index...")
index = build_index(client, embeddings, documents, cfg.INDEX_NAME) | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.vector_stores.WeaviateVectorStore"
] | [((331, 393), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (354, 393), False, 'import warnings\n'), ((3900, 3963), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model', 'llm': 'None'}), '(embed_model=embed_model, llm=None)\n', (3928, 3963), False, 'from llama_index import StorageContext, SimpleDirectoryReader, ServiceContext, VectorStoreIndex\n'), ((3983, 4058), 'llama_index.vector_stores.WeaviateVectorStore', 'WeaviateVectorStore', ([], {'weaviate_client': 'weaviate_client', 'index_name': 'index_name'}), '(weaviate_client=weaviate_client, index_name=index_name)\n', (4002, 4058), False, 'from llama_index.vector_stores import WeaviateVectorStore\n'), ((4081, 4136), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (4109, 4136), False, 'from llama_index import StorageContext, SimpleDirectoryReader, ServiceContext, VectorStoreIndex\n'), ((4150, 4262), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context', 'storage_context': 'storage_context'}), '(documents, service_context=service_context,\n storage_context=storage_context)\n', (4181, 4262), False, 'from llama_index import StorageContext, SimpleDirectoryReader, ServiceContext, VectorStoreIndex\n'), ((4564, 4597), 'weaviate.Client', 'weaviate.Client', (['cfg.WEAVIATE_URL'], {}), '(cfg.WEAVIATE_URL)\n', (4579, 4597), False, 'import weaviate\n'), ((1927, 1971), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (1948, 1971), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((1248, 1304), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['docs_path'], {'required_exts': "['.jpg']"}), "(docs_path, required_exts=['.jpg'])\n", (1269, 1304), False, 'from llama_index import StorageContext, SimpleDirectoryReader, ServiceContext, VectorStoreIndex\n'), ((4483, 4512), 'yaml.safe_load', 'yaml.safe_load', (['configuration'], {}), '(configuration)\n', (4497, 4512), False, 'import yaml\n')] |
# Ollama-backed RAG helper: indexes tweet data into a local Qdrant store and answers queries against it
from llama_index.llms import Ollama
from pathlib import Path
import qdrant_client
from llama_index import (
VectorStoreIndex,
ServiceContext,
download_loader,
)
from llama_index.storage.storage_context import StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
JSONReader = download_loader("JSONReader")
loader = JSONReader()
class Ollama_model:
def __init__(self, model="mistral"):
self.llm = Ollama(model=model)
self.documents = loader.load_data(Path('./data/tinytweets.json'))
self.client = qdrant_client.QdrantClient(
path="./qdrant_data"
)
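        # Persist vectors in the local "tweets" Qdrant collection and index the tweets with local embeddings.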
self.vector_store = QdrantVectorStore(client=self.client, collection_name="tweets")
self.storage_context = StorageContext.from_defaults(vector_store=self.vector_store)
self.service_context = ServiceContext.from_defaults(llm=self.llm,embed_model="local")
self.index = VectorStoreIndex.from_documents(self.documents,service_context=self.service_context,storage_context=self.storage_context)
self.query_engine = self.index.as_query_engine()
def get_answer(self, input):
response = self.query_engine.query(input)
return response
def change_dataset(self, name, filename):
print(filename)
self.documents = loader.load_data(Path('./uploads/'+filename))
self.vector_store = QdrantVectorStore(client=self.client, collection_name=name)
self.storage_context = StorageContext.from_defaults(vector_store=self.vector_store)
self.index = VectorStoreIndex.from_documents(self.documents,service_context=self.service_context,storage_context=self.storage_context)
self.query_engine = self.index.as_query_engine()
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.llms.Ollama"
] | [((406, 435), 'llama_index.download_loader', 'download_loader', (['"""JSONReader"""'], {}), "('JSONReader')\n", (421, 435), False, 'from llama_index import VectorStoreIndex, ServiceContext, download_loader\n'), ((539, 558), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': 'model'}), '(model=model)\n', (545, 558), False, 'from llama_index.llms import Ollama\n'), ((655, 703), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'path': '"""./qdrant_data"""'}), "(path='./qdrant_data')\n", (681, 703), False, 'import qdrant_client\n'), ((754, 817), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'self.client', 'collection_name': '"""tweets"""'}), "(client=self.client, collection_name='tweets')\n", (771, 817), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((849, 909), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'self.vector_store'}), '(vector_store=self.vector_store)\n', (877, 909), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((941, 1004), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'self.llm', 'embed_model': '"""local"""'}), "(llm=self.llm, embed_model='local')\n", (969, 1004), False, 'from llama_index import VectorStoreIndex, ServiceContext, download_loader\n'), ((1025, 1153), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['self.documents'], {'service_context': 'self.service_context', 'storage_context': 'self.storage_context'}), '(self.documents, service_context=self.\n service_context, storage_context=self.storage_context)\n', (1056, 1153), False, 'from llama_index import VectorStoreIndex, ServiceContext, download_loader\n'), ((1490, 1549), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'self.client', 'collection_name': 'name'}), '(client=self.client, collection_name=name)\n', (1507, 1549), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((1581, 1641), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'self.vector_store'}), '(vector_store=self.vector_store)\n', (1609, 1641), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((1663, 1791), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['self.documents'], {'service_context': 'self.service_context', 'storage_context': 'self.storage_context'}), '(self.documents, service_context=self.\n service_context, storage_context=self.storage_context)\n', (1694, 1791), False, 'from llama_index import VectorStoreIndex, ServiceContext, download_loader\n'), ((601, 631), 'pathlib.Path', 'Path', (['"""./data/tinytweets.json"""'], {}), "('./data/tinytweets.json')\n", (605, 631), False, 'from pathlib import Path\n'), ((1433, 1462), 'pathlib.Path', 'Path', (["('./uploads/' + filename)"], {}), "('./uploads/' + filename)\n", (1437, 1462), False, 'from pathlib import Path\n')] |
from fastapi import FastAPI
from llama_index import ServiceContext
from llama_index import set_global_service_context
from llama_index import VectorStoreIndex, SimpleDirectoryReader
from llama_index.embeddings import GradientEmbedding
from llama_index.llms import GradientBaseModelLLM
from llama_index.vector_stores import CassandraVectorStore
from copy import deepcopy
from tempfile import NamedTemporaryFile
import json
import os
import streamlit as st
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
app = FastAPI()
@st.cache_resource
def create_datastax_connection():
cloud_config= {'secure_connect_bundle': 'secure-connect-final-db.zip'}
with open("final_db-token.json") as f:
secrets = json.load(f)
CLIENT_ID = secrets["clientId"]
CLIENT_SECRET = secrets["secret"]
auth_provider = PlainTextAuthProvider(CLIENT_ID, CLIENT_SECRET)
cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider)
astra_session = cluster.connect()
return astra_session
def main():
index_placeholder = None
st.set_page_config(page_title = "Chat with Deduct AI ", page_icon="🕵️♀️")
st.header('Hello ! This is Deduct AI built with patience and love by Sunil Kumar 👦')
if "conversation" not in st.session_state:
st.session_state.conversation = None
if "activate_chat" not in st.session_state:
st.session_state.activate_chat = False
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"], avatar = message['avatar']):
st.markdown(message["content"])
session = create_datastax_connection()
os.environ['GRADIENT_ACCESS_TOKEN'] = "0ADxK10rN2amETd2KYaSziGf6Avu3gT7"
os.environ['GRADIENT_WORKSPACE_ID'] = "d7f5c932-179e-4944-b4f8-d745c350df4e_workspace"
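    # Gradient-hosted Llama 2 (7B chat) for completions and the bge-large embedding model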
llm = GradientBaseModelLLM(base_model_slug="llama2-7b-chat", max_tokens=400)
embed_model = GradientEmbedding(
gradient_access_token = os.environ["GRADIENT_ACCESS_TOKEN"],
gradient_workspace_id = os.environ["GRADIENT_WORKSPACE_ID"],
gradient_model_slug="bge-large")
service_context = ServiceContext.from_defaults(
llm = llm,
embed_model = embed_model,
chunk_size=256)
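    # register this service context as the global default for subsequent index and query calls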
set_global_service_context(service_context)
with st.sidebar:
st.subheader('Feed Deduct AI 🕵️♂️')
docs = st.file_uploader('⬆️ Upload your PDF & Click to process',
accept_multiple_files = False,
type=['pdf'])
if st.button('Process'):
with NamedTemporaryFile(dir='.', suffix='.pdf') as f:
f.write(docs.getbuffer())
with st.spinner('Processing'):
documents = SimpleDirectoryReader(".").load_data()
index = VectorStoreIndex.from_documents(documents,
service_context=service_context)
query_engine = index.as_query_engine()
if "query_engine" not in st.session_state:
st.session_state.query_engine = query_engine
st.session_state.activate_chat = True
if st.session_state.activate_chat == True:
if prompt := st.chat_input("Ask your question to Deduct AI?"):
with st.chat_message("user", avatar = '👨🏻'):
st.markdown(prompt)
st.session_state.messages.append({"role": "user",
"avatar" :'👨🏻',
"content": prompt})
query_index_placeholder = st.session_state.query_engine
pdf_response = query_index_placeholder.query(prompt)
cleaned_response = pdf_response.response
with st.chat_message("assistant", avatar='🤖'):
st.markdown(cleaned_response)
st.session_state.messages.append({"role": "assistant",
"avatar" :'🤖',
"content": cleaned_response})
else:
st.markdown(
'Upload your Content to the chat'
)
@app.get("/")
async def home():
return main()
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.GradientBaseModelLLM",
"llama_index.set_global_service_context",
"llama_index.embeddings.GradientEmbedding"
] | [((439, 448), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (446, 448), False, 'from fastapi import FastAPI\n'), ((1810, 1880), 'llama_index.llms.GradientBaseModelLLM', 'GradientBaseModelLLM', ([], {'base_model_slug': '"""llama2-7b-chat"""', 'max_tokens': '(400)'}), "(base_model_slug='llama2-7b-chat', max_tokens=400)\n", (1830, 1880), False, 'from llama_index.llms import GradientBaseModelLLM\n'), ((1900, 2076), 'llama_index.embeddings.GradientEmbedding', 'GradientEmbedding', ([], {'gradient_access_token': "os.environ['GRADIENT_ACCESS_TOKEN']", 'gradient_workspace_id': "os.environ['GRADIENT_WORKSPACE_ID']", 'gradient_model_slug': '"""bge-large"""'}), "(gradient_access_token=os.environ['GRADIENT_ACCESS_TOKEN'],\n gradient_workspace_id=os.environ['GRADIENT_WORKSPACE_ID'],\n gradient_model_slug='bge-large')\n", (1917, 2076), False, 'from llama_index.embeddings import GradientEmbedding\n'), ((2121, 2199), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'chunk_size': '(256)'}), '(llm=llm, embed_model=embed_model, chunk_size=256)\n', (2149, 2199), False, 'from llama_index import ServiceContext\n'), ((2222, 2265), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (2248, 2265), False, 'from llama_index import set_global_service_context\n'), ((641, 653), 'json.load', 'json.load', (['f'], {}), '(f)\n', (650, 653), False, 'import json\n'), ((2566, 2608), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'dir': '"""."""', 'suffix': '""".pdf"""'}), "(dir='.', suffix='.pdf')\n", (2584, 2608), False, 'from tempfile import NamedTemporaryFile\n'), ((2803, 2878), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (2834, 2878), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((2736, 2762), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""."""'], {}), "('.')\n", (2757, 2762), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n')] |
from typing import Optional
from llama_index.storage.kvstore.mongodb_kvstore import MongoDBKVStore
from approaches.index.store.base.cosmos_kv_doc_store import CosmosKVDocumentStore
class CosmosDocumentStore(CosmosKVDocumentStore):
"""Mongo Document (Node) store.
A MongoDB store for Document and Node objects.
Args:
mongo_kvstore (MongoDBKVStore): MongoDB key-value store
namespace (str): namespace for the docstore
"""
def __init__(
self,
mongo_kvstore: MongoDBKVStore,
namespace: Optional[str] = None,
collection: Optional[str] = None,
metadata_collection: Optional[str] = None
) -> None:
"""Init a MongoDocumentStore."""
super().__init__(mongo_kvstore, namespace, collection,metadata_collection)
@classmethod
def from_uri(
cls,
uri: str,
db_name: Optional[str] = None,
namespace: Optional[str] = None,
collection: Optional[str] = None,
metadata_collection: Optional[str] = None
) -> "CosmosDocumentStore":
"""Load a CosmosDocumentStore from a MongoDB URI."""
mongo_kvstore = MongoDBKVStore.from_uri(uri, db_name)
        return cls(mongo_kvstore, namespace, collection, metadata_collection)
@classmethod
def from_host_and_port(
cls,
host: str,
port: int,
db_name: Optional[str] = None,
namespace: Optional[str] = None,
collection: Optional[str] = None,
metadata_collection: Optional[str] = None
) -> "CosmosDocumentStore":
"""Load a CosmosDocumentStore from a MongoDB host and port."""
mongo_kvstore = MongoDBKVStore.from_host_and_port(host, port, db_name)
return cls(mongo_kvstore, namespace, collection, metadata_collection)
| [
"llama_index.storage.kvstore.mongodb_kvstore.MongoDBKVStore.from_host_and_port",
"llama_index.storage.kvstore.mongodb_kvstore.MongoDBKVStore.from_uri"
] | [((1156, 1193), 'llama_index.storage.kvstore.mongodb_kvstore.MongoDBKVStore.from_uri', 'MongoDBKVStore.from_uri', (['uri', 'db_name'], {}), '(uri, db_name)\n', (1179, 1193), False, 'from llama_index.storage.kvstore.mongodb_kvstore import MongoDBKVStore\n'), ((1667, 1721), 'llama_index.storage.kvstore.mongodb_kvstore.MongoDBKVStore.from_host_and_port', 'MongoDBKVStore.from_host_and_port', (['host', 'port', 'db_name'], {}), '(host, port, db_name)\n', (1700, 1721), False, 'from llama_index.storage.kvstore.mongodb_kvstore import MongoDBKVStore\n')] |
import os
import openai
from llama_index.indices.query.schema import QueryBundle
from llama_index.retrievers import VectorIndexRetriever
from chatgpt_long_term_memory.llama_index_helpers.config import \
RetrieversConfig
from chatgpt_long_term_memory.openai_engine.config import TokenCounterConfig
from chatgpt_long_term_memory.openai_engine.token_counter import TokenCounter
KEY = os.getenv("OPENAI_API_KEY",
"")
openai.api_key = KEY
os.environ["OPENAI_API_KEY"] = KEY
class Retrievers:
"""
Retrievers class handles querying the index with a given question to retrieve relevant responses.
Args:
top_k (int): Number of top results to retrieve.
similarity_threshold (float): Similarity threshold for the retrieval results.
Attributes:
top_k (int): The number of top results to retrieve from the index.
max_tokens (int): The maximum number of tokens allowed for a response.
token_counter (TokenCounter): An instance of the TokenCounter class for counting tokens.
prompt_template (str): A template for the conversation prompt used for generating responses.
"""
def __init__(self, retrieve_config: RetrieversConfig, **kw):
super().__init__(**kw)
self.config = retrieve_config
self.top_k = self.config.top_k
self.max_tokens = self.config.max_tokens
self.token_counter = TokenCounter(
token_counter_config=TokenCounterConfig())
self.prompt_template = """Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
History: {context}
Human: {question}
Assistant:"""
def _answer_generator(self, question, retrieved_nodes):
"""
Private method to generate a response using the GPT-3.5 model.
Args:
question (str): The user's question or input.
retrieved_nodes (list): A list of retrieved nodes from the index.
Returns:
str: The response generated by the GPT-3.5 model.
"""
context = "\n".join(retrieved_nodes)
prompt = self.prompt_template.format(
context=context, question=question)
messages = [
{"role": "system", "content": prompt},
]
total_token = self.token_counter.token_counter(prompt)
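        # prompt plus completion must fit in the 16,384-token window of gpt-3.5-turbo-16k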
check_token = 16384 - (
self.max_tokens + total_token)
assert check_token > 0, "Reached max tokens limit!"
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=messages,
max_tokens=self.max_tokens,
temperature=0.7,
top_p=1,
presence_penalty=0,
frequency_penalty=0,
)
messages.append(
{"role": "assistant", "content": response["choices"][0]["message"].content})
bot_response = response["choices"][0]["message"].to_dict()
return bot_response['content']
def query(self, index, question):
"""
Query the index with a given question to retrieve relevant responses.
Args:
index: The index to query.
question (str): The user's question or input.
Returns:
str: The response retrieved from the index based on the input question.
"""
# Configure the vector retriever
retrieved_nodes = self.get_nodes(question=question, index=index)
response = self._answer_generator(question, retrieved_nodes)
return response
def get_nodes(self, question, index):
"""
Retrieve nodes from the index using the VectorIndexRetriever.
Args:
question (str): The user's question or input.
index: The index to query.
Returns:
list: A list of retrieved nodes (text) from the index.
"""
# Configure the vector retriever
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=self.top_k,
)
# Perform the query and retrieve the response
query_bundle = QueryBundle(question)
nodes = retriever.retrieve(query_bundle)
retrieved_nodes = [i.node.text for i in nodes]
return retrieved_nodes
| [
"llama_index.indices.query.schema.QueryBundle",
"llama_index.retrievers.VectorIndexRetriever"
] | [((388, 419), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""', '""""""'], {}), "('OPENAI_API_KEY', '')\n", (397, 419), False, 'import os\n'), ((3648, 3826), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': '"""gpt-3.5-turbo-16k"""', 'messages': 'messages', 'max_tokens': 'self.max_tokens', 'temperature': '(0.7)', 'top_p': '(1)', 'presence_penalty': '(0)', 'frequency_penalty': '(0)'}), "(model='gpt-3.5-turbo-16k', messages=messages,\n max_tokens=self.max_tokens, temperature=0.7, top_p=1, presence_penalty=\n 0, frequency_penalty=0)\n", (3676, 3826), False, 'import openai\n'), ((5095, 5157), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': 'self.top_k'}), '(index=index, similarity_top_k=self.top_k)\n', (5115, 5157), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((5270, 5291), 'llama_index.indices.query.schema.QueryBundle', 'QueryBundle', (['question'], {}), '(question)\n', (5281, 5291), False, 'from llama_index.indices.query.schema import QueryBundle\n'), ((1454, 1474), 'chatgpt_long_term_memory.openai_engine.config.TokenCounterConfig', 'TokenCounterConfig', ([], {}), '()\n', (1472, 1474), False, 'from chatgpt_long_term_memory.openai_engine.config import TokenCounterConfig\n')] |
from pathlib import Path
from llama_hub.file.unstructured import UnstructuredReader
from llama_index import download_loader
from llama_index import SimpleDirectoryReader, VectorStoreIndex
from dotenv import load_dotenv
import os
from llama_index.node_parser import SimpleNodeParser
import pinecone
from llama_index.vector_stores import PineconeVectorStore
from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext
from llama_index.embeddings.openai import OpenAIEmbedding
import openai
####################################################
# #
# This file upserts documents in data to pinecone. #
# #
####################################################
load_dotenv()
openai.api_key = os.getenv('api_key')
# find API key in console at app.pinecone.io
os.environ['PINECONE_API_KEY'] = os.getenv('pinecone_api_key')
# environment is found next to API key in the console
os.environ['PINECONE_ENVIRONMENT'] = os.getenv('pinecone_env')
# loader = UnstructuredReader()
# initialize connection to pinecone
pinecone.init(
api_key=os.environ['PINECONE_API_KEY'],
environment=os.environ['PINECONE_ENVIRONMENT']
)
# setup the index/query process, ie the embedding model (and completion if used)
embed_model = OpenAIEmbedding(model='text-embedding-ada-002', embed_batch_size=100)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
# Readers
PDFReader = download_loader("PDFReader")
MarkdownReader = download_loader("MarkdownReader")
# Load docs
def upsert_docs(input_dir: str, index_name: str):
print(f"Building from {input_dir} under index {index_name}...\n")
documents = SimpleDirectoryReader(input_dir=input_dir).load_data()
# create the index if it does not exist already
if index_name not in pinecone.list_indexes():
pinecone.create_index(
name=index_name,
dimension=1536,
metric='cosine'
)
# connect to the index
pineconeIndex = pinecone.Index(index_name)
vectorStore = PineconeVectorStore(
pinecone_index=pineconeIndex
)
# setup our storage (vector db)
storageContext = StorageContext.from_defaults(
vector_store=vectorStore
)
index = GPTVectorStoreIndex.from_documents(
documents=documents,
storage_context=storageContext,
service_context=service_context
)
print(f"Done building !\n")
upsert_docs(input_dir="upsert_doc/docs", index_name="ruikang-guo-knowledge-base")
| [
"llama_index.SimpleDirectoryReader",
"llama_index.vector_stores.PineconeVectorStore",
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((796, 809), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (807, 809), False, 'from dotenv import load_dotenv\n'), ((827, 847), 'os.getenv', 'os.getenv', (['"""api_key"""'], {}), "('api_key')\n", (836, 847), False, 'import os\n'), ((926, 955), 'os.getenv', 'os.getenv', (['"""pinecone_api_key"""'], {}), "('pinecone_api_key')\n", (935, 955), False, 'import os\n'), ((1047, 1072), 'os.getenv', 'os.getenv', (['"""pinecone_env"""'], {}), "('pinecone_env')\n", (1056, 1072), False, 'import os\n'), ((1143, 1249), 'pinecone.init', 'pinecone.init', ([], {'api_key': "os.environ['PINECONE_API_KEY']", 'environment': "os.environ['PINECONE_ENVIRONMENT']"}), "(api_key=os.environ['PINECONE_API_KEY'], environment=os.\n environ['PINECONE_ENVIRONMENT'])\n", (1156, 1249), False, 'import pinecone\n'), ((1351, 1420), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'embed_batch_size': '(100)'}), "(model='text-embedding-ada-002', embed_batch_size=100)\n", (1366, 1420), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((1439, 1492), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (1467, 1492), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((1516, 1544), 'llama_index.download_loader', 'download_loader', (['"""PDFReader"""'], {}), "('PDFReader')\n", (1531, 1544), False, 'from llama_index import download_loader\n'), ((1562, 1595), 'llama_index.download_loader', 'download_loader', (['"""MarkdownReader"""'], {}), "('MarkdownReader')\n", (1577, 1595), False, 'from llama_index import download_loader\n'), ((2081, 2107), 'pinecone.Index', 'pinecone.Index', (['index_name'], {}), '(index_name)\n', (2095, 2107), False, 'import pinecone\n'), ((2127, 2176), 'llama_index.vector_stores.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'pineconeIndex'}), '(pinecone_index=pineconeIndex)\n', (2146, 2176), False, 'from llama_index.vector_stores import PineconeVectorStore\n'), ((2249, 2303), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vectorStore'}), '(vector_store=vectorStore)\n', (2277, 2303), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((2331, 2456), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', ([], {'documents': 'documents', 'storage_context': 'storageContext', 'service_context': 'service_context'}), '(documents=documents, storage_context=\n storageContext, service_context=service_context)\n', (2365, 2456), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((1882, 1905), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (1903, 1905), False, 'import pinecone\n'), ((1915, 1986), 'pinecone.create_index', 'pinecone.create_index', ([], {'name': 'index_name', 'dimension': '(1536)', 'metric': '"""cosine"""'}), "(name=index_name, dimension=1536, metric='cosine')\n", (1936, 1986), False, 'import pinecone\n'), ((1745, 1787), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'input_dir'}), '(input_dir=input_dir)\n', (1766, 1787), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n')] |
"""Read PDF files."""
import shutil
from pathlib import Path
from typing import Any, List
from llama_index.langchain_helpers.text_splitter import SentenceSplitter
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
# https://github.com/emptycrown/llama-hub/blob/main/loader_hub/file/cjk_pdf/base.py
staticPath = "static"
class CJKPDFReader(BaseReader):
"""CJK PDF reader.
Extract text from PDF including CJK (Chinese, Japanese and Korean) languages using pdfminer.six.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
def load_data(self, filepath: Path, filename) -> List[Document]:
"""Parse file."""
# Import pdfminer
from io import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
# Create a resource manager
rsrcmgr = PDFResourceManager()
# Create an object to store the text
retstr = StringIO()
# Create a text converter
codec = "utf-8"
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
# Create a PDF interpreter
interpreter = PDFPageInterpreter(rsrcmgr, device)
# Open the PDF file
fp = open(filepath, "rb")
# Create a list to store the text of each page
document_list = []
# Extract text from each page
for i, page in enumerate(PDFPage.get_pages(fp)):
interpreter.process_page(page)
# Get the text
text = retstr.getvalue()
sentence_splitter = SentenceSplitter(chunk_size=400)
text_chunks = sentence_splitter.split_text(text)
document_list += [
Document(t, extra_info={"page_no": i + 1}) for t in text_chunks
]
# Clear the text
retstr.truncate(0)
retstr.seek(0)
# Close the file
fp.close()
# Close the device
device.close()
shutil.copy2(filepath, f"{staticPath}/file/{filename}")
return document_list
| [
"llama_index.readers.schema.base.Document",
"llama_index.langchain_helpers.text_splitter.SentenceSplitter"
] | [((1102, 1122), 'pdfminer.pdfinterp.PDFResourceManager', 'PDFResourceManager', ([], {}), '()\n', (1120, 1122), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((1185, 1195), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1193, 1195), False, 'from io import StringIO\n'), ((1273, 1283), 'pdfminer.layout.LAParams', 'LAParams', ([], {}), '()\n', (1281, 1283), False, 'from pdfminer.layout import LAParams\n'), ((1301, 1363), 'pdfminer.converter.TextConverter', 'TextConverter', (['rsrcmgr', 'retstr'], {'codec': 'codec', 'laparams': 'laparams'}), '(rsrcmgr, retstr, codec=codec, laparams=laparams)\n', (1314, 1363), False, 'from pdfminer.converter import TextConverter\n'), ((1421, 1456), 'pdfminer.pdfinterp.PDFPageInterpreter', 'PDFPageInterpreter', (['rsrcmgr', 'device'], {}), '(rsrcmgr, device)\n', (1439, 1456), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((2247, 2302), 'shutil.copy2', 'shutil.copy2', (['filepath', 'f"""{staticPath}/file/{filename}"""'], {}), "(filepath, f'{staticPath}/file/{filename}')\n", (2259, 2302), False, 'import shutil\n'), ((1672, 1693), 'pdfminer.pdfpage.PDFPage.get_pages', 'PDFPage.get_pages', (['fp'], {}), '(fp)\n', (1689, 1693), False, 'from pdfminer.pdfpage import PDFPage\n'), ((1837, 1869), 'llama_index.langchain_helpers.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': '(400)'}), '(chunk_size=400)\n', (1853, 1869), False, 'from llama_index.langchain_helpers.text_splitter import SentenceSplitter\n'), ((1979, 2021), 'llama_index.readers.schema.base.Document', 'Document', (['t'], {'extra_info': "{'page_no': i + 1}"}), "(t, extra_info={'page_no': i + 1})\n", (1987, 2021), False, 'from llama_index.readers.schema.base import Document\n')] |
import chromadb
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.indices.service_context import ServiceContext
class Vector:
def __init__(self, doc_location):
self.client = chromadb.Client()
self.doc_location = doc_location
self.collection = self.client.create_collection("papers")
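        # local BAAI/bge-base embedding model from Hugging Face used to embed document chunks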
self.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
def process_document(self):
"""
Process the document by performing the following steps:
1. Read the document.
2. Set up ChromaVectorStore and load in data.
3. Create a VectorStoreIndex from the documents using the specified storage context, embed model, and service context.
"""
service_context = ServiceContext.from_defaults(chunk_size=100, chunk_overlap=10)
documents = SimpleDirectoryReader(self.doc_location).load_data()
vector_store = ChromaVectorStore(chroma_collection=self.collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
self.index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, embed_model=self.embed_model, service_context=service_context
)
def query_document(self, query):
query_engine = self.index.as_query_engine()
response = query_engine.query(query)
return response | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.core.indices.service_context.ServiceContext.from_defaults",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.SimpleDirectoryReader",
"llama_index.vector_stores.chroma.ChromaVectorStore"
] | [((403, 420), 'chromadb.Client', 'chromadb.Client', ([], {}), '()\n', (418, 420), False, 'import chromadb\n'), ((555, 611), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""BAAI/bge-base-en-v1.5"""'}), "(model_name='BAAI/bge-base-en-v1.5')\n", (575, 611), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((1002, 1064), 'llama_index.core.indices.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': '(100)', 'chunk_overlap': '(10)'}), '(chunk_size=100, chunk_overlap=10)\n', (1030, 1064), False, 'from llama_index.core.indices.service_context import ServiceContext\n'), ((1170, 1222), 'llama_index.vector_stores.chroma.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'self.collection'}), '(chroma_collection=self.collection)\n', (1187, 1222), False, 'from llama_index.vector_stores.chroma import ChromaVectorStore\n'), ((1253, 1308), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1281, 1308), False, 'from llama_index.core import StorageContext\n'), ((1334, 1476), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context', 'embed_model': 'self.embed_model', 'service_context': 'service_context'}), '(documents, storage_context=storage_context,\n embed_model=self.embed_model, service_context=service_context)\n', (1365, 1476), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n'), ((1089, 1129), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['self.doc_location'], {}), '(self.doc_location)\n', (1110, 1129), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n')] |
from llama_index.llms import OpenAI
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
import os
documents = SimpleDirectoryReader("./competition").load_data()
os.environ['OPENAI_API_KEY'] = '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'
embed_model = HuggingFaceEmbedding(model_name='BAAI/bge-large-en-v1.5')
llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo")
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model, chunk_size=800, chunk_overlap=20)
index = VectorStoreIndex.from_documents(documents, service_context=service_context, show_progress=True)
index.storage_context.persist()
query_engine = index.as_query_engine(similarity_top_k=2, response_mode='tree_summarize')
# response = query_engine.query(
# "what are the benefits that I can have regarding risk management and portfolio monitoring? What are the charges?"
# )
def answer(question):
return query_engine.query(question)
if __name__ == "__main__":
while True:
question = input("Ask a question: ")
print(answer(question)) | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.embeddings.HuggingFaceEmbedding"
] | [((346, 403), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""BAAI/bge-large-en-v1.5"""'}), "(model_name='BAAI/bge-large-en-v1.5')\n", (366, 403), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((411, 457), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.1)', 'model': '"""gpt-3.5-turbo"""'}), "(temperature=0.1, model='gpt-3.5-turbo')\n", (417, 457), False, 'from llama_index.llms import OpenAI\n'), ((476, 577), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'chunk_size': '(800)', 'chunk_overlap': '(20)'}), '(llm=llm, embed_model=embed_model, chunk_size=\n 800, chunk_overlap=20)\n', (504, 577), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((581, 680), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context', 'show_progress': '(True)'}), '(documents, service_context=service_context,\n show_progress=True)\n', (612, 680), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((195, 233), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./competition"""'], {}), "('./competition')\n", (216, 233), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n')] |
import os
import openai
import tiktoken
import logging
import sys
from dotenv import load_dotenv
from threading import Lock
from llama_index import (SimpleDirectoryReader,
VectorStoreIndex,
ServiceContext,
StorageContext,
load_index_from_storage,
set_global_service_context,
download_loader)
from llama_hub.github_repo import GithubRepositoryReader, GithubClient
from llama_index.callbacks import CallbackManager, TokenCountingHandler
from llama_index.llms import OpenAI
from base_prompt import CHAT_TEXT_QA_PROMPT, CHAT_REFINE_PROMPT
from llama_index.indices.postprocessor import SentenceTransformerRerank
from llama_index.query_engine.router_query_engine import RouterQueryEngine
from llama_index.selectors.pydantic_selectors import PydanticSingleSelector
from llama_index.tools.query_engine import QueryEngineTool
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
load_dotenv()
openai.api_key = os.environ["OPENAI_API_KEY"]
GITHUB_API_KEY = os.environ["GITHUB_API_KEY"]
thread_lock = Lock()
# setup token counter
token_counter = TokenCountingHandler(tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode)
callback_manager = CallbackManager([token_counter])
# define LLM
llm = OpenAI(temperature=0, model="gpt-3.5-turbo", streaming=False, max_tokens=1000)
service_context = ServiceContext.from_defaults(llm=llm, callback_manager=callback_manager, embed_model="local")
set_global_service_context(service_context)
# builds new index from our data folder and GitHub repos
def build_index():
global service_context
general_documents = []
code_documents = []
# load directory documents
directory_document = SimpleDirectoryReader('./data', recursive=True).load_data()
general_documents += directory_document
# load github documents
download_loader("GithubRepositoryReader")
github_client = GithubClient(GITHUB_API_KEY)
owner = "pyth-network"
repos = ["pyth-client-py", "pyth-client-js", "pyth-sdk-solidity", "pyth-sdk-rs", "documentation", "pyth-crosschain"]
branch = "main"
# build documents out of all repos
for repo in repos:
if repo == "documentation":
loader = GithubRepositoryReader(github_client,
owner=owner,
repo=repo,
filter_directories=(["images"], GithubRepositoryReader.FilterType.EXCLUDE),
verbose=False,
concurrent_requests=10,
)
document = loader.load_data(branch=branch)
general_documents += document
else:
loader = GithubRepositoryReader(github_client,
owner=owner,
repo=repo,
filter_directories=(["images"], GithubRepositoryReader.FilterType.EXCLUDE),
verbose=False,
concurrent_requests=10,
)
document = loader.load_data(branch=branch)
code_documents += document
# build index and store it
general_index = VectorStoreIndex.from_documents(general_documents, service_context=service_context, show_progress=True)
general_index.storage_context.persist(persist_dir="./storage/general_index")
code_index = VectorStoreIndex.from_documents(code_documents, service_context=service_context, show_progress=True)
code_index.storage_context.persist(persist_dir="./storage/code_index")
# used to add documents to existing stored index
def add_to_index():
download_loader("GithubRepositoryReader")
github_client = GithubClient(GITHUB_API_KEY)
owner = "pyth-network"
repos = ["pyth-serum", "publisher-utils", "solmeet-workshop-june-22", "oracle-sandbox", "pyth-sdk-js",
"program-authority-escrow", "pyth-observer", "audit-reports", "example-publisher", "pyth-agent",
"program-admin", "pyth-client", "pythnet", "governance"]
branch = "main"
combined_documents = []
for repo in repos:
loader = GithubRepositoryReader(github_client, owner=owner, repo=repo, filter_directories=(["images"], GithubRepositoryReader.FilterType.EXCLUDE), verbose=False, concurrent_requests=10)
if repo == "governance":
document = loader.load_data(branch="master")
elif repo == "pythnet":
document = loader.load_data(branch="pyth")
else:
document = loader.load_data(branch=branch)
combined_documents += document
storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context)
for doc in combined_documents:
index.insert(doc)
index.storage_context.persist()
def pyth_gpt(message):
global service_context
with thread_lock:
# rebuild storage context
general_storage_context = StorageContext.from_defaults(persist_dir="./storage/general_index")
code_storage_context = StorageContext.from_defaults(persist_dir="./storage/code_index")
# load index
general_index = load_index_from_storage(general_storage_context, service_context=service_context)
code_index = load_index_from_storage(code_storage_context, service_context=service_context)
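        # cross-encoder reranker trims the retrieved nodes down to the 3 most relevant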
rerank = SentenceTransformerRerank(model="cross-encoder/ms-marco-MiniLM-L-2-v2", top_n=3)
# query the index
general_query_engine = general_index.as_query_engine(text_qa_template=CHAT_TEXT_QA_PROMPT,
refine_template=CHAT_REFINE_PROMPT,
similarity_top_k=10,
streaming=False,
service_context=service_context,
node_postprocessors=[rerank])
code_query_engine = code_index.as_query_engine(text_qa_template=CHAT_TEXT_QA_PROMPT,
refine_template=CHAT_REFINE_PROMPT,
similarity_top_k=10,
streaming=False,
service_context=service_context,
node_postprocessors=[rerank])
general_vector_tool = QueryEngineTool.from_defaults(query_engine=general_query_engine, description="Useful for retrieving general context related to the data source", )
code_vector_tool = QueryEngineTool.from_defaults(query_engine=code_query_engine, description="Useful specifically for coding questions related to the data source ", )
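        # route each question to either the general-context or the code-focused query engine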
query_engine = RouterQueryEngine(selector=PydanticSingleSelector.from_defaults(),
query_engine_tools=[general_vector_tool, code_vector_tool])
# enter your prompt
response = query_engine.query(message)
# token counter
print('Embedding Tokens: ', token_counter.total_embedding_token_count, '\n',
'LLM Prompt Tokens: ', token_counter.prompt_llm_token_count, '\n',
'LLM Completion Tokens: ', token_counter.completion_llm_token_count, '\n',
'Total LLM Token Count: ', token_counter.total_llm_token_count, '\n')
token_counter.reset_counts()
return str(response)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.selectors.pydantic_selectors.PydanticSingleSelector.from_defaults",
"llama_index.download_loader",
"llama_index.tools.query_engine.QueryEngineTool.from_defaults",
"llama_index.indices.postprocessor.SentenceTransformerRerank",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.StorageContext.from_defaults",
"llama_index.set_global_service_context",
"llama_index.load_index_from_storage",
"llama_index.callbacks.CallbackManager"
] | [((991, 1049), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (1010, 1049), False, 'import logging\n'), ((1162, 1175), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1173, 1175), False, 'from dotenv import load_dotenv\n'), ((1285, 1291), 'threading.Lock', 'Lock', ([], {}), '()\n', (1289, 1291), False, 'from threading import Lock\n'), ((1438, 1470), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (1453, 1470), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler\n'), ((1492, 1570), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-3.5-turbo"""', 'streaming': '(False)', 'max_tokens': '(1000)'}), "(temperature=0, model='gpt-3.5-turbo', streaming=False, max_tokens=1000)\n", (1498, 1570), False, 'from llama_index.llms import OpenAI\n'), ((1590, 1687), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'callback_manager': 'callback_manager', 'embed_model': '"""local"""'}), "(llm=llm, callback_manager=callback_manager,\n embed_model='local')\n", (1618, 1687), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context, download_loader\n'), ((1685, 1728), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (1711, 1728), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context, download_loader\n'), ((1051, 1070), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1068, 1070), False, 'import logging\n'), ((1117, 1157), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (1138, 1157), False, 'import logging\n'), ((2091, 2132), 'llama_index.download_loader', 'download_loader', (['"""GithubRepositoryReader"""'], {}), "('GithubRepositoryReader')\n", (2106, 2132), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context, download_loader\n'), ((2154, 2182), 'llama_hub.github_repo.GithubClient', 'GithubClient', (['GITHUB_API_KEY'], {}), '(GITHUB_API_KEY)\n', (2166, 2182), False, 'from llama_hub.github_repo import GithubRepositoryReader, GithubClient\n'), ((3660, 3768), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['general_documents'], {'service_context': 'service_context', 'show_progress': '(True)'}), '(general_documents, service_context=\n service_context, show_progress=True)\n', (3691, 3768), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context, download_loader\n'), ((3866, 3971), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['code_documents'], {'service_context': 'service_context', 'show_progress': '(True)'}), '(code_documents, service_context=\n service_context, show_progress=True)\n', (3897, 3971), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context, download_loader\n'), ((4123, 4164), 'llama_index.download_loader', 
'download_loader', (['"""GithubRepositoryReader"""'], {}), "('GithubRepositoryReader')\n", (4138, 4164), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context, download_loader\n'), ((4186, 4214), 'llama_hub.github_repo.GithubClient', 'GithubClient', (['GITHUB_API_KEY'], {}), '(GITHUB_API_KEY)\n', (4198, 4214), False, 'from llama_hub.github_repo import GithubRepositoryReader, GithubClient\n'), ((5121, 5174), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (5149, 5174), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context, download_loader\n'), ((5188, 5228), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (5211, 5228), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context, download_loader\n'), ((1086, 1105), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1103, 1105), False, 'import logging\n'), ((4627, 4812), 'llama_hub.github_repo.GithubRepositoryReader', 'GithubRepositoryReader', (['github_client'], {'owner': 'owner', 'repo': 'repo', 'filter_directories': "(['images'], GithubRepositoryReader.FilterType.EXCLUDE)", 'verbose': '(False)', 'concurrent_requests': '(10)'}), "(github_client, owner=owner, repo=repo,\n filter_directories=(['images'], GithubRepositoryReader.FilterType.\n EXCLUDE), verbose=False, concurrent_requests=10)\n", (4649, 4812), False, 'from llama_hub.github_repo import GithubRepositoryReader, GithubClient\n'), ((5480, 5547), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage/general_index"""'}), "(persist_dir='./storage/general_index')\n", (5508, 5547), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context, download_loader\n'), ((5580, 5644), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage/code_index"""'}), "(persist_dir='./storage/code_index')\n", (5608, 5644), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context, download_loader\n'), ((5692, 5778), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['general_storage_context'], {'service_context': 'service_context'}), '(general_storage_context, service_context=\n service_context)\n', (5715, 5778), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context, download_loader\n'), ((5796, 5874), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['code_storage_context'], {'service_context': 'service_context'}), '(code_storage_context, service_context=service_context)\n', (5819, 5874), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context, download_loader\n'), ((5895, 5980), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'model': 
'"""cross-encoder/ms-marco-MiniLM-L-2-v2"""', 'top_n': '(3)'}), "(model='cross-encoder/ms-marco-MiniLM-L-2-v2', top_n=3\n )\n", (5920, 5980), False, 'from llama_index.indices.postprocessor import SentenceTransformerRerank\n'), ((7096, 7249), 'llama_index.tools.query_engine.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {'query_engine': 'general_query_engine', 'description': '"""Useful for retrieving general context related to the data source"""'}), "(query_engine=general_query_engine,\n description=\n 'Useful for retrieving general context related to the data source')\n", (7125, 7249), False, 'from llama_index.tools.query_engine import QueryEngineTool\n'), ((7271, 7421), 'llama_index.tools.query_engine.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {'query_engine': 'code_query_engine', 'description': '"""Useful specifically for coding questions related to the data source """'}), "(query_engine=code_query_engine, description=\n 'Useful specifically for coding questions related to the data source ')\n", (7300, 7421), False, 'from llama_index.tools.query_engine import QueryEngineTool\n'), ((1365, 1409), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-3.5-turbo"""'], {}), "('gpt-3.5-turbo')\n", (1392, 1409), False, 'import tiktoken\n'), ((1952, 1999), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data"""'], {'recursive': '(True)'}), "('./data', recursive=True)\n", (1973, 1999), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context, download_loader\n'), ((2477, 2662), 'llama_hub.github_repo.GithubRepositoryReader', 'GithubRepositoryReader', (['github_client'], {'owner': 'owner', 'repo': 'repo', 'filter_directories': "(['images'], GithubRepositoryReader.FilterType.EXCLUDE)", 'verbose': '(False)', 'concurrent_requests': '(10)'}), "(github_client, owner=owner, repo=repo,\n filter_directories=(['images'], GithubRepositoryReader.FilterType.\n EXCLUDE), verbose=False, concurrent_requests=10)\n", (2499, 2662), False, 'from llama_hub.github_repo import GithubRepositoryReader, GithubClient\n'), ((3062, 3247), 'llama_hub.github_repo.GithubRepositoryReader', 'GithubRepositoryReader', (['github_client'], {'owner': 'owner', 'repo': 'repo', 'filter_directories': "(['images'], GithubRepositoryReader.FilterType.EXCLUDE)", 'verbose': '(False)', 'concurrent_requests': '(10)'}), "(github_client, owner=owner, repo=repo,\n filter_directories=(['images'], GithubRepositoryReader.FilterType.\n EXCLUDE), verbose=False, concurrent_requests=10)\n", (3084, 3247), False, 'from llama_hub.github_repo import GithubRepositoryReader, GithubClient\n'), ((7472, 7510), 'llama_index.selectors.pydantic_selectors.PydanticSingleSelector.from_defaults', 'PydanticSingleSelector.from_defaults', ([], {}), '()\n', (7508, 7510), False, 'from llama_index.selectors.pydantic_selectors import PydanticSingleSelector\n')] |
from dotenv import load_dotenv
import os
load_dotenv()
import pinecone
from llama_index import (
download_loader,
SimpleDirectoryReader,
ServiceContext,
StorageContext,
VectorStoreIndex
)
from llama_index.llms import OpenAI
from llama_index.vector_stores import PineconeVectorStore
from llama_index.callbacks import LlamaDebugHandler, CallbackManager
from llama_index.postprocessor import SentenceEmbeddingOptimizer
import streamlit as st
#Add callbacks to the ServiceContext
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
callback_manager= CallbackManager(handlers=[llama_debug])
service_context = ServiceContext.from_defaults(callback_manager=callback_manager)
# To run this app locally, execute in the terminal: streamlit run main.py
print("***Streamlit LlamaIndex Documentation Helper")
@st.cache_resource(show_spinner=False)
def get_index()-> VectorStoreIndex:
pinecone.init(api_key=os.environ["PINECONE_API_KEY"],
environment=os.environ["PINECONE_ENV"])
pinecone_index = pinecone.Index(index_name="llamaindex-documentation-helper")
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
return VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=service_context)
index = get_index()
st.set_page_config(page_title="Chat with LlamaIndex docs powered by llamaIndex ",
page_icon="🦙",
layout = "centered",
initial_sidebar_state="auto",
menu_items=None
)
st.title("Chat with LlamaIndex docs 🦙")
if "chat_engine" not in st.session_state.keys():
postprocessor = SentenceEmbeddingOptimizer(
embed_model = service_context.embed_model,
percentile_cutoff = 0.5,
threshold_cutoff = 0.7
)
st.session_state.chat_engine = index.as_chat_engine(
chat_mode="react", verbose = True, node_postprocessors=[postprocessor]
)
if "messages" not in st.session_state.keys():
st.session_state.messages=[
{
"role": "assistant",
"content": "Ask me a question about LlamaIndex's open source python library."
}
]
if prompt := st.chat_input("Your question"):
st.session_state.messages.append({
"role": "user",
"content": prompt
})
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.write(message["content"])
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = st.session_state.chat_engine.chat(message=prompt)
st.write(response.response)
nodes = [node for node in response.source_nodes]
for col, node, i in zip(st.columns(len(nodes)), nodes, range(len(nodes))):
with col:
st.header(f"Source Node {i+1}: score = {node.score}")
st.write(node.text)
message = {
"role": "assistant",
"content":response.response
}
st.session_state.messages.append(message)
| [
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.vector_stores.PineconeVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.postprocessor.SentenceEmbeddingOptimizer",
"llama_index.callbacks.CallbackManager"
] | [((41, 54), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (52, 54), False, 'from dotenv import load_dotenv\n'), ((518, 560), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': '(True)'}), '(print_trace_on_end=True)\n', (535, 560), False, 'from llama_index.callbacks import LlamaDebugHandler, CallbackManager\n'), ((579, 618), 'llama_index.callbacks.CallbackManager', 'CallbackManager', ([], {'handlers': '[llama_debug]'}), '(handlers=[llama_debug])\n', (594, 618), False, 'from llama_index.callbacks import LlamaDebugHandler, CallbackManager\n'), ((637, 700), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'callback_manager': 'callback_manager'}), '(callback_manager=callback_manager)\n', (665, 700), False, 'from llama_index import download_loader, SimpleDirectoryReader, ServiceContext, StorageContext, VectorStoreIndex\n'), ((834, 871), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (851, 871), True, 'import streamlit as st\n'), ((1335, 1506), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with LlamaIndex docs powered by llamaIndex """', 'page_icon': '"""🦙"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title=\n 'Chat with LlamaIndex docs powered by llamaIndex ', page_icon='🦙',\n layout='centered', initial_sidebar_state='auto', menu_items=None)\n", (1353, 1506), True, 'import streamlit as st\n'), ((1616, 1655), 'streamlit.title', 'st.title', (['"""Chat with LlamaIndex docs 🦙"""'], {}), "('Chat with LlamaIndex docs 🦙')\n", (1624, 1655), True, 'import streamlit as st\n'), ((921, 1019), 'pinecone.init', 'pinecone.init', ([], {'api_key': "os.environ['PINECONE_API_KEY']", 'environment': "os.environ['PINECONE_ENV']"}), "(api_key=os.environ['PINECONE_API_KEY'], environment=os.\n environ['PINECONE_ENV'])\n", (934, 1019), False, 'import pinecone\n'), ((1053, 1113), 'pinecone.Index', 'pinecone.Index', ([], {'index_name': '"""llamaindex-documentation-helper"""'}), "(index_name='llamaindex-documentation-helper')\n", (1067, 1113), False, 'import pinecone\n'), ((1133, 1183), 'llama_index.vector_stores.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'pinecone_index'}), '(pinecone_index=pinecone_index)\n', (1152, 1183), False, 'from llama_index.vector_stores import PineconeVectorStore\n'), ((1209, 1307), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (1243, 1307), False, 'from llama_index import download_loader, SimpleDirectoryReader, ServiceContext, StorageContext, VectorStoreIndex\n'), ((1681, 1704), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (1702, 1704), True, 'import streamlit as st\n'), ((1726, 1842), 'llama_index.postprocessor.SentenceEmbeddingOptimizer', 'SentenceEmbeddingOptimizer', ([], {'embed_model': 'service_context.embed_model', 'percentile_cutoff': '(0.5)', 'threshold_cutoff': '(0.7)'}), '(embed_model=service_context.embed_model,\n percentile_cutoff=0.5, threshold_cutoff=0.7)\n', (1752, 1842), False, 'from llama_index.postprocessor import SentenceEmbeddingOptimizer\n'), ((2048, 2071), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (2069, 2071), True, 'import streamlit as st\n'), ((2269, 2299), 
'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (2282, 2299), True, 'import streamlit as st\n'), ((2305, 2374), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (2337, 2374), True, 'import streamlit as st\n'), ((2451, 2483), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (2466, 2483), True, 'import streamlit as st\n'), ((2494, 2522), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (2502, 2522), True, 'import streamlit as st\n'), ((2599, 2627), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2614, 2627), True, 'import streamlit as st\n'), ((2643, 2668), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (2653, 2668), True, 'import streamlit as st\n'), ((2693, 2742), 'streamlit.session_state.chat_engine.chat', 'st.session_state.chat_engine.chat', ([], {'message': 'prompt'}), '(message=prompt)\n', (2726, 2742), True, 'import streamlit as st\n'), ((2756, 2783), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (2764, 2783), True, 'import streamlit as st\n'), ((3212, 3253), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (3244, 3253), True, 'import streamlit as st\n'), ((2979, 3034), 'streamlit.header', 'st.header', (['f"""Source Node {i + 1}: score = {node.score}"""'], {}), "(f'Source Node {i + 1}: score = {node.score}')\n", (2988, 3034), True, 'import streamlit as st\n'), ((3053, 3072), 'streamlit.write', 'st.write', (['node.text'], {}), '(node.text)\n', (3061, 3072), True, 'import streamlit as st\n')] |
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response) | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader"
] | [((134, 176), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (165, 176), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n'), ((82, 113), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data"""'], {}), "('./data')\n", (103, 113), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n')] |
import os
from typing import List
from llama_index import Document, TwitterTweetReader
from social_gpt.ingestion.scraper.social_scraper import SocialScraper
class TwitterScraper(SocialScraper):
    def scrape(self) -> List[Document]:
        # assumes a Twitter API bearer token in TWITTER_BEARER_TOKEN and that
        # SocialScraper exposes the handle to scrape as self.username
        reader = TwitterTweetReader(bearer_token=os.getenv("TWITTER_BEARER_TOKEN"))
        return reader.load_data(twitterhandles=[self.username])
"llama_index.TwitterTweetReader"
] | [((237, 257), 'llama_index.TwitterTweetReader', 'TwitterTweetReader', ([], {}), '()\n', (255, 257), False, 'from llama_index import Document, TwitterTweetReader\n')] |
import os
from typing import List
import googleapiclient.discovery
from dotenv import load_dotenv
from llama_index import Document
from progress.bar import IncrementalBar
from youtube_transcript_api import YouTubeTranscriptApi
from social_gpt.ingestion.scraper.social_scraper import SocialScraper
load_dotenv()
YOUTUBE_API_SERVICE_NAME = 'youtube'
YOUTUBE_API_VERSION = 'v3'
class YoutubeScraper(SocialScraper):
def scrape(self) -> List[Document]:
print(f"scraping youtube channel ${self.username}")
return self.get_channel_video_docs()
@staticmethod
def get_transcript(video_id):
try:
transcript = YouTubeTranscriptApi.get_transcript(video_id)
return " ".join(list(map(lambda trans: trans['text'], transcript)))
except Exception:
return None
def get_channel_video_docs(self) -> List[Document]:
youtube = googleapiclient.discovery.build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=os.getenv('YOUTUBE_DEVELOPER_KEY'))
request = youtube.search().list(
part="snippet",
channelId=self.username,
maxResults=200, # Change if needed
type="video"
)
response = request.execute()
transcripts = []
bar = IncrementalBar('Transcribing', max=len(response['items']))
for item in response['items']:
transcript = YoutubeScraper.get_transcript(item['id']['videoId'])
if transcript:
transcripts.append(transcript)
bar.next()
bar.finish()
return list(map(lambda transcript: Document(transcript), transcripts))
| [
"llama_index.Document"
] | [((290, 303), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (301, 303), False, 'from dotenv import load_dotenv\n'), ((644, 689), 'youtube_transcript_api.YouTubeTranscriptApi.get_transcript', 'YouTubeTranscriptApi.get_transcript', (['video_id'], {}), '(video_id)\n', (679, 689), False, 'from youtube_transcript_api import YouTubeTranscriptApi\n'), ((1037, 1071), 'os.getenv', 'os.getenv', (['"""YOUTUBE_DEVELOPER_KEY"""'], {}), "('YOUTUBE_DEVELOPER_KEY')\n", (1046, 1071), False, 'import os\n'), ((1678, 1698), 'llama_index.Document', 'Document', (['transcript'], {}), '(transcript)\n', (1686, 1698), False, 'from llama_index import Document\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import logging
import sys
from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# read the document of data dir
documents = SimpleDirectoryReader("data").load_data()
# split the document to chunk, max token size=500, convert chunk to vector
index = GPTSimpleVectorIndex(documents)
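# Note: GPTSimpleVectorIndex and save_to_disk belong to the older llama_index API; newer releases use VectorStoreIndex.from_documents plus storage_context.persist().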
# save index
index.save_to_disk("index.json") | [
"llama_index.GPTSimpleVectorIndex",
"llama_index.SimpleDirectoryReader"
] | [((153, 211), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (172, 211), False, 'import logging\n'), ((457, 488), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {}), '(documents)\n', (477, 488), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex\n'), ((243, 283), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (264, 283), False, 'import logging\n'), ((212, 231), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (229, 231), False, 'import logging\n'), ((330, 359), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (351, 359), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex\n')] |
from llama_index.readers import SimpleWebPageReader #as of version 0.9.13
from llama_index.readers import WikipediaReader
cities = [
"Los Angeles", "Houston", "Honolulu", "Tucson", "Mexico City",
"Cincinatti", "Chicago"
]
wiki_docs = []
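# Fetch each city's Wikipedia page as a Document; lookups that fail are logged and skipped.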
for city in cities:
try:
doc = WikipediaReader().load_data(pages=[city])
wiki_docs.extend(doc)
except Exception as e:
print(f"Error loading page for city {city}: {e}")
| [
"llama_index.readers.WikipediaReader"
] | [((292, 309), 'llama_index.readers.WikipediaReader', 'WikipediaReader', ([], {}), '()\n', (307, 309), False, 'from llama_index.readers import WikipediaReader\n')] |
import dotenv
import os
from llama_index.readers.github import GithubRepositoryReader, GithubClient
from llama_index.core import (VectorStoreIndex, StorageContext, PromptTemplate, load_index_from_storage, Settings)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.ollama import Ollama
from llama_index.embeddings.openai import OpenAIEmbedding
def load_environ_vars():
dotenv.load_dotenv()
github_token = os.environ['GITHUB_TOKEN']
# open_api = os.environ['OPEN_API_KEY']
if github_token is None:
print("Add the GITHUB_TOKEN environment variable in the .env file")
exit()
"""if open_api is None:
print("Add the OPEN_API_KEY environment variable. Read instrucitons in the readme")
exit()"""
return github_token
def load_data(github_token: str, owner: str, repo: str):
github_client = GithubClient(github_token)
loader = GithubRepositoryReader(
github_client,
owner=owner,
repo=repo,
filter_file_extensions=(
[".py", ".ipynb", ".js", ".ts", ".md"],
GithubRepositoryReader.FilterType.INCLUDE,
),
verbose=False,
concurrent_requests=5,
)
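    # Load files from the repo's "main" branch; only the extensions whitelisted above are included.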
docs = loader.load_data(branch="main")
return docs
def load_embedding_model():
embedding_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
print("embedding model loaded")
return embedding_model
def main():
github_token = load_environ_vars()
PERSIST_DIR = "./basic/storage"
choice = input("Enter 1 to use OPEN API enter 0 to use loally setup llama2 model using Ollama:")
if not os.path.exists(PERSIST_DIR):
owner = input("Enter the username of the owner of the repo: ")
repo = input("Enter the name of the repo: ")
documents = load_data(github_token, owner, repo)
try:
if choice == '1':
print("Open API is being used")
embedding_model = OpenAIEmbedding()
index = VectorStoreIndex.from_documents(documents)
else:
print("Ollama is being used")
embedding_model = load_embedding_model()
Settings.embed_model = embedding_model
index = VectorStoreIndex.from_documents(
documents,
embed_model=embedding_model
)
except Exception as e:
print(e)
exit()
print("Documents Indexed")
else:
# load the existing index
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
index = load_index_from_storage(storage_context)
print("Already indexed data loaded")
llama = Ollama(model="llama2", request_timeout=200.0)
Settings.llm = llama
query_engine = index.as_query_engine(llm=llama)
qa_prompt_tmpl_str = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information above I want you to think step by step to answer the query in a crisp manner, incase case you don't know the answer say 'I don't know!'.\n"
"Query: {query_str}\n"
"Answer: "
)
qa_prompt_tmpl = PromptTemplate(qa_prompt_tmpl_str)
query_engine.update_prompts({"response_synthesizer:text_qa_template": qa_prompt_tmpl})
print("Press ctr + c to exit")
while True:
query = input("Enter your query: ")
response = query_engine.query(query)
print(response)
if __name__ == "__main__":
main()
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.llms.ollama.Ollama",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.PromptTemplate",
"llama_index.readers.github.GithubRepositoryReader",
"llama_index.readers.github.GithubClient",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((415, 435), 'dotenv.load_dotenv', 'dotenv.load_dotenv', ([], {}), '()\n', (433, 435), False, 'import dotenv\n'), ((886, 912), 'llama_index.readers.github.GithubClient', 'GithubClient', (['github_token'], {}), '(github_token)\n', (898, 912), False, 'from llama_index.readers.github import GithubRepositoryReader, GithubClient\n'), ((931, 1150), 'llama_index.readers.github.GithubRepositoryReader', 'GithubRepositoryReader', (['github_client'], {'owner': 'owner', 'repo': 'repo', 'filter_file_extensions': "(['.py', '.ipynb', '.js', '.ts', '.md'], GithubRepositoryReader.FilterType.\n INCLUDE)", 'verbose': '(False)', 'concurrent_requests': '(5)'}), "(github_client, owner=owner, repo=repo,\n filter_file_extensions=(['.py', '.ipynb', '.js', '.ts', '.md'],\n GithubRepositoryReader.FilterType.INCLUDE), verbose=False,\n concurrent_requests=5)\n", (953, 1150), False, 'from llama_index.readers.github import GithubRepositoryReader, GithubClient\n'), ((1363, 1420), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""BAAI/bge-small-en-v1.5"""'}), "(model_name='BAAI/bge-small-en-v1.5')\n", (1383, 1420), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((2826, 2871), 'llama_index.llms.ollama.Ollama', 'Ollama', ([], {'model': '"""llama2"""', 'request_timeout': '(200.0)'}), "(model='llama2', request_timeout=200.0)\n", (2832, 2871), False, 'from llama_index.llms.ollama import Ollama\n'), ((3405, 3439), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['qa_prompt_tmpl_str'], {}), '(qa_prompt_tmpl_str)\n', (3419, 3439), False, 'from llama_index.core import VectorStoreIndex, StorageContext, PromptTemplate, load_index_from_storage, Settings\n'), ((1699, 1726), 'os.path.exists', 'os.path.exists', (['PERSIST_DIR'], {}), '(PERSIST_DIR)\n', (1713, 1726), False, 'import os\n'), ((2649, 2702), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'PERSIST_DIR'}), '(persist_dir=PERSIST_DIR)\n', (2677, 2702), False, 'from llama_index.core import VectorStoreIndex, StorageContext, PromptTemplate, load_index_from_storage, Settings\n'), ((2719, 2759), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (2742, 2759), False, 'from llama_index.core import VectorStoreIndex, StorageContext, PromptTemplate, load_index_from_storage, Settings\n'), ((2034, 2051), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (2049, 2051), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((2076, 2118), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2107, 2118), False, 'from llama_index.core import VectorStoreIndex, StorageContext, PromptTemplate, load_index_from_storage, Settings\n'), ((2336, 2407), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'embed_model': 'embedding_model'}), '(documents, embed_model=embedding_model)\n', (2367, 2407), False, 'from llama_index.core import VectorStoreIndex, StorageContext, PromptTemplate, load_index_from_storage, Settings\n')] |
import sys
import openai
import key
import llama_index
import wikipedia
from llama_index import (
VectorStoreIndex,
get_response_synthesizer,
Document,
SimpleDirectoryReader,
)
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.postprocessor import SimilarityPostprocessor
import langchain
import os
openai.api_key = key.API_KEY
def download_wikipedia_article(page_title):
try:
page = wikipedia.page(page_title)
return page.content
except wikipedia.exceptions.PageError as e:
return f"Page not found: {e}"
# Example usage
documents = download_wikipedia_article("Greek mythology")
print(documents[:500]) # Print first 500 characters to check
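# Note: the Wikipedia text fetched above is only printed; the index below is built from the files under ./data.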
documents = SimpleDirectoryReader('./data').load_data()
# Create an index of your documents
index = VectorStoreIndex(documents)
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=10,
)
# configure response synthesizer
response_synthesizer = get_response_synthesizer()
# assemble query engine
query_engine = RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=response_synthesizer,
node_postprocessors=[SimilarityPostprocessor(similarity_cutoff=0.7)],
)
# query
response1 = query_engine.query("What is Greek mythology?")
print(response1)
response2 = query_engine.query('Who is Zues?')
print(response2)
response3 = query_engine.query("What group of individuals have derived inspiration from Greek Mythology?")
print(response3)
response4 = query_engine.query("Who was Hesiod to Homer?")
print(response4)
response5 = query_engine.query("Why has Greek mythology changed over time?")
print(response5)
| [
"llama_index.SimpleDirectoryReader",
"llama_index.get_response_synthesizer",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.VectorStoreIndex",
"llama_index.postprocessor.SimilarityPostprocessor"
] | [((884, 911), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['documents'], {}), '(documents)\n', (900, 911), False, 'from llama_index import VectorStoreIndex, get_response_synthesizer, Document, SimpleDirectoryReader\n'), ((925, 979), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': '(10)'}), '(index=index, similarity_top_k=10)\n', (945, 979), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((1048, 1074), 'llama_index.get_response_synthesizer', 'get_response_synthesizer', ([], {}), '()\n', (1072, 1074), False, 'from llama_index import VectorStoreIndex, get_response_synthesizer, Document, SimpleDirectoryReader\n'), ((497, 523), 'wikipedia.page', 'wikipedia.page', (['page_title'], {}), '(page_title)\n', (511, 523), False, 'import wikipedia\n'), ((794, 825), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data"""'], {}), "('./data')\n", (815, 825), False, 'from llama_index import VectorStoreIndex, get_response_synthesizer, Document, SimpleDirectoryReader\n'), ((1234, 1280), 'llama_index.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': '(0.7)'}), '(similarity_cutoff=0.7)\n', (1257, 1280), False, 'from llama_index.postprocessor import SimilarityPostprocessor\n')] |
import os
from dotenv import load_dotenv
load_dotenv()
import s3fs
from llama_index import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
load_index_from_storage
)
# load documents
documents = SimpleDirectoryReader('../../../examples/paul_graham_essay/data/').load_data()
print(len(documents))
index = VectorStoreIndex.from_documents(documents)
# set up s3fs
AWS_KEY = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET = os.environ['AWS_SECRET_ACCESS_KEY']
R2_ACCOUNT_ID = os.environ['R2_ACCOUNT_ID']
assert AWS_KEY is not None and AWS_KEY != ""
s3 = s3fs.S3FileSystem(
key=AWS_KEY,
secret=AWS_SECRET,
endpoint_url=f'https://{R2_ACCOUNT_ID}.r2.cloudflarestorage.com',
s3_additional_kwargs={'ACL': 'public-read'}
)
# save index to remote blob storage
index.set_index_id("vector_index")
# this is {bucket_name}/{index_name}
index.storage_context.persist('llama-index/storage_demo', fs=s3)
# load index from s3
sc = StorageContext.from_defaults(persist_dir='llama-index/storage_demo', fs=s3)
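# 'vector_index' matches the index_id set with set_index_id() before persisting (equivalently: load_index_from_storage(sc, index_id='vector_index')).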
index2 = load_index_from_storage(sc, 'vector_index') | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.load_index_from_storage",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults"
] | [((41, 54), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (52, 54), False, 'from dotenv import load_dotenv\n'), ((329, 371), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (360, 371), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((575, 744), 's3fs.S3FileSystem', 's3fs.S3FileSystem', ([], {'key': 'AWS_KEY', 'secret': 'AWS_SECRET', 'endpoint_url': 'f"""https://{R2_ACCOUNT_ID}.r2.cloudflarestorage.com"""', 's3_additional_kwargs': "{'ACL': 'public-read'}"}), "(key=AWS_KEY, secret=AWS_SECRET, endpoint_url=\n f'https://{R2_ACCOUNT_ID}.r2.cloudflarestorage.com',\n s3_additional_kwargs={'ACL': 'public-read'})\n", (592, 744), False, 'import s3fs\n'), ((951, 1026), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""llama-index/storage_demo"""', 'fs': 's3'}), "(persist_dir='llama-index/storage_demo', fs=s3)\n", (979, 1026), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((1036, 1079), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['sc', '"""vector_index"""'], {}), "(sc, 'vector_index')\n", (1059, 1079), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((220, 286), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""../../../examples/paul_graham_essay/data/"""'], {}), "('../../../examples/paul_graham_essay/data/')\n", (241, 286), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage\n')] |
import sys
import logging
import chromadb
import streamlit as st
from llama_index.llms import OpenAI
from llama_index import SimpleDirectoryReader, VectorStoreIndex
from llama_index.vector_stores import ChromaVectorStore
from llama_index.storage.storage_context import StorageContext
from llama_index import ServiceContext
from llama_index.node_parser.file.markdown import MarkdownNodeParser
from llama_index.chat_engine.types import ChatMode
from MarkdownReader import MarkdownReader
from sources import sources, get_file_metadata, Source
logger = logging.getLogger()
# logger.setLevel(logging.DEBUG)
# stream_handler = logging.StreamHandler(stream=sys.stdout)
# stream_handler.setLevel(logging.DEBUG)
# file_handler = logging.FileHandler("logs.log")
# file_handler.setLevel(logging.DEBUG)
# logger.addHandler(file_handler)
# logger.addHandler(stream_handler)
def get_filename_metadata(source, filename):
metadata = {
"source": source.get("description", source.get("title")),
**source.get("file_metadata", get_file_metadata)(filename),
}
# print(filename, metadata)
return metadata
def get_all_metadata(source):
return lambda filename: get_filename_metadata(source, filename)
def get_documents(source):
"""return Document for given source(path, file_metadata)"""
reader = SimpleDirectoryReader(
input_dir=source.get("path"),
required_exts=[".md"],
recursive=True,
exclude=source.get("exclude", []),
file_extractor={".md": MarkdownReader(source.get("include_metas", []))},
file_metadata=get_all_metadata(source),
)
# use MarkdownReader
docs = reader.load_data()
return docs
def index_source(chroma_client, source: Source):
"""index given source in chromadb"""
docs = get_documents(source)
chroma_collection = None
try:
chroma_collection = chroma_client.get_collection(source.get("id"))
logger.info("==> Collection {} already exist\n\n".format(source.get("id")))
except ValueError:
nodes = node_parser.get_nodes_from_documents(docs, show_progress=True)
chroma_collection = chroma_client.create_collection(source.get("id"))
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
# todo: show nodes content length
logger.info(
"index {} documents and {} nodes in {}".format(
len(docs), len(nodes), source.get("id")
)
)
index = VectorStoreIndex.from_documents(
docs,
storage_context=storage_context,
service_context=service_context,
show_progress=True,
)
logger.info(f"==> Loaded {len(docs)} docs\n\n")
if source.get("on_finish"):
source.get("on_finish", lambda a, b: None)(
docs, index
) # lambda for typings
finally:
if chroma_collection:
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
index = VectorStoreIndex.from_vector_store(vector_store)
return index
def debug_source(index, source):
query_engine = index.as_query_engine()
for query in source.get("examples", []):
response = query_engine.query(query)
print("\n", source.get("id"), ":", query, "\n")
print(str(response))
# print((response.get_formatted_sources()))
# print((response.source_nodes))
print("\n-------------")
# @st.cache_resource(show_spinner=False)
def index_sources1(sources):
logger.info("Indexing sources...")
indices = []
for source in sources:
logger.info("Indexing {}".format(source.get("id")))
index = index_source(chroma_client, source)
# debug_source(index, source)
indices.append(index)
return list(zip(indices, sources))
def index_sources(sources):
logger.info("Indexing sources...")
docs = []
index_id = "all_docs"
chroma_collection = None
for source in sources:
sourceDocs = get_documents(source)
docs += sourceDocs
if source.get("additional_documents"):
docs += source.get("additional_documents")(sourceDocs)
try:
chroma_collection = chroma_client.get_collection(index_id)
logger.info(f"==> Collection {index_id} already exist\n\n")
except ValueError:
# nodes = node_parser.get_nodes_from_documents(docs, show_progress=True)
chroma_collection = chroma_client.create_collection(index_id)
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
# todo: show nodes content length
logger.info("index {} documents in {}".format(len(docs), index_id))
index = VectorStoreIndex.from_documents(
docs,
storage_context=storage_context,
service_context=service_context,
show_progress=True,
)
logger.info(f"==> Loaded {len(docs)} docs\n\n")
# if source.get("on_finish"):
# source.get("on_finish", lambda a, b: None)(docs, index) # lambda for typings
finally:
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
index = VectorStoreIndex.from_vector_store(
vector_store, service_context=service_context
)
return index
node_parser = MarkdownNodeParser.from_defaults()
chroma_client = chromadb.PersistentClient(path="./chroma_db")
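# PersistentClient keeps collections on disk under ./chroma_db, which is why the indexing functions above check get_collection() before re-indexing.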
# llm = OpenAI(
# model="gpt-3.5-turbo",
# temperature=0.0,
# )
# use OpenAI by default
service_context = ServiceContext.from_defaults(
chunk_size=512,
# embed_model=embed_model,
node_parser=node_parser,
# llm=llm,
# prompt_helper=
)
index = index_sources(sources)
if __name__ == "__main__":
# query
chat = index.as_chat_engine(
chat_mode=ChatMode.CONTEXT,
verbose=True,
similarity_top_k=5,
)
chat.chat_repl()
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.node_parser.file.markdown.MarkdownNodeParser.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store"
] | [((553, 572), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (570, 572), False, 'import logging\n'), ((5510, 5544), 'llama_index.node_parser.file.markdown.MarkdownNodeParser.from_defaults', 'MarkdownNodeParser.from_defaults', ([], {}), '()\n', (5542, 5544), False, 'from llama_index.node_parser.file.markdown import MarkdownNodeParser\n'), ((5562, 5607), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (5587, 5607), False, 'import chromadb\n'), ((5725, 5794), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': '(512)', 'node_parser': 'node_parser'}), '(chunk_size=512, node_parser=node_parser)\n', (5753, 5794), False, 'from llama_index import ServiceContext\n'), ((5302, 5356), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (5319, 5356), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((5373, 5459), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {'service_context': 'service_context'}), '(vector_store, service_context=\n service_context)\n', (5407, 5459), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n'), ((2220, 2274), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (2237, 2274), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((2301, 2356), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (2329, 2356), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((2576, 2703), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'storage_context': 'storage_context', 'service_context': 'service_context', 'show_progress': '(True)'}), '(docs, storage_context=storage_context,\n service_context=service_context, show_progress=True)\n', (2607, 2703), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n'), ((3041, 3095), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (3058, 3095), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((3116, 3164), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {}), '(vector_store)\n', (3150, 3164), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n'), ((4627, 4681), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (4644, 4681), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((4708, 4763), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (4736, 4763), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((4898, 5025), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'storage_context': 'storage_context', 'service_context': 'service_context', 'show_progress': 
'(True)'}), '(docs, storage_context=storage_context,\n service_context=service_context, show_progress=True)\n', (4929, 5025), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n')] |
from flask_restx import Resource
from flask import request, render_template, Response
import openai
import os
import json
from llama_index import GPTSimpleVectorIndex
from llama_index import Document
from furl import furl
from PyPDF2 import PdfReader
os.environ["OPENAI_API_KEY"] = "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"
openai.organization = "org-Ddi6ZSgWKe8kPZlpwd6M6WVe"
openai.api_key = os.getenv("OPENAI_API_KEY")
def get_domain(link):
print("link", link)
f = furl(link)
host = f.host
tld = host.split(".")
if len(tld) > 2:
return tld[1]
else:
return tld[0]
def get_title(title):
f = furl(title)
host = f.host
if host != "":
return host
else:
return title
class Upload(Resource):
def post(self):
data = {}
userid = data.get('userid', 'cibi')
print(request.files)
file = request.files['userfile']
file.save(userid + file.filename)
print(file)
reader = PdfReader(userid + file.filename)
data = ""
for page in reader.pages:
data += page.extract_text()
unique_doc = file.filename
file_name = str(hash(userid + unique_doc)) + ".txt"
#dict_obj = {"userid":userid,"pageTitle":pageTitle}
alreadyPresentList = []
userDataJson = {}
if os.path.exists("./userData.json"):
with open('./userData.json', 'r') as userDataJsonFile:
userDataJson = json.loads(userDataJsonFile.read())
if userid in userDataJson:
alreadyPresentList = userDataJson[userid]
if unique_doc not in alreadyPresentList:
alreadyPresentList.append(unique_doc)
else:
alreadyPresentList.append(unique_doc)
userDataJson[userid] = alreadyPresentList
print("New data : ", str(userDataJson))
userDataJsonFileWrite = open('./userData.json', "w")
userDataJsonFileWrite.write(json.dumps(userDataJson))
userDataJsonFileWrite.close()
with open(str(file_name), 'w') as fl:
fl.write(data)
llama_doc = Document(data, doc_id=userid + "<sep>" + unique_doc)
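        # The doc_id encodes both the user and the file name, so re-uploading the same file updates the existing entry rather than adding a duplicate.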
if os.path.exists("database.json"):
existing_index = GPTSimpleVectorIndex.load_from_disk('database.json')
existing_index.update(llama_doc)
existing_index.save_to_disk("database.json")
else:
index = GPTSimpleVectorIndex.from_documents(documents=[llama_doc])
index.update(llama_doc)
index.save_to_disk("database.json")
response = ""
return response, 200 | [
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.GPTSimpleVectorIndex.from_documents",
"llama_index.Document"
] | [((407, 434), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (416, 434), False, 'import os\n'), ((491, 501), 'furl.furl', 'furl', (['link'], {}), '(link)\n', (495, 501), False, 'from furl import furl\n'), ((653, 664), 'furl.furl', 'furl', (['title'], {}), '(title)\n', (657, 664), False, 'from furl import furl\n'), ((1010, 1043), 'PyPDF2.PdfReader', 'PdfReader', (['(userid + file.filename)'], {}), '(userid + file.filename)\n', (1019, 1043), False, 'from PyPDF2 import PdfReader\n'), ((1360, 1393), 'os.path.exists', 'os.path.exists', (['"""./userData.json"""'], {}), "('./userData.json')\n", (1374, 1393), False, 'import os\n'), ((2173, 2225), 'llama_index.Document', 'Document', (['data'], {'doc_id': "(userid + '<sep>' + unique_doc)"}), "(data, doc_id=userid + '<sep>' + unique_doc)\n", (2181, 2225), False, 'from llama_index import Document\n'), ((2237, 2268), 'os.path.exists', 'os.path.exists', (['"""database.json"""'], {}), "('database.json')\n", (2251, 2268), False, 'import os\n'), ((2016, 2040), 'json.dumps', 'json.dumps', (['userDataJson'], {}), '(userDataJson)\n', (2026, 2040), False, 'import json\n'), ((2299, 2351), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['"""database.json"""'], {}), "('database.json')\n", (2334, 2351), False, 'from llama_index import GPTSimpleVectorIndex\n'), ((2488, 2546), 'llama_index.GPTSimpleVectorIndex.from_documents', 'GPTSimpleVectorIndex.from_documents', ([], {'documents': '[llama_doc]'}), '(documents=[llama_doc])\n', (2523, 2546), False, 'from llama_index import GPTSimpleVectorIndex\n')] |
from __future__ import annotations
import os
import dataclasses
from typing import TYPE_CHECKING, ClassVar
import time
import httpx
from rich import print
from xiaogpt.bot.base_bot import BaseBot, ChatHistoryMixin
from xiaogpt.utils import split_sentences
if TYPE_CHECKING:
import openai
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.azure_openai import AzureOpenAI, AsyncAzureOpenAI
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import (
Settings,
VectorStoreIndex,
StorageContext,
load_index_from_storage,
PromptTemplate,
SimpleDirectoryReader
)
@dataclasses.dataclass
class RagBot(ChatHistoryMixin, BaseBot):
name: ClassVar[str] = "RAG"
default_options: ClassVar[dict[str, str]] = {"model": "gpt4-1106-prevision"}
openai_key: str
api_base: str | None = None
proxy: str | None = None
history: list[tuple[str, str]] = dataclasses.field(default_factory=list, init=False)
def _make_query_engine(self, sess: httpx.AsyncClient, stream=False):
llm = AzureOpenAI(
engine="gpt4-1106-prevision",
api_key=self.openai_key,
azure_endpoint=self.api_base,
api_version="2023-12-01-preview",
)
embed_model = AzureOpenAIEmbedding(
model="text-embedding-ada-002",
deployment_name="embedding-ada-002-v2",
api_key=self.openai_key,
azure_endpoint="http://192.168.12.232:8880",
api_version="2023-05-15",
)
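        # Register the Azure LLM and embedding model globally so the index construction and queries below pick them up.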
Settings.embed_model = embed_model
Settings.llm = llm
# check if storage already exists
PERSIST_DIR = "xiaogpt/rag/storage"
if not os.path.exists(PERSIST_DIR):
# load the documents and create the index
documents = SimpleDirectoryReader("xiaogpt/rag/data").load_data()
index = VectorStoreIndex.from_documents(documents)
# store it for later
index.storage_context.persist(persist_dir=PERSIST_DIR)
else:
# load the existing index
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
index = load_index_from_storage(storage_context)
# set Logging to DEBUG for more detailed outputs
text_qa_template_str = (
"Context information is"
" below.\n---------------------\n{context_str}\n---------------------\nUsing"
" both the context information and also using your own knowledge, answer"
" the question with less that 100 words: {query_str}\nIf the context isn't helpful, you can also"
" answer the question on your own.\n"
)
text_qa_template = PromptTemplate(text_qa_template_str)
refine_template_str = (
"The original question is as follows: {query_str}\nWe have provided an"
" existing answer: {existing_answer}\nWe have the opportunity to refine"
" the existing answer (only if needed) with some more context"
" below.\n------------\n{context_msg}\n------------\nUsing both the new"
" context and your own knowledge, update existing answer with less than 100 words. \n"
)
refine_template = PromptTemplate(refine_template_str)
query_engine = index.as_query_engine(
text_qa_template=text_qa_template,
refine_template=refine_template,
llm=llm,
streaming=stream
)
return query_engine
@classmethod
def from_config(cls, config):
return cls(
openai_key=config.openai_key,
api_base=config.api_base,
proxy=config.proxy
)
async def ask(self, query, **options):
ms = self.get_messages()
ms.append({"role": "user", "content": f"{query}"})
kwargs = {**self.default_options, **options}
httpx_kwargs = {}
if self.proxy:
httpx_kwargs["proxies"] = self.proxy
async with httpx.AsyncClient(trust_env=True, **httpx_kwargs) as sess:
query_engine = self._make_query_engine(sess)
try:
completion = query_engine.query(query)
except Exception as e:
print(str(e))
return ""
message = completion.response
# print(completion.source_nodes[0].get_text())
self.add_message(query, message)
print(message)
return message
async def ask_stream(self, query, **options):
ms = self.get_messages()
ms.append({"role": "user", "content": f"{query}"})
kwargs = {**self.default_options, **options}
httpx_kwargs = {}
if self.proxy:
httpx_kwargs["proxies"] = self.proxy
async with httpx.AsyncClient(trust_env=True, **httpx_kwargs) as sess:
query_engine = self._make_query_engine(sess, stream=True)
try:
completion = query_engine.query(query)
except Exception as e:
print(str(e))
return
async def text_gen():
async for event in completion:
if not event.response:
continue
chunk_message = event.response
if chunk_message.response is None:
continue
print(chunk_message.response, end="")
yield chunk_message.response
message = ""
try:
async for sentence in split_sentences(text_gen()):
message += sentence
yield sentence
finally:
print()
self.add_message(query, message)
import functools
import dataclasses
from typing import Any, AsyncIterator, Literal, Optional
@dataclasses.dataclass
class Config:
openai_key: str = "voxelcloud"
proxy: str | None = None
api_base: str = "http://192.168.12.232:8881"
stream: bool = False
bot: str = "chatgptapi"
gpt_options: dict[str, Any] = dataclasses.field(default_factory=dict)
import asyncio
async def main():
    config = Config()  # assumes the Config class is already defined and accepts default parameters
bot = RagBot.from_config(config)
    # Ask a question
response = await bot.ask("什么是光疗?")
print(response)
# Run the async main function
if __name__ == "__main__":
asyncio.run(main())
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.embeddings.azure_openai.AzureOpenAIEmbedding",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.PromptTemplate",
"llama_index.llms.azure_openai.AzureOpenAI",
"llama_index.core.SimpleDirectoryReader"
] | [((1030, 1081), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list', 'init': '(False)'}), '(default_factory=list, init=False)\n', (1047, 1081), False, 'import dataclasses\n'), ((6247, 6286), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (6264, 6286), False, 'import dataclasses\n'), ((6470, 6485), 'rich.print', 'print', (['response'], {}), '(response)\n', (6475, 6485), False, 'from rich import print\n'), ((1172, 1306), 'llama_index.llms.azure_openai.AzureOpenAI', 'AzureOpenAI', ([], {'engine': '"""gpt4-1106-prevision"""', 'api_key': 'self.openai_key', 'azure_endpoint': 'self.api_base', 'api_version': '"""2023-12-01-preview"""'}), "(engine='gpt4-1106-prevision', api_key=self.openai_key,\n azure_endpoint=self.api_base, api_version='2023-12-01-preview')\n", (1183, 1306), False, 'from llama_index.llms.azure_openai import AzureOpenAI, AsyncAzureOpenAI\n'), ((1384, 1582), 'llama_index.embeddings.azure_openai.AzureOpenAIEmbedding', 'AzureOpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'deployment_name': '"""embedding-ada-002-v2"""', 'api_key': 'self.openai_key', 'azure_endpoint': '"""http://192.168.12.232:8880"""', 'api_version': '"""2023-05-15"""'}), "(model='text-embedding-ada-002', deployment_name=\n 'embedding-ada-002-v2', api_key=self.openai_key, azure_endpoint=\n 'http://192.168.12.232:8880', api_version='2023-05-15')\n", (1404, 1582), False, 'from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding\n'), ((2838, 2874), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['text_qa_template_str'], {}), '(text_qa_template_str)\n', (2852, 2874), False, 'from llama_index.core import Settings, VectorStoreIndex, StorageContext, load_index_from_storage, PromptTemplate, SimpleDirectoryReader\n'), ((3372, 3407), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['refine_template_str'], {}), '(refine_template_str)\n', (3386, 3407), False, 'from llama_index.core import Settings, VectorStoreIndex, StorageContext, load_index_from_storage, PromptTemplate, SimpleDirectoryReader\n'), ((1815, 1842), 'os.path.exists', 'os.path.exists', (['PERSIST_DIR'], {}), '(PERSIST_DIR)\n', (1829, 1842), False, 'import os\n'), ((1996, 2038), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2027, 2038), False, 'from llama_index.core import Settings, VectorStoreIndex, StorageContext, load_index_from_storage, PromptTemplate, SimpleDirectoryReader\n'), ((2221, 2274), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'PERSIST_DIR'}), '(persist_dir=PERSIST_DIR)\n', (2249, 2274), False, 'from llama_index.core import Settings, VectorStoreIndex, StorageContext, load_index_from_storage, PromptTemplate, SimpleDirectoryReader\n'), ((2295, 2335), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (2318, 2335), False, 'from llama_index.core import Settings, VectorStoreIndex, StorageContext, load_index_from_storage, PromptTemplate, SimpleDirectoryReader\n'), ((4149, 4198), 'httpx.AsyncClient', 'httpx.AsyncClient', ([], {'trust_env': '(True)'}), '(trust_env=True, **httpx_kwargs)\n', (4166, 4198), False, 'import httpx\n'), ((4586, 4600), 'rich.print', 'print', (['message'], {}), '(message)\n', (4591, 4600), False, 'from rich import print\n'), ((4941, 4990), 'httpx.AsyncClient', 'httpx.AsyncClient', ([], {'trust_env': 
'(True)'}), '(trust_env=True, **httpx_kwargs)\n', (4958, 4990), False, 'import httpx\n'), ((5856, 5863), 'rich.print', 'print', ([], {}), '()\n', (5861, 5863), False, 'from rich import print\n'), ((1922, 1963), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""xiaogpt/rag/data"""'], {}), "('xiaogpt/rag/data')\n", (1943, 1963), False, 'from llama_index.core import Settings, VectorStoreIndex, StorageContext, load_index_from_storage, PromptTemplate, SimpleDirectoryReader\n'), ((5547, 5584), 'rich.print', 'print', (['chunk_message.response'], {'end': '""""""'}), "(chunk_message.response, end='')\n", (5552, 5584), False, 'from rich import print\n')] |
"""
This file contains functions for loading data into MemGPT's archival storage.
Data can be loaded with the following command, once a load function is defined:
```
memgpt load <data-connector-type> --name <dataset-name> [ADDITIONAL ARGS]
```
"""
from typing import List
from tqdm import tqdm
import typer
from memgpt.embeddings import embedding_model
from memgpt.connectors.storage import StorageConnector, Passage
from memgpt.config import MemGPTConfig
from llama_index import (
VectorStoreIndex,
ServiceContext,
StorageContext,
load_index_from_storage,
)
app = typer.Typer()
def store_docs(name, docs, show_progress=True):
"""Common function for embedding and storing documents"""
storage = StorageConnector.get_storage_connector(name=name)
config = MemGPTConfig.load()
embed_model = embedding_model()
# use llama index to run embeddings code
service_context = ServiceContext.from_defaults(llm=None, embed_model=embed_model, chunk_size=config.embedding_chunk_size)
index = VectorStoreIndex.from_documents(docs, service_context=service_context, show_progress=True)
embed_dict = index._vector_store._data.embedding_dict
node_dict = index._docstore.docs
# gather passages
passages = []
for node_id, node in tqdm(node_dict.items()):
vector = embed_dict[node_id]
node.embedding = vector
text = node.text.replace("\x00", "\uFFFD") # hacky fix for error on null characters
assert (
len(node.embedding) == config.embedding_dim
), f"Expected embedding dimension {config.embedding_dim}, got {len(node.embedding)}: {node.embedding}"
passages.append(Passage(text=text, embedding=vector))
# insert into storage
storage.insert_many(passages)
storage.save()
@app.command("index")
def load_index(
name: str = typer.Option(help="Name of dataset to load."), dir: str = typer.Option(help="Path to directory containing index.")
):
"""Load a LlamaIndex saved VectorIndex into MemGPT"""
# load index data
storage_context = StorageContext.from_defaults(persist_dir=dir)
loaded_index = load_index_from_storage(storage_context)
# hacky code to extract out passages/embeddings (thanks a lot, llama index)
embed_dict = loaded_index._vector_store._data.embedding_dict
node_dict = loaded_index._docstore.docs
passages = []
for node_id, node in node_dict.items():
vector = embed_dict[node_id]
node.embedding = vector
passages.append(Passage(text=node.text, embedding=vector))
# create storage connector
storage = StorageConnector.get_storage_connector(name=name)
# add and save all passages
storage.insert_many(passages)
storage.save()
@app.command("directory")
def load_directory(
name: str = typer.Option(help="Name of dataset to load."),
input_dir: str = typer.Option(None, help="Path to directory containing dataset."),
input_files: List[str] = typer.Option(None, help="List of paths to files containing dataset."),
recursive: bool = typer.Option(False, help="Recursively search for files in directory."),
):
from llama_index import SimpleDirectoryReader
if recursive:
assert input_dir is not None, "Must provide input directory if recursive is True."
if input_dir is not None:
reader = SimpleDirectoryReader(
input_dir=input_dir,
recursive=recursive,
)
else:
reader = SimpleDirectoryReader(input_files=input_files)
# load docs
print("loading data")
docs = reader.load_data()
print("done loading data")
store_docs(name, docs)
@app.command("webpage")
def load_webpage(
name: str = typer.Option(help="Name of dataset to load."),
urls: List[str] = typer.Option(None, help="List of urls to load."),
):
from llama_index import SimpleWebPageReader
docs = SimpleWebPageReader(html_to_text=True).load_data(urls)
store_docs(name, docs)
@app.command("database")
def load_database(
name: str = typer.Option(help="Name of dataset to load."),
query: str = typer.Option(help="Database query."),
dump_path: str = typer.Option(None, help="Path to dump file."),
scheme: str = typer.Option(None, help="Database scheme."),
host: str = typer.Option(None, help="Database host."),
port: int = typer.Option(None, help="Database port."),
user: str = typer.Option(None, help="Database user."),
password: str = typer.Option(None, help="Database password."),
dbname: str = typer.Option(None, help="Database name."),
):
from llama_index.readers.database import DatabaseReader
print(dump_path, scheme)
if dump_path is not None:
# read from database dump file
from sqlalchemy import create_engine
engine = create_engine(f"sqlite:///{dump_path}")
db = DatabaseReader(engine=engine)
else:
assert dump_path is None, "Cannot provide both dump_path and database connection parameters."
assert scheme is not None, "Must provide database scheme."
assert host is not None, "Must provide database host."
assert port is not None, "Must provide database port."
assert user is not None, "Must provide database user."
assert password is not None, "Must provide database password."
assert dbname is not None, "Must provide database name."
db = DatabaseReader(
scheme=scheme, # Database Scheme
host=host, # Database Host
port=port, # Database Port
user=user, # Database User
password=password, # Database Password
dbname=dbname, # Database Name
)
# load data
docs = db.load_data(query=query)
store_docs(name, docs)
@app.command("vector-database")
def load_vector_database(
name: str = typer.Option(help="Name of dataset to load."),
uri: str = typer.Option(help="Database URI."),
table_name: str = typer.Option(help="Name of table containing data."),
text_column: str = typer.Option(help="Name of column containing text."),
embedding_column: str = typer.Option(help="Name of column containing embedding."),
):
"""Load pre-computed embeddings into MemGPT from a database."""
from sqlalchemy import create_engine, select, MetaData, Table, Inspector
from pgvector.sqlalchemy import Vector
# connect to db table
engine = create_engine(uri)
metadata = MetaData()
# Create an inspector to inspect the database
inspector = Inspector.from_engine(engine)
table_names = inspector.get_table_names()
assert table_name in table_names, f"Table {table_name} not found in database: tables that exist {table_names}."
table = Table(table_name, metadata, autoload_with=engine)
config = MemGPTConfig.load()
# Prepare a select statement
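    # Casting the embedding column to a pgvector Vector of the configured dimension returns each embedding as a numeric vector.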
select_statement = select(table.c[text_column], table.c[embedding_column].cast(Vector(config.embedding_dim)))
# Execute the query and fetch the results
with engine.connect() as connection:
result = connection.execute(select_statement).fetchall()
# Convert to a list of tuples (text, embedding)
passages = []
for text, embedding in result:
passages.append(Passage(text=text, embedding=embedding))
assert config.embedding_dim == len(embedding), f"Expected embedding dimension {config.embedding_dim}, got {len(embedding)}"
# insert into storage
storage = StorageConnector.get_storage_connector(name=name)
storage.insert_many(passages)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.SimpleWebPageReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.readers.database.DatabaseReader",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage"
] | [((586, 599), 'typer.Typer', 'typer.Typer', ([], {}), '()\n', (597, 599), False, 'import typer\n'), ((727, 776), 'memgpt.connectors.storage.StorageConnector.get_storage_connector', 'StorageConnector.get_storage_connector', ([], {'name': 'name'}), '(name=name)\n', (765, 776), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((790, 809), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (807, 809), False, 'from memgpt.config import MemGPTConfig\n'), ((828, 845), 'memgpt.embeddings.embedding_model', 'embedding_model', ([], {}), '()\n', (843, 845), False, 'from memgpt.embeddings import embedding_model\n'), ((914, 1022), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'None', 'embed_model': 'embed_model', 'chunk_size': 'config.embedding_chunk_size'}), '(llm=None, embed_model=embed_model, chunk_size=\n config.embedding_chunk_size)\n', (942, 1022), False, 'from llama_index import VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage\n'), ((1030, 1124), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'service_context': 'service_context', 'show_progress': '(True)'}), '(docs, service_context=service_context,\n show_progress=True)\n', (1061, 1124), False, 'from llama_index import VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage\n'), ((1851, 1896), 'typer.Option', 'typer.Option', ([], {'help': '"""Name of dataset to load."""'}), "(help='Name of dataset to load.')\n", (1863, 1896), False, 'import typer\n'), ((1909, 1965), 'typer.Option', 'typer.Option', ([], {'help': '"""Path to directory containing index."""'}), "(help='Path to directory containing index.')\n", (1921, 1965), False, 'import typer\n'), ((2071, 2116), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'dir'}), '(persist_dir=dir)\n', (2099, 2116), False, 'from llama_index import VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage\n'), ((2136, 2176), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (2159, 2176), False, 'from llama_index import VectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage\n'), ((2612, 2661), 'memgpt.connectors.storage.StorageConnector.get_storage_connector', 'StorageConnector.get_storage_connector', ([], {'name': 'name'}), '(name=name)\n', (2650, 2661), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((2812, 2857), 'typer.Option', 'typer.Option', ([], {'help': '"""Name of dataset to load."""'}), "(help='Name of dataset to load.')\n", (2824, 2857), False, 'import typer\n'), ((2880, 2944), 'typer.Option', 'typer.Option', (['None'], {'help': '"""Path to directory containing dataset."""'}), "(None, help='Path to directory containing dataset.')\n", (2892, 2944), False, 'import typer\n'), ((2975, 3044), 'typer.Option', 'typer.Option', (['None'], {'help': '"""List of paths to files containing dataset."""'}), "(None, help='List of paths to files containing dataset.')\n", (2987, 3044), False, 'import typer\n'), ((3068, 3138), 'typer.Option', 'typer.Option', (['(False)'], {'help': '"""Recursively search for files in directory."""'}), "(False, help='Recursively search for files in directory.')\n", (3080, 3138), False, 'import typer\n'), ((3715, 3760), 'typer.Option', 'typer.Option', ([], {'help': '"""Name of dataset to load."""'}), "(help='Name of dataset to 
load.')\n", (3727, 3760), False, 'import typer\n'), ((3784, 3832), 'typer.Option', 'typer.Option', (['None'], {'help': '"""List of urls to load."""'}), "(None, help='List of urls to load.')\n", (3796, 3832), False, 'import typer\n'), ((4041, 4086), 'typer.Option', 'typer.Option', ([], {'help': '"""Name of dataset to load."""'}), "(help='Name of dataset to load.')\n", (4053, 4086), False, 'import typer\n'), ((4105, 4141), 'typer.Option', 'typer.Option', ([], {'help': '"""Database query."""'}), "(help='Database query.')\n", (4117, 4141), False, 'import typer\n'), ((4164, 4209), 'typer.Option', 'typer.Option', (['None'], {'help': '"""Path to dump file."""'}), "(None, help='Path to dump file.')\n", (4176, 4209), False, 'import typer\n'), ((4229, 4272), 'typer.Option', 'typer.Option', (['None'], {'help': '"""Database scheme."""'}), "(None, help='Database scheme.')\n", (4241, 4272), False, 'import typer\n'), ((4290, 4331), 'typer.Option', 'typer.Option', (['None'], {'help': '"""Database host."""'}), "(None, help='Database host.')\n", (4302, 4331), False, 'import typer\n'), ((4349, 4390), 'typer.Option', 'typer.Option', (['None'], {'help': '"""Database port."""'}), "(None, help='Database port.')\n", (4361, 4390), False, 'import typer\n'), ((4408, 4449), 'typer.Option', 'typer.Option', (['None'], {'help': '"""Database user."""'}), "(None, help='Database user.')\n", (4420, 4449), False, 'import typer\n'), ((4471, 4516), 'typer.Option', 'typer.Option', (['None'], {'help': '"""Database password."""'}), "(None, help='Database password.')\n", (4483, 4516), False, 'import typer\n'), ((4536, 4577), 'typer.Option', 'typer.Option', (['None'], {'help': '"""Database name."""'}), "(None, help='Database name.')\n", (4548, 4577), False, 'import typer\n'), ((5852, 5897), 'typer.Option', 'typer.Option', ([], {'help': '"""Name of dataset to load."""'}), "(help='Name of dataset to load.')\n", (5864, 5897), False, 'import typer\n'), ((5914, 5948), 'typer.Option', 'typer.Option', ([], {'help': '"""Database URI."""'}), "(help='Database URI.')\n", (5926, 5948), False, 'import typer\n'), ((5972, 6023), 'typer.Option', 'typer.Option', ([], {'help': '"""Name of table containing data."""'}), "(help='Name of table containing data.')\n", (5984, 6023), False, 'import typer\n'), ((6048, 6100), 'typer.Option', 'typer.Option', ([], {'help': '"""Name of column containing text."""'}), "(help='Name of column containing text.')\n", (6060, 6100), False, 'import typer\n'), ((6130, 6187), 'typer.Option', 'typer.Option', ([], {'help': '"""Name of column containing embedding."""'}), "(help='Name of column containing embedding.')\n", (6142, 6187), False, 'import typer\n'), ((6421, 6439), 'sqlalchemy.create_engine', 'create_engine', (['uri'], {}), '(uri)\n', (6434, 6439), False, 'from sqlalchemy import create_engine\n'), ((6455, 6465), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (6463, 6465), False, 'from sqlalchemy import create_engine, select, MetaData, Table, Inspector\n'), ((6532, 6561), 'sqlalchemy.Inspector.from_engine', 'Inspector.from_engine', (['engine'], {}), '(engine)\n', (6553, 6561), False, 'from sqlalchemy import create_engine, select, MetaData, Table, Inspector\n'), ((6737, 6786), 'sqlalchemy.Table', 'Table', (['table_name', 'metadata'], {'autoload_with': 'engine'}), '(table_name, metadata, autoload_with=engine)\n', (6742, 6786), False, 'from sqlalchemy import create_engine, select, MetaData, Table, Inspector\n'), ((6801, 6820), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (6818, 6820), 
False, 'from memgpt.config import MemGPTConfig\n'), ((7466, 7515), 'memgpt.connectors.storage.StorageConnector.get_storage_connector', 'StorageConnector.get_storage_connector', ([], {'name': 'name'}), '(name=name)\n', (7504, 7515), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((3351, 3414), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'input_dir', 'recursive': 'recursive'}), '(input_dir=input_dir, recursive=recursive)\n', (3372, 3414), False, 'from llama_index import SimpleDirectoryReader\n'), ((3477, 3523), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'input_files'}), '(input_files=input_files)\n', (3498, 3523), False, 'from llama_index import SimpleDirectoryReader\n'), ((4805, 4844), 'sqlalchemy.create_engine', 'create_engine', (['f"""sqlite:///{dump_path}"""'], {}), "(f'sqlite:///{dump_path}')\n", (4818, 4844), False, 'from sqlalchemy import create_engine\n'), ((4859, 4888), 'llama_index.readers.database.DatabaseReader', 'DatabaseReader', ([], {'engine': 'engine'}), '(engine=engine)\n', (4873, 4888), False, 'from llama_index.readers.database import DatabaseReader\n'), ((5407, 5508), 'llama_index.readers.database.DatabaseReader', 'DatabaseReader', ([], {'scheme': 'scheme', 'host': 'host', 'port': 'port', 'user': 'user', 'password': 'password', 'dbname': 'dbname'}), '(scheme=scheme, host=host, port=port, user=user, password=\n password, dbname=dbname)\n', (5421, 5508), False, 'from llama_index.readers.database import DatabaseReader\n'), ((1677, 1713), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': 'text', 'embedding': 'vector'}), '(text=text, embedding=vector)\n', (1684, 1713), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((2523, 2564), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': 'node.text', 'embedding': 'vector'}), '(text=node.text, embedding=vector)\n', (2530, 2564), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((3897, 3935), 'llama_index.SimpleWebPageReader', 'SimpleWebPageReader', ([], {'html_to_text': '(True)'}), '(html_to_text=True)\n', (3916, 3935), False, 'from llama_index import SimpleWebPageReader\n'), ((6938, 6966), 'pgvector.sqlalchemy.Vector', 'Vector', (['config.embedding_dim'], {}), '(config.embedding_dim)\n', (6944, 6966), False, 'from pgvector.sqlalchemy import Vector\n'), ((7252, 7291), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': 'text', 'embedding': 'embedding'}), '(text=text, embedding=embedding)\n', (7259, 7291), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n')] |
import logging
import sys
import requests
import os
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
import torch
from llama_index.llms import LlamaCPP
from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding
#!CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir
# uncomment this to use the GPU engine (CUBLAS)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
url = 'https://firebasestorage.googleapis.com/v0/b/ichiropractic.appspot.com/o/test.pdf?alt=media&token=c7b685c1-712d-4b0e-bbfd-3d80198c6584'
if not os.path.exists('Data'):
os.makedirs('Data')
file_path = os.path.join('Data', 'test.pdf')
response = requests.get(url)
if response.status_code == 200:
with open(file_path, 'wb') as file:
file.write(response.content)
else:
print(f'Failed to download the file: {response.status_code}')
# Setup LlamaCPP
llm = LlamaCPP(
    model_url='',  # compatible models are GGUF only
    model_path='./dolphin-2.1-mistral-7b.Q4_K_M.gguf',  # the dolphin model from my local machine; replace this with your own model path
temperature=0.1,
max_new_tokens=3024,
context_window=3900,
generate_kwargs={},
model_kwargs={"n_gpu_layers": 128},
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
verbose=True,
)
print('LlamaCPP is ready to use.')
| [
"llama_index.llms.LlamaCPP"
] | [((511, 570), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (530, 570), False, 'import logging\n'), ((854, 886), 'os.path.join', 'os.path.join', (['"""Data"""', '"""test.pdf"""'], {}), "('Data', 'test.pdf')\n", (866, 886), False, 'import os\n'), ((898, 915), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (910, 915), False, 'import requests\n'), ((1122, 1423), 'llama_index.llms.LlamaCPP', 'LlamaCPP', ([], {'model_url': '""""""', 'model_path': '"""./dolphin-2.1-mistral-7b.Q4_K_M.gguf"""', 'temperature': '(0.1)', 'max_new_tokens': '(3024)', 'context_window': '(3900)', 'generate_kwargs': '{}', 'model_kwargs': "{'n_gpu_layers': 128}", 'messages_to_prompt': 'messages_to_prompt', 'completion_to_prompt': 'completion_to_prompt', 'verbose': '(True)'}), "(model_url='', model_path='./dolphin-2.1-mistral-7b.Q4_K_M.gguf',\n temperature=0.1, max_new_tokens=3024, context_window=3900,\n generate_kwargs={}, model_kwargs={'n_gpu_layers': 128},\n messages_to_prompt=messages_to_prompt, completion_to_prompt=\n completion_to_prompt, verbose=True)\n", (1130, 1423), False, 'from llama_index.llms import LlamaCPP\n'), ((602, 642), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (623, 642), False, 'import logging\n'), ((794, 816), 'os.path.exists', 'os.path.exists', (['"""Data"""'], {}), "('Data')\n", (808, 816), False, 'import os\n'), ((822, 841), 'os.makedirs', 'os.makedirs', (['"""Data"""'], {}), "('Data')\n", (833, 841), False, 'import os\n'), ((571, 590), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (588, 590), False, 'import logging\n')] |
from llama_index import SimpleDirectoryReader, VectorStoreIndex, LLMPredictor, PromptHelper
from langchain.chat_models import ChatOpenAI
import gradio as gr
from pprint import pprint
import IPython
import sys
import os
from pathlib import Path
# Check if the environment variable exists
if "OPENAIKEY" in os.environ:
# If it exists, get its value into a Python variable
api_key = os.environ["OPENAIKEY"]
else:
raise ValueError("Please set the OPENAIKEY environment variable")
os.environ["OPENAI_API_KEY"] = api_key
from llama_index import VectorStoreIndex, download_loader
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader
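# Build a vector index over the local documents, persist it to disk, and run a sample query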
documents = SimpleDirectoryReader('/Users/despiegk1/Downloads/ai').load_data()
index = GPTVectorStoreIndex.from_documents(documents)
index.storage_context.persist()
query_engine = index.as_query_engine()
print(query_engine.query("what is ourworld?"))
# ImageReader = download_loader("ImageReader")
# imageLoader = ImageReader(text_type="plain_text")
# FlatPdfReader = download_loader("FlatPdfReader")
# pdfLoader = FlatPdfReader(image_loader=imageLoader)
# document = pdfLoader.load_data(file=Path('~/Downloads/its not about what we have, its about what we believe in. (5).pdf'))
# index = VectorStoreIndex.from_documents([document])
# query_engine = index.as_query_engine()
# query_engine.query('how vulnerable are security protocols?')
IPython.embed()
| [
"llama_index.SimpleDirectoryReader",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((745, 790), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (779, 790), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader\n'), ((1396, 1411), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (1409, 1411), False, 'import IPython\n'), ((670, 724), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""/Users/despiegk1/Downloads/ai"""'], {}), "('/Users/despiegk1/Downloads/ai')\n", (691, 724), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader\n')] |
import argparse
import copy
import logging
import os
import sys
import warnings
from typing import Optional, List, Callable
from langchain.llms import OpenAI
import faiss
import gradio as gr
import torch
import torch.distributed as dist
import transformers
from accelerate import dispatch_model, infer_auto_device_map
from accelerate.hooks import (
AlignDevicesHook,
add_hook_to_module,
remove_hook_from_submodules,
)
from accelerate.utils import get_balanced_memory
from huggingface_hub import hf_hub_download
from llama_index import LLMPredictor
from llama_index import PromptHelper, SimpleDirectoryReader
from llama_index import ServiceContext
from llama_index import GPTKeywordTableIndex, GPTSimpleVectorIndex, GPTListIndex, GPTTreeIndex, GPTFaissIndex
from peft import PeftModelForCausalLM, LoraConfig
from peft.utils import PeftType, set_peft_model_state_dict
from torch import nn
from transformers.deepspeed import is_deepspeed_zero3_enabled
from transformers.generation.beam_search import BeamSearchScorer
from transformers.generation.utils import (
LogitsProcessorList,
StoppingCriteriaList,
GenerationMixin,
)
from model import CustomLLM, Llama7bHFLLM
assert (
"LlamaTokenizer" in transformers._import_structure["models.llama"]
), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
class SteamGenerationMixin(PeftModelForCausalLM, GenerationMixin):
    # support for streaming beam search
@torch.no_grad()
def stream_generate(
self,
input_ids: Optional[torch.Tensor] = None,
generation_config: Optional[GenerationConfig] = None,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
prefix_allowed_tokens_fn: Optional[
Callable[[int, torch.Tensor], List[int]]
] = None,
**kwargs,
):
self._reorder_cache = self.base_model._reorder_cache
if is_deepspeed_zero3_enabled() and dist.world_size() > 1:
synced_gpus = True
else:
synced_gpus = False
if kwargs.get("attention_mask", None) is not None:
# concat prompt attention mask
prefix_attention_mask = torch.ones(
kwargs["input_ids"].shape[0], self.peft_config.num_virtual_tokens
).to(kwargs["input_ids"].device)
kwargs["attention_mask"] = torch.cat(
(prefix_attention_mask, kwargs["attention_mask"]), dim=1
)
if kwargs.get("position_ids", None) is not None:
warnings.warn(
"Position ids are not supported for parameter efficient tuning. Ignoring position ids."
)
kwargs["position_ids"] = None
if kwargs.get("token_type_ids", None) is not None:
warnings.warn(
"Token type ids are not supported for parameter efficient tuning. Ignoring token type ids"
)
kwargs["token_type_ids"] = None
batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
if generation_config is None:
generation_config = self.generation_config
generation_config = copy.deepcopy(generation_config)
model_kwargs = generation_config.update(**kwargs)
bos_token_id, eos_token_id, pad_token_id = (
generation_config.bos_token_id,
generation_config.eos_token_id,
generation_config.pad_token_id,
)
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
has_default_max_length = (
kwargs.get("max_length") is None
and generation_config.max_length is not None
)
if has_default_max_length and generation_config.max_new_tokens is None:
warnings.warn(
f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
"This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
" recommend using `max_new_tokens` to control the maximum length of the generation.",
UserWarning,
)
elif generation_config.max_new_tokens is not None:
generation_config.max_length = (
generation_config.max_new_tokens + input_ids_seq_length
)
if generation_config.min_new_tokens is not None:
generation_config.min_length = (
generation_config.min_new_tokens + input_ids_seq_length
)
if input_ids_seq_length >= generation_config.max_length:
input_ids_string = (
"decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
)
# 2. Set generation parameters if not already defined
logits_processor = (
logits_processor if logits_processor is not None else LogitsProcessorList()
)
stopping_criteria = (
stopping_criteria
if stopping_criteria is not None
else StoppingCriteriaList()
)
# 8. prepare distribution pre_processing samplers
logits_processor = self._get_logits_processor(
generation_config=generation_config,
input_ids_seq_length=input_ids_seq_length,
encoder_input_ids=input_ids,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
logits_processor=logits_processor,
)
# 9. prepare stopping criteria
stopping_criteria = self._get_stopping_criteria(
generation_config=generation_config, stopping_criteria=stopping_criteria
)
logits_warper = self._get_logits_warper(generation_config)
# 10. go into beam search generation modes
# 11. prepare beam search scorer
num_beams = generation_config.num_beams
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=generation_config.num_beams,
device=input_ids.device,
length_penalty=generation_config.length_penalty,
do_early_stopping=generation_config.early_stopping,
num_beam_hyps_to_keep=generation_config.num_return_sequences,
max_length=generation_config.max_length,
)
# 12. interleave input_ids with `num_beams` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_beams,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
# beam_search logits
batch_beam_size, cur_len = input_ids.shape
if num_beams * batch_size != batch_beam_size:
raise ValueError(
f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
)
beam_scores = torch.zeros(
(batch_size, num_beams), dtype=torch.float, device=input_ids.device
)
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view((batch_size * num_beams,))
this_peer_finished = False # used by synced_gpus only
while True:
if synced_gpus:
# Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence
this_peer_finished_flag = torch.tensor(
0.0 if this_peer_finished else 1.0
).to(input_ids.device)
# send 0.0 if we finished, 1.0 otherwise
dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
# did all peers finish? the reduced sum will be 0.0 then
if this_peer_finished_flag.item() == 0.0:
break
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=False,
output_hidden_states=False,
)
if synced_gpus and this_peer_finished:
cur_len = cur_len + 1
continue # don't waste resources running the code we don't need
next_token_logits = outputs.logits[:, -1, :]
# next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) hack: adjust tokens for Marian.
next_token_scores = nn.functional.log_softmax(
next_token_logits, dim=-1
) # (batch_size * num_beams, vocab_size)
next_token_scores_processed = logits_processor(input_ids, next_token_scores)
next_token_scores = next_token_scores_processed + beam_scores[
:, None
].expand_as(next_token_scores)
# reshape for beam search
vocab_size = next_token_scores.shape[-1]
next_token_scores = next_token_scores.view(
batch_size, num_beams * vocab_size
)
# Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search)
next_token_scores, next_tokens = torch.topk(
next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True
)
next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor")
next_tokens = next_tokens % vocab_size
# stateless
beam_outputs = beam_scorer.process(
input_ids,
next_token_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
beam_indices=None,
)
beam_scores = beam_outputs["next_beam_scores"]
beam_next_tokens = beam_outputs["next_beam_tokens"]
beam_idx = beam_outputs["next_beam_indices"]
input_ids = torch.cat(
[input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1
)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
if model_kwargs["past_key_values"] is not None:
model_kwargs["past_key_values"] = self._reorder_cache(
model_kwargs["past_key_values"], beam_idx
)
# increase cur_len
cur_len = cur_len + 1
yield input_ids
if beam_scorer.is_done or stopping_criteria(input_ids, None):
if not synced_gpus:
break
else:
this_peer_finished = True
final_result = beam_scorer.finalize(
input_ids,
beam_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
max_length=stopping_criteria.max_length,
beam_indices=None,
)
yield final_result["sequences"]
# default it call `model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config)`, not cls!! so inherent PeftModelForCausalLM is no sense
@classmethod
def from_pretrained(cls, model, model_id, **kwargs):
# load the config
config = LoraConfig.from_pretrained(model_id)
if getattr(model, "hf_device_map", None) is not None:
remove_hook_from_submodules(model)
# here is the hack
model = cls(model, config)
# load weights if any
if os.path.exists(os.path.join(model_id, "adapter_model.bin")):
filename = os.path.join(model_id, "adapter_model.bin")
else:
try:
filename = hf_hub_download(model_id, "adapter_model.bin")
except: # noqa
raise ValueError(
f"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. "
f"Please check that the file {'adapter_model.bin'} is present at {model_id}."
)
adapters_weights = torch.load(
filename,
map_location=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
)
# load the weights into the model
model = set_peft_model_state_dict(model, adapters_weights)
if getattr(model, "hf_device_map", None) is not None:
device_map = kwargs.get("device_map", "auto")
max_memory = kwargs.get("max_memory", None)
no_split_module_classes = model._no_split_modules
if device_map != "sequential":
max_memory = get_balanced_memory(
model,
max_memory=max_memory,
no_split_module_classes=no_split_module_classes,
low_zero=(device_map == "balanced_low_0"),
)
if isinstance(device_map, str):
device_map = infer_auto_device_map(
model,
max_memory=max_memory,
no_split_module_classes=no_split_module_classes,
)
model = dispatch_model(model, device_map=device_map)
hook = AlignDevicesHook(io_same_device=True)
if model.peft_config.peft_type == PeftType.LORA:
add_hook_to_module(model.base_model.model, hook)
else:
remove_hook_from_submodules(model.prompt_encoder)
add_hook_to_module(model.base_model, hook)
return model
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, default="decapoda-research/llama-7b-hf")
parser.add_argument("--lora_path", type=str, default="./lora-Vicuna/checkpoint-3000")
parser.add_argument("--use_local", type=int, default=1)
args = parser.parse_args()
tokenizer = LlamaTokenizer.from_pretrained(args.model_path)
LOAD_8BIT = True
BASE_MODEL = args.model_path
LORA_WEIGHTS = args.lora_path
# fix the path for local checkpoint
lora_bin_path = os.path.join(args.lora_path, "adapter_model.bin")
print(lora_bin_path)
if not os.path.exists(lora_bin_path) and args.use_local:
pytorch_bin_path = os.path.join(args.lora_path, "pytorch_model.bin")
print(pytorch_bin_path)
if os.path.exists(pytorch_bin_path):
os.rename(pytorch_bin_path, lora_bin_path)
warnings.warn(
"The file name of the lora checkpoint'pytorch_model.bin' is replaced with 'adapter_model.bin'"
)
else:
        raise FileNotFoundError('Checkpoint is not found!')
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
try:
if torch.backends.mps.is_available():
device = "mps"
except:
pass
if device == "cuda":
model = LlamaForCausalLM.from_pretrained(
BASE_MODEL,
load_in_8bit=LOAD_8BIT,
torch_dtype=torch.float16,
device_map={"": 0},
)
model = SteamGenerationMixin.from_pretrained(
model, LORA_WEIGHTS, torch_dtype=torch.float16, device_map={"": 0}
)
elif device == "mps":
model = LlamaForCausalLM.from_pretrained(
BASE_MODEL,
device_map={"": device},
torch_dtype=torch.float16,
)
model = SteamGenerationMixin.from_pretrained(
model,
LORA_WEIGHTS,
device_map={"": device},
torch_dtype=torch.float16,
)
else:
model = LlamaForCausalLM.from_pretrained(
BASE_MODEL, device_map={"": device}, low_cpu_mem_usage=True
)
model = SteamGenerationMixin.from_pretrained(
model,
LORA_WEIGHTS,
device_map={"": device},
)
def generate_prompt(instruction, input=None):
if input:
return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Input:
{input}
### Response:"""
else:
return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Response:"""
if not LOAD_8BIT:
model.half() # seems to fix bugs for some users.
model.eval()
if torch.__version__ >= "2" and sys.platform != "win32":
model = torch.compile(model)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import openai
openai.api_key = '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'
os.environ["OPENAI_API_KEY"] = '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'
def evaluate(
input,
temperature=0.1,
top_p=0.75,
top_k=40,
num_beams=4,
max_new_tokens=2500,
min_new_tokens=1,
repetition_penalty=2.0,
**kwargs,
):
print('start text llama-index')
# TEST
#
# set maximum input size
max_input_size = 2048
# set number of output tokens
num_output = 1024
# set maximum chunk overlap
max_chunk_overlap = 20
gen_config = GenerationConfig(
temperature=temperature,
top_p=top_p,
top_k=top_k,
num_beams=num_beams,
bos_token_id=1,
eos_token_id=2,
pad_token_id=0,
max_new_tokens=max_new_tokens,
# max_length=max_new_tokens+input_sequence
min_new_tokens=min_new_tokens,
# min_length=min_new_tokens+input_sequence
repetition_penalty=repetition_penalty
)
# service_context = ServiceContext.from_defaults(
# llm_predictor=LLMPredictor(llm=CustomLLM(mod=model, token=tokenizer, gen_config=gen_config, device=device)),
# prompt_helper=PromptHelper(max_input_size, num_output, max_chunk_overlap))
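    # Build a llama-index service context that wraps the local model as the LLM predictor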
service_context = ServiceContext.from_defaults(
llm_predictor=LLMPredictor(llm=model),
prompt_helper=PromptHelper(max_input_size, num_output, max_chunk_overlap))
documents = SimpleDirectoryReader('Chinese-Vicuna/index-docs').load_data()
print(documents)
print('start init index')
# llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003"))
# default_service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
# index = GPTFaissIndex.from_documents(documents, service_context=service_context)
# index = GPTFaissIndex.from_documents(documents, faiss_index=faiss.IndexFlatL2(1536), service_context=default_service_context)
print('end init index done')
print('start save to disk')
# index.save_to_disk("clash-index.json")
# suffix do not matter
faiss_index_save_path = 'faiss_index.faiss'
faiss_index = faiss.IndexFlatL2(1536)
faiss.write_index(faiss_index, faiss_index_save_path)
index = GPTFaissIndex.load_from_disk(save_path='clash-index.json',
faiss_index=faiss_index_save_path,
service_context=service_context)
print('end save to disk')
# Query and print response
print('start query')
response = index.query(input)
print('end query')
print(response)
return response
# with torch.no_grad():
# # immOutPut = model.generate(input_ids=input_ids, generation_config=generation_config,
# # return_dict_in_generate=True, output_scores=False,
# # repetition_penalty=float(repetition_penalty), )
# # outputs = tokenizer.batch_decode(immOutPut)
# last_show_text = ''
# for generation_output in model.stream_generate(
# input_ids=input_ids,
# generation_config=generation_config,
# return_dict_in_generate=True,
# output_scores=False,
# repetition_penalty=float(repetition_penalty),
# ):
# outputs = tokenizer.batch_decode(generation_output)
# show_text = "\n--------------------------------------------\n".join(
# [output.split("### Response:")[1].strip().replace('�', '') for output in outputs]
# )
# # if show_text== '':
# # yield last_show_text
# # else:
# yield show_text
# last_show_text = outputs[0].split("### Response:")[1].strip().replace('�', '')
gr.Interface(
fn=evaluate,
inputs=[
gr.components.Textbox(
lines=2, label="Input", placeholder="Tell me about alpacas."
),
gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"),
gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"),
gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"),
gr.components.Slider(minimum=1, maximum=10, step=1, value=4, label="Beams Number"),
gr.components.Slider(
minimum=1, maximum=2000, step=1, value=256, label="Max New Tokens"
),
gr.components.Slider(
minimum=1, maximum=100, step=1, value=1, label="Min New Tokens"
),
gr.components.Slider(
minimum=0.1, maximum=10.0, step=0.1, value=1.0, label="Repetition Penalty"
),
],
outputs=[
gr.inputs.Textbox(
lines=15,
label="Output",
)
],
title="Chinese-Vicuna 中文小羊驼",
description="结合 llama-index prompt 搜索优化的 中文小羊驼",
).queue().launch(share=True)
| [
"llama_index.PromptHelper",
"llama_index.GPTFaissIndex.load_from_disk",
"llama_index.SimpleDirectoryReader",
"llama_index.LLMPredictor"
] | [((13996, 14021), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14019, 14021), False, 'import argparse\n'), ((14291, 14338), 'transformers.LlamaTokenizer.from_pretrained', 'LlamaTokenizer.from_pretrained', (['args.model_path'], {}), '(args.model_path)\n', (14321, 14338), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig\n'), ((14471, 14520), 'os.path.join', 'os.path.join', (['args.lora_path', '"""adapter_model.bin"""'], {}), "(args.lora_path, 'adapter_model.bin')\n", (14483, 14520), False, 'import os\n'), ((14990, 15015), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15013, 15015), False, 'import torch\n'), ((16720, 16779), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (16739, 16779), False, 'import logging\n'), ((1625, 1640), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1638, 1640), False, 'import torch\n'), ((14622, 14671), 'os.path.join', 'os.path.join', (['args.lora_path', '"""pytorch_model.bin"""'], {}), "(args.lora_path, 'pytorch_model.bin')\n", (14634, 14671), False, 'import os\n'), ((14707, 14739), 'os.path.exists', 'os.path.exists', (['pytorch_bin_path'], {}), '(pytorch_bin_path)\n', (14721, 14739), False, 'import os\n'), ((15075, 15108), 'torch.backends.mps.is_available', 'torch.backends.mps.is_available', ([], {}), '()\n', (15106, 15108), False, 'import torch\n'), ((15184, 15303), 'transformers.LlamaForCausalLM.from_pretrained', 'LlamaForCausalLM.from_pretrained', (['BASE_MODEL'], {'load_in_8bit': 'LOAD_8BIT', 'torch_dtype': 'torch.float16', 'device_map': "{'': 0}"}), "(BASE_MODEL, load_in_8bit=LOAD_8BIT,\n torch_dtype=torch.float16, device_map={'': 0})\n", (15216, 15303), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig\n'), ((16696, 16716), 'torch.compile', 'torch.compile', (['model'], {}), '(model)\n', (16709, 16716), False, 'import torch\n'), ((16811, 16851), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (16832, 16851), False, 'import logging\n'), ((17490, 17739), 'transformers.GenerationConfig', 'GenerationConfig', ([], {'temperature': 'temperature', 'top_p': 'top_p', 'top_k': 'top_k', 'num_beams': 'num_beams', 'bos_token_id': '(1)', 'eos_token_id': '(2)', 'pad_token_id': '(0)', 'max_new_tokens': 'max_new_tokens', 'min_new_tokens': 'min_new_tokens', 'repetition_penalty': 'repetition_penalty'}), '(temperature=temperature, top_p=top_p, top_k=top_k,\n num_beams=num_beams, bos_token_id=1, eos_token_id=2, pad_token_id=0,\n max_new_tokens=max_new_tokens, min_new_tokens=min_new_tokens,\n repetition_penalty=repetition_penalty)\n', (17506, 17739), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig\n'), ((19097, 19120), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['(1536)'], {}), '(1536)\n', (19114, 19120), False, 'import faiss\n'), ((19126, 19179), 'faiss.write_index', 'faiss.write_index', (['faiss_index', 'faiss_index_save_path'], {}), '(faiss_index, faiss_index_save_path)\n', (19143, 19179), False, 'import faiss\n'), ((19193, 19324), 'llama_index.GPTFaissIndex.load_from_disk', 'GPTFaissIndex.load_from_disk', ([], {'save_path': '"""clash-index.json"""', 'faiss_index': 'faiss_index_save_path', 'service_context': 'service_context'}), "(save_path='clash-index.json', faiss_index=\n faiss_index_save_path, service_context=service_context)\n", (19221, 19324), 
False, 'from llama_index import GPTKeywordTableIndex, GPTSimpleVectorIndex, GPTListIndex, GPTTreeIndex, GPTFaissIndex\n'), ((3383, 3415), 'copy.deepcopy', 'copy.deepcopy', (['generation_config'], {}), '(generation_config)\n', (3396, 3415), False, 'import copy\n'), ((6104, 6429), 'transformers.generation.beam_search.BeamSearchScorer', 'BeamSearchScorer', ([], {'batch_size': 'batch_size', 'num_beams': 'generation_config.num_beams', 'device': 'input_ids.device', 'length_penalty': 'generation_config.length_penalty', 'do_early_stopping': 'generation_config.early_stopping', 'num_beam_hyps_to_keep': 'generation_config.num_return_sequences', 'max_length': 'generation_config.max_length'}), '(batch_size=batch_size, num_beams=generation_config.\n num_beams, device=input_ids.device, length_penalty=generation_config.\n length_penalty, do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n max_length=generation_config.max_length)\n', (6120, 6429), False, 'from transformers.generation.beam_search import BeamSearchScorer\n'), ((7160, 7245), 'torch.zeros', 'torch.zeros', (['(batch_size, num_beams)'], {'dtype': 'torch.float', 'device': 'input_ids.device'}), '((batch_size, num_beams), dtype=torch.float, device=input_ids.device\n )\n', (7171, 7245), False, 'import torch\n'), ((11738, 11774), 'peft.LoraConfig.from_pretrained', 'LoraConfig.from_pretrained', (['model_id'], {}), '(model_id)\n', (11764, 11774), False, 'from peft import PeftModelForCausalLM, LoraConfig\n'), ((12718, 12768), 'peft.utils.set_peft_model_state_dict', 'set_peft_model_state_dict', (['model', 'adapters_weights'], {}), '(model, adapters_weights)\n', (12743, 12768), False, 'from peft.utils import PeftType, set_peft_model_state_dict\n'), ((14549, 14578), 'os.path.exists', 'os.path.exists', (['lora_bin_path'], {}), '(lora_bin_path)\n', (14563, 14578), False, 'import os\n'), ((14749, 14791), 'os.rename', 'os.rename', (['pytorch_bin_path', 'lora_bin_path'], {}), '(pytorch_bin_path, lora_bin_path)\n', (14758, 14791), False, 'import os\n'), ((14800, 14919), 'warnings.warn', 'warnings.warn', (['"""The file name of the lora checkpoint\'pytorch_model.bin\' is replaced with \'adapter_model.bin\'"""'], {}), '(\n "The file name of the lora checkpoint\'pytorch_model.bin\' is replaced with \'adapter_model.bin\'"\n )\n', (14813, 14919), False, 'import warnings\n'), ((15504, 15604), 'transformers.LlamaForCausalLM.from_pretrained', 'LlamaForCausalLM.from_pretrained', (['BASE_MODEL'], {'device_map': "{'': device}", 'torch_dtype': 'torch.float16'}), "(BASE_MODEL, device_map={'': device},\n torch_dtype=torch.float16)\n", (15536, 15604), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig\n'), ((15811, 15908), 'transformers.LlamaForCausalLM.from_pretrained', 'LlamaForCausalLM.from_pretrained', (['BASE_MODEL'], {'device_map': "{'': device}", 'low_cpu_mem_usage': '(True)'}), "(BASE_MODEL, device_map={'': device},\n low_cpu_mem_usage=True)\n", (15843, 15908), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig\n'), ((16780, 16799), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (16797, 16799), False, 'import logging\n'), ((2134, 2162), 'transformers.deepspeed.is_deepspeed_zero3_enabled', 'is_deepspeed_zero3_enabled', ([], {}), '()\n', (2160, 2162), False, 'from transformers.deepspeed import is_deepspeed_zero3_enabled\n'), ((2584, 2651), 'torch.cat', 'torch.cat', (["(prefix_attention_mask, kwargs['attention_mask'])"], {'dim': 
'(1)'}), "((prefix_attention_mask, kwargs['attention_mask']), dim=1)\n", (2593, 2651), False, 'import torch\n'), ((2751, 2863), 'warnings.warn', 'warnings.warn', (['"""Position ids are not supported for parameter efficient tuning. Ignoring position ids."""'], {}), "(\n 'Position ids are not supported for parameter efficient tuning. Ignoring position ids.'\n )\n", (2764, 2863), False, 'import warnings\n'), ((2997, 3112), 'warnings.warn', 'warnings.warn', (['"""Token type ids are not supported for parameter efficient tuning. Ignoring token type ids"""'], {}), "(\n 'Token type ids are not supported for parameter efficient tuning. Ignoring token type ids'\n )\n", (3010, 3112), False, 'import warnings\n'), ((3995, 4306), 'warnings.warn', 'warnings.warn', (['f"""Using `max_length`\'s default ({generation_config.max_length}) to control the generation length. This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we recommend using `max_new_tokens` to control the maximum length of the generation."""', 'UserWarning'], {}), '(\n f"Using `max_length`\'s default ({generation_config.max_length}) to control the generation length. This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we recommend using `max_new_tokens` to control the maximum length of the generation."\n , UserWarning)\n', (4008, 4306), False, 'import warnings\n'), ((5118, 5139), 'transformers.generation.utils.LogitsProcessorList', 'LogitsProcessorList', ([], {}), '()\n', (5137, 5139), False, 'from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationMixin\n'), ((5272, 5294), 'transformers.generation.utils.StoppingCriteriaList', 'StoppingCriteriaList', ([], {}), '()\n', (5292, 5294), False, 'from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationMixin\n'), ((8817, 8869), 'torch.nn.functional.log_softmax', 'nn.functional.log_softmax', (['next_token_logits'], {'dim': '(-1)'}), '(next_token_logits, dim=-1)\n', (8842, 8869), False, 'from torch import nn\n'), ((9542, 9620), 'torch.topk', 'torch.topk', (['next_token_scores', '(2 * num_beams)'], {'dim': '(1)', 'largest': '(True)', 'sorted': '(True)'}), '(next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True)\n', (9552, 9620), False, 'import torch\n'), ((9678, 9735), 'torch.div', 'torch.div', (['next_tokens', 'vocab_size'], {'rounding_mode': '"""floor"""'}), "(next_tokens, vocab_size, rounding_mode='floor')\n", (9687, 9735), False, 'import torch\n'), ((11850, 11884), 'accelerate.hooks.remove_hook_from_submodules', 'remove_hook_from_submodules', (['model'], {}), '(model)\n', (11877, 11884), False, 'from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules\n'), ((12005, 12048), 'os.path.join', 'os.path.join', (['model_id', '"""adapter_model.bin"""'], {}), "(model_id, 'adapter_model.bin')\n", (12017, 12048), False, 'import os\n'), ((12074, 12117), 'os.path.join', 'os.path.join', (['model_id', '"""adapter_model.bin"""'], {}), "(model_id, 'adapter_model.bin')\n", (12086, 12117), False, 'import os\n'), ((13593, 13637), 'accelerate.dispatch_model', 'dispatch_model', (['model'], {'device_map': 'device_map'}), '(model, device_map=device_map)\n', (13607, 13637), False, 'from accelerate import dispatch_model, infer_auto_device_map\n'), ((13657, 13694), 'accelerate.hooks.AlignDevicesHook', 'AlignDevicesHook', ([], {'io_same_device': '(True)'}), '(io_same_device=True)\n', (13673, 13694), False, 'from 
accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules\n'), ((18251, 18274), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'model'}), '(llm=model)\n', (18263, 18274), False, 'from llama_index import LLMPredictor\n'), ((18298, 18357), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (18310, 18357), False, 'from llama_index import PromptHelper, SimpleDirectoryReader\n'), ((18376, 18426), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""Chinese-Vicuna/index-docs"""'], {}), "('Chinese-Vicuna/index-docs')\n", (18397, 18426), False, 'from llama_index import PromptHelper, SimpleDirectoryReader\n'), ((2167, 2184), 'torch.distributed.world_size', 'dist.world_size', ([], {}), '()\n', (2182, 2184), True, 'import torch.distributed as dist\n'), ((7914, 7976), 'torch.distributed.all_reduce', 'dist.all_reduce', (['this_peer_finished_flag'], {'op': 'dist.ReduceOp.SUM'}), '(this_peer_finished_flag, op=dist.ReduceOp.SUM)\n', (7929, 7976), True, 'import torch.distributed as dist\n'), ((12176, 12222), 'huggingface_hub.hf_hub_download', 'hf_hub_download', (['model_id', '"""adapter_model.bin"""'], {}), "(model_id, 'adapter_model.bin')\n", (12191, 12222), False, 'from huggingface_hub import hf_hub_download\n'), ((13079, 13223), 'accelerate.utils.get_balanced_memory', 'get_balanced_memory', (['model'], {'max_memory': 'max_memory', 'no_split_module_classes': 'no_split_module_classes', 'low_zero': "(device_map == 'balanced_low_0')"}), "(model, max_memory=max_memory, no_split_module_classes=\n no_split_module_classes, low_zero=device_map == 'balanced_low_0')\n", (13098, 13223), False, 'from accelerate.utils import get_balanced_memory\n'), ((13393, 13498), 'accelerate.infer_auto_device_map', 'infer_auto_device_map', (['model'], {'max_memory': 'max_memory', 'no_split_module_classes': 'no_split_module_classes'}), '(model, max_memory=max_memory, no_split_module_classes\n =no_split_module_classes)\n', (13414, 13498), False, 'from accelerate import dispatch_model, infer_auto_device_map\n'), ((13772, 13820), 'accelerate.hooks.add_hook_to_module', 'add_hook_to_module', (['model.base_model.model', 'hook'], {}), '(model.base_model.model, hook)\n', (13790, 13820), False, 'from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules\n'), ((13855, 13904), 'accelerate.hooks.remove_hook_from_submodules', 'remove_hook_from_submodules', (['model.prompt_encoder'], {}), '(model.prompt_encoder)\n', (13882, 13904), False, 'from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules\n'), ((13921, 13963), 'accelerate.hooks.add_hook_to_module', 'add_hook_to_module', (['model.base_model', 'hook'], {}), '(model.base_model, hook)\n', (13939, 13963), False, 'from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules\n'), ((2406, 2483), 'torch.ones', 'torch.ones', (["kwargs['input_ids'].shape[0]", 'self.peft_config.num_virtual_tokens'], {}), "(kwargs['input_ids'].shape[0], self.peft_config.num_virtual_tokens)\n", (2416, 2483), False, 'import torch\n'), ((7733, 7781), 'torch.tensor', 'torch.tensor', (['(0.0 if this_peer_finished else 1.0)'], {}), '(0.0 if this_peer_finished else 1.0)\n', (7745, 7781), False, 'import torch\n'), ((12611, 12636), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12634, 12636), False, 'import torch\n'), ((20831, 20919), 
'gradio.components.Textbox', 'gr.components.Textbox', ([], {'lines': '(2)', 'label': '"""Input"""', 'placeholder': '"""Tell me about alpacas."""'}), "(lines=2, label='Input', placeholder=\n 'Tell me about alpacas.')\n", (20852, 20919), True, 'import gradio as gr\n'), ((20946, 21020), 'gradio.components.Slider', 'gr.components.Slider', ([], {'minimum': '(0)', 'maximum': '(1)', 'value': '(0.1)', 'label': '"""Temperature"""'}), "(minimum=0, maximum=1, value=0.1, label='Temperature')\n", (20966, 21020), True, 'import gradio as gr\n'), ((21030, 21099), 'gradio.components.Slider', 'gr.components.Slider', ([], {'minimum': '(0)', 'maximum': '(1)', 'value': '(0.75)', 'label': '"""Top p"""'}), "(minimum=0, maximum=1, value=0.75, label='Top p')\n", (21050, 21099), True, 'import gradio as gr\n'), ((21109, 21186), 'gradio.components.Slider', 'gr.components.Slider', ([], {'minimum': '(0)', 'maximum': '(100)', 'step': '(1)', 'value': '(40)', 'label': '"""Top k"""'}), "(minimum=0, maximum=100, step=1, value=40, label='Top k')\n", (21129, 21186), True, 'import gradio as gr\n'), ((21196, 21283), 'gradio.components.Slider', 'gr.components.Slider', ([], {'minimum': '(1)', 'maximum': '(10)', 'step': '(1)', 'value': '(4)', 'label': '"""Beams Number"""'}), "(minimum=1, maximum=10, step=1, value=4, label=\n 'Beams Number')\n", (21216, 21283), True, 'import gradio as gr\n'), ((21288, 21381), 'gradio.components.Slider', 'gr.components.Slider', ([], {'minimum': '(1)', 'maximum': '(2000)', 'step': '(1)', 'value': '(256)', 'label': '"""Max New Tokens"""'}), "(minimum=1, maximum=2000, step=1, value=256, label=\n 'Max New Tokens')\n", (21308, 21381), True, 'import gradio as gr\n'), ((21408, 21498), 'gradio.components.Slider', 'gr.components.Slider', ([], {'minimum': '(1)', 'maximum': '(100)', 'step': '(1)', 'value': '(1)', 'label': '"""Min New Tokens"""'}), "(minimum=1, maximum=100, step=1, value=1, label=\n 'Min New Tokens')\n", (21428, 21498), True, 'import gradio as gr\n'), ((21525, 21626), 'gradio.components.Slider', 'gr.components.Slider', ([], {'minimum': '(0.1)', 'maximum': '(10.0)', 'step': '(0.1)', 'value': '(1.0)', 'label': '"""Repetition Penalty"""'}), "(minimum=0.1, maximum=10.0, step=0.1, value=1.0, label=\n 'Repetition Penalty')\n", (21545, 21626), True, 'import gradio as gr\n'), ((21674, 21717), 'gradio.inputs.Textbox', 'gr.inputs.Textbox', ([], {'lines': '(15)', 'label': '"""Output"""'}), "(lines=15, label='Output')\n", (21691, 21717), True, 'import gradio as gr\n')] |
from typing import List, Set
from llama_index.core import Document, KnowledgeGraphIndex, StorageContext
from llama_index.core.query_engine import BaseQueryEngine
from llama_index.core import load_index_from_storage
import os
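# Helpers for building, persisting, and querying a KnowledgeGraphIndex stored on local disk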
def load_kg_graph_index_storage_context(kg_graph_storage_dir: str) -> StorageContext:
return StorageContext.from_defaults(persist_dir=kg_graph_storage_dir)
def persist_kg_graph_index(idx: KnowledgeGraphIndex, kg_graph_storage_dir: str):
doc_count = len(idx.docstore.docs)
print(f"Persisting {doc_count} docs for kg_graph to {kg_graph_storage_dir} ...")
idx.storage_context.persist(persist_dir=kg_graph_storage_dir)
def delete_kg_graph_index(kg_graph_storage_dir: str):
print(f"Deleting kg_graph at {kg_graph_storage_dir} ...")
if os.path.exists(kg_graph_storage_dir):
import shutil
shutil.rmtree(kg_graph_storage_dir)
def load_kg_graph_index(kg_graph_storage_dir: str) -> KnowledgeGraphIndex:
if not os.path.exists(kg_graph_storage_dir):
print(f"About to initialize an empty kg-graph ...")
kg_graph = KnowledgeGraphIndex.from_documents(
[]
)
persist_kg_graph_index(kg_graph, kg_graph_storage_dir)
return load_index_from_storage(
storage_context=load_kg_graph_index_storage_context(kg_graph_storage_dir)
)
def get_kg_graph_doc_source_ids(graph_storage_dir: str, extract_key_from_doc=lambda doc: str(doc)) -> Set[str]:
s = set()
for doc in load_kg_graph_index(graph_storage_dir).docstore.docs.values():
s.add(extract_key_from_doc(doc))
return s
def get_kg_graph_index(graph_storage_dir: str) -> KnowledgeGraphIndex:
return load_kg_graph_index(graph_storage_dir)
def operate_on_kg_graph_index(kg_graph_index_dir: str, operation=lambda idx: None) -> KnowledgeGraphIndex:
import atexit
idx = get_kg_graph_index(kg_graph_index_dir)
atexist_reg_callable = atexit.register(persist_kg_graph_index, idx, kg_graph_index_dir)
try:
operation(idx)
finally:
persist_kg_graph_index(idx, kg_graph_index_dir)
atexit.unregister(atexist_reg_callable)
return idx
def add_to_or_update_in_kg_graph(graph_storage_dir: str, documents: List[Document]):
operate_on_kg_graph_index(
graph_storage_dir,
lambda graph_index: graph_index.refresh_ref_docs(documents)
)
def get_kg_graph_query_engine(graph_storage_dir: str) -> BaseQueryEngine:
return load_kg_graph_index(graph_storage_dir).as_query_engine()
| [
"llama_index.core.KnowledgeGraphIndex.from_documents",
"llama_index.core.StorageContext.from_defaults"
] | [((323, 385), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'kg_graph_storage_dir'}), '(persist_dir=kg_graph_storage_dir)\n', (351, 385), False, 'from llama_index.core import Document, KnowledgeGraphIndex, StorageContext\n'), ((782, 818), 'os.path.exists', 'os.path.exists', (['kg_graph_storage_dir'], {}), '(kg_graph_storage_dir)\n', (796, 818), False, 'import os\n'), ((1908, 1972), 'atexit.register', 'atexit.register', (['persist_kg_graph_index', 'idx', 'kg_graph_index_dir'], {}), '(persist_kg_graph_index, idx, kg_graph_index_dir)\n', (1923, 1972), False, 'import atexit\n'), ((850, 885), 'shutil.rmtree', 'shutil.rmtree', (['kg_graph_storage_dir'], {}), '(kg_graph_storage_dir)\n', (863, 885), False, 'import shutil\n'), ((973, 1009), 'os.path.exists', 'os.path.exists', (['kg_graph_storage_dir'], {}), '(kg_graph_storage_dir)\n', (987, 1009), False, 'import os\n'), ((1090, 1128), 'llama_index.core.KnowledgeGraphIndex.from_documents', 'KnowledgeGraphIndex.from_documents', (['[]'], {}), '([])\n', (1124, 1128), False, 'from llama_index.core import Document, KnowledgeGraphIndex, StorageContext\n'), ((2082, 2121), 'atexit.unregister', 'atexit.unregister', (['atexist_reg_callable'], {}), '(atexist_reg_callable)\n', (2099, 2121), False, 'import atexit\n')] |
from typing import Any, List
import tiktoken
from bs4 import BeautifulSoup
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
staticPath = "static"
def encode_string(string: str, encoding_name: str = "p50k_base"):
encoding = tiktoken.get_encoding(encoding_name)
return encoding.encode(string)
def decode_string(token: str, encoding_name: str = "p50k_base"):
encoding = tiktoken.get_encoding(encoding_name)
return encoding.decode(token)
def num_tokens_from_string(string: str, encoding_name: str = "p50k_base") -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
def split_text_to_doc(
text: str, current_chunk_id, chunk_size: int = 400
) -> List[Document]:
"""Split text into chunks of a given size."""
chunks = []
token_len = num_tokens_from_string(text)
for i in range(0, token_len, chunk_size):
encode_text = encode_string(text)
decode_text = decode_string(encode_text[i : i + chunk_size]).strip()
chunks.append(
Document(
decode_text,
extra_info={"chunk_id": f"chunk-{current_chunk_id}"},
)
)
return chunks
class CustomReader(BaseReader):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
def load_data(self, html, filename) -> List[Document]:
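        """Split an HTML page into token-bounded Document chunks keyed by its h1-h3 headings."""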
soup = BeautifulSoup(html, "html.parser")
current_chunk_text = ""
current_chunk_id = 1
document_list = []
        # measured in tokens; OpenAI's limit is 4097, which leaves room for roughly 6 turns of continuous conversation
current_chunk_length = 0
chunk_size = 400
        # only the first three heading levels are used as section breaks; everything else is treated as paragraph text
headings = ["h1", "h2", "h3"]
heading_doms = soup.find_all(headings)
if len(heading_doms) == 0:
heading_doms = [soup.find()]
for tag in heading_doms:
tag["data-chunk_id"] = f"chunk-{current_chunk_id}"
current_chunk_text = tag.text.strip()
            # walk sibling nodes only; do not recurse into child nodes
next_tag = tag.find_next_sibling()
while next_tag and next_tag.name not in headings:
stripped_text = next_tag.text.strip()
if (
current_chunk_length + num_tokens_from_string(stripped_text)
> chunk_size
):
document_list.append(
Document(
current_chunk_text.strip(),
extra_info={"chunk_id": f"chunk-{current_chunk_id}"},
)
)
current_chunk_text = ""
current_chunk_length = 0
current_chunk_id += 1
document_list += split_text_to_doc(stripped_text, current_chunk_id)
else:
current_chunk_text = f"{current_chunk_text} {stripped_text}"
current_chunk_length += num_tokens_from_string(stripped_text) + 1
next_tag["data-chunk_id"] = f"chunk-{current_chunk_id}"
next_tag = next_tag.find_next_sibling()
document_list.append(
Document(
current_chunk_text.strip(),
extra_info={"chunk_id": f"chunk-{current_chunk_id}"},
)
)
current_chunk_text = ""
current_chunk_length = 0
current_chunk_id += 1
        # save the modified HTML file
with open(f"{staticPath}/file/{filename}.html", "w", encoding="utf-8") as f:
f.write(str(soup))
return document_list
| [
"llama_index.readers.schema.base.Document"
] | [((283, 319), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (304, 319), False, 'import tiktoken\n'), ((437, 473), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (458, 473), False, 'import tiktoken\n'), ((664, 700), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (685, 700), False, 'import tiktoken\n'), ((1571, 1605), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (1584, 1605), False, 'from bs4 import BeautifulSoup\n'), ((1182, 1257), 'llama_index.readers.schema.base.Document', 'Document', (['decode_text'], {'extra_info': "{'chunk_id': f'chunk-{current_chunk_id}'}"}), "(decode_text, extra_info={'chunk_id': f'chunk-{current_chunk_id}'})\n", (1190, 1257), False, 'from llama_index.readers.schema.base import Document\n')] |
import requests, os, time, datetime
from dotenv import load_dotenv
import pandas as pd
import numpy as np
load_dotenv()
from llama_index.indices import VectaraIndex
from llama_index import Document
#setting up secrets for Vectara
VECTARA_CUSTOMER_ID=os.environ["VECTARA_CUSTOMER_ID"]
VECTARA_CORPUS_ID=os.environ["VECTARA_CORPUS_ID"]
VECTARA_API_KEY=os.environ["VECTARA_API_KEY"]
os.environ['OPENAI_API_KEY'] = os.environ["OPENAI_API"]
index = VectaraIndex(vectara_api_key=VECTARA_API_KEY, vectara_customer_id=VECTARA_CUSTOMER_ID, vectara_corpus_id=VECTARA_CORPUS_ID)
query_engine = index.as_query_engine()
def get_gpt_ans(question):
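    # Query the Vectara-backed index and return the generated answer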
response = query_engine.query(question)
print("Got response")
return response
def load(documents, df):
#we will load the documents into the Vectara Index
index.add_documents(documents)
df['Answers'] = df['Questions'].apply(lambda question: get_gpt_ans(question))
return df
# print(load())
| [
"llama_index.indices.VectaraIndex"
] | [((106, 119), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (117, 119), False, 'from dotenv import load_dotenv\n'), ((450, 578), 'llama_index.indices.VectaraIndex', 'VectaraIndex', ([], {'vectara_api_key': 'VECTARA_API_KEY', 'vectara_customer_id': 'VECTARA_CUSTOMER_ID', 'vectara_corpus_id': 'VECTARA_CORPUS_ID'}), '(vectara_api_key=VECTARA_API_KEY, vectara_customer_id=\n VECTARA_CUSTOMER_ID, vectara_corpus_id=VECTARA_CORPUS_ID)\n', (462, 578), False, 'from llama_index.indices import VectaraIndex\n')] |
from llama_index.prompts import PromptTemplate
ZERO_SHOT_QA_TEMPLATE = (
"<|system|>: Reponds à une question à partir du contexte en français.\n"
"<|user|>: {query_str} {context_str} \n"
"<|Réponse|>:"
)
ZERO_SHOT_PROMPT = PromptTemplate(ZERO_SHOT_QA_TEMPLATE)
ZERO_SHOT_QUESTION_TEMPLATE = """
<|system|>: Génère une question brève à partir du contexte suivant en français.\n
<|user|>: {context_str}\n
<|Question|>:
"""
ZERO_SHOT_QUESTION_PROMPT = PromptTemplate(ZERO_SHOT_QUESTION_TEMPLATE)
FEW_SHOT_QUESTION_PROMPT = """
<|system|>: Génère une question brève à partir du contexte suivant en français.\n
<|user|>: {context_str}\n
<|Question|>:
"""
| [
"llama_index.prompts.PromptTemplate"
] | [((237, 274), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', (['ZERO_SHOT_QA_TEMPLATE'], {}), '(ZERO_SHOT_QA_TEMPLATE)\n', (251, 274), False, 'from llama_index.prompts import PromptTemplate\n'), ((481, 524), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', (['ZERO_SHOT_QUESTION_TEMPLATE'], {}), '(ZERO_SHOT_QUESTION_TEMPLATE)\n', (495, 524), False, 'from llama_index.prompts import PromptTemplate\n')] |
from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
import sys
import os
def construct_index(src_path, out_path):
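    # Build a GPTSimpleVectorIndex over the documents in src_path and persist it to out_path/index.json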
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_outputs = 512
# set maximum chunk overlap
max_chunk_overlap = 20
# set chunk size limit
chunk_size_limit = 600
# define LLM
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003", max_tokens=num_outputs))
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
documents = SimpleDirectoryReader(src_path).load_data()
index = GPTSimpleVectorIndex(
documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper
)
index.save_to_disk(f"{out_path}/index.json")
return index
if __name__ == "__main__":
import os
src_path = os.getcwd()
dir_path = src_path + "/clean"
out_path = src_path
os.environ["OPENAI_API_KEY"] = "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"
index = construct_index(src_path, out_path) | [
"llama_index.GPTSimpleVectorIndex",
"llama_index.SimpleDirectoryReader",
"llama_index.PromptHelper"
] | [((565, 664), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size_limit'}), '(max_input_size, num_outputs, max_chunk_overlap,\n chunk_size_limit=chunk_size_limit)\n', (577, 664), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n'), ((740, 834), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(documents, llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (760, 834), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n'), ((970, 981), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (979, 981), False, 'import os\n'), ((467, 543), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""text-davinci-003"""', 'max_tokens': 'num_outputs'}), "(temperature=0, model_name='text-davinci-003', max_tokens=num_outputs)\n", (473, 543), False, 'from langchain import OpenAI\n'), ((679, 710), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['src_path'], {}), '(src_path)\n', (700, 710), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n')] |
import streamlit as st
from llama_index import VectorStoreIndex, ServiceContext, Document
from llama_index.llms import OpenAI
import openai
from llama_index import SimpleDirectoryReader
st.set_page_config(page_title="Chat with the docs, powered by LlamaIndex")
openai.api_key = st.secrets.openai_key
st.title("Chat with the custom docs, using LlamaIndex")
if "messages" not in st.session_state.keys(): # Initialize the chat messages history
st.session_state.messages = [
{"role": "assistant", "content": "Ask me a question about Streamlit's open-source Python library!"}
]
@st.cache_resource(show_spinner=False)
def load_data():
with st.spinner(text="Loading and indexing the Streamlit docs"):
reader = SimpleDirectoryReader(input_dir="./data", recursive=True)
docs = reader.load_data()
service_context = ServiceContext.from_defaults(llm=OpenAI(model="gpt-3.5-turbo", temperature=0.5, system_prompt="You are an expert on the Streamlit Python library and your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. Keep your answers technical and based on facts – do not hallucinate features."))
index = VectorStoreIndex.from_documents(docs, service_context=service_context)
return index
index = load_data()
# chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True, system_prompt="You are an expert on the Streamlit Python library and your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. Keep your answers technical and based on facts – do not hallucinate features.")
chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)
if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = chat_engine.chat(prompt)
st.write(response.response)
message = {"role": "assistant", "content": response.response}
st.session_state.messages.append(message) # Add response to message history | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.llms.OpenAI"
] | [((193, 267), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with the docs, powered by LlamaIndex"""'}), "(page_title='Chat with the docs, powered by LlamaIndex')\n", (211, 267), True, 'import streamlit as st\n'), ((309, 365), 'streamlit.title', 'st.title', (['"""Chat with the custom docs, using LlamaIndex"""'], {}), "('Chat with the custom docs, using LlamaIndex')\n", (317, 365), True, 'import streamlit as st\n'), ((618, 655), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (635, 655), True, 'import streamlit as st\n'), ((399, 422), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (420, 422), True, 'import streamlit as st\n'), ((1802, 1832), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (1815, 1832), True, 'import streamlit as st\n'), ((1888, 1957), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (1920, 1957), True, 'import streamlit as st\n'), ((684, 742), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Loading and indexing the Streamlit docs"""'}), "(text='Loading and indexing the Streamlit docs')\n", (694, 742), True, 'import streamlit as st\n'), ((762, 819), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '"""./data"""', 'recursive': '(True)'}), "(input_dir='./data', recursive=True)\n", (783, 819), False, 'from llama_index import SimpleDirectoryReader\n'), ((1243, 1313), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'service_context': 'service_context'}), '(docs, service_context=service_context)\n', (1274, 1313), False, 'from llama_index import VectorStoreIndex, ServiceContext, Document\n'), ((2047, 2079), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (2062, 2079), True, 'import streamlit as st\n'), ((2090, 2118), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (2098, 2118), True, 'import streamlit as st\n'), ((2255, 2283), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2270, 2283), True, 'import streamlit as st\n'), ((2299, 2324), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (2309, 2324), True, 'import streamlit as st\n'), ((2388, 2415), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (2396, 2415), True, 'import streamlit as st\n'), ((2504, 2545), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (2536, 2545), True, 'import streamlit as st\n'), ((915, 1234), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.5)', 'system_prompt': '"""You are an expert on the Streamlit Python library and your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. Keep your answers technical and based on facts – do not hallucinate features."""'}), "(model='gpt-3.5-turbo', temperature=0.5, system_prompt=\n 'You are an expert on the Streamlit Python library and your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. 
Keep your answers technical and based on facts – do not hallucinate features.'\n )\n", (921, 1234), False, 'from llama_index.llms import OpenAI\n')] |
import streamlit as st
from pathlib import Path
import qdrant_client
from llama_index import (
VectorStoreIndex,
ServiceContext,
download_loader,
)
from llama_index.llms import Ollama
from llama_index.storage.storage_context import StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_hub.confluence import ConfluenceReader
import os
# Streamlit UI for input
st.title("Confluence Query Interface")
# Use secrets for sensitive information
access_token = st.secrets["CONFLUENCE_ACCESS_TOKEN"]
base_url = st.text_input("Confluence Base URL", "https://espace.agir.orange.com/")
space_key = st.text_input("Space Key", "OBSMA")
model_name = st.selectbox("Select Model", ["mistral:7b-instruct-q5_K_M", "Other Model"])
query_text = st.text_area("Enter your query", "What is OBSMA?")
# OAuth2 credentials dictionary using the access token from Streamlit secrets
oauth2_credentials = {
"client_id": "[email protected]", # You might want to secure this as well
"client_secret": st.secrets["CLIENT_SECRET"],
"token": {
"access_token": access_token,
"token_type": "Bearer"
}
}
# Initialize ConfluenceReader with OAuth2 credentials from Streamlit secrets
reader = ConfluenceReader(base_url=base_url, oauth2=oauth2_credentials)
# Query execution
if st.button('Run Query'):
# Load documents using the ConfluenceReader
documents = reader.load_data(space_key=space_key, include_attachments=True, page_status="current")
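    # NOTE: documents are re-fetched and re-indexed on every button press; persisting the index would avoid redundant work across queries.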
# Set up Qdrant client and vector store
client = qdrant_client.QdrantClient(path="./qdrant_data")
vector_store = QdrantVectorStore(client=client, collection_name="conf_MA")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
# Initialize LLM model
llm = Ollama(model=model_name)
service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")
# Create the index and query engine
index = VectorStoreIndex.from_documents(documents, service_context=service_context, storage_context=storage_context)
query_engine = index.as_query_engine()
# Query and display the response
response = query_engine.query(query_text)
st.write(response)
# Note: Ensure that you handle the client ID securely as well, possibly using Streamlit secrets.
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.ServiceContext.from_defaults",
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.llms.Ollama"
] | [((409, 447), 'streamlit.title', 'st.title', (['"""Confluence Query Interface"""'], {}), "('Confluence Query Interface')\n", (417, 447), True, 'import streamlit as st\n'), ((554, 625), 'streamlit.text_input', 'st.text_input', (['"""Confluence Base URL"""', '"""https://espace.agir.orange.com/"""'], {}), "('Confluence Base URL', 'https://espace.agir.orange.com/')\n", (567, 625), True, 'import streamlit as st\n'), ((638, 673), 'streamlit.text_input', 'st.text_input', (['"""Space Key"""', '"""OBSMA"""'], {}), "('Space Key', 'OBSMA')\n", (651, 673), True, 'import streamlit as st\n'), ((687, 762), 'streamlit.selectbox', 'st.selectbox', (['"""Select Model"""', "['mistral:7b-instruct-q5_K_M', 'Other Model']"], {}), "('Select Model', ['mistral:7b-instruct-q5_K_M', 'Other Model'])\n", (699, 762), True, 'import streamlit as st\n'), ((776, 826), 'streamlit.text_area', 'st.text_area', (['"""Enter your query"""', '"""What is OBSMA?"""'], {}), "('Enter your query', 'What is OBSMA?')\n", (788, 826), True, 'import streamlit as st\n'), ((1250, 1312), 'llama_hub.confluence.ConfluenceReader', 'ConfluenceReader', ([], {'base_url': 'base_url', 'oauth2': 'oauth2_credentials'}), '(base_url=base_url, oauth2=oauth2_credentials)\n', (1266, 1312), False, 'from llama_hub.confluence import ConfluenceReader\n'), ((1335, 1357), 'streamlit.button', 'st.button', (['"""Run Query"""'], {}), "('Run Query')\n", (1344, 1357), True, 'import streamlit as st\n'), ((1568, 1616), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'path': '"""./qdrant_data"""'}), "(path='./qdrant_data')\n", (1594, 1616), False, 'import qdrant_client\n'), ((1636, 1695), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'client', 'collection_name': '"""conf_MA"""'}), "(client=client, collection_name='conf_MA')\n", (1653, 1695), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((1718, 1773), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1746, 1773), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((1812, 1836), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': 'model_name'}), '(model=model_name)\n', (1818, 1836), False, 'from llama_index.llms import Ollama\n'), ((1859, 1917), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (1887, 1917), False, 'from llama_index import VectorStoreIndex, ServiceContext, download_loader\n'), ((1971, 2083), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context', 'storage_context': 'storage_context'}), '(documents, service_context=service_context,\n storage_context=storage_context)\n', (2002, 2083), False, 'from llama_index import VectorStoreIndex, ServiceContext, download_loader\n'), ((2211, 2229), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (2219, 2229), True, 'import streamlit as st\n')] |
from llama_index import StorageContext, load_index_from_storage, ServiceContext
import gradio as gr
import sys
import os
import logging
from utils import get_automerging_query_engine
from utils import get_sentence_window_query_engine
import configparser
from TTS.api import TTS
from gtts import gTTS
import simpleaudio as sa
import threading
from datetime import datetime
import json
import subprocess
from llama_index.prompts.base import PromptTemplate
from inference import main as generateVideo
import pyttsx3
def run_inference(checkpoint_path, face_video, audio_file, resize_factor, outfile):
# Construct the command with dynamic parameters
command = [
"--checkpoint_path", checkpoint_path,
"--face", face_video,
"--audio", audio_file,
"--resize_factor", str(resize_factor),
"--outfile", outfile
]
print(command)
generateVideo(command)
def play_sound_then_delete(path_to_wav):
def play_and_delete():
try:
wave_obj = sa.WaveObject.from_wave_file(path_to_wav)
play_obj = wave_obj.play()
play_obj.wait_done() # Wait until the sound has finished playing
except Exception as e:
print(f"Error during playback: {e}")
finally:
try:
#os.remove(path_to_wav)
print(f"File {path_to_wav} successfully deleted.")
except Exception as e:
print(f"Error deleting file: {e}")
# Start playback in a new thread
threading.Thread(target=play_and_delete, daemon=True).start()
config = configparser.ConfigParser()
config.read('config.ini')
os.environ["GRADIO_ANALYTICS_ENABLED"]='False'
indextype=config['api']['indextype']
embed_modelname = config['api']['embedmodel']
basic_idx_dir = config['index']['basic_idx_dir']
sent_win_idx_dir = config['index']['sent_win_idx_dir']
auto_mrg_idx_dir = config['index']['auto_mrg_idx_dir']
serverip = config['api']['host']
serverport = config['api']['port']
sslcert = config['api']['sslcert']
sslkey = config['api']['sslkey']
useopenai = config.getboolean('api', 'useopenai')
ttsengine = config['api']['ttsengine']
# Get the logging level
log_level_str = config.get('api', 'loglevel', fallback='WARNING').upper()
# Convert the log level string to a logging level
log_level = getattr(logging, log_level_str, logging.WARNING)
def chatbot(input_text):
global tts
print("User Text:" + input_text)
    response = query_engine.query(input_text)
# Save the output
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
output_audfile=f"output_{timestamp}.wav"
output_vidfile=f"output_{timestamp}.mp4"
output_path = "../web/public/audio/output/"+output_audfile
if ttsengine == 'coqui':
tts.tts_to_file(text=response.response, file_path=output_path ) # , speaker_wav=["bruce.wav"], language="en",split_sentences=True)
elif ttsengine == 'gtts':
tts = gTTS(text=response.response, lang='en')
tts.save(output_path)
else:
tts.save_to_file(response.response , output_path)
tts.runAndWait()
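    # Generate a lip-synced avatar video from the synthesized audio using the Wav2Lip checkpoint below.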
checkpoint_path = "./checkpoints/wav2lip_gan.pth"
face_video = "media/Avatar.mp4"
audio_file = "../web/public/audio/output/"+output_audfile
outfile="../web/public/video/output/"+output_vidfile
resize_factor = 2
run_inference(checkpoint_path, face_video, audio_file, resize_factor, outfile)
#play_sound_then_delete(output_path)
#construct response object
# Building the citation list from source_nodes
citation = [
{
"filename": node.metadata["file_name"],
"text": node.get_text()
} for node in response.source_nodes
]
# Creating the JSON object structure
jsonResponse = {
"response": response.response,
"video": output_vidfile,
"audio": output_audfile,
"citation": citation
}
# Convert to JSON string
jsonResponseStr = json.dumps(jsonResponse, indent=4)
return jsonResponseStr
logging.basicConfig(stream=sys.stdout, level=log_level)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
iface = gr.Interface(fn=chatbot,
inputs=gr.components.Textbox(lines=7, label="Enter your text"),
outputs="text",
title="Email data query")
from langchain.llms import LlamaCpp
from langchain.globals import set_llm_cache
from langchain.cache import InMemoryCache
#from langchain.globals import set_debug
#set_debug(True)
if useopenai:
from langchain.chat_models import ChatOpenAI
modelname = config['api']['openai_modelname']
llm =ChatOpenAI(temperature=0.1, model_name=modelname)
else:
modelname = config['api']['local_modelname']
n_gpu_layers = -1 # Change this value based on your model and your GPU VRAM pool.
n_batch = 2048 # Should be between 1 and n_ctx, consider the amount of VRAM in your GPU.
#cache prompt/response pairs for faster retrieval next time.
set_llm_cache(InMemoryCache())
llm = LlamaCpp(
model_path="./models/"+ modelname,
cache=True,
n_gpu_layers=n_gpu_layers,
n_batch=n_batch,
n_ctx=2048,
n_threads=8,
temperature=0.01,
max_tokens=512,
f16_kv=True,
repeat_penalty=1.1,
min_p=0.05,
top_p=0.95,
top_k=40,
stop=["<|end_of_turn|>"]
)
service_context = ServiceContext.from_defaults(
llm=llm, embed_model=embed_modelname
)
index_directory=''
if indextype == 'basic':
index_directory = basic_idx_dir
elif indextype == 'sentence' :
index_directory = sent_win_idx_dir
elif indextype == 'automerge':
index_directory = auto_mrg_idx_dir
print(config['api']['indextype'] )
print(index_directory)
if ttsengine == 'coqui':
tts = TTS(model_name="tts_models/en/ljspeech/vits--neon", progress_bar=False).to("cuda")
#tts = TTS(model_name="tts_models/multilingual/multi-dataset/xtts_v2", progress_bar=False).to("cuda")
elif ttsengine == 'gtts':
tts = gTTS(text='', lang='en')
else:
tts = pyttsx3.init()
voices = tts.getProperty('voices')
tts.setProperty('voice', voices[1].id) # this is female voice
rate = tts.getProperty('rate')
tts.setProperty('rate', rate-50)
# load index
storage_context = StorageContext.from_defaults(persist_dir=index_directory)
index = load_index_from_storage(storage_context=storage_context, service_context=service_context)
if indextype == 'basic':
query_engine = index.as_query_engine()
elif indextype == 'sentence' :
    query_engine = get_sentence_window_query_engine(index)
elif indextype == 'automerge':
query_engine = get_automerging_query_engine(automerging_index=index, service_context=service_context)
#prompts_dict = query_engine.get_prompts()
#print(list(prompts_dict.keys()))
# Optional: Adjust prompts to suit the llms.
qa_prompt_tmpl_str = (
"GPT4 User: You are an assistant named Maggie. You assist with any questions regarding the organization kwaai.\n"
"Context information is below\n"
"----------------------\n"
"{context_str}\n"
"----------------------\n"
"Given the context information and not prior knowledge respond to user: {query_str}\n"
"<|end_of_turn|>GPT4 Assistant:"
)
qa_prompt_tmpl = PromptTemplate(qa_prompt_tmpl_str)
query_engine.update_prompts(
{"response_synthesizer:text_qa_template": qa_prompt_tmpl}
)
iface.launch( share=False, server_name=serverip, server_port=int(serverport), ssl_verify=False, ssl_keyfile=sslkey, ssl_certfile=sslcert) | [
"llama_index.ServiceContext.from_defaults",
"llama_index.prompts.base.PromptTemplate",
"llama_index.load_index_from_storage",
"llama_index.StorageContext.from_defaults"
] | [((1610, 1637), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1635, 1637), False, 'import configparser\n'), ((4094, 4149), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'log_level'}), '(stream=sys.stdout, level=log_level)\n', (4113, 4149), False, 'import logging\n'), ((5478, 5544), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_modelname'}), '(llm=llm, embed_model=embed_modelname)\n', (5506, 5544), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext\n'), ((6367, 6424), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'index_directory'}), '(persist_dir=index_directory)\n', (6395, 6424), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext\n'), ((6433, 6527), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(storage_context=storage_context, service_context=\n service_context)\n', (6456, 6527), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext\n'), ((7358, 7392), 'llama_index.prompts.base.PromptTemplate', 'PromptTemplate', (['qa_prompt_tmpl_str'], {}), '(qa_prompt_tmpl_str)\n', (7372, 7392), False, 'from llama_index.prompts.base import PromptTemplate\n'), ((895, 917), 'inference.main', 'generateVideo', (['command'], {}), '(command)\n', (908, 917), True, 'from inference import main as generateVideo\n'), ((4018, 4052), 'json.dumps', 'json.dumps', (['jsonResponse'], {'indent': '(4)'}), '(jsonResponse, indent=4)\n', (4028, 4052), False, 'import json\n'), ((4181, 4221), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (4202, 4221), False, 'import logging\n'), ((4733, 4782), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.1)', 'model_name': 'modelname'}), '(temperature=0.1, model_name=modelname)\n', (4743, 4782), False, 'from langchain.chat_models import ChatOpenAI\n'), ((5139, 5405), 'langchain.llms.LlamaCpp', 'LlamaCpp', ([], {'model_path': "('./models/' + modelname)", 'cache': '(True)', 'n_gpu_layers': 'n_gpu_layers', 'n_batch': 'n_batch', 'n_ctx': '(2048)', 'n_threads': '(8)', 'temperature': '(0.01)', 'max_tokens': '(512)', 'f16_kv': '(True)', 'repeat_penalty': '(1.1)', 'min_p': '(0.05)', 'top_p': '(0.95)', 'top_k': '(40)', 'stop': "['<|end_of_turn|>']"}), "(model_path='./models/' + modelname, cache=True, n_gpu_layers=\n n_gpu_layers, n_batch=n_batch, n_ctx=2048, n_threads=8, temperature=\n 0.01, max_tokens=512, f16_kv=True, repeat_penalty=1.1, min_p=0.05,\n top_p=0.95, top_k=40, stop=['<|end_of_turn|>'])\n", (5147, 5405), False, 'from langchain.llms import LlamaCpp\n'), ((4150, 4169), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (4167, 4169), False, 'import logging\n'), ((4284, 4339), 'gradio.components.Textbox', 'gr.components.Textbox', ([], {'lines': '(7)', 'label': '"""Enter your text"""'}), "(lines=7, label='Enter your text')\n", (4305, 4339), True, 'import gradio as gr\n'), ((5107, 5122), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (5120, 5122), False, 'from langchain.cache import InMemoryCache\n'), ((6091, 6115), 'gtts.gTTS', 'gTTS', ([], {'text': '""""""', 'lang': '"""en"""'}), "(text='', lang='en')\n", (6095, 6115), False, 'from gtts import gTTS\n'), ((6141, 6155), 
'pyttsx3.init', 'pyttsx3.init', ([], {}), '()\n', (6153, 6155), False, 'import pyttsx3\n'), ((6643, 6682), 'utils.get_sentence_window_query_engine', 'get_sentence_window_query_engine', (['index'], {}), '(index)\n', (6675, 6682), False, 'from utils import get_sentence_window_query_engine\n'), ((1029, 1070), 'simpleaudio.WaveObject.from_wave_file', 'sa.WaveObject.from_wave_file', (['path_to_wav'], {}), '(path_to_wav)\n', (1057, 1070), True, 'import simpleaudio as sa\n'), ((1537, 1590), 'threading.Thread', 'threading.Thread', ([], {'target': 'play_and_delete', 'daemon': '(True)'}), '(target=play_and_delete, daemon=True)\n', (1553, 1590), False, 'import threading\n'), ((2572, 2586), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2584, 2586), False, 'from datetime import datetime\n'), ((2988, 3027), 'gtts.gTTS', 'gTTS', ([], {'text': 'response.response', 'lang': '"""en"""'}), "(text=response.response, lang='en')\n", (2992, 3027), False, 'from gtts import gTTS\n'), ((5866, 5937), 'TTS.api.TTS', 'TTS', ([], {'model_name': '"""tts_models/en/ljspeech/vits--neon"""', 'progress_bar': '(False)'}), "(model_name='tts_models/en/ljspeech/vits--neon', progress_bar=False)\n", (5869, 5937), False, 'from TTS.api import TTS\n'), ((6733, 6824), 'utils.get_automerging_query_engine', 'get_automerging_query_engine', ([], {'automerging_index': 'index', 'service_context': 'service_context'}), '(automerging_index=index, service_context=\n service_context)\n', (6761, 6824), False, 'from utils import get_automerging_query_engine\n')] |
from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
import os
def construct_index(directory_path):
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_outputs = 2000
# set maximum chunk overlap
max_chunk_overlap = 20
# set chunk size limit
chunk_size_limit = 600
# define LLM
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.5, model_name="text-davinci-003", max_tokens=num_outputs))
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
documents = SimpleDirectoryReader(directory_path).load_data()
index = GPTSimpleVectorIndex(
documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper
)
index.save_to_disk('index.json')
return index
def ask_ai(query):
index = GPTSimpleVectorIndex.load_from_disk('./Talking_Buddy/index.json')
response = index.query(query, response_mode="compact")
return response.response
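# NOTE: never hard-code a real API key; read it from the environment or a secrets manager instead.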
os.environ["OPENAI_API_KEY"] = "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"
construct_index("./Talking_Buddy/data")
| [
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.GPTSimpleVectorIndex",
"llama_index.SimpleDirectoryReader",
"llama_index.PromptHelper"
] | [((577, 676), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size_limit'}), '(max_input_size, num_outputs, max_chunk_overlap,\n chunk_size_limit=chunk_size_limit)\n', (589, 676), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n'), ((758, 852), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(documents, llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (778, 852), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n'), ((950, 1015), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['"""./Talking_Buddy/index.json"""'], {}), "('./Talking_Buddy/index.json')\n", (985, 1015), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n'), ((477, 555), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0.5)', 'model_name': '"""text-davinci-003"""', 'max_tokens': 'num_outputs'}), "(temperature=0.5, model_name='text-davinci-003', max_tokens=num_outputs)\n", (483, 555), False, 'from langchain import OpenAI\n'), ((691, 728), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['directory_path'], {}), '(directory_path)\n', (712, 728), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n')] |
import os
from llama_index import (
Document,
GPTVectorStoreIndex,
StorageContext,
load_index_from_storage,
)
class OpenAI:
"""
OpenAI class to handle all ChatGPT functions
"""
def __init__(self):
self.indices = {}
def add_document(self, room, context):
"""
adding context to chatGPT index based on room
"""
index_path = f"data/index/{room}"
index = self.get_index(room)
doc = Document(context)
index.insert(doc)
self.indices["room"] = index
index.storage_context.persist(persist_dir=index_path)
def build_index(self, room):
"""
build index for chatGPT based on the room
"""
index_path = f"data/index/{room}"
initial_context = "Initial Context"
doc = Document(initial_context)
index = GPTVectorStoreIndex([])
index.insert(doc)
index.storage_context.persist(persist_dir=index_path)
return index
def load_index(self, room):
"""
loading the chatGPT index based on the room
"""
print("read")
index_path = f"data/index/{room}"
if os.path.exists(index_path + "/docstore.json"):
storage_context = StorageContext.from_defaults(persist_dir=index_path)
index = load_index_from_storage(storage_context)
return index
index = self.build_index(room)
return index
def get_index(self, room):
"""
fetch the index based on the room
"""
if room not in self.indices:
self.indices["room"] = self.load_index(room)
return self.indices["room"]
openai = OpenAI()
| [
"llama_index.load_index_from_storage",
"llama_index.StorageContext.from_defaults",
"llama_index.GPTVectorStoreIndex",
"llama_index.Document"
] | [((475, 492), 'llama_index.Document', 'Document', (['context'], {}), '(context)\n', (483, 492), False, 'from llama_index import Document, GPTVectorStoreIndex, StorageContext, load_index_from_storage\n'), ((827, 852), 'llama_index.Document', 'Document', (['initial_context'], {}), '(initial_context)\n', (835, 852), False, 'from llama_index import Document, GPTVectorStoreIndex, StorageContext, load_index_from_storage\n'), ((870, 893), 'llama_index.GPTVectorStoreIndex', 'GPTVectorStoreIndex', (['[]'], {}), '([])\n', (889, 893), False, 'from llama_index import Document, GPTVectorStoreIndex, StorageContext, load_index_from_storage\n'), ((1188, 1233), 'os.path.exists', 'os.path.exists', (["(index_path + '/docstore.json')"], {}), "(index_path + '/docstore.json')\n", (1202, 1233), False, 'import os\n'), ((1265, 1317), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'index_path'}), '(persist_dir=index_path)\n', (1293, 1317), False, 'from llama_index import Document, GPTVectorStoreIndex, StorageContext, load_index_from_storage\n'), ((1338, 1378), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1361, 1378), False, 'from llama_index import Document, GPTVectorStoreIndex, StorageContext, load_index_from_storage\n')] |
import os
import streamlit as st
from dotenv import load_dotenv
from llama_index import GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext
from langchain.llms.openai import OpenAI
from biorxiv_manager import BioRxivManager
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
st.title("Ask BioRxiv")
query = st.text_input("What would you like to ask? (source: BioRxiv files)", "")
@st.cache_data
def fetch_and_parse():
# instantiating BioRxivManager runtime and fetch the parsed nodes
manager = BioRxivManager()
return manager.fetch_and_parse(interval="2023-07-01/2023-07-30")
embedded_documents = fetch_and_parse()
if st.button("Submit"):
if not query.strip():
st.error(f"Please provide the search query.")
else:
try:
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-4-32k", openai_api_key=openai_api_key))
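            # Prompt-helper limits sized for the gpt-4-32k context window (~32k tokens)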
max_input_size = 32767
num_output = 400
chunk_overlap_ratio = 0.2 # Adjust this value according to your need.
prompt_helper = PromptHelper(max_input_size, num_output, chunk_overlap_ratio)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
index = GPTVectorStoreIndex.from_documents(embedded_documents, service_context=service_context)
            response = index.as_query_engine().query(query)
st.success(response)
except Exception as e:
st.error(f"An error occurred: {e}")
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.PromptHelper"
] | [((238, 251), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (249, 251), False, 'from dotenv import load_dotenv\n'), ((269, 296), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (278, 296), False, 'import os\n'), ((298, 321), 'streamlit.title', 'st.title', (['"""Ask BioRxiv"""'], {}), "('Ask BioRxiv')\n", (306, 321), True, 'import streamlit as st\n'), ((330, 402), 'streamlit.text_input', 'st.text_input', (['"""What would you like to ask? (source: BioRxiv files)"""', '""""""'], {}), "('What would you like to ask? (source: BioRxiv files)', '')\n", (343, 402), True, 'import streamlit as st\n'), ((656, 675), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (665, 675), True, 'import streamlit as st\n'), ((526, 542), 'biorxiv_manager.BioRxivManager', 'BioRxivManager', ([], {}), '()\n', (540, 542), False, 'from biorxiv_manager import BioRxivManager\n'), ((711, 756), 'streamlit.error', 'st.error', (['f"""Please provide the search query."""'], {}), "(f'Please provide the search query.')\n", (719, 756), True, 'import streamlit as st\n'), ((1079, 1140), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'chunk_overlap_ratio'], {}), '(max_input_size, num_output, chunk_overlap_ratio)\n', (1091, 1140), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext\n'), ((1172, 1263), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (1200, 1263), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext\n'), ((1279, 1371), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['embedded_documents'], {'service_context': 'service_context'}), '(embedded_documents, service_context=\n service_context)\n', (1313, 1371), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext\n'), ((1434, 1454), 'streamlit.success', 'st.success', (['response'], {}), '(response)\n', (1444, 1454), True, 'import streamlit as st\n'), ((1498, 1533), 'streamlit.error', 'st.error', (['f"""An error occurred: {e}"""'], {}), "(f'An error occurred: {e}')\n", (1506, 1533), True, 'import streamlit as st\n'), ((825, 901), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-4-32k"""', 'openai_api_key': 'openai_api_key'}), "(temperature=0, model_name='gpt-4-32k', openai_api_key=openai_api_key)\n", (831, 901), False, 'from langchain.llms.openai import OpenAI\n')] |
import logging
from llama_index.langchain_helpers.agents.tools import LlamaIndexTool
from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters
from app.llama_index.index import setup_index
from app.llama_index.query_engine import setup_query_engine
from app.database.crud import get_vectorized_election_programs_from_db
from app.database.database import Session
def setup_agent_tools():
session = Session()
vectorized_election_programs = get_vectorized_election_programs_from_db(session)
logging.info(f"Loaded {len(vectorized_election_programs)} vectorized programs.")
vector_tools = []
for program in vectorized_election_programs:
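        # Restrict retrieval to this election program's chunks via exact-match metadata filters.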
meta_data_filters = MetadataFilters(
filters=[
ExactMatchFilter(key="group_id", value=program.id),
ExactMatchFilter(key="election_id", value=program.election_id),
ExactMatchFilter(key="party_id", value=program.party_id),
]
)
# define query engines
vector_index = setup_index()
vector_query_engine = setup_query_engine(
vector_index, filters=meta_data_filters
)
# define tools
query_engine_tool = LlamaIndexTool(
name="vector_tool",
description=(
f"Nützlich für Fragen zu spezifischen Aspekten des Wahlprogramms der {program.full_name} für die {program.label}."
),
query_engine=vector_query_engine,
)
logging.info(f"Loaded query engine tool for {program.full_name}.")
vector_tools.append(query_engine_tool)
return vector_tools
| [
"llama_index.langchain_helpers.agents.tools.LlamaIndexTool",
"llama_index.vector_stores.types.ExactMatchFilter"
] | [((424, 433), 'app.database.database.Session', 'Session', ([], {}), '()\n', (431, 433), False, 'from app.database.database import Session\n'), ((469, 518), 'app.database.crud.get_vectorized_election_programs_from_db', 'get_vectorized_election_programs_from_db', (['session'], {}), '(session)\n', (509, 518), False, 'from app.database.crud import get_vectorized_election_programs_from_db\n'), ((1044, 1057), 'app.llama_index.index.setup_index', 'setup_index', ([], {}), '()\n', (1055, 1057), False, 'from app.llama_index.index import setup_index\n'), ((1088, 1147), 'app.llama_index.query_engine.setup_query_engine', 'setup_query_engine', (['vector_index'], {'filters': 'meta_data_filters'}), '(vector_index, filters=meta_data_filters)\n', (1106, 1147), False, 'from app.llama_index.query_engine import setup_query_engine\n'), ((1221, 1427), 'llama_index.langchain_helpers.agents.tools.LlamaIndexTool', 'LlamaIndexTool', ([], {'name': '"""vector_tool"""', 'description': 'f"""Nützlich für Fragen zu spezifischen Aspekten des Wahlprogramms der {program.full_name} für die {program.label}."""', 'query_engine': 'vector_query_engine'}), "(name='vector_tool', description=\n f'Nützlich für Fragen zu spezifischen Aspekten des Wahlprogramms der {program.full_name} für die {program.label}.'\n , query_engine=vector_query_engine)\n", (1235, 1427), False, 'from llama_index.langchain_helpers.agents.tools import LlamaIndexTool\n'), ((1505, 1571), 'logging.info', 'logging.info', (['f"""Loaded query engine tool for {program.full_name}."""'], {}), "(f'Loaded query engine tool for {program.full_name}.')\n", (1517, 1571), False, 'import logging\n'), ((759, 809), 'llama_index.vector_stores.types.ExactMatchFilter', 'ExactMatchFilter', ([], {'key': '"""group_id"""', 'value': 'program.id'}), "(key='group_id', value=program.id)\n", (775, 809), False, 'from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n'), ((827, 889), 'llama_index.vector_stores.types.ExactMatchFilter', 'ExactMatchFilter', ([], {'key': '"""election_id"""', 'value': 'program.election_id'}), "(key='election_id', value=program.election_id)\n", (843, 889), False, 'from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n'), ((907, 963), 'llama_index.vector_stores.types.ExactMatchFilter', 'ExactMatchFilter', ([], {'key': '"""party_id"""', 'value': 'program.party_id'}), "(key='party_id', value=program.party_id)\n", (923, 963), False, 'from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n')] |
import os
from configparser import ConfigParser, SectionProxy
from typing import Any, Type
from llama_index import (
LLMPredictor,
ServiceContext,
VectorStoreIndex,
)
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.indices import SimpleKeywordTableIndex
from llama_index.indices.base import BaseIndex
from llama_index.indices.loading import load_index_from_storage
from llama_index.llm_predictor import StructuredLLMPredictor
from llama_index.llms.llm import LLM
from llama_index.llms.openai import OpenAI
from llama_index.storage.storage_context import StorageContext
CONFIG_FILE_NAME = "config.ini"
DEFAULT_PERSIST_DIR = "./storage"
DEFAULT_CONFIG = {
"store": {"persist_dir": DEFAULT_PERSIST_DIR},
"index": {"type": "default"},
"embed_model": {"type": "default"},
"llm_predictor": {"type": "default"},
}
def load_config(root: str = ".") -> ConfigParser:
"""Load configuration from file."""
config = ConfigParser()
config.read_dict(DEFAULT_CONFIG)
config.read(os.path.join(root, CONFIG_FILE_NAME))
return config
def save_config(config: ConfigParser, root: str = ".") -> None:
"""Load configuration to file."""
with open(os.path.join(root, CONFIG_FILE_NAME), "w") as fd:
config.write(fd)
def load_index(root: str = ".") -> BaseIndex[Any]:
"""Load existing index file."""
config = load_config(root)
service_context = _load_service_context(config)
# Index type
index_type: Type
if config["index"]["type"] == "default" or config["index"]["type"] == "vector":
index_type = VectorStoreIndex
elif config["index"]["type"] == "keyword":
index_type = SimpleKeywordTableIndex
else:
raise KeyError(f"Unknown index.type {config['index']['type']}")
try:
# try loading index
storage_context = _load_storage_context(config)
index = load_index_from_storage(storage_context)
except ValueError:
# build index
storage_context = StorageContext.from_defaults()
index = index_type(
nodes=[], service_context=service_context, storage_context=storage_context
)
return index
def save_index(index: BaseIndex[Any], root: str = ".") -> None:
"""Save index to file."""
config = load_config(root)
persist_dir = config["store"]["persist_dir"]
index.storage_context.persist(persist_dir=persist_dir)
def _load_service_context(config: ConfigParser) -> ServiceContext:
"""Internal function to load service context based on configuration."""
embed_model = _load_embed_model(config)
llm_predictor = _load_llm_predictor(config)
return ServiceContext.from_defaults(
llm_predictor=llm_predictor, embed_model=embed_model
)
def _load_storage_context(config: ConfigParser) -> StorageContext:
persist_dir = config["store"]["persist_dir"]
return StorageContext.from_defaults(persist_dir=persist_dir)
def _load_llm_predictor(config: ConfigParser) -> LLMPredictor:
"""Internal function to load LLM predictor based on configuration."""
model_type = config["llm_predictor"]["type"].lower()
if model_type == "default":
llm = _load_llm(config["llm_predictor"])
return LLMPredictor(llm=llm)
elif model_type == "structured":
llm = _load_llm(config["llm_predictor"])
return StructuredLLMPredictor(llm=llm)
else:
raise KeyError("llm_predictor.type")
def _load_llm(section: SectionProxy) -> LLM:
if "engine" in section:
return OpenAI(engine=section["engine"])
else:
return OpenAI()
def _load_embed_model(config: ConfigParser) -> BaseEmbedding:
"""Internal function to load embedding model based on configuration."""
model_type = config["embed_model"]["type"]
if model_type == "default":
return OpenAIEmbedding()
else:
raise KeyError("embed_model.type")
| [
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.llm_predictor.StructuredLLMPredictor",
"llama_index.llms.openai.OpenAI",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.indices.loading.load_index_from_storage",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((1023, 1037), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (1035, 1037), False, 'from configparser import ConfigParser, SectionProxy\n'), ((2725, 2812), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model'}), '(llm_predictor=llm_predictor, embed_model=\n embed_model)\n', (2753, 2812), False, 'from llama_index import LLMPredictor, ServiceContext, VectorStoreIndex\n'), ((2951, 3004), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (2979, 3004), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((1091, 1127), 'os.path.join', 'os.path.join', (['root', 'CONFIG_FILE_NAME'], {}), '(root, CONFIG_FILE_NAME)\n', (1103, 1127), False, 'import os\n'), ((1957, 1997), 'llama_index.indices.loading.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1980, 1997), False, 'from llama_index.indices.loading import load_index_from_storage\n'), ((3297, 3318), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (3309, 3318), False, 'from llama_index import LLMPredictor, ServiceContext, VectorStoreIndex\n'), ((3597, 3629), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'engine': "section['engine']"}), "(engine=section['engine'])\n", (3603, 3629), False, 'from llama_index.llms.openai import OpenAI\n'), ((3655, 3663), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {}), '()\n', (3661, 3663), False, 'from llama_index.llms.openai import OpenAI\n'), ((3898, 3915), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (3913, 3915), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((1265, 1301), 'os.path.join', 'os.path.join', (['root', 'CONFIG_FILE_NAME'], {}), '(root, CONFIG_FILE_NAME)\n', (1277, 1301), False, 'import os\n'), ((2069, 2099), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (2097, 2099), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((3420, 3451), 'llama_index.llm_predictor.StructuredLLMPredictor', 'StructuredLLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (3442, 3451), False, 'from llama_index.llm_predictor import StructuredLLMPredictor\n')] |
from components.store import get_storage_context
from llama_index import VectorStoreIndex
from llama_index.retrievers import (
VectorIndexRetriever,
)
from models.gpts import get_gpts_by_uuids
def search_gpts(question):
storage_context = get_storage_context()
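    # The index wraps an empty document list; retrieval runs against the vectors already persisted in the storage context's vector store.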
index = VectorStoreIndex.from_documents([], storage_context=storage_context)
retriever = VectorIndexRetriever(index=index, similarity_top_k=10)
nodes = retriever.retrieve(question)
uuids = []
uuids_with_scores = {}
gpts = []
for node in nodes:
print("node metadata", node.metadata)
if node.score > 0.80:
uuid = node.metadata['uuid']
uuids.append(uuid)
uuids_with_scores[uuid] = node.score
if len(uuids) == 0:
return gpts
rows = get_gpts_by_uuids(uuids)
for row in rows:
gpts.append({
"uuid": row.uuid,
"name": row.name,
"description": row.description,
"avatar_url": row.avatar_url,
"author_name": row.author_name,
"created_at": row.created_at,
"updated_at": row.updated_at,
"visit_url": "https://chat.openai.com/g/" + row.short_url,
"score": uuids_with_scores[row.uuid],
})
sorted_gpts = sorted(gpts, key=lambda x: x['score'], reverse=True)
return sorted_gpts
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.retrievers.VectorIndexRetriever"
] | [((248, 269), 'components.store.get_storage_context', 'get_storage_context', ([], {}), '()\n', (267, 269), False, 'from components.store import get_storage_context\n'), ((282, 350), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['[]'], {'storage_context': 'storage_context'}), '([], storage_context=storage_context)\n', (313, 350), False, 'from llama_index import VectorStoreIndex\n'), ((368, 422), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': '(10)'}), '(index=index, similarity_top_k=10)\n', (388, 422), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((799, 823), 'models.gpts.get_gpts_by_uuids', 'get_gpts_by_uuids', (['uuids'], {}), '(uuids)\n', (816, 823), False, 'from models.gpts import get_gpts_by_uuids\n')] |
"""LanceDB vector store with cloud storage support."""
import os
from typing import Any, Optional
from dotenv import load_dotenv
from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores import LanceDBVectorStore as LanceDBVectorStoreBase
from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities
from llama_index.vector_stores.types import VectorStoreQuery, VectorStoreQueryResult
from pandas import DataFrame
load_dotenv()
class LanceDBVectorStore(LanceDBVectorStoreBase):
"""Advanced LanceDB Vector Store supporting cloud storage and prefiltering."""
from lancedb.query import LanceQueryBuilder
from lancedb.table import Table
def __init__(
self,
uri: str,
table_name: str = "vectors",
nprobes: int = 20,
refine_factor: Optional[int] = None,
api_key: Optional[str] = None,
region: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Init params."""
self._setup_connection(uri, api_key, region)
self.uri = uri
self.table_name = table_name
self.nprobes = nprobes
self.refine_factor = refine_factor
self.api_key = api_key
self.region = region
def _setup_connection(self, uri: str, api_key: Optional[str] = None, region: Optional[str] = None):
"""Establishes a robust connection to LanceDB."""
api_key = api_key or os.getenv('LANCEDB_API_KEY')
region = region or os.getenv('LANCEDB_REGION')
import_err_msg = "`lancedb` package not found, please run `pip install lancedb`"
try:
import lancedb
except ImportError:
raise ImportError(import_err_msg)
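        # An API key plus region selects a LanceDB Cloud connection; otherwise fall back to a local database at the given uri.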
if api_key and region:
self.connection = lancedb.connect(uri, api_key=api_key, region=region)
else:
self.connection = lancedb.connect(uri)
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Enhanced query method to support prefiltering in LanceDB queries."""
table = self.connection.open_table(self.table_name)
lance_query = self._prepare_lance_query(query, table, **kwargs)
results = lance_query.to_df()
return self._construct_query_result(results)
def _prepare_lance_query(self, query: VectorStoreQuery, table: Table, **kwargs) -> LanceQueryBuilder:
"""Prepares the LanceDB query considering prefiltering and additional parameters."""
if query.filters is not None:
if "where" in kwargs:
raise ValueError(
"Cannot specify filter via both query and kwargs. "
"Use kwargs only for lancedb specific items that are "
"not supported via the generic query interface.")
where = _to_lance_filter(query.filters)
else:
where = kwargs.pop("where", None)
prefilter = kwargs.pop("prefilter", False)
lance_query = (
table.search(query.query_embedding).limit(query.similarity_top_k).where(
where, prefilter=prefilter).nprobes(self.nprobes))
if self.refine_factor is not None:
lance_query.refine_factor(self.refine_factor)
return lance_query
def _construct_query_result(self, results: DataFrame) -> VectorStoreQueryResult:
"""Constructs a VectorStoreQueryResult from a LanceDB query result."""
nodes = []
for _, row in results.iterrows():
node = TextNode(
text=row.get('text', ''), # ensure text is a string
id_=row['id'],
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id=row['doc_id']),
})
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=_to_llama_similarities(results),
ids=results["id"].tolist(),
)
| [
"llama_index.vector_stores.lancedb._to_llama_similarities",
"llama_index.schema.RelatedNodeInfo",
"llama_index.vector_stores.lancedb._to_lance_filter"
] | [((490, 503), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (501, 503), False, 'from dotenv import load_dotenv\n'), ((1464, 1492), 'os.getenv', 'os.getenv', (['"""LANCEDB_API_KEY"""'], {}), "('LANCEDB_API_KEY')\n", (1473, 1492), False, 'import os\n'), ((1520, 1547), 'os.getenv', 'os.getenv', (['"""LANCEDB_REGION"""'], {}), "('LANCEDB_REGION')\n", (1529, 1547), False, 'import os\n'), ((1814, 1866), 'lancedb.connect', 'lancedb.connect', (['uri'], {'api_key': 'api_key', 'region': 'region'}), '(uri, api_key=api_key, region=region)\n', (1829, 1866), False, 'import lancedb\n'), ((1911, 1931), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1926, 1931), False, 'import lancedb\n'), ((2898, 2929), 'llama_index.vector_stores.lancedb._to_lance_filter', '_to_lance_filter', (['query.filters'], {}), '(query.filters)\n', (2914, 2929), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((4021, 4052), 'llama_index.vector_stores.lancedb._to_llama_similarities', '_to_llama_similarities', (['results'], {}), '(results)\n', (4043, 4052), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((3841, 3879), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': "row['doc_id']"}), "(node_id=row['doc_id'])\n", (3856, 3879), False, 'from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode\n')] |
from typing import List
from fastapi.responses import StreamingResponse
from llama_index.chat_engine.types import BaseChatEngine
from app.engine.index import get_chat_engine
from fastapi import APIRouter, Depends, HTTPException, Request, status
from llama_index.llms.base import ChatMessage
from llama_index.llms.types import MessageRole
from pydantic import BaseModel
chat_router = r = APIRouter()
class _Message(BaseModel):
role: MessageRole
content: str
class _ChatData(BaseModel):
messages: List[_Message]
@r.post("")
async def chat(
request: Request,
data: _ChatData,
chat_engine: BaseChatEngine = Depends(get_chat_engine),
):
# check preconditions and get last message
if len(data.messages) == 0:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="No messages provided",
)
lastMessage = data.messages.pop()
if lastMessage.role != MessageRole.USER:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Last message must be from user",
)
# convert messages coming from the request to type ChatMessage
messages = [
ChatMessage(
role=m.role,
content=m.content,
)
for m in data.messages
]
# query chat engine
response = await chat_engine.astream_chat(lastMessage.content, messages)
# stream response
async def event_generator():
async for token in response.async_response_gen():
# If client closes connection, stop sending events
if await request.is_disconnected():
break
yield token
return StreamingResponse(event_generator(), media_type="text/plain")
| [
"llama_index.llms.base.ChatMessage"
] | [((390, 401), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (399, 401), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((636, 660), 'fastapi.Depends', 'Depends', (['get_chat_engine'], {}), '(get_chat_engine)\n', (643, 660), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((758, 848), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""No messages provided"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'No messages provided')\n", (771, 848), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((976, 1076), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""Last message must be from user"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'Last message must be from user')\n", (989, 1076), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((1199, 1242), 'llama_index.llms.base.ChatMessage', 'ChatMessage', ([], {'role': 'm.role', 'content': 'm.content'}), '(role=m.role, content=m.content)\n', (1210, 1242), False, 'from llama_index.llms.base import ChatMessage\n')] |
# Copyright (c) Timescale, Inc. (2023)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
import streamlit as st
from streamlit.hello.utils import show_code
from llama_index.vector_stores import TimescaleVectorStore
from llama_index import ServiceContext, StorageContext
from llama_index.indices.vector_store import VectorStoreIndex
from llama_index.llms import OpenAI
from llama_index import set_global_service_context
import pandas as pd
from pathlib import Path
from datetime import datetime, timedelta
from timescale_vector import client
from typing import List, Tuple
from llama_index.schema import TextNode
from llama_index.embeddings import OpenAIEmbedding
import psycopg2
def get_repos():
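    # Return a {repo_url: table_name} mapping from the time_machine_catalog table,
    # or an empty dict if the catalog table has not been created yet.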
with psycopg2.connect(dsn=st.secrets["TIMESCALE_SERVICE_URL"]) as connection:
# Create a cursor within the context manager
with connection.cursor() as cursor:
try:
select_data_sql = "SELECT * FROM time_machine_catalog;"
cursor.execute(select_data_sql)
except psycopg2.errors.UndefinedTable as e:
return {}
catalog_entries = cursor.fetchall()
catalog_dict = {}
for entry in catalog_entries:
repo_url, table_name = entry
catalog_dict[repo_url] = table_name
return catalog_dict
def get_auto_retriever(index, retriever_args):
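    # Wrap the index in an auto-retriever so the LLM can infer metadata filters
    # (commit hash, author, time window), then expose it to an OpenAI agent as a
    # query-engine tool that drives the chat.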
from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo
vector_store_info = VectorStoreInfo(
content_info="Description of the commits to PostgreSQL. Describes changes made to Postgres",
metadata_info=[
MetadataInfo(
name="commit_hash",
type="str",
description="Commit Hash",
),
MetadataInfo(
name="author",
type="str",
description="Author of the commit",
),
MetadataInfo(
name="__start_date",
type="datetime in iso format",
description="All results will be after this datetime",
),
MetadataInfo(
name="__end_date",
type="datetime in iso format",
description="All results will be before this datetime",
)
],
)
from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever
retriever = VectorIndexAutoRetriever(index,
vector_store_info=vector_store_info,
service_context=index.service_context,
**retriever_args)
# build query engine
from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine.from_args(
retriever=retriever, service_context=index.service_context
)
from llama_index.tools.query_engine import QueryEngineTool
# convert query engine to tool
query_engine_tool = QueryEngineTool.from_defaults(query_engine=query_engine)
from llama_index.agent import OpenAIAgent
chat_engine = OpenAIAgent.from_tools(
tools=[query_engine_tool],
llm=index.service_context.llm,
verbose=True
#service_context=index.service_context
)
return chat_engine
def tm_demo():
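    # Streamlit page: pick a repo and time window in the sidebar, build a chat
    # engine over the commit history, and stream the conversation back to the user.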
repos = get_repos()
months = st.sidebar.slider('How many months back to search (0=no limit)?', 0, 130, 0)
if "config_months" not in st.session_state.keys() or months != st.session_state.config_months:
st.session_state.clear()
topk = st.sidebar.slider('How many commits to retrieve', 1, 150, 20)
if "config_topk" not in st.session_state.keys() or topk != st.session_state.config_topk:
st.session_state.clear()
if len(repos) > 0:
repo = st.sidebar.selectbox("Choose a repo", repos.keys())
else:
        st.error("No repositories found, please [load some data first](/LoadData)")
return
if "config_repo" not in st.session_state.keys() or repo != st.session_state.config_repo:
st.session_state.clear()
st.session_state.config_months = months
st.session_state.config_topk = topk
st.session_state.config_repo = repo
if "messages" not in st.session_state.keys(): # Initialize the chat messages history
st.session_state.messages = [
{"role": "assistant", "content": "Please choose a repo and time filter on the sidebar and then ask me a question about the git history"}
]
vector_store = TimescaleVectorStore.from_params(
service_url=st.secrets["TIMESCALE_SERVICE_URL"],
table_name=repos[repo],
time_partition_interval=timedelta(days=7),
    )
service_context = ServiceContext.from_defaults(llm=OpenAI(model="gpt-4", temperature=0.1))
set_global_service_context(service_context)
index = VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=service_context)
#chat engine goes into the session to retain history
if "chat_engine" not in st.session_state.keys(): # Initialize the chat engine
retriever_args = {"similarity_top_k" : int(topk)}
if months > 0:
end_dt = datetime.now()
start_dt = end_dt - timedelta(weeks=4*months)
retriever_args["vector_store_kwargs"] = ({"start_date": start_dt, "end_date":end_dt})
st.session_state.chat_engine = get_auto_retriever(index, retriever_args)
#st.session_state.chat_engine = index.as_chat_engine(chat_mode="best", similarity_top_k=20, verbose=True)
if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = st.session_state.chat_engine.chat(prompt, function_call="query_engine_tool")
st.write(response.response)
message = {"role": "assistant", "content": response.response}
st.session_state.messages.append(message) # Add response to message history
st.set_page_config(page_title="Time machine demo", page_icon="🧑💼")
st.markdown("# Time Machine")
st.sidebar.header("Welcome to the Time Machine")
debug_llamaindex = False
if debug_llamaindex:
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
tm_demo()
#show_code(tm_demo)
| [
"llama_index.tools.query_engine.QueryEngineTool.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.vector_stores.types.MetadataInfo",
"llama_index.set_global_service_context",
"llama_index.indices.vector_store.retrievers.VectorIndexAutoRetriever",
"llama_index.agent.OpenAIAgent.from_tools",
"llama_index.indices.vector_store.VectorStoreIndex.from_vector_store",
"llama_index.query_engine.retriever_query_engine.RetrieverQueryEngine.from_args"
] | [((7098, 7170), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Time machine demo"""', 'page_icon': '"""🧑\u200d💼"""'}), "(page_title='Time machine demo', page_icon='🧑\\u200d💼')\n", (7116, 7170), True, 'import streamlit as st\n'), ((7166, 7195), 'streamlit.markdown', 'st.markdown', (['"""# Time Machine"""'], {}), "('# Time Machine')\n", (7177, 7195), True, 'import streamlit as st\n'), ((7196, 7244), 'streamlit.sidebar.header', 'st.sidebar.header', (['"""Welcome to the Time Machine"""'], {}), "('Welcome to the Time Machine')\n", (7213, 7244), True, 'import streamlit as st\n'), ((2991, 3120), 'llama_index.indices.vector_store.retrievers.VectorIndexAutoRetriever', 'VectorIndexAutoRetriever', (['index'], {'vector_store_info': 'vector_store_info', 'service_context': 'index.service_context'}), '(index, vector_store_info=vector_store_info,\n service_context=index.service_context, **retriever_args)\n', (3015, 3120), False, 'from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever\n'), ((3376, 3471), 'llama_index.query_engine.retriever_query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', ([], {'retriever': 'retriever', 'service_context': 'index.service_context'}), '(retriever=retriever, service_context=index.\n service_context)\n', (3406, 3471), False, 'from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine\n'), ((3604, 3660), 'llama_index.tools.query_engine.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {'query_engine': 'query_engine'}), '(query_engine=query_engine)\n', (3633, 3660), False, 'from llama_index.tools.query_engine import QueryEngineTool\n'), ((3726, 3825), 'llama_index.agent.OpenAIAgent.from_tools', 'OpenAIAgent.from_tools', ([], {'tools': '[query_engine_tool]', 'llm': 'index.service_context.llm', 'verbose': '(True)'}), '(tools=[query_engine_tool], llm=index.service_context\n .llm, verbose=True)\n', (3748, 3825), False, 'from llama_index.agent import OpenAIAgent\n'), ((3975, 4051), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""How many months back to search (0=no limit)?"""', '(0)', '(130)', '(0)'], {}), "('How many months back to search (0=no limit)?', 0, 130, 0)\n", (3992, 4051), True, 'import streamlit as st\n'), ((4197, 4258), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""How many commits to retrieve"""', '(1)', '(150)', '(20)'], {}), "('How many commits to retrieve', 1, 150, 20)\n", (4214, 4258), True, 'import streamlit as st\n'), ((5443, 5486), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (5469, 5486), False, 'from llama_index import set_global_service_context\n'), ((5499, 5597), 'llama_index.indices.vector_store.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (5533, 5597), False, 'from llama_index.indices.vector_store import VectorStoreIndex\n'), ((7331, 7389), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (7350, 7389), False, 'import logging\n'), ((1240, 1297), 'psycopg2.connect', 'psycopg2.connect', ([], {'dsn': "st.secrets['TIMESCALE_SERVICE_URL']"}), "(dsn=st.secrets['TIMESCALE_SERVICE_URL'])\n", (1256, 1297), False, 'import psycopg2\n'), ((4160, 4184), 
'streamlit.session_state.clear', 'st.session_state.clear', ([], {}), '()\n', (4182, 4184), True, 'import streamlit as st\n'), ((4360, 4384), 'streamlit.session_state.clear', 'st.session_state.clear', ([], {}), '()\n', (4382, 4384), True, 'import streamlit as st\n'), ((4502, 4578), 'streamlit.error', 'st.error', (['"""No repositiories found, please [load some data first](/LoadData)"""'], {}), "('No repositiories found, please [load some data first](/LoadData)')\n", (4510, 4578), True, 'import streamlit as st\n'), ((4700, 4724), 'streamlit.session_state.clear', 'st.session_state.clear', ([], {}), '()\n', (4722, 4724), True, 'import streamlit as st\n'), ((4881, 4904), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4902, 4904), True, 'import streamlit as st\n'), ((5693, 5716), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (5714, 5716), True, 'import streamlit as st\n'), ((6233, 6263), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (6246, 6263), True, 'import streamlit as st\n'), ((6322, 6391), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (6354, 6391), True, 'import streamlit as st\n'), ((7425, 7465), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (7446, 7465), False, 'import logging\n'), ((4083, 4106), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4104, 4106), True, 'import streamlit as st\n'), ((4287, 4310), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4308, 4310), True, 'import streamlit as st\n'), ((4627, 4650), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4648, 4650), True, 'import streamlit as st\n'), ((5317, 5334), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (5326, 5334), False, 'from datetime import datetime, timedelta\n'), ((5399, 5437), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4"""', 'temperature': '(0.1)'}), "(model='gpt-4', temperature=0.1)\n", (5405, 5437), False, 'from llama_index.llms import OpenAI\n'), ((5849, 5863), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5861, 5863), False, 'from datetime import datetime, timedelta\n'), ((6486, 6518), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (6501, 6518), True, 'import streamlit as st\n'), ((6532, 6560), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (6540, 6560), True, 'import streamlit as st\n'), ((6705, 6733), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (6720, 6733), True, 'import streamlit as st\n'), ((7394, 7413), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7411, 7413), False, 'import logging\n'), ((2185, 2256), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""commit_hash"""', 'type': '"""str"""', 'description': '"""Commit Hash"""'}), "(name='commit_hash', type='str', description='Commit Hash')\n", (2197, 2256), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((2333, 2408), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""author"""', 'type': '"""str"""', 'description': '"""Author of the commit"""'}), "(name='author', type='str', 
description='Author of the commit')\n", (2345, 2408), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((2485, 2608), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""__start_date"""', 'type': '"""datetime in iso format"""', 'description': '"""All results will be after this datetime"""'}), "(name='__start_date', type='datetime in iso format',\n description='All results will be after this datetime')\n", (2497, 2608), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((2686, 2809), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""__end_date"""', 'type': '"""datetime in iso format"""', 'description': '"""All results will be before this datetime"""'}), "(name='__end_date', type='datetime in iso format', description=\n 'All results will be before this datetime')\n", (2698, 2809), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((5896, 5923), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(4 * months)'}), '(weeks=4 * months)\n', (5905, 5923), False, 'from datetime import datetime, timedelta\n'), ((6752, 6777), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (6762, 6777), True, 'import streamlit as st\n'), ((6806, 6882), 'streamlit.session_state.chat_engine.chat', 'st.session_state.chat_engine.chat', (['prompt'], {'function_call': '"""query_engine_tool"""'}), "(prompt, function_call='query_engine_tool')\n", (6839, 6882), True, 'import streamlit as st\n'), ((6899, 6926), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (6907, 6926), True, 'import streamlit as st\n'), ((7021, 7062), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (7053, 7062), True, 'import streamlit as st\n')] |