| code (string, 161 – 233k chars) | apis (sequence, 1 – 24 items) | extract_api (string, 162 – 68.5k chars) |
|---|---|---|
# Copyright (c) Timescale, Inc. (2023)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
import streamlit as st
from streamlit.hello.utils import show_code
from llama_index.vector_stores import TimescaleVectorStore
from llama_index import ServiceContext, StorageContext
from llama_index.indices.vector_store import VectorStoreIndex
from llama_index.llms import OpenAI
from llama_index import set_global_service_context
import pandas as pd
from pathlib import Path
from datetime import datetime, timedelta
from timescale_vector import client
from typing import List, Tuple
from llama_index.schema import TextNode
from llama_index.embeddings import OpenAIEmbedding
import psycopg2
def get_repos():
    with psycopg2.connect(dsn=st.secrets["TIMESCALE_SERVICE_URL"]) as connection:
        # Create a cursor within the context manager
        with connection.cursor() as cursor:
            try:
                select_data_sql = "SELECT * FROM time_machine_catalog;"
                cursor.execute(select_data_sql)
            except psycopg2.errors.UndefinedTable as e:
                return {}

            catalog_entries = cursor.fetchall()
            catalog_dict = {}
            for entry in catalog_entries:
                repo_url, table_name = entry
                catalog_dict[repo_url] = table_name
            return catalog_dict
def get_auto_retriever(index, retriever_args):
    from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo
    vector_store_info = VectorStoreInfo(
        content_info="Description of the commits to PostgreSQL. Describes changes made to Postgres",
        metadata_info=[
            MetadataInfo(
                name="commit_hash",
                type="str",
                description="Commit Hash",
            ),
            MetadataInfo(
                name="author",
                type="str",
                description="Author of the commit",
            ),
            MetadataInfo(
                name="__start_date",
                type="datetime in iso format",
                description="All results will be after this datetime",
            ),
            MetadataInfo(
                name="__end_date",
                type="datetime in iso format",
                description="All results will be before this datetime",
            )
        ],
    )

    from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever
    retriever = VectorIndexAutoRetriever(index,
                                         vector_store_info=vector_store_info,
                                         service_context=index.service_context,
                                         **retriever_args)

    # build query engine
    from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
    query_engine = RetrieverQueryEngine.from_args(
        retriever=retriever, service_context=index.service_context
    )

    from llama_index.tools.query_engine import QueryEngineTool
    # convert query engine to tool
    query_engine_tool = QueryEngineTool.from_defaults(query_engine=query_engine)

    from llama_index.agent import OpenAIAgent
    chat_engine = OpenAIAgent.from_tools(
        tools=[query_engine_tool],
        llm=index.service_context.llm,
        verbose=True
        # service_context=index.service_context
    )
    return chat_engine
def tm_demo():
    repos = get_repos()

    months = st.sidebar.slider('How many months back to search (0=no limit)?', 0, 130, 0)
    if "config_months" not in st.session_state.keys() or months != st.session_state.config_months:
        st.session_state.clear()

    topk = st.sidebar.slider('How many commits to retrieve', 1, 150, 20)
    if "config_topk" not in st.session_state.keys() or topk != st.session_state.config_topk:
        st.session_state.clear()

    if len(repos) > 0:
        repo = st.sidebar.selectbox("Choose a repo", repos.keys())
    else:
        st.error("No repositories found, please [load some data first](/LoadData)")
        return

    if "config_repo" not in st.session_state.keys() or repo != st.session_state.config_repo:
        st.session_state.clear()

    st.session_state.config_months = months
    st.session_state.config_topk = topk
    st.session_state.config_repo = repo

    if "messages" not in st.session_state.keys():  # Initialize the chat messages history
        st.session_state.messages = [
            {"role": "assistant", "content": "Please choose a repo and time filter on the sidebar and then ask me a question about the git history"}
        ]

    vector_store = TimescaleVectorStore.from_params(
        service_url=st.secrets["TIMESCALE_SERVICE_URL"],
        table_name=repos[repo],
        time_partition_interval=timedelta(days=7),
    )

    service_context = ServiceContext.from_defaults(llm=OpenAI(model="gpt-4", temperature=0.1))
    set_global_service_context(service_context)
    index = VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=service_context)

    # chat engine goes into the session to retain history
    if "chat_engine" not in st.session_state.keys():  # Initialize the chat engine
        retriever_args = {"similarity_top_k": int(topk)}
        if months > 0:
            end_dt = datetime.now()
            start_dt = end_dt - timedelta(weeks=4 * months)
            retriever_args["vector_store_kwargs"] = {"start_date": start_dt, "end_date": end_dt}
        st.session_state.chat_engine = get_auto_retriever(index, retriever_args)
        # st.session_state.chat_engine = index.as_chat_engine(chat_mode="best", similarity_top_k=20, verbose=True)

    if prompt := st.chat_input("Your question"):  # Prompt for user input and save to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})

    for message in st.session_state.messages:  # Display the prior chat messages
        with st.chat_message(message["role"]):
            st.write(message["content"])

    # If last message is not from assistant, generate a new response
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                response = st.session_state.chat_engine.chat(prompt, function_call="query_engine_tool")
                st.write(response.response)
                message = {"role": "assistant", "content": response.response}
                st.session_state.messages.append(message)  # Add response to message history
st.set_page_config(page_title="Time machine demo", page_icon="🧑‍💼")
st.markdown("# Time Machine")
st.sidebar.header("Welcome to the Time Machine")
debug_llamaindex = False
if debug_llamaindex:
    import logging
    import sys

    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
tm_demo()
#show_code(tm_demo)
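# Hedged usage sketch (illustrative only, not part of the original app): the chat engine
# returned by get_auto_retriever() can also be exercised outside Streamlit, assuming the
# same `index` built from the TimescaleVectorStore as in tm_demo():
#
# engine = get_auto_retriever(index, {"similarity_top_k": 20})
# print(engine.chat("Which authors changed the query planner recently?"))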
| [
"llama_index.tools.query_engine.QueryEngineTool.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.vector_stores.types.MetadataInfo",
"llama_index.set_global_service_context",
"llama_index.indices.vector_store.retrievers.VectorIndexAutoRetriever",
"llama_index.agent.OpenAIAgent.from_tools",
"llama_index.indices.vector_store.VectorStoreIndex.from_vector_store",
"llama_index.query_engine.retriever_query_engine.RetrieverQueryEngine.from_args"
] | [((7098, 7170), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Time machine demo"""', 'page_icon': '"""🧑\u200d💼"""'}), "(page_title='Time machine demo', page_icon='🧑\\u200d💼')\n", (7116, 7170), True, 'import streamlit as st\n'), ((7166, 7195), 'streamlit.markdown', 'st.markdown', (['"""# Time Machine"""'], {}), "('# Time Machine')\n", (7177, 7195), True, 'import streamlit as st\n'), ((7196, 7244), 'streamlit.sidebar.header', 'st.sidebar.header', (['"""Welcome to the Time Machine"""'], {}), "('Welcome to the Time Machine')\n", (7213, 7244), True, 'import streamlit as st\n'), ((2991, 3120), 'llama_index.indices.vector_store.retrievers.VectorIndexAutoRetriever', 'VectorIndexAutoRetriever', (['index'], {'vector_store_info': 'vector_store_info', 'service_context': 'index.service_context'}), '(index, vector_store_info=vector_store_info,\n service_context=index.service_context, **retriever_args)\n', (3015, 3120), False, 'from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever\n'), ((3376, 3471), 'llama_index.query_engine.retriever_query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', ([], {'retriever': 'retriever', 'service_context': 'index.service_context'}), '(retriever=retriever, service_context=index.\n service_context)\n', (3406, 3471), False, 'from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine\n'), ((3604, 3660), 'llama_index.tools.query_engine.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {'query_engine': 'query_engine'}), '(query_engine=query_engine)\n', (3633, 3660), False, 'from llama_index.tools.query_engine import QueryEngineTool\n'), ((3726, 3825), 'llama_index.agent.OpenAIAgent.from_tools', 'OpenAIAgent.from_tools', ([], {'tools': '[query_engine_tool]', 'llm': 'index.service_context.llm', 'verbose': '(True)'}), '(tools=[query_engine_tool], llm=index.service_context\n .llm, verbose=True)\n', (3748, 3825), False, 'from llama_index.agent import OpenAIAgent\n'), ((3975, 4051), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""How many months back to search (0=no limit)?"""', '(0)', '(130)', '(0)'], {}), "('How many months back to search (0=no limit)?', 0, 130, 0)\n", (3992, 4051), True, 'import streamlit as st\n'), ((4197, 4258), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""How many commits to retrieve"""', '(1)', '(150)', '(20)'], {}), "('How many commits to retrieve', 1, 150, 20)\n", (4214, 4258), True, 'import streamlit as st\n'), ((5443, 5486), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (5469, 5486), False, 'from llama_index import set_global_service_context\n'), ((5499, 5597), 'llama_index.indices.vector_store.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (5533, 5597), False, 'from llama_index.indices.vector_store import VectorStoreIndex\n'), ((7331, 7389), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (7350, 7389), False, 'import logging\n'), ((1240, 1297), 'psycopg2.connect', 'psycopg2.connect', ([], {'dsn': "st.secrets['TIMESCALE_SERVICE_URL']"}), "(dsn=st.secrets['TIMESCALE_SERVICE_URL'])\n", (1256, 1297), False, 'import psycopg2\n'), ((4160, 4184), 
'streamlit.session_state.clear', 'st.session_state.clear', ([], {}), '()\n', (4182, 4184), True, 'import streamlit as st\n'), ((4360, 4384), 'streamlit.session_state.clear', 'st.session_state.clear', ([], {}), '()\n', (4382, 4384), True, 'import streamlit as st\n'), ((4502, 4578), 'streamlit.error', 'st.error', (['"""No repositiories found, please [load some data first](/LoadData)"""'], {}), "('No repositiories found, please [load some data first](/LoadData)')\n", (4510, 4578), True, 'import streamlit as st\n'), ((4700, 4724), 'streamlit.session_state.clear', 'st.session_state.clear', ([], {}), '()\n', (4722, 4724), True, 'import streamlit as st\n'), ((4881, 4904), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4902, 4904), True, 'import streamlit as st\n'), ((5693, 5716), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (5714, 5716), True, 'import streamlit as st\n'), ((6233, 6263), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (6246, 6263), True, 'import streamlit as st\n'), ((6322, 6391), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (6354, 6391), True, 'import streamlit as st\n'), ((7425, 7465), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (7446, 7465), False, 'import logging\n'), ((4083, 4106), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4104, 4106), True, 'import streamlit as st\n'), ((4287, 4310), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4308, 4310), True, 'import streamlit as st\n'), ((4627, 4650), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4648, 4650), True, 'import streamlit as st\n'), ((5317, 5334), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (5326, 5334), False, 'from datetime import datetime, timedelta\n'), ((5399, 5437), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4"""', 'temperature': '(0.1)'}), "(model='gpt-4', temperature=0.1)\n", (5405, 5437), False, 'from llama_index.llms import OpenAI\n'), ((5849, 5863), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5861, 5863), False, 'from datetime import datetime, timedelta\n'), ((6486, 6518), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (6501, 6518), True, 'import streamlit as st\n'), ((6532, 6560), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (6540, 6560), True, 'import streamlit as st\n'), ((6705, 6733), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (6720, 6733), True, 'import streamlit as st\n'), ((7394, 7413), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7411, 7413), False, 'import logging\n'), ((2185, 2256), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""commit_hash"""', 'type': '"""str"""', 'description': '"""Commit Hash"""'}), "(name='commit_hash', type='str', description='Commit Hash')\n", (2197, 2256), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((2333, 2408), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""author"""', 'type': '"""str"""', 'description': '"""Author of the commit"""'}), "(name='author', type='str', 
description='Author of the commit')\n", (2345, 2408), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((2485, 2608), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""__start_date"""', 'type': '"""datetime in iso format"""', 'description': '"""All results will be after this datetime"""'}), "(name='__start_date', type='datetime in iso format',\n description='All results will be after this datetime')\n", (2497, 2608), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((2686, 2809), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""__end_date"""', 'type': '"""datetime in iso format"""', 'description': '"""All results will be before this datetime"""'}), "(name='__end_date', type='datetime in iso format', description=\n 'All results will be before this datetime')\n", (2698, 2809), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((5896, 5923), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(4 * months)'}), '(weeks=4 * months)\n', (5905, 5923), False, 'from datetime import datetime, timedelta\n'), ((6752, 6777), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (6762, 6777), True, 'import streamlit as st\n'), ((6806, 6882), 'streamlit.session_state.chat_engine.chat', 'st.session_state.chat_engine.chat', (['prompt'], {'function_call': '"""query_engine_tool"""'}), "(prompt, function_call='query_engine_tool')\n", (6839, 6882), True, 'import streamlit as st\n'), ((6899, 6926), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (6907, 6926), True, 'import streamlit as st\n'), ((7021, 7062), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (7053, 7062), True, 'import streamlit as st\n')] |
import logging
from threading import Thread
from typing import Any, List, Optional, Type
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.base.response.schema import RESPONSE_TYPE, StreamingResponse
from llama_index.core.callbacks import CallbackManager, trace_method
from llama_index.core.chat_engine.types import (
AgentChatResponse,
BaseChatEngine,
StreamingAgentChatResponse,
)
from llama_index.core.chat_engine.utils import response_gen_from_query_engine
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
from llama_index.core.base.llms.generic_utils import messages_to_history_str
from llama_index.core.llms.llm import LLM
from llama_index.core.memory import BaseMemory, ChatMemoryBuffer
from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate
from llama_index.core.service_context import ServiceContext
from llama_index.core.service_context_elements.llm_predictor import LLMPredictorType
from llama_index.core.settings import (
Settings,
callback_manager_from_settings_or_context,
)
from llama_index.core.tools import ToolOutput
logger = logging.getLogger(__name__)
DEFAULT_TEMPLATE = """\
Given a conversation (between Human and Assistant) and a follow up message from Human, \
rewrite the message to be a standalone question that captures all relevant context \
from the conversation.
<Chat History>
{chat_history}
<Follow Up Message>
{question}
<Standalone question>
"""
DEFAULT_PROMPT = PromptTemplate(DEFAULT_TEMPLATE)
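# Illustrative example (an assumption added for clarity, not part of this module):
# the condensation step simply formats DEFAULT_PROMPT and asks the LLM for a rewrite.
#
# standalone_prompt = DEFAULT_PROMPT.format(
#     chat_history="user: What is LlamaIndex?\nassistant: A data framework for LLM apps.",
#     question="How do I install it?",
# )
# # -> a single prompt string whose completion should be a standalone question,
# #    e.g. "How do I install LlamaIndex?"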
class CondenseQuestionChatEngine(BaseChatEngine):
    """Condense Question Chat Engine.

    First generate a standalone question from conversation context and last message,
    then query the query engine for a response.
    """

    def __init__(
        self,
        query_engine: BaseQueryEngine,
        condense_question_prompt: BasePromptTemplate,
        memory: BaseMemory,
        llm: LLMPredictorType,
        verbose: bool = False,
        callback_manager: Optional[CallbackManager] = None,
    ) -> None:
        self._query_engine = query_engine
        self._condense_question_prompt = condense_question_prompt
        self._memory = memory
        self._llm = llm
        self._verbose = verbose
        self.callback_manager = callback_manager or CallbackManager([])

    @classmethod
    def from_defaults(
        cls,
        query_engine: BaseQueryEngine,
        condense_question_prompt: Optional[BasePromptTemplate] = None,
        chat_history: Optional[List[ChatMessage]] = None,
        memory: Optional[BaseMemory] = None,
        memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
        service_context: Optional[ServiceContext] = None,
        verbose: bool = False,
        system_prompt: Optional[str] = None,
        prefix_messages: Optional[List[ChatMessage]] = None,
        llm: Optional[LLM] = None,
        **kwargs: Any,
    ) -> "CondenseQuestionChatEngine":
        """Initialize a CondenseQuestionChatEngine from default parameters."""
        condense_question_prompt = condense_question_prompt or DEFAULT_PROMPT

        if llm is None:
            service_context = service_context or ServiceContext.from_defaults(
                embed_model=MockEmbedding(embed_dim=2)
            )
            llm = service_context.llm
        else:
            service_context = service_context or ServiceContext.from_defaults(
                llm=llm, embed_model=MockEmbedding(embed_dim=2)
            )

        chat_history = chat_history or []
        memory = memory or memory_cls.from_defaults(chat_history=chat_history, llm=llm)

        if system_prompt is not None:
            raise NotImplementedError(
                "system_prompt is not supported for CondenseQuestionChatEngine."
            )
        if prefix_messages is not None:
            raise NotImplementedError(
                "prefix_messages is not supported for CondenseQuestionChatEngine."
            )

        return cls(
            query_engine,
            condense_question_prompt,
            memory,
            llm,
            verbose=verbose,
            callback_manager=callback_manager_from_settings_or_context(
                Settings, service_context
            ),
        )
    def _condense_question(
        self, chat_history: List[ChatMessage], last_message: str
    ) -> str:
        """
        Generate standalone question from conversation context and last message.
        """
        chat_history_str = messages_to_history_str(chat_history)
        logger.debug(chat_history_str)

        return self._llm.predict(
            self._condense_question_prompt,
            question=last_message,
            chat_history=chat_history_str,
        )

    async def _acondense_question(
        self, chat_history: List[ChatMessage], last_message: str
    ) -> str:
        """
        Generate standalone question from conversation context and last message.
        """
        chat_history_str = messages_to_history_str(chat_history)
        logger.debug(chat_history_str)

        return await self._llm.apredict(
            self._condense_question_prompt,
            question=last_message,
            chat_history=chat_history_str,
        )

    def _get_tool_output_from_response(
        self, query: str, response: RESPONSE_TYPE
    ) -> ToolOutput:
        if isinstance(response, StreamingResponse):
            return ToolOutput(
                content="",
                tool_name="query_engine",
                raw_input={"query": query},
                raw_output=response,
            )
        else:
            return ToolOutput(
                content=str(response),
                tool_name="query_engine",
                raw_input={"query": query},
                raw_output=response,
            )
    @trace_method("chat")
    def chat(
        self, message: str, chat_history: Optional[List[ChatMessage]] = None
    ) -> AgentChatResponse:
        chat_history = chat_history or self._memory.get()

        # Generate standalone question from conversation context and last message
        condensed_question = self._condense_question(chat_history, message)

        log_str = f"Querying with: {condensed_question}"
        logger.info(log_str)
        if self._verbose:
            print(log_str)

        # TODO: right now, query engine uses class attribute to configure streaming,
        # we are moving towards separate streaming and non-streaming methods.
        # In the meanwhile, use this hack to toggle streaming.
        from llama_index.core.query_engine.retriever_query_engine import (
            RetrieverQueryEngine,
        )

        if isinstance(self._query_engine, RetrieverQueryEngine):
            is_streaming = self._query_engine._response_synthesizer._streaming
            self._query_engine._response_synthesizer._streaming = False

        # Query with standalone question
        query_response = self._query_engine.query(condensed_question)

        # NOTE: reset streaming flag
        if isinstance(self._query_engine, RetrieverQueryEngine):
            self._query_engine._response_synthesizer._streaming = is_streaming

        tool_output = self._get_tool_output_from_response(
            condensed_question, query_response
        )

        # Record response
        self._memory.put(ChatMessage(role=MessageRole.USER, content=message))
        self._memory.put(
            ChatMessage(role=MessageRole.ASSISTANT, content=str(query_response))
        )

        return AgentChatResponse(response=str(query_response), sources=[tool_output])
    @trace_method("chat")
    def stream_chat(
        self, message: str, chat_history: Optional[List[ChatMessage]] = None
    ) -> StreamingAgentChatResponse:
        chat_history = chat_history or self._memory.get()

        # Generate standalone question from conversation context and last message
        condensed_question = self._condense_question(chat_history, message)

        log_str = f"Querying with: {condensed_question}"
        logger.info(log_str)
        if self._verbose:
            print(log_str)

        # TODO: right now, query engine uses class attribute to configure streaming,
        # we are moving towards separate streaming and non-streaming methods.
        # In the meanwhile, use this hack to toggle streaming.
        from llama_index.core.query_engine.retriever_query_engine import (
            RetrieverQueryEngine,
        )

        if isinstance(self._query_engine, RetrieverQueryEngine):
            is_streaming = self._query_engine._response_synthesizer._streaming
            self._query_engine._response_synthesizer._streaming = True

        # Query with standalone question
        query_response = self._query_engine.query(condensed_question)

        # NOTE: reset streaming flag
        if isinstance(self._query_engine, RetrieverQueryEngine):
            self._query_engine._response_synthesizer._streaming = is_streaming

        tool_output = self._get_tool_output_from_response(
            condensed_question, query_response
        )

        # Record response
        if (
            isinstance(query_response, StreamingResponse)
            and query_response.response_gen is not None
        ):
            # override the generator to include writing to chat history
            self._memory.put(ChatMessage(role=MessageRole.USER, content=message))
            response = StreamingAgentChatResponse(
                chat_stream=response_gen_from_query_engine(query_response.response_gen),
                sources=[tool_output],
            )
            thread = Thread(
                target=response.write_response_to_history, args=(self._memory, True)
            )
            thread.start()
        else:
            raise ValueError("Streaming is not enabled. Please use chat() instead.")

        return response
    @trace_method("chat")
    async def achat(
        self, message: str, chat_history: Optional[List[ChatMessage]] = None
    ) -> AgentChatResponse:
        chat_history = chat_history or self._memory.get()

        # Generate standalone question from conversation context and last message
        condensed_question = await self._acondense_question(chat_history, message)

        log_str = f"Querying with: {condensed_question}"
        logger.info(log_str)
        if self._verbose:
            print(log_str)

        # TODO: right now, query engine uses class attribute to configure streaming,
        # we are moving towards separate streaming and non-streaming methods.
        # In the meanwhile, use this hack to toggle streaming.
        from llama_index.core.query_engine.retriever_query_engine import (
            RetrieverQueryEngine,
        )

        if isinstance(self._query_engine, RetrieverQueryEngine):
            is_streaming = self._query_engine._response_synthesizer._streaming
            self._query_engine._response_synthesizer._streaming = False

        # Query with standalone question
        query_response = await self._query_engine.aquery(condensed_question)

        # NOTE: reset streaming flag
        if isinstance(self._query_engine, RetrieverQueryEngine):
            self._query_engine._response_synthesizer._streaming = is_streaming

        tool_output = self._get_tool_output_from_response(
            condensed_question, query_response
        )

        # Record response
        self._memory.put(ChatMessage(role=MessageRole.USER, content=message))
        self._memory.put(
            ChatMessage(role=MessageRole.ASSISTANT, content=str(query_response))
        )

        return AgentChatResponse(response=str(query_response), sources=[tool_output])
    @trace_method("chat")
    async def astream_chat(
        self, message: str, chat_history: Optional[List[ChatMessage]] = None
    ) -> StreamingAgentChatResponse:
        chat_history = chat_history or self._memory.get()

        # Generate standalone question from conversation context and last message
        condensed_question = await self._acondense_question(chat_history, message)

        log_str = f"Querying with: {condensed_question}"
        logger.info(log_str)
        if self._verbose:
            print(log_str)

        # TODO: right now, query engine uses class attribute to configure streaming,
        # we are moving towards separate streaming and non-streaming methods.
        # In the meanwhile, use this hack to toggle streaming.
        from llama_index.core.query_engine.retriever_query_engine import (
            RetrieverQueryEngine,
        )

        if isinstance(self._query_engine, RetrieverQueryEngine):
            is_streaming = self._query_engine._response_synthesizer._streaming
            self._query_engine._response_synthesizer._streaming = True

        # Query with standalone question
        query_response = await self._query_engine.aquery(condensed_question)

        # NOTE: reset streaming flag
        if isinstance(self._query_engine, RetrieverQueryEngine):
            self._query_engine._response_synthesizer._streaming = is_streaming

        tool_output = self._get_tool_output_from_response(
            condensed_question, query_response
        )

        # Record response
        if (
            isinstance(query_response, StreamingResponse)
            and query_response.response_gen is not None
        ):
            # override the generator to include writing to chat history
            # TODO: query engine does not support async generator yet
            self._memory.put(ChatMessage(role=MessageRole.USER, content=message))
            response = StreamingAgentChatResponse(
                chat_stream=response_gen_from_query_engine(query_response.response_gen),
                sources=[tool_output],
            )
            thread = Thread(
                target=response.write_response_to_history, args=(self._memory,)
            )
            thread.start()
        else:
            raise ValueError("Streaming is not enabled. Please use achat() instead.")

        return response

    def reset(self) -> None:
        # Clear chat history
        self._memory.reset()

    @property
    def chat_history(self) -> List[ChatMessage]:
        """Get chat history."""
        return self._memory.get_all()
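# Hedged usage sketch (illustrative only, not part of this module): wiring the engine
# to a query engine built from local documents; the data path and question are placeholders.
#
# from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
#
# index = VectorStoreIndex.from_documents(SimpleDirectoryReader("data").load_data())
# engine = CondenseQuestionChatEngine.from_defaults(
#     query_engine=index.as_query_engine(), verbose=True
# )
# print(engine.chat("What does the report say about revenue?"))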
| [
"llama_index.core.tools.ToolOutput",
"llama_index.core.chat_engine.utils.response_gen_from_query_engine",
"llama_index.core.prompts.base.PromptTemplate",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.settings.callback_manager_from_settings_or_context",
"llama_index.core.base.llms.types.ChatMessage",
"llama_index.core.embeddings.mock_embed_model.MockEmbedding",
"llama_index.core.callbacks.trace_method",
"llama_index.core.base.llms.generic_utils.messages_to_history_str"
] | [((1220, 1247), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1237, 1247), False, 'import logging\n'), ((1579, 1611), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['DEFAULT_TEMPLATE'], {}), '(DEFAULT_TEMPLATE)\n', (1593, 1611), False, 'from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate\n'), ((5895, 5915), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (5907, 5915), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((7693, 7713), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (7705, 7713), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((9987, 10007), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (9999, 10007), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((11806, 11826), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (11818, 11826), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((4566, 4603), 'llama_index.core.base.llms.generic_utils.messages_to_history_str', 'messages_to_history_str', (['chat_history'], {}), '(chat_history)\n', (4589, 4603), False, 'from llama_index.core.base.llms.generic_utils import messages_to_history_str\n'), ((5057, 5094), 'llama_index.core.base.llms.generic_utils.messages_to_history_str', 'messages_to_history_str', (['chat_history'], {}), '(chat_history)\n', (5080, 5094), False, 'from llama_index.core.base.llms.generic_utils import messages_to_history_str\n'), ((2381, 2400), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2396, 2400), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((5491, 5592), 'llama_index.core.tools.ToolOutput', 'ToolOutput', ([], {'content': '""""""', 'tool_name': '"""query_engine"""', 'raw_input': "{'query': query}", 'raw_output': 'response'}), "(content='', tool_name='query_engine', raw_input={'query': query},\n raw_output=response)\n", (5501, 5592), False, 'from llama_index.core.tools import ToolOutput\n'), ((7430, 7481), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'message'}), '(role=MessageRole.USER, content=message)\n', (7441, 7481), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n'), ((9724, 9800), 'threading.Thread', 'Thread', ([], {'target': 'response.write_response_to_history', 'args': '(self._memory, True)'}), '(target=response.write_response_to_history, args=(self._memory, True))\n', (9730, 9800), False, 'from threading import Thread\n'), ((11543, 11594), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'message'}), '(role=MessageRole.USER, content=message)\n', (11554, 11594), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n'), ((13928, 13999), 'threading.Thread', 'Thread', ([], {'target': 'response.write_response_to_history', 'args': '(self._memory,)'}), '(target=response.write_response_to_history, args=(self._memory,))\n', (13934, 13999), False, 'from threading import Thread\n'), ((4216, 4284), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, 
service_context)\n', (4257, 4284), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context\n'), ((9457, 9508), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'message'}), '(role=MessageRole.USER, content=message)\n', (9468, 9508), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n'), ((13661, 13712), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'message'}), '(role=MessageRole.USER, content=message)\n', (13672, 13712), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n'), ((9589, 9648), 'llama_index.core.chat_engine.utils.response_gen_from_query_engine', 'response_gen_from_query_engine', (['query_response.response_gen'], {}), '(query_response.response_gen)\n', (9619, 9648), False, 'from llama_index.core.chat_engine.utils import response_gen_from_query_engine\n'), ((13793, 13852), 'llama_index.core.chat_engine.utils.response_gen_from_query_engine', 'response_gen_from_query_engine', (['query_response.response_gen'], {}), '(query_response.response_gen)\n', (13823, 13852), False, 'from llama_index.core.chat_engine.utils import response_gen_from_query_engine\n'), ((3306, 3332), 'llama_index.core.embeddings.mock_embed_model.MockEmbedding', 'MockEmbedding', ([], {'embed_dim': '(2)'}), '(embed_dim=2)\n', (3319, 3332), False, 'from llama_index.core.embeddings.mock_embed_model import MockEmbedding\n'), ((3515, 3541), 'llama_index.core.embeddings.mock_embed_model.MockEmbedding', 'MockEmbedding', ([], {'embed_dim': '(2)'}), '(embed_dim=2)\n', (3528, 3541), False, 'from llama_index.core.embeddings.mock_embed_model import MockEmbedding\n')] |
from typing import List
from llama_index.readers.base import BaseReader
from llama_index.readers.youtube_transcript import YoutubeTranscriptReader
from llama_index.schema import Document
class LyzrYoutubeReader(BaseReader):
    def __init__(self) -> None:
        try:
            # imported only to verify the optional dependency is installed
            from youtube_transcript_api import YouTubeTranscriptApi
        except ImportError:
            raise ImportError(
                "`youtube_transcript_api` package not found, "
                "please run `pip install youtube-transcript-api`"
            )

    def load_data(self, urls: List[str]) -> List[Document]:
        loader = YoutubeTranscriptReader()
        documents = loader.load_data(ytlinks=urls)
        return documents
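# Hedged usage sketch (illustrative only; the video URL is a placeholder):
#
# reader = LyzrYoutubeReader()
# docs = reader.load_data(urls=["https://www.youtube.com/watch?v=<video-id>"])
# print(len(docs), docs[0].text[:200])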
| [
"llama_index.readers.youtube_transcript.YoutubeTranscriptReader"
] | [((623, 648), 'llama_index.readers.youtube_transcript.YoutubeTranscriptReader', 'YoutubeTranscriptReader', ([], {}), '()\n', (646, 648), False, 'from llama_index.readers.youtube_transcript import YoutubeTranscriptReader\n')] |
import sys
import asyncio
import logging
import warnings
import nest_asyncio
from typing import List, Set
from bs4 import BeautifulSoup, Tag
from typing import List
from llama_index.schema import Document
IS_IPYKERNEL = "ipykernel_launcher" in sys.argv[0]
if IS_IPYKERNEL:
    nest_asyncio.apply()

logger = logging.getLogger(__name__)

CONTENT_TAGS = [
    "p",
    "div",
    "span",
    "a",
    "td",
    "tr",
    "li",
    "article",
    "section",
    "pre",
    "code",
    "blockquote",
    "em",
    "strong",
    "b",
    "i",
    "h1",
    "h2",
    "h3",
    "h4",
    "h5",
    "h6",
    "title",
]


def scrape(html: str) -> str:
    soup: BeautifulSoup = BeautifulSoup(html, "html.parser")
    content: List[Tag] = soup.find_all(CONTENT_TAGS)

    text_set: Set[str] = set()
    for p in content:
        for text in p.stripped_strings:
            text_set.add(text)

    return " ".join(text_set)


async def async_load_content_using_playwright(url: str) -> str:
    try:
        from playwright.async_api import async_playwright

        async with async_playwright() as p:
            browser = await p.chromium.launch()
            page = await browser.new_page()
            await page.goto(url)
            html = await page.content()
            await browser.close()
            return html
    except ImportError:
        raise ImportError(
            "`playwright` package not found, please install it with "
            "`pip install playwright && playwright install`"
        )


def load_content_using_playwright(url: str) -> str:
    return asyncio.get_event_loop().run_until_complete(
        async_load_content_using_playwright(url)
    )


class LyzrWebPageReader:
    def __init__(self) -> None:
        pass

    @staticmethod
    def load_data(url: str) -> List[Document]:
        if IS_IPYKERNEL:
            warning_msg = "Running in Google Colab or a Jupyter notebook. Consider using nest_asyncio.apply() to avoid event loop conflicts."
            warnings.warn(warning_msg, RuntimeWarning)

        html = load_content_using_playwright(url)
        content = scrape(html)
        document = Document(text=content, metadata={"url": url})
        return [document]
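# Hedged usage sketch (illustrative only; the URL is a placeholder):
#
# docs = LyzrWebPageReader.load_data("https://example.com")
# print(docs[0].metadata["url"], len(docs[0].text))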
| [
"llama_index.schema.Document"
] | [((312, 339), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (329, 339), False, 'import logging\n'), ((281, 301), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (299, 301), False, 'import nest_asyncio\n'), ((676, 710), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (689, 710), False, 'from bs4 import BeautifulSoup, Tag\n'), ((2182, 2227), 'llama_index.schema.Document', 'Document', ([], {'text': 'content', 'metadata': "{'url': url}"}), "(text=content, metadata={'url': url})\n", (2190, 2227), False, 'from llama_index.schema import Document\n'), ((1088, 1106), 'playwright.async_api.async_playwright', 'async_playwright', ([], {}), '()\n', (1104, 1106), False, 'from playwright.async_api import async_playwright\n'), ((1609, 1633), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1631, 1633), False, 'import asyncio\n'), ((2030, 2072), 'warnings.warn', 'warnings.warn', (['warning_msg', 'RuntimeWarning'], {}), '(warning_msg, RuntimeWarning)\n', (2043, 2072), False, 'import warnings\n')] |
import logging
from typing import Optional, Union
from llama_index import ServiceContext
from llama_index.callbacks import CallbackManager
from llama_index.embeddings.utils import EmbedType
from llama_index.llms.utils import LLMType
from llama_index.prompts import PromptTemplate
from llama_index.prompts.base import BasePromptTemplate
from llama_index.node_parser import (
SimpleNodeParser,
)
logger = logging.getLogger(__name__)
class LyzrService:
    @staticmethod
    def from_defaults(
        llm: Optional[LLMType] = "default",
        embed_model: Optional[EmbedType] = "default",
        system_prompt: str = None,
        query_wrapper_prompt: Union[str, BasePromptTemplate] = None,
        **kwargs,
    ) -> ServiceContext:
        if isinstance(query_wrapper_prompt, str):
            query_wrapper_prompt = PromptTemplate(template=query_wrapper_prompt)

        # pop so the callback manager is not passed twice via **kwargs below
        callback_manager: CallbackManager = kwargs.pop(
            "callback_manager", CallbackManager()
        )

        node_parser = SimpleNodeParser.from_defaults(
            chunk_size=750,
            chunk_overlap=100,
            callback_manager=callback_manager,
        )

        service_context = ServiceContext.from_defaults(
            llm=llm,
            embed_model=embed_model,
            system_prompt=system_prompt,
            query_wrapper_prompt=query_wrapper_prompt,
            callback_manager=callback_manager,
            node_parser=node_parser,
            **kwargs,
        )
        return service_context
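# Hedged usage sketch (illustrative only, not part of this module): hand the resulting
# ServiceContext to an index; the data directory is a placeholder.
#
# from llama_index import SimpleDirectoryReader, VectorStoreIndex
#
# service_context = LyzrService.from_defaults(system_prompt="Answer concisely.")
# index = VectorStoreIndex.from_documents(
#     SimpleDirectoryReader("data").load_data(),
#     service_context=service_context,
# )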
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.callbacks.CallbackManager",
"llama_index.node_parser.SimpleNodeParser.from_defaults",
"llama_index.prompts.PromptTemplate"
] | [((409, 436), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (426, 436), False, 'import logging\n'), ((1016, 1120), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': '(750)', 'chunk_overlap': '(100)', 'callback_manager': 'callback_manager'}), '(chunk_size=750, chunk_overlap=100,\n callback_manager=callback_manager)\n', (1046, 1120), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((1191, 1403), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt', 'callback_manager': 'callback_manager', 'node_parser': 'node_parser'}), '(llm=llm, embed_model=embed_model,\n system_prompt=system_prompt, query_wrapper_prompt=query_wrapper_prompt,\n callback_manager=callback_manager, node_parser=node_parser, **kwargs)\n', (1219, 1403), False, 'from llama_index import ServiceContext\n'), ((830, 875), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'query_wrapper_prompt'}), '(template=query_wrapper_prompt)\n', (844, 875), False, 'from llama_index.prompts import PromptTemplate\n'), ((965, 982), 'llama_index.callbacks.CallbackManager', 'CallbackManager', ([], {}), '()\n', (980, 982), False, 'from llama_index.callbacks import CallbackManager\n')] |
from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex
from llama_index.response.pprint_utils import pprint_response
from langchain.chat_models import ChatOpenAI
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index.query_engine import SubQuestionQueryEngine
from dotenv import load_dotenv
import gradio as gr
import os, sys
import logging
#loads dotenv lib to retrieve API keys from .env file
load_dotenv()
# enable INFO level logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
#define LLM service
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo"))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
#set the global service context object, avoiding passing service_context when building the index or when loading index from vector store
from llama_index import set_global_service_context
set_global_service_context(service_context)
def data_ingestion_indexing():
    # load data
    report_2021_docs = SimpleDirectoryReader(input_files=["reports/executive-summary-2021.pdf"]).load_data()
    print(f"loaded executive summary 2021 with {len(report_2021_docs)} pages")

    report_2022_docs = SimpleDirectoryReader(input_files=["reports/executive-summary-2022.pdf"]).load_data()
    print(f"loaded executive summary 2022 with {len(report_2022_docs)} pages")

    # build indices
    report_2021_index = GPTVectorStoreIndex.from_documents(report_2021_docs)
    print(f"built index for executive summary 2021 with {len(report_2021_index.docstore.docs)} nodes")

    report_2022_index = GPTVectorStoreIndex.from_documents(report_2022_docs)
    print(f"built index for executive summary 2022 with {len(report_2022_index.docstore.docs)} nodes")

    # build query engines
    report_2021_engine = report_2021_index.as_query_engine(similarity_top_k=3)
    report_2022_engine = report_2022_index.as_query_engine(similarity_top_k=3)

    # build query engine tools
    query_engine_tools = [
        QueryEngineTool(
            query_engine=report_2021_engine,
            metadata=ToolMetadata(name='executive_summary_2021', description='Provides information on US government financial report executive summary 2021')
        ),
        QueryEngineTool(
            query_engine=report_2022_engine,
            metadata=ToolMetadata(name='executive_summary_2022', description='Provides information on US government financial report executive summary 2022')
        )
    ]

    # define SubQuestionQueryEngine
    sub_question_engine = SubQuestionQueryEngine.from_defaults(query_engine_tools=query_engine_tools)

    return sub_question_engine

def data_querying(input_text):
    # queries the engine with the input text
    response = sub_question_engine.query(input_text)
    return response.response

iface = gr.Interface(fn=data_querying,
                     inputs=gr.components.Textbox(lines=3, label="Enter your question"),
                     outputs="text",
                     title="Analyzing the U.S. Government's Financial Reports for 2021 and 2022")
#data ingestion and indexing
sub_question_engine = data_ingestion_indexing()
iface.launch(share=False)
#run queries
#response = sub_question_engine.query('Compare and contrast the DoD costs between 2021 and 2022')
#print(response)
#response = sub_question_engine.query('Compare revenue growth from 2021 to 2022')
#print(response)
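#hedged sketch (illustrative): to skip re-embedding on every launch, the indices built inside
#data_ingestion_indexing() could be persisted and reloaded, e.g.:
#report_2021_index.storage_context.persist(persist_dir="storage/2021")
#report_2022_index.storage_context.persist(persist_dir="storage/2022")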
| [
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.tools.ToolMetadata",
"llama_index.set_global_service_context",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((460, 473), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (471, 473), False, 'from dotenv import load_dotenv\n'), ((503, 561), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (522, 561), False, 'import logging\n'), ((762, 819), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (790, 819), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((1009, 1052), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (1035, 1052), False, 'from llama_index import set_global_service_context\n'), ((593, 633), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (614, 633), False, 'import logging\n'), ((1521, 1573), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['report_2021_docs'], {}), '(report_2021_docs)\n', (1555, 1573), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((1702, 1754), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['report_2022_docs'], {}), '(report_2022_docs)\n', (1736, 1754), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((2653, 2728), 'llama_index.query_engine.SubQuestionQueryEngine.from_defaults', 'SubQuestionQueryEngine.from_defaults', ([], {'query_engine_tools': 'query_engine_tools'}), '(query_engine_tools=query_engine_tools)\n', (2689, 2728), False, 'from llama_index.query_engine import SubQuestionQueryEngine\n'), ((562, 581), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (579, 581), False, 'import logging\n'), ((689, 742), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (699, 742), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3001, 3060), 'gradio.components.Textbox', 'gr.components.Textbox', ([], {'lines': '(3)', 'label': '"""Enter your question"""'}), "(lines=3, label='Enter your question')\n", (3022, 3060), True, 'import gradio as gr\n'), ((1123, 1196), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['reports/executive-summary-2021.pdf']"}), "(input_files=['reports/executive-summary-2021.pdf'])\n", (1144, 1196), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((1312, 1385), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['reports/executive-summary-2022.pdf']"}), "(input_files=['reports/executive-summary-2022.pdf'])\n", (1333, 1385), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((2195, 2341), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""executive_summary_2021"""', 'description': '"""Provides information on US government financial report executive summary 2021"""'}), "(name='executive_summary_2021', description=\n 'Provides information on US government financial report executive summary 2021'\n )\n", (2207, 2341), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n'), ((2438, 2584), 
'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""executive_summary_2022"""', 'description': '"""Provides information on US government financial report executive summary 2022"""'}), "(name='executive_summary_2022', description=\n 'Provides information on US government financial report executive summary 2022'\n )\n", (2450, 2584), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n')] |
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
set_global_handler
)
import phoenix as px
px.launch_app()
set_global_handler("arize_phoenix")
documents = SimpleDirectoryReader('files').load_data()
index = VectorStoreIndex.from_documents(documents)
qe = index.as_query_engine()
response1 = qe.query("Tell me about ancient Rome")
response2 = qe.query("Where is the Colosseum?")
print(str(response1)+"\n"+str(response2))
# EVALUATION PART
# adapted from the examples available on the official Phoenix documentation: https://docs.arize.com/phoenix/
from phoenix.session.evaluation import (
get_qa_with_reference,
get_retrieved_documents
)
from phoenix.trace import DocumentEvaluations, SpanEvaluations
from phoenix.experimental.evals import (
HallucinationEvaluator,
QAEvaluator,
RelevanceEvaluator,
OpenAIModel,
run_evals
)
model = OpenAIModel(model="gpt-4-turbo-preview")
retrieved_documents_df = get_retrieved_documents(px.Client())
queries_df = get_qa_with_reference(px.Client())
hallucination_evaluator = HallucinationEvaluator(model)
qa_correctness_evaluator = QAEvaluator(model)
relevance_evaluator = RelevanceEvaluator(model)
hallucination_eval_df, qa_correctness_eval_df = run_evals(
dataframe=queries_df,
evaluators=[hallucination_evaluator, qa_correctness_evaluator],
provide_explanation=True,
)
relevance_eval_df = run_evals(
dataframe=retrieved_documents_df,
evaluators=[relevance_evaluator],
provide_explanation=True,
)[0]
px.Client().log_evaluations(
SpanEvaluations(
eval_name="Hallucination",
dataframe=hallucination_eval_df),
SpanEvaluations(
eval_name="QA Correctness",
dataframe=qa_correctness_eval_df),
DocumentEvaluations(
eval_name="Relevance",
dataframe=relevance_eval_df),
)
input("Press <ENTER> to exit...")
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.set_global_handler"
] | [((129, 144), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (142, 144), True, 'import phoenix as px\n'), ((145, 180), 'llama_index.core.set_global_handler', 'set_global_handler', (['"""arize_phoenix"""'], {}), "('arize_phoenix')\n", (163, 180), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, set_global_handler\n'), ((245, 287), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (276, 287), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, set_global_handler\n'), ((901, 941), 'phoenix.experimental.evals.OpenAIModel', 'OpenAIModel', ([], {'model': '"""gpt-4-turbo-preview"""'}), "(model='gpt-4-turbo-preview')\n", (912, 941), False, 'from phoenix.experimental.evals import HallucinationEvaluator, QAEvaluator, RelevanceEvaluator, OpenAIModel, run_evals\n'), ((1080, 1109), 'phoenix.experimental.evals.HallucinationEvaluator', 'HallucinationEvaluator', (['model'], {}), '(model)\n', (1102, 1109), False, 'from phoenix.experimental.evals import HallucinationEvaluator, QAEvaluator, RelevanceEvaluator, OpenAIModel, run_evals\n'), ((1137, 1155), 'phoenix.experimental.evals.QAEvaluator', 'QAEvaluator', (['model'], {}), '(model)\n', (1148, 1155), False, 'from phoenix.experimental.evals import HallucinationEvaluator, QAEvaluator, RelevanceEvaluator, OpenAIModel, run_evals\n'), ((1178, 1203), 'phoenix.experimental.evals.RelevanceEvaluator', 'RelevanceEvaluator', (['model'], {}), '(model)\n', (1196, 1203), False, 'from phoenix.experimental.evals import HallucinationEvaluator, QAEvaluator, RelevanceEvaluator, OpenAIModel, run_evals\n'), ((1253, 1378), 'phoenix.experimental.evals.run_evals', 'run_evals', ([], {'dataframe': 'queries_df', 'evaluators': '[hallucination_evaluator, qa_correctness_evaluator]', 'provide_explanation': '(True)'}), '(dataframe=queries_df, evaluators=[hallucination_evaluator,\n qa_correctness_evaluator], provide_explanation=True)\n', (1262, 1378), False, 'from phoenix.experimental.evals import HallucinationEvaluator, QAEvaluator, RelevanceEvaluator, OpenAIModel, run_evals\n'), ((992, 1003), 'phoenix.Client', 'px.Client', ([], {}), '()\n', (1001, 1003), True, 'import phoenix as px\n'), ((1040, 1051), 'phoenix.Client', 'px.Client', ([], {}), '()\n', (1049, 1051), True, 'import phoenix as px\n'), ((1410, 1518), 'phoenix.experimental.evals.run_evals', 'run_evals', ([], {'dataframe': 'retrieved_documents_df', 'evaluators': '[relevance_evaluator]', 'provide_explanation': '(True)'}), '(dataframe=retrieved_documents_df, evaluators=[relevance_evaluator\n ], provide_explanation=True)\n', (1419, 1518), False, 'from phoenix.experimental.evals import HallucinationEvaluator, QAEvaluator, RelevanceEvaluator, OpenAIModel, run_evals\n'), ((1566, 1641), 'phoenix.trace.SpanEvaluations', 'SpanEvaluations', ([], {'eval_name': '"""Hallucination"""', 'dataframe': 'hallucination_eval_df'}), "(eval_name='Hallucination', dataframe=hallucination_eval_df)\n", (1581, 1641), False, 'from phoenix.trace import DocumentEvaluations, SpanEvaluations\n'), ((1665, 1742), 'phoenix.trace.SpanEvaluations', 'SpanEvaluations', ([], {'eval_name': '"""QA Correctness"""', 'dataframe': 'qa_correctness_eval_df'}), "(eval_name='QA Correctness', dataframe=qa_correctness_eval_df)\n", (1680, 1742), False, 'from phoenix.trace import DocumentEvaluations, SpanEvaluations\n'), ((1766, 1837), 'phoenix.trace.DocumentEvaluations', 'DocumentEvaluations', ([], {'eval_name': '"""Relevance"""', 
'dataframe': 'relevance_eval_df'}), "(eval_name='Relevance', dataframe=relevance_eval_df)\n", (1785, 1837), False, 'from phoenix.trace import DocumentEvaluations, SpanEvaluations\n'), ((194, 224), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""files"""'], {}), "('files')\n", (215, 224), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, set_global_handler\n'), ((1533, 1544), 'phoenix.Client', 'px.Client', ([], {}), '()\n', (1542, 1544), True, 'import phoenix as px\n')] |
from llama_index.core import Settings, Document, VectorStoreIndex
from llama_index.core.node_parser import SentenceWindowNodeParser
doc = Document(
text="Sentence 1. Sentence 2. Sentence 3."
)
text_splitter = SentenceWindowNodeParser.from_defaults(
    window_size=2,
window_metadata_key="ContextWindow",
original_text_metadata_key="node_text"
)
Settings.text_splitter = text_splitter
index = VectorStoreIndex.from_documents([doc])
retriever = index.as_retriever(similarity_top_k=1)
response = retriever.retrieve("Display the second sentence")
print(response[0].node.metadata['node_text'])
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.core.Document"
] | [((138, 190), 'llama_index.core.Document', 'Document', ([], {'text': '"""Sentence 1. Sentence 2. Sentence 3."""'}), "(text='Sentence 1. Sentence 2. Sentence 3.')\n", (146, 190), False, 'from llama_index.core import Settings, Document, VectorStoreIndex\n'), ((213, 348), 'llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': '(2)', 'window_metadata_key': '"""ContextWindow"""', 'original_text_metadata_key': '"""node_text"""'}), "(window_size=2, window_metadata_key=\n 'ContextWindow', original_text_metadata_key='node_text')\n", (251, 348), False, 'from llama_index.core.node_parser import SentenceWindowNodeParser\n'), ((408, 446), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['[doc]'], {}), '([doc])\n', (439, 446), False, 'from llama_index.core import Settings, Document, VectorStoreIndex\n')] |
import asyncio
from llama_index.core import KeywordTableIndex
from llama_index.core import SimpleDirectoryReader
async def retrieve(retriever, query, label):
response = await retriever.aretrieve(query)
print(f"{label} retrieved {str(len(response))} nodes")
async def main():
reader = SimpleDirectoryReader('files')
documents = reader.load_data()
index = KeywordTableIndex.from_documents(documents)
retriever1 = index.as_retriever(
retriever_mode='default'
)
retriever2 = index.as_retriever(
retriever_mode='simple'
)
query = "Where is the Colosseum?"
await asyncio.gather(
retrieve(retriever1, query, '<llm>'),
retrieve(retriever2, query, '<simple>')
)
asyncio.run(main())
| [
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.KeywordTableIndex.from_documents"
] | [((298, 328), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""files"""'], {}), "('files')\n", (319, 328), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((376, 419), 'llama_index.core.KeywordTableIndex.from_documents', 'KeywordTableIndex.from_documents', (['documents'], {}), '(documents)\n', (408, 419), False, 'from llama_index.core import KeywordTableIndex\n')] |
import tiktoken
from llama_index.core import MockEmbedding, VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.core.callbacks import CallbackManager, TokenCountingHandler
from llama_index.core.llms.mock import MockLLM
embed_model = MockEmbedding(embed_dim=1536)
llm = MockLLM(max_tokens=256)
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode
)
callback_manager = CallbackManager([token_counter])
Settings.embed_model=embed_model
Settings.llm=llm
Settings.callback_manager=callback_manager
documents = SimpleDirectoryReader("cost_prediction_samples").load_data()
index = VectorStoreIndex.from_documents(
documents=documents,
show_progress=True)
print("Embedding Token Count:", token_counter.total_embedding_token_count)
query_engine = index.as_query_engine()
response = query_engine.query("What's the cat's name?")
print("Query LLM Token Count:", token_counter.total_llm_token_count)
print("Query Embedding Token Count:",token_counter.total_embedding_token_count)
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.llms.mock.MockLLM",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.MockEmbedding"
] | [((249, 278), 'llama_index.core.MockEmbedding', 'MockEmbedding', ([], {'embed_dim': '(1536)'}), '(embed_dim=1536)\n', (262, 278), False, 'from llama_index.core import MockEmbedding, VectorStoreIndex, SimpleDirectoryReader, Settings\n'), ((285, 308), 'llama_index.core.llms.mock.MockLLM', 'MockLLM', ([], {'max_tokens': '(256)'}), '(max_tokens=256)\n', (292, 308), False, 'from llama_index.core.llms.mock import MockLLM\n'), ((434, 466), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (449, 466), False, 'from llama_index.core.callbacks import CallbackManager, TokenCountingHandler\n'), ((643, 715), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents', 'show_progress': '(True)'}), '(documents=documents, show_progress=True)\n', (674, 715), False, 'from llama_index.core import MockEmbedding, VectorStoreIndex, SimpleDirectoryReader, Settings\n'), ((574, 622), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""cost_prediction_samples"""'], {}), "('cost_prediction_samples')\n", (595, 622), False, 'from llama_index.core import MockEmbedding, VectorStoreIndex, SimpleDirectoryReader, Settings\n'), ((361, 405), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-3.5-turbo"""'], {}), "('gpt-3.5-turbo')\n", (388, 405), False, 'import tiktoken\n')] |
from typing import Any, List, Optional, Sequence
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.llms.llm import LLM
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts import BasePromptTemplate
from llama_index.core.prompts.mixin import PromptMixinType
from llama_index.core.response_synthesizers import (
BaseSynthesizer,
ResponseMode,
get_response_synthesizer,
)
from llama_index.core.schema import NodeWithScore, QueryBundle
from llama_index.core.service_context import ServiceContext
from llama_index.core.settings import (
Settings,
callback_manager_from_settings_or_context,
llm_from_settings_or_context,
)
class RetrieverQueryEngine(BaseQueryEngine):
"""Retriever query engine.
Args:
retriever (BaseRetriever): A retriever object.
response_synthesizer (Optional[BaseSynthesizer]): A BaseSynthesizer
object.
callback_manager (Optional[CallbackManager]): A callback manager.
"""
def __init__(
self,
retriever: BaseRetriever,
response_synthesizer: Optional[BaseSynthesizer] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
callback_manager: Optional[CallbackManager] = None,
) -> None:
self._retriever = retriever
self._response_synthesizer = response_synthesizer or get_response_synthesizer(
llm=llm_from_settings_or_context(Settings, retriever.get_service_context()),
callback_manager=callback_manager
or callback_manager_from_settings_or_context(
Settings, retriever.get_service_context()
),
)
self._node_postprocessors = node_postprocessors or []
callback_manager = (
callback_manager or self._response_synthesizer.callback_manager
)
for node_postprocessor in self._node_postprocessors:
node_postprocessor.callback_manager = callback_manager
super().__init__(callback_manager=callback_manager)
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {"response_synthesizer": self._response_synthesizer}
@classmethod
def from_args(
cls,
retriever: BaseRetriever,
llm: Optional[LLM] = None,
response_synthesizer: Optional[BaseSynthesizer] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
# response synthesizer args
response_mode: ResponseMode = ResponseMode.COMPACT,
text_qa_template: Optional[BasePromptTemplate] = None,
refine_template: Optional[BasePromptTemplate] = None,
summary_template: Optional[BasePromptTemplate] = None,
simple_template: Optional[BasePromptTemplate] = None,
output_cls: Optional[BaseModel] = None,
use_async: bool = False,
streaming: bool = False,
# deprecated
service_context: Optional[ServiceContext] = None,
**kwargs: Any,
) -> "RetrieverQueryEngine":
"""Initialize a RetrieverQueryEngine object.".
Args:
retriever (BaseRetriever): A retriever object.
service_context (Optional[ServiceContext]): A ServiceContext object.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): A list of
node postprocessors.
verbose (bool): Whether to print out debug info.
response_mode (ResponseMode): A ResponseMode object.
text_qa_template (Optional[BasePromptTemplate]): A BasePromptTemplate
object.
refine_template (Optional[BasePromptTemplate]): A BasePromptTemplate object.
simple_template (Optional[BasePromptTemplate]): A BasePromptTemplate object.
use_async (bool): Whether to use async.
streaming (bool): Whether to use streaming.
optimizer (Optional[BaseTokenUsageOptimizer]): A BaseTokenUsageOptimizer
object.
"""
llm = llm or llm_from_settings_or_context(Settings, service_context)
response_synthesizer = response_synthesizer or get_response_synthesizer(
llm=llm,
service_context=service_context,
text_qa_template=text_qa_template,
refine_template=refine_template,
summary_template=summary_template,
simple_template=simple_template,
response_mode=response_mode,
output_cls=output_cls,
use_async=use_async,
streaming=streaming,
)
callback_manager = callback_manager_from_settings_or_context(
Settings, service_context
)
return cls(
retriever=retriever,
response_synthesizer=response_synthesizer,
callback_manager=callback_manager,
node_postprocessors=node_postprocessors,
)
def _apply_node_postprocessors(
self, nodes: List[NodeWithScore], query_bundle: QueryBundle
) -> List[NodeWithScore]:
for node_postprocessor in self._node_postprocessors:
nodes = node_postprocessor.postprocess_nodes(
nodes, query_bundle=query_bundle
)
return nodes
def retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
nodes = self._retriever.retrieve(query_bundle)
return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)
async def aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
nodes = await self._retriever.aretrieve(query_bundle)
return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)
def with_retriever(self, retriever: BaseRetriever) -> "RetrieverQueryEngine":
return RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=self._response_synthesizer,
callback_manager=self.callback_manager,
node_postprocessors=self._node_postprocessors,
)
def synthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
return self._response_synthesizer.synthesize(
query=query_bundle,
nodes=nodes,
additional_source_nodes=additional_source_nodes,
)
async def asynthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
return await self._response_synthesizer.asynthesize(
query=query_bundle,
nodes=nodes,
additional_source_nodes=additional_source_nodes,
)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
nodes = self.retrieve(query_bundle)
response = self._response_synthesizer.synthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
nodes = await self.aretrieve(query_bundle)
response = await self._response_synthesizer.asynthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
@property
def retriever(self) -> BaseRetriever:
"""Get the retriever object."""
return self._retriever
| [
"llama_index.core.settings.llm_from_settings_or_context",
"llama_index.core.settings.callback_manager_from_settings_or_context",
"llama_index.core.response_synthesizers.get_response_synthesizer"
] | [((4987, 5055), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (5028, 5055), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context\n'), ((4419, 4474), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (4447, 4474), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context\n'), ((4531, 4843), 'llama_index.core.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {'llm': 'llm', 'service_context': 'service_context', 'text_qa_template': 'text_qa_template', 'refine_template': 'refine_template', 'summary_template': 'summary_template', 'simple_template': 'simple_template', 'response_mode': 'response_mode', 'output_cls': 'output_cls', 'use_async': 'use_async', 'streaming': 'streaming'}), '(llm=llm, service_context=service_context,\n text_qa_template=text_qa_template, refine_template=refine_template,\n summary_template=summary_template, simple_template=simple_template,\n response_mode=response_mode, output_cls=output_cls, use_async=use_async,\n streaming=streaming)\n', (4555, 4843), False, 'from llama_index.core.response_synthesizers import BaseSynthesizer, ResponseMode, get_response_synthesizer\n')] |
from llama_index.core.postprocessor import KeywordNodePostprocessor
from llama_index.core.schema import TextNode, NodeWithScore
nodes = [
TextNode(
text="Entry no: 1, <SECRET> - Attack at Dawn"
),
TextNode(
text="Entry no: 2, <RESTRICTED> - Go to point Bravo"
),
TextNode(
text="Entry no: 3, <PUBLIC> - Roses are Red"
),
]
node_with_score_list = [
NodeWithScore(node=node) for node in nodes
]
pp = KeywordNodePostprocessor(
exclude_keywords=["SECRET", "RESTRICTED"]
)
remaining_nodes = pp.postprocess_nodes(
node_with_score_list
)
print('Remaining nodes:')
for node_with_score in remaining_nodes:
node = node_with_score.node
print(f"Text: {node.text}")
| [
"llama_index.core.schema.NodeWithScore",
"llama_index.core.schema.TextNode",
"llama_index.core.postprocessor.KeywordNodePostprocessor"
] | [((452, 519), 'llama_index.core.postprocessor.KeywordNodePostprocessor', 'KeywordNodePostprocessor', ([], {'exclude_keywords': "['SECRET', 'RESTRICTED']"}), "(exclude_keywords=['SECRET', 'RESTRICTED'])\n", (476, 519), False, 'from llama_index.core.postprocessor import KeywordNodePostprocessor\n'), ((143, 198), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': '"""Entry no: 1, <SECRET> - Attack at Dawn"""'}), "(text='Entry no: 1, <SECRET> - Attack at Dawn')\n", (151, 198), False, 'from llama_index.core.schema import TextNode, NodeWithScore\n'), ((218, 280), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': '"""Entry no: 2, <RESTRICTED> - Go to point Bravo"""'}), "(text='Entry no: 2, <RESTRICTED> - Go to point Bravo')\n", (226, 280), False, 'from llama_index.core.schema import TextNode, NodeWithScore\n'), ((300, 354), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': '"""Entry no: 3, <PUBLIC> - Roses are Red"""'}), "(text='Entry no: 3, <PUBLIC> - Roses are Red')\n", (308, 354), False, 'from llama_index.core.schema import TextNode, NodeWithScore\n'), ((402, 426), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'node'}), '(node=node)\n', (415, 426), False, 'from llama_index.core.schema import TextNode, NodeWithScore\n')] |
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.extractors import KeywordExtractor
reader = SimpleDirectoryReader('files')
documents = reader.load_data()
parser = SentenceSplitter(include_prev_next_rel=True)
nodes = parser.get_nodes_from_documents(documents)
key_extractor = KeywordExtractor(keywords=3)
metadata_list = key_extractor.extract(nodes)
print(metadata_list)
| [
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.extractors.KeywordExtractor",
"llama_index.core.node_parser.SentenceSplitter"
] | [((176, 206), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""files"""'], {}), "('files')\n", (197, 206), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((247, 291), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'include_prev_next_rel': '(True)'}), '(include_prev_next_rel=True)\n', (263, 291), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((360, 388), 'llama_index.core.extractors.KeywordExtractor', 'KeywordExtractor', ([], {'keywords': '(3)'}), '(keywords=3)\n', (376, 388), False, 'from llama_index.core.extractors import KeywordExtractor\n')] |
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.extractors import SummaryExtractor
reader = SimpleDirectoryReader('files')
documents = reader.load_data()
parser = SentenceSplitter(include_prev_next_rel=True)
nodes = parser.get_nodes_from_documents(documents)
summary_extractor = SummaryExtractor(
summaries=["prev", "self", "next"]
)
metadata_list = summary_extractor.extract(nodes)
print(metadata_list) | [
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.extractors.SummaryExtractor"
] | [((176, 206), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""files"""'], {}), "('files')\n", (197, 206), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((247, 291), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'include_prev_next_rel': '(True)'}), '(include_prev_next_rel=True)\n', (263, 291), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((364, 416), 'llama_index.core.extractors.SummaryExtractor', 'SummaryExtractor', ([], {'summaries': "['prev', 'self', 'next']"}), "(summaries=['prev', 'self', 'next'])\n", (380, 416), False, 'from llama_index.core.extractors import SummaryExtractor\n')] |
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
document = reader.load_data(Path("files/sample_document1.txt"))
print(f"Metadata: {document[0].metadata}")
print(f"Text: {document[0].text}")
| [
"llama_index.readers.file.FlatReader"
] | [((83, 95), 'llama_index.readers.file.FlatReader', 'FlatReader', ([], {}), '()\n', (93, 95), False, 'from llama_index.readers.file import FlatReader\n'), ((124, 158), 'pathlib.Path', 'Path', (['"""files/sample_document1.txt"""'], {}), "('files/sample_document1.txt')\n", (128, 158), False, 'from pathlib import Path\n')] |
from llama_index.core.node_parser import HierarchicalNodeParser
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
document = reader.load_data(Path("files/sample_document1.txt"))
hierarchical_parser = HierarchicalNodeParser.from_defaults(
chunk_sizes=[128, 64, 32],
chunk_overlap=0,
)
nodes = hierarchical_parser.get_nodes_from_documents(document)
for node in nodes:
print(f"Metadata: {node.metadata} \nText: {node.text}")
| [
"llama_index.readers.file.FlatReader",
"llama_index.core.node_parser.HierarchicalNodeParser.from_defaults"
] | [((147, 159), 'llama_index.readers.file.FlatReader', 'FlatReader', ([], {}), '()\n', (157, 159), False, 'from llama_index.readers.file import FlatReader\n'), ((247, 332), 'llama_index.core.node_parser.HierarchicalNodeParser.from_defaults', 'HierarchicalNodeParser.from_defaults', ([], {'chunk_sizes': '[128, 64, 32]', 'chunk_overlap': '(0)'}), '(chunk_sizes=[128, 64, 32], chunk_overlap=0\n )\n', (283, 332), False, 'from llama_index.core.node_parser import HierarchicalNodeParser\n'), ((188, 222), 'pathlib.Path', 'Path', (['"""files/sample_document1.txt"""'], {}), "('files/sample_document1.txt')\n", (192, 222), False, 'from pathlib import Path\n')] |
from collections import ChainMap
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Protocol,
Sequence,
get_args,
runtime_checkable,
)
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponseAsyncGen,
CompletionResponseGen,
MessageRole,
)
from llama_index.core.base.query_pipeline.query import (
InputKeys,
OutputKeys,
QueryComponent,
StringableInput,
validate_and_convert_stringable,
)
from llama_index.core.bridge.pydantic import (
BaseModel,
Field,
root_validator,
validator,
)
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.generic_utils import (
messages_to_prompt as generic_messages_to_prompt,
)
from llama_index.core.base.llms.generic_utils import (
prompt_to_messages,
)
from llama_index.core.prompts import BasePromptTemplate, PromptTemplate
from llama_index.core.types import (
BaseOutputParser,
PydanticProgramMode,
TokenAsyncGen,
TokenGen,
)
# NOTE: These two protocols are needed to appease mypy
@runtime_checkable
class MessagesToPromptType(Protocol):
def __call__(self, messages: Sequence[ChatMessage]) -> str:
pass
@runtime_checkable
class CompletionToPromptType(Protocol):
def __call__(self, prompt: str) -> str:
pass
def stream_completion_response_to_tokens(
completion_response_gen: CompletionResponseGen,
) -> TokenGen:
"""Convert a stream completion response to a stream of tokens."""
def gen() -> TokenGen:
for response in completion_response_gen:
yield response.delta or ""
return gen()
def stream_chat_response_to_tokens(
chat_response_gen: ChatResponseGen,
) -> TokenGen:
"""Convert a stream completion response to a stream of tokens."""
def gen() -> TokenGen:
for response in chat_response_gen:
yield response.delta or ""
return gen()
async def astream_completion_response_to_tokens(
completion_response_gen: CompletionResponseAsyncGen,
) -> TokenAsyncGen:
"""Convert a stream completion response to a stream of tokens."""
async def gen() -> TokenAsyncGen:
async for response in completion_response_gen:
yield response.delta or ""
return gen()
async def astream_chat_response_to_tokens(
chat_response_gen: ChatResponseAsyncGen,
) -> TokenAsyncGen:
"""Convert a stream completion response to a stream of tokens."""
async def gen() -> TokenAsyncGen:
async for response in chat_response_gen:
yield response.delta or ""
return gen()
def default_completion_to_prompt(prompt: str) -> str:
return prompt
class LLM(BaseLLM):
system_prompt: Optional[str] = Field(
default=None, description="System prompt for LLM calls."
)
messages_to_prompt: Callable = Field(
description="Function to convert a list of messages to an LLM prompt.",
default=None,
exclude=True,
)
completion_to_prompt: Callable = Field(
description="Function to convert a completion to an LLM prompt.",
default=None,
exclude=True,
)
output_parser: Optional[BaseOutputParser] = Field(
description="Output parser to parse, validate, and correct errors programmatically.",
default=None,
exclude=True,
)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT
# deprecated
query_wrapper_prompt: Optional[BasePromptTemplate] = Field(
description="Query wrapper prompt for LLM calls.",
default=None,
exclude=True,
)
@validator("messages_to_prompt", pre=True)
def set_messages_to_prompt(
cls, messages_to_prompt: Optional[MessagesToPromptType]
) -> MessagesToPromptType:
return messages_to_prompt or generic_messages_to_prompt
@validator("completion_to_prompt", pre=True)
def set_completion_to_prompt(
cls, completion_to_prompt: Optional[CompletionToPromptType]
) -> CompletionToPromptType:
return completion_to_prompt or default_completion_to_prompt
@root_validator
def check_prompts(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if values.get("completion_to_prompt") is None:
values["completion_to_prompt"] = default_completion_to_prompt
if values.get("messages_to_prompt") is None:
values["messages_to_prompt"] = generic_messages_to_prompt
return values
def _log_template_data(
self, prompt: BasePromptTemplate, **prompt_args: Any
) -> None:
template_vars = {
k: v
for k, v in ChainMap(prompt.kwargs, prompt_args).items()
if k in prompt.template_vars
}
with self.callback_manager.event(
CBEventType.TEMPLATING,
payload={
EventPayload.TEMPLATE: prompt.get_template(llm=self),
EventPayload.TEMPLATE_VARS: template_vars,
EventPayload.SYSTEM_PROMPT: self.system_prompt,
EventPayload.QUERY_WRAPPER_PROMPT: self.query_wrapper_prompt,
},
):
pass
def _get_prompt(self, prompt: BasePromptTemplate, **prompt_args: Any) -> str:
formatted_prompt = prompt.format(
llm=self,
messages_to_prompt=self.messages_to_prompt,
completion_to_prompt=self.completion_to_prompt,
**prompt_args,
)
if self.output_parser is not None:
formatted_prompt = self.output_parser.format(formatted_prompt)
return self._extend_prompt(formatted_prompt)
def _get_messages(
self, prompt: BasePromptTemplate, **prompt_args: Any
) -> List[ChatMessage]:
messages = prompt.format_messages(llm=self, **prompt_args)
if self.output_parser is not None:
messages = self.output_parser.format_messages(messages)
return self._extend_messages(messages)
def structured_predict(
self,
output_cls: BaseModel,
prompt: PromptTemplate,
**prompt_args: Any,
) -> BaseModel:
from llama_index.core.program.utils import get_program_for_llm
program = get_program_for_llm(
output_cls,
prompt,
self,
pydantic_program_mode=self.pydantic_program_mode,
)
return program(**prompt_args)
async def astructured_predict(
self,
output_cls: BaseModel,
prompt: PromptTemplate,
**prompt_args: Any,
) -> BaseModel:
from llama_index.core.program.utils import get_program_for_llm
program = get_program_for_llm(
output_cls,
prompt,
self,
pydantic_program_mode=self.pydantic_program_mode,
)
return await program.acall(**prompt_args)
def _parse_output(self, output: str) -> str:
if self.output_parser is not None:
return str(self.output_parser.parse(output))
return output
def predict(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> str:
"""Predict."""
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = self.chat(messages)
output = chat_response.message.content or ""
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
response = self.complete(formatted_prompt, formatted=True)
output = response.text
return self._parse_output(output)
def stream(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> TokenGen:
"""Stream."""
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = self.stream_chat(messages)
stream_tokens = stream_chat_response_to_tokens(chat_response)
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
stream_response = self.stream_complete(formatted_prompt, formatted=True)
stream_tokens = stream_completion_response_to_tokens(stream_response)
if prompt.output_parser is not None or self.output_parser is not None:
raise NotImplementedError("Output parser is not supported for streaming.")
return stream_tokens
async def apredict(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> str:
"""Async predict."""
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = await self.achat(messages)
output = chat_response.message.content or ""
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
response = await self.acomplete(formatted_prompt, formatted=True)
output = response.text
return self._parse_output(output)
async def astream(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> TokenAsyncGen:
"""Async stream."""
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = await self.astream_chat(messages)
stream_tokens = await astream_chat_response_to_tokens(chat_response)
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
stream_response = await self.astream_complete(
formatted_prompt, formatted=True
)
stream_tokens = await astream_completion_response_to_tokens(stream_response)
if prompt.output_parser is not None or self.output_parser is not None:
raise NotImplementedError("Output parser is not supported for streaming.")
return stream_tokens
def _extend_prompt(
self,
formatted_prompt: str,
) -> str:
"""Add system and query wrapper prompts to base prompt."""
extended_prompt = formatted_prompt
if self.system_prompt:
extended_prompt = self.system_prompt + "\n\n" + extended_prompt
if self.query_wrapper_prompt:
extended_prompt = self.query_wrapper_prompt.format(
query_str=extended_prompt
)
return extended_prompt
def _extend_messages(self, messages: List[ChatMessage]) -> List[ChatMessage]:
"""Add system prompt to chat message list."""
if self.system_prompt:
messages = [
ChatMessage(role=MessageRole.SYSTEM, content=self.system_prompt),
*messages,
]
return messages
def _as_query_component(self, **kwargs: Any) -> QueryComponent:
"""Return query component."""
if self.metadata.is_chat_model:
return LLMChatComponent(llm=self, **kwargs)
else:
return LLMCompleteComponent(llm=self, **kwargs)
class BaseLLMComponent(QueryComponent):
"""Base LLM component."""
llm: LLM = Field(..., description="LLM")
streaming: bool = Field(default=False, description="Streaming mode")
class Config:
arbitrary_types_allowed = True
def set_callback_manager(self, callback_manager: Any) -> None:
"""Set callback manager."""
self.llm.callback_manager = callback_manager
class LLMCompleteComponent(BaseLLMComponent):
"""LLM completion component."""
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
if "prompt" not in input:
raise ValueError("Prompt must be in input dict.")
# do special check to see if prompt is a list of chat messages
if isinstance(input["prompt"], get_args(List[ChatMessage])):
input["prompt"] = self.llm.messages_to_prompt(input["prompt"])
input["prompt"] = validate_and_convert_stringable(input["prompt"])
else:
input["prompt"] = validate_and_convert_stringable(input["prompt"])
input["prompt"] = self.llm.completion_to_prompt(input["prompt"])
return input
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
# TODO: support only complete for now
# non-trivial to figure how to support chat/complete/etc.
prompt = kwargs["prompt"]
# ignore all other kwargs for now
if self.streaming:
response = self.llm.stream_complete(prompt, formatted=True)
else:
response = self.llm.complete(prompt, formatted=True)
return {"output": response}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component."""
# TODO: support only complete for now
# non-trivial to figure how to support chat/complete/etc.
prompt = kwargs["prompt"]
# ignore all other kwargs for now
response = await self.llm.acomplete(prompt, formatted=True)
return {"output": response}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
# TODO: support only complete for now
return InputKeys.from_keys({"prompt"})
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
class LLMChatComponent(BaseLLMComponent):
"""LLM chat component."""
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
if "messages" not in input:
raise ValueError("Messages must be in input dict.")
# if `messages` is a string, convert to a list of chat message
if isinstance(input["messages"], get_args(StringableInput)):
input["messages"] = validate_and_convert_stringable(input["messages"])
input["messages"] = prompt_to_messages(str(input["messages"]))
for message in input["messages"]:
if not isinstance(message, ChatMessage):
raise ValueError("Messages must be a list of ChatMessage")
return input
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
# TODO: support only complete for now
# non-trivial to figure how to support chat/complete/etc.
messages = kwargs["messages"]
if self.streaming:
response = self.llm.stream_chat(messages)
else:
response = self.llm.chat(messages)
return {"output": response}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component."""
# TODO: support only complete for now
# non-trivial to figure how to support chat/complete/etc.
messages = kwargs["messages"]
if self.streaming:
response = await self.llm.astream_chat(messages)
else:
response = await self.llm.achat(messages)
return {"output": response}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
# TODO: support only complete for now
return InputKeys.from_keys({"messages"})
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
| [
"llama_index.core.bridge.pydantic.validator",
"llama_index.core.base.query_pipeline.query.InputKeys.from_keys",
"llama_index.core.base.query_pipeline.query.OutputKeys.from_keys",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.program.utils.get_program_for_llm",
"llama_index.core.base.llms.types.ChatMessage",
"llama_index.core.base.query_pipeline.query.validate_and_convert_stringable"
] | [((2866, 2929), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""System prompt for LLM calls."""'}), "(default=None, description='System prompt for LLM calls.')\n", (2871, 2929), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((2979, 3094), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Function to convert a list of messages to an LLM prompt."""', 'default': 'None', 'exclude': '(True)'}), "(description=\n 'Function to convert a list of messages to an LLM prompt.', default=\n None, exclude=True)\n", (2984, 3094), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((3153, 3256), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Function to convert a completion to an LLM prompt."""', 'default': 'None', 'exclude': '(True)'}), "(description='Function to convert a completion to an LLM prompt.',\n default=None, exclude=True)\n", (3158, 3256), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((3332, 3460), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Output parser to parse, validate, and correct errors programmatically."""', 'default': 'None', 'exclude': '(True)'}), "(description=\n 'Output parser to parse, validate, and correct errors programmatically.',\n default=None, exclude=True)\n", (3337, 3460), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((3635, 3723), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Query wrapper prompt for LLM calls."""', 'default': 'None', 'exclude': '(True)'}), "(description='Query wrapper prompt for LLM calls.', default=None,\n exclude=True)\n", (3640, 3723), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((3757, 3798), 'llama_index.core.bridge.pydantic.validator', 'validator', (['"""messages_to_prompt"""'], {'pre': '(True)'}), "('messages_to_prompt', pre=True)\n", (3766, 3798), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((3996, 4039), 'llama_index.core.bridge.pydantic.validator', 'validator', (['"""completion_to_prompt"""'], {'pre': '(True)'}), "('completion_to_prompt', pre=True)\n", (4005, 4039), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((11484, 11513), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""LLM"""'}), "(..., description='LLM')\n", (11489, 11513), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((11536, 11586), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Streaming mode"""'}), "(default=False, description='Streaming mode')\n", (11541, 11586), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((6343, 6443), 'llama_index.core.program.utils.get_program_for_llm', 'get_program_for_llm', (['output_cls', 'prompt', 'self'], {'pydantic_program_mode': 'self.pydantic_program_mode'}), '(output_cls, prompt, self, pydantic_program_mode=self.\n pydantic_program_mode)\n', (6362, 6443), False, 'from llama_index.core.program.utils import get_program_for_llm\n'), ((6788, 6888), 'llama_index.core.program.utils.get_program_for_llm', 
'get_program_for_llm', (['output_cls', 'prompt', 'self'], {'pydantic_program_mode': 'self.pydantic_program_mode'}), '(output_cls, prompt, self, pydantic_program_mode=self.\n pydantic_program_mode)\n', (6807, 6888), False, 'from llama_index.core.program.utils import get_program_for_llm\n'), ((13621, 13652), 'llama_index.core.base.query_pipeline.query.InputKeys.from_keys', 'InputKeys.from_keys', (["{'prompt'}"], {}), "({'prompt'})\n", (13640, 13652), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((13751, 13783), 'llama_index.core.base.query_pipeline.query.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (13771, 13783), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((15577, 15610), 'llama_index.core.base.query_pipeline.query.InputKeys.from_keys', 'InputKeys.from_keys', (["{'messages'}"], {}), "({'messages'})\n", (15596, 15610), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((15709, 15741), 'llama_index.core.base.query_pipeline.query.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (15729, 15741), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((12239, 12266), 'typing.get_args', 'get_args', (['List[ChatMessage]'], {}), '(List[ChatMessage])\n', (12247, 12266), False, 'from typing import Any, Callable, Dict, List, Optional, Protocol, Sequence, get_args, runtime_checkable\n'), ((12374, 12422), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['prompt']"], {}), "(input['prompt'])\n", (12405, 12422), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((12467, 12515), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['prompt']"], {}), "(input['prompt'])\n", (12498, 12515), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((14217, 14242), 'typing.get_args', 'get_args', (['StringableInput'], {}), '(StringableInput)\n', (14225, 14242), False, 'from typing import Any, Callable, Dict, List, Optional, Protocol, Sequence, get_args, runtime_checkable\n'), ((14277, 14327), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['messages']"], {}), "(input['messages'])\n", (14308, 14327), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((10988, 11052), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.SYSTEM', 'content': 'self.system_prompt'}), '(role=MessageRole.SYSTEM, content=self.system_prompt)\n', (10999, 11052), False, 'from llama_index.core.base.llms.types import ChatMessage, ChatResponseAsyncGen, ChatResponseGen, CompletionResponseAsyncGen, CompletionResponseGen, MessageRole\n'), ((4780, 4816), 'collections.ChainMap', 'ChainMap', 
(['prompt.kwargs', 'prompt_args'], {}), '(prompt.kwargs, prompt_args)\n', (4788, 4816), False, 'from collections import ChainMap\n')] |
import os
import json
import logging
import sys
import requests
from dotenv import load_dotenv
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from llama_index.core import VectorStoreIndex, Document
from llama_index.tools.brave_search import BraveSearchToolSpec
from llama_index.readers.web import SimpleWebPageReader
# Constants
USER_AGENT = 'Mozilla/5.0 (compatible; YourBot/1.0; +http://yourwebsite.com/bot.html)'
HEADERS = {'User-Agent': USER_AGENT}
RETRIES = Retry(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])
def setup_logging():
"""
Initialize logging configuration to output logs to stdout.
"""
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
def load_environment_variables():
"""
Load environment variables from the .env file.
:return: The Brave API key.
"""
load_dotenv()
return os.getenv('BRAVE_API_KEY')
def perform_search(query, api_key):
"""
Perform a search using the Brave Search API.
:param query: The search query.
:param api_key: The Brave API key.
:return: The search response.
"""
tool_spec = BraveSearchToolSpec(api_key=api_key)
return tool_spec.brave_search(query=query)
def extract_search_results(response):
"""
Extract search results from the Brave Search API response.
:param response: The search response.
:return: A list of search results.
"""
documents = [doc.text for doc in response]
search_results = []
for document in documents:
response_data = json.loads(document)
search_results.extend(response_data.get('web', {}).get('results', []))
return search_results
def scrape_web_pages(search_results):
"""
Scrape web pages from the URLs obtained from the search results.
:param search_results: The list of search results.
:return: A list of scraped documents.
"""
session = requests.Session()
session.mount('http://', HTTPAdapter(max_retries=RETRIES))
session.mount('https://', HTTPAdapter(max_retries=RETRIES))
all_documents = []
for result in search_results:
url = result.get('url')
try:
response = session.get(url, headers=HEADERS, timeout=10)
response.raise_for_status()
doc = Document(text=response.text, url=url)
all_documents.append(doc)
except requests.exceptions.RequestException as e:
logging.error(f"Failed to scrape {url}: {e}")
return all_documents
def main():
"""
Main function to orchestrate the search, scraping, and querying process.
"""
setup_logging()
api_key = load_environment_variables()
my_query = "What is the latest news about llamaindex?"
response = perform_search(my_query, api_key)
search_results = extract_search_results(response)
all_documents = scrape_web_pages(search_results)
# Load all the scraped documents into the vector store
index = VectorStoreIndex.from_documents(all_documents)
# Use the index to query with the language model
query_engine = index.as_query_engine()
response = query_engine.query(my_query)
print(response)
if __name__ == "__main__":
main() | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.Document",
"llama_index.tools.brave_search.BraveSearchToolSpec"
] | [((496, 569), 'urllib3.util.retry.Retry', 'Retry', ([], {'total': '(5)', 'backoff_factor': '(0.1)', 'status_forcelist': '[500, 502, 503, 504]'}), '(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])\n', (501, 569), False, 'from urllib3.util.retry import Retry\n'), ((675, 733), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (694, 733), False, 'import logging\n'), ((949, 962), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (960, 962), False, 'from dotenv import load_dotenv\n'), ((974, 1000), 'os.getenv', 'os.getenv', (['"""BRAVE_API_KEY"""'], {}), "('BRAVE_API_KEY')\n", (983, 1000), False, 'import os\n'), ((1228, 1264), 'llama_index.tools.brave_search.BraveSearchToolSpec', 'BraveSearchToolSpec', ([], {'api_key': 'api_key'}), '(api_key=api_key)\n', (1247, 1264), False, 'from llama_index.tools.brave_search import BraveSearchToolSpec\n'), ((1998, 2016), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2014, 2016), False, 'import requests\n'), ((3052, 3098), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['all_documents'], {}), '(all_documents)\n', (3083, 3098), False, 'from llama_index.core import VectorStoreIndex, Document\n'), ((769, 809), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (790, 809), False, 'import logging\n'), ((1637, 1657), 'json.loads', 'json.loads', (['document'], {}), '(document)\n', (1647, 1657), False, 'import json\n'), ((2046, 2078), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'RETRIES'}), '(max_retries=RETRIES)\n', (2057, 2078), False, 'from requests.adapters import HTTPAdapter\n'), ((2110, 2142), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'RETRIES'}), '(max_retries=RETRIES)\n', (2121, 2142), False, 'from requests.adapters import HTTPAdapter\n'), ((738, 757), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (755, 757), False, 'import logging\n'), ((2374, 2411), 'llama_index.core.Document', 'Document', ([], {'text': 'response.text', 'url': 'url'}), '(text=response.text, url=url)\n', (2382, 2411), False, 'from llama_index.core import VectorStoreIndex, Document\n'), ((2520, 2565), 'logging.error', 'logging.error', (['f"""Failed to scrape {url}: {e}"""'], {}), "(f'Failed to scrape {url}: {e}')\n", (2533, 2565), False, 'import logging\n')] |
"""Read PDF files."""
import shutil
from pathlib import Path
from typing import Any, List
from llama_index.langchain_helpers.text_splitter import SentenceSplitter
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
# https://github.com/emptycrown/llama-hub/blob/main/loader_hub/file/cjk_pdf/base.py
staticPath = "static"
class CJKPDFReader(BaseReader):
"""CJK PDF reader.
Extract text from PDF including CJK (Chinese, Japanese and Korean) languages using pdfminer.six.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
def load_data(self, filepath: Path, filename) -> List[Document]:
"""Parse file."""
# Import pdfminer
from io import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
# Create a resource manager
rsrcmgr = PDFResourceManager()
# Create an object to store the text
retstr = StringIO()
# Create a text converter
codec = "utf-8"
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
# Create a PDF interpreter
interpreter = PDFPageInterpreter(rsrcmgr, device)
# Open the PDF file
fp = open(filepath, "rb")
# Create a list to store the text of each page
document_list = []
# Extract text from each page
for i, page in enumerate(PDFPage.get_pages(fp)):
interpreter.process_page(page)
# Get the text
text = retstr.getvalue()
sentence_splitter = SentenceSplitter(chunk_size=400)
text_chunks = sentence_splitter.split_text(text)
document_list += [
Document(t, extra_info={"page_no": i + 1}) for t in text_chunks
]
# Clear the text
retstr.truncate(0)
retstr.seek(0)
# Close the file
fp.close()
# Close the device
device.close()
shutil.copy2(filepath, f"{staticPath}/file/{filename}")
return document_list
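# Minimal usage sketch (not part of the original reader): the PDF path and
# filename below are hypothetical, and the copy step assumes static/file/ exists.
if __name__ == "__main__":
    reader = CJKPDFReader()
    docs = reader.load_data(Path("data/sample.pdf"), "sample.pdf")
    print(f"Extracted {len(docs)} document chunks")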
| [
"llama_index.readers.schema.base.Document",
"llama_index.langchain_helpers.text_splitter.SentenceSplitter"
] | [((1102, 1122), 'pdfminer.pdfinterp.PDFResourceManager', 'PDFResourceManager', ([], {}), '()\n', (1120, 1122), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((1185, 1195), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1193, 1195), False, 'from io import StringIO\n'), ((1273, 1283), 'pdfminer.layout.LAParams', 'LAParams', ([], {}), '()\n', (1281, 1283), False, 'from pdfminer.layout import LAParams\n'), ((1301, 1363), 'pdfminer.converter.TextConverter', 'TextConverter', (['rsrcmgr', 'retstr'], {'codec': 'codec', 'laparams': 'laparams'}), '(rsrcmgr, retstr, codec=codec, laparams=laparams)\n', (1314, 1363), False, 'from pdfminer.converter import TextConverter\n'), ((1421, 1456), 'pdfminer.pdfinterp.PDFPageInterpreter', 'PDFPageInterpreter', (['rsrcmgr', 'device'], {}), '(rsrcmgr, device)\n', (1439, 1456), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((2247, 2302), 'shutil.copy2', 'shutil.copy2', (['filepath', 'f"""{staticPath}/file/{filename}"""'], {}), "(filepath, f'{staticPath}/file/{filename}')\n", (2259, 2302), False, 'import shutil\n'), ((1672, 1693), 'pdfminer.pdfpage.PDFPage.get_pages', 'PDFPage.get_pages', (['fp'], {}), '(fp)\n', (1689, 1693), False, 'from pdfminer.pdfpage import PDFPage\n'), ((1837, 1869), 'llama_index.langchain_helpers.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': '(400)'}), '(chunk_size=400)\n', (1853, 1869), False, 'from llama_index.langchain_helpers.text_splitter import SentenceSplitter\n'), ((1979, 2021), 'llama_index.readers.schema.base.Document', 'Document', (['t'], {'extra_info': "{'page_no': i + 1}"}), "(t, extra_info={'page_no': i + 1})\n", (1987, 2021), False, 'from llama_index.readers.schema.base import Document\n')] |
"""Read PDF files."""
import shutil
from pathlib import Path
from typing import Any, List
from llama_index.langchain_helpers.text_splitter import SentenceSplitter
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
# https://github.com/emptycrown/llama-hub/blob/main/loader_hub/file/cjk_pdf/base.py
staticPath = "static"
class CJKPDFReader(BaseReader):
"""CJK PDF reader.
Extract text from PDF including CJK (Chinese, Japanese and Korean) languages using pdfminer.six.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
def load_data(self, filepath: Path, filename) -> List[Document]:
"""Parse file."""
# Import pdfminer
from io import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
# Create a resource manager
rsrcmgr = PDFResourceManager()
# Create an object to store the text
retstr = StringIO()
# Create a text converter
codec = "utf-8"
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
# Create a PDF interpreter
interpreter = PDFPageInterpreter(rsrcmgr, device)
# Open the PDF file
fp = open(filepath, "rb")
# Create a list to store the text of each page
document_list = []
# Extract text from each page
for i, page in enumerate(PDFPage.get_pages(fp)):
interpreter.process_page(page)
# Get the text
text = retstr.getvalue()
sentence_splitter = SentenceSplitter(chunk_size=400)
text_chunks = sentence_splitter.split_text(text)
document_list += [
Document(t, extra_info={"page_no": i + 1}) for t in text_chunks
]
# Clear the text
retstr.truncate(0)
retstr.seek(0)
# Close the file
fp.close()
# Close the device
device.close()
shutil.copy2(filepath, f"{staticPath}/file/{filename}")
return document_list
| [
"llama_index.readers.schema.base.Document",
"llama_index.langchain_helpers.text_splitter.SentenceSplitter"
] | [((1102, 1122), 'pdfminer.pdfinterp.PDFResourceManager', 'PDFResourceManager', ([], {}), '()\n', (1120, 1122), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((1185, 1195), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1193, 1195), False, 'from io import StringIO\n'), ((1273, 1283), 'pdfminer.layout.LAParams', 'LAParams', ([], {}), '()\n', (1281, 1283), False, 'from pdfminer.layout import LAParams\n'), ((1301, 1363), 'pdfminer.converter.TextConverter', 'TextConverter', (['rsrcmgr', 'retstr'], {'codec': 'codec', 'laparams': 'laparams'}), '(rsrcmgr, retstr, codec=codec, laparams=laparams)\n', (1314, 1363), False, 'from pdfminer.converter import TextConverter\n'), ((1421, 1456), 'pdfminer.pdfinterp.PDFPageInterpreter', 'PDFPageInterpreter', (['rsrcmgr', 'device'], {}), '(rsrcmgr, device)\n', (1439, 1456), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((2247, 2302), 'shutil.copy2', 'shutil.copy2', (['filepath', 'f"""{staticPath}/file/{filename}"""'], {}), "(filepath, f'{staticPath}/file/{filename}')\n", (2259, 2302), False, 'import shutil\n'), ((1672, 1693), 'pdfminer.pdfpage.PDFPage.get_pages', 'PDFPage.get_pages', (['fp'], {}), '(fp)\n', (1689, 1693), False, 'from pdfminer.pdfpage import PDFPage\n'), ((1837, 1869), 'llama_index.langchain_helpers.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': '(400)'}), '(chunk_size=400)\n', (1853, 1869), False, 'from llama_index.langchain_helpers.text_splitter import SentenceSplitter\n'), ((1979, 2021), 'llama_index.readers.schema.base.Document', 'Document', (['t'], {'extra_info': "{'page_no': i + 1}"}), "(t, extra_info={'page_no': i + 1})\n", (1987, 2021), False, 'from llama_index.readers.schema.base import Document\n')] |
"""Read PDF files."""
import shutil
from pathlib import Path
from typing import Any, List
from llama_index.langchain_helpers.text_splitter import SentenceSplitter
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
# https://github.com/emptycrown/llama-hub/blob/main/loader_hub/file/cjk_pdf/base.py
staticPath = "static"
class CJKPDFReader(BaseReader):
"""CJK PDF reader.
Extract text from PDF including CJK (Chinese, Japanese and Korean) languages using pdfminer.six.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
def load_data(self, filepath: Path, filename) -> List[Document]:
"""Parse file."""
# Import pdfminer
from io import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
# Create a resource manager
rsrcmgr = PDFResourceManager()
# Create an object to store the text
retstr = StringIO()
# Create a text converter
codec = "utf-8"
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
# Create a PDF interpreter
interpreter = PDFPageInterpreter(rsrcmgr, device)
# Open the PDF file
fp = open(filepath, "rb")
# Create a list to store the text of each page
document_list = []
# Extract text from each page
for i, page in enumerate(PDFPage.get_pages(fp)):
interpreter.process_page(page)
# Get the text
text = retstr.getvalue()
sentence_splitter = SentenceSplitter(chunk_size=400)
text_chunks = sentence_splitter.split_text(text)
document_list += [
Document(t, extra_info={"page_no": i + 1}) for t in text_chunks
]
# Clear the text
retstr.truncate(0)
retstr.seek(0)
# Close the file
fp.close()
# Close the device
device.close()
shutil.copy2(filepath, f"{staticPath}/file/{filename}")
return document_list
| [
"llama_index.readers.schema.base.Document",
"llama_index.langchain_helpers.text_splitter.SentenceSplitter"
] | [((1102, 1122), 'pdfminer.pdfinterp.PDFResourceManager', 'PDFResourceManager', ([], {}), '()\n', (1120, 1122), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((1185, 1195), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1193, 1195), False, 'from io import StringIO\n'), ((1273, 1283), 'pdfminer.layout.LAParams', 'LAParams', ([], {}), '()\n', (1281, 1283), False, 'from pdfminer.layout import LAParams\n'), ((1301, 1363), 'pdfminer.converter.TextConverter', 'TextConverter', (['rsrcmgr', 'retstr'], {'codec': 'codec', 'laparams': 'laparams'}), '(rsrcmgr, retstr, codec=codec, laparams=laparams)\n', (1314, 1363), False, 'from pdfminer.converter import TextConverter\n'), ((1421, 1456), 'pdfminer.pdfinterp.PDFPageInterpreter', 'PDFPageInterpreter', (['rsrcmgr', 'device'], {}), '(rsrcmgr, device)\n', (1439, 1456), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((2247, 2302), 'shutil.copy2', 'shutil.copy2', (['filepath', 'f"""{staticPath}/file/{filename}"""'], {}), "(filepath, f'{staticPath}/file/{filename}')\n", (2259, 2302), False, 'import shutil\n'), ((1672, 1693), 'pdfminer.pdfpage.PDFPage.get_pages', 'PDFPage.get_pages', (['fp'], {}), '(fp)\n', (1689, 1693), False, 'from pdfminer.pdfpage import PDFPage\n'), ((1837, 1869), 'llama_index.langchain_helpers.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': '(400)'}), '(chunk_size=400)\n', (1853, 1869), False, 'from llama_index.langchain_helpers.text_splitter import SentenceSplitter\n'), ((1979, 2021), 'llama_index.readers.schema.base.Document', 'Document', (['t'], {'extra_info': "{'page_no': i + 1}"}), "(t, extra_info={'page_no': i + 1})\n", (1987, 2021), False, 'from llama_index.readers.schema.base import Document\n')] |
"""Read PDF files."""
import shutil
from pathlib import Path
from typing import Any, List
from llama_index.langchain_helpers.text_splitter import SentenceSplitter
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
# https://github.com/emptycrown/llama-hub/blob/main/loader_hub/file/cjk_pdf/base.py
staticPath = "static"
class CJKPDFReader(BaseReader):
"""CJK PDF reader.
Extract text from PDF including CJK (Chinese, Japanese and Korean) languages using pdfminer.six.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
def load_data(self, filepath: Path, filename) -> List[Document]:
"""Parse file."""
# Import pdfminer
from io import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
# Create a resource manager
rsrcmgr = PDFResourceManager()
# Create an object to store the text
retstr = StringIO()
# Create a text converter
codec = "utf-8"
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
# Create a PDF interpreter
interpreter = PDFPageInterpreter(rsrcmgr, device)
# Open the PDF file
fp = open(filepath, "rb")
# Create a list to store the text of each page
document_list = []
# Extract text from each page
for i, page in enumerate(PDFPage.get_pages(fp)):
interpreter.process_page(page)
# Get the text
text = retstr.getvalue()
sentence_splitter = SentenceSplitter(chunk_size=400)
text_chunks = sentence_splitter.split_text(text)
document_list += [
Document(t, extra_info={"page_no": i + 1}) for t in text_chunks
]
# Clear the text
retstr.truncate(0)
retstr.seek(0)
# Close the file
fp.close()
# Close the device
device.close()
shutil.copy2(filepath, f"{staticPath}/file/{filename}")
return document_list
| [
"llama_index.readers.schema.base.Document",
"llama_index.langchain_helpers.text_splitter.SentenceSplitter"
] | [((1102, 1122), 'pdfminer.pdfinterp.PDFResourceManager', 'PDFResourceManager', ([], {}), '()\n', (1120, 1122), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((1185, 1195), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1193, 1195), False, 'from io import StringIO\n'), ((1273, 1283), 'pdfminer.layout.LAParams', 'LAParams', ([], {}), '()\n', (1281, 1283), False, 'from pdfminer.layout import LAParams\n'), ((1301, 1363), 'pdfminer.converter.TextConverter', 'TextConverter', (['rsrcmgr', 'retstr'], {'codec': 'codec', 'laparams': 'laparams'}), '(rsrcmgr, retstr, codec=codec, laparams=laparams)\n', (1314, 1363), False, 'from pdfminer.converter import TextConverter\n'), ((1421, 1456), 'pdfminer.pdfinterp.PDFPageInterpreter', 'PDFPageInterpreter', (['rsrcmgr', 'device'], {}), '(rsrcmgr, device)\n', (1439, 1456), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((2247, 2302), 'shutil.copy2', 'shutil.copy2', (['filepath', 'f"""{staticPath}/file/{filename}"""'], {}), "(filepath, f'{staticPath}/file/{filename}')\n", (2259, 2302), False, 'import shutil\n'), ((1672, 1693), 'pdfminer.pdfpage.PDFPage.get_pages', 'PDFPage.get_pages', (['fp'], {}), '(fp)\n', (1689, 1693), False, 'from pdfminer.pdfpage import PDFPage\n'), ((1837, 1869), 'llama_index.langchain_helpers.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': '(400)'}), '(chunk_size=400)\n', (1853, 1869), False, 'from llama_index.langchain_helpers.text_splitter import SentenceSplitter\n'), ((1979, 2021), 'llama_index.readers.schema.base.Document', 'Document', (['t'], {'extra_info': "{'page_no': i + 1}"}), "(t, extra_info={'page_no': i + 1})\n", (1987, 2021), False, 'from llama_index.readers.schema.base import Document\n')] |
from typing import Any, List
import tiktoken
from bs4 import BeautifulSoup
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
staticPath = "static"
def encode_string(string: str, encoding_name: str = "p50k_base"):
encoding = tiktoken.get_encoding(encoding_name)
return encoding.encode(string)
def decode_string(token: str, encoding_name: str = "p50k_base"):
encoding = tiktoken.get_encoding(encoding_name)
return encoding.decode(token)
def num_tokens_from_string(string: str, encoding_name: str = "p50k_base") -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
def split_text_to_doc(
text: str, current_chunk_id, chunk_size: int = 400
) -> List[Document]:
"""Split text into chunks of a given size."""
chunks = []
token_len = num_tokens_from_string(text)
for i in range(0, token_len, chunk_size):
encode_text = encode_string(text)
decode_text = decode_string(encode_text[i : i + chunk_size]).strip()
chunks.append(
Document(
decode_text,
extra_info={"chunk_id": f"chunk-{current_chunk_id}"},
)
)
return chunks
class CustomReader(BaseReader):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
def load_data(self, html, filename) -> List[Document]:
soup = BeautifulSoup(html, "html.parser")
current_chunk_text = ""
current_chunk_id = 1
document_list = []
        # Measured in tokens; OpenAI caps the context at 4097 tokens, which allows roughly 6 turns of continuous conversation
current_chunk_length = 0
chunk_size = 400
        # Only handle the first three heading levels; treat everything else as paragraph content
headings = ["h1", "h2", "h3"]
heading_doms = soup.find_all(headings)
if len(heading_doms) == 0:
heading_doms = [soup.find()]
for tag in heading_doms:
tag["data-chunk_id"] = f"chunk-{current_chunk_id}"
current_chunk_text = tag.text.strip()
            # Walk through sibling nodes only; do not recurse into child nodes
next_tag = tag.find_next_sibling()
while next_tag and next_tag.name not in headings:
stripped_text = next_tag.text.strip()
if (
current_chunk_length + num_tokens_from_string(stripped_text)
> chunk_size
):
document_list.append(
Document(
current_chunk_text.strip(),
extra_info={"chunk_id": f"chunk-{current_chunk_id}"},
)
)
current_chunk_text = ""
current_chunk_length = 0
current_chunk_id += 1
document_list += split_text_to_doc(stripped_text, current_chunk_id)
else:
current_chunk_text = f"{current_chunk_text} {stripped_text}"
current_chunk_length += num_tokens_from_string(stripped_text) + 1
next_tag["data-chunk_id"] = f"chunk-{current_chunk_id}"
next_tag = next_tag.find_next_sibling()
document_list.append(
Document(
current_chunk_text.strip(),
extra_info={"chunk_id": f"chunk-{current_chunk_id}"},
)
)
current_chunk_text = ""
current_chunk_length = 0
current_chunk_id += 1
        # Save the modified HTML file
with open(f"{staticPath}/file/{filename}.html", "w", encoding="utf-8") as f:
f.write(str(soup))
return document_list
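# Minimal usage sketch (not part of the original reader): the HTML snippet and
# filename are hypothetical, and writing the annotated HTML assumes static/file/ exists.
if __name__ == "__main__":
    sample_html = "<h1>Title</h1><p>First paragraph.</p><h2>Section</h2><p>More text.</p>"
    reader = CustomReader()
    docs = reader.load_data(sample_html, "sample")
    print(f"Produced {len(docs)} documents")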
| [
"llama_index.readers.schema.base.Document"
] | [((283, 319), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (304, 319), False, 'import tiktoken\n'), ((437, 473), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (458, 473), False, 'import tiktoken\n'), ((664, 700), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (685, 700), False, 'import tiktoken\n'), ((1571, 1605), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (1584, 1605), False, 'from bs4 import BeautifulSoup\n'), ((1182, 1257), 'llama_index.readers.schema.base.Document', 'Document', (['decode_text'], {'extra_info': "{'chunk_id': f'chunk-{current_chunk_id}'}"}), "(decode_text, extra_info={'chunk_id': f'chunk-{current_chunk_id}'})\n", (1190, 1257), False, 'from llama_index.readers.schema.base import Document\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/6/8 14:03
@Author : alexanderwu
@File : document.py
@Desc : Classes and Operations Related to Files in the File System.
"""
from enum import Enum
from pathlib import Path
from typing import Optional, Union
import pandas as pd
from llama_index.core import Document, SimpleDirectoryReader
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.readers.file import PDFReader
from pydantic import BaseModel, ConfigDict, Field
from tqdm import tqdm
from metagpt.logs import logger
from metagpt.repo_parser import RepoParser
def validate_cols(content_col: str, df: pd.DataFrame):
if content_col not in df.columns:
raise ValueError("Content column not found in DataFrame.")
def read_data(data_path: Path) -> Union[pd.DataFrame, list[Document]]:
suffix = data_path.suffix
if ".xlsx" == suffix:
data = pd.read_excel(data_path)
elif ".csv" == suffix:
data = pd.read_csv(data_path)
elif ".json" == suffix:
data = pd.read_json(data_path)
elif suffix in (".docx", ".doc"):
data = SimpleDirectoryReader(input_files=[str(data_path)]).load_data()
elif ".txt" == suffix:
data = SimpleDirectoryReader(input_files=[str(data_path)]).load_data()
node_parser = SimpleNodeParser.from_defaults(separator="\n", chunk_size=256, chunk_overlap=0)
data = node_parser.get_nodes_from_documents(data)
elif ".pdf" == suffix:
data = PDFReader.load_data(str(data_path))
else:
raise NotImplementedError("File format not supported.")
return data
class DocumentStatus(Enum):
"""Indicates document status, a mechanism similar to RFC/PEP"""
DRAFT = "draft"
UNDERREVIEW = "underreview"
APPROVED = "approved"
DONE = "done"
class Document(BaseModel):
"""
Document: Handles operations related to document files.
"""
path: Path = Field(default=None)
name: str = Field(default="")
content: str = Field(default="")
# metadata? in content perhaps.
author: str = Field(default="")
status: DocumentStatus = Field(default=DocumentStatus.DRAFT)
reviews: list = Field(default_factory=list)
@classmethod
def from_path(cls, path: Path):
"""
Create a Document instance from a file path.
"""
if not path.exists():
raise FileNotFoundError(f"File {path} not found.")
content = path.read_text()
return cls(content=content, path=path)
@classmethod
def from_text(cls, text: str, path: Optional[Path] = None):
"""
Create a Document from a text string.
"""
return cls(content=text, path=path)
def to_path(self, path: Optional[Path] = None):
"""
Save content to the specified file path.
"""
if path is not None:
self.path = path
if self.path is None:
raise ValueError("File path is not set.")
self.path.parent.mkdir(parents=True, exist_ok=True)
# TODO: excel, csv, json, etc.
self.path.write_text(self.content, encoding="utf-8")
def persist(self):
"""
Persist document to disk.
"""
return self.to_path()
class IndexableDocument(Document):
"""
Advanced document handling: For vector databases or search engines.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
data: Union[pd.DataFrame, list]
content_col: Optional[str] = Field(default="")
meta_col: Optional[str] = Field(default="")
@classmethod
def from_path(cls, data_path: Path, content_col="content", meta_col="metadata"):
if not data_path.exists():
raise FileNotFoundError(f"File {data_path} not found.")
data = read_data(data_path)
if isinstance(data, pd.DataFrame):
validate_cols(content_col, data)
return cls(data=data, content=str(data), content_col=content_col, meta_col=meta_col)
try:
content = data_path.read_text()
except Exception as e:
logger.debug(f"Load {str(data_path)} error: {e}")
content = ""
return cls(data=data, content=content, content_col=content_col, meta_col=meta_col)
def _get_docs_and_metadatas_by_df(self) -> (list, list):
df = self.data
docs = []
metadatas = []
for i in tqdm(range(len(df))):
docs.append(df[self.content_col].iloc[i])
if self.meta_col:
metadatas.append({self.meta_col: df[self.meta_col].iloc[i]})
else:
metadatas.append({})
return docs, metadatas
def _get_docs_and_metadatas_by_llamaindex(self) -> (list, list):
data = self.data
docs = [i.text for i in data]
metadatas = [i.metadata for i in data]
return docs, metadatas
def get_docs_and_metadatas(self) -> (list, list):
if isinstance(self.data, pd.DataFrame):
return self._get_docs_and_metadatas_by_df()
elif isinstance(self.data, list):
return self._get_docs_and_metadatas_by_llamaindex()
else:
raise NotImplementedError("Data type not supported for metadata extraction.")
class RepoMetadata(BaseModel):
name: str = Field(default="")
n_docs: int = Field(default=0)
n_chars: int = Field(default=0)
symbols: list = Field(default_factory=list)
class Repo(BaseModel):
# Name of this repo.
name: str = Field(default="")
# metadata: RepoMetadata = Field(default=RepoMetadata)
docs: dict[Path, Document] = Field(default_factory=dict)
codes: dict[Path, Document] = Field(default_factory=dict)
assets: dict[Path, Document] = Field(default_factory=dict)
path: Path = Field(default=None)
def _path(self, filename):
return self.path / filename
@classmethod
def from_path(cls, path: Path):
"""Load documents, code, and assets from a repository path."""
path.mkdir(parents=True, exist_ok=True)
repo = Repo(path=path, name=path.name)
for file_path in path.rglob("*"):
# FIXME: These judgments are difficult to support multiple programming languages and need to be more general
if file_path.is_file() and file_path.suffix in [".json", ".txt", ".md", ".py", ".js", ".css", ".html"]:
repo._set(file_path.read_text(), file_path)
return repo
def to_path(self):
"""Persist all documents, code, and assets to the given repository path."""
for doc in self.docs.values():
doc.to_path()
for code in self.codes.values():
code.to_path()
for asset in self.assets.values():
asset.to_path()
def _set(self, content: str, path: Path):
"""Add a document to the appropriate category based on its file extension."""
suffix = path.suffix
doc = Document(content=content, path=path, name=str(path.relative_to(self.path)))
# FIXME: These judgments are difficult to support multiple programming languages and need to be more general
if suffix.lower() == ".md":
self.docs[path] = doc
elif suffix.lower() in [".py", ".js", ".css", ".html"]:
self.codes[path] = doc
else:
self.assets[path] = doc
return doc
def set(self, filename: str, content: str):
"""Set a document and persist it to disk."""
path = self._path(filename)
doc = self._set(content, path)
doc.to_path()
def get(self, filename: str) -> Optional[Document]:
"""Get a document by its filename."""
path = self._path(filename)
return self.docs.get(path) or self.codes.get(path) or self.assets.get(path)
def get_text_documents(self) -> list[Document]:
return list(self.docs.values()) + list(self.codes.values())
def eda(self) -> RepoMetadata:
n_docs = sum(len(i) for i in [self.docs, self.codes, self.assets])
n_chars = sum(sum(len(j.content) for j in i.values()) for i in [self.docs, self.codes, self.assets])
symbols = RepoParser(base_directory=self.path).generate_symbols()
return RepoMetadata(name=self.name, n_docs=n_docs, n_chars=n_chars, symbols=symbols)
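# Minimal usage sketch (not part of the original module): the repository path is
# hypothetical; Repo.from_path creates it if missing, and eda() relies on metagpt's RepoParser.
if __name__ == "__main__":
    repo = Repo.from_path(Path("./demo_repo"))
    repo.set("docs/overview.md", "# Demo\nA small example document.")
    print(repo.eda())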
| [
"llama_index.core.node_parser.SimpleNodeParser.from_defaults"
] | [((1946, 1965), 'pydantic.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (1951, 1965), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((1982, 1999), 'pydantic.Field', 'Field', ([], {'default': '""""""'}), "(default='')\n", (1987, 1999), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((2019, 2036), 'pydantic.Field', 'Field', ([], {'default': '""""""'}), "(default='')\n", (2024, 2036), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((2092, 2109), 'pydantic.Field', 'Field', ([], {'default': '""""""'}), "(default='')\n", (2097, 2109), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((2139, 2174), 'pydantic.Field', 'Field', ([], {'default': 'DocumentStatus.DRAFT'}), '(default=DocumentStatus.DRAFT)\n', (2144, 2174), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((2195, 2222), 'pydantic.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2200, 2222), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((3412, 3452), 'pydantic.ConfigDict', 'ConfigDict', ([], {'arbitrary_types_allowed': '(True)'}), '(arbitrary_types_allowed=True)\n', (3422, 3452), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((3523, 3540), 'pydantic.Field', 'Field', ([], {'default': '""""""'}), "(default='')\n", (3528, 3540), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((3571, 3588), 'pydantic.Field', 'Field', ([], {'default': '""""""'}), "(default='')\n", (3576, 3588), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((5323, 5340), 'pydantic.Field', 'Field', ([], {'default': '""""""'}), "(default='')\n", (5328, 5340), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((5359, 5375), 'pydantic.Field', 'Field', ([], {'default': '(0)'}), '(default=0)\n', (5364, 5375), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((5395, 5411), 'pydantic.Field', 'Field', ([], {'default': '(0)'}), '(default=0)\n', (5400, 5411), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((5432, 5459), 'pydantic.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (5437, 5459), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((5526, 5543), 'pydantic.Field', 'Field', ([], {'default': '""""""'}), "(default='')\n", (5531, 5543), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((5636, 5663), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (5641, 5663), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((5698, 5725), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (5703, 5725), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((5761, 5788), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (5766, 5788), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((5806, 5825), 'pydantic.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (5811, 5825), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((920, 944), 'pandas.read_excel', 'pd.read_excel', (['data_path'], {}), '(data_path)\n', (933, 944), True, 'import pandas as pd\n'), ((987, 1009), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {}), '(data_path)\n', (998, 1009), True, 'import pandas as pd\n'), ((1053, 1076), 'pandas.read_json', 'pd.read_json', (['data_path'], {}), '(data_path)\n', (1065, 1076), True, 'import pandas as pd\n'), ((8174, 8210), 
'metagpt.repo_parser.RepoParser', 'RepoParser', ([], {'base_directory': 'self.path'}), '(base_directory=self.path)\n', (8184, 8210), False, 'from metagpt.repo_parser import RepoParser\n'), ((1322, 1401), 'llama_index.core.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'separator': '"""\n"""', 'chunk_size': '(256)', 'chunk_overlap': '(0)'}), "(separator='\\n', chunk_size=256, chunk_overlap=0)\n", (1352, 1401), False, 'from llama_index.core.node_parser import SimpleNodeParser\n')] |
from collections.abc import Generator
from typing import Any
from llama_index.core.schema import BaseNode, MetadataMode
from llama_index.core.vector_stores.utils import node_to_metadata_dict
from llama_index.vector_stores.chroma import ChromaVectorStore # type: ignore
def chunk_list(
lst: list[BaseNode], max_chunk_size: int
) -> Generator[list[BaseNode], None, None]:
"""Yield successive max_chunk_size-sized chunks from lst.
Args:
lst (List[BaseNode]): list of nodes with embeddings
max_chunk_size (int): max chunk size
Yields:
Generator[List[BaseNode], None, None]: list of nodes with embeddings
"""
for i in range(0, len(lst), max_chunk_size):
yield lst[i : i + max_chunk_size]
class BatchedChromaVectorStore(ChromaVectorStore): # type: ignore
"""Chroma vector store, batching additions to avoid reaching the max batch limit.
In this vector store, embeddings are stored within a ChromaDB collection.
During query time, the index uses ChromaDB to query for the top
k most similar nodes.
Args:
chroma_client (from chromadb.api.API):
API instance
chroma_collection (chromadb.api.models.Collection.Collection):
ChromaDB collection instance
"""
chroma_client: Any | None
def __init__(
self,
chroma_client: Any,
chroma_collection: Any,
host: str | None = None,
port: str | None = None,
ssl: bool = False,
headers: dict[str, str] | None = None,
collection_kwargs: dict[Any, Any] | None = None,
) -> None:
super().__init__(
chroma_collection=chroma_collection,
host=host,
port=port,
ssl=ssl,
headers=headers,
collection_kwargs=collection_kwargs or {},
)
self.chroma_client = chroma_client
def add(self, nodes: list[BaseNode], **add_kwargs: Any) -> list[str]:
"""Add nodes to index, batching the insertion to avoid issues.
Args:
nodes: List[BaseNode]: list of nodes with embeddings
add_kwargs: _
"""
if not self.chroma_client:
raise ValueError("Client not initialized")
if not self._collection:
raise ValueError("Collection not initialized")
max_chunk_size = self.chroma_client.max_batch_size
node_chunks = chunk_list(nodes, max_chunk_size)
all_ids = []
for node_chunk in node_chunks:
embeddings = []
metadatas = []
ids = []
documents = []
for node in node_chunk:
embeddings.append(node.get_embedding())
metadatas.append(
node_to_metadata_dict(
node, remove_text=True, flat_metadata=self.flat_metadata
)
)
ids.append(node.node_id)
documents.append(node.get_content(metadata_mode=MetadataMode.NONE))
self._collection.add(
embeddings=embeddings,
ids=ids,
metadatas=metadatas,
documents=documents,
)
all_ids.extend(ids)
return all_ids
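# Minimal usage sketch (not part of the original module): assumes the chromadb
# package is available; the in-memory client and collection name are arbitrary.
if __name__ == "__main__":
    import chromadb

    client = chromadb.EphemeralClient()
    collection = client.get_or_create_collection("demo")
    store = BatchedChromaVectorStore(chroma_client=client, chroma_collection=collection)
    print(f"Client max batch size: {client.max_batch_size}")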
| [
"llama_index.core.vector_stores.utils.node_to_metadata_dict"
] | [((2766, 2845), 'llama_index.core.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (2787, 2845), False, 'from llama_index.core.vector_stores.utils import node_to_metadata_dict\n')] |
import os
# Uncomment to specify your OpenAI API key here (local testing only, not in production!), or add corresponding environment variable (recommended)
# os.environ['OPENAI_API_KEY']= ""
from llama_index import LLMPredictor, PromptHelper, ServiceContext
from langchain.llms.openai import OpenAI
from llama_index import StorageContext, load_index_from_storage
base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')
# This example uses gpt-3.5-turbo by default; feel free to change if desired
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path))
# Configure prompt parameters and initialise helper
max_input_size = 500
num_output = 256
max_chunk_overlap = 0.2
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
# Load documents from the 'data' directory
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir='./storage')
# load index
index = load_index_from_storage(storage_context, service_context=service_context)
query_engine = index.as_query_engine()
data = input("Question: ")
response = query_engine.query(data)
print(response)
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.StorageContext.from_defaults",
"llama_index.PromptHelper"
] | [((380, 441), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_BASE"""', '"""http://localhost:8080/v1"""'], {}), "('OPENAI_API_BASE', 'http://localhost:8080/v1')\n", (394, 441), False, 'import os\n'), ((766, 825), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (778, 825), False, 'from llama_index import LLMPredictor, PromptHelper, ServiceContext\n'), ((888, 979), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (916, 979), False, 'from llama_index import LLMPredictor, PromptHelper, ServiceContext\n'), ((1020, 1073), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (1048, 1073), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((1096, 1169), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (1119, 1169), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((556, 632), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'openai_api_base': 'base_path'}), "(temperature=0, model_name='gpt-3.5-turbo', openai_api_base=base_path)\n", (562, 632), False, 'from langchain.llms.openai import OpenAI\n')] |
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface import HuggingFaceLLM
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.core.base.llms.types import CompletionResponse
from dotenv import load_dotenv
import os
import torch
load_dotenv()
DEFAULT_EMBED_MODEL = "BAAI/bge-small-en-v1.5"
DEFAULT_LOCAL_LLM = "HuggingFaceH4/zephyr-7b-gemma-v0.1"
DEFAULT_LLM = "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"
DEFAULT_MAX_NEW_TOKENS = 512
HF_TOKEN = os.getenv("HF_TOKEN", "")
API_KEY = os.getenv("AZURE_OPENAI_TOKEN", "")
AZURE_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT", "")
DEPLOYMENT_NAME = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "")
# DEFAULT_QUANTIZATION_CONFIG = BitsAndBytesConfig(
# load_in_4bit=True,
# bnb_4bit_use_double_quant=True,
# bnb_4bit_quant_type="nf4",
# bnb_4bit_compute_dtype=torch.bfloat16
# )
class DefaultEmbedder(HuggingFaceEmbedding):
def __init__(self, model_name=DEFAULT_EMBED_MODEL, device="cuda"):
        # Pass keyword arguments so device is not consumed by a positional parameter
        super().__init__(model_name=model_name, device=device)
class DefaultLocalLLM(HuggingFaceLLM):
def __init__(self, model_name=DEFAULT_LOCAL_LLM, max_new_tokens=DEFAULT_MAX_NEW_TOKENS, quantization_config=None):
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", quantization_config=quantization_config)
super().__init__(model=model, tokenizer=tokenizer, max_new_tokens=max_new_tokens)
# Monkey patch because stream_complete is not implemented in the current version of llama_index
def stream_complete(self, prompt: str, **kwargs):
def gen():
# patch the patch, on some versions the caller tries to pass the formatted keyword, which doesn't exist
kwargs.pop("formatted", None)
text = ""
for x in self._sync_client.text_generation(
prompt, **{**{"max_new_tokens": self.num_output, "stream": True}, **kwargs}
):
text += x
yield CompletionResponse(text=text, delta=x)
return gen()
HuggingFaceInferenceAPI.stream_complete = stream_complete
class AzureOpenAILLM(AzureOpenAI):
def __init__(self, model="", deployment_name=DEPLOYMENT_NAME, api_key=API_KEY, azure_endpoint=AZURE_ENDPOINT, api_version=""):
super().__init__(model=model, deployment_name=deployment_name, api_key=api_key, azure_endpoint=azure_endpoint, api_version=api_version, temperature=0.0)
class DefaultLLM(HuggingFaceInferenceAPI):
def __init__(self, model_name = DEFAULT_LLM, token=HF_TOKEN, num_output=DEFAULT_MAX_NEW_TOKENS):
super().__init__(model_name=model_name, token=token, num_output=num_output)
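# Minimal usage sketch (not part of the original module): assumes HF_TOKEN is set
# in the environment and the Hugging Face Inference API is reachable.
if __name__ == "__main__":
    llm = DefaultLLM()
    print(llm.complete("In one sentence, what is a vector store index?"))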
| [
"llama_index.core.base.llms.types.CompletionResponse"
] | [((443, 456), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (454, 456), False, 'from dotenv import load_dotenv\n'), ((662, 687), 'os.getenv', 'os.getenv', (['"""HF_TOKEN"""', '""""""'], {}), "('HF_TOKEN', '')\n", (671, 687), False, 'import os\n'), ((698, 733), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_TOKEN"""', '""""""'], {}), "('AZURE_OPENAI_TOKEN', '')\n", (707, 733), False, 'import os\n'), ((751, 789), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_ENDPOINT"""', '""""""'], {}), "('AZURE_OPENAI_ENDPOINT', '')\n", (760, 789), False, 'import os\n'), ((808, 853), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_DEPLOYMENT_NAME"""', '""""""'], {}), "('AZURE_OPENAI_DEPLOYMENT_NAME', '')\n", (817, 853), False, 'import os\n'), ((1358, 1399), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name'], {}), '(model_name)\n', (1387, 1399), False, 'from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig\n'), ((1410, 1522), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_name'], {'device_map': '"""auto"""', 'quantization_config': 'quantization_config'}), "(model_name, device_map='auto',\n quantization_config=quantization_config)\n", (1446, 1522), False, 'from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig\n'), ((2067, 2105), 'llama_index.core.base.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'text', 'delta': 'x'}), '(text=text, delta=x)\n', (2085, 2105), False, 'from llama_index.core.base.llms.types import CompletionResponse\n')] |
from dotenv import load_dotenv
load_dotenv()
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores import SimpleVectorStore
from llama_index.storage.index_store import SimpleIndexStore
from llama_index.graph_stores import SimpleGraphStore
documents = SimpleDirectoryReader('news').load_data()
index = GPTVectorStoreIndex.from_documents(documents)
# save to disk
index.storage_context.persist()
# load from disk
storage_context = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir('storage'),
vector_store=SimpleVectorStore.from_persist_dir('storage'),
index_store=SimpleIndexStore.from_persist_dir('storage'),
graph_store=SimpleGraphStore.from_persist_dir('storage')
)
index = load_index_from_storage(storage_context)
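# Editor's note: index.storage_context.persist() with no arguments writes to the
# default "./storage" directory, which is why the loaders above read from
# 'storage'. Assuming that default, the reload can also be done in one step:
#
#   storage_context = StorageContext.from_defaults(persist_dir="storage")
#   index = load_index_from_storage(storage_context)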
query_engine = index.as_query_engine()
r = query_engine.query("Who are the main exporters of Coal to China? What is the role of Indonesia in this?")
print(r)
| [
"llama_index.SimpleDirectoryReader",
"llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir",
"llama_index.storage.index_store.SimpleIndexStore.from_persist_dir",
"llama_index.graph_stores.SimpleGraphStore.from_persist_dir",
"llama_index.vector_stores.SimpleVectorStore.from_persist_dir",
"llama_index.load_index_from_storage",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((31, 44), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (42, 44), False, 'from dotenv import load_dotenv\n'), ((450, 495), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (484, 495), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext\n'), ((855, 895), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (878, 895), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext\n'), ((399, 428), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""news"""'], {}), "('news')\n", (420, 428), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext\n'), ((609, 656), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', (['"""storage"""'], {}), "('storage')\n", (645, 656), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((675, 720), 'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', (['"""storage"""'], {}), "('storage')\n", (709, 720), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((738, 782), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', (['"""storage"""'], {}), "('storage')\n", (771, 782), False, 'from llama_index.storage.index_store import SimpleIndexStore\n'), ((800, 844), 'llama_index.graph_stores.SimpleGraphStore.from_persist_dir', 'SimpleGraphStore.from_persist_dir', (['"""storage"""'], {}), "('storage')\n", (833, 844), False, 'from llama_index.graph_stores import SimpleGraphStore\n')] |
from typing import List
from llama_index.readers.base import BaseReader
from llama_index.schema import Document
from autollm.utils.logging import logger
class LangchainPDFReader(BaseReader):
"""Custom PDF reader that uses langchain's PDFMinerLoader."""
def __init__(self, extract_images: bool = False) -> None:
"""Initialize the reader."""
self.extract_images = extract_images
def load_data(self, file_path: str, extra_info: dict = None) -> List[Document]:
"""Load data from a PDF file using langchain's PDFMinerLoader."""
from langchain.document_loaders import PDFMinerLoader
# Convert the PosixPath object to a string before passing it to PDFMinerLoader
loader = PDFMinerLoader(str(file_path), extract_images=self.extract_images)
langchain_documents = loader.load() # This returns a list of langchain Document objects
# Convert langchain documents into llama-index documents
documents = []
for langchain_document in langchain_documents:
# Create a llama-index document for each langchain document
doc = Document.from_langchain_format(langchain_document)
# If there's extra info, we can add it to the Document's metadata
if extra_info is not None:
doc.metadata.update(extra_info)
documents.append(doc)
return documents
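# Editor's sketch (not part of the original module): minimal usage of the reader
# above; "example.pdf" and the extra_info dict are hypothetical placeholders.
#
#   reader = LangchainPDFReader(extract_images=False)
#   docs = reader.load_data("example.pdf", extra_info={"source": "example.pdf"})
#   print(len(docs), docs[0].metadata)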
| [
"llama_index.schema.Document.from_langchain_format"
] | [((1131, 1181), 'llama_index.schema.Document.from_langchain_format', 'Document.from_langchain_format', (['langchain_document'], {}), '(langchain_document)\n', (1161, 1181), False, 'from llama_index.schema import Document\n')] |
from rag.agents.interface import Pipeline
from rich.progress import Progress, SpinnerColumn, TextColumn
from typing import Any
from pydantic import create_model
from typing import List
import warnings
import box
import yaml
import timeit
from rich import print
from llama_index.core import SimpleDirectoryReader
from llama_index.multi_modal_llms.ollama import OllamaMultiModal
from llama_index.core.program import MultiModalLLMCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
# Import config vars
with open('config.yml', 'r', encoding='utf8') as ymlfile:
cfg = box.Box(yaml.safe_load(ymlfile))
class VLlamaIndexPipeline(Pipeline):
def run_pipeline(self,
payload: str,
                     query_inputs: List[str],
                     query_types: List[str],
query: str,
file_path: str,
index_name: str,
debug: bool = False,
local: bool = True) -> Any:
print(f"\nRunning pipeline with {payload}\n")
start = timeit.default_timer()
if file_path is None:
raise ValueError("File path is required for vllamaindex pipeline")
mm_model = self.invoke_pipeline_step(lambda: OllamaMultiModal(model=cfg.LLM_VLLAMAINDEX),
"Loading Ollama MultiModal...",
local)
# load as image documents
image_documents = self.invoke_pipeline_step(lambda: SimpleDirectoryReader(input_files=[file_path],
required_exts=[".jpg", ".JPG",
".JPEG"]).load_data(),
"Loading image documents...",
local)
ResponseModel = self.invoke_pipeline_step(lambda: self.build_response_class(query_inputs, query_types),
"Building dynamic response class...",
local)
prompt_template_str = """\
{query_str}
Return the answer as a Pydantic object. The Pydantic schema is given below:
"""
mm_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(ResponseModel),
image_documents=image_documents,
prompt_template_str=prompt_template_str,
multi_modal_llm=mm_model,
verbose=True,
)
try:
response = self.invoke_pipeline_step(lambda: mm_program(query_str=query),
"Running inference...",
local)
except ValueError as e:
print(f"Error: {e}")
msg = 'Inference failed'
return '{"answer": "' + msg + '"}'
end = timeit.default_timer()
print(f"\nJSON response:\n")
for res in response:
print(res)
print('=' * 50)
print(f"Time to retrieve answer: {end - start}")
return response
# Function to safely evaluate type strings
def safe_eval_type(self, type_str, context):
try:
return eval(type_str, {}, context)
except NameError:
raise ValueError(f"Type '{type_str}' is not recognized")
def build_response_class(self, query_inputs, query_types_as_strings):
# Controlled context for eval
context = {
'List': List,
'str': str,
'int': int,
'float': float
# Include other necessary types or typing constructs here
}
# Convert string representations to actual types
query_types = [self.safe_eval_type(type_str, context) for type_str in query_types_as_strings]
# Create fields dictionary
fields = {name: (type_, ...) for name, type_ in zip(query_inputs, query_types)}
DynamicModel = create_model('DynamicModel', **fields)
return DynamicModel
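    # Editor's illustration (hypothetical values): with
    # query_inputs=["invoice_number", "total"] and query_types_as_strings=["str", "float"],
    # build_response_class returns the equivalent of:
    #
    #   class DynamicModel(BaseModel):
    #       invoice_number: str
    #       total: float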
def invoke_pipeline_step(self, task_call, task_description, local):
if local:
with Progress(
SpinnerColumn(),
TextColumn("[progress.description]{task.description}"),
transient=False,
) as progress:
progress.add_task(description=task_description, total=None)
ret = task_call()
else:
print(task_description)
ret = task_call()
return ret
| [
"llama_index.core.SimpleDirectoryReader",
"llama_index.multi_modal_llms.ollama.OllamaMultiModal",
"llama_index.core.output_parsers.PydanticOutputParser"
] | [((512, 574), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (535, 574), False, 'import warnings\n'), ((575, 630), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (598, 630), False, 'import warnings\n'), ((730, 753), 'yaml.safe_load', 'yaml.safe_load', (['ymlfile'], {}), '(ymlfile)\n', (744, 753), False, 'import yaml\n'), ((1146, 1193), 'rich.print', 'print', (['f"""\nRunning pipeline with {payload}\n"""'], {}), '(f"""\nRunning pipeline with {payload}\n""")\n', (1151, 1193), False, 'from rich import print\n'), ((1209, 1231), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1229, 1231), False, 'import timeit\n'), ((3193, 3215), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (3213, 3215), False, 'import timeit\n'), ((3225, 3255), 'rich.print', 'print', (['f"""\nJSON response:\n"""'], {}), '(f"""\nJSON response:\n""")\n', (3230, 3255), False, 'from rich import print\n'), ((3314, 3329), 'rich.print', 'print', (["('=' * 50)"], {}), "('=' * 50)\n", (3319, 3329), False, 'from rich import print\n'), ((3339, 3387), 'rich.print', 'print', (['f"""Time to retrieve answer: {end - start}"""'], {}), "(f'Time to retrieve answer: {end - start}')\n", (3344, 3387), False, 'from rich import print\n'), ((4288, 4326), 'pydantic.create_model', 'create_model', (['"""DynamicModel"""'], {}), "('DynamicModel', **fields)\n", (4300, 4326), False, 'from pydantic import create_model\n'), ((3295, 3305), 'rich.print', 'print', (['res'], {}), '(res)\n', (3300, 3305), False, 'from rich import print\n'), ((4787, 4810), 'rich.print', 'print', (['task_description'], {}), '(task_description)\n', (4792, 4810), False, 'from rich import print\n'), ((1396, 1439), 'llama_index.multi_modal_llms.ollama.OllamaMultiModal', 'OllamaMultiModal', ([], {'model': 'cfg.LLM_VLLAMAINDEX'}), '(model=cfg.LLM_VLLAMAINDEX)\n', (1412, 1439), False, 'from llama_index.multi_modal_llms.ollama import OllamaMultiModal\n'), ((2591, 2626), 'llama_index.core.output_parsers.PydanticOutputParser', 'PydanticOutputParser', (['ResponseModel'], {}), '(ResponseModel)\n', (2611, 2626), False, 'from llama_index.core.output_parsers import PydanticOutputParser\n'), ((3073, 3093), 'rich.print', 'print', (['f"""Error: {e}"""'], {}), "(f'Error: {e}')\n", (3078, 3093), False, 'from rich import print\n'), ((4494, 4509), 'rich.progress.SpinnerColumn', 'SpinnerColumn', ([], {}), '()\n', (4507, 4509), False, 'from rich.progress import Progress, SpinnerColumn, TextColumn\n'), ((4531, 4585), 'rich.progress.TextColumn', 'TextColumn', (['"""[progress.description]{task.description}"""'], {}), "('[progress.description]{task.description}')\n", (4541, 4585), False, 'from rich.progress import Progress, SpinnerColumn, TextColumn\n'), ((1665, 1756), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[file_path]', 'required_exts': "['.jpg', '.JPG', '.JPEG']"}), "(input_files=[file_path], required_exts=['.jpg',\n '.JPG', '.JPEG'])\n", (1686, 1756), False, 'from llama_index.core import SimpleDirectoryReader\n')] |
import functools
import os
import random
import tempfile
import traceback
import asyncio
from collections import defaultdict
import aiohttp
import discord
import aiofiles
import httpx
import openai
import tiktoken
from functools import partial
from typing import List, Optional, cast
from pathlib import Path
from datetime import date
from discord import Interaction
from discord.ext import pages
from langchain.agents import initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationSummaryBufferMemory
from langchain.prompts import MessagesPlaceholder
from langchain.schema import SystemMessage
from langchain.tools import Tool
from llama_index.callbacks import CallbackManager, TokenCountingHandler
from llama_index.evaluation.guideline import DEFAULT_GUIDELINES, GuidelineEvaluator
from llama_index.llms import OpenAI
from llama_index.node_parser import SimpleNodeParser
from llama_index.response_synthesizers import ResponseMode
from llama_index.indices.query.query_transform import StepDecomposeQueryTransform
from llama_index.langchain_helpers.agents import (
IndexToolConfig,
LlamaToolkit,
create_llama_chat_agent,
LlamaIndexTool,
)
from llama_index.prompts.chat_prompts import (
CHAT_REFINE_PROMPT,
CHAT_TREE_SUMMARIZE_PROMPT,
TEXT_QA_SYSTEM_PROMPT,
)
from llama_index.readers import YoutubeTranscriptReader
from llama_index.readers.schema.base import Document
from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
from llama_index.retrievers import VectorIndexRetriever, TreeSelectLeafRetriever
from llama_index.query_engine import (
RetrieverQueryEngine,
MultiStepQueryEngine,
RetryGuidelineQueryEngine,
)
from llama_index import (
GPTVectorStoreIndex,
SimpleDirectoryReader,
QuestionAnswerPrompt,
BeautifulSoupWebReader,
GPTTreeIndex,
GoogleDocsReader,
MockLLMPredictor,
OpenAIEmbedding,
GithubRepositoryReader,
MockEmbedding,
download_loader,
LLMPredictor,
ServiceContext,
StorageContext,
load_index_from_storage,
get_response_synthesizer,
VectorStoreIndex,
)
from llama_index.schema import TextNode
from llama_index.storage.docstore.types import RefDocInfo
from llama_index.readers.web import DEFAULT_WEBSITE_EXTRACTOR
from llama_index.composability import ComposableGraph
from llama_index.vector_stores import DocArrayInMemoryVectorStore
from models.embed_statics_model import EmbedStatics
from models.openai_model import Models
from models.check_model import UrlCheck
from services.environment_service import EnvService
from utils.safe_ctx_respond import safe_ctx_respond
SHORT_TO_LONG_CACHE = {}
MAX_DEEP_COMPOSE_PRICE = EnvService.get_max_deep_compose_price()
EpubReader = download_loader("EpubReader")
MarkdownReader = download_loader("MarkdownReader")
RemoteReader = download_loader("RemoteReader")
RemoteDepthReader = download_loader("RemoteDepthReader")
embedding_model = OpenAIEmbedding()
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
verbose=False,
)
node_parser = SimpleNodeParser.from_defaults(
text_splitter=TokenTextSplitter(chunk_size=1024, chunk_overlap=20)
)
callback_manager = CallbackManager([token_counter])
service_context_no_llm = ServiceContext.from_defaults(
embed_model=embedding_model,
callback_manager=callback_manager,
node_parser=node_parser,
)
timeout = httpx.Timeout(1, read=1, write=1, connect=1)
def get_service_context_with_llm(llm):
service_context = ServiceContext.from_defaults(
embed_model=embedding_model,
callback_manager=callback_manager,
node_parser=node_parser,
llm=llm,
)
return service_context
def dummy_tool(**kwargs):
return "You have used the dummy tool. Forget about this and do not even mention this to the user."
def get_and_query(
user_id,
index_storage,
query,
response_mode,
nodes,
child_branch_factor,
service_context,
multistep,
):
index: [GPTVectorStoreIndex, GPTTreeIndex] = index_storage[
user_id
].get_index_or_throw()
if isinstance(index, GPTTreeIndex):
retriever = TreeSelectLeafRetriever(
index=index,
child_branch_factor=child_branch_factor,
service_context=service_context,
)
else:
retriever = VectorIndexRetriever(
index=index, similarity_top_k=nodes, service_context=service_context
)
response_synthesizer = get_response_synthesizer(
response_mode=response_mode,
use_async=True,
refine_template=CHAT_REFINE_PROMPT,
service_context=service_context,
)
query_engine = RetrieverQueryEngine(
retriever=retriever, response_synthesizer=response_synthesizer
)
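    # Note: despite its name, `multistep` here receives an LLMPredictor (or None)
    # from Index_handler.query(); it doubles as the enable flag and as the
    # predictor handed to StepDecomposeQueryTransform below.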
multistep_query_engine = MultiStepQueryEngine(
query_engine=query_engine,
query_transform=StepDecomposeQueryTransform(multistep),
index_summary="Provides information about everything you need to know about this topic, use this to answer the question.",
)
if multistep:
response = multistep_query_engine.query(query)
else:
response = query_engine.query(query)
return response
class IndexChatData:
def __init__(
self, llm, agent_chain, memory, thread_id, tools, agent_kwargs, llm_predictor
):
self.llm = llm
self.agent_chain = agent_chain
self.memory = memory
self.thread_id = thread_id
self.tools = tools
self.agent_kwargs = agent_kwargs
self.llm_predictor = llm_predictor
class IndexData:
def __init__(self):
self.queryable_index = None
self.individual_indexes = []
# A safety check for the future
def get_index_or_throw(self):
if not self.queryable():
raise Exception(
"An index access was attempted before an index was created. This is a programmer error, please report this to the maintainers."
)
return self.queryable_index
def queryable(self):
return self.queryable_index is not None
def has_indexes(self, user_id):
try:
return (
len(os.listdir(EnvService.find_shared_file(f"indexes/{user_id}"))) > 0
)
except Exception:
return False
def has_search_indexes(self, user_id):
try:
return (
len(
os.listdir(EnvService.find_shared_file(f"indexes/{user_id}_search"))
)
> 0
)
except Exception:
return False
def add_index(self, index, user_id, file_name):
self.individual_indexes.append(index)
self.queryable_index = index
# Create a folder called "indexes/{USER_ID}" if it doesn't exist already
Path(f"{EnvService.save_path()}/indexes/{user_id}").mkdir(
parents=True, exist_ok=True
)
# Save the index to file under the user id
file = f"{date.today().month}_{date.today().day}_{file_name}"
# If file is > 93 in length, cut it off to 93
if len(file) > 93:
file = file[:93]
index.storage_context.persist(
persist_dir=EnvService.save_path()
/ "indexes"
/ f"{str(user_id)}"
/ f"{file}"
)
def reset_indexes(self, user_id):
self.individual_indexes = []
self.queryable_index = None
# Delete the user indexes
try:
# First, clear all the files inside it
for file in os.listdir(EnvService.find_shared_file(f"indexes/{user_id}")):
try:
os.remove(EnvService.find_shared_file(f"indexes/{user_id}/{file}"))
except:
traceback.print_exc()
for file in os.listdir(
EnvService.find_shared_file(f"indexes/{user_id}_search")
):
try:
os.remove(
EnvService.find_shared_file(f"indexes/{user_id}_search/{file}")
)
except:
traceback.print_exc()
except Exception:
traceback.print_exc()
class Index_handler:
embedding_model = OpenAIEmbedding()
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
verbose=False,
)
node_parser = SimpleNodeParser.from_defaults(
text_splitter=TokenTextSplitter(chunk_size=1024, chunk_overlap=20)
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(
embed_model=embedding_model,
callback_manager=callback_manager,
node_parser=node_parser,
)
type_to_suffix_mappings = {
"text/plain": ".txt",
"text/csv": ".csv",
"application/pdf": ".pdf",
"application/json": ".json",
"image/png": ".png",
"image/jpeg": ".jpg",
"image/gif": ".gif",
"image/svg+xml": ".svg",
"image/webp": ".webp",
"application/mspowerpoint": ".ppt",
"application/vnd.ms-powerpoint": ".ppt",
"application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
"application/msexcel": ".xls",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ".xlsx",
"application/msword": ".doc",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": ".docx",
"audio/mpeg": ".mp3",
"audio/x-wav": ".wav",
"audio/ogg": ".ogg",
"video/mpeg": ".mpeg",
"video/mp4": ".mp4",
"application/epub+zip": ".epub",
"text/markdown": ".md",
"text/html": ".html",
"application/rtf": ".rtf",
"application/x-msdownload": ".exe",
"application/xml": ".xml",
"application/vnd.adobe.photoshop": ".psd",
"application/x-sql": ".sql",
"application/x-latex": ".latex",
"application/x-httpd-php": ".php",
"application/java-archive": ".jar",
"application/x-sh": ".sh",
"application/x-csh": ".csh",
"text/x-c": ".c",
"text/x-c++": ".cpp",
"text/x-java-source": ".java",
"text/x-python": ".py",
"text/x-ruby": ".rb",
"text/x-perl": ".pl",
"text/x-shellscript": ".sh",
}
    # For when the content type doesn't get picked up by Discord.
secondary_mappings = {
".epub": ".epub",
}
def __init__(self, bot, usage_service):
self.bot = bot
self.openai_key = os.getenv("OPENAI_TOKEN")
self.index_storage = defaultdict(IndexData)
self.loop = asyncio.get_running_loop()
self.usage_service = usage_service
self.qaprompt = QuestionAnswerPrompt(
"Context information is below. The text '<|endofstatement|>' is used to separate chat entries and make it "
"easier for you to understand the context\n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Never say '<|endofstatement|>'\n"
"Given the context information and not prior knowledge, "
"answer the question: {query_str}\n"
)
self.EMBED_CUTOFF = 2000
self.index_chat_chains = {}
self.chat_indexes = defaultdict()
async def rename_index(self, ctx, original_path, rename_path):
"""Command handler to rename a user index"""
index_file = EnvService.find_shared_file(original_path)
if not index_file:
return False
# Rename the file at f"indexes/{ctx.user.id}/{user_index}" to f"indexes/{ctx.user.id}/{new_name}" using Pathlib
try:
Path(original_path).rename(rename_path)
return True
except Exception as e:
traceback.print_exc()
return False
async def get_is_in_index_chat(self, ctx):
return ctx.channel.id in self.index_chat_chains.keys()
async def execute_index_chat_message(self, ctx, message):
if ctx.channel.id not in self.index_chat_chains:
return None
if message.lower() in ["stop", "end", "quit", "exit"]:
await ctx.reply("Ending chat session.")
self.index_chat_chains.pop(ctx.channel.id)
# close the thread
thread = await self.bot.fetch_channel(ctx.channel.id)
await thread.edit(name="Closed-GPT")
await thread.edit(archived=True)
return "Ended chat session."
self.usage_service.update_usage_memory(ctx.guild.name, "index_chat_message", 1)
agent_output = await self.loop.run_in_executor(
None,
partial(self.index_chat_chains[ctx.channel.id].agent_chain.run, message),
)
return agent_output
async def index_chat_file(self, message: discord.Message, file: discord.Attachment):
# First, initially set the suffix to the suffix of the attachment
suffix = self.get_file_suffix(file.content_type, file.filename) or None
if not suffix:
await message.reply(
"The file you uploaded is unable to be indexed. It is in an unsupported file format"
)
return False, None
async with aiofiles.tempfile.TemporaryDirectory() as temp_path:
async with aiofiles.tempfile.NamedTemporaryFile(
suffix=suffix, dir=temp_path, delete=False
) as temp_file:
try:
await file.save(temp_file.name)
filename = file.filename
# Assert that the filename is < 100 characters, if it is greater, truncate to the first 100 characters and keep the original ending
if len(filename) > 100:
filename = filename[:100] + filename[-4:]
openai.log = "debug"
print("Indexing")
index: VectorStoreIndex = await self.loop.run_in_executor(
None,
partial(
self.index_file,
Path(temp_file.name),
get_service_context_with_llm(
self.index_chat_chains[message.channel.id].llm
),
suffix,
),
)
print("Done Indexing")
self.usage_service.update_usage_memory(
message.guild.name, "index_chat_file", 1
)
summary = await index.as_query_engine(
response_mode="tree_summarize",
service_context=get_service_context_with_llm(
self.index_chat_chains[message.channel.id].llm
),
).aquery(
f"What is a summary or general idea of this data? Be detailed in your summary (e.g "
f"extract key names, etc) but not too verbose. Your summary should be under a hundred words. "
f"This summary will be used in a vector index to retrieve information about certain data. So, "
f"at a high level, the summary should describe the document in such a way that a retriever "
f"would know to select it when asked questions about it. The data name was {filename}. Include "
f"the file name in the summary. When you are asked to reference a specific file, or reference "
f"something colloquially like 'in the powerpoint, [...]?', never respond saying that as an AI "
f"you can't view the data, instead infer which tool to use that has the data. Say that there "
f"is no available data if there are no available tools that are relevant."
)
engine = self.get_query_engine(
index, self.index_chat_chains[message.channel.id].llm
)
# Get rid of all special characters in the filename
filename = "".join(
[c for c in filename if c.isalpha() or c.isdigit()]
).rstrip()
tool_config = IndexToolConfig(
query_engine=engine,
name=f"{filename}-index",
description=f"Use this tool if the query seems related to this summary: {summary}",
tool_kwargs={
"return_direct": False,
},
max_iterations=5,
)
tool = LlamaIndexTool.from_tool_config(tool_config)
tools = self.index_chat_chains[message.channel.id].tools
tools.append(tool)
agent_chain = initialize_agent(
tools=tools,
llm=self.index_chat_chains[message.channel.id].llm,
agent=AgentType.OPENAI_FUNCTIONS,
verbose=True,
agent_kwargs=self.index_chat_chains[
message.channel.id
].agent_kwargs,
memory=self.index_chat_chains[message.channel.id].memory,
handle_parsing_errors="Check your output and make sure it conforms!",
)
index_chat_data = IndexChatData(
self.index_chat_chains[message.channel.id].llm,
agent_chain,
self.index_chat_chains[message.channel.id].memory,
message.channel.id,
tools,
self.index_chat_chains[message.channel.id].agent_kwargs,
self.index_chat_chains[message.channel.id].llm_predictor,
)
self.index_chat_chains[message.channel.id] = index_chat_data
return True, summary
except Exception as e:
await message.reply(
"There was an error indexing your file: " + str(e)
)
traceback.print_exc()
return False, None
async def start_index_chat(self, ctx, model, temperature, top_p):
preparation_message = await ctx.channel.send(
embed=EmbedStatics.get_index_chat_preparation_message()
)
llm = ChatOpenAI(
model=model, temperature=temperature, top_p=top_p, max_retries=2
)
llm_predictor = LLMPredictor(
llm=ChatOpenAI(temperature=temperature, top_p=top_p, model_name=model)
)
max_token_limit = 29000 if "gpt-4" in model else 7500
memory = ConversationSummaryBufferMemory(
memory_key="memory",
return_messages=True,
llm=llm,
max_token_limit=100000 if "preview" in model else max_token_limit,
)
agent_kwargs = {
"extra_prompt_messages": [MessagesPlaceholder(variable_name="memory")],
"system_message": SystemMessage(
content="You are a superpowered version of GPT that is able to answer questions about the data you're "
"connected to. Each different tool you have represents a different dataset to interact with. "
"If you are asked to perform a task that spreads across multiple datasets, use multiple tools "
"for the same prompt. When the user types links in chat, you will have already been connected "
"to the data at the link by the time you respond. When using tools, the input should be "
"clearly created based on the request of the user. For example, if a user uploads an invoice "
"and asks how many usage hours of X was present in the invoice, a good query is 'X hours'. "
"Avoid using single word queries unless the request is very simple. You can query multiple times to break down complex requests and retrieve more information. When calling functions, no special characters are allowed in the function name, keep that in mind."
),
}
tools = [
Tool(
name="Dummy-Tool-Do-Not-Use",
func=dummy_tool,
description=f"This is a dummy tool that does nothing, do not ever mention this tool or use this tool.",
)
]
print(f"{tools}{llm}{AgentType.OPENAI_FUNCTIONS}{True}{agent_kwargs}{memory}")
agent_chain = initialize_agent(
tools=tools,
llm=llm,
agent=AgentType.OPENAI_FUNCTIONS,
verbose=True,
agent_kwargs=agent_kwargs,
memory=memory,
handle_parsing_errors="Check your output and make sure it conforms!",
)
embed_title = f"{ctx.user.name}'s data-connected conversation with GPT"
message_embed = discord.Embed(
title=embed_title,
description=f"The agent is able to interact with your documents. Simply drag your documents into discord or give the agent a link from where to download the documents.\nModel: {model}",
color=0x00995B,
)
message_embed.set_thumbnail(url="https://i.imgur.com/7V6apMT.png")
message_embed.set_footer(
text="Data Chat", icon_url="https://i.imgur.com/7V6apMT.png"
)
message_thread = await ctx.send(embed=message_embed)
thread = await message_thread.create_thread(
name=ctx.user.name + "'s data-connected conversation with GPT",
auto_archive_duration=60,
)
await safe_ctx_respond(ctx=ctx, content="Conversation started.")
try:
await preparation_message.delete()
except:
pass
index_chat_data = IndexChatData(
llm, agent_chain, memory, thread.id, tools, agent_kwargs, llm_predictor
)
self.index_chat_chains[thread.id] = index_chat_data
async def paginate_embed(self, response_text):
"""Given a response text make embed pages and return a list of the pages."""
response_text = [
response_text[i : i + self.EMBED_CUTOFF]
for i in range(0, len(response_text), self.EMBED_CUTOFF)
]
pages = []
first = False
# Send each chunk as a message
for count, chunk in enumerate(response_text, start=1):
if not first:
page = discord.Embed(
title=f"Index Query Results",
description=chunk,
)
first = True
else:
page = discord.Embed(
title=f"Page {count}",
description=chunk,
)
pages.append(page)
return pages
def index_file(
self, file_path, service_context, suffix=None
) -> GPTVectorStoreIndex:
if suffix and suffix == ".md":
loader = MarkdownReader()
document = loader.load_data(file_path)
elif suffix and suffix == ".epub":
epub_loader = EpubReader()
document = epub_loader.load_data(file_path)
else:
document = SimpleDirectoryReader(input_files=[file_path]).load_data()
index = GPTVectorStoreIndex.from_documents(
document, service_context=service_context, use_async=True
)
return index
def index_gdoc(self, doc_id, service_context) -> GPTVectorStoreIndex:
document = GoogleDocsReader().load_data(doc_id)
index = GPTVectorStoreIndex.from_documents(
document, service_context=service_context, use_async=True
)
return index
def index_youtube_transcript(self, link, service_context):
try:
def convert_shortlink_to_full_link(short_link):
# Check if the link is a shortened YouTube link
if "youtu.be" in short_link:
# Extract the video ID from the link
video_id = short_link.split("/")[-1].split("?")[0]
# Construct the full YouTube desktop link
desktop_link = f"https://www.youtube.com/watch?v={video_id}"
return desktop_link
else:
return short_link
documents = YoutubeTranscriptReader().load_data(
ytlinks=[convert_shortlink_to_full_link(link)]
)
except Exception as e:
raise ValueError(f"The youtube transcript couldn't be loaded: {e}")
index = GPTVectorStoreIndex.from_documents(
documents,
service_context=service_context,
use_async=True,
)
return index
def index_github_repository(self, link, service_context):
# Extract the "owner" and the "repo" name from the github link.
owner = link.split("/")[3]
repo = link.split("/")[4]
try:
documents = GithubRepositoryReader(owner=owner, repo=repo).load_data(
branch="main"
)
except KeyError:
documents = GithubRepositoryReader(owner=owner, repo=repo).load_data(
branch="master"
)
index = GPTVectorStoreIndex.from_documents(
documents,
service_context=service_context,
use_async=True,
)
return index
def index_load_file(self, file_path) -> [GPTVectorStoreIndex, ComposableGraph]:
storage_context = StorageContext.from_defaults(persist_dir=file_path)
index = load_index_from_storage(storage_context)
return index
def index_discord(self, document, service_context) -> GPTVectorStoreIndex:
index = GPTVectorStoreIndex.from_documents(
document,
service_context=service_context,
use_async=True,
)
return index
async def index_pdf(self, url) -> list[Document]:
# Download the PDF at the url and save it to a tempfile
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
if response.status == 200:
data = await response.read()
f = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
f.write(data)
f.close()
else:
return "An error occurred while downloading the PDF."
# Get the file path of this tempfile.NamedTemporaryFile
# Save this temp file to an actual file that we can put into something else to read it
documents = SimpleDirectoryReader(input_files=[f.name]).load_data()
# Delete the temporary file
return documents
async def index_webpage(self, url, service_context) -> GPTVectorStoreIndex:
# First try to connect to the URL to see if we can even reach it.
try:
async with aiohttp.ClientSession() as session:
async with session.get(url, timeout=5) as response:
# Add another entry to links from all_links if the link is not already in it to compensate for the failed request
if response.status not in [200, 203, 202, 204]:
raise ValueError(
"Invalid URL or could not connect to the provided URL."
)
else:
# Detect if the link is a PDF, if it is, we load it differently
if response.headers["Content-Type"] == "application/pdf":
documents = await self.index_pdf(url)
index = await self.loop.run_in_executor(
None,
functools.partial(
GPTVectorStoreIndex.from_documents,
documents=documents,
service_context=service_context,
use_async=True,
),
)
return index
except:
traceback.print_exc()
raise ValueError("Could not load webpage")
documents = BeautifulSoupWebReader(
website_extractor=DEFAULT_WEBSITE_EXTRACTOR
).load_data(urls=[url])
# index = GPTVectorStoreIndex(documents, embed_model=embed_model, use_async=True)
index = await self.loop.run_in_executor(
None,
functools.partial(
GPTVectorStoreIndex.from_documents,
documents=documents,
service_context=service_context,
use_async=True,
),
)
return index
def reset_indexes(self, user_id):
self.index_storage[user_id].reset_indexes(user_id)
def get_file_suffix(self, content_type, filename):
print("The content type is " + content_type)
if content_type:
# Apply the suffix mappings to the file
for key, value in self.type_to_suffix_mappings.items():
if key in content_type:
return value
else:
for key, value in self.secondary_mappings.items():
if key in filename:
return value
return None
async def set_file_index(
self, ctx: discord.ApplicationContext, file: discord.Attachment, user_api_key
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
try:
# First, initially set the suffix to the suffix of the attachment
suffix = self.get_file_suffix(file.content_type, file.filename) or None
if not suffix:
await ctx.respond(
embed=EmbedStatics.get_index_set_failure_embed("Unsupported file")
)
return
# Send indexing message
response = await ctx.respond(
embed=EmbedStatics.build_index_progress_embed()
)
async with aiofiles.tempfile.TemporaryDirectory() as temp_path:
async with aiofiles.tempfile.NamedTemporaryFile(
suffix=suffix, dir=temp_path, delete=False
) as temp_file:
await file.save(temp_file.name)
index = await self.loop.run_in_executor(
None,
partial(
self.index_file,
Path(temp_file.name),
service_context_no_llm,
suffix,
),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except:
traceback.print_exc()
price = "Unknown"
file_name = file.filename
self.index_storage[ctx.user.id].add_index(index, ctx.user.id, file_name)
await response.edit(
embed=EmbedStatics.get_index_set_success_embed(str(price))
)
except Exception as e:
await ctx.channel.send(
embed=EmbedStatics.get_index_set_failure_embed(str(e))
)
traceback.print_exc()
async def set_link_index_recurse(
self, ctx: discord.ApplicationContext, link: str, depth, user_api_key
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
response = await ctx.respond(embed=EmbedStatics.build_index_progress_embed())
try:
# Pre-emptively connect and get the content-type of the response
try:
async with aiohttp.ClientSession() as session:
async with session.get(link, timeout=2) as _response:
print(_response.status)
if _response.status == 200:
content_type = _response.headers.get("content-type")
else:
await response.edit(
embed=EmbedStatics.get_index_set_failure_embed(
"Invalid URL or could not connect to the provided URL."
)
)
return
except Exception as e:
traceback.print_exc()
await response.edit(
embed=EmbedStatics.get_index_set_failure_embed(
"Invalid URL or could not connect to the provided URL. "
+ str(e)
)
)
return
# Check if the link contains youtube in it
loader = RemoteDepthReader(depth=depth)
documents = await self.loop.run_in_executor(
None, partial(loader.load_data, [link])
)
index = await self.loop.run_in_executor(
None,
functools.partial(
                    GPTVectorStoreIndex.from_documents,
documents=documents,
service_context=service_context_no_llm,
use_async=True,
),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except:
traceback.print_exc()
price = "Unknown"
# Make the url look nice, remove https, useless stuff, random characters
file_name = (
link.replace("https://", "")
.replace("http://", "")
.replace("www.", "")
.replace("/", "_")
.replace("?", "_")
.replace("&", "_")
.replace("=", "_")
.replace("-", "_")
.replace(".", "_")
)
self.index_storage[ctx.user.id].add_index(index, ctx.user.id, file_name)
except ValueError as e:
await response.edit(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
return
except Exception as e:
await response.edit(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
return
await response.edit(embed=EmbedStatics.get_index_set_success_embed(price))
def get_query_engine(self, index, llm):
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=6,
service_context=get_service_context_with_llm(llm),
)
response_synthesizer = get_response_synthesizer(
response_mode=ResponseMode.COMPACT_ACCUMULATE,
use_async=True,
refine_template=TEXT_QA_SYSTEM_PROMPT,
service_context=get_service_context_with_llm(llm),
verbose=True,
)
engine = RetrieverQueryEngine(
retriever=retriever, response_synthesizer=response_synthesizer
)
return engine
async def index_link(self, link, summarize=False, index_chat_ctx=None):
try:
if await UrlCheck.check_youtube_link(link):
print("Indexing youtube transcript")
index = await self.loop.run_in_executor(
None,
partial(
self.index_youtube_transcript, link, service_context_no_llm
),
)
print("Indexed youtube transcript")
elif "github" in link:
index = await self.loop.run_in_executor(
None,
partial(self.index_github_repository, link, service_context_no_llm),
)
else:
index = await self.index_webpage(link, service_context_no_llm)
except Exception as e:
if index_chat_ctx:
await index_chat_ctx.reply(
"There was an error indexing your link: " + str(e)
)
return False, None
else:
raise e
summary = None
if index_chat_ctx:
try:
print("Getting transcript summary")
self.usage_service.update_usage_memory(
index_chat_ctx.guild.name, "index_chat_link", 1
)
summary = await index.as_query_engine(
response_mode="tree_summarize",
service_context=get_service_context_with_llm(
self.index_chat_chains[index_chat_ctx.channel.id].llm
),
                ).aquery(
                    f"What is a summary or general idea of this document? Be detailed in your summary but not too verbose. Your summary should be under 50 words. This summary will be used in a vector index to retrieve information about certain data. So, at a high level, the summary should describe the document in such a way that a retriever would know to select it when asked questions about it. The link was {link}. Include an easy identifier derived from the link at the end of the summary."
)
print("Got transcript summary")
engine = self.get_query_engine(
index, self.index_chat_chains[index_chat_ctx.channel.id].llm
)
# Get rid of all special characters in the link, replace periods with _
link_cleaned = "".join(
[c for c in link if c.isalpha() or c.isdigit() or c == "."]
).rstrip()
# replace .
link_cleaned = link_cleaned.replace(".", "_")
# Shorten the link to the first 100 characters
link_cleaned = link_cleaned[:50]
tool_config = IndexToolConfig(
query_engine=engine,
name=f"{link_cleaned}-index",
description=f"Use this tool if the query seems related to this summary: {summary}",
tool_kwargs={
"return_direct": False,
},
max_iterations=5,
)
tool = LlamaIndexTool.from_tool_config(tool_config)
tools = self.index_chat_chains[index_chat_ctx.channel.id].tools
tools.append(tool)
agent_chain = initialize_agent(
tools=tools,
llm=self.index_chat_chains[index_chat_ctx.channel.id].llm,
agent=AgentType.OPENAI_FUNCTIONS,
verbose=True,
agent_kwargs=self.index_chat_chains[
index_chat_ctx.channel.id
].agent_kwargs,
memory=self.index_chat_chains[index_chat_ctx.channel.id].memory,
handle_parsing_errors="Check your output and make sure it conforms!",
max_iterations=5,
)
index_chat_data = IndexChatData(
self.index_chat_chains[index_chat_ctx.channel.id].llm,
agent_chain,
self.index_chat_chains[index_chat_ctx.channel.id].memory,
index_chat_ctx.channel.id,
tools,
self.index_chat_chains[index_chat_ctx.channel.id].agent_kwargs,
self.index_chat_chains[index_chat_ctx.channel.id].llm_predictor,
)
self.index_chat_chains[index_chat_ctx.channel.id] = index_chat_data
return True, summary
except Exception as e:
await index_chat_ctx.reply(
"There was an error indexing your link: " + str(e)
)
return False, None
return index, summary
async def set_link_index(
self, ctx: discord.ApplicationContext, link: str, user_api_key
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
response = await ctx.respond(embed=EmbedStatics.build_index_progress_embed())
try:
# Check if the link contains youtube in it
index, _ = await self.index_link(link)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
price = await self.usage_service.get_price(
                    token_counter.total_embedding_token_count, "embedding"
)
except:
traceback.print_exc()
price = "Unknown"
# Make the url look nice, remove https, useless stuff, random characters
file_name = (
link.replace("https://", "")
.replace("http://", "")
.replace("www.", "")
.replace("/", "_")
.replace("?", "_")
.replace("&", "_")
.replace("=", "_")
.replace("-", "_")
.replace(".", "_")
)
self.index_storage[ctx.user.id].add_index(index, ctx.user.id, file_name)
except Exception as e:
await response.edit(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
return
await response.edit(embed=EmbedStatics.get_index_set_success_embed(price))
async def set_discord_index(
self,
ctx: discord.ApplicationContext,
channel: discord.TextChannel,
user_api_key,
message_limit: int = 2500,
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
try:
document = await self.load_data(
channel_ids=[channel.id], limit=message_limit, oldest_first=False
)
index = await self.loop.run_in_executor(
None, partial(self.index_discord, document, service_context_no_llm)
)
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except Exception:
traceback.print_exc()
price = "Unknown"
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
self.index_storage[ctx.user.id].add_index(index, ctx.user.id, channel.name)
await ctx.respond(embed=EmbedStatics.get_index_set_success_embed(price))
except Exception as e:
await ctx.respond(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
async def load_index(
self, ctx: discord.ApplicationContext, index, server, search, user_api_key
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
try:
if server:
index_file = EnvService.find_shared_file(
f"indexes/{ctx.guild.id}/{index}"
)
elif search:
index_file = EnvService.find_shared_file(
f"indexes/{ctx.user.id}_search/{index}"
)
else:
index_file = EnvService.find_shared_file(
f"indexes/{ctx.user.id}/{index}"
)
index = await self.loop.run_in_executor(
None, partial(self.index_load_file, index_file)
)
self.index_storage[ctx.user.id].queryable_index = index
await ctx.respond(embed=EmbedStatics.get_index_load_success_embed())
except Exception as e:
traceback.print_exc()
await ctx.respond(embed=EmbedStatics.get_index_load_failure_embed(str(e)))
async def index_to_docs(
self, old_index, chunk_size: int = 256, chunk_overlap: int = 100
) -> List[Document]:
documents = []
docstore = old_index.docstore
ref_docs = old_index.ref_doc_info
for document in ref_docs.values():
text = ""
for node in document.node_ids:
node = docstore.get_node(node)
text += f"{node.text} "
text_splitter = TokenTextSplitter(
separator=" ", chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
text_chunks = text_splitter.split_text(text)
for chunk_text in text_chunks:
new_doc = Document(text=chunk_text, extra_info=document.metadata)
documents.append(new_doc)
return documents
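    # Editor's note: index_to_docs re-flattens an existing index back into plain
    # Documents (re-chunked with TokenTextSplitter) so that compose_indexes below
    # can re-embed them into a single combined index. A rough sketch of the idea,
    # with hypothetical chunk sizes:
    #
    #   docs = await self.index_to_docs(old_index, chunk_size=256, chunk_overlap=20)
    #   combined = GPTVectorStoreIndex.from_documents(docs, service_context=service_context_no_llm)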
async def compose_indexes(self, user_id, indexes, name, deep_compose):
# Load all the indexes first
index_objects = []
for _index in indexes:
try:
index_file = EnvService.find_shared_file(f"indexes/{user_id}/{_index}")
except ValueError:
index_file = EnvService.find_shared_file(
f"indexes/{user_id}_search/{_index}"
)
index = await self.loop.run_in_executor(
None, partial(self.index_load_file, index_file)
)
index_objects.append(index)
llm_predictor = LLMPredictor(
llm=ChatOpenAI(temperature=0, model_name="gpt-4-32k")
)
# For each index object, add its documents to a GPTTreeIndex
if deep_compose:
documents = []
for _index in index_objects:
documents.extend(await self.index_to_docs(_index, 256, 20))
embedding_model = OpenAIEmbedding()
llm_predictor_mock = MockLLMPredictor()
embedding_model_mock = MockEmbedding(1536)
token_counter_mock = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
verbose=False,
)
callback_manager_mock = CallbackManager([token_counter_mock])
service_context_mock = ServiceContext.from_defaults(
llm_predictor=llm_predictor_mock,
embed_model=embedding_model_mock,
callback_manager=callback_manager_mock,
)
# Run the mock call first
await self.loop.run_in_executor(
None,
partial(
GPTTreeIndex.from_documents,
documents=documents,
service_context=service_context_mock,
),
)
total_usage_price = await self.usage_service.get_price(
token_counter_mock.total_llm_token_count,
"turbo", # TODO Enable again when tree indexes are fixed
) + await self.usage_service.get_price(
token_counter_mock.total_embedding_token_count, "embedding"
)
print("The total composition price is: ", total_usage_price)
if total_usage_price > MAX_DEEP_COMPOSE_PRICE:
raise ValueError(
"Doing this deep search would be prohibitively expensive. Please try a narrower search scope."
)
tree_index = await self.loop.run_in_executor(
None,
partial(
GPTTreeIndex.from_documents,
documents=documents,
service_context=self.service_context,
use_async=True,
),
)
await self.usage_service.update_usage(
self.token_counter.total_llm_token_count, "turbo"
)
await self.usage_service.update_usage(
self.token_counter.total_embedding_token_count, "embedding"
)
# Now we have a list of tree indexes, we can compose them
if not name:
name = f"{date.today().month}_{date.today().day}_composed_deep_index"
# Save the composed index
tree_index.storage_context.persist(
persist_dir=EnvService.save_path() / "indexes" / str(user_id) / name
)
self.index_storage[user_id].queryable_index = tree_index
return total_usage_price
else:
documents = []
for _index in index_objects:
documents.extend(await self.index_to_docs(_index))
simple_index = await self.loop.run_in_executor(
None,
partial(
GPTVectorStoreIndex.from_documents,
documents=documents,
service_context=service_context_no_llm,
use_async=True,
),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
if not name:
name = f"{date.today().month}_{date.today().day}_composed_index"
# Save the composed index
simple_index.storage_context.persist(
persist_dir=EnvService.save_path() / "indexes" / str(user_id) / name
)
self.index_storage[user_id].queryable_index = simple_index
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except:
price = "Unknown"
return price
async def backup_discord(
self, ctx: discord.ApplicationContext, user_api_key, message_limit
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
try:
channel_ids: List[int] = []
for c in ctx.guild.text_channels:
channel_ids.append(c.id)
document = await self.load_data(
channel_ids=channel_ids, limit=message_limit, oldest_first=False
)
index = await self.loop.run_in_executor(
None, partial(self.index_discord, document, service_context_no_llm)
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except Exception:
traceback.print_exc()
price = "Unknown"
Path(EnvService.save_path() / "indexes" / str(ctx.guild.id)).mkdir(
parents=True, exist_ok=True
)
index.storage_context.persist(
persist_dir=EnvService.save_path()
/ "indexes"
/ str(ctx.guild.id)
/ f"{ctx.guild.name.replace(' ', '-')}_{date.today().month}_{date.today().day}"
)
await ctx.respond(embed=EmbedStatics.get_index_set_success_embed(price))
except Exception as e:
await ctx.respond(embed=EmbedStatics.get_index_set_failure_embed((str(e))))
traceback.print_exc()
async def query(
self,
ctx: discord.ApplicationContext,
query: str,
response_mode,
nodes,
user_api_key,
child_branch_factor,
model="gpt-4-32k",
multistep=False,
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name=model))
ctx_response = await ctx.respond(
embed=EmbedStatics.build_index_query_progress_embed(query)
)
try:
token_counter.reset_counts()
response = await self.loop.run_in_executor(
None,
partial(
get_and_query,
ctx.user.id,
self.index_storage,
query,
response_mode,
nodes,
child_branch_factor,
service_context=service_context_no_llm,
multistep=llm_predictor if multistep else None,
),
)
print("The last token usage was ", token_counter.total_llm_token_count)
await self.usage_service.update_usage(
token_counter.total_llm_token_count,
await self.usage_service.get_cost_name(model),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
total_price = round(
await self.usage_service.get_price(
token_counter.total_llm_token_count,
await self.usage_service.get_cost_name(model),
)
+ await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
),
6,
)
except:
total_price = "Unknown"
query_response_message = f"**Query:**\n\n`{query.strip()}`\n\n**Query response:**\n\n{response.response.strip()}"
query_response_message = query_response_message.replace(
"<|endofstatement|>", ""
)
embed_pages = await self.paginate_embed(query_response_message)
paginator = pages.Paginator(
pages=embed_pages,
timeout=None,
author_check=False,
)
await ctx_response.edit(
embed=EmbedStatics.build_index_query_success_embed(query, total_price)
)
await paginator.respond(ctx.interaction)
except Exception:
traceback.print_exc()
await ctx_response.edit(
embed=EmbedStatics.get_index_query_failure_embed(
"Failed to send query. You may not have an index set, load an index with /index load"
)
)
# Extracted functions from DiscordReader
async def read_channel(
self, channel_id: int, limit: Optional[int], oldest_first: bool
) -> str:
"""Async read channel."""
messages: List[discord.Message] = []
try:
channel = self.bot.get_channel(channel_id)
print(f"Added {channel.name} from {channel.guild.name}")
# only work for text channels for now
if not isinstance(channel, discord.TextChannel):
raise ValueError(
f"Channel {channel_id} is not a text channel. "
"Only text channels are supported for now."
)
# thread_dict maps thread_id to thread
thread_dict = {}
for thread in channel.threads:
thread_dict[thread.id] = thread
async for msg in channel.history(limit=limit, oldest_first=oldest_first):
if msg.author.bot:
pass
else:
messages.append(msg)
if msg.id in thread_dict:
thread = thread_dict[msg.id]
async for thread_msg in thread.history(
limit=limit, oldest_first=oldest_first
):
messages.append(thread_msg)
except Exception as e:
print("Encountered error: " + str(e))
channel = self.bot.get_channel(channel_id)
msg_txt_list = [
f"user:{m.author.display_name}, content:{m.content}" for m in messages
]
return ("<|endofstatement|>\n\n".join(msg_txt_list), channel.name)
async def load_data(
self,
channel_ids: List[int],
limit: Optional[int] = None,
oldest_first: bool = True,
    ) -> List[Document]:
        """Load messages from the given Discord channels as documents.
Args:
channel_ids (List[int]): List of channel ids to read.
limit (Optional[int]): Maximum number of messages to read.
oldest_first (bool): Whether to read oldest messages first.
Defaults to `True`.
Returns:
List[Document]: List of documents.
"""
results: List[Document] = []
for channel_id in channel_ids:
if not isinstance(channel_id, int):
raise ValueError(
f"Channel id {channel_id} must be an integer, "
f"not {type(channel_id)}."
)
(channel_content, channel_name) = await self.read_channel(
channel_id, limit=limit, oldest_first=oldest_first
)
results.append(
Document(
text=channel_content, extra_info={"channel_name": channel_name}
)
)
return results
async def compose(self, ctx: discord.ApplicationContext, name, user_api_key):
# Send the ComposeModal
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
if not self.index_storage[ctx.user.id].has_indexes(ctx.user.id):
await ctx.respond(
embed=EmbedStatics.get_index_compose_failure_embed(
"You must have at least one index to compose."
)
)
return
        await ctx.respond(
            "Select the index(es) to compose. You can compose multiple indexes together, or Deep Compose a single index.",
view=ComposeModal(self, ctx.user.id, name),
ephemeral=True,
)
class ComposeModal(discord.ui.View):
def __init__(self, index_cog, user_id, name=None, deep=None) -> None:
super().__init__()
# Get the argument named "user_key_db" and save it as USER_KEY_DB
self.index_cog = index_cog
self.user_id = user_id
self.deep = deep
# Get all the indexes for the user
self.indexes = [
file
for file in os.listdir(
EnvService.find_shared_file(f"indexes/{str(user_id)}/")
)
]
if index_cog.index_storage[user_id].has_search_indexes(user_id):
self.indexes.extend(
[
file
for file in os.listdir(
EnvService.find_shared_file(f"indexes/{str(user_id)}_search/")
)
]
)
print("Found the indexes, they are ", self.indexes)
# Map everything into the short to long cache
for index in self.indexes:
if len(index) > 93:
index_name = index[:93] + "-" + str(random.randint(0000, 9999))
SHORT_TO_LONG_CACHE[index_name] = index
else:
SHORT_TO_LONG_CACHE[index[:99]] = index
# Reverse the SHORT_TO_LONG_CACHE index
LONG_TO_SHORT_CACHE = {v: k for k, v in SHORT_TO_LONG_CACHE.items()}
# A text entry field for the name of the composed index
self.name = name
# A discord UI select menu with all the indexes. Limited to 25 entries. For the label field in the SelectOption,
# cut it off at 100 characters to prevent the message from being too long
self.index_select = discord.ui.Select(
placeholder="Select index(es) to compose",
options=[
discord.SelectOption(
label=LONG_TO_SHORT_CACHE[index], value=LONG_TO_SHORT_CACHE[index]
)
for index in self.indexes
][0:25],
max_values=len(self.indexes) if len(self.indexes) < 25 else 25,
min_values=1,
)
# Add the select menu to the modal
self.add_item(self.index_select)
        # If we have more than 25 entries, add more Select fields as necessary
self.extra_index_selects = []
if len(self.indexes) > 25:
for i in range(25, len(self.indexes), 25):
self.extra_index_selects.append(
discord.ui.Select(
placeholder="Select index(es) to compose",
options=[
discord.SelectOption(
label=LONG_TO_SHORT_CACHE[index],
value=LONG_TO_SHORT_CACHE[index],
)
for index in self.indexes
][i : i + 25],
max_values=len(self.indexes[i : i + 25]),
min_values=1,
)
)
self.add_item(self.extra_index_selects[-1])
# Add an input field for "Deep", a "yes" or "no" option, default no
self.deep_select = discord.ui.Select(
placeholder="Deep Compose",
options=[
discord.SelectOption(label="Yes", value="yes"),
discord.SelectOption(label="No", value="no"),
],
max_values=1,
min_values=1,
)
self.add_item(self.deep_select)
# Add a button to the modal called "Compose"
self.add_item(
discord.ui.Button(
label="Compose", style=discord.ButtonStyle.green, custom_id="compose"
)
)
# The callback for the button
async def interaction_check(self, interaction: discord.Interaction) -> bool:
# Check that the interaction was for custom_id "compose"
if interaction.data["custom_id"] == "compose":
# Check that the user selected at least one index
# The total list of indexes is the union of the values of all the select menus
indexes = self.index_select.values + [
select.values[0] for select in self.extra_index_selects
]
# Remap them from the SHORT_TO_LONG_CACHE
indexes = [SHORT_TO_LONG_CACHE[index] for index in indexes]
if len(indexes) < 1:
await interaction.response.send_message(
embed=EmbedStatics.get_index_compose_failure_embed(
"You must select at least 1 index"
),
ephemeral=True,
)
else:
composing_message = await interaction.response.send_message(
embed=EmbedStatics.get_index_compose_progress_embed(),
ephemeral=True,
)
# Compose the indexes
try:
price = await self.index_cog.compose_indexes(
self.user_id,
indexes,
self.name,
(
False
if not self.deep_select.values
or self.deep_select.values[0] == "no"
else True
),
)
except ValueError as e:
await interaction.followup.send(
str(e), ephemeral=True, delete_after=180
)
return False
except Exception as e:
traceback.print_exc()
await interaction.followup.send(
embed=EmbedStatics.get_index_compose_failure_embed(
"An error occurred while composing the indexes: " + str(e)
),
ephemeral=True,
delete_after=180,
)
return False
await interaction.followup.send(
embed=EmbedStatics.get_index_compose_success_embed(price),
ephemeral=True,
delete_after=180,
)
# Try to direct message the user that their composed index is ready
try:
await self.index_cog.bot.get_user(self.user_id).send(
f"Your composed index is ready! You can load it with /index load now in the server."
)
except discord.Forbidden:
pass
try:
composing_message: Interaction
await composing_message.delete_original_response()
except:
traceback.print_exc()
else:
await interaction.response.defer(ephemeral=True)
| [
"llama_index.langchain_helpers.agents.IndexToolConfig",
"llama_index.download_loader",
"llama_index.retrievers.TreeSelectLeafRetriever",
"llama_index.GithubRepositoryReader",
"llama_index.langchain_helpers.text_splitter.TokenTextSplitter",
"llama_index.BeautifulSoupWebReader",
"llama_index.langchain_helpers.agents.LlamaIndexTool.from_tool_config",
"llama_index.callbacks.CallbackManager",
"llama_index.readers.schema.base.Document",
"llama_index.OpenAIEmbedding",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.StorageContext.from_defaults",
"llama_index.MockEmbedding",
"llama_index.GoogleDocsReader",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.indices.query.query_transform.StepDecomposeQueryTransform",
"llama_index.query_engine.RetrieverQueryEngine",
"llama_index.SimpleDirectoryReader",
"llama_index.get_response_synthesizer",
"llama_index.ServiceContext.from_defaults",
"llama_index.QuestionAnswerPrompt",
"llama_index.load_index_from_storage",
"llama_index.MockLLMPredictor",
"llama_index.readers.YoutubeTranscriptReader"
] | [((2731, 2770), 'services.environment_service.EnvService.get_max_deep_compose_price', 'EnvService.get_max_deep_compose_price', ([], {}), '()\n', (2768, 2770), False, 'from services.environment_service import EnvService\n'), ((2784, 2813), 'llama_index.download_loader', 'download_loader', (['"""EpubReader"""'], {}), "('EpubReader')\n", (2799, 2813), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((2831, 2864), 'llama_index.download_loader', 'download_loader', (['"""MarkdownReader"""'], {}), "('MarkdownReader')\n", (2846, 2864), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((2880, 2911), 'llama_index.download_loader', 'download_loader', (['"""RemoteReader"""'], {}), "('RemoteReader')\n", (2895, 2911), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((2932, 2968), 'llama_index.download_loader', 'download_loader', (['"""RemoteDepthReader"""'], {}), "('RemoteDepthReader')\n", (2947, 2968), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((2988, 3005), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (3003, 3005), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((3273, 3305), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (3288, 3305), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler\n'), ((3331, 3453), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embedding_model', 'callback_manager': 'callback_manager', 'node_parser': 'node_parser'}), '(embed_model=embedding_model, callback_manager=\n callback_manager, node_parser=node_parser)\n', (3359, 3453), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, 
get_response_synthesizer, VectorStoreIndex\n'), ((3474, 3518), 'httpx.Timeout', 'httpx.Timeout', (['(1)'], {'read': '(1)', 'write': '(1)', 'connect': '(1)'}), '(1, read=1, write=1, connect=1)\n', (3487, 3518), False, 'import httpx\n'), ((3582, 3713), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embedding_model', 'callback_manager': 'callback_manager', 'node_parser': 'node_parser', 'llm': 'llm'}), '(embed_model=embedding_model, callback_manager=\n callback_manager, node_parser=node_parser, llm=llm)\n', (3610, 3713), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((4561, 4703), 'llama_index.get_response_synthesizer', 'get_response_synthesizer', ([], {'response_mode': 'response_mode', 'use_async': '(True)', 'refine_template': 'CHAT_REFINE_PROMPT', 'service_context': 'service_context'}), '(response_mode=response_mode, use_async=True,\n refine_template=CHAT_REFINE_PROMPT, service_context=service_context)\n', (4585, 4703), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((4759, 4848), 'llama_index.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'response_synthesizer': 'response_synthesizer'}), '(retriever=retriever, response_synthesizer=\n response_synthesizer)\n', (4779, 4848), False, 'from llama_index.query_engine import RetrieverQueryEngine, MultiStepQueryEngine, RetryGuidelineQueryEngine\n'), ((8371, 8388), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (8386, 8388), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((8688, 8720), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (8703, 8720), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler\n'), ((8743, 8865), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embedding_model', 'callback_manager': 'callback_manager', 'node_parser': 'node_parser'}), '(embed_model=embedding_model, callback_manager=\n callback_manager, node_parser=node_parser)\n', (8771, 8865), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((3199, 3251), 'llama_index.langchain_helpers.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(1024)', 'chunk_overlap': 
'(20)'}), '(chunk_size=1024, chunk_overlap=20)\n', (3216, 3251), False, 'from llama_index.langchain_helpers.text_splitter import TokenTextSplitter\n'), ((4232, 4347), 'llama_index.retrievers.TreeSelectLeafRetriever', 'TreeSelectLeafRetriever', ([], {'index': 'index', 'child_branch_factor': 'child_branch_factor', 'service_context': 'service_context'}), '(index=index, child_branch_factor=\n child_branch_factor, service_context=service_context)\n', (4255, 4347), False, 'from llama_index.retrievers import VectorIndexRetriever, TreeSelectLeafRetriever\n'), ((4420, 4515), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': 'nodes', 'service_context': 'service_context'}), '(index=index, similarity_top_k=nodes, service_context=\n service_context)\n', (4440, 4515), False, 'from llama_index.retrievers import VectorIndexRetriever, TreeSelectLeafRetriever\n'), ((10748, 10773), 'os.getenv', 'os.getenv', (['"""OPENAI_TOKEN"""'], {}), "('OPENAI_TOKEN')\n", (10757, 10773), False, 'import os\n'), ((10803, 10825), 'collections.defaultdict', 'defaultdict', (['IndexData'], {}), '(IndexData)\n', (10814, 10825), False, 'from collections import defaultdict\n'), ((10846, 10872), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (10870, 10872), False, 'import asyncio\n'), ((10940, 11301), 'llama_index.QuestionAnswerPrompt', 'QuestionAnswerPrompt', (['"""Context information is below. The text \'<|endofstatement|>\' is used to separate chat entries and make it easier for you to understand the context\n---------------------\n{context_str}\n---------------------\nNever say \'<|endofstatement|>\'\nGiven the context information and not prior knowledge, answer the question: {query_str}\n"""'], {}), '(\n """Context information is below. 
The text \'<|endofstatement|>\' is used to separate chat entries and make it easier for you to understand the context\n---------------------\n{context_str}\n---------------------\nNever say \'<|endofstatement|>\'\nGiven the context information and not prior knowledge, answer the question: {query_str}\n"""\n )\n', (10960, 11301), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((11518, 11531), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (11529, 11531), False, 'from collections import defaultdict\n'), ((11675, 11717), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['original_path'], {}), '(original_path)\n', (11702, 11717), False, 'from services.environment_service import EnvService\n'), ((18915, 18991), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': 'model', 'temperature': 'temperature', 'top_p': 'top_p', 'max_retries': '(2)'}), '(model=model, temperature=temperature, top_p=top_p, max_retries=2)\n', (18925, 18991), False, 'from langchain.chat_models import ChatOpenAI\n'), ((19226, 19380), 'langchain.memory.ConversationSummaryBufferMemory', 'ConversationSummaryBufferMemory', ([], {'memory_key': '"""memory"""', 'return_messages': '(True)', 'llm': 'llm', 'max_token_limit': "(100000 if 'preview' in model else max_token_limit)"}), "(memory_key='memory', return_messages=True,\n llm=llm, max_token_limit=100000 if 'preview' in model else max_token_limit)\n", (19257, 19380), False, 'from langchain.memory import ConversationSummaryBufferMemory\n'), ((21043, 21249), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'tools': 'tools', 'llm': 'llm', 'agent': 'AgentType.OPENAI_FUNCTIONS', 'verbose': '(True)', 'agent_kwargs': 'agent_kwargs', 'memory': 'memory', 'handle_parsing_errors': '"""Check your output and make sure it conforms!"""'}), "(tools=tools, llm=llm, agent=AgentType.OPENAI_FUNCTIONS,\n verbose=True, agent_kwargs=agent_kwargs, memory=memory,\n handle_parsing_errors='Check your output and make sure it conforms!')\n", (21059, 21249), False, 'from langchain.agents import initialize_agent, AgentType\n'), ((21443, 21687), 'discord.Embed', 'discord.Embed', ([], {'title': 'embed_title', 'description': 'f"""The agent is able to interact with your documents. Simply drag your documents into discord or give the agent a link from where to download the documents.\nModel: {model}"""', 'color': '(39259)'}), '(title=embed_title, description=\n f"""The agent is able to interact with your documents. 
Simply drag your documents into discord or give the agent a link from where to download the documents.\nModel: {model}"""\n , color=39259)\n', (21456, 21687), False, 'import discord\n'), ((23850, 23948), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['document'], {'service_context': 'service_context', 'use_async': '(True)'}), '(document, service_context=\n service_context, use_async=True)\n', (23884, 23948), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((24134, 24232), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['document'], {'service_context': 'service_context', 'use_async': '(True)'}), '(document, service_context=\n service_context, use_async=True)\n', (24168, 24232), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((25156, 25255), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context', 'use_async': '(True)'}), '(documents, service_context=\n service_context, use_async=True)\n', (25190, 25255), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((25833, 25932), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context', 'use_async': '(True)'}), '(documents, service_context=\n service_context, use_async=True)\n', (25867, 25932), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((26107, 26158), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'file_path'}), '(persist_dir=file_path)\n', (26135, 26158), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((26175, 26215), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (26198, 26215), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, 
GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((26333, 26431), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['document'], {'service_context': 'service_context', 'use_async': '(True)'}), '(document, service_context=\n service_context, use_async=True)\n', (26367, 26431), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((36388, 36477), 'llama_index.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'response_synthesizer': 'response_synthesizer'}), '(retriever=retriever, response_synthesizer=\n response_synthesizer)\n', (36408, 36477), False, 'from llama_index.query_engine import RetrieverQueryEngine, MultiStepQueryEngine, RetryGuidelineQueryEngine\n'), ((3058, 3105), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""text-davinci-003"""'], {}), "('text-davinci-003')\n", (3085, 3105), False, 'import tiktoken\n'), ((4969, 5007), 'llama_index.indices.query.query_transform.StepDecomposeQueryTransform', 'StepDecomposeQueryTransform', (['multistep'], {}), '(multistep)\n', (4996, 5007), False, 'from llama_index.indices.query.query_transform import StepDecomposeQueryTransform\n'), ((8606, 8658), 'llama_index.langchain_helpers.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(1024)', 'chunk_overlap': '(20)'}), '(chunk_size=1024, chunk_overlap=20)\n', (8623, 8658), False, 'from llama_index.langchain_helpers.text_splitter import TokenTextSplitter\n'), ((13484, 13522), 'aiofiles.tempfile.TemporaryDirectory', 'aiofiles.tempfile.TemporaryDirectory', ([], {}), '()\n', (13520, 13522), False, 'import aiofiles\n'), ((19576, 20507), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': '"""You are a superpowered version of GPT that is able to answer questions about the data you\'re connected to. Each different tool you have represents a different dataset to interact with. If you are asked to perform a task that spreads across multiple datasets, use multiple tools for the same prompt. When the user types links in chat, you will have already been connected to the data at the link by the time you respond. When using tools, the input should be clearly created based on the request of the user. For example, if a user uploads an invoice and asks how many usage hours of X was present in the invoice, a good query is \'X hours\'. Avoid using single word queries unless the request is very simple. You can query multiple times to break down complex requests and retrieve more information. When calling functions, no special characters are allowed in the function name, keep that in mind."""'}), '(content=\n "You are a superpowered version of GPT that is able to answer questions about the data you\'re connected to. Each different tool you have represents a different dataset to interact with. If you are asked to perform a task that spreads across multiple datasets, use multiple tools for the same prompt. 
When the user types links in chat, you will have already been connected to the data at the link by the time you respond. When using tools, the input should be clearly created based on the request of the user. For example, if a user uploads an invoice and asks how many usage hours of X was present in the invoice, a good query is \'X hours\'. Avoid using single word queries unless the request is very simple. You can query multiple times to break down complex requests and retrieve more information. When calling functions, no special characters are allowed in the function name, keep that in mind."\n )\n', (19589, 20507), False, 'from langchain.schema import SystemMessage\n'), ((20703, 20868), 'langchain.tools.Tool', 'Tool', ([], {'name': '"""Dummy-Tool-Do-Not-Use"""', 'func': 'dummy_tool', 'description': 'f"""This is a dummy tool that does nothing, do not ever mention this tool or use this tool."""'}), "(name='Dummy-Tool-Do-Not-Use', func=dummy_tool, description=\n f'This is a dummy tool that does nothing, do not ever mention this tool or use this tool.'\n )\n", (20707, 20868), False, 'from langchain.tools import Tool\n'), ((22169, 22227), 'utils.safe_ctx_respond.safe_ctx_respond', 'safe_ctx_respond', ([], {'ctx': 'ctx', 'content': '"""Conversation started."""'}), "(ctx=ctx, content='Conversation started.')\n", (22185, 22227), False, 'from utils.safe_ctx_respond import safe_ctx_respond\n'), ((23326, 23344), 'discord.ext.pages.append', 'pages.append', (['page'], {}), '(page)\n', (23338, 23344), False, 'from discord.ext import pages\n'), ((26633, 26656), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (26654, 26656), False, 'import aiohttp\n'), ((46236, 46325), 'llama_index.langchain_helpers.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'separator': '""" """', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(separator=' ', chunk_size=chunk_size, chunk_overlap=\n chunk_overlap)\n", (46253, 46325), False, 'from llama_index.langchain_helpers.text_splitter import TokenTextSplitter\n'), ((47599, 47616), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (47614, 47616), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((47651, 47669), 'llama_index.MockLLMPredictor', 'MockLLMPredictor', ([], {}), '()\n', (47667, 47669), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((47705, 47724), 'llama_index.MockEmbedding', 'MockEmbedding', (['(1536)'], {}), '(1536)\n', (47718, 47724), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((47945, 47982), 'llama_index.callbacks.CallbackManager', 'CallbackManager', 
(['[token_counter_mock]'], {}), '([token_counter_mock])\n', (47960, 47982), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler\n'), ((48019, 48160), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor_mock', 'embed_model': 'embedding_model_mock', 'callback_manager': 'callback_manager_mock'}), '(llm_predictor=llm_predictor_mock, embed_model=\n embedding_model_mock, callback_manager=callback_manager_mock)\n', (48047, 48160), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((55795, 55863), 'discord.ext.pages.Paginator', 'pages.Paginator', ([], {'pages': 'embed_pages', 'timeout': 'None', 'author_check': '(False)'}), '(pages=embed_pages, timeout=None, author_check=False)\n', (55810, 55863), False, 'from discord.ext import pages\n'), ((63797, 63889), 'discord.ui.Button', 'discord.ui.Button', ([], {'label': '"""Compose"""', 'style': 'discord.ButtonStyle.green', 'custom_id': '"""compose"""'}), "(label='Compose', style=discord.ButtonStyle.green,\n custom_id='compose')\n", (63814, 63889), False, 'import discord\n'), ((7687, 7736), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{user_id}"""'], {}), "(f'indexes/{user_id}')\n", (7714, 7736), False, 'from services.environment_service import EnvService\n'), ((7966, 8022), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{user_id}_search"""'], {}), "(f'indexes/{user_id}_search')\n", (7993, 8022), False, 'from services.environment_service import EnvService\n'), ((8304, 8325), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (8323, 8325), False, 'import traceback\n'), ((8449, 8496), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""text-davinci-003"""'], {}), "('text-davinci-003')\n", (8476, 8496), False, 'import tiktoken\n'), ((12023, 12044), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (12042, 12044), False, 'import traceback\n'), ((12905, 12977), 'functools.partial', 'partial', (['self.index_chat_chains[ctx.channel.id].agent_chain.run', 'message'], {}), '(self.index_chat_chains[ctx.channel.id].agent_chain.run, message)\n', (12912, 12977), False, 'from functools import partial\n'), ((13560, 13645), 'aiofiles.tempfile.NamedTemporaryFile', 'aiofiles.tempfile.NamedTemporaryFile', ([], {'suffix': 'suffix', 'dir': 'temp_path', 'delete': '(False)'}), '(suffix=suffix, dir=temp_path, delete=False\n )\n', (13596, 13645), False, 'import aiofiles\n'), ((19068, 19134), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'temperature', 'top_p': 'top_p', 'model_name': 'model'}), '(temperature=temperature, top_p=top_p, model_name=model)\n', (19078, 19134), False, 'from langchain.chat_models import ChatOpenAI\n'), ((19500, 19543), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""memory"""'}), "(variable_name='memory')\n", (19519, 19543), False, 'from langchain.prompts import MessagesPlaceholder\n'), ((23007, 23069), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Index Query Results"""', 'description': 'chunk'}), "(title=f'Index Query 
Results', description=chunk)\n", (23020, 23069), False, 'import discord\n'), ((23199, 23254), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Page {count}"""', 'description': 'chunk'}), "(title=f'Page {count}', description=chunk)\n", (23212, 23254), False, 'import discord\n'), ((24081, 24099), 'llama_index.GoogleDocsReader', 'GoogleDocsReader', ([], {}), '()\n', (24097, 24099), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((27234, 27277), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[f.name]'}), '(input_files=[f.name])\n', (27255, 27277), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((27543, 27566), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (27564, 27566), False, 'import aiohttp\n'), ((28806, 28827), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (28825, 28827), False, 'import traceback\n'), ((28904, 28971), 'llama_index.BeautifulSoupWebReader', 'BeautifulSoupWebReader', ([], {'website_extractor': 'DEFAULT_WEBSITE_EXTRACTOR'}), '(website_extractor=DEFAULT_WEBSITE_EXTRACTOR)\n', (28926, 28971), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((29186, 29313), 'functools.partial', 'functools.partial', (['GPTVectorStoreIndex.from_documents'], {'documents': 'documents', 'service_context': 'service_context', 'use_async': '(True)'}), '(GPTVectorStoreIndex.from_documents, documents=documents,\n service_context=service_context, use_async=True)\n', (29203, 29313), False, 'import functools\n'), ((30898, 30936), 'aiofiles.tempfile.TemporaryDirectory', 'aiofiles.tempfile.TemporaryDirectory', ([], {}), '()\n', (30934, 30936), False, 'import aiofiles\n'), ((32352, 32373), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (32371, 32373), False, 'import traceback\n'), ((35560, 35581), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (35579, 35581), False, 'import traceback\n'), ((35733, 35754), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (35752, 35754), False, 'import traceback\n'), ((36629, 36662), 'models.check_model.UrlCheck.check_youtube_link', 'UrlCheck.check_youtube_link', (['link'], {}), '(link)\n', (36656, 36662), False, 'from models.check_model import UrlCheck\n'), ((39345, 39564), 'llama_index.langchain_helpers.agents.IndexToolConfig', 'IndexToolConfig', ([], {'query_engine': 'engine', 'name': 'f"""{link_cleaned}-index"""', 'description': 'f"""Use this tool if the query seems related to this summary: {summary}"""', 'tool_kwargs': "{'return_direct': False}", 'max_iterations': '(5)'}), "(query_engine=engine, 
name=f'{link_cleaned}-index',\n description=\n f'Use this tool if the query seems related to this summary: {summary}',\n tool_kwargs={'return_direct': False}, max_iterations=5)\n", (39360, 39564), False, 'from llama_index.langchain_helpers.agents import IndexToolConfig, LlamaToolkit, create_llama_chat_agent, LlamaIndexTool\n'), ((39742, 39786), 'llama_index.langchain_helpers.agents.LlamaIndexTool.from_tool_config', 'LlamaIndexTool.from_tool_config', (['tool_config'], {}), '(tool_config)\n', (39773, 39786), False, 'from llama_index.langchain_helpers.agents import IndexToolConfig, LlamaToolkit, create_llama_chat_agent, LlamaIndexTool\n'), ((39934, 40324), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'tools': 'tools', 'llm': 'self.index_chat_chains[index_chat_ctx.channel.id].llm', 'agent': 'AgentType.OPENAI_FUNCTIONS', 'verbose': '(True)', 'agent_kwargs': 'self.index_chat_chains[index_chat_ctx.channel.id].agent_kwargs', 'memory': 'self.index_chat_chains[index_chat_ctx.channel.id].memory', 'handle_parsing_errors': '"""Check your output and make sure it conforms!"""', 'max_iterations': '(5)'}), "(tools=tools, llm=self.index_chat_chains[index_chat_ctx.\n channel.id].llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True,\n agent_kwargs=self.index_chat_chains[index_chat_ctx.channel.id].\n agent_kwargs, memory=self.index_chat_chains[index_chat_ctx.channel.id].\n memory, handle_parsing_errors=\n 'Check your output and make sure it conforms!', max_iterations=5)\n", (39950, 40324), False, 'from langchain.agents import initialize_agent, AgentType\n'), ((42978, 42999), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (42997, 42999), False, 'import traceback\n'), ((44522, 44543), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (44541, 44543), False, 'import traceback\n'), ((44939, 45001), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{ctx.guild.id}/{index}"""'], {}), "(f'indexes/{ctx.guild.id}/{index}')\n", (44966, 45001), False, 'from services.environment_service import EnvService\n'), ((45671, 45692), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (45690, 45692), False, 'import traceback\n'), ((46478, 46533), 'llama_index.readers.schema.base.Document', 'Document', ([], {'text': 'chunk_text', 'extra_info': 'document.metadata'}), '(text=chunk_text, extra_info=document.metadata)\n', (46486, 46533), False, 'from llama_index.readers.schema.base import Document\n'), ((46819, 46877), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{user_id}/{_index}"""'], {}), "(f'indexes/{user_id}/{_index}')\n", (46846, 46877), False, 'from services.environment_service import EnvService\n'), ((47269, 47318), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-4-32k"""'}), "(temperature=0, model_name='gpt-4-32k')\n", (47279, 47318), False, 'from langchain.chat_models import ChatOpenAI\n'), ((53270, 53291), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (53289, 53291), False, 'import traceback\n'), ((53791, 53834), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': 'model'}), '(temperature=0, model_name=model)\n', (53801, 53834), False, 'from langchain.chat_models import ChatOpenAI\n'), ((56156, 56177), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (56175, 56177), False, 'import traceback\n'), ((59155, 59228), 
'llama_index.readers.schema.base.Document', 'Document', ([], {'text': 'channel_content', 'extra_info': "{'channel_name': channel_name}"}), "(text=channel_content, extra_info={'channel_name': channel_name})\n", (59163, 59228), False, 'from llama_index.readers.schema.base import Document\n'), ((7102, 7114), 'datetime.date.today', 'date.today', ([], {}), '()\n', (7112, 7114), False, 'from datetime import date\n'), ((7123, 7135), 'datetime.date.today', 'date.today', ([], {}), '()\n', (7133, 7135), False, 'from datetime import date\n'), ((11916, 11935), 'pathlib.Path', 'Path', (['original_path'], {}), '(original_path)\n', (11920, 11935), False, 'from pathlib import Path\n'), ((16619, 16830), 'llama_index.langchain_helpers.agents.IndexToolConfig', 'IndexToolConfig', ([], {'query_engine': 'engine', 'name': 'f"""{filename}-index"""', 'description': 'f"""Use this tool if the query seems related to this summary: {summary}"""', 'tool_kwargs': "{'return_direct': False}", 'max_iterations': '(5)'}), "(query_engine=engine, name=f'{filename}-index', description=\n f'Use this tool if the query seems related to this summary: {summary}',\n tool_kwargs={'return_direct': False}, max_iterations=5)\n", (16634, 16830), False, 'from llama_index.langchain_helpers.agents import IndexToolConfig, LlamaToolkit, create_llama_chat_agent, LlamaIndexTool\n'), ((17048, 17092), 'llama_index.langchain_helpers.agents.LlamaIndexTool.from_tool_config', 'LlamaIndexTool.from_tool_config', (['tool_config'], {}), '(tool_config)\n', (17079, 17092), False, 'from llama_index.langchain_helpers.agents import IndexToolConfig, LlamaToolkit, create_llama_chat_agent, LlamaIndexTool\n'), ((17245, 17592), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'tools': 'tools', 'llm': 'self.index_chat_chains[message.channel.id].llm', 'agent': 'AgentType.OPENAI_FUNCTIONS', 'verbose': '(True)', 'agent_kwargs': 'self.index_chat_chains[message.channel.id].agent_kwargs', 'memory': 'self.index_chat_chains[message.channel.id].memory', 'handle_parsing_errors': '"""Check your output and make sure it conforms!"""'}), "(tools=tools, llm=self.index_chat_chains[message.channel.id\n ].llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True, agent_kwargs=\n self.index_chat_chains[message.channel.id].agent_kwargs, memory=self.\n index_chat_chains[message.channel.id].memory, handle_parsing_errors=\n 'Check your output and make sure it conforms!')\n", (17261, 17592), False, 'from langchain.agents import initialize_agent, AgentType\n'), ((18841, 18890), 'models.embed_statics_model.EmbedStatics.get_index_chat_preparation_message', 'EmbedStatics.get_index_chat_preparation_message', ([], {}), '()\n', (18888, 18890), False, 'from models.embed_statics_model import EmbedStatics\n'), ((24914, 24939), 'llama_index.readers.YoutubeTranscriptReader', 'YoutubeTranscriptReader', ([], {}), '()\n', (24937, 24939), False, 'from llama_index.readers import YoutubeTranscriptReader\n'), ((25561, 25607), 'llama_index.GithubRepositoryReader', 'GithubRepositoryReader', ([], {'owner': 'owner', 'repo': 'repo'}), '(owner=owner, repo=repo)\n', (25583, 25607), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((26838, 26894), 'tempfile.NamedTemporaryFile', 
'tempfile.NamedTemporaryFile', ([], {'suffix': '""".pdf"""', 'delete': '(False)'}), "(suffix='.pdf', delete=False)\n", (26865, 26894), False, 'import tempfile\n'), ((30978, 31063), 'aiofiles.tempfile.NamedTemporaryFile', 'aiofiles.tempfile.NamedTemporaryFile', ([], {'suffix': 'suffix', 'dir': 'temp_path', 'delete': '(False)'}), '(suffix=suffix, dir=temp_path, delete=False\n )\n', (31014, 31063), False, 'import aiofiles\n'), ((31886, 31907), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (31905, 31907), False, 'import traceback\n'), ((32754, 32795), 'models.embed_statics_model.EmbedStatics.build_index_progress_embed', 'EmbedStatics.build_index_progress_embed', ([], {}), '()\n', (32793, 32795), False, 'from models.embed_statics_model import EmbedStatics\n'), ((32931, 32954), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (32952, 32954), False, 'import aiohttp\n'), ((33623, 33644), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (33642, 33644), False, 'import traceback\n'), ((34114, 34147), 'functools.partial', 'partial', (['loader.load_data', '[link]'], {}), '(loader.load_data, [link])\n', (34121, 34147), False, 'from functools import partial\n'), ((34253, 34373), 'functools.partial', 'functools.partial', (['GPTVectorStoreIndex'], {'documents': 'documents', 'service_context': 'service_context_no_llm', 'use_async': '(True)'}), '(GPTVectorStoreIndex, documents=documents, service_context\n =service_context_no_llm, use_async=True)\n', (34270, 34373), False, 'import functools\n'), ((34827, 34848), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (34846, 34848), False, 'import traceback\n'), ((35809, 35856), 'models.embed_statics_model.EmbedStatics.get_index_set_success_embed', 'EmbedStatics.get_index_set_success_embed', (['price'], {}), '(price)\n', (35849, 35856), False, 'from models.embed_statics_model import EmbedStatics\n'), ((41745, 41786), 'models.embed_statics_model.EmbedStatics.build_index_progress_embed', 'EmbedStatics.build_index_progress_embed', ([], {}), '()\n', (41784, 41786), False, 'from models.embed_statics_model import EmbedStatics\n'), ((42246, 42267), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (42265, 42267), False, 'import traceback\n'), ((43054, 43101), 'models.embed_statics_model.EmbedStatics.get_index_set_success_embed', 'EmbedStatics.get_index_set_success_embed', (['price'], {}), '(price)\n', (43094, 43101), False, 'from models.embed_statics_model import EmbedStatics\n'), ((43736, 43797), 'functools.partial', 'partial', (['self.index_discord', 'document', 'service_context_no_llm'], {}), '(self.index_discord, document, service_context_no_llm)\n', (43743, 43797), False, 'from functools import partial\n'), ((44028, 44049), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (44047, 44049), False, 'import traceback\n'), ((45094, 45162), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{ctx.user.id}_search/{index}"""'], {}), "(f'indexes/{ctx.user.id}_search/{index}')\n", (45121, 45162), False, 'from services.environment_service import EnvService\n'), ((45248, 45309), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{ctx.user.id}/{index}"""'], {}), "(f'indexes/{ctx.user.id}/{index}')\n", (45275, 45309), False, 'from services.environment_service import EnvService\n'), ((45423, 45464), 'functools.partial', 'partial', (['self.index_load_file', 'index_file'], {}), 
'(self.index_load_file, index_file)\n', (45430, 45464), False, 'from functools import partial\n'), ((46938, 47003), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{user_id}_search/{_index}"""'], {}), "(f'indexes/{user_id}_search/{_index}')\n", (46965, 47003), False, 'from services.environment_service import EnvService\n'), ((47118, 47159), 'functools.partial', 'partial', (['self.index_load_file', 'index_file'], {}), '(self.index_load_file, index_file)\n', (47125, 47159), False, 'from functools import partial\n'), ((48341, 48441), 'functools.partial', 'partial', (['GPTTreeIndex.from_documents'], {'documents': 'documents', 'service_context': 'service_context_mock'}), '(GPTTreeIndex.from_documents, documents=documents, service_context=\n service_context_mock)\n', (48348, 48441), False, 'from functools import partial\n'), ((49269, 49385), 'functools.partial', 'partial', (['GPTTreeIndex.from_documents'], {'documents': 'documents', 'service_context': 'self.service_context', 'use_async': '(True)'}), '(GPTTreeIndex.from_documents, documents=documents, service_context=\n self.service_context, use_async=True)\n', (49276, 49385), False, 'from functools import partial\n'), ((50492, 50616), 'functools.partial', 'partial', (['GPTVectorStoreIndex.from_documents'], {'documents': 'documents', 'service_context': 'service_context_no_llm', 'use_async': '(True)'}), '(GPTVectorStoreIndex.from_documents, documents=documents,\n service_context=service_context_no_llm, use_async=True)\n', (50499, 50616), False, 'from functools import partial\n'), ((52163, 52224), 'functools.partial', 'partial', (['self.index_discord', 'document', 'service_context_no_llm'], {}), '(self.index_discord, document, service_context_no_llm)\n', (52170, 52224), False, 'from functools import partial\n'), ((52591, 52612), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (52610, 52612), False, 'import traceback\n'), ((53897, 53949), 'models.embed_statics_model.EmbedStatics.build_index_query_progress_embed', 'EmbedStatics.build_index_query_progress_embed', (['query'], {}), '(query)\n', (53942, 53949), False, 'from models.embed_statics_model import EmbedStatics\n'), ((54109, 54311), 'functools.partial', 'partial', (['get_and_query', 'ctx.user.id', 'self.index_storage', 'query', 'response_mode', 'nodes', 'child_branch_factor'], {'service_context': 'service_context_no_llm', 'multistep': '(llm_predictor if multistep else None)'}), '(get_and_query, ctx.user.id, self.index_storage, query,\n response_mode, nodes, child_branch_factor, service_context=\n service_context_no_llm, multistep=llm_predictor if multistep else None)\n', (54116, 54311), False, 'from functools import partial\n'), ((63481, 63527), 'discord.SelectOption', 'discord.SelectOption', ([], {'label': '"""Yes"""', 'value': '"""yes"""'}), "(label='Yes', value='yes')\n", (63501, 63527), False, 'import discord\n'), ((63545, 63589), 'discord.SelectOption', 'discord.SelectOption', ([], {'label': '"""No"""', 'value': '"""no"""'}), "(label='No', value='no')\n", (63565, 63589), False, 'import discord\n'), ((6286, 6335), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{user_id}"""'], {}), "(f'indexes/{user_id}')\n", (6313, 6335), False, 'from services.environment_service import EnvService\n'), ((6537, 6593), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{user_id}_search"""'], {}), 
"(f'indexes/{user_id}_search')\n", (6564, 6593), False, 'from services.environment_service import EnvService\n'), ((7790, 7846), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{user_id}/{file}"""'], {}), "(f'indexes/{user_id}/{file}')\n", (7817, 7846), False, 'from services.environment_service import EnvService\n'), ((7892, 7913), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7911, 7913), False, 'import traceback\n'), ((8114, 8177), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{user_id}_search/{file}"""'], {}), "(f'indexes/{user_id}_search/{file}')\n", (8141, 8177), False, 'from services.environment_service import EnvService\n'), ((8244, 8265), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (8263, 8265), False, 'import traceback\n'), ((18637, 18658), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (18656, 18658), False, 'import traceback\n'), ((23775, 23821), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[file_path]'}), '(input_files=[file_path])\n', (23796, 23821), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((25712, 25758), 'llama_index.GithubRepositoryReader', 'GithubRepositoryReader', ([], {'owner': 'owner', 'repo': 'repo'}), '(owner=owner, repo=repo)\n', (25734, 25758), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((30818, 30859), 'models.embed_statics_model.EmbedStatics.build_index_progress_embed', 'EmbedStatics.build_index_progress_embed', ([], {}), '()\n', (30857, 30859), False, 'from models.embed_statics_model import EmbedStatics\n'), ((36820, 36888), 'functools.partial', 'partial', (['self.index_youtube_transcript', 'link', 'service_context_no_llm'], {}), '(self.index_youtube_transcript, link, service_context_no_llm)\n', (36827, 36888), False, 'from functools import partial\n'), ((44344, 44391), 'models.embed_statics_model.EmbedStatics.get_index_set_success_embed', 'EmbedStatics.get_index_set_success_embed', (['price'], {}), '(price)\n', (44384, 44391), False, 'from models.embed_statics_model import EmbedStatics\n'), ((45583, 45626), 'models.embed_statics_model.EmbedStatics.get_index_load_success_embed', 'EmbedStatics.get_index_load_success_embed', ([], {}), '()\n', (45624, 45626), False, 'from models.embed_statics_model import EmbedStatics\n'), ((47807, 47854), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""text-davinci-003"""'], {}), "('text-davinci-003')\n", (47834, 47854), False, 'import tiktoken\n'), ((53090, 53137), 'models.embed_statics_model.EmbedStatics.get_index_set_success_embed', 'EmbedStatics.get_index_set_success_embed', (['price'], {}), '(price)\n', (53130, 53137), False, 'from models.embed_statics_model import EmbedStatics\n'), ((55986, 56050), 
'models.embed_statics_model.EmbedStatics.build_index_query_success_embed', 'EmbedStatics.build_index_query_success_embed', (['query', 'total_price'], {}), '(query, total_price)\n', (56030, 56050), False, 'from models.embed_statics_model import EmbedStatics\n'), ((59758, 59855), 'models.embed_statics_model.EmbedStatics.get_index_compose_failure_embed', 'EmbedStatics.get_index_compose_failure_embed', (['"""You must have at least one index to compose."""'], {}), "(\n 'You must have at least one index to compose.')\n", (59802, 59855), False, 'from models.embed_statics_model import EmbedStatics\n'), ((61269, 61292), 'random.randint', 'random.randint', (['(0)', '(9999)'], {}), '(0, 9999)\n', (61283, 61292), False, 'import random\n'), ((61987, 62080), 'discord.SelectOption', 'discord.SelectOption', ([], {'label': 'LONG_TO_SHORT_CACHE[index]', 'value': 'LONG_TO_SHORT_CACHE[index]'}), '(label=LONG_TO_SHORT_CACHE[index], value=\n LONG_TO_SHORT_CACHE[index])\n', (62007, 62080), False, 'import discord\n'), ((65878, 65899), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (65897, 65899), False, 'import traceback\n'), ((67068, 67089), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (67087, 67089), False, 'import traceback\n'), ((6932, 6954), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (6952, 6954), False, 'from services.environment_service import EnvService\n'), ((7328, 7350), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (7348, 7350), False, 'from services.environment_service import EnvService\n'), ((30615, 30675), 'models.embed_statics_model.EmbedStatics.get_index_set_failure_embed', 'EmbedStatics.get_index_set_failure_embed', (['"""Unsupported file"""'], {}), "('Unsupported file')\n", (30655, 30675), False, 'from models.embed_statics_model import EmbedStatics\n'), ((37144, 37211), 'functools.partial', 'partial', (['self.index_github_repository', 'link', 'service_context_no_llm'], {}), '(self.index_github_repository, link, service_context_no_llm)\n', (37151, 37211), False, 'from functools import partial\n'), ((49890, 49902), 'datetime.date.today', 'date.today', ([], {}), '()\n', (49900, 49902), False, 'from datetime import date\n'), ((49911, 49923), 'datetime.date.today', 'date.today', ([], {}), '()\n', (49921, 49923), False, 'from datetime import date\n'), ((50916, 50928), 'datetime.date.today', 'date.today', ([], {}), '()\n', (50926, 50928), False, 'from datetime import date\n'), ((50937, 50949), 'datetime.date.today', 'date.today', ([], {}), '()\n', (50947, 50949), False, 'from datetime import date\n'), ((56237, 56376), 'models.embed_statics_model.EmbedStatics.get_index_query_failure_embed', 'EmbedStatics.get_index_query_failure_embed', (['"""Failed to send query. You may not have an index set, load an index with /index load"""'], {}), "(\n 'Failed to send query. 
You may not have an index set, load an index with /index load'\n )\n", (56279, 56376), False, 'from models.embed_statics_model import EmbedStatics\n'), ((64697, 64782), 'models.embed_statics_model.EmbedStatics.get_index_compose_failure_embed', 'EmbedStatics.get_index_compose_failure_embed', (['"""You must select at least 1 index"""'], {}), "('You must select at least 1 index'\n )\n", (64741, 64782), False, 'from models.embed_statics_model import EmbedStatics\n'), ((65000, 65047), 'models.embed_statics_model.EmbedStatics.get_index_compose_progress_embed', 'EmbedStatics.get_index_compose_progress_embed', ([], {}), '()\n', (65045, 65047), False, 'from models.embed_statics_model import EmbedStatics\n'), ((66356, 66407), 'models.embed_statics_model.EmbedStatics.get_index_compose_success_embed', 'EmbedStatics.get_index_compose_success_embed', (['price'], {}), '(price)\n', (66400, 66407), False, 'from models.embed_statics_model import EmbedStatics\n'), ((14362, 14382), 'pathlib.Path', 'Path', (['temp_file.name'], {}), '(temp_file.name)\n', (14366, 14382), False, 'from pathlib import Path\n'), ((31360, 31380), 'pathlib.Path', 'Path', (['temp_file.name'], {}), '(temp_file.name)\n', (31364, 31380), False, 'from pathlib import Path\n'), ((50065, 50087), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (50085, 50087), False, 'from services.environment_service import EnvService\n'), ((51088, 51110), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (51108, 51110), False, 'from services.environment_service import EnvService\n'), ((52664, 52686), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (52684, 52686), False, 'from services.environment_service import EnvService\n'), ((52856, 52878), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (52876, 52878), False, 'from services.environment_service import EnvService\n'), ((28402, 28529), 'functools.partial', 'functools.partial', (['GPTVectorStoreIndex.from_documents'], {'documents': 'documents', 'service_context': 'service_context', 'use_async': '(True)'}), '(GPTVectorStoreIndex.from_documents, documents=documents,\n service_context=service_context, use_async=True)\n', (28419, 28529), False, 'import functools\n'), ((52999, 53011), 'datetime.date.today', 'date.today', ([], {}), '()\n', (53009, 53011), False, 'from datetime import date\n'), ((53020, 53032), 'datetime.date.today', 'date.today', ([], {}), '()\n', (53030, 53032), False, 'from datetime import date\n'), ((62799, 62892), 'discord.SelectOption', 'discord.SelectOption', ([], {'label': 'LONG_TO_SHORT_CACHE[index]', 'value': 'LONG_TO_SHORT_CACHE[index]'}), '(label=LONG_TO_SHORT_CACHE[index], value=\n LONG_TO_SHORT_CACHE[index])\n', (62819, 62892), False, 'import discord\n'), ((33339, 33441), 'models.embed_statics_model.EmbedStatics.get_index_set_failure_embed', 'EmbedStatics.get_index_set_failure_embed', (['"""Invalid URL or could not connect to the provided URL."""'], {}), "(\n 'Invalid URL or could not connect to the provided URL.')\n", (33379, 33441), False, 'from models.embed_statics_model import EmbedStatics\n')] |
import os
from langchain import OpenAI
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, download_loader, SQLDatabase, GPTSQLStructStoreIndex
import sqlalchemy
import time
DatabaseReader = download_loader('DatabaseReader')
databasePath = f'sqlite:///{os.path.dirname(__file__)}/vulns.db'
print('Reading database: ' + databasePath)
dbEngine = sqlalchemy.create_engine(databasePath)
sql_database = SQLDatabase(dbEngine, include_tables=["processed_references"])
# NOTE: the table_name specified here is the table that you
# want to extract into from unstructured documents.
index = GPTSQLStructStoreIndex(
[],
sql_database=sql_database,
table_name="processed_references",
)
response = index.query('Tell me what would be required to exploit GHSA-9j49-mfvp-vmhm in practice')
print(response)
# sqliteReader = DatabaseReader(
# engine=dbEngine
# )
#
# query = f"""
# SELECT normalized_content FROM processed_references WHERE vulnerability_id = 'GHSA-9j49-mfvp-vmhm' UNION SELECT normalized_content FROM processed_references LIMIT 100;
# """
# documents = sqliteReader.load_data(query=query)
# documents = SimpleDirectoryReader('data').load_data()
# llm_predictor = LLMPredictor(llm=OpenAI(model_name="davinci-instruct-beta:2.0.0"))
#
# savePath = f'/{os.path.dirname(__file__)}/../indexes/index.json'
# #
# # index = GPTSimpleVectorIndex(documents)#, llm_predictor=llm_predictor)
# # index.save_to_disk(savePath)
#
# index = GPTSimpleVectorIndex.load_from_disk(savePath)
#
#
# response = index.query("Summarize the vulnerability CVE-2021-23406", response_mode="tree_summarize")
# print(response)
| [
"llama_index.GPTSQLStructStoreIndex",
"llama_index.SQLDatabase",
"llama_index.download_loader"
] | [((223, 256), 'llama_index.download_loader', 'download_loader', (['"""DatabaseReader"""'], {}), "('DatabaseReader')\n", (238, 256), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, download_loader, SQLDatabase, GPTSQLStructStoreIndex\n'), ((372, 410), 'sqlalchemy.create_engine', 'sqlalchemy.create_engine', (['databasePath'], {}), '(databasePath)\n', (396, 410), False, 'import sqlalchemy\n'), ((427, 489), 'llama_index.SQLDatabase', 'SQLDatabase', (['dbEngine'], {'include_tables': "['processed_references']"}), "(dbEngine, include_tables=['processed_references'])\n", (438, 489), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, download_loader, SQLDatabase, GPTSQLStructStoreIndex\n'), ((611, 704), 'llama_index.GPTSQLStructStoreIndex', 'GPTSQLStructStoreIndex', (['[]'], {'sql_database': 'sql_database', 'table_name': '"""processed_references"""'}), "([], sql_database=sql_database, table_name=\n 'processed_references')\n", (633, 704), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, download_loader, SQLDatabase, GPTSQLStructStoreIndex\n'), ((285, 310), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (300, 310), False, 'import os\n')] |
# https://blog.streamlit.io/build-a-chatbot-with-custom-data-sources-powered-by-llamaindex/
import os
import streamlit as st
from llama_index.core import ServiceContext, Document, SimpleDirectoryReader, VectorStoreIndex, Settings
from llama_index.llms.ollama import Ollama
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
OLLAMA_HOST = os.getenv('OLLAMA_HOST', 'localhost')
print(f"Connecting to ollama server {OLLAMA_HOST}")
# connect to ollama service running on OpenShift
my_llm = Ollama(model="zephyr", base_url="http://"+OLLAMA_HOST+":11434")
system_prompt = \
"You are Linuxbot, an expert on Linux and Linus Torvalds and your job is to answer questions about these two topics." \
"Assume that all questions are related to Linus Torvalds or Linux." \
"Keep your answers to a few sentences and based on context – do not hallucinate facts." \
"Always try to cite your source document."
st.title("Linuxbot 🐧🤖")
st.subheader("Everything you want to know about Linux or Linus")
if "messages" not in st.session_state.keys(): # Initialize the chat message history
st.session_state.messages = [
{"role": "assistant", "content": "Ask me a question about Linus or Linux"}
]
@st.cache_resource(show_spinner=False)
def load_data(_llm):
with st.spinner(text="Loading and indexing the document data – might take 1-2 minutes."):
reader = SimpleDirectoryReader(input_dir="./docs", recursive=True)
docs = reader.load_data()
        # ServiceContext is deprecated ...
# Also see https://docs.llamaindex.ai/en/stable/examples/embeddings/huggingface.html
        Settings.llm = _llm
Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
index = VectorStoreIndex.from_documents(docs)
return index
index = load_data(my_llm)
chat_engine = index.as_chat_engine(
chat_mode="context", verbose=True, system_prompt=system_prompt
)
if prompt := st.chat_input("Ask me a question about Linus or Linux"):
st.session_state.messages.append({"role": "user", "content": prompt})
# Display previous chat messages
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.write(message["content"])
# Generate a new response if last message is not from assistant
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Querying..."):
streaming_response = chat_engine.stream_chat(prompt)
placeholder = st.empty()
full_response = ''
for token in streaming_response.response_gen:
full_response += token
placeholder.markdown(full_response)
placeholder.markdown(full_response)
message = {"role": "assistant", "content": full_response}
st.session_state.messages.append(message) # Add response to message history
| [
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.ollama.Ollama",
"llama_index.core.VectorStoreIndex.from_documents"
] | [((357, 394), 'os.getenv', 'os.getenv', (['"""OLLAMA_HOST"""', '"""localhost"""'], {}), "('OLLAMA_HOST', 'localhost')\n", (366, 394), False, 'import os\n'), ((506, 573), 'llama_index.llms.ollama.Ollama', 'Ollama', ([], {'model': '"""zephyr"""', 'base_url': "('http://' + OLLAMA_HOST + ':11434')"}), "(model='zephyr', base_url='http://' + OLLAMA_HOST + ':11434')\n", (512, 573), False, 'from llama_index.llms.ollama import Ollama\n'), ((929, 952), 'streamlit.title', 'st.title', (['"""Linuxbot 🐧🤖"""'], {}), "('Linuxbot 🐧🤖')\n", (937, 952), True, 'import streamlit as st\n'), ((953, 1017), 'streamlit.subheader', 'st.subheader', (['"""Everything you want to know about Linux or Linus"""'], {}), "('Everything you want to know about Linux or Linus')\n", (965, 1017), True, 'import streamlit as st\n'), ((1228, 1265), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (1245, 1265), True, 'import streamlit as st\n'), ((1040, 1063), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (1061, 1063), True, 'import streamlit as st\n'), ((1977, 2032), 'streamlit.chat_input', 'st.chat_input', (['"""Ask me a question about Linus or Linux"""'], {}), "('Ask me a question about Linus or Linux')\n", (1990, 2032), True, 'import streamlit as st\n'), ((2039, 2108), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (2071, 2108), True, 'import streamlit as st\n'), ((1296, 1384), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Loading and indexing the document data – might take 1-2 minutes."""'}), "(text=\n 'Loading and indexing the document data – might take 1-2 minutes.')\n", (1306, 1384), True, 'import streamlit as st\n'), ((1398, 1455), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '"""./docs"""', 'recursive': '(True)'}), "(input_dir='./docs', recursive=True)\n", (1419, 1455), False, 'from llama_index.core import ServiceContext, Document, SimpleDirectoryReader, VectorStoreIndex, Settings\n'), ((1695, 1752), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""BAAI/bge-small-en-v1.5"""'}), "(model_name='BAAI/bge-small-en-v1.5')\n", (1715, 1752), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((1769, 1806), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {}), '(docs)\n', (1800, 1806), False, 'from llama_index.core import ServiceContext, Document, SimpleDirectoryReader, VectorStoreIndex, Settings\n'), ((2194, 2226), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (2209, 2226), True, 'import streamlit as st\n'), ((2236, 2264), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (2244, 2264), True, 'import streamlit as st\n'), ((2396, 2424), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2411, 2424), True, 'import streamlit as st\n'), ((2439, 2464), 'streamlit.spinner', 'st.spinner', (['"""Querying..."""'], {}), "('Querying...')\n", (2449, 2464), True, 'import streamlit as st\n'), ((2557, 2567), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (2565, 2567), True, 'import streamlit as st\n'), ((2878, 2919), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', 
(2910, 2919), True, 'import streamlit as st\n')] |
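The chat flow above is bound to the Streamlit session state. As a quick sanity check outside Streamlit, the same model and embedding setup can be exercised from a plain script; the sketch below reuses the app's assumptions (an Ollama server reachable via OLLAMA_HOST and a ./docs directory) and swaps the stateful chat engine for a one-shot query engine. The question string is only illustrative.
# Minimal non-Streamlit sketch reusing the same model/embedding setup as the app above.
# Assumes an Ollama server at OLLAMA_HOST and a ./docs directory, as the app does.
import os
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings
from llama_index.llms.ollama import Ollama
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

Settings.llm = Ollama(model="zephyr", base_url="http://" + os.getenv("OLLAMA_HOST", "localhost") + ":11434")
Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")

docs = SimpleDirectoryReader(input_dir="./docs", recursive=True).load_data()
index = VectorStoreIndex.from_documents(docs)

# as_query_engine() gives one-shot Q&A instead of the stateful chat engine used by the app.
response = index.as_query_engine().query("Who created the Linux kernel?")  # illustrative question
print(response)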
import argparse
import logging
import sys
import re
import os
import requests
from pathlib import Path
from urllib.parse import urlparse
from llama_index import ServiceContext, StorageContext
from llama_index import set_global_service_context
from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document
from llama_index.llms import OpenAI
from llama_index.readers.file.flat_reader import FlatReader
from llama_index.vector_stores import MilvusVectorStore
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index.node_parser.text import SentenceWindowNodeParser
from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole, PromptTemplate
from llama_index.postprocessor import MetadataReplacementPostProcessor
from llama_index.postprocessor import SentenceTransformerRerank
#from llama_index.indices import ZillizCloudPipelineIndex
from custom.zilliz.base import ZillizCloudPipelineIndex
from llama_index.indices.query.schema import QueryBundle
from llama_index.schema import BaseNode, ImageNode, MetadataMode
from custom.history_sentence_window import HistorySentenceWindowNodeParser
from custom.llms.QwenLLM import QwenUnofficial
from custom.llms.GeminiLLM import Gemini
from custom.llms.proxy_model import ProxyModel
from pymilvus import MilvusClient
QA_PROMPT_TMPL_STR = (
"请你仔细阅读相关内容,结合历史资料进行回答,每一条史资料使用'出处:《书名》原文内容'的形式标注 (如果回答请清晰无误地引用原文,先给出回答,再贴上对应的原文,使用《书名》[]对原文进行标识),,如果发现资料无法得到答案,就回答不知道 \n"
"搜索的相关历史资料如下所示.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"问题: {query_str}\n"
"答案: "
)
QA_SYSTEM_PROMPT = "你是一个严谨的历史知识问答智能体,你会仔细阅读历史材料并给出准确的回答,你的回答都会非常准确,因为你在回答的之后,使用在《书名》[]内给出原文用来支撑你回答的证据.并且你会在开头说明原文是否有回答所需的知识"
REFINE_PROMPT_TMPL_STR = (
"你是一个历史知识回答修正机器人,你严格按以下方式工作"
"1.只有原答案为不知道时才进行修正,否则输出原答案的内容\n"
"2.修正的时候为了体现你的精准和客观,你非常喜欢使用《书名》[]将原文展示出来.\n"
"3.如果感到疑惑的时候,就用原答案的内容回答。"
"新的知识: {context_msg}\n"
"问题: {query_str}\n"
"原答案: {existing_answer}\n"
"新答案: "
)
def is_valid_url(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc])
except ValueError:
return False
def is_github_folder_url(url):
return url.startswith('https://raw.githubusercontent.com/') and '.' not in os.path.basename(url)
def get_branch_head_sha(owner, repo, branch):
url = f"https://api.github.com/repos/{owner}/{repo}/git/ref/heads/{branch}"
response = requests.get(url)
data = response.json()
sha = data['object']['sha']
return sha
def get_github_repo_contents(repo_url):
# repo_url example: https://raw.githubusercontent.com/wxywb/history_rag/master/data/history_24/
repo_owner = repo_url.split('/')[3]
repo_name = repo_url.split('/')[4]
branch = repo_url.split('/')[5]
folder_path = '/'.join(repo_url.split('/')[6:])
sha = get_branch_head_sha(repo_owner, repo_name, branch)
url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/git/trees/{sha}?recursive=1"
try:
response = requests.get(url)
if response.status_code == 200:
data = response.json()
raw_urls = []
for file in data['tree']:
if file['path'].startswith(folder_path) and file['path'].endswith('.txt'):
raw_url = f"https://raw.githubusercontent.com/{repo_owner}/{repo_name}/{branch}/{file['path']}"
raw_urls.append(raw_url)
return raw_urls
else:
print(f"Failed to fetch contents. Status code: {response.status_code}")
except Exception as e:
print(f"Failed to fetch contents. Error: {str(e)}")
return []
class Executor:
def __init__(self, model):
pass
def build_index(self, path, overwrite):
pass
def build_query_engine(self):
pass
def delete_file(self, path):
pass
def query(self, question):
pass
class MilvusExecutor(Executor):
def __init__(self, config):
self.index = None
self.query_engine = None
self.config = config
self.node_parser = HistorySentenceWindowNodeParser.from_defaults(
sentence_splitter=lambda text: re.findall("[^,.;。?!]+[,.;。?!]?", text),
window_size=config.milvus.window_size,
window_metadata_key="window",
original_text_metadata_key="original_text",)
embed_model = HuggingFaceEmbedding(model_name=config.embedding.name)
        # Use the Qwen (Tongyi Qianwen) model
if config.llm.name.find("qwen") != -1:
llm = QwenUnofficial(temperature=config.llm.temperature, model=config.llm.name, max_tokens=2048)
elif config.llm.name.find("gemini") != -1:
llm = Gemini(temperature=config.llm.temperature, model_name=config.llm.name, max_tokens=2048)
elif 'proxy_model' in config.llm:
llm = ProxyModel(model_name=config.llm.name, api_base=config.llm.api_base, api_key=config.llm.api_key,
temperature=config.llm.temperature, max_tokens=2048)
print(f"使用{config.llm.name},PROXY_SERVER_URL为{config.llm.api_base},PROXY_API_KEY为{config.llm.api_key}")
else:
api_base = None
if 'api_base' in config.llm:
api_base = config.llm.api_base
llm = OpenAI(api_base = api_base, temperature=config.llm.temperature, model=config.llm.name, max_tokens=2048)
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
set_global_service_context(service_context)
rerank_k = config.milvus.rerank_topk
self.rerank_postprocessor = SentenceTransformerRerank(
model=config.rerank.name, top_n=rerank_k)
self._milvus_client = None
self._debug = False
def set_debug(self, mode):
self._debug = mode
def build_index(self, path, overwrite):
config = self.config
vector_store = MilvusVectorStore(
uri = f"http://{config.milvus.host}:{config.milvus.port}",
collection_name = config.milvus.collection_name,
overwrite=overwrite,
dim=config.embedding.dim)
self._milvus_client = vector_store.milvusclient
if path.endswith('.txt'):
if os.path.exists(path) is False:
print(f'(rag) 没有找到文件{path}')
return
else:
documents = FlatReader().load_data(Path(path))
documents[0].metadata['file_name'] = documents[0].metadata['filename']
elif os.path.isfile(path):
print('(rag) 目前仅支持txt文件')
elif os.path.isdir(path):
if os.path.exists(path) is False:
print(f'(rag) 没有找到目录{path}')
return
else:
documents = SimpleDirectoryReader(path).load_data()
else:
return
storage_context = StorageContext.from_defaults(vector_store=vector_store)
nodes = self.node_parser.get_nodes_from_documents(documents)
self.index = VectorStoreIndex(nodes, storage_context=storage_context, show_progress=True)
def _get_index(self):
config = self.config
vector_store = MilvusVectorStore(
uri = f"http://{config.milvus.host}:{config.milvus.port}",
collection_name = config.milvus.collection_name,
dim=config.embedding.dim)
self.index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
self._milvus_client = vector_store.milvusclient
def build_query_engine(self):
config = self.config
if self.index is None:
self._get_index()
self.query_engine = self.index.as_query_engine(node_postprocessors=[
self.rerank_postprocessor,
MetadataReplacementPostProcessor(target_metadata_key="window")
])
self.query_engine._retriever.similarity_top_k=config.milvus.retrieve_topk
message_templates = [
ChatMessage(content=QA_SYSTEM_PROMPT, role=MessageRole.SYSTEM),
ChatMessage(
content=QA_PROMPT_TMPL_STR,
role=MessageRole.USER,
),
]
chat_template = ChatPromptTemplate(message_templates=message_templates)
self.query_engine.update_prompts(
{"response_synthesizer:text_qa_template": chat_template}
)
self.query_engine._response_synthesizer._refine_template.conditionals[0][1].message_templates[0].content = REFINE_PROMPT_TMPL_STR
def delete_file(self, path):
config = self.config
if self._milvus_client is None:
self._get_index()
num_entities_prev = self._milvus_client.query(collection_name='history_rag',filter="",output_fields=["count(*)"])[0]["count(*)"]
res = self._milvus_client.delete(collection_name=config.milvus.collection_name, filter=f"file_name=='{path}'")
num_entities = self._milvus_client.query(collection_name='history_rag',filter="",output_fields=["count(*)"])[0]["count(*)"]
print(f'(rag) 现有{num_entities}条,删除{num_entities_prev - num_entities}条数据')
def query(self, question):
if self.index is None:
self._get_index()
if question.endswith('?') or question.endswith('?'):
question = question[:-1]
if self._debug is True:
contexts = self.query_engine.retrieve(QueryBundle(question))
for i, context in enumerate(contexts):
print(f'{question}', i)
content = context.node.get_content(metadata_mode=MetadataMode.LLM)
print(content)
print('-------------------------------------------------------参考资料---------------------------------------------------------')
response = self.query_engine.query(question)
return response
class PipelineExecutor(Executor):
def __init__(self, config):
self.ZILLIZ_CLUSTER_ID = os.getenv("ZILLIZ_CLUSTER_ID")
self.ZILLIZ_TOKEN = os.getenv("ZILLIZ_TOKEN")
self.ZILLIZ_PROJECT_ID = os.getenv("ZILLIZ_PROJECT_ID")
self.ZILLIZ_CLUSTER_ENDPOINT = f"https://{self.ZILLIZ_CLUSTER_ID}.api.gcp-us-west1.zillizcloud.com"
self.config = config
if len(self.ZILLIZ_CLUSTER_ID) == 0:
print('ZILLIZ_CLUSTER_ID 参数为空')
exit()
if len(self.ZILLIZ_TOKEN) == 0:
print('ZILLIZ_TOKEN 参数为空')
exit()
self.config = config
self._debug = False
if config.llm.name.find("qwen") != -1:
llm = QwenUnofficial(temperature=config.llm.temperature, model=config.llm.name, max_tokens=2048)
elif config.llm.name.find("gemini") != -1:
llm = Gemini(model_name=config.llm.name, temperature=config.llm.temperature, max_tokens=2048)
else:
api_base = None
if 'api_base' in config.llm:
api_base = config.llm.api_base
llm = OpenAI(api_base = api_base, temperature=config.llm.temperature, model=config.llm.name, max_tokens=2048)
service_context = ServiceContext.from_defaults(llm=llm, embed_model=None)
self.service_context = service_context
set_global_service_context(service_context)
self._initialize_pipeline(service_context)
#rerank_k = config.rerankl
#self.rerank_postprocessor = SentenceTransformerRerank(
# model="BAAI/bge-reranker-large", top_n=rerank_k)
def set_debug(self, mode):
self._debug = mode
def _initialize_pipeline(self, service_context: ServiceContext):
config = self.config
try:
self.index = ZillizCloudPipelineIndex(
project_id = self.ZILLIZ_PROJECT_ID,
cluster_id=self.ZILLIZ_CLUSTER_ID,
token=self.ZILLIZ_TOKEN,
collection_name=config.pipeline.collection_name,
service_context=service_context,
)
if len(self._list_pipeline_ids()) == 0:
self.index.create_pipelines(
metadata_schema={"digest_from":"VarChar"}, chunk_size=self.config.pipeline.chunk_size
)
except Exception as e:
print('(rag) zilliz pipeline 连接异常', str(e))
exit()
try:
self._milvus_client = MilvusClient(
uri=self.ZILLIZ_CLUSTER_ENDPOINT,
token=self.ZILLIZ_TOKEN
)
except Exception as e:
print('(rag) zilliz cloud 连接异常', str(e))
def build_index(self, path, overwrite):
config = self.config
if not is_valid_url(path) or 'github' not in path:
print('(rag) 不是一个合法的url,请尝试`https://raw.githubusercontent.com/wxywb/history_rag/master/data/history_24/baihuasanguozhi.txt`')
return
if overwrite == True:
self._milvus_client.drop_collection(config.pipeline.collection_name)
pipeline_ids = self._list_pipeline_ids()
self._delete_pipeline_ids(pipeline_ids)
self._initialize_pipeline(self.service_context)
if is_github_folder_url(path):
urls = get_github_repo_contents(path)
for url in urls:
print(f'(rag) 正在构建索引 {url}')
self.build_index(url, False) # already deleted original collection
elif path.endswith('.txt'):
self.index.insert_doc_url(
url=path,
metadata={"digest_from": HistorySentenceWindowNodeParser.book_name(os.path.basename(path))},
)
else:
print('(rag) 只有github上以txt结尾或文件夹可以被支持。')
def build_query_engine(self):
config = self.config
self.query_engine = self.index.as_query_engine(
search_top_k=config.pipeline.retrieve_topk)
message_templates = [
ChatMessage(content=QA_SYSTEM_PROMPT, role=MessageRole.SYSTEM),
ChatMessage(
content=QA_PROMPT_TMPL_STR,
role=MessageRole.USER,
),
]
chat_template = ChatPromptTemplate(message_templates=message_templates)
self.query_engine.update_prompts(
{"response_synthesizer:text_qa_template": chat_template}
)
self.query_engine._response_synthesizer._refine_template.conditionals[0][1].message_templates[0].content = REFINE_PROMPT_TMPL_STR
def delete_file(self, path):
config = self.config
if self._milvus_client is None:
            self._initialize_pipeline(self.service_context)
num_entities_prev = self._milvus_client.query(collection_name='history_rag',filter="",output_fields=["count(*)"])[0]["count(*)"]
res = self._milvus_client.delete(collection_name=config.milvus.collection_name, filter=f"doc_name=='{path}'")
num_entities = self._milvus_client.query(collection_name='history_rag',filter="",output_fields=["count(*)"])[0]["count(*)"]
print(f'(rag) 现有{num_entities}条,删除{num_entities_prev - num_entities}条数据')
def query(self, question):
if self.index is None:
            self._initialize_pipeline(self.service_context)
if question.endswith("?") or question.endswith("?"):
question = question[:-1]
if self._debug is True:
contexts = self.query_engine.retrieve(QueryBundle(question))
for i, context in enumerate(contexts):
print(f'{question}', i)
content = context.node.get_content(metadata_mode=MetadataMode.LLM)
print(content)
print('-------------------------------------------------------参考资料---------------------------------------------------------')
response = self.query_engine.query(question)
return response
def _list_pipeline_ids(self):
url = f"https://controller.api.gcp-us-west1.zillizcloud.com/v1/pipelines?projectId={self.ZILLIZ_PROJECT_ID}"
headers = {
"Authorization": f"Bearer {self.ZILLIZ_TOKEN}",
"Accept": "application/json",
"Content-Type": "application/json",
}
collection_name = self.config.milvus.collection_name
response = requests.get(url, headers=headers)
if response.status_code != 200:
raise RuntimeError(response.text)
response_dict = response.json()
if response_dict["code"] != 200:
raise RuntimeError(response_dict)
pipeline_ids = []
for pipeline in response_dict['data']:
if collection_name in pipeline['name']:
pipeline_ids.append(pipeline['pipelineId'])
return pipeline_ids
def _delete_pipeline_ids(self, pipeline_ids):
for pipeline_id in pipeline_ids:
url = f"https://controller.api.gcp-us-west1.zillizcloud.com/v1/pipelines/{pipeline_id}/"
headers = {
"Authorization": f"Bearer {self.ZILLIZ_TOKEN}",
"Accept": "application/json",
"Content-Type": "application/json",
}
response = requests.delete(url, headers=headers)
if response.status_code != 200:
raise RuntimeError(response.text)
| [
"llama_index.SimpleDirectoryReader",
"llama_index.postprocessor.SentenceTransformerRerank",
"llama_index.ServiceContext.from_defaults",
"llama_index.prompts.ChatMessage",
"llama_index.vector_stores.MilvusVectorStore",
"llama_index.llms.OpenAI",
"llama_index.readers.file.flat_reader.FlatReader",
"llama_index.StorageContext.from_defaults",
"llama_index.indices.query.schema.QueryBundle",
"llama_index.set_global_service_context",
"llama_index.postprocessor.MetadataReplacementPostProcessor",
"llama_index.prompts.ChatPromptTemplate",
"llama_index.VectorStoreIndex",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.embeddings.HuggingFaceEmbedding"
] | [((2448, 2465), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2460, 2465), False, 'import requests\n'), ((2063, 2076), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (2071, 2076), False, 'from urllib.parse import urlparse\n'), ((3032, 3049), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3044, 3049), False, 'import requests\n'), ((4423, 4477), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'config.embedding.name'}), '(model_name=config.embedding.name)\n', (4443, 4477), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((5451, 5513), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (5479, 5513), False, 'from llama_index import ServiceContext, StorageContext\n'), ((5522, 5565), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (5548, 5565), False, 'from llama_index import set_global_service_context\n'), ((5647, 5714), 'llama_index.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'model': 'config.rerank.name', 'top_n': 'rerank_k'}), '(model=config.rerank.name, top_n=rerank_k)\n', (5672, 5714), False, 'from llama_index.postprocessor import SentenceTransformerRerank\n'), ((5955, 6132), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'uri': 'f"""http://{config.milvus.host}:{config.milvus.port}"""', 'collection_name': 'config.milvus.collection_name', 'overwrite': 'overwrite', 'dim': 'config.embedding.dim'}), "(uri=f'http://{config.milvus.host}:{config.milvus.port}',\n collection_name=config.milvus.collection_name, overwrite=overwrite, dim\n =config.embedding.dim)\n", (5972, 6132), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((6938, 6993), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (6966, 6993), False, 'from llama_index import ServiceContext, StorageContext\n'), ((7084, 7160), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'storage_context': 'storage_context', 'show_progress': '(True)'}), '(nodes, storage_context=storage_context, show_progress=True)\n', (7100, 7160), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document\n'), ((7240, 7391), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'uri': 'f"""http://{config.milvus.host}:{config.milvus.port}"""', 'collection_name': 'config.milvus.collection_name', 'dim': 'config.embedding.dim'}), "(uri=f'http://{config.milvus.host}:{config.milvus.port}',\n collection_name=config.milvus.collection_name, dim=config.embedding.dim)\n", (7257, 7391), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((7450, 7511), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (7484, 7511), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document\n'), ((8241, 8296), 'llama_index.prompts.ChatPromptTemplate', 'ChatPromptTemplate', ([], {'message_templates': 'message_templates'}), '(message_templates=message_templates)\n', (8259, 8296), False, 'from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole, PromptTemplate\n'), ((9980, 10010), 'os.getenv', 'os.getenv', 
(['"""ZILLIZ_CLUSTER_ID"""'], {}), "('ZILLIZ_CLUSTER_ID')\n", (9989, 10010), False, 'import os\n'), ((10039, 10064), 'os.getenv', 'os.getenv', (['"""ZILLIZ_TOKEN"""'], {}), "('ZILLIZ_TOKEN')\n", (10048, 10064), False, 'import os\n'), ((10098, 10128), 'os.getenv', 'os.getenv', (['"""ZILLIZ_PROJECT_ID"""'], {}), "('ZILLIZ_PROJECT_ID')\n", (10107, 10128), False, 'import os\n'), ((11138, 11193), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'None'}), '(llm=llm, embed_model=None)\n', (11166, 11193), False, 'from llama_index import ServiceContext, StorageContext\n'), ((11249, 11292), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (11275, 11292), False, 'from llama_index import set_global_service_context\n'), ((14123, 14178), 'llama_index.prompts.ChatPromptTemplate', 'ChatPromptTemplate', ([], {'message_templates': 'message_templates'}), '(message_templates=message_templates)\n', (14141, 14178), False, 'from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole, PromptTemplate\n'), ((16170, 16204), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (16182, 16204), False, 'import requests\n'), ((2283, 2304), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (2299, 2304), False, 'import os\n'), ((4568, 4662), 'custom.llms.QwenLLM.QwenUnofficial', 'QwenUnofficial', ([], {'temperature': 'config.llm.temperature', 'model': 'config.llm.name', 'max_tokens': '(2048)'}), '(temperature=config.llm.temperature, model=config.llm.name,\n max_tokens=2048)\n', (4582, 4662), False, 'from custom.llms.QwenLLM import QwenUnofficial\n'), ((6573, 6593), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (6587, 6593), False, 'import os\n'), ((8020, 8082), 'llama_index.prompts.ChatMessage', 'ChatMessage', ([], {'content': 'QA_SYSTEM_PROMPT', 'role': 'MessageRole.SYSTEM'}), '(content=QA_SYSTEM_PROMPT, role=MessageRole.SYSTEM)\n', (8031, 8082), False, 'from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole, PromptTemplate\n'), ((8096, 8158), 'llama_index.prompts.ChatMessage', 'ChatMessage', ([], {'content': 'QA_PROMPT_TMPL_STR', 'role': 'MessageRole.USER'}), '(content=QA_PROMPT_TMPL_STR, role=MessageRole.USER)\n', (8107, 8158), False, 'from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole, PromptTemplate\n'), ((10611, 10705), 'custom.llms.QwenLLM.QwenUnofficial', 'QwenUnofficial', ([], {'temperature': 'config.llm.temperature', 'model': 'config.llm.name', 'max_tokens': '(2048)'}), '(temperature=config.llm.temperature, model=config.llm.name,\n max_tokens=2048)\n', (10625, 10705), False, 'from custom.llms.QwenLLM import QwenUnofficial\n'), ((11702, 11913), 'custom.zilliz.base.ZillizCloudPipelineIndex', 'ZillizCloudPipelineIndex', ([], {'project_id': 'self.ZILLIZ_PROJECT_ID', 'cluster_id': 'self.ZILLIZ_CLUSTER_ID', 'token': 'self.ZILLIZ_TOKEN', 'collection_name': 'config.pipeline.collection_name', 'service_context': 'service_context'}), '(project_id=self.ZILLIZ_PROJECT_ID, cluster_id=self\n .ZILLIZ_CLUSTER_ID, token=self.ZILLIZ_TOKEN, collection_name=config.\n pipeline.collection_name, service_context=service_context)\n', (11726, 11913), False, 'from custom.zilliz.base import ZillizCloudPipelineIndex\n'), ((12376, 12447), 'pymilvus.MilvusClient', 'MilvusClient', ([], {'uri': 'self.ZILLIZ_CLUSTER_ENDPOINT', 'token': 'self.ZILLIZ_TOKEN'}), 
'(uri=self.ZILLIZ_CLUSTER_ENDPOINT, token=self.ZILLIZ_TOKEN)\n', (12388, 12447), False, 'from pymilvus import MilvusClient\n'), ((13902, 13964), 'llama_index.prompts.ChatMessage', 'ChatMessage', ([], {'content': 'QA_SYSTEM_PROMPT', 'role': 'MessageRole.SYSTEM'}), '(content=QA_SYSTEM_PROMPT, role=MessageRole.SYSTEM)\n', (13913, 13964), False, 'from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole, PromptTemplate\n'), ((13978, 14040), 'llama_index.prompts.ChatMessage', 'ChatMessage', ([], {'content': 'QA_PROMPT_TMPL_STR', 'role': 'MessageRole.USER'}), '(content=QA_PROMPT_TMPL_STR, role=MessageRole.USER)\n', (13989, 14040), False, 'from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole, PromptTemplate\n'), ((17063, 17100), 'requests.delete', 'requests.delete', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (17078, 17100), False, 'import requests\n'), ((4728, 4819), 'custom.llms.GeminiLLM.Gemini', 'Gemini', ([], {'temperature': 'config.llm.temperature', 'model_name': 'config.llm.name', 'max_tokens': '(2048)'}), '(temperature=config.llm.temperature, model_name=config.llm.name,\n max_tokens=2048)\n', (4734, 4819), False, 'from custom.llms.GeminiLLM import Gemini\n'), ((6292, 6312), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6306, 6312), False, 'import os\n'), ((6657, 6676), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (6670, 6676), False, 'import os\n'), ((9436, 9457), 'llama_index.indices.query.schema.QueryBundle', 'QueryBundle', (['question'], {}), '(question)\n', (9447, 9457), False, 'from llama_index.indices.query.schema import QueryBundle\n'), ((10771, 10862), 'custom.llms.GeminiLLM.Gemini', 'Gemini', ([], {'model_name': 'config.llm.name', 'temperature': 'config.llm.temperature', 'max_tokens': '(2048)'}), '(model_name=config.llm.name, temperature=config.llm.temperature,\n max_tokens=2048)\n', (10777, 10862), False, 'from custom.llms.GeminiLLM import Gemini\n'), ((11007, 11113), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'api_base': 'api_base', 'temperature': 'config.llm.temperature', 'model': 'config.llm.name', 'max_tokens': '(2048)'}), '(api_base=api_base, temperature=config.llm.temperature, model=config.\n llm.name, max_tokens=2048)\n', (11013, 11113), False, 'from llama_index.llms import OpenAI\n'), ((15313, 15334), 'llama_index.indices.query.schema.QueryBundle', 'QueryBundle', (['question'], {}), '(question)\n', (15324, 15334), False, 'from llama_index.indices.query.schema import QueryBundle\n'), ((4209, 4248), 're.findall', 're.findall', (['"""[^,.;。?!]+[,.;。?!]?"""', 'text'], {}), "('[^,.;。?!]+[,.;。?!]?', text)\n", (4219, 4248), False, 'import re\n'), ((4876, 5033), 'custom.llms.proxy_model.ProxyModel', 'ProxyModel', ([], {'model_name': 'config.llm.name', 'api_base': 'config.llm.api_base', 'api_key': 'config.llm.api_key', 'temperature': 'config.llm.temperature', 'max_tokens': '(2048)'}), '(model_name=config.llm.name, api_base=config.llm.api_base,\n api_key=config.llm.api_key, temperature=config.llm.temperature,\n max_tokens=2048)\n', (4886, 5033), False, 'from custom.llms.proxy_model import ProxyModel\n'), ((5320, 5426), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'api_base': 'api_base', 'temperature': 'config.llm.temperature', 'model': 'config.llm.name', 'max_tokens': '(2048)'}), '(api_base=api_base, temperature=config.llm.temperature, model=config.\n llm.name, max_tokens=2048)\n', (5326, 5426), False, 'from llama_index.llms import OpenAI\n'), ((6460, 6470), 'pathlib.Path', 'Path', 
(['path'], {}), '(path)\n', (6464, 6470), False, 'from pathlib import Path\n'), ((7821, 7883), 'llama_index.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (7853, 7883), False, 'from llama_index.postprocessor import MetadataReplacementPostProcessor\n'), ((6437, 6449), 'llama_index.readers.file.flat_reader.FlatReader', 'FlatReader', ([], {}), '()\n', (6447, 6449), False, 'from llama_index.readers.file.flat_reader import FlatReader\n'), ((6693, 6713), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6707, 6713), False, 'import os\n'), ((6838, 6865), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['path'], {}), '(path)\n', (6859, 6865), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document\n'), ((13579, 13601), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (13595, 13601), False, 'import os\n')] |
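For orientation, the executors above are driven by an attribute-style config whose fields (config.milvus.*, config.embedding.*, config.rerank.*, config.llm.*) are read in MilvusExecutor.__init__ and build_index. The sketch below is a hypothetical driver, assuming an OmegaConf-style config object; every concrete value (host, port, model names, top-k settings) is a placeholder, not the project's shipped defaults.
# Hypothetical driver for MilvusExecutor; the field names mirror the attributes read above,
# but all concrete values are placeholders (assumption), not the project's own config file.
from omegaconf import OmegaConf  # assumption: the project config behaves like a DictConfig

config = OmegaConf.create({
    "milvus": {"host": "127.0.0.1", "port": 19530, "collection_name": "history_rag",
               "window_size": 10, "retrieve_topk": 20, "rerank_topk": 5},
    "embedding": {"name": "BAAI/bge-base-zh-v1.5", "dim": 768},
    "rerank": {"name": "BAAI/bge-reranker-large"},
    "llm": {"name": "gpt-3.5-turbo", "temperature": 0.1},
})

executor = MilvusExecutor(config)
executor.build_index("./data/history_24/baihuasanguozhi.txt", overwrite=True)
executor.build_query_engine()
print(executor.query("三国时期有哪些著名的战役"))  # "Which famous battles took place in the Three Kingdoms period?"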
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ================================================== #
# This file is a part of PYGPT package #
# Website: https://pygpt.net #
# GitHub: https://github.com/szczyglis-dev/py-gpt #
# MIT License #
# Created By : Marcin Szczygliński #
# Updated Date: 2024.02.28 02:00:00 #
# ================================================== #
import os.path
from llama_index.core import StorageContext, load_index_from_storage
from llama_index.core.indices.base import BaseIndex
from llama_index.core.indices.service_context import ServiceContext
from llama_index.core.indices.vector_store.base import VectorStoreIndex
from pygpt_net.provider.vector_stores.base import BaseStore # <--- vector store must inherit from BaseStore
class ExampleVectorStore(BaseStore):
def __init__(self, *args, **kwargs):
super(ExampleVectorStore, self).__init__(*args, **kwargs)
"""
Example vector store provider.
This example is based on the `SimpleProvider` (SimpleVectorStore) from the `pygpt_net.provider.vector_stores.simple`.
See `pygpt_net.provider.vector_stores` for more examples.
The rest of the shared methods (like `exists`, `delete`, `truncate`, etc.) are declared in the base class: `BaseStore`.
:param args: args
:param kwargs: kwargs
"""
self.window = kwargs.get('window', None)
self.id = "example_store" # identifier must be unique
self.prefix = "example_" # prefix for index config files subdirectory in "idx" directory in %workdir%
self.indexes = {} # indexes cache dictionary (in-memory)
def create(self, id: str):
"""
Create the empty index with the provided `id` (`base` is default)
In this example, we create an empty index with the name `id` and store it in the `self.indexes` dictionary.
Example is a simple copy of the `SimpleVectorStore` provider.
The `create` method is called when the index does not exist.
See `pygpt_net.core.idx` for more details how it is handled internally.
:param id: index name
"""
path = self.get_path(id) # get path for the index configuration, declared in the `BaseStore` class
# check if index does not exist on disk and create it if not exists
if not os.path.exists(path):
index = VectorStoreIndex([]) # create empty index
# store the index on disk
self.store(
id=id,
index=index,
)
def get(self, id: str, service_context: ServiceContext = None) -> BaseIndex:
"""
Get the index instance with the provided `id` (`base` is default)
In this example, we get the index with the name `id` from the `self.indexes` dictionary.
The `get` method is called when getting the index instance.
It must return the `BaseIndex` index instance.
See `pygpt_net.core.idx` for more details how it is handled internally.
:param id: index name
:param service_context: Service context
:return: index instance
"""
# check if index exists on disk and load it
if not self.exists(id):
# if index does not exist, then create it
self.create(id)
# get path for the index configuration on disk (in "%workdir%/idx" directory)
path = self.get_path(id)
# get the storage context
storage_context = StorageContext.from_defaults(
persist_dir=path,
)
# load index from storage and update it in the `self.indexes` dictionary
self.indexes[id] = load_index_from_storage(
storage_context,
service_context=service_context,
)
# return the index instance
return self.indexes[id]
def store(self, id: str, index: BaseIndex = None):
"""
Store (persist) the index instance with the provided `id` (`base` is default)
In this example, we store the index with the name `id` in the `self.indexes` dictionary.
The `store` method is called when storing (persisting) index to disk/db.
It must provide logic to store the index in the storage.
See `pygpt_net.core.idx` for more details how it is handled internally.
:param id: index name
:param index: index instance
"""
# prepare the index instance
if index is None:
index = self.indexes[id]
# get path for the index configuration on disk (in "%workdir%/idx" directory)
path = self.get_path(id)
# persist the index on disk
index.storage_context.persist(
persist_dir=path,
)
# update the index in the `self.indexes` dictionary
self.indexes[id] = index
| [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.indices.vector_store.base.VectorStoreIndex"
] | [((3613, 3659), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'path'}), '(persist_dir=path)\n', (3641, 3659), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((3792, 3865), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (3815, 3865), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((2500, 2520), 'llama_index.core.indices.vector_store.base.VectorStoreIndex', 'VectorStoreIndex', (['[]'], {}), '([])\n', (2516, 2520), False, 'from llama_index.core.indices.vector_store.base import VectorStoreIndex\n')] |
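Stripped of the BaseStore bookkeeping, the provider above wraps llama_index's standard persist/load cycle. A minimal sketch of that round trip follows; the ./idx/example_base directory is an illustrative placeholder for the provider's real workdir layout, and an embedding model must already be configured (as it is in the provider's normal runtime) before the index is actually used.
# Bare llama_index persist/load round trip that create()/get()/store() wrap above.
# "./idx/example_base" is an illustrative path, not the provider's actual workdir layout.
import os
from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage

persist_dir = "./idx/example_base"

if not os.path.exists(persist_dir):
    index = VectorStoreIndex([])                              # create(): start with an empty index
    index.storage_context.persist(persist_dir=persist_dir)   # store(): persist it to disk
else:
    storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
    index = load_index_from_storage(storage_context)          # get(): reload the existing index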
"""Loads files from GitHub using the LlamaIndex GithubRepositoryReader."""
import os
from typing import ClassVar, Iterable, Optional
from pydantic import Field, field_serializer
from typing_extensions import override
from ..schema import Item
from ..source import Source, SourceSchema
from .llama_index_docs_source import LlamaIndexDocsSource
# We currently don't support images or videos, so we filter them out to reduce the load time.
IGNORE_MEDIA_EXTENSIONS = [
'.png',
'.jpg',
'.jpeg',
'.gif',
'.mp4',
'.mov',
'.avi',
'.PNG',
'.JPG',
'.JPEG',
'.GIF',
'.MP4',
'.MOV',
'.AVI',
]
class GithubSource(Source):
"""GitHub source code loader
Loads source code from GitHub repositories using the LlamaIndex GithubRepositoryReader.
Each file becomes a separate row.
The following extensions are automatically ignored as Lilac does not yet support media:
.png, .jpg, .jpeg, .gif, .mp4, .mov, .avi
""" # noqa: D415, D400
name: ClassVar[str] = 'github'
repo: str = Field(description='The GitHub repository to load from. Format: <owner>/<repo>.')
branch: Optional[str] = Field(
default='main', description='The branch to load from. Defaults to the main branch.'
)
ignore_directories: Optional[list[str]] = Field(
default=None,
description='A list of directories to ignore. Can only be used if filter_directories '
'is not specified.',
)
ignore_file_extensions: Optional[list[str]] = Field(
default=None,
description='A list of file extensions to ignore. Can only be used if filter_file_extensions '
'is not specified.',
)
github_token: Optional[str] = Field(
default=None,
description='The GitHub token to use for authentication. If not specified, '
'uses the `GITHUB_TOKEN` environment variable.',
)
@field_serializer('github_token')
def scrub_github_token(self, github_token: str) -> str:
"""Scrubs the github token so it isn't stored on disk."""
del github_token
return ''
_llama_index_docs_source: LlamaIndexDocsSource
@override
def setup(self) -> None:
try:
from llama_index.core.readers import download_loader
except ImportError:
raise ImportError(
'Could not import dependencies for the "github" source. '
'Please install with pip install lilac[github]'
)
try:
from llama_hub.github_repo import GithubClient, GithubRepositoryReader
except ImportError:
raise ImportError(
'Could not import dependencies for the "github" source. '
'Please install with pip install lilac[github]'
)
download_loader('GithubRepositoryReader')
github_token = os.getenv('GITHUB_TOKEN', self.github_token)
if not github_token:
raise ValueError(
'Environment variable `GITHUB_TOKEN` is not set and the github_token arg is not set.'
)
github_client = GithubClient(github_token)
owner, repo = self.repo.split('/')
loader = GithubRepositoryReader(
github_client=github_client,
owner=owner,
repo=repo,
filter_directories=(self.ignore_directories, GithubRepositoryReader.FilterType.EXCLUDE)
if self.ignore_directories
else None,
filter_file_extensions=(
(self.ignore_file_extensions or []) + IGNORE_MEDIA_EXTENSIONS,
GithubRepositoryReader.FilterType.EXCLUDE,
),
verbose=True,
concurrent_requests=10,
)
docs = loader.load_data(branch=self.branch)
self._llama_index_docs_source = LlamaIndexDocsSource(docs)
self._llama_index_docs_source.setup()
@override
def source_schema(self) -> SourceSchema:
"""Return the source schema."""
return self._llama_index_docs_source.source_schema()
@override
def yield_items(self) -> Iterable[Item]:
"""Read from GitHub."""
return self._llama_index_docs_source.yield_items()
| [
"llama_index.core.readers.download_loader"
] | [((1012, 1097), 'pydantic.Field', 'Field', ([], {'description': '"""The GitHub repository to load from. Format: <owner>/<repo>."""'}), "(description='The GitHub repository to load from. Format: <owner>/<repo>.'\n )\n", (1017, 1097), False, 'from pydantic import Field, field_serializer\n'), ((1119, 1214), 'pydantic.Field', 'Field', ([], {'default': '"""main"""', 'description': '"""The branch to load from. Defaults to the main branch."""'}), "(default='main', description=\n 'The branch to load from. Defaults to the main branch.')\n", (1124, 1214), False, 'from pydantic import Field, field_serializer\n'), ((1262, 1396), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""A list of directories to ignore. Can only be used if filter_directories is not specified."""'}), "(default=None, description=\n 'A list of directories to ignore. Can only be used if filter_directories is not specified.'\n )\n", (1267, 1396), False, 'from pydantic import Field, field_serializer\n'), ((1455, 1597), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""A list of file extensions to ignore. Can only be used if filter_file_extensions is not specified."""'}), "(default=None, description=\n 'A list of file extensions to ignore. Can only be used if filter_file_extensions is not specified.'\n )\n", (1460, 1597), False, 'from pydantic import Field, field_serializer\n'), ((1641, 1793), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""The GitHub token to use for authentication. If not specified, uses the `GITHUB_TOKEN` environment variable."""'}), "(default=None, description=\n 'The GitHub token to use for authentication. If not specified, uses the `GITHUB_TOKEN` environment variable.'\n )\n", (1646, 1793), False, 'from pydantic import Field, field_serializer\n'), ((1808, 1840), 'pydantic.field_serializer', 'field_serializer', (['"""github_token"""'], {}), "('github_token')\n", (1824, 1840), False, 'from pydantic import Field, field_serializer\n'), ((2606, 2647), 'llama_index.core.readers.download_loader', 'download_loader', (['"""GithubRepositoryReader"""'], {}), "('GithubRepositoryReader')\n", (2621, 2647), False, 'from llama_index.core.readers import download_loader\n'), ((2668, 2712), 'os.getenv', 'os.getenv', (['"""GITHUB_TOKEN"""', 'self.github_token'], {}), "('GITHUB_TOKEN', self.github_token)\n", (2677, 2712), False, 'import os\n'), ((2885, 2911), 'llama_hub.github_repo.GithubClient', 'GithubClient', (['github_token'], {}), '(github_token)\n', (2897, 2911), False, 'from llama_hub.github_repo import GithubClient, GithubRepositoryReader\n'), ((2966, 3357), 'llama_hub.github_repo.GithubRepositoryReader', 'GithubRepositoryReader', ([], {'github_client': 'github_client', 'owner': 'owner', 'repo': 'repo', 'filter_directories': '((self.ignore_directories, GithubRepositoryReader.FilterType.EXCLUDE) if\n self.ignore_directories else None)', 'filter_file_extensions': '((self.ignore_file_extensions or []) + IGNORE_MEDIA_EXTENSIONS,\n GithubRepositoryReader.FilterType.EXCLUDE)', 'verbose': '(True)', 'concurrent_requests': '(10)'}), '(github_client=github_client, owner=owner, repo=repo,\n filter_directories=(self.ignore_directories, GithubRepositoryReader.\n FilterType.EXCLUDE) if self.ignore_directories else None,\n filter_file_extensions=((self.ignore_file_extensions or []) +\n IGNORE_MEDIA_EXTENSIONS, GithubRepositoryReader.FilterType.EXCLUDE),\n verbose=True, concurrent_requests=10)\n', (2988, 3357), False, 'from llama_hub.github_repo import GithubClient, 
GithubRepositoryReader\n')] |
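A minimal usage sketch for the source above. The repository and branch values are placeholders, and the class is assumed to be importable from the surrounding lilac package; only the fields and methods defined in the class itself are used.
# Hypothetical driver for GithubSource; repo/branch values are placeholders (assumption).
import os

os.environ.setdefault("GITHUB_TOKEN", "<your token>")   # or pass github_token=... directly

source = GithubSource(
    repo="owner/repo",                  # placeholder <owner>/<repo>
    branch="main",
    ignore_directories=["tests"],
    ignore_file_extensions=[".lock"],
)
source.setup()                           # loads the file tree via GithubRepositoryReader
print(source.source_schema())
for item in source.yield_items():        # one item per file in the repository
    print(item)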
from __future__ import annotations
from typing import Optional
import os
from llama_index.core import ServiceContext
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.core.llms import OpenAI as LlamaIndexOpenAI
from llama_index.core.llms.llm import LLM # noqa: TCH002
from llama_index.core.llms.openai_utils import ALL_AVAILABLE_MODELS, CHAT_MODELS
from openssa.utils.config import Config
# import sys
# import logging
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# Add the extended models to the list of available models in LlamaIndex
_EXTENDED_CHAT_MODELS = {
"01-ai/Yi-34B-Chat": 4096,
"Intel/neural-chat-7b-v3-1": 4096,
"llama2-70b": 4096,
"llama2-13b": 4096,
"llama2-7b": 4096,
}
ALL_AVAILABLE_MODELS.update(_EXTENDED_CHAT_MODELS)
CHAT_MODELS.update(_EXTENDED_CHAT_MODELS)
# TODO: there should be a single Aitomatic api_base and api_key
Config.AITOMATIC_API_KEY: Optional[str] = os.environ.get("AITOMATIC_API_KEY")
Config.AITOMATIC_API_URL: Optional[str] = (
os.environ.get("AITOMATIC_API_URL")
or "https://aimo-api-mvp.platform.aitomatic.com/api/v1"
)
Config.AITOMATIC_API_URL_7B: Optional[str] = (
os.environ.get("AITOMATIC_API_URL_7B") or "https://llama2-7b.lepton.run/api/v1"
)
Config.AITOMATIC_API_URL_70B: Optional[str] = (
os.environ.get("AITOMATIC_API_URL_70B") or "https://llama2-70b.lepton.run/api/v1"
)
Config.OPENAI_API_KEY: Optional[str] = os.environ.get("OPENAI_API_KEY")
Config.OPENAI_API_URL: Optional[str] = (
os.environ.get("OPENAI_API_URL") or "https://api.openai.com/v1"
)
Config.AZURE_OPENAI_API_KEY: Optional[str] = os.environ.get("AZURE_OPENAI_API_KEY")
Config.AZURE_OPENAI_API_URL: Optional[str] = (
os.environ.get("AZURE_OPENAI_API_URL") or "https://aiva-japan.openai.azure.com"
)
Config.LEPTON_API_KEY: Optional[str] = os.environ.get("LEPTON_API_KEY")
Config.LEPTON_API_URL: Optional[str] = (
os.environ.get("LEPTON_API_URL") or "https://llama2-7b.lepton.run/api/v1"
)
class LlamaIndexApi: # no-pylint: disable=too-many-public-methods
class LLMs:
"""
This class represents the LLMs from different services
"""
class _AnOpenAIAPIProvider:
"""
This class represents an OpenAI-API provider
"""
@classmethod
def _get(cls, model=None, api_base=None, api_key=None, additional_kwargs=None) -> LLM:
if model is None:
if api_base is None:
llm = LlamaIndexOpenAI(api_key=api_key, additional_kwargs=additional_kwargs)
else:
llm = LlamaIndexOpenAI(api_key=api_key, additional_kwargs=additional_kwargs)
elif api_base is None:
llm = LlamaIndexOpenAI(api_key=api_key, additional_kwargs=additional_kwargs)
else:
llm = LlamaIndexOpenAI(model=model, api_base=api_base, api_key=api_key)
# Forcibly set the get_openai method to the _get_client method
llm.__dict__['get_openai'] = llm._get_client # pylint: disable=protected-access
return llm
class Aitomatic(_AnOpenAIAPIProvider):
"""
This class represents the Aitomatic-hosted LLMs
"""
@classmethod
def get(cls, model=None, api_base=None, api_key=None, additional_kwargs=None) -> LLM:
if model is None:
model = "llama2-7b"
if api_key is None:
api_key = Config.AITOMATIC_API_KEY
return super()._get(model=model, api_base=api_base, api_key=api_key, additional_kwargs=additional_kwargs)
@classmethod
def get_llama2_70b(cls) -> LLM:
# TODO: there should be a single Aitomatic api_base and api_key
llm = cls.get(
model="llama2-70b",
api_base=Config.AITOMATIC_API_URL_70B,
api_key=Config.LEPTON_API_KEY,
)
return llm
@classmethod
def get_llama2_7b(cls) -> LLM:
# TODO: there should be a single Aitomatic api_base and api_key
llm = cls.get(
model="llama2-7b",
api_base=Config.AITOMATIC_API_URL,
api_key=Config.LEPTON_API_KEY,
)
return llm
@classmethod
def get_13b(cls) -> LLM:
# TODO: there should be a single Aitomatic api_base and api_key
# not running
llm = cls.get(
model="gpt-3.5-turbo-0613",
api_base="http://35.199.34.91:8000/v1",
additional_kwargs={"stop": "\n"},
)
return llm
@classmethod
def get_yi_34b(cls) -> LLM: # running
llm = cls.get(
model="01-ai/Yi-34B-Chat",
api_base="http://35.230.174.89:8000/v1",
additional_kwargs={"stop": "\n###"},
)
return llm
@classmethod
def get_intel_neural_chat_7b(cls) -> LLM: # running
llm = cls.get(
model="Intel/neural-chat-7b-v3-1",
api_base="http://34.145.174.152:8000/v1",
)
return llm
@classmethod
def get_aimo(cls):
llm = cls.get(api_base=os.environ.get("AIMO_STANDARD_URL_BASE"))
return llm
class OpenAI(_AnOpenAIAPIProvider):
"""
This class represents the OpenAI-hosted LLMs
"""
@classmethod
def get(cls, model=None) -> LLM:
if model is None:
model = "gpt-3.5-turbo-1106"
return super()._get(model=model, api_key=Config.OPENAI_API_KEY)
@classmethod
def get_gpt_35_turbo_1106(cls) -> LLM:
return cls.get(model="gpt-3.5-turbo-1106")
@classmethod
def get_gpt_35_turbo_0613(cls) -> LLM:
return cls.get(model="gpt-3.5-turbo")
@classmethod
def get_gpt_35_turbo(cls) -> LLM:
return cls.get(model="gpt-3.5-turbo-0613")
@classmethod
def get_gpt_4(cls) -> LLM:
return cls.get(model="gpt-4")
class Azure:
"""
This class represents the Azure-hosted LLMs
"""
@classmethod
def _get(cls, model=None, engine=None, api_base=None) -> LLM:
if model is None:
model = "gpt-35-turbo-16k"
if engine is None:
engine = "aiva-dev-gpt35"
if api_base is None:
api_base = Config.AZURE_OPENAI_API_URL
return AzureOpenAI(
engine=model,
model=model,
temperature=0.0,
api_version="2023-09-01-preview",
api_key=Config.AZURE_OPENAI_API_KEY,
azure_endpoint=api_base,
)
@classmethod
def get(cls) -> LLM:
return cls.get_gpt_35()
@classmethod
def get_gpt_35(cls) -> LLM:
return cls._get(model="gpt-35-turbo")
@classmethod
def get_gpt_35_16k(cls) -> LLM:
return cls._get(model="gpt-35-turbo-16k")
@classmethod
def get_gpt_4(cls) -> LLM:
return cls.get_gpt_4_32k()
@classmethod
def get_gpt_4_32k(cls) -> LLM:
return cls._get(model="gpt-4-32k")
class Embeddings:
"""
This class represents the different embedding services
"""
class Aitomatic:
"""
This class represents the Aitomatic-hosted embedding service
"""
@classmethod
def _get(cls, api_base=None, api_key=None) -> OpenAIEmbedding:
if api_key is None:
api_key = Config.AITOMATIC_API_KEY
return OpenAIEmbedding(api_base=api_base, api_key=api_key)
@classmethod
def get(cls) -> OpenAIEmbedding: # running
return cls._get(api_base=Config.AITOMATIC_API_URL)
@classmethod
def get_llama2_7b(cls) -> OpenAIEmbedding:
return cls._get(api_base=Config.AITOMATIC_API_URL_7B)
@classmethod
def get_llama2_70b(cls) -> OpenAIEmbedding:
return cls._get(api_base=Config.AITOMATIC_API_URL_70B)
class OpenAI:
"""
This class represents the OpenAI-hosted embedding service
"""
@classmethod
def get(cls) -> OpenAIEmbedding:
return OpenAIEmbedding(api_key=Config.OPENAI_API_KEY)
class Azure:
"""
This class represents the Azure-hosted embedding service
"""
@classmethod
def get(cls) -> AzureOpenAIEmbedding:
return AzureOpenAIEmbedding(
model="text-embedding-ada-002",
deployment_name="text-embedding-ada-002",
api_key=Config.AZURE_OPENAI_API_KEY,
api_version="2023-09-01-preview",
azure_endpoint=Config.AZURE_OPENAI_API_URL,
)
class ServiceContexts:
"""
This class represents the service contexts for different models.
"""
class _AServiceContextHelper:
"""
This class represents the service contexts for the different embedding services.
"""
@classmethod
def _get(cls, llm=None, embedding=None) -> ServiceContext:
sc = ServiceContext.from_defaults(llm=llm, embed_model=embedding)
return sc
class Aitomatic(_AServiceContextHelper):
"""
This class represents the service contexts for the Aitomatic-hosted models.
"""
@classmethod
def get_llama2_7b(cls) -> ServiceContext:
llm = LlamaIndexApi.LLMs.Aitomatic.get_llama2_7b()
embedding = LlamaIndexApi.Embeddings.Aitomatic.get_llama2_7b()
return cls._get(llm=llm, embedding=embedding)
@classmethod
def get_llama_2_70b(cls) -> ServiceContext:
llm = LlamaIndexApi.LLMs.Aitomatic.get_llama2_7b()
embedding = LlamaIndexApi.Embeddings.Aitomatic.get_llama2_70b()
return cls._get(llm=llm, embedding=embedding)
class OpenAI(_AServiceContextHelper):
"""
This class represents the service contexts for the OpenAI-hosted models.
"""
@classmethod
def get_gpt_35_turbo_1106(cls) -> ServiceContext:
llm = LlamaIndexApi.LLMs.OpenAI.get_gpt_35_turbo_1106()
embedding = LlamaIndexApi.Embeddings.OpenAI.get()
return cls._get(llm=llm, embedding=embedding)
@classmethod
def get_gpt_35_turbo(cls) -> ServiceContext:
llm = LlamaIndexApi.LLMs.OpenAI.get_gpt_35_turbo()
embedding = LlamaIndexApi.Embeddings.OpenAI.get()
return cls._get(llm=llm, embedding=embedding)
class Azure(_AServiceContextHelper):
"""
This class represents the service contexts for the Azure-hosted models.
"""
@classmethod
def get(cls) -> ServiceContext:
return cls.get_gpt_35()
@classmethod
def get_gpt_35(cls) -> ServiceContext:
llm = LlamaIndexApi.LLMs.Azure.get_gpt_35()
embedding = LlamaIndexApi.Embeddings.Azure.get()
return cls._get(llm=llm, embedding=embedding)
@classmethod
def get_gpt_35_16k(cls) -> ServiceContext:
llm = LlamaIndexApi.LLMs.Azure.get_gpt_35_16k()
embedding = LlamaIndexApi.Embeddings.Azure.get()
return cls._get(llm=llm, embedding=embedding)
@classmethod
def get_gpt4(cls) -> ServiceContext:
llm = LlamaIndexApi.LLMs.Azure.get_gpt_4()
embedding = LlamaIndexApi.Embeddings.Azure.get()
return cls._get(llm=llm, embedding=embedding)
@classmethod
def get_gpt4_32k(cls) -> ServiceContext:
llm = LlamaIndexApi.LLMs.Azure.get_gpt_4_32k()
embedding = LlamaIndexApi.Embeddings.Azure.get()
return cls._get(llm=llm, embedding=embedding)
# Convenience methods
get_aitomatic_llm = LLMs.Aitomatic.get
get_openai_llm = LLMs.OpenAI.get
get_azure_llm = LLMs.Azure.get
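# Hedged usage sketch (assumes the enclosing wrapper class is LlamaIndexApi, as the
# ServiceContexts methods above reference it; calls and models are illustrative only):
# llm = LlamaIndexApi.get_azure_llm()                         # -> AzureOpenAI "gpt-35-turbo"
# sc = LlamaIndexApi.ServiceContexts.Azure.get_gpt_35_16k()  # LLM + Azure embedding bundle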
| [
"llama_index.embeddings.azure_openai.AzureOpenAIEmbedding",
"llama_index.llms.azure_openai.AzureOpenAI",
"llama_index.core.llms.openai_utils.ALL_AVAILABLE_MODELS.update",
"llama_index.core.ServiceContext.from_defaults",
"llama_index.core.llms.openai_utils.CHAT_MODELS.update",
"llama_index.core.llms.OpenAI",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((948, 998), 'llama_index.core.llms.openai_utils.ALL_AVAILABLE_MODELS.update', 'ALL_AVAILABLE_MODELS.update', (['_EXTENDED_CHAT_MODELS'], {}), '(_EXTENDED_CHAT_MODELS)\n', (975, 998), False, 'from llama_index.core.llms.openai_utils import ALL_AVAILABLE_MODELS, CHAT_MODELS\n'), ((999, 1040), 'llama_index.core.llms.openai_utils.CHAT_MODELS.update', 'CHAT_MODELS.update', (['_EXTENDED_CHAT_MODELS'], {}), '(_EXTENDED_CHAT_MODELS)\n', (1017, 1040), False, 'from llama_index.core.llms.openai_utils import ALL_AVAILABLE_MODELS, CHAT_MODELS\n'), ((1148, 1183), 'os.environ.get', 'os.environ.get', (['"""AITOMATIC_API_KEY"""'], {}), "('AITOMATIC_API_KEY')\n", (1162, 1183), False, 'import os\n'), ((1639, 1671), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1653, 1671), False, 'import os\n'), ((1829, 1867), 'os.environ.get', 'os.environ.get', (['"""AZURE_OPENAI_API_KEY"""'], {}), "('AZURE_OPENAI_API_KEY')\n", (1843, 1867), False, 'import os\n'), ((2041, 2073), 'os.environ.get', 'os.environ.get', (['"""LEPTON_API_KEY"""'], {}), "('LEPTON_API_KEY')\n", (2055, 2073), False, 'import os\n'), ((1232, 1267), 'os.environ.get', 'os.environ.get', (['"""AITOMATIC_API_URL"""'], {}), "('AITOMATIC_API_URL')\n", (1246, 1267), False, 'import os\n'), ((1381, 1419), 'os.environ.get', 'os.environ.get', (['"""AITOMATIC_API_URL_7B"""'], {}), "('AITOMATIC_API_URL_7B')\n", (1395, 1419), False, 'import os\n'), ((1515, 1554), 'os.environ.get', 'os.environ.get', (['"""AITOMATIC_API_URL_70B"""'], {}), "('AITOMATIC_API_URL_70B')\n", (1529, 1554), False, 'import os\n'), ((1717, 1749), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_URL"""'], {}), "('OPENAI_API_URL')\n", (1731, 1749), False, 'import os\n'), ((1919, 1957), 'os.environ.get', 'os.environ.get', (['"""AZURE_OPENAI_API_URL"""'], {}), "('AZURE_OPENAI_API_URL')\n", (1933, 1957), False, 'import os\n'), ((2119, 2151), 'os.environ.get', 'os.environ.get', (['"""LEPTON_API_URL"""'], {}), "('LEPTON_API_URL')\n", (2133, 2151), False, 'import os\n'), ((7221, 7381), 'llama_index.llms.azure_openai.AzureOpenAI', 'AzureOpenAI', ([], {'engine': 'model', 'model': 'model', 'temperature': '(0.0)', 'api_version': '"""2023-09-01-preview"""', 'api_key': 'Config.AZURE_OPENAI_API_KEY', 'azure_endpoint': 'api_base'}), "(engine=model, model=model, temperature=0.0, api_version=\n '2023-09-01-preview', api_key=Config.AZURE_OPENAI_API_KEY,\n azure_endpoint=api_base)\n", (7232, 7381), False, 'from llama_index.llms.azure_openai import AzureOpenAI\n'), ((8543, 8594), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'api_base': 'api_base', 'api_key': 'api_key'}), '(api_base=api_base, api_key=api_key)\n', (8558, 8594), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((9267, 9313), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'api_key': 'Config.OPENAI_API_KEY'}), '(api_key=Config.OPENAI_API_KEY)\n', (9282, 9313), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((9536, 9759), 'llama_index.embeddings.azure_openai.AzureOpenAIEmbedding', 'AzureOpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'deployment_name': '"""text-embedding-ada-002"""', 'api_key': 'Config.AZURE_OPENAI_API_KEY', 'api_version': '"""2023-09-01-preview"""', 'azure_endpoint': 'Config.AZURE_OPENAI_API_URL'}), "(model='text-embedding-ada-002', deployment_name=\n 'text-embedding-ada-002', api_key=Config.AZURE_OPENAI_API_KEY,\n api_version='2023-09-01-preview', 
azure_endpoint=Config.\n AZURE_OPENAI_API_URL)\n", (9556, 9759), False, 'from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding\n'), ((10272, 10332), 'llama_index.core.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embedding'}), '(llm=llm, embed_model=embedding)\n', (10300, 10332), False, 'from llama_index.core import ServiceContext\n'), ((2724, 2794), 'llama_index.core.llms.OpenAI', 'LlamaIndexOpenAI', ([], {'api_key': 'api_key', 'additional_kwargs': 'additional_kwargs'}), '(api_key=api_key, additional_kwargs=additional_kwargs)\n', (2740, 2794), True, 'from llama_index.core.llms import OpenAI as LlamaIndexOpenAI\n'), ((2851, 2921), 'llama_index.core.llms.OpenAI', 'LlamaIndexOpenAI', ([], {'api_key': 'api_key', 'additional_kwargs': 'additional_kwargs'}), '(api_key=api_key, additional_kwargs=additional_kwargs)\n', (2867, 2921), True, 'from llama_index.core.llms import OpenAI as LlamaIndexOpenAI\n'), ((2987, 3057), 'llama_index.core.llms.OpenAI', 'LlamaIndexOpenAI', ([], {'api_key': 'api_key', 'additional_kwargs': 'additional_kwargs'}), '(api_key=api_key, additional_kwargs=additional_kwargs)\n', (3003, 3057), True, 'from llama_index.core.llms import OpenAI as LlamaIndexOpenAI\n'), ((3106, 3171), 'llama_index.core.llms.OpenAI', 'LlamaIndexOpenAI', ([], {'model': 'model', 'api_base': 'api_base', 'api_key': 'api_key'}), '(model=model, api_base=api_base, api_key=api_key)\n', (3122, 3171), True, 'from llama_index.core.llms import OpenAI as LlamaIndexOpenAI\n'), ((5783, 5823), 'os.environ.get', 'os.environ.get', (['"""AIMO_STANDARD_URL_BASE"""'], {}), "('AIMO_STANDARD_URL_BASE')\n", (5797, 5823), False, 'import os\n')] |
from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex
from langchain.chat_models import ChatOpenAI
from dotenv import load_dotenv
import os
import graphsignal
import logging
import time
import random
load_dotenv()
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
graphsignal.configure(api_key=os.getenv('GRAPHSIGNAL_API_KEY'), deployment='DevSecOpsKB')
# set context window
context_window = 4096
# set number of output tokens
num_output = 512
#LLMPredictor is a wrapper class around LangChain's LLMChain that allows easy integration into LlamaIndex
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_output))
#constructs service_context
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, context_window=context_window, num_output=num_output)
#set the global service context object
from llama_index import set_global_service_context
set_global_service_context(service_context)
#loads data from the specified directory path
documents = SimpleDirectoryReader("./data").load_data()
#when first building the index
index = GPTVectorStoreIndex.from_documents(documents)
def data_querying(input_text):
#queries the index with the input text
response = index.as_query_engine().query(input_text)
return response.response
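# Hedged example of a single ad-hoc call outside the load loop below
# (question text reused from the predefined list):
# print(data_querying("what does Trivy image scan do?"))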
# predefine a list of 10 questions
questions = [
'what does Trivy image scan do?',
'What are the main benefits of using Harden Runner?',
'What is the 3-2-1 rule in DevOps self-service model?',
'What is Infracost? and what does it do?',
'What is the terraform command to auto generate README?',
'How to pin Terraform module source to a particular branch?',
'What are the benefits of reusable Terraform modules?',
'How do I resolve error "npm ERR! code E400"?',
'How to fix error "NoCredentialProviders: no valid providers in chain"?',
'How to fix error "Credentials could not be loaded, please check your action inputs: Could not load credentials from any providers"?'
]
start_time = time.time()
while time.time() - start_time < 1800: # let it run for 30 minutes (1800 seconds)
try:
num = random.randint(0, len(questions) - 1)
print("Question: ", questions[num])
answer = data_querying(questions[num])
print("Answer: ", answer)
    except Exception:
logger.error("Error during data query", exc_info=True)
time.sleep(5 * random.random())
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.set_global_service_context",
"llama_index.SimpleDirectoryReader"
] | [((244, 257), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (255, 257), False, 'from dotenv import load_dotenv\n'), ((259, 280), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (278, 280), False, 'import logging\n'), ((290, 309), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (307, 309), False, 'import logging\n'), ((790, 906), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'context_window': 'context_window', 'num_output': 'num_output'}), '(llm_predictor=llm_predictor, context_window=\n context_window, num_output=num_output)\n', (818, 906), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((993, 1036), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (1019, 1036), False, 'from llama_index import set_global_service_context\n'), ((1180, 1225), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1214, 1225), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((2128, 2139), 'time.time', 'time.time', ([], {}), '()\n', (2137, 2139), False, 'import time\n'), ((372, 404), 'os.getenv', 'os.getenv', (['"""GRAPHSIGNAL_API_KEY"""'], {}), "('GRAPHSIGNAL_API_KEY')\n", (381, 404), False, 'import os\n'), ((663, 741), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.5)', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': 'num_output'}), "(temperature=0.5, model_name='gpt-3.5-turbo', max_tokens=num_output)\n", (673, 741), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1096, 1127), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data"""'], {}), "('./data')\n", (1117, 1127), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((2147, 2158), 'time.time', 'time.time', ([], {}), '()\n', (2156, 2158), False, 'import time\n'), ((2505, 2520), 'random.random', 'random.random', ([], {}), '()\n', (2518, 2520), False, 'import random\n')] |
# Ref https://github.com/amrrs/QABot-LangChain/blob/main/Q%26A_Bot_with_Llama_Index_and_LangChain.ipynb
#from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex,GPTSimpleVectorIndex, PromptHelper
from llama_index import LLMPredictor, ServiceContext
import sys
import os
def construct_index(directory_path):
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_outputs = 256
# set maximum chunk overlap
max_chunk_overlap = 20
# set chunk size limit
chunk_size_limit = 600
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
# define LLM
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-002", max_tokens=num_outputs))
documents = SimpleDirectoryReader(directory_path).load_data()
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
index_obj = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
index_obj.save_to_disk('model/index.json')
return index_obj
def ask_bot(input_index='model/index.json'):
index_obj = GPTSimpleVectorIndex.load_from_disk(input_index)
while True:
query = input('What do you want to ask the bot? \n')
if query == "nothing":
return
response = index_obj.query(query, response_mode="compact")
print("\nBot says: \n\n" + response.response + "\n\n\n")
index = construct_index("data/")
ask_bot('model/index.json')
| [
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTSimpleVectorIndex.from_documents",
"llama_index.PromptHelper",
"llama_index.GPTSimpleVectorIndex.load_from_disk"
] | [((716, 815), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size_limit'}), '(max_input_size, num_outputs, max_chunk_overlap,\n chunk_size_limit=chunk_size_limit)\n', (728, 815), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, GPTSimpleVectorIndex, PromptHelper\n'), ((1035, 1092), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (1063, 1092), False, 'from llama_index import LLMPredictor, ServiceContext\n'), ((1109, 1188), 'llama_index.GPTSimpleVectorIndex.from_documents', 'GPTSimpleVectorIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (1144, 1188), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, GPTSimpleVectorIndex, PromptHelper\n'), ((1322, 1370), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['input_index'], {}), '(input_index)\n', (1357, 1370), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, GPTSimpleVectorIndex, PromptHelper\n'), ((867, 943), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""text-davinci-002"""', 'max_tokens': 'num_outputs'}), "(temperature=0, model_name='text-davinci-002', max_tokens=num_outputs)\n", (873, 943), False, 'from langchain import OpenAI\n'), ((962, 999), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['directory_path'], {}), '(directory_path)\n', (983, 999), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, GPTSimpleVectorIndex, PromptHelper\n')] |
# chroma.py
import streamlit as st
import os
import re
from pathlib import Path
import chromadb
from chromadb.config import Settings
from llama_index import GPTVectorStoreIndex, load_index_from_storage
from llama_index.vector_stores import ChromaVectorStore
from utils.model_settings import sentenceTransformers, get_service_context, get_embed_model
import logging
from utils.qa_template import QA_PROMPT
from llama_index.storage.storage_context import StorageContext
def get_collection_index_path(collection):
    return f'./data/{collection}-index.json'
# INDEX_PATH = './data/chroma_index.json'
PERSIST_DIRECTORY = './data/chromadb'
service_context = get_service_context()
@st.cache_resource
def create_chroma_client():
return chromadb.Client(Settings(chroma_db_impl="chromadb.db.duckdb.PersistentDuckDB",persist_directory=PERSIST_DIRECTORY, anonymized_telemetry=False))
def get_chroma_collection(collection_name):
client = create_chroma_client()
try:
return client.get_collection(collection_name)
except Exception as e:
logging.error(f"Failed to get collection '{collection_name}': {e}")
return None
@st.cache_resource
def load_chroma_index(collection):
# collection_index_path = get_collection_index_path(collection)
_chroma_collection = get_chroma_collection(collection)
vector_store = ChromaVectorStore(chroma_collection=_chroma_collection)
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIRECTORY, vector_store=vector_store)
if Path(PERSIST_DIRECTORY).exists():
index = load_index_from_storage(storage_context, service_context=service_context)
        logging.info('Index loaded for collection ' + collection)
else:
index = None
return index
# def build_chroma_index(documents, collection, reindex=False, chunk_size_limit=512, model_name='sentence-transformers/all-MiniLM-L6-v2'):
# collection_index_path = get_collection_index_path(collection)
# chroma_client = create_chroma_client()
# if reindex is True:
# chroma_client.delete_collection(collection)
# os.remove(get_collection_index_path(collection))
# _chroma_collection = chroma_client.get_or_create_collection(collection)
# index = None
# index = GPTChromaIndex.from_documents(documents, chroma_collection=_chroma_collection,
# service_context=get_service_context(embed_model=get_embed_model(model_name), chunk_size_limit=chunk_size_limit)
# )
# index.save_to_disk(collection_index_path)
# chroma_client.persist()
def create_or_refresh_chroma_index(documents, collection, reindex=False, chunk_size_limit=512, model_name='sentence-transformers/all-MiniLM-L6-v2'):
collection_index_path = get_collection_index_path(collection)
chroma_client = create_chroma_client()
if reindex is True:
logging.info(chroma_client.list_collections())
if collection in chroma_client.list_collections():
chroma_client.delete_collection(collection)
_chroma_collection = chroma_client.get_or_create_collection(collection)
vector_store = ChromaVectorStore(chroma_collection=_chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = None
index = GPTVectorStoreIndex.from_documents(documents, storage_context=storage_context,
service_context=get_service_context(embed_model=get_embed_model(model_name=model_name), chunk_size_limit=chunk_size_limit)
)
index.storage_context.persist(persist_dir=PERSIST_DIRECTORY)
chroma_client.persist()
else:
refresh_chroma_index(documents, collection)
def refresh_chroma_index(documents, collection):
index = load_chroma_index(collection)
logging.info('refreshing collection ' + collection)
refreshed_docs = index.refresh(documents)
chroma_client = create_chroma_client()
chroma_client.persist()
return refreshed_docs
def query_index(query_str, collection, similarity_top_k=5, response_mode='compact', streaming=False, model_name=sentenceTransformers.OPTION1.value):
index = None
_chroma_collection = get_chroma_collection(collection)
index = load_chroma_index(collection)
query_engine = index.as_query_engine(chroma_collection=_chroma_collection,
mode="embedding",
similarity_top_k=similarity_top_k,
response_mode=response_mode, # default, compact, tree_summarize, no_text
service_context=get_service_context(embed_model=get_embed_model(model_name=model_name)),
text_qa_template=QA_PROMPT,
verbose= True,
use_async= True,
streaming= streaming
)
return query_engine.query(query_str)
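# Hedged example call of the query helper above (collection name and question are placeholders):
# response = query_index("What is covered in the onboarding guide?", "docs", similarity_top_k=3)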
def persist_chroma_index():
chroma_client = create_chroma_client()
chroma_client.persist()
def generate_chroma_compliant_name(name: str) -> str:
# Replace non-alphanumeric characters with underscores
new_name = re.sub(r"[^a-zA-Z0-9_\-\.]", "_", name)
# Replace consecutive periods with a single underscore
new_name = re.sub(r"\.{2,}", "_", new_name)
# Ensure the name starts and ends with an alphanumeric character
if not new_name[0].isalnum():
new_name = "a" + new_name[1:]
if not new_name[-1].isalnum():
new_name = new_name[:-1] + "a"
# Truncate or pad the name to be between 3 and 63 characters
new_name = new_name[:63]
while len(new_name) < 3:
new_name += "0"
return new_name
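# Hedged example of the sanitizer above (output shown is what the rules imply, not a verified run):
# generate_chroma_compliant_name("My Docs (v2)!")  # -> "My_Docs__v2_a"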
| [
"llama_index.load_index_from_storage",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.vector_stores.ChromaVectorStore"
] | [((665, 686), 'utils.model_settings.get_service_context', 'get_service_context', ([], {}), '()\n', (684, 686), False, 'from utils.model_settings import sentenceTransformers, get_service_context, get_embed_model\n'), ((1363, 1418), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': '_chroma_collection'}), '(chroma_collection=_chroma_collection)\n', (1380, 1418), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((1441, 1532), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'PERSIST_DIRECTORY', 'vector_store': 'vector_store'}), '(persist_dir=PERSIST_DIRECTORY, vector_store=\n vector_store)\n', (1469, 1532), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((3852, 3903), 'logging.info', 'logging.info', (["('refreshing collection ' + collection)"], {}), "('refreshing collection ' + collection)\n", (3864, 3903), False, 'import logging\n'), ((5186, 5226), 're.sub', 're.sub', (['"""[^a-zA-Z0-9_\\\\-\\\\.]"""', '"""_"""', 'name'], {}), "('[^a-zA-Z0-9_\\\\-\\\\.]', '_', name)\n", (5192, 5226), False, 'import re\n'), ((5300, 5332), 're.sub', 're.sub', (['"""\\\\.{2,}"""', '"""_"""', 'new_name'], {}), "('\\\\.{2,}', '_', new_name)\n", (5306, 5332), False, 'import re\n'), ((762, 893), 'chromadb.config.Settings', 'Settings', ([], {'chroma_db_impl': '"""chromadb.db.duckdb.PersistentDuckDB"""', 'persist_directory': 'PERSIST_DIRECTORY', 'anonymized_telemetry': '(False)'}), "(chroma_db_impl='chromadb.db.duckdb.PersistentDuckDB',\n persist_directory=PERSIST_DIRECTORY, anonymized_telemetry=False)\n", (770, 893), False, 'from chromadb.config import Settings\n'), ((1585, 1658), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (1608, 1658), False, 'from llama_index import GPTVectorStoreIndex, load_index_from_storage\n'), ((1667, 1724), 'logging.info', 'logging.info', (["('Index loaded for collection ' + collection)"], {}), "('Index loaded for collection ' + collection)\n", (1679, 1724), False, 'import logging\n'), ((3159, 3214), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': '_chroma_collection'}), '(chroma_collection=_chroma_collection)\n', (3176, 3214), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((3241, 3296), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (3269, 3296), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((1069, 1136), 'logging.error', 'logging.error', (['f"""Failed to get collection \'{collection_name}\': {e}"""'], {}), '(f"Failed to get collection \'{collection_name}\': {e}")\n', (1082, 1136), False, 'import logging\n'), ((1535, 1558), 'pathlib.Path', 'Path', (['PERSIST_DIRECTORY'], {}), '(PERSIST_DIRECTORY)\n', (1539, 1558), False, 'from pathlib import Path\n'), ((4673, 4711), 'utils.model_settings.get_embed_model', 'get_embed_model', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (4688, 4711), False, 'from utils.model_settings import sentenceTransformers, get_service_context, get_embed_model\n'), ((3486, 3524), 'utils.model_settings.get_embed_model', 'get_embed_model', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (3501, 3524), False, 
'from utils.model_settings import sentenceTransformers, get_service_context, get_embed_model\n')] |
# /app/src/tools/doc_search.py
import logging
# Primary Components
from llama_index import ServiceContext, VectorStoreIndex
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
from src.utils.config import load_config, setup_environment_variables
from src.utils.embedding_selector import EmbeddingConfig, EmbeddingSelector
logger = logging.getLogger(__name__)
class DocumentSearch:
"""
Class to perform document searches using a vector store index.
Attributes:
- collection (str): Name of the collection to be queried.
- query (str): User input query for searching documents.
- CONFIG (dict): Loaded configuration settings.
- client (QdrantClient): Client to interact with the Qdrant service.
"""
def __init__(self, query: str, collection: str):
"""
Initializes with collection name and user input.
Parameters:
- collection (str): Name of the collection to be queried.
- query (str): User input query for searching documents.
"""
self.collection = collection
self.query = query
self.CONFIG = load_config()
setup_environment_variables(self.CONFIG)
self.client = QdrantClient(url="http://RAG_BOT_QDRANT:6333")
# self.embed_model = OpenAIEmbedding()
# self.embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/multi-qa-mpnet-base-dot-v1")
self.embedding_config = EmbeddingConfig(type=self.CONFIG["Embedding_Type"])
self.embed_model = EmbeddingSelector(self.embedding_config).get_embedding_model()
def setup_index(self) -> VectorStoreIndex:
"""
Sets up and returns the vector store index for the collection.
Returns:
- VectorStoreIndex: The set up vector store index.
Raises:
- Exception: Propagates any exceptions that occur during the index setup.
"""
try:
vector_store = QdrantVectorStore(client=self.client, collection_name=self.collection)
service_context = ServiceContext.from_defaults(embed_model=self.embed_model)
index = VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=service_context)
return index
except Exception as e:
logging.error(f"setup_index: Error - {str(e)}")
raise e
def search_documents(self):
"""
Searches and returns documents based on the user input query.
Returns:
- Any: The response received from querying the index.
Raises:
- Exception: Propagates any exceptions that occur during the document search.
"""
try:
query_engine = (self.setup_index()).as_query_engine()
response = query_engine.query(self.query)
logging.info(f"search_documents: Response - {response}")
return response
except Exception as e:
logging.error(f"search_documents: Error - {str(e)}")
raise e
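# Hedged usage sketch of the class above (collection name is a placeholder; query text is illustrative):
# results = DocumentSearch(query="Which ports does the API expose?", collection="ops_docs").search_documents()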
| [
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store"
] | [((384, 411), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (401, 411), False, 'import logging\n'), ((1157, 1170), 'src.utils.config.load_config', 'load_config', ([], {}), '()\n', (1168, 1170), False, 'from src.utils.config import load_config, setup_environment_variables\n'), ((1179, 1219), 'src.utils.config.setup_environment_variables', 'setup_environment_variables', (['self.CONFIG'], {}), '(self.CONFIG)\n', (1206, 1219), False, 'from src.utils.config import load_config, setup_environment_variables\n'), ((1242, 1288), 'qdrant_client.QdrantClient', 'QdrantClient', ([], {'url': '"""http://RAG_BOT_QDRANT:6333"""'}), "(url='http://RAG_BOT_QDRANT:6333')\n", (1254, 1288), False, 'from qdrant_client import QdrantClient\n'), ((1481, 1532), 'src.utils.embedding_selector.EmbeddingConfig', 'EmbeddingConfig', ([], {'type': "self.CONFIG['Embedding_Type']"}), "(type=self.CONFIG['Embedding_Type'])\n", (1496, 1532), False, 'from src.utils.embedding_selector import EmbeddingConfig, EmbeddingSelector\n'), ((1982, 2052), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'self.client', 'collection_name': 'self.collection'}), '(client=self.client, collection_name=self.collection)\n', (1999, 2052), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((2083, 2141), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'self.embed_model'}), '(embed_model=self.embed_model)\n', (2111, 2141), False, 'from llama_index import ServiceContext, VectorStoreIndex\n'), ((2162, 2260), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (2196, 2260), False, 'from llama_index import ServiceContext, VectorStoreIndex\n'), ((2850, 2906), 'logging.info', 'logging.info', (['f"""search_documents: Response - {response}"""'], {}), "(f'search_documents: Response - {response}')\n", (2862, 2906), False, 'import logging\n'), ((1560, 1600), 'src.utils.embedding_selector.EmbeddingSelector', 'EmbeddingSelector', (['self.embedding_config'], {}), '(self.embedding_config)\n', (1577, 1600), False, 'from src.utils.embedding_selector import EmbeddingConfig, EmbeddingSelector\n')] |
import logging
import traceback
from typing import Sequence, List, Optional, Dict, Tuple
from llama_index import Document
from llama_index.callbacks import CBEventType, CallbackManager
from llama_index.callbacks.schema import EventPayload
from llama_index.node_parser import NodeParser, SimpleNodeParser
from llama_index.node_parser.extractors import MetadataExtractor
from llama_index.schema import BaseNode, MetadataMode, TextNode, NodeRelationship
from llama_index.text_splitter import TokenTextSplitter, SplitterType, get_default_text_splitter
from llama_index.utils import get_tqdm_iterable
from pydantic import Field
from ghostcoder.codeblocks import create_parser, CodeBlock, CodeBlockType
from ghostcoder.utils import count_tokens
class CodeNodeParser(NodeParser):
"""Route to the right node parser depending on language set in document metadata"""
text_splitter: SplitterType = Field(
description="The text splitter to use when splitting documents."
)
include_metadata: bool = Field(
default=True, description="Whether or not to consider metadata when splitting."
)
include_prev_next_rel: bool = Field(
default=True, description="Include prev/next node relationships."
)
metadata_extractor: Optional[MetadataExtractor] = Field(
default=None, description="Metadata extraction pipeline to apply to nodes."
)
callback_manager: CallbackManager = Field(
default_factory=CallbackManager, exclude=True
)
@classmethod
def from_defaults(
cls,
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
text_splitter: Optional[SplitterType] = None,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
metadata_extractor: Optional[MetadataExtractor] = None,
) -> "CodeNodeParser":
callback_manager = callback_manager or CallbackManager([])
text_splitter = text_splitter or get_default_text_splitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
return cls(
text_splitter=text_splitter,
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
metadata_extractor=metadata_extractor,
_node_parser_map={}
)
@classmethod
def class_name(cls):
return "CodeNodeParser"
def get_nodes_from_documents(
self,
documents: Sequence[Document],
show_progress: bool = False,
) -> List[BaseNode]:
with self.callback_manager.event(
CBEventType.NODE_PARSING, payload={EventPayload.DOCUMENTS: documents}
) as event:
documents_with_progress = get_tqdm_iterable(
documents, show_progress, "Parsing documents into nodes"
)
all_nodes: List[BaseNode] = []
for document in documents_with_progress:
language = document.metadata.get("language", None)
if language:
try:
parser = create_parser(language)
except Exception as e:
logging.warning(f"Could not get parser for language {language}. Will not parse document {document.id_}")
continue
content = document.get_content(metadata_mode=MetadataMode.NONE)
if not content:
logging.warning(f"Could not get content for document {document.id_}")
continue
codeblock = parser.parse(content)
logging.debug(codeblock.to_tree(include_tree_sitter_type=False,
show_tokens=True,
include_types=[CodeBlockType.FUNCTION, CodeBlockType.CLASS]))
splitted_blocks = codeblock.split_blocks()
for splitted_block in splitted_blocks:
definitions, parent = self.get_parent_and_definitions(splitted_block)
node_metadata = document.metadata
node_metadata["definition"] = splitted_block.content
node_metadata["block_type"] = str(splitted_block.type)
if splitted_block.identifier:
node_metadata["identifier"] = splitted_block.identifier
else:
node_metadata["identifier"] = splitted_block.content[:80].replace("\n", "\\n")
node_metadata["start_line"] = splitted_block.start_line
tokens = count_tokens(parent.to_string())
if tokens > 4000:
logging.info(f"Skip node [{node_metadata['identifier']}] in {document.id_} with {tokens} tokens")
continue
if tokens > 1000:
logging.info(f"Big node [{node_metadata['identifier']}] in {document.id_} with {tokens} tokens")
# TODO: Add relationships between code blocks
node = TextNode(
text=parent.to_string(),
embedding=document.embedding,
metadata=node_metadata,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_seperator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships={NodeRelationship.SOURCE: document.as_related_node_info()},
)
all_nodes.append(node)
event.on_end(payload={EventPayload.NODES: all_nodes})
return all_nodes
    def get_parent_and_definitions(self, codeblock: CodeBlock) -> Tuple[List[str], CodeBlock]:
definitions = [codeblock.content]
if codeblock.parent:
parent_defs, parent = self.get_parent_and_definitions(codeblock.parent)
definitions.extend(parent_defs)
return definitions, parent
else:
return definitions, codeblock
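# Hedged usage sketch (source text and metadata are placeholders; the "language" metadata key
# drives the ghostcoder parser selection in get_nodes_from_documents above):
# parser = CodeNodeParser.from_defaults(chunk_size=1024)
# nodes = parser.get_nodes_from_documents([Document(text=some_code, metadata={"language": "python"})])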
| [
"llama_index.utils.get_tqdm_iterable",
"llama_index.callbacks.CallbackManager",
"llama_index.text_splitter.get_default_text_splitter"
] | [((893, 964), 'pydantic.Field', 'Field', ([], {'description': '"""The text splitter to use when splitting documents."""'}), "(description='The text splitter to use when splitting documents.')\n", (898, 964), False, 'from pydantic import Field\n'), ((1008, 1099), 'pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Whether or not to consider metadata when splitting."""'}), "(default=True, description=\n 'Whether or not to consider metadata when splitting.')\n", (1013, 1099), False, 'from pydantic import Field\n'), ((1143, 1215), 'pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Include prev/next node relationships."""'}), "(default=True, description='Include prev/next node relationships.')\n", (1148, 1215), False, 'from pydantic import Field\n'), ((1284, 1371), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Metadata extraction pipeline to apply to nodes."""'}), "(default=None, description=\n 'Metadata extraction pipeline to apply to nodes.')\n", (1289, 1371), False, 'from pydantic import Field\n'), ((1421, 1473), 'pydantic.Field', 'Field', ([], {'default_factory': 'CallbackManager', 'exclude': '(True)'}), '(default_factory=CallbackManager, exclude=True)\n', (1426, 1473), False, 'from pydantic import Field\n'), ((1964, 1983), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (1979, 1983), False, 'from llama_index.callbacks import CBEventType, CallbackManager\n'), ((2026, 2143), 'llama_index.text_splitter.get_default_text_splitter', 'get_default_text_splitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'callback_manager': 'callback_manager'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap, callback_manager=callback_manager)\n', (2051, 2143), False, 'from llama_index.text_splitter import TokenTextSplitter, SplitterType, get_default_text_splitter\n'), ((2903, 2978), 'llama_index.utils.get_tqdm_iterable', 'get_tqdm_iterable', (['documents', 'show_progress', '"""Parsing documents into nodes"""'], {}), "(documents, show_progress, 'Parsing documents into nodes')\n", (2920, 2978), False, 'from llama_index.utils import get_tqdm_iterable\n'), ((3260, 3283), 'ghostcoder.codeblocks.create_parser', 'create_parser', (['language'], {}), '(language)\n', (3273, 3283), False, 'from ghostcoder.codeblocks import create_parser, CodeBlock, CodeBlockType\n'), ((3634, 3703), 'logging.warning', 'logging.warning', (['f"""Could not get content for document {document.id_}"""'], {}), "(f'Could not get content for document {document.id_}')\n", (3649, 3703), False, 'import logging\n'), ((3351, 3465), 'logging.warning', 'logging.warning', (['f"""Could not get parser for language {language}. Will not parse document {document.id_}"""'], {}), "(\n f'Could not get parser for language {language}. Will not parse document {document.id_}'\n )\n", (3366, 3465), False, 'import logging\n'), ((4987, 5094), 'logging.info', 'logging.info', (['f"""Skip node [{node_metadata[\'identifier\']}] in {document.id_} with {tokens} tokens"""'], {}), '(\n f"Skip node [{node_metadata[\'identifier\']}] in {document.id_} with {tokens} tokens"\n )\n', (4999, 5094), False, 'import logging\n'), ((5193, 5299), 'logging.info', 'logging.info', (['f"""Big node [{node_metadata[\'identifier\']}] in {document.id_} with {tokens} tokens"""'], {}), '(\n f"Big node [{node_metadata[\'identifier\']}] in {document.id_} with {tokens} tokens"\n )\n', (5205, 5299), False, 'import logging\n')] |
# RAG/TAG Tiger - llm.py
# Copyright (c) 2024 Stuart Riffle
# github.com/stuartriffle/ragtag-tiger
import os
import torch
from .files import *
from .lograg import lograg, lograg_verbose, lograg_error
from .timer import TimerUntil
openai_model_default = "gpt-3.5-turbo-instruct"
google_model_default = "models/text-bison-001"
anthropic_model_default = "claude-2"
mistral_default = "mistral-small"
perplexity_default = "llama-2-70b-chat"
replicate_default = "mistralai/mixtral-8x7b-instruct-v0.1"
fireworks_ai_default = "accounts/fireworks/models/mixtral-8x7b-instruct"
together_ai_default = "codellama/CodeLlama-70b-Instruct-hf"
default_timeout = 180
default_temperature = 0.1
default_max_tokens = 500
default_llm_provider = "huggingface"
hf_model_nicknames = { "default": "codellama/CodeLlama-7b-Instruct-hf" }
def load_llm(provider, model, server, api_key, params, global_params, verbose=False, set_service_context=True, torch_device=None):
result = None
streaming_supported = True
try:
with TimerUntil("ready"):
all_params = global_params.copy()
model_params = dict([param.split("=") for param in params]) if params else {}
for k, v in model_params.items():
all_params[k] = v
model_kwargs = {}
for k, v in all_params.items():
model_kwargs[k] = float(v) if v.replace(".", "", 1).isdigit() else v
temperature = float(model_kwargs.get("temperature", default_temperature))
max_tokens = int(model_kwargs.get("max_tokens", default_max_tokens))
### OpenAI
if provider == "openai" and not server:
model_name = model or openai_model_default
api_key = api_key or os.environ.get("OPENAI_API_KEY", "")
lograg(f"OpenAI model \"{model_name}\"...")
from llama_index.llms import OpenAI
result = OpenAI(
model=model_name,
timeout=default_timeout,
api_key=api_key,
additional_kwargs=model_kwargs,
temperature=temperature,
max_tokens=max_tokens,
verbose=verbose)
### OpenAI API-compatible third party server
elif provider == "openai" and server:
# Auto-populate API key and model for known providers
if "together.ai" in server or "together.xyz" in server:
api_key = api_key or os.environ.get("TOGETHERAI_API_KEY", "")
model = model or together_ai_default
if "fireworks.ai" in server:
api_key = api_key or os.environ.get("FIREWORKS_API_KEY", "")
model = model or fireworks_ai_default
api_key = api_key or os.environ.get("OPENAI_API_KEY", "")
model_name = model or "default"
lograg(f"Model \"{model_name}\" on \"{server}\"...")
from llama_index.llms import OpenAILike
result = OpenAILike(
api_key=api_key,
model=model_name,
additional_kwargs=model_kwargs,
api_base=server,
max_iterations=100,
timeout=default_timeout,
max_tokens=max_tokens,
temperature=temperature,
verbose=verbose)
### Google
elif provider == "google":
gemini_api_key = os.environ.get("GEMINI_API_KEY", "")
google_api_key = os.environ.get("GOOGLE_API_KEY", "")
model_name = model or google_model_default
import google.generativeai as genai
genai.configure(api_key=google_api_key)
if "gemini" in str(model_name).lower():
lograg(f"Google Gemini model \"{model_name}\"...")
from llama_index.llms import Gemini
result = Gemini(
api_key=api_key or gemini_api_key,
model_name=model_name,
max_tokens=max_tokens,
temperature=temperature,
model_kwargs=model_kwargs)
else:
lograg(f"Google PaLM model \"{model_name}\"...")
from llama_index.llms import PaLM
result = PaLM(
api_key=api_key or google_api_key,
model_name=model_name,
generate_kwargs=model_kwargs)
streaming_supported = False
### Llama.cpp
elif provider == "llamacpp":
if torch.cuda.is_available():
# FIXME - this does nothing? Always on CPU
model_kwargs["n_gpu_layers"] = -1
lograg(f"llama.cpp model \"{cleanpath(model)}\"...")
from llama_index.llms import LlamaCPP
result = LlamaCPP(
model_path=model,
model_kwargs=model_kwargs,
max_new_tokens=max_tokens,
temperature=temperature,
verbose=verbose)
### Mistral
elif provider == "mistral":
api_key = api_key or os.environ.get("MISTRAL_API_KEY", None)
model_name = model or mistral_default
lograg(f"Mistral model \"{model_name}\"...")
from llama_index.llms import MistralAI
result = MistralAI(
api_key=api_key,
model=model_name,
max_tokens=max_tokens,
temperature=temperature,
additional_kwargs=model_kwargs)
### Perplexity
elif provider == "perplexity":
api_key = api_key or os.environ.get("PERPLEXITYAI_API_KEY", "")
model_name = model or perplexity_default
lograg(f"Perplexity model \"{model_name}\"...")
from llama_index.llms import Perplexity
result = Perplexity(
api_key=api_key,
model=model_name,
max_tokens=max_tokens,
temperature=temperature,
model_kwargs=model_kwargs)
### Replicate
elif provider == "replicate":
api_key = api_key or os.environ.get("REPLICATE_API_TOKEN", "")
model_name = model or replicate_default
lograg(f"Replicate model \"{model_name}\"...")
from llama_index.llms import Replicate
result = Replicate(
model=model_name,
temperature=temperature,
additional_kwargs=model_kwargs)
### HuggingFace
else:
os.environ["HF_HUB_DISABLE_SYMLINKS_WARNING"] = "1"
model_desc = ""
model_name = model or "default"
if model_name in hf_model_nicknames:
model_desc = f" (\"{model_name}\")"
model_name = hf_model_nicknames[model_name]
lograg(f"HuggingFace model \"{model_name}\"{model_desc}...")
from llama_index.llms import HuggingFaceLLM
result = HuggingFaceLLM(
model_name=model_name,
model_kwargs=model_kwargs,
max_new_tokens=max_tokens,
device_map=torch_device or "auto")
#system_prompt=system_prompt)
from llama_index import ServiceContext, set_global_service_context
service_context = ServiceContext.from_defaults(
embed_model='local',
llm=result)
if set_service_context:
set_global_service_context(service_context)
except Exception as e:
lograg_error(f"failure initializing LLM: {e}", exit_code=1)
return result, streaming_supported, service_context
def split_llm_config(config):
"""Split an LLM from a config string of format "[alias=]provider[,model[,server[,api-key[,parameters...]]]]" into its components"""
fields = config.strip("\"' ").split(",")
provider = fields[0].strip() if len(fields) > 0 else default_llm_provider
model = fields[1].strip() if len(fields) > 1 else None
server = fields[2].strip() if len(fields) > 2 else None
api_key = fields[3].strip() if len(fields) > 3 else None
params = fields[4:] if len(fields) > 4 else []
alias = None
if "=" in provider:
alias, provider = provider.split("=", 1)
provider = provider.strip()
return provider, model, server, api_key, params, alias
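# Hedged example of the config-string format documented above (all values are placeholders):
# split_llm_config("scribe=openai,gpt-4,,MY_KEY,temperature=0.2")
# -> ("openai", "gpt-4", "", "MY_KEY", ["temperature=0.2"], "scribe")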
def load_llm_config(config, global_params, set_service_context=True):
"""Load an LLM from a config string like "provider,model,server,api-key,param1,param2,..."""
provider, model, server, api_key, params, _ = split_llm_config(config)
    return load_llm(provider.lower(), model, server, api_key, params, global_params, set_service_context=set_service_context)
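# Hedged example (provider/model follow the format above; OPENAI_API_KEY is read from the environment):
# llm, streaming_ok, service_context = load_llm_config("openai,gpt-3.5-turbo-instruct", {})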
| [
"llama_index.llms.Gemini",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.PaLM",
"llama_index.llms.OpenAI",
"llama_index.llms.LlamaCPP",
"llama_index.llms.Replicate",
"llama_index.llms.HuggingFaceLLM",
"llama_index.llms.MistralAI",
"llama_index.set_global_service_context",
"llama_index.llms.Perplexity",
"llama_index.llms.OpenAILike"
] | [((8029, 8090), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': '"""local"""', 'llm': 'result'}), "(embed_model='local', llm=result)\n", (8057, 8090), False, 'from llama_index import ServiceContext, set_global_service_context\n'), ((1986, 2158), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': 'model_name', 'timeout': 'default_timeout', 'api_key': 'api_key', 'additional_kwargs': 'model_kwargs', 'temperature': 'temperature', 'max_tokens': 'max_tokens', 'verbose': 'verbose'}), '(model=model_name, timeout=default_timeout, api_key=api_key,\n additional_kwargs=model_kwargs, temperature=temperature, max_tokens=\n max_tokens, verbose=verbose)\n', (1992, 2158), False, 'from llama_index.llms import OpenAI\n'), ((8177, 8220), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (8203, 8220), False, 'from llama_index import ServiceContext, set_global_service_context\n'), ((1811, 1847), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""', '""""""'], {}), "('OPENAI_API_KEY', '')\n", (1825, 1847), False, 'import os\n'), ((3186, 3404), 'llama_index.llms.OpenAILike', 'OpenAILike', ([], {'api_key': 'api_key', 'model': 'model_name', 'additional_kwargs': 'model_kwargs', 'api_base': 'server', 'max_iterations': '(100)', 'timeout': 'default_timeout', 'max_tokens': 'max_tokens', 'temperature': 'temperature', 'verbose': 'verbose'}), '(api_key=api_key, model=model_name, additional_kwargs=\n model_kwargs, api_base=server, max_iterations=100, timeout=\n default_timeout, max_tokens=max_tokens, temperature=temperature,\n verbose=verbose)\n', (3196, 3404), False, 'from llama_index.llms import OpenAILike\n'), ((2934, 2970), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""', '""""""'], {}), "('OPENAI_API_KEY', '')\n", (2948, 2970), False, 'import os\n'), ((3684, 3720), 'os.environ.get', 'os.environ.get', (['"""GEMINI_API_KEY"""', '""""""'], {}), "('GEMINI_API_KEY', '')\n", (3698, 3720), False, 'import os\n'), ((3754, 3790), 'os.environ.get', 'os.environ.get', (['"""GOOGLE_API_KEY"""', '""""""'], {}), "('GOOGLE_API_KEY', '')\n", (3768, 3790), False, 'import os\n'), ((3918, 3957), 'google.generativeai.configure', 'genai.configure', ([], {'api_key': 'google_api_key'}), '(api_key=google_api_key)\n', (3933, 3957), True, 'import google.generativeai as genai\n'), ((2614, 2654), 'os.environ.get', 'os.environ.get', (['"""TOGETHERAI_API_KEY"""', '""""""'], {}), "('TOGETHERAI_API_KEY', '')\n", (2628, 2654), False, 'import os\n'), ((2799, 2838), 'os.environ.get', 'os.environ.get', (['"""FIREWORKS_API_KEY"""', '""""""'], {}), "('FIREWORKS_API_KEY', '')\n", (2813, 2838), False, 'import os\n'), ((4171, 4315), 'llama_index.llms.Gemini', 'Gemini', ([], {'api_key': '(api_key or gemini_api_key)', 'model_name': 'model_name', 'max_tokens': 'max_tokens', 'temperature': 'temperature', 'model_kwargs': 'model_kwargs'}), '(api_key=api_key or gemini_api_key, model_name=model_name, max_tokens\n =max_tokens, temperature=temperature, model_kwargs=model_kwargs)\n', (4177, 4315), False, 'from llama_index.llms import Gemini\n'), ((4606, 4702), 'llama_index.llms.PaLM', 'PaLM', ([], {'api_key': '(api_key or google_api_key)', 'model_name': 'model_name', 'generate_kwargs': 'model_kwargs'}), '(api_key=api_key or google_api_key, model_name=model_name,\n generate_kwargs=model_kwargs)\n', (4610, 4702), False, 'from llama_index.llms import PaLM\n'), ((4923, 4948), 'torch.cuda.is_available', 'torch.cuda.is_available', 
([], {}), '()\n', (4946, 4948), False, 'import torch\n'), ((5215, 5342), 'llama_index.llms.LlamaCPP', 'LlamaCPP', ([], {'model_path': 'model', 'model_kwargs': 'model_kwargs', 'max_new_tokens': 'max_tokens', 'temperature': 'temperature', 'verbose': 'verbose'}), '(model_path=model, model_kwargs=model_kwargs, max_new_tokens=\n max_tokens, temperature=temperature, verbose=verbose)\n', (5223, 5342), False, 'from llama_index.llms import LlamaCPP\n'), ((5792, 5920), 'llama_index.llms.MistralAI', 'MistralAI', ([], {'api_key': 'api_key', 'model': 'model_name', 'max_tokens': 'max_tokens', 'temperature': 'temperature', 'additional_kwargs': 'model_kwargs'}), '(api_key=api_key, model=model_name, max_tokens=max_tokens,\n temperature=temperature, additional_kwargs=model_kwargs)\n', (5801, 5920), False, 'from llama_index.llms import MistralAI\n'), ((5557, 5596), 'os.environ.get', 'os.environ.get', (['"""MISTRAL_API_KEY"""', 'None'], {}), "('MISTRAL_API_KEY', None)\n", (5571, 5596), False, 'import os\n'), ((6387, 6511), 'llama_index.llms.Perplexity', 'Perplexity', ([], {'api_key': 'api_key', 'model': 'model_name', 'max_tokens': 'max_tokens', 'temperature': 'temperature', 'model_kwargs': 'model_kwargs'}), '(api_key=api_key, model=model_name, max_tokens=max_tokens,\n temperature=temperature, model_kwargs=model_kwargs)\n', (6397, 6511), False, 'from llama_index.llms import Perplexity\n'), ((6142, 6184), 'os.environ.get', 'os.environ.get', (['"""PERPLEXITYAI_API_KEY"""', '""""""'], {}), "('PERPLEXITYAI_API_KEY', '')\n", (6156, 6184), False, 'import os\n'), ((6972, 7061), 'llama_index.llms.Replicate', 'Replicate', ([], {'model': 'model_name', 'temperature': 'temperature', 'additional_kwargs': 'model_kwargs'}), '(model=model_name, temperature=temperature, additional_kwargs=\n model_kwargs)\n', (6981, 7061), False, 'from llama_index.llms import Replicate\n'), ((7661, 7791), 'llama_index.llms.HuggingFaceLLM', 'HuggingFaceLLM', ([], {'model_name': 'model_name', 'model_kwargs': 'model_kwargs', 'max_new_tokens': 'max_tokens', 'device_map': "(torch_device or 'auto')"}), "(model_name=model_name, model_kwargs=model_kwargs,\n max_new_tokens=max_tokens, device_map=torch_device or 'auto')\n", (7675, 7791), False, 'from llama_index.llms import HuggingFaceLLM\n'), ((6731, 6772), 'os.environ.get', 'os.environ.get', (['"""REPLICATE_API_TOKEN"""', '""""""'], {}), "('REPLICATE_API_TOKEN', '')\n", (6745, 6772), False, 'import os\n')] |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opentelemetry.trace import Tracer, get_tracer, set_span_in_context, Status, StatusCode
from opentelemetry.trace.span import Span
from opentelemetry.context import Context, get_current, attach, detach
from typing import Any, Dict, List, Optional, Callable
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.base import CallbackManager
from llama_index.callbacks.schema import CBEventType, EventPayload, BASE_TRACE_EVENT
from llama_index.callbacks.token_counting import get_llm_token_counts, TokenCountingEvent
from llama_index.utilities.token_counting import TokenCounter
from llama_index.utils import get_tokenizer
from dataclasses import dataclass
from contextvars import ContextVar
import threading
global_root_trace = ContextVar("trace", default=None)
@dataclass
class SpanWithContext:
"""Object for tracking a span, its context, and its context token"""
span: Span
context: Context
token: object
def __init__(self, span: Span, context: Context, token: object, thread_identity):
self.span = span
self.context = context
self.token = token
self.thread_identity = thread_identity
class OpenTelemetryCallbackHandler(BaseCallbackHandler):
"""Callback handler for creating OpenTelemetry traces from llamaindex traces and events."""
def __init__(
self,
tracer: Optional[Tracer] = get_tracer(__name__),
tokenizer: Optional[Callable[[str], List]] = None,
) -> None:
"""Initializes the OpenTelemetryCallbackHandler.
Args:
tracer: Optional[Tracer]: A OpenTelemetry tracer used to create OpenTelemetry spans
"""
super().__init__(event_starts_to_ignore=[], event_ends_to_ignore=[])
self._tracer = tracer
self._event_map: Dict[str, SpanWithContext] = {}
self.tokenizer = tokenizer or get_tokenizer()
self._token_counter = TokenCounter(tokenizer=self.tokenizer)
def start_trace(self, trace_id: Optional[str] = None) -> None:
trace_name = "llamaindex.trace"
if trace_id is not None:
trace_name = "llamaindex.trace." + trace_id
span = self._tracer.start_span(trace_name)
ctx = set_span_in_context(span)
token = attach(ctx)
global_root_trace.set(SpanWithContext(span=span, context=ctx, token=token, thread_identity=threading.get_ident()))
def end_trace(
self,
trace_id: Optional[str] = None,
trace_map: Optional[Dict[str, List[str]]] = None,
) -> None:
root_trace = global_root_trace.get()
if root_trace is not None:
if root_trace.thread_identity == threading.get_ident():
detach(root_trace.token)
root_trace.span.end()
def on_event_start(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: str = "",
parent_id: str = "",
**kwargs: Any,
) -> str:
parent_ctx = None
# Case where the parent of this event is another event
if parent_id in self._event_map:
parent_ctx = self._event_map[parent_id].context
# Case where the parent of this event is the root trace, and the root trace exists
elif parent_id is BASE_TRACE_EVENT and global_root_trace.get() is not None:
parent_ctx = global_root_trace.get().context
# Case where the parent of this event is the root trace, but the trace does not exist
else:
return
span_prefix = "llamaindex.event."
span = self._tracer.start_span(span_prefix + event_type.value, context=parent_ctx)
ctx = set_span_in_context(span)
token = attach(ctx)
self._event_map[event_id] = SpanWithContext(span=span, context=ctx, token=token, thread_identity=threading.get_ident())
span.set_attribute("event_id", event_id)
if payload is not None:
if event_type is CBEventType.QUERY:
span.set_attribute("query.text", payload[EventPayload.QUERY_STR])
elif event_type is CBEventType.RETRIEVE:
pass
elif event_type is CBEventType.EMBEDDING:
span.set_attribute("embedding.model", payload[EventPayload.SERIALIZED]['model_name'])
span.set_attribute("embedding.batch_size", payload[EventPayload.SERIALIZED]['embed_batch_size'])
span.set_attribute("embedding.class_name", payload[EventPayload.SERIALIZED]['class_name'])
elif event_type is CBEventType.SYNTHESIZE:
span.set_attribute("synthesize.query_text", payload[EventPayload.QUERY_STR])
elif event_type is CBEventType.CHUNKING:
for i, chunk in enumerate(payload[EventPayload.CHUNKS]):
span.set_attribute(f"chunk.{i}", chunk)
elif event_type is CBEventType.TEMPLATING:
if payload[EventPayload.QUERY_WRAPPER_PROMPT]:
span.set_attribute("query_wrapper_prompt", payload[EventPayload.QUERY_WRAPPER_PROMPT])
if payload[EventPayload.SYSTEM_PROMPT]:
span.set_attribute("system_prompt", payload[EventPayload.SYSTEM_PROMPT])
if payload[EventPayload.TEMPLATE]:
span.set_attribute("template", payload[EventPayload.TEMPLATE])
if payload[EventPayload.TEMPLATE_VARS]:
for key, var in payload[EventPayload.TEMPLATE_VARS].items():
span.set_attribute(f"template_variables.{key}", var)
elif event_type is CBEventType.LLM:
span.set_attribute("llm.class_name", payload[EventPayload.SERIALIZED]['class_name'])
span.set_attribute("llm.formatted_prompt", payload[EventPayload.PROMPT])
span.set_attribute("llm.additional_kwargs", str(payload[EventPayload.ADDITIONAL_KWARGS]))
elif event_type is CBEventType.NODE_PARSING:
span.set_attribute("node_parsing.num_documents", len(payload[EventPayload.DOCUMENTS]))
elif event_type is CBEventType.EXCEPTION:
span.set_status(Status(StatusCode.ERROR))
span.record_exception(payload[EventPayload.EXCEPTION])
return event_id
def on_event_end(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: str = "",
**kwargs: Any,
) -> None:
if event_id in self._event_map:
span = self._event_map[event_id].span
span.set_attribute("event_id", event_id)
if payload is not None:
if event_type is CBEventType.QUERY:
pass
elif event_type is CBEventType.RETRIEVE:
for i, node_with_score in enumerate(payload[EventPayload.NODES]):
node = node_with_score.node
score = node_with_score.score
span.set_attribute(f"query.node.{i}.id", node.hash)
span.set_attribute(f"query.node.{i}.score", score)
span.set_attribute(f"query.node.{i}.text", node.text)
elif event_type is CBEventType.EMBEDDING:
texts = payload[EventPayload.CHUNKS]
vectors = payload[EventPayload.EMBEDDINGS]
total_chunk_tokens = 0
                    for i, (text, vector) in enumerate(zip(texts, vectors)):
                        span.set_attribute(f"embedding_text_{i}", text)
                        span.set_attribute(f"embedding_vector_{i}", vector)
                        total_chunk_tokens += self._token_counter.get_string_tokens(text)
                    span.set_attribute("embedding_token_usage", total_chunk_tokens)
elif event_type is CBEventType.SYNTHESIZE:
pass
elif event_type is CBEventType.CHUNKING:
pass
elif event_type is CBEventType.TEMPLATING:
pass
elif event_type is CBEventType.LLM:
span.set_attribute("response.text", str(
payload.get(EventPayload.RESPONSE, "")
) or str(payload.get(EventPayload.COMPLETION, ""))
)
token_counts = get_llm_token_counts(self._token_counter, payload, event_id)
span.set_attribute("llm_prompt.token_usage", token_counts.prompt_token_count)
span.set_attribute("llm_completion.token_usage", token_counts.completion_token_count)
span.set_attribute("total_tokens_used", token_counts.total_token_count)
elif event_type is CBEventType.NODE_PARSING:
span.set_attribute("node_parsing.num_nodes", len(payload[EventPayload.NODES]))
elif event_type is CBEventType.EXCEPTION:
span.set_status(Status(StatusCode.ERROR))
span.record_exception(payload[EventPayload.EXCEPTION])
if self._event_map[event_id].thread_identity == threading.get_ident():
detach(self._event_map[event_id].token)
self._event_map.pop(event_id, None)
span.end()
| [
"llama_index.utilities.token_counting.TokenCounter",
"llama_index.callbacks.token_counting.get_llm_token_counts",
"llama_index.utils.get_tokenizer"
] | [((1450, 1483), 'contextvars.ContextVar', 'ContextVar', (['"""trace"""'], {'default': 'None'}), "('trace', default=None)\n", (1460, 1483), False, 'from contextvars import ContextVar\n'), ((2085, 2105), 'opentelemetry.trace.get_tracer', 'get_tracer', (['__name__'], {}), '(__name__)\n', (2095, 2105), False, 'from opentelemetry.trace import Tracer, get_tracer, set_span_in_context, Status, StatusCode\n'), ((2609, 2647), 'llama_index.utilities.token_counting.TokenCounter', 'TokenCounter', ([], {'tokenizer': 'self.tokenizer'}), '(tokenizer=self.tokenizer)\n', (2621, 2647), False, 'from llama_index.utilities.token_counting import TokenCounter\n'), ((2918, 2943), 'opentelemetry.trace.set_span_in_context', 'set_span_in_context', (['span'], {}), '(span)\n', (2937, 2943), False, 'from opentelemetry.trace import Tracer, get_tracer, set_span_in_context, Status, StatusCode\n'), ((2960, 2971), 'opentelemetry.context.attach', 'attach', (['ctx'], {}), '(ctx)\n', (2966, 2971), False, 'from opentelemetry.context import Context, get_current, attach, detach\n'), ((4386, 4411), 'opentelemetry.trace.set_span_in_context', 'set_span_in_context', (['span'], {}), '(span)\n', (4405, 4411), False, 'from opentelemetry.trace import Tracer, get_tracer, set_span_in_context, Status, StatusCode\n'), ((4428, 4439), 'opentelemetry.context.attach', 'attach', (['ctx'], {}), '(ctx)\n', (4434, 4439), False, 'from opentelemetry.context import Context, get_current, attach, detach\n'), ((2563, 2578), 'llama_index.utils.get_tokenizer', 'get_tokenizer', ([], {}), '()\n', (2576, 2578), False, 'from llama_index.utils import get_tokenizer\n'), ((3367, 3388), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (3386, 3388), False, 'import threading\n'), ((3406, 3430), 'opentelemetry.context.detach', 'detach', (['root_trace.token'], {}), '(root_trace.token)\n', (3412, 3430), False, 'from opentelemetry.context import Context, get_current, attach, detach\n'), ((4545, 4566), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (4564, 4566), False, 'import threading\n'), ((9898, 9919), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (9917, 9919), False, 'import threading\n'), ((9937, 9976), 'opentelemetry.context.detach', 'detach', (['self._event_map[event_id].token'], {}), '(self._event_map[event_id].token)\n', (9943, 9976), False, 'from opentelemetry.context import Context, get_current, attach, detach\n'), ((3071, 3092), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (3090, 3092), False, 'import threading\n'), ((9123, 9183), 'llama_index.callbacks.token_counting.get_llm_token_counts', 'get_llm_token_counts', (['self._token_counter', 'payload', 'event_id'], {}), '(self._token_counter, payload, event_id)\n', (9143, 9183), False, 'from llama_index.callbacks.token_counting import get_llm_token_counts, TokenCountingEvent\n'), ((6876, 6900), 'opentelemetry.trace.Status', 'Status', (['StatusCode.ERROR'], {}), '(StatusCode.ERROR)\n', (6882, 6900), False, 'from opentelemetry.trace import Tracer, get_tracer, set_span_in_context, Status, StatusCode\n'), ((9737, 9761), 'opentelemetry.trace.Status', 'Status', (['StatusCode.ERROR'], {}), '(StatusCode.ERROR)\n', (9743, 9761), False, 'from opentelemetry.trace import Tracer, get_tracer, set_span_in_context, Status, StatusCode\n')] |
from pathlib import Path
from llama_index import download_loader
ImageReader = download_loader("ImageReader")
# If the Image has key-value pairs text, use text_type = "key_value"
loader = ImageReader(text_type="key_value")
documents = loader.load_data(file=Path('./receipt.webp'))
print(documents) | [
"llama_index.download_loader"
] | [((80, 110), 'llama_index.download_loader', 'download_loader', (['"""ImageReader"""'], {}), "('ImageReader')\n", (95, 110), False, 'from llama_index import download_loader\n'), ((261, 283), 'pathlib.Path', 'Path', (['"""./receipt.webp"""'], {}), "('./receipt.webp')\n", (265, 283), False, 'from pathlib import Path\n')] |
# https://github.com/jerryjliu/llama_index/blob/main/examples/langchain_demo/LangchainDemo.ipynb
# Using LlamaIndex as a Callable Tool
from langchain.agents import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent
from langchain import HuggingFaceHub
from llama_index import LangchainEmbedding, ServiceContext
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext
from llama_index.query_engine import SubQuestionQueryEngine
documents = SimpleDirectoryReader('data/experiment').load_data()
repo_id = "tiiuae/falcon-7b"
embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.1, 'truncation': 'only_first',
"max_length": 1024})
llm_predictor = LLMPredictor(llm=llm)
service_context = ServiceContext.from_defaults(chunk_size=512, llm_predictor=llm_predictor, embed_model=embed_model)
index = VectorStoreIndex.from_documents(documents=documents, service_context=service_context)
engine = index.as_query_engine(similarity_top_k=3)
query_engine_tools = [
QueryEngineTool(
query_engine=engine,
metadata=ToolMetadata(name='Paulindex', description='Provides information about Paul Graham Essay')
)
]
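# SubQuestionQueryEngine decomposes a question into sub-questions and routes each one to the tools above.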
s_engine = SubQuestionQueryEngine.from_defaults(query_engine_tools=query_engine_tools)
response = s_engine.query('Explain childhood')
print(response)
### As a chat bot
# tools = [
# Tool(
# name="LlamaIndex",
# func=lambda q: str(index.as_query_engine().query(q)),
# description="useful for when you want to answer questions about the author. The input to this tool should be a complete english sentence.",
# return_direct=True
# ),
# ]
# memory = ConversationBufferMemory(memory_key="chat_history")
# # llm = ChatOpenAI(temperature=0)
# agent_executor = initialize_agent(tools, llm, agent="conversational-react-description", memory=memory)
#
# agent_executor.run(input="hi, i am bob")
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.tools.ToolMetadata",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults"
] | [((874, 992), 'langchain.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': 'repo_id', 'model_kwargs': "{'temperature': 0.1, 'truncation': 'only_first', 'max_length': 1024}"}), "(repo_id=repo_id, model_kwargs={'temperature': 0.1,\n 'truncation': 'only_first', 'max_length': 1024})\n", (888, 992), False, 'from langchain import HuggingFaceHub\n'), ((1057, 1078), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (1069, 1078), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((1097, 1199), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': '(512)', 'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model'}), '(chunk_size=512, llm_predictor=llm_predictor,\n embed_model=embed_model)\n', (1125, 1199), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((1205, 1295), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(documents=documents, service_context=\n service_context)\n', (1236, 1295), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((1544, 1619), 'llama_index.query_engine.SubQuestionQueryEngine.from_defaults', 'SubQuestionQueryEngine.from_defaults', ([], {'query_engine_tools': 'query_engine_tools'}), '(query_engine_tools=query_engine_tools)\n', (1580, 1619), False, 'from llama_index.query_engine import SubQuestionQueryEngine\n'), ((842, 865), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {}), '()\n', (863, 865), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((727, 767), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data/experiment"""'], {}), "('data/experiment')\n", (748, 767), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((1433, 1528), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""Paulindex"""', 'description': '"""Provides information about Paul Graham Essay"""'}), "(name='Paulindex', description=\n 'Provides information about Paul Graham Essay')\n", (1445, 1528), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n')] |
"""This module provides functionality for loading chat prompts.
The main function in this module is `load_chat_prompt`, which loads a chat prompt from a given JSON file.
The JSON file is expected to contain a "messages" list; each entry maps a role ("system", "human" or "assistant") to its content, and the final entry holds the "human" template used for the user message.
Typical usage example:
from wandbot.chat import prompts
chat_prompt = prompts.load_chat_prompt('path_to_your_json_file.json')
"""
import json
import logging
import pathlib
from typing import Union
from llama_index import ChatPromptTemplate
from llama_index.llms import ChatMessage, MessageRole
logger = logging.getLogger(__name__)
def partial_format(s, **kwargs):
# Manually parse the string and extract the field names
place_holders = set()
field_name = ""
in_field = False
for c in s:
if c == "{" and not in_field:
in_field = True
elif c == "}" and in_field:
place_holders.add(field_name)
field_name = ""
in_field = False
elif in_field:
field_name += c
replacements = {k: kwargs.get(k, "{" + k + "}") for k in place_holders}
# Escape all curly braces
s = s.replace("{", "{{").replace("}", "}}")
# Replace the placeholders
for k, v in replacements.items():
s = s.replace("{{" + k + "}}", v)
return s
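# For illustration: partial_format("{a} and {b}", a="x") returns "x and {b}" --
# known fields are substituted while unknown placeholders are left intact for later formatting.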
ROLE_MAP = {
"system": MessageRole.SYSTEM,
"human": MessageRole.USER,
"assistant": MessageRole.ASSISTANT,
}
def load_chat_prompt(
f_name: Union[pathlib.Path, str] = None,
language_code: str = "en",
query_intent: str = "",
) -> ChatPromptTemplate:
"""
Loads a chat prompt from a given file.
    This function reads a JSON file specified by f_name and constructs a ChatPromptTemplate
    object from the data. The JSON file is expected to contain a "messages" list; each entry
    maps a role ("system", "human" or "assistant") to its content, and the final entry holds
    the "human" template, which may use {language_code} and {query_intent} placeholders.
    Args:
        f_name: A string or a pathlib.Path object representing the path to the JSON file.
        language_code: Language code substituted into the final human message template.
        query_intent: Query intent description substituted into the final human message template.
    Returns:
        A ChatPromptTemplate object constructed from the data in the JSON file.
"""
f_name = pathlib.Path(f_name)
template = json.load(f_name.open("r"))
human_template = partial_format(
template["messages"][-1]["human"],
language_code=language_code,
query_intent=query_intent,
)
messages = []
for message in template["messages"][:-1]:
for k, v in message.items():
messages.append(ChatMessage(role=ROLE_MAP[k], content=v))
messages.append(ChatMessage(role=MessageRole.USER, content=human_template))
prompt = ChatPromptTemplate(messages)
return prompt
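# A minimal sketch of the prompt JSON this loader expects, inferred from the parsing logic
# above (the message contents below are hypothetical, not actual wandbot prompts):
#
# {
#     "messages": [
#         {"system": "You are wandbot, a helpful support assistant."},
#         {"human": "Answer in {language_code}. The question intent is: {query_intent}"}
#     ]
# }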
| [
"llama_index.llms.ChatMessage",
"llama_index.ChatPromptTemplate"
] | [((626, 653), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (643, 653), False, 'import logging\n'), ((2217, 2237), 'pathlib.Path', 'pathlib.Path', (['f_name'], {}), '(f_name)\n', (2229, 2237), False, 'import pathlib\n'), ((2706, 2734), 'llama_index.ChatPromptTemplate', 'ChatPromptTemplate', (['messages'], {}), '(messages)\n', (2724, 2734), False, 'from llama_index import ChatPromptTemplate\n'), ((2633, 2691), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'human_template'}), '(role=MessageRole.USER, content=human_template)\n', (2644, 2691), False, 'from llama_index.llms import ChatMessage, MessageRole\n'), ((2571, 2611), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'ROLE_MAP[k]', 'content': 'v'}), '(role=ROLE_MAP[k], content=v)\n', (2582, 2611), False, 'from llama_index.llms import ChatMessage, MessageRole\n')] |
from llama_index import RssReader
from flask import Flask, request, render_template
import json
# Load template
with open('app/template.md') as f:
template = f.read()
# Get rss content
def get_rss_content(websites: list) -> list:
    reader = RssReader()
    results = []
    for url in websites:
        documents = reader.load_data([url])
        for doc in documents:
            text = str(doc.get_text())
            results.append(text)
    return results
app = Flask(__name__)
@app.route('/rss', methods=['POST'])
def endpoint_generate_rss():
feed_requests = ["insert rss feed url"]
markdown_string = get_rss_content(feed_requests)
#Format the Markdown string using mdformat
concate_markdown_string = ""
for string in markdown_string:
concate_markdown_string = concate_markdown_string + string
columun = list(range(len(markdown_string)))
format_dict = {}
for num in range(len(columun)):
format_dict[f"key number {columun[num]}"] = markdown_string[num]
#Populate the template with the generated content and image URL
output = template.format(topic="topic", overview=concate_markdown_string)
# # Write output to file
with open(f"", 'w') as f:
f.write(output)
format_json_dict = json.dumps(format_dict, indent = 4)
return format_json_dict
if __name__ == '__main__':
app.run() | [
"llama_index.RssReader"
] | [((510, 525), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (515, 525), False, 'from flask import Flask, request, render_template\n'), ((250, 261), 'llama_index.RssReader', 'RssReader', ([], {}), '()\n', (259, 261), False, 'from llama_index import RssReader\n'), ((1308, 1341), 'json.dumps', 'json.dumps', (['format_dict'], {'indent': '(4)'}), '(format_dict, indent=4)\n', (1318, 1341), False, 'import json\n')] |
# Copyright 2023 osiworx
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import datetime
import os
from llama_index.vector_stores.milvus import MilvusVectorStore
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
)
from llama_index.core.storage.storage_context import StorageContext
vector_store = MilvusVectorStore(
uri = "http://localhost:19530",
port = 19530 ,
collection_name = 'llama_index_prompts_large',
dim = 384,
similarity_metric = "L2",
)
sample_files_path = r"E:\short_large"
embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L12-v2")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
for subdir, dirs, files in os.walk(sample_files_path):
if len(files) > 0:
now = datetime.datetime.now()
print(f'{now.strftime("%H:%M:%S")} adding folder: {subdir}')
documents = SimpleDirectoryReader(subdir).load_data()
# here we set the file_path to become no part of the embedding, its not for this usecase
# also we check if a doc has zero content then we don't try to embedd it as it would result in an error
docs = []
for doc in documents:
doc.excluded_llm_metadata_keys.append("file_path")
doc.excluded_embed_metadata_keys.append("file_path")
if doc.text != '':
                docs.append(doc)
del documents
vector_index = VectorStoreIndex.from_documents(docs, storage_context=storage_context,embed_model=embed_model, show_progress=True)
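# For illustration only (assumed follow-up, not part of the original ingestion script): the
# populated Milvus collection could later be queried by rebuilding an index over the same
# vector store, e.g.
#   query_index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
#   retriever = query_index.as_retriever(similarity_top_k=5)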
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.vector_stores.milvus.MilvusVectorStore",
"llama_index.core.storage.storage_context.StorageContext.from_defaults",
"llama_index.core.SimpleDirectoryReader"
] | [((894, 1036), 'llama_index.vector_stores.milvus.MilvusVectorStore', 'MilvusVectorStore', ([], {'uri': '"""http://localhost:19530"""', 'port': '(19530)', 'collection_name': '"""llama_index_prompts_large"""', 'dim': '(384)', 'similarity_metric': '"""L2"""'}), "(uri='http://localhost:19530', port=19530, collection_name\n ='llama_index_prompts_large', dim=384, similarity_metric='L2')\n", (911, 1036), False, 'from llama_index.vector_stores.milvus import MilvusVectorStore\n'), ((1122, 1196), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""sentence-transformers/all-MiniLM-L12-v2"""'}), "(model_name='sentence-transformers/all-MiniLM-L12-v2')\n", (1142, 1196), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((1216, 1271), 'llama_index.core.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1244, 1271), False, 'from llama_index.core.storage.storage_context import StorageContext\n'), ((1301, 1327), 'os.walk', 'os.walk', (['sample_files_path'], {}), '(sample_files_path)\n', (1308, 1327), False, 'import os\n'), ((1366, 1389), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1387, 1389), False, 'import datetime\n'), ((2023, 2142), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'storage_context': 'storage_context', 'embed_model': 'embed_model', 'show_progress': '(True)'}), '(docs, storage_context=storage_context,\n embed_model=embed_model, show_progress=True)\n', (2054, 2142), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n'), ((1480, 1509), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['subdir'], {}), '(subdir)\n', (1501, 1509), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n')] |
import numpy as np
from llama_index.core import StorageContext, load_index_from_storage
from llama_index.llms.litellm import LiteLLM
from langchain_google_genai import ChatGoogleGenerativeAI
from trulens_eval.feedback.provider.langchain import Langchain
from trulens_eval import Tru, Feedback, TruLlama
from trulens_eval.feedback import Groundedness
# Setup RAG
index = load_index_from_storage(
StorageContext.from_defaults(persist_dir="base_index"),
embed_model="local:../models/bge-small-en-v1.5",
)
llm = LiteLLM(model="gemini/gemini-pro", temperature=0.1)
query_engine = index.as_query_engine(llm=llm)
# Evaluate with trulens-eval
# Define provider and database
_llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0)
provider = Langchain(chain=_llm)
database_url = "sqlite:///data/trulens.db"
tru = Tru(database_url=database_url, database_redact_keys=True)
# tru.reset_database()
# Using TruLlama
f_qa_relevance = Feedback(
provider.relevance_with_cot_reasons, name="Answer Relevance"
).on_input_output()
f_context_relevance = (
Feedback(provider.relevance_with_cot_reasons, name="Context Relevance")
.on_input()
.on(TruLlama.select_source_nodes().node.text)
.aggregate(np.mean)
)
grounded = Groundedness(groundedness_provider=provider)
f_groundedness = (
Feedback(grounded.groundedness_measure_with_cot_reasons, name="Groundedness")
.on(TruLlama.select_source_nodes().node.text)
.on_output()
.aggregate(grounded.grounded_statements_aggregator)
)
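# Together these feedbacks cover the RAG triad: answer relevance, context relevance and groundedness.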
app_id = "Chain2"
tru_recorder = TruLlama(
query_engine,
app_id=app_id,
feedbacks=[
f_qa_relevance,
f_context_relevance,
f_groundedness,
],
)
qns = ...
for qn in qns:
with tru_recorder as recording:
res = query_engine.query(qn)
# Results
# dashboard
tru.run_dashboard(port=8601)
# # dataframe
# records_df, feedback = tru.get_records_and_feedback(app_ids=[app_id])
# records_df.head()
| [
"llama_index.core.StorageContext.from_defaults",
"llama_index.llms.litellm.LiteLLM"
] | [((519, 570), 'llama_index.llms.litellm.LiteLLM', 'LiteLLM', ([], {'model': '"""gemini/gemini-pro"""', 'temperature': '(0.1)'}), "(model='gemini/gemini-pro', temperature=0.1)\n", (526, 570), False, 'from llama_index.llms.litellm import LiteLLM\n'), ((687, 744), 'langchain_google_genai.ChatGoogleGenerativeAI', 'ChatGoogleGenerativeAI', ([], {'model': '"""gemini-pro"""', 'temperature': '(0)'}), "(model='gemini-pro', temperature=0)\n", (709, 744), False, 'from langchain_google_genai import ChatGoogleGenerativeAI\n'), ((756, 777), 'trulens_eval.feedback.provider.langchain.Langchain', 'Langchain', ([], {'chain': '_llm'}), '(chain=_llm)\n', (765, 777), False, 'from trulens_eval.feedback.provider.langchain import Langchain\n'), ((828, 885), 'trulens_eval.Tru', 'Tru', ([], {'database_url': 'database_url', 'database_redact_keys': '(True)'}), '(database_url=database_url, database_redact_keys=True)\n', (831, 885), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((1245, 1289), 'trulens_eval.feedback.Groundedness', 'Groundedness', ([], {'groundedness_provider': 'provider'}), '(groundedness_provider=provider)\n', (1257, 1289), False, 'from trulens_eval.feedback import Groundedness\n'), ((1551, 1657), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'app_id': 'app_id', 'feedbacks': '[f_qa_relevance, f_context_relevance, f_groundedness]'}), '(query_engine, app_id=app_id, feedbacks=[f_qa_relevance,\n f_context_relevance, f_groundedness])\n', (1559, 1657), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((401, 455), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""base_index"""'}), "(persist_dir='base_index')\n", (429, 455), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((945, 1015), 'trulens_eval.Feedback', 'Feedback', (['provider.relevance_with_cot_reasons'], {'name': '"""Answer Relevance"""'}), "(provider.relevance_with_cot_reasons, name='Answer Relevance')\n", (953, 1015), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((1165, 1195), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (1193, 1195), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((1069, 1140), 'trulens_eval.Feedback', 'Feedback', (['provider.relevance_with_cot_reasons'], {'name': '"""Context Relevance"""'}), "(provider.relevance_with_cot_reasons, name='Context Relevance')\n", (1077, 1140), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((1313, 1390), 'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {'name': '"""Groundedness"""'}), "(grounded.groundedness_measure_with_cot_reasons, name='Groundedness')\n", (1321, 1390), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((1399, 1429), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (1427, 1429), False, 'from trulens_eval import Tru, Feedback, TruLlama\n')] |
import os
from dotenv import load_dotenv
from llama_index.chat_engine.condense_plus_context import CondensePlusContextChatEngine
from llama_index.llms.openai import OpenAI
from llama_index.llms.types import ChatMessage, MessageRole
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.retrievers import PathwayRetriever
from traceloop.sdk import Traceloop
from pathway.xpacks.llm.vector_store import VectorStoreClient
load_dotenv()
Traceloop.init(app_name=os.environ.get("APP_NAME", "PW - LlamaIndex (Streamlit)"))
DEFAULT_PATHWAY_HOST = "demo-document-indexing.pathway.stream"
PATHWAY_HOST = os.environ.get("PATHWAY_HOST", DEFAULT_PATHWAY_HOST)
PATHWAY_PORT = int(os.environ.get("PATHWAY_PORT", "80"))
def get_additional_headers():
headers = {}
key = os.environ.get("PATHWAY_API_KEY")
if key is not None:
headers = {"X-Pathway-API-Key": key}
return headers
vector_client = VectorStoreClient(
PATHWAY_HOST,
PATHWAY_PORT,
# additional_headers=get_additional_headers(),
)
retriever = PathwayRetriever(host=PATHWAY_HOST, port=PATHWAY_PORT)
retriever.client = VectorStoreClient(
host=PATHWAY_HOST,
port=PATHWAY_PORT,
# additional_headers=get_additional_headers()
)
llm = OpenAI(model="gpt-3.5-turbo")
query_engine = RetrieverQueryEngine.from_args(
retriever,
)
pathway_explaination = "Pathway is a high-throughput, low-latency data processing framework that handles live data & streaming for you."
DEFAULT_MESSAGES = [
ChatMessage(role=MessageRole.USER, content="What is Pathway?"),
ChatMessage(role=MessageRole.ASSISTANT, content=pathway_explaination),
]
chat_engine = CondensePlusContextChatEngine.from_defaults(
retriever=retriever,
system_prompt="""You are RAG AI that answers users questions based on provided sources.
    IF QUESTION IS NOT RELATED TO ANY OF THE CONTEXT DOCUMENTS, SAY IT'S NOT POSSIBLE TO ANSWER USING PHRASE `The looked-up documents do not provide information about...`""",
verbose=True,
chat_history=DEFAULT_MESSAGES,
llm=llm,
)
| [
"llama_index.llms.openai.OpenAI",
"llama_index.chat_engine.condense_plus_context.CondensePlusContextChatEngine.from_defaults",
"llama_index.query_engine.RetrieverQueryEngine.from_args",
"llama_index.llms.types.ChatMessage",
"llama_index.retrievers.PathwayRetriever"
] | [((443, 456), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (454, 456), False, 'from dotenv import load_dotenv\n'), ((622, 674), 'os.environ.get', 'os.environ.get', (['"""PATHWAY_HOST"""', 'DEFAULT_PATHWAY_HOST'], {}), "('PATHWAY_HOST', DEFAULT_PATHWAY_HOST)\n", (636, 674), False, 'import os\n'), ((932, 977), 'pathway.xpacks.llm.vector_store.VectorStoreClient', 'VectorStoreClient', (['PATHWAY_HOST', 'PATHWAY_PORT'], {}), '(PATHWAY_HOST, PATHWAY_PORT)\n', (949, 977), False, 'from pathway.xpacks.llm.vector_store import VectorStoreClient\n'), ((1054, 1108), 'llama_index.retrievers.PathwayRetriever', 'PathwayRetriever', ([], {'host': 'PATHWAY_HOST', 'port': 'PATHWAY_PORT'}), '(host=PATHWAY_HOST, port=PATHWAY_PORT)\n', (1070, 1108), False, 'from llama_index.retrievers import PathwayRetriever\n'), ((1128, 1183), 'pathway.xpacks.llm.vector_store.VectorStoreClient', 'VectorStoreClient', ([], {'host': 'PATHWAY_HOST', 'port': 'PATHWAY_PORT'}), '(host=PATHWAY_HOST, port=PATHWAY_PORT)\n', (1145, 1183), False, 'from pathway.xpacks.llm.vector_store import VectorStoreClient\n'), ((1254, 1283), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (1260, 1283), False, 'from llama_index.llms.openai import OpenAI\n'), ((1300, 1341), 'llama_index.query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', (['retriever'], {}), '(retriever)\n', (1330, 1341), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((1668, 2062), 'llama_index.chat_engine.condense_plus_context.CondensePlusContextChatEngine.from_defaults', 'CondensePlusContextChatEngine.from_defaults', ([], {'retriever': 'retriever', 'system_prompt': '"""You are RAG AI that answers users questions based on provided sources.\n IF QUESTION IS NOT RELATED TO ANY OF THE CONTEXT DOCUMENTS, SAY IT\'S NOT POSSIBLE TO ANSWER USING PHRASE `The looked-up documents do not provde information about...`"""', 'verbose': '(True)', 'chat_history': 'DEFAULT_MESSAGES', 'llm': 'llm'}), '(retriever=retriever,\n system_prompt=\n """You are RAG AI that answers users questions based on provided sources.\n IF QUESTION IS NOT RELATED TO ANY OF THE CONTEXT DOCUMENTS, SAY IT\'S NOT POSSIBLE TO ANSWER USING PHRASE `The looked-up documents do not provde information about...`"""\n , verbose=True, chat_history=DEFAULT_MESSAGES, llm=llm)\n', (1711, 2062), False, 'from llama_index.chat_engine.condense_plus_context import CondensePlusContextChatEngine\n'), ((695, 731), 'os.environ.get', 'os.environ.get', (['"""PATHWAY_PORT"""', '"""80"""'], {}), "('PATHWAY_PORT', '80')\n", (709, 731), False, 'import os\n'), ((792, 825), 'os.environ.get', 'os.environ.get', (['"""PATHWAY_API_KEY"""'], {}), "('PATHWAY_API_KEY')\n", (806, 825), False, 'import os\n'), ((1512, 1574), 'llama_index.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': '"""What is Pathway?"""'}), "(role=MessageRole.USER, content='What is Pathway?')\n", (1523, 1574), False, 'from llama_index.llms.types import ChatMessage, MessageRole\n'), ((1580, 1649), 'llama_index.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'pathway_explaination'}), '(role=MessageRole.ASSISTANT, content=pathway_explaination)\n', (1591, 1649), False, 'from llama_index.llms.types import ChatMessage, MessageRole\n'), ((483, 540), 'os.environ.get', 'os.environ.get', (['"""APP_NAME"""', '"""PW - LlamaIndex (Streamlit)"""'], {}), "('APP_NAME', 'PW - LlamaIndex (Streamlit)')\n", 
(497, 540), False, 'import os\n')] |
# My OpenAI Key
import logging
import os
import sys
from IPython.display import Markdown, display
from llama_index import GPTTreeIndex, SimpleDirectoryReader
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
documents = SimpleDirectoryReader("data").load_data()
index = GPTTreeIndex.from_documents(documents)
index.save_to_disk("index.json")
# try loading
new_index = GPTTreeIndex.load_from_disk("index.json")
# set Logging to DEBUG for more detailed outputs
response = new_index.query("What did the author do growing up?")
print(response)
# set Logging to DEBUG for more detailed outputs
response = new_index.query("What did the author do after his time at Y Combinator?")
print(response)
| [
"llama_index.SimpleDirectoryReader",
"llama_index.GPTTreeIndex.load_from_disk",
"llama_index.GPTTreeIndex.from_documents"
] | [((160, 218), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (179, 218), False, 'import logging\n'), ((324, 351), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (333, 351), False, 'import os\n'), ((415, 453), 'llama_index.GPTTreeIndex.from_documents', 'GPTTreeIndex.from_documents', (['documents'], {}), '(documents)\n', (442, 453), False, 'from llama_index import GPTTreeIndex, SimpleDirectoryReader\n'), ((515, 556), 'llama_index.GPTTreeIndex.load_from_disk', 'GPTTreeIndex.load_from_disk', (['"""index.json"""'], {}), "('index.json')\n", (542, 556), False, 'from llama_index import GPTTreeIndex, SimpleDirectoryReader\n'), ((250, 290), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (271, 290), False, 'import logging\n'), ((219, 238), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (236, 238), False, 'import logging\n'), ((365, 394), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (386, 394), False, 'from llama_index import GPTTreeIndex, SimpleDirectoryReader\n')] |
from llama_index import Document
import json, os
from llama_index.node_parser import SimpleNodeParser
from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex
from langchain import OpenAI
from llama_index.composability import ComposableGraph
from llama_index.data_structs.node_v2 import Node, DocumentRelationship
class ConfigLLM:
# define LLM
name = "gpt-3.5-turbo"
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo"))
# define prompt helper
# set maximum input size
max_input_size = 2096
# set number of output tokens
num_output = 256
# set maximum chunk overlap
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
def index_construct_and_save(timechunk_path: str, save_loc: str):
for filename in os.listdir(timechunk_path):
file = os.path.join(timechunk_path, filename)
data = json.load(open(file=file, mode="r"))
# keys, text = list(zip(*data.items()))
nodes = [Node(text=text, doc_id=keys) for keys, text in data.items()]
index = GPTTreeIndex(nodes=nodes)
index.save_to_disk(f"{save_loc}/{filename}.json")
def load_index_with_summary(index_loc: str):
index_list = []
index_summary_list = []
for filename in os.listdir(index_loc):
index_file = os.path.join(index_loc, filename)
index = GPTTreeIndex.load_from_disk(index_file)
summary = index.query(
"What is the summary of this document chunk?", mode="summarize"
)
index_summary_list.append(str(summary))
index_list.append(index)
#! logging
print("index list", len(index_list), index_list)
return index_list, index_summary_list
def compose_graph_and_save(index_loc: str, save_loc: str):
index_list, index_summary_list = load_index_with_summary(index_loc)
#! logging
print(index_summary_list)
graph = ComposableGraph.from_indices(GPTListIndex, index_list, index_summary_list)
graph.save_to_disk(save_loc)
def load_graph(graph_location: str):
return ComposableGraph.load_from_disk(graph_location)
def query_graph(query: str, graph: ComposableGraph):
response = graph.query(query, query_configs=get_query_configs())
return response
def parse_response(response: ComposableGraph.query):
print("-" * 50)
print(response)
print("-" * 50)
print(
str(response),
# response.source_nodes,
[node_with_score.node.doc_id for node_with_score in response.source_nodes],
# [node.ref_doc_id for node in response.source_nodes],
response.get_formatted_sources(),
sep="\n" + "+" * 80 + "\n",
)
print("-" * 50)
def query_composed_index(query: str, graph_loc: str):
graph = load_graph(graph_loc)
response = query_graph(query, graph)
parse_response(response)
def query_single_index(query: str, index_loc: str):
index = GPTTreeIndex.load_from_disk(index_loc)
response = index.query(query)
parse_response(response)
def get_query_configs():
# set query config
query_configs = [
{
"index_struct_type": "simple_dict",
"query_mode": "default",
"query_kwargs": {"similarity_top_k": 1},
},
{
"index_struct_type": "keyword_table",
"query_mode": "simple",
"query_kwargs": {},
},
]
return query_configs
| [
"llama_index.GPTTreeIndex.load_from_disk",
"llama_index.composability.ComposableGraph.from_indices",
"llama_index.GPTTreeIndex",
"llama_index.data_structs.node_v2.Node",
"llama_index.PromptHelper",
"llama_index.composability.ComposableGraph.load_from_disk"
] | [((705, 764), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (717, 764), False, 'from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex\n'), ((853, 879), 'os.listdir', 'os.listdir', (['timechunk_path'], {}), '(timechunk_path)\n', (863, 879), False, 'import json, os\n'), ((1328, 1349), 'os.listdir', 'os.listdir', (['index_loc'], {}), '(index_loc)\n', (1338, 1349), False, 'import json, os\n'), ((1966, 2040), 'llama_index.composability.ComposableGraph.from_indices', 'ComposableGraph.from_indices', (['GPTListIndex', 'index_list', 'index_summary_list'], {}), '(GPTListIndex, index_list, index_summary_list)\n', (1994, 2040), False, 'from llama_index.composability import ComposableGraph\n'), ((2124, 2170), 'llama_index.composability.ComposableGraph.load_from_disk', 'ComposableGraph.load_from_disk', (['graph_location'], {}), '(graph_location)\n', (2154, 2170), False, 'from llama_index.composability import ComposableGraph\n'), ((2974, 3012), 'llama_index.GPTTreeIndex.load_from_disk', 'GPTTreeIndex.load_from_disk', (['index_loc'], {}), '(index_loc)\n', (3001, 3012), False, 'from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex\n'), ((896, 934), 'os.path.join', 'os.path.join', (['timechunk_path', 'filename'], {}), '(timechunk_path, filename)\n', (908, 934), False, 'import json, os\n'), ((1129, 1154), 'llama_index.GPTTreeIndex', 'GPTTreeIndex', ([], {'nodes': 'nodes'}), '(nodes=nodes)\n', (1141, 1154), False, 'from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex\n'), ((1372, 1405), 'os.path.join', 'os.path.join', (['index_loc', 'filename'], {}), '(index_loc, filename)\n', (1384, 1405), False, 'import json, os\n'), ((1422, 1461), 'llama_index.GPTTreeIndex.load_from_disk', 'GPTTreeIndex.load_from_disk', (['index_file'], {}), '(index_file)\n', (1449, 1461), False, 'from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex\n'), ((437, 486), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (443, 486), False, 'from langchain import OpenAI\n'), ((1052, 1080), 'llama_index.data_structs.node_v2.Node', 'Node', ([], {'text': 'text', 'doc_id': 'keys'}), '(text=text, doc_id=keys)\n', (1056, 1080), False, 'from llama_index.data_structs.node_v2 import Node, DocumentRelationship\n')] |
import logging
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.vector_stores import ChromaVectorStore
from llama_index.storage.storage_context import StorageContext
from IPython.display import Markdown, display
from llama_index.node_parser import SentenceSplitter
from embeddings import EmbeddingComponent
import chromadb
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class FileReader:
def __init__(self, file_paths):
self.file_paths = file_paths
def read_data(self):
logger.info("Reading data from files: %s", self.file_paths)
file_reader = SimpleDirectoryReader(input_files=self.file_paths)
return file_reader.load_data()
def parse_data(self, data):
logger.info("Parsing data")
node_parser = SentenceSplitter(
chunk_size=500,
chunk_overlap=50,
separator=" ",
paragraph_separator="\n\n\n",
secondary_chunking_regex="[^,.;。]+[,.;。]?"
)
return node_parser.get_nodes_from_documents(data)
class DatabaseManager:
def __init__(self, db_path, collection_name):
self.db_path = db_path
self.collection_name = collection_name
def initialize_db(self):
logger.info("Initializing the database at path: %s", self.db_path)
db = chromadb.PersistentClient(path=self.db_path)
return db.get_or_create_collection(self.collection_name)
class VectorIndexer:
def __init__(self, nodes, vector_store, embedding_model):
self.nodes = nodes
self.vector_store = vector_store
self.embedding_model = embedding_model
def create_index(self):
logger.info("Creating the vector index")
storage_context = StorageContext.from_defaults(vector_store=self.vector_store)
service_context = ServiceContext.from_defaults(embed_model=self.embedding_model, llm=None)
return VectorStoreIndex(
self.nodes, storage_context=storage_context, service_context=service_context
)
def main():
# User Inputs
file_path = input("Enter the path to the file: ")
db_path = input("Enter the path to the database: ")
collection_name = input("Enter the database collection name: ")
# File Reading
file_reader = FileReader(file_paths=[file_path])
file_data = file_reader.read_data()
file_nodes = file_reader.parse_data(file_data)
# Database Initialization
db_manager = DatabaseManager(db_path=db_path, collection_name=collection_name)
db_collection = db_manager.initialize_db()
# Embedding Model Initialization
embedding_mode = input("Enter the embedding mode ('local' or 'openai'): ")
hf_model_name = input("Enter the Hugging Face model name: ")
embedding_model = EmbeddingComponent(embedding_mode, hf_model_name)
# Vector Store and Index Creation
vector_store = ChromaVectorStore(chroma_collection=db_collection)
indexer = VectorIndexer(nodes=file_nodes, vector_store=vector_store, embedding_model=embedding_model.embedding_model)
index = indexer.create_index()
if __name__ == "__main__":
main()
| [
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.node_parser.SentenceSplitter"
] | [((418, 457), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (437, 457), False, 'import logging\n'), ((467, 494), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (484, 494), False, 'import logging\n'), ((2869, 2918), 'embeddings.EmbeddingComponent', 'EmbeddingComponent', (['embedding_mode', 'hf_model_name'], {}), '(embedding_mode, hf_model_name)\n', (2887, 2918), False, 'from embeddings import EmbeddingComponent\n'), ((2977, 3027), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'db_collection'}), '(chroma_collection=db_collection)\n', (2994, 3027), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((703, 753), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'self.file_paths'}), '(input_files=self.file_paths)\n', (724, 753), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((884, 1027), 'llama_index.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(50)', 'separator': '""" """', 'paragraph_separator': '"""\n\n\n"""', 'secondary_chunking_regex': '"""[^,.;。]+[,.;。]?"""'}), "(chunk_size=500, chunk_overlap=50, separator=' ',\n paragraph_separator='\\n\\n\\n', secondary_chunking_regex='[^,.;。]+[,.;。]?')\n", (900, 1027), False, 'from llama_index.node_parser import SentenceSplitter\n'), ((1423, 1467), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'self.db_path'}), '(path=self.db_path)\n', (1448, 1467), False, 'import chromadb\n'), ((1837, 1897), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'self.vector_store'}), '(vector_store=self.vector_store)\n', (1865, 1897), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((1924, 1996), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'self.embedding_model', 'llm': 'None'}), '(embed_model=self.embedding_model, llm=None)\n', (1952, 1996), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((2012, 2110), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['self.nodes'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(self.nodes, storage_context=storage_context,\n service_context=service_context)\n', (2028, 2110), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n')] |
import os
from llama_index import (
GPTSimpleVectorIndex,
GPTTreeIndex,
GPTKeywordTableIndex,
GPTListIndex,
)
from llama_index import SimpleDirectoryReader, download_loader
from llama_index import (
Document,
LLMPredictor,
PromptHelper,
QuestionAnswerPrompt,
RefinePrompt,
)
from langchain.llms import OpenAIChat, OpenAI
from googlesearch import search as google_search
from baidusearch.baidusearch import search as baidu_search
from duckduckgo_search import ddg
from utils import *
import logging
import sys
def get_documents(file_src):
documents = []
logging.debug("Loading documents...")
print(f"file_src: {file_src}")
for file in file_src:
if type(file) == str:
print(f"file: {file}")
if "http" in file:
logging.debug("Loading web page...")
BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader")
loader = BeautifulSoupWebReader()
documents += loader.load_data([file])
else:
logging.debug(f"file: {file.name}")
if os.path.splitext(file.name)[1] == ".pdf":
logging.debug("Loading PDF...")
CJKPDFReader = download_loader("CJKPDFReader")
loader = CJKPDFReader()
documents += loader.load_data(file=file.name)
elif os.path.splitext(file.name)[1] == ".docx":
logging.debug("Loading DOCX...")
DocxReader = download_loader("DocxReader")
loader = DocxReader()
documents += loader.load_data(file=file.name)
elif os.path.splitext(file.name)[1] == ".epub":
logging.debug("Loading EPUB...")
EpubReader = download_loader("EpubReader")
loader = EpubReader()
documents += loader.load_data(file=file.name)
else:
logging.debug("Loading text file...")
with open(file.name, "r", encoding="utf-8") as f:
text = add_space(f.read())
documents += [Document(text)]
return documents
def construct_index(
api_key,
file_src,
index_name,
index_type,
max_input_size=4096,
num_outputs=512,
max_chunk_overlap=20,
chunk_size_limit=None,
embedding_limit=None,
separator=" ",
num_children=10,
max_keywords_per_chunk=10,
):
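    # Build the requested llama_index index type over the given files/URLs and persist it
    # so that it can be selected and queried later.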
chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit
embedding_limit = None if embedding_limit == 0 else embedding_limit
separator = " " if separator == "" else separator
llm_predictor = LLMPredictor(
llm=OpenAI(model_name="gpt-3.5-turbo-0301", openai_api_key=api_key)
)
prompt_helper = PromptHelper(
max_input_size,
num_outputs,
max_chunk_overlap,
embedding_limit,
chunk_size_limit,
separator=separator,
)
documents = get_documents(file_src)
try:
if index_type == "GPTSimpleVectorIndex":
index = GPTSimpleVectorIndex(
documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper
)
index_name += "_GPTSimpleVectorIndex"
elif index_type == "GPTTreeIndex":
index = GPTTreeIndex(
documents,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
num_children=num_children,
)
index_name += "_GPTTreeIndex"
elif index_type == "GPTKeywordTableIndex":
index = GPTKeywordTableIndex(
documents,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
max_keywords_per_chunk=max_keywords_per_chunk,
)
index_name += "_GPTKeywordTableIndex"
elif index_type == "GPTListIndex":
index = GPTListIndex(
documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper
)
index_name += "_GPTListIndex"
except Exception as e:
print(e)
return None
save_index(index, index_name)
newlist = refresh_json_list(plain=True)
return gr.Dropdown.update(choices=newlist, value=index_name)
def chat_ai(
api_key,
index_select,
question,
prompt_tmpl,
refine_tmpl,
sim_k,
chat_tone,
context,
chatbot,
search_mode=[],
):
if index_select == "请选择索引文件" and search_mode==[]:
chatbot.append((question, "❗请选择索引文件"))
return context, chatbot
os.environ["OPENAI_API_KEY"] = api_key
logging.info(f"Question: {question}")
temprature = 2 if chat_tone == 0 else 1 if chat_tone == 1 else 0.5
if search_mode:
index_select = search_construct(api_key, question, search_mode, index_select)
logging.debug(f"Index: {index_select}")
response = ask_ai(
api_key,
index_select,
question,
prompt_tmpl,
refine_tmpl,
sim_k,
temprature,
context,
)
if response is None:
chatbot.append((question, "查询失败,请换个问法试试"))
return context, chatbot
response = parse_text(response)
context.append({"role": "user", "content": question})
context.append({"role": "assistant", "content": response})
chatbot.append((question, response))
os.environ["OPENAI_API_KEY"] = ""
return context, chatbot
def ask_ai(
api_key,
index_select,
question,
prompt_tmpl,
refine_tmpl,
sim_k=1,
temprature=0,
prefix_messages=[],
):
os.environ["OPENAI_API_KEY"] = api_key
    # Check whether the index file exists
index_path = f"./index/{index_select}.json"
logging.debug(f"Index path: {index_path}")
if not os.path.exists(index_path):
logging.debug("Index file not found")
return None
logging.debug("Index file found")
logging.debug("Querying index...")
llm_predictor = LLMPredictor(
llm=OpenAI(
temperature=temprature,
model_name="gpt-3.5-turbo-0301",
prefix_messages=prefix_messages,
)
)
response = None # Initialize response variable to avoid UnboundLocalError
if "GPTTreeIndex" in index_select:
logging.debug("Using GPTTreeIndex")
index = GPTTreeIndex.load_from_disk(index_path)
response = index.query(question, llm_predictor=llm_predictor)
elif "GPTKeywordTableIndex" in index_select:
logging.debug("Using GPTKeywordTableIndex")
index = GPTKeywordTableIndex.load_from_disk(index_path)
response = index.query(question, llm_predictor=llm_predictor)
elif "GPTListIndex" in index_select:
logging.debug("Using GPTListIndex")
index = GPTListIndex.load_from_disk(index_path)
qa_prompt = QuestionAnswerPrompt(prompt_tmpl)
response = index.query(question, llm_predictor=llm_predictor)
else:
# if "GPTSimpleVectorIndex" in index_select or not specified
logging.debug("Using GPTSimpleVectorIndex")
index = GPTSimpleVectorIndex.load_from_disk(index_path)
qa_prompt = QuestionAnswerPrompt(prompt_tmpl)
rf_prompt = RefinePrompt(refine_tmpl)
response = index.query(
question,
llm_predictor=llm_predictor,
similarity_top_k=sim_k,
text_qa_template=qa_prompt,
refine_template=rf_prompt,
response_mode="compact"
)
if response is not None:
logging.info(f"Response: {response}")
ret_text = response.response
ret_text += "\n----------\n"
nodes = []
for index, node in enumerate(response.source_nodes):
nodes.append(f"[{index+1}] {node.source_text}")
ret_text += "\n\n".join(nodes)
os.environ["OPENAI_API_KEY"] = ""
return ret_text
else:
logging.debug("No response found, returning None")
os.environ["OPENAI_API_KEY"] = ""
return None
def search_construct(api_key, question, search_mode, index_select):
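    # Ask the LLM to extract search terms from the question, collect result links from the
    # selected search engines, then build a GPTSimpleVectorIndex over those pages.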
print(f"You asked: {question}")
chat = OpenAI(model_name="gpt-3.5-turbo-0301", openai_api_key=api_key)
search_terms = (
chat.generate(
[
f"Please extract search terms from the user’s question. The search terms is a concise sentence, which will be searched on Google to obtain relevant information to answer the user’s question, too generalized search terms doesn’t help. Please provide no more than two search terms. Please provide the most relevant search terms only, the search terms should directly correspond to the user’s question. Please separate different search items with commas, with no quote marks. The user’s question is: {question}"
]
)
.generations[0][0]
.text.strip()
)
search_terms = search_terms.replace('"', "")
search_terms = search_terms.replace(".", "")
links = []
for keywords in search_terms.split(","):
keywords = keywords.strip()
for search_engine in search_mode:
if "Google" in search_engine:
print(f"Googling: {keywords}")
search_iter = google_search(keywords, num_results=5)
links += [next(search_iter) for _ in range(10)]
if "Baidu" in search_engine:
print(f"Baiduing: {keywords}")
search_results = baidu_search(keywords, num_results=5)
links += [
i["url"]
for i in search_results
if i["url"].startswith("http") and (not "@" in i["url"])
]
if "DuckDuckGo" in search_engine:
results = ddg(keywords, max_results=5)
links += [r["href"] for r in results]
if "Manual" in search_engine:
print(f"Searching manually: {keywords}")
print("Please input links manually. (Enter 'q' to quit.)")
while True:
link = input("请手动输入一个链接:\n")
if link == "q":
break
else:
links.append(link)
links = list(set(links))
if len(links) == 0:
return index_select
print("Extracting data from links...")
print("\n".join(links))
search_index_name = " ".join(search_terms.split(","))
construct_index(api_key, links, search_index_name, "GPTSimpleVectorIndex")
print(f"Index {search_index_name} constructed.")
return search_index_name + "_GPTSimpleVectorIndex"
| [
"llama_index.GPTKeywordTableIndex.load_from_disk",
"llama_index.download_loader",
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.GPTListIndex",
"llama_index.GPTTreeIndex",
"llama_index.GPTListIndex.load_from_disk",
"llama_index.RefinePrompt",
"llama_index.QuestionAnswerPrompt",
"llama_index.PromptHelper",
"llama_index.GPTSimpleVectorIndex",
"llama_index.GPTKeywordTableIndex",
"llama_index.GPTTreeIndex.load_from_disk",
"llama_index.Document"
] | [((601, 638), 'logging.debug', 'logging.debug', (['"""Loading documents..."""'], {}), "('Loading documents...')\n", (614, 638), False, 'import logging\n'), ((2779, 2899), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap', 'embedding_limit', 'chunk_size_limit'], {'separator': 'separator'}), '(max_input_size, num_outputs, max_chunk_overlap,\n embedding_limit, chunk_size_limit, separator=separator)\n', (2791, 2899), False, 'from llama_index import Document, LLMPredictor, PromptHelper, QuestionAnswerPrompt, RefinePrompt\n'), ((4646, 4683), 'logging.info', 'logging.info', (['f"""Question: {question}"""'], {}), "(f'Question: {question}')\n", (4658, 4683), False, 'import logging\n'), ((4866, 4905), 'logging.debug', 'logging.debug', (['f"""Index: {index_select}"""'], {}), "(f'Index: {index_select}')\n", (4879, 4905), False, 'import logging\n'), ((5724, 5766), 'logging.debug', 'logging.debug', (['f"""Index path: {index_path}"""'], {}), "(f'Index path: {index_path}')\n", (5737, 5766), False, 'import logging\n'), ((5877, 5910), 'logging.debug', 'logging.debug', (['"""Index file found"""'], {}), "('Index file found')\n", (5890, 5910), False, 'import logging\n'), ((5915, 5949), 'logging.debug', 'logging.debug', (['"""Querying index..."""'], {}), "('Querying index...')\n", (5928, 5949), False, 'import logging\n'), ((8129, 8192), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-0301"""', 'openai_api_key': 'api_key'}), "(model_name='gpt-3.5-turbo-0301', openai_api_key=api_key)\n", (8135, 8192), False, 'from langchain.llms import OpenAIChat, OpenAI\n'), ((5778, 5804), 'os.path.exists', 'os.path.exists', (['index_path'], {}), '(index_path)\n', (5792, 5804), False, 'import os\n'), ((5814, 5851), 'logging.debug', 'logging.debug', (['"""Index file not found"""'], {}), "('Index file not found')\n", (5827, 5851), False, 'import logging\n'), ((6273, 6308), 'logging.debug', 'logging.debug', (['"""Using GPTTreeIndex"""'], {}), "('Using GPTTreeIndex')\n", (6286, 6308), False, 'import logging\n'), ((6325, 6364), 'llama_index.GPTTreeIndex.load_from_disk', 'GPTTreeIndex.load_from_disk', (['index_path'], {}), '(index_path)\n', (6352, 6364), False, 'from llama_index import GPTSimpleVectorIndex, GPTTreeIndex, GPTKeywordTableIndex, GPTListIndex\n'), ((7524, 7561), 'logging.info', 'logging.info', (['f"""Response: {response}"""'], {}), "(f'Response: {response}')\n", (7536, 7561), False, 'import logging\n'), ((7899, 7949), 'logging.debug', 'logging.debug', (['"""No response found, returning None"""'], {}), "('No response found, returning None')\n", (7912, 7949), False, 'import logging\n'), ((1062, 1097), 'logging.debug', 'logging.debug', (['f"""file: {file.name}"""'], {}), "(f'file: {file.name}')\n", (1075, 1097), False, 'import logging\n'), ((2689, 2752), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-0301"""', 'openai_api_key': 'api_key'}), "(model_name='gpt-3.5-turbo-0301', openai_api_key=api_key)\n", (2695, 2752), False, 'from langchain.llms import OpenAIChat, OpenAI\n'), ((3070, 3164), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(documents, llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (3090, 3164), False, 'from llama_index import GPTSimpleVectorIndex, GPTTreeIndex, GPTKeywordTableIndex, GPTListIndex\n'), ((5996, 6096), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': 'temprature', 
'model_name': '"""gpt-3.5-turbo-0301"""', 'prefix_messages': 'prefix_messages'}), "(temperature=temprature, model_name='gpt-3.5-turbo-0301',\n prefix_messages=prefix_messages)\n", (6002, 6096), False, 'from langchain.llms import OpenAIChat, OpenAI\n'), ((6492, 6535), 'logging.debug', 'logging.debug', (['"""Using GPTKeywordTableIndex"""'], {}), "('Using GPTKeywordTableIndex')\n", (6505, 6535), False, 'import logging\n'), ((6552, 6599), 'llama_index.GPTKeywordTableIndex.load_from_disk', 'GPTKeywordTableIndex.load_from_disk', (['index_path'], {}), '(index_path)\n', (6587, 6599), False, 'from llama_index import GPTSimpleVectorIndex, GPTTreeIndex, GPTKeywordTableIndex, GPTListIndex\n'), ((812, 848), 'logging.debug', 'logging.debug', (['"""Loading web page..."""'], {}), "('Loading web page...')\n", (825, 848), False, 'import logging\n'), ((890, 931), 'llama_index.download_loader', 'download_loader', (['"""BeautifulSoupWebReader"""'], {}), "('BeautifulSoupWebReader')\n", (905, 931), False, 'from llama_index import SimpleDirectoryReader, download_loader\n'), ((1171, 1202), 'logging.debug', 'logging.debug', (['"""Loading PDF..."""'], {}), "('Loading PDF...')\n", (1184, 1202), False, 'import logging\n'), ((1234, 1265), 'llama_index.download_loader', 'download_loader', (['"""CJKPDFReader"""'], {}), "('CJKPDFReader')\n", (1249, 1265), False, 'from llama_index import SimpleDirectoryReader, download_loader\n'), ((3303, 3416), 'llama_index.GPTTreeIndex', 'GPTTreeIndex', (['documents'], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper', 'num_children': 'num_children'}), '(documents, llm_predictor=llm_predictor, prompt_helper=\n prompt_helper, num_children=num_children)\n', (3315, 3416), False, 'from llama_index import GPTSimpleVectorIndex, GPTTreeIndex, GPTKeywordTableIndex, GPTListIndex\n'), ((6719, 6754), 'logging.debug', 'logging.debug', (['"""Using GPTListIndex"""'], {}), "('Using GPTListIndex')\n", (6732, 6754), False, 'import logging\n'), ((6771, 6810), 'llama_index.GPTListIndex.load_from_disk', 'GPTListIndex.load_from_disk', (['index_path'], {}), '(index_path)\n', (6798, 6810), False, 'from llama_index import GPTSimpleVectorIndex, GPTTreeIndex, GPTKeywordTableIndex, GPTListIndex\n'), ((6831, 6864), 'llama_index.QuestionAnswerPrompt', 'QuestionAnswerPrompt', (['prompt_tmpl'], {}), '(prompt_tmpl)\n', (6851, 6864), False, 'from llama_index import Document, LLMPredictor, PromptHelper, QuestionAnswerPrompt, RefinePrompt\n'), ((7022, 7065), 'logging.debug', 'logging.debug', (['"""Using GPTSimpleVectorIndex"""'], {}), "('Using GPTSimpleVectorIndex')\n", (7035, 7065), False, 'import logging\n'), ((7082, 7129), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['index_path'], {}), '(index_path)\n', (7117, 7129), False, 'from llama_index import GPTSimpleVectorIndex, GPTTreeIndex, GPTKeywordTableIndex, GPTListIndex\n'), ((7150, 7183), 'llama_index.QuestionAnswerPrompt', 'QuestionAnswerPrompt', (['prompt_tmpl'], {}), '(prompt_tmpl)\n', (7170, 7183), False, 'from llama_index import Document, LLMPredictor, PromptHelper, QuestionAnswerPrompt, RefinePrompt\n'), ((7204, 7229), 'llama_index.RefinePrompt', 'RefinePrompt', (['refine_tmpl'], {}), '(refine_tmpl)\n', (7216, 7229), False, 'from llama_index import Document, LLMPredictor, PromptHelper, QuestionAnswerPrompt, RefinePrompt\n'), ((9210, 9248), 'googlesearch.search', 'google_search', (['keywords'], {'num_results': '(5)'}), '(keywords, num_results=5)\n', (9223, 9248), True, 'from googlesearch import 
search as google_search\n'), ((9434, 9471), 'baidusearch.baidusearch.search', 'baidu_search', (['keywords'], {'num_results': '(5)'}), '(keywords, num_results=5)\n', (9446, 9471), True, 'from baidusearch.baidusearch import search as baidu_search\n'), ((9739, 9767), 'duckduckgo_search.ddg', 'ddg', (['keywords'], {'max_results': '(5)'}), '(keywords, max_results=5)\n', (9742, 9767), False, 'from duckduckgo_search import ddg\n'), ((1113, 1140), 'os.path.splitext', 'os.path.splitext', (['file.name'], {}), '(file.name)\n', (1129, 1140), False, 'import os\n'), ((1444, 1476), 'logging.debug', 'logging.debug', (['"""Loading DOCX..."""'], {}), "('Loading DOCX...')\n", (1457, 1476), False, 'import logging\n'), ((1506, 1535), 'llama_index.download_loader', 'download_loader', (['"""DocxReader"""'], {}), "('DocxReader')\n", (1521, 1535), False, 'from llama_index import SimpleDirectoryReader, download_loader\n'), ((3604, 3745), 'llama_index.GPTKeywordTableIndex', 'GPTKeywordTableIndex', (['documents'], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper', 'max_keywords_per_chunk': 'max_keywords_per_chunk'}), '(documents, llm_predictor=llm_predictor, prompt_helper=\n prompt_helper, max_keywords_per_chunk=max_keywords_per_chunk)\n', (3624, 3745), False, 'from llama_index import GPTSimpleVectorIndex, GPTTreeIndex, GPTKeywordTableIndex, GPTListIndex\n'), ((1385, 1412), 'os.path.splitext', 'os.path.splitext', (['file.name'], {}), '(file.name)\n', (1401, 1412), False, 'import os\n'), ((1712, 1744), 'logging.debug', 'logging.debug', (['"""Loading EPUB..."""'], {}), "('Loading EPUB...')\n", (1725, 1744), False, 'import logging\n'), ((1774, 1803), 'llama_index.download_loader', 'download_loader', (['"""EpubReader"""'], {}), "('EpubReader')\n", (1789, 1803), False, 'from llama_index import SimpleDirectoryReader, download_loader\n'), ((1938, 1975), 'logging.debug', 'logging.debug', (['"""Loading text file..."""'], {}), "('Loading text file...')\n", (1951, 1975), False, 'import logging\n'), ((3933, 4019), 'llama_index.GPTListIndex', 'GPTListIndex', (['documents'], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(documents, llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (3945, 4019), False, 'from llama_index import GPTSimpleVectorIndex, GPTTreeIndex, GPTKeywordTableIndex, GPTListIndex\n'), ((1653, 1680), 'os.path.splitext', 'os.path.splitext', (['file.name'], {}), '(file.name)\n', (1669, 1680), False, 'import os\n'), ((2123, 2137), 'llama_index.Document', 'Document', (['text'], {}), '(text)\n', (2131, 2137), False, 'from llama_index import Document, LLMPredictor, PromptHelper, QuestionAnswerPrompt, RefinePrompt\n')] |
# This file has been modified by the Nextpy Team in 2023 using AI tools and automation scripts.
# We have rigorously tested these modifications to ensure reliability and performance. Based on successful test results, we are confident in the quality and stability of these changes.
"""Base reader class."""
from abc import abstractmethod
from typing import Any, List
from nextpy.ai.schema import Document
class BaseReader:
"""Utilities for loading data from a directory."""
@abstractmethod
def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]:
"""Load data from the input directory."""
"""Slack reader."""
import logging
import os
import time
from datetime import datetime
from ssl import SSLContext
from typing import List, Optional
from llama_index.readers.base import BaseReader
from llama_index.schema import Document
logger = logging.getLogger(__name__)
class SlackReader(BaseReader):
"""Slack reader.
Reads conversations from channels. If an earliest_date is provided, an
optional latest_date can also be provided. If no latest_date is provided,
we assume the latest date is the current timestamp.
Args:
slack_token (Optional[str]): Slack token. If not provided, we
assume the environment variable `SLACK_BOT_TOKEN` is set.
ssl (Optional[str]): Custom SSL context. If not provided, it is assumed
there is already an SSL context available.
earliest_date (Optional[datetime]): Earliest date from which
to read conversations. If not provided, we read all messages.
latest_date (Optional[datetime]): Latest date from which to
read conversations. If not provided, defaults to current timestamp
in combination with earliest_date.
"""
def __init__(
self,
slack_token: Optional[str] = None,
ssl: Optional[SSLContext] = None,
earliest_date: Optional[datetime] = None,
latest_date: Optional[datetime] = None,
) -> None:
"""Initialize with parameters."""
from slack_sdk import WebClient
if slack_token is None:
slack_token = os.environ["SLACK_BOT_TOKEN"]
if slack_token is None:
raise ValueError(
"Must specify `slack_token` or set environment "
"variable `SLACK_BOT_TOKEN`."
)
if ssl is None:
self.client = WebClient(token=slack_token)
else:
self.client = WebClient(token=slack_token, ssl=ssl)
if latest_date is not None and earliest_date is None:
raise ValueError(
"Must specify `earliest_date` if `latest_date` is specified."
)
if earliest_date is not None:
self.earliest_date_timestamp: Optional[float] = earliest_date.timestamp()
else:
self.earliest_date_timestamp = None
if latest_date is not None:
self.latest_date_timestamp = latest_date.timestamp()
else:
self.latest_date_timestamp = datetime.now().timestamp()
res = self.client.api_test()
if not res["ok"]:
raise ValueError(f"Error initializing Slack API: {res['error']}")
def _read_message(self, channel_id: str, message_ts: str) -> str:
        """Read a message."""
        from slack_sdk.errors import SlackApiError
messages_text: List[str] = []
next_cursor = None
while True:
try:
# https://slack.com/api/conversations.replies
# List all replies to a message, including the message itself.
if self.earliest_date_timestamp is None:
result = self.client.conversations_replies(
channel=channel_id, ts=message_ts, cursor=next_cursor
)
else:
conversations_replies_kwargs = {
"channel": channel_id,
"ts": message_ts,
"cursor": next_cursor,
"latest": str(self.latest_date_timestamp),
}
if self.earliest_date_timestamp is not None:
conversations_replies_kwargs["oldest"] = str(
self.earliest_date_timestamp
)
result = self.client.conversations_replies(
**conversations_replies_kwargs # type: ignore
)
messages = result["messages"]
messages_text.extend(message["text"] for message in messages)
if not result["has_more"]:
break
next_cursor = result["response_metadata"]["next_cursor"]
except SlackApiError as e:
if e.response["error"] == "ratelimited":
logger.error(
"Rate limit error reached, sleeping for: {} seconds".format(
e.response.headers["retry-after"]
)
)
time.sleep(int(e.response.headers["retry-after"]))
else:
logger.error("Error parsing conversation replies: {}".format(e))
return "\n\n".join(messages_text)
def _read_channel(self, channel_id: str, reverse_chronological: bool) -> str:
        """Read a channel."""
        from slack_sdk.errors import SlackApiError
result_messages: List[str] = []
next_cursor = None
while True:
try:
# Call the conversations.history method using the WebClient
# conversations.history returns the first 100 messages by default
# These results are paginated,
# see: https://api.slack.com/methods/conversations.history$pagination
conversations_history_kwargs = {
"channel": channel_id,
"cursor": next_cursor,
"latest": str(self.latest_date_timestamp),
}
if self.earliest_date_timestamp is not None:
conversations_history_kwargs["oldest"] = str(
self.earliest_date_timestamp
)
result = self.client.conversations_history(
**conversations_history_kwargs # type: ignore
)
conversation_history = result["messages"]
# Print results
logger.info(
"{} messages found in {}".format(
len(conversation_history), channel_id
)
)
result_messages.extend(
self._read_message(channel_id, message["ts"])
for message in conversation_history
)
if not result["has_more"]:
break
next_cursor = result["response_metadata"]["next_cursor"]
except SlackApiError as e:
if e.response["error"] == "ratelimited":
logger.error(
"Rate limit error reached, sleeping for: {} seconds".format(
e.response.headers["retry-after"]
)
)
time.sleep(int(e.response.headers["retry-after"]))
else:
logger.error("Error parsing conversation replies: {}".format(e))
return (
"\n\n".join(result_messages)
if reverse_chronological
else "\n\n".join(result_messages[::-1])
)
def load_data(
self, channel_ids: List[str], reverse_chronological: bool = True
) -> List[Document]:
"""Load data from the input directory.
Args:
channel_ids (List[str]): List of channel ids to read.
Returns:
List[Document]: List of documents.
"""
results = []
for channel_id in channel_ids:
channel_content = self._read_channel(
channel_id, reverse_chronological=reverse_chronological
)
results.append(
Document(text=channel_content, metadata={"channel": channel_id})
)
return results
if __name__ == "__main__":
reader = SlackReader()
logger.info(reader.load_data(channel_ids=["C04DC2VUY3F"]))
| [
"llama_index.schema.Document"
] | [((877, 904), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (894, 904), False, 'import logging\n'), ((2439, 2467), 'slack_sdk.WebClient', 'WebClient', ([], {'token': 'slack_token'}), '(token=slack_token)\n', (2448, 2467), False, 'from slack_sdk import WebClient\n'), ((2508, 2545), 'slack_sdk.WebClient', 'WebClient', ([], {'token': 'slack_token', 'ssl': 'ssl'}), '(token=slack_token, ssl=ssl)\n', (2517, 2545), False, 'from slack_sdk import WebClient\n'), ((8292, 8356), 'llama_index.schema.Document', 'Document', ([], {'text': 'channel_content', 'metadata': "{'channel': channel_id}"}), "(text=channel_content, metadata={'channel': channel_id})\n", (8300, 8356), False, 'from llama_index.schema import Document\n'), ((3072, 3086), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3084, 3086), False, 'from datetime import datetime\n')] |
"""
This is the documentation of the Llama2-7B-chat model from the Hugging Face model hub.
The model has 7 billion parameters and was developed by Meta.
It is used for QnA purposes on a local machine for testing.
Model hardware config:
- GPU: Nvidia RTX 40 Series (12GB) --> CUDA support
- RAM: 32GB
- i7 processor 13th gen
"""
import torch
from transformers import BitsAndBytesConfig
from langchain.embeddings.huggingface import HuggingFaceInstructEmbeddings
from llama_index.llms import HuggingFaceLLM
from llama_index import ServiceContext, SimpleDirectoryReader, \
VectorStoreIndex, get_response_synthesizer, set_global_service_context
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.prompts import PromptTemplate
from llama_index.storage.storage_context import StorageContext
from llama_index.vector_stores import ChromaVectorStore
from llama_index.postprocessor import SimilarityPostprocessor
from chromadb import PersistentClient
from chromadb.utils import embedding_functions
from dotenv import load_dotenv
from transformers import AutoTokenizer
import os
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")
LLM = "meta-llama/Llama-2-7b-chat-hf"
EMBED_MODEL = "hkunlp/instructor-large"
DEVICE_MAP = "auto"
DEVICE = "cuda"
class Llama2_7B_Chat:
"""Class for Llama-7B Chat model from HuggingFace"""
def __init__(self) -> None:
"""Constrcutor of the class Llama2_7B_Chat"""
print("==================== starting constructor... ======================")
# Start chroma client
self.__chroma_client = PersistentClient('./chroma_db')
        # 4-bit quantization config for more efficient computation by the LLM
self.__quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.bfloat16,
bnb_4bit_quant_type="nf4",
bnb_4bit_use_double_quant=True,
llm_int8_enable_fp32_cpu_offload=True
)
tokenizer = AutoTokenizer.from_pretrained(LLM)
# HuggingFaceLLM object - uses pretrained models from HuggingFace (Llama2-7B-chat model)
self.__llm = HuggingFaceLLM(
model_name=LLM,
tokenizer=tokenizer,
is_chat_model=True,
max_new_tokens=512,
query_wrapper_prompt=PromptTemplate(
"<s> [INST] {query_str} [/INST]"),
context_window=4000,
model_kwargs={
"quantization_config": self.__quantization_config,
"token": HF_TOKEN
},
tokenizer_kwargs={
"token": HF_TOKEN
},
device_map=DEVICE_MAP
)
        # embedding model - pretrained embedding model (a wrapper around sentence_transformers)
self.__embed_model = HuggingFaceInstructEmbeddings(
model_name=EMBED_MODEL,
model_kwargs={
"device": DEVICE
}
)
self.__index = None
# Service context
self.__service_context = ServiceContext.from_defaults(
llm=self.__llm, embed_model=self.__embed_model)
set_global_service_context(self.__service_context)
def create_index(self, data_dir: str) -> None:
"""Creates the Vector Index for querying with LLM"""
print("============= creating index.... ================")
# embedding function for chromadb
embedding_func = embedding_functions.HuggingFaceEmbeddingFunction(
api_key=HF_TOKEN,
model_name=EMBED_MODEL
)
# Load the documents from data_dir
docs = SimpleDirectoryReader(data_dir).load_data()
# Creating collection in chroma database
chroma_collection = self.__chroma_client.get_or_create_collection("data_embeddings",
embedding_function=embedding_func)
# Creating Chroma Vector Store
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
# Create storage context using chroma vector store
storage_context = StorageContext.from_defaults(
vector_store=vector_store)
self.__index = VectorStoreIndex.from_documents(docs, storage_context=storage_context)
def start_query_engine(self):
"""Initialize the query engine"""
print("=========== starting query engine... ===============")
# configure retriever
retriever = VectorIndexRetriever(
index=self.__index,
similarity_top_k=6
)
        # configure node postprocessors
s_processor = SimilarityPostprocessor(similarity_cutoff=0.65)
# configure response synthesizer
response_synthesizer = get_response_synthesizer()
query_engine = RetrieverQueryEngine(
retriever=retriever,
node_postprocessors=[s_processor],
response_synthesizer=response_synthesizer
)
return query_engine
def ask_llm(self, user_query: str, query_engine):
"""
Ask LLM for querying data based on context
returns: (RESPONSE_TYPE, List[NodeWithScore])
"""
# print("User asking -->", user_query)
response = query_engine.query(user_query)
return response, response.source_nodes
    @staticmethod
    def reset_model():
        """Resets the model's knowledge base."""
os.system("rm -rf Data_*")
os.system("rm -rf vector_store_data/")
os.system("rm -rf chroma_db/")
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.get_response_synthesizer",
"llama_index.SimpleDirectoryReader",
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.prompts.PromptTemplate",
"llama_index.set_global_service_context",
"llama_index.postprocessor.SimilarityPostprocessor",
"llama_index.query_engine.RetrieverQueryEngine"
] | [((1191, 1204), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1202, 1204), False, 'from dotenv import load_dotenv\n'), ((1216, 1237), 'os.getenv', 'os.getenv', (['"""HF_TOKEN"""'], {}), "('HF_TOKEN')\n", (1225, 1237), False, 'import os\n'), ((5540, 5566), 'os.system', 'os.system', (['"""rm -rf Data_*"""'], {}), "('rm -rf Data_*')\n", (5549, 5566), False, 'import os\n'), ((5571, 5609), 'os.system', 'os.system', (['"""rm -rf vector_store_data/"""'], {}), "('rm -rf vector_store_data/')\n", (5580, 5609), False, 'import os\n'), ((5614, 5644), 'os.system', 'os.system', (['"""rm -rf chroma_db/"""'], {}), "('rm -rf chroma_db/')\n", (5623, 5644), False, 'import os\n'), ((1669, 1700), 'chromadb.PersistentClient', 'PersistentClient', (['"""./chroma_db"""'], {}), "('./chroma_db')\n", (1685, 1700), False, 'from chromadb import PersistentClient\n'), ((1820, 2002), 'transformers.BitsAndBytesConfig', 'BitsAndBytesConfig', ([], {'load_in_4bit': '(True)', 'bnb_4bit_compute_dtype': 'torch.bfloat16', 'bnb_4bit_quant_type': '"""nf4"""', 'bnb_4bit_use_double_quant': '(True)', 'llm_int8_enable_fp32_cpu_offload': '(True)'}), "(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16,\n bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True,\n llm_int8_enable_fp32_cpu_offload=True)\n", (1838, 2002), False, 'from transformers import BitsAndBytesConfig\n'), ((2094, 2128), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['LLM'], {}), '(LLM)\n', (2123, 2128), False, 'from transformers import AutoTokenizer\n'), ((2919, 3010), 'langchain.embeddings.huggingface.HuggingFaceInstructEmbeddings', 'HuggingFaceInstructEmbeddings', ([], {'model_name': 'EMBED_MODEL', 'model_kwargs': "{'device': DEVICE}"}), "(model_name=EMBED_MODEL, model_kwargs={\n 'device': DEVICE})\n", (2948, 3010), False, 'from langchain.embeddings.huggingface import HuggingFaceInstructEmbeddings\n'), ((3159, 3235), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'self.__llm', 'embed_model': 'self.__embed_model'}), '(llm=self.__llm, embed_model=self.__embed_model)\n', (3187, 3235), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, VectorStoreIndex, get_response_synthesizer, set_global_service_context\n'), ((3258, 3308), 'llama_index.set_global_service_context', 'set_global_service_context', (['self.__service_context'], {}), '(self.__service_context)\n', (3284, 3308), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, VectorStoreIndex, get_response_synthesizer, set_global_service_context\n'), ((3558, 3652), 'chromadb.utils.embedding_functions.HuggingFaceEmbeddingFunction', 'embedding_functions.HuggingFaceEmbeddingFunction', ([], {'api_key': 'HF_TOKEN', 'model_name': 'EMBED_MODEL'}), '(api_key=HF_TOKEN,\n model_name=EMBED_MODEL)\n', (3606, 3652), False, 'from chromadb.utils import embedding_functions\n'), ((4101, 4155), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (4118, 4155), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((4242, 4297), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (4270, 4297), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((4335, 4405), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', 
(['docs'], {'storage_context': 'storage_context'}), '(docs, storage_context=storage_context)\n', (4366, 4405), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, VectorStoreIndex, get_response_synthesizer, set_global_service_context\n'), ((4605, 4665), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'self.__index', 'similarity_top_k': '(6)'}), '(index=self.__index, similarity_top_k=6)\n', (4625, 4665), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((4763, 4810), 'llama_index.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': '(0.65)'}), '(similarity_cutoff=0.65)\n', (4786, 4810), False, 'from llama_index.postprocessor import SimilarityPostprocessor\n'), ((4884, 4910), 'llama_index.get_response_synthesizer', 'get_response_synthesizer', ([], {}), '()\n', (4908, 4910), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, VectorStoreIndex, get_response_synthesizer, set_global_service_context\n'), ((4935, 5058), 'llama_index.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'node_postprocessors': '[s_processor]', 'response_synthesizer': 'response_synthesizer'}), '(retriever=retriever, node_postprocessors=[s_processor],\n response_synthesizer=response_synthesizer)\n', (4955, 5058), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((2422, 2470), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', (['"""<s> [INST] {query_str} [/INST]"""'], {}), "('<s> [INST] {query_str} [/INST]')\n", (2436, 2470), False, 'from llama_index.prompts import PromptTemplate\n'), ((3742, 3773), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['data_dir'], {}), '(data_dir)\n', (3763, 3773), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, VectorStoreIndex, get_response_synthesizer, set_global_service_context\n')] |
import os
import re
from llama_index import ListIndex
from llama_index import ServiceContext
from llama_index.llms import OpenAI
from llama_index.llms.palm import PaLM
from llama_index.response_synthesizers import get_response_synthesizer
from llama_index.schema import NodeRelationship
from llama_index.schema import RelatedNodeInfo
from llama_index.schema import TextNode
import openai
class ServiceConfiguration:
def __init__(self, api_key, model_name):
if model_name == "PaLM":
self.llm = PaLM(api_key=api_key)
else:
openai.api_key = api_key
self.llm = OpenAI(model=model_name, temperature=0, max_tokens=512)
def get_service_context(self):
return ServiceContext.from_defaults(llm=self.llm)
class TextNodeManager:
@staticmethod
def get_nodes(texts):
nodes = [TextNode(text=text, id_=str(idx)) for idx, text in enumerate(texts, start=1)]
TextNodeManager._set_relationships(nodes)
return nodes
@staticmethod
def _set_relationships(nodes):
for idx, node in enumerate(nodes):
if idx > 0:
node.relationships[NodeRelationship.PREVIOUS] = RelatedNodeInfo(
node_id=nodes[idx - 1].node_id
)
if idx < len(nodes) - 1:
node.relationships[NodeRelationship.NEXT] = RelatedNodeInfo(
node_id=nodes[idx + 1].node_id
)
return nodes
class ResponseParser:
PATTERN = r"Response \d+: \n(.*?)(?:\n---------------------|$)"
@staticmethod
def parse(response):
return [resp.strip() for resp in re.findall(ResponseParser.PATTERN, response, re.DOTALL)]
class PromptManager:
def __init__(self):
self.short_line_description_prompt = (
"Give a simple one-line description of what the code does?"
)
self.explanation_prompt = (
"Give an explanation in 40 words maximum for the given code base. "
"Don't include any code in your explanation. If the code is about import statements, "
"give an overall explanation for import statements."
)
self.summary_prompt = "Give short summary for given codebase."
def get_short_summaries_prompt(self):
return self.short_line_description_prompt
def get_explanation_prompt(self):
return self.explanation_prompt
def get_summary_prompt(self):
return self.summary_prompt
class QueryHandler:
def __init__(self, nodes, service_context):
self.index = ListIndex(nodes, service_context=service_context)
self.prompt_manager = PromptManager()
def get_response(self, prompt="short_summaries"):
query = ""
response_mode = ""
if prompt == "short_summaries":
query = self.prompt_manager.get_short_summaries_prompt()
response_mode = "accumulate"
elif prompt == "explaination":
query = self.prompt_manager.get_explanation_prompt()
response_mode = "accumulate"
elif prompt == "summary":
query = self.prompt_manager.get_summary_prompt()
response_mode = "tree_summarize"
response_synthesizer = get_response_synthesizer(response_mode=response_mode)
query_engine = self.index.as_query_engine(response_synthesizer=response_synthesizer)
return query_engine.query(query)
@staticmethod
def modify_texts(original_texts, short_summaries):
new_texts = [original_texts[0]]
for i in range(1, len(original_texts)):
new_text = f"The previous code has the following explanation: \n {short_summaries[i-1]}. \n Use this explanation only if required to explain the following code. \n {original_texts[i]}"
new_texts.append(new_text)
return new_texts
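if __name__ == "__main__":
    # Usage sketch (hypothetical API key and code snippets).
    service_context = ServiceConfiguration("sk-...", "gpt-3.5-turbo").get_service_context()
    nodes = TextNodeManager.get_nodes(["import os", "print(os.getcwd())"])
    handler = QueryHandler(nodes, service_context)
    print(handler.get_response(prompt="summary"))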
| [
"llama_index.ListIndex",
"llama_index.response_synthesizers.get_response_synthesizer",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.schema.RelatedNodeInfo",
"llama_index.llms.palm.PaLM"
] | [((722, 764), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'self.llm'}), '(llm=self.llm)\n', (750, 764), False, 'from llama_index import ServiceContext\n'), ((2577, 2626), 'llama_index.ListIndex', 'ListIndex', (['nodes'], {'service_context': 'service_context'}), '(nodes, service_context=service_context)\n', (2586, 2626), False, 'from llama_index import ListIndex\n'), ((3242, 3295), 'llama_index.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {'response_mode': 'response_mode'}), '(response_mode=response_mode)\n', (3266, 3295), False, 'from llama_index.response_synthesizers import get_response_synthesizer\n'), ((519, 540), 'llama_index.llms.palm.PaLM', 'PaLM', ([], {'api_key': 'api_key'}), '(api_key=api_key)\n', (523, 540), False, 'from llama_index.llms.palm import PaLM\n'), ((615, 670), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': 'model_name', 'temperature': '(0)', 'max_tokens': '(512)'}), '(model=model_name, temperature=0, max_tokens=512)\n', (621, 670), False, 'from llama_index.llms import OpenAI\n'), ((1183, 1230), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'nodes[idx - 1].node_id'}), '(node_id=nodes[idx - 1].node_id)\n', (1198, 1230), False, 'from llama_index.schema import RelatedNodeInfo\n'), ((1366, 1413), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'nodes[idx + 1].node_id'}), '(node_id=nodes[idx + 1].node_id)\n', (1381, 1413), False, 'from llama_index.schema import RelatedNodeInfo\n'), ((1651, 1706), 're.findall', 're.findall', (['ResponseParser.PATTERN', 'response', 're.DOTALL'], {}), '(ResponseParser.PATTERN, response, re.DOTALL)\n', (1661, 1706), False, 'import re\n')] |
import sys
sys.stdout.reconfigure(encoding="utf-8")
sys.stdin.reconfigure(encoding="utf-8")
import streamlit as st
import streamlit.components.v1 as components
import re
import random
CODE_BUILD_KG = """
# Prepare for GraphStore
os.environ['NEBULA_USER'] = "root"
os.environ['NEBULA_PASSWORD'] = "nebula" # default password
os.environ['NEBULA_ADDRESS'] = "127.0.0.1:9669" # assumed we have NebulaGraph installed locally
space_name = "guardians"
edge_types, rel_prop_names = ["relationship"], ["relationship"] # default, can be omitted if creating from an empty kg
tags = ["entity"] # default, can be omitted if creating from an empty kg
graph_store = NebulaGraphStore(space_name=space_name, edge_types=edge_types, rel_prop_names=rel_prop_names, tags=tags)
storage_context = StorageContext.from_defaults(graph_store=graph_store)
# Download and Preprocess Data
from llama_index import download_loader
WikipediaReader = download_loader("WikipediaReader")
loader = WikipediaReader()
documents = loader.load_data(pages=['Guardians of the Galaxy Vol. 3'], auto_suggest=False)
# Build Knowledge Graph
kg_index = KnowledgeGraphIndex.from_documents(
documents,
storage_context=storage_context,
max_triplets_per_chunk=10,
service_context=service_context,
space_name=space_name,
edge_types=edge_types,
rel_prop_names=rel_prop_names,
tags=tags,
include_embeddings=True,
)
"""
CODE_NL2CYPHER_LANGCHAIN = """
## Langchain
# Doc: https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa
from langchain.chat_models import ChatOpenAI
from langchain.chains import NebulaGraphQAChain
from langchain.graphs import NebulaGraph
graph = NebulaGraph(
space=space_name,
username="root",
password="nebula",
address="127.0.0.1",
port=9669,
session_pool_size=30,
)
chain = NebulaGraphQAChain.from_llm(
llm, graph=graph, verbose=True
)
chain.run(
"Tell me about Peter Quill?",
)
"""
CODE_NL2CYPHER_LLAMAINDEX = """
## Llama Index
# Doc: https://gpt-index.readthedocs.io/en/latest/examples/query_engine/knowledge_graph_query_engine.html
from llama_index.query_engine import KnowledgeGraphQueryEngine
from llama_index.storage.storage_context import StorageContext
from llama_index.graph_stores import NebulaGraphStore
nl2kg_query_engine = KnowledgeGraphQueryEngine(
storage_context=storage_context,
service_context=service_context,
llm=llm,
verbose=True,
)
response = nl2kg_query_engine.query(
"Tell me about Peter Quill?",
)
"""
import os
import json
import openai
from llama_index.llms import AzureOpenAI
from langchain.embeddings import OpenAIEmbeddings
from llama_index import LangchainEmbedding
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
KnowledgeGraphIndex,
LLMPredictor,
ServiceContext,
)
from llama_index.storage.storage_context import StorageContext
from llama_index.graph_stores import NebulaGraphStore
import logging
import sys
logging.basicConfig(
stream=sys.stdout, level=logging.INFO
) # logging.DEBUG for more verbose output
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
openai.api_type = "azure"
openai.api_base = st.secrets["OPENAI_API_BASE"]
# openai.api_version = "2022-12-01" azure gpt-3
openai.api_version = "2023-05-15" # azure gpt-3.5 turbo
openai.api_key = st.secrets["OPENAI_API_KEY"]
llm = AzureOpenAI(
engine=st.secrets["DEPLOYMENT_NAME"],
temperature=0,
model="gpt-35-turbo",
)
llm_predictor = LLMPredictor(llm=llm)
# You need to deploy your own embedding model as well as your own chat completion model
embedding_llm = LangchainEmbedding(
OpenAIEmbeddings(
model="text-embedding-ada-002",
deployment=st.secrets["EMBEDDING_DEPLOYMENT_NAME"],
openai_api_key=openai.api_key,
openai_api_base=openai.api_base,
openai_api_type=openai.api_type,
openai_api_version=openai.api_version,
),
embed_batch_size=1,
)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor,
embed_model=embedding_llm,
)
os.environ["NEBULA_USER"] = st.secrets["graphd_user"]
os.environ["NEBULA_PASSWORD"] = st.secrets["graphd_password"]
os.environ[
"NEBULA_ADDRESS"
] = f"{st.secrets['graphd_host']}:{st.secrets['graphd_port']}"
space_name = "guardians"
edge_types, rel_prop_names = ["relationship"], [
"relationship"
] # default, can be omitted if creating from an empty kg
tags = ["entity"] # default, can be omitted if creating from an empty kg
graph_store = NebulaGraphStore(
space_name=space_name,
edge_types=edge_types,
rel_prop_names=rel_prop_names,
tags=tags,
)
storage_context = StorageContext.from_defaults(graph_store=graph_store)
from llama_index.query_engine import KnowledgeGraphQueryEngine
from llama_index.storage.storage_context import StorageContext
from llama_index.graph_stores import NebulaGraphStore
nl2kg_query_engine = KnowledgeGraphQueryEngine(
storage_context=storage_context,
service_context=service_context,
llm=llm,
verbose=True,
)
def cypher_to_all_paths(query):
# Find the MATCH and RETURN parts
match_parts = re.findall(r"(MATCH .+?(?=MATCH|$))", query, re.I | re.S)
return_part = re.search(r"RETURN .+", query).group()
modified_matches = []
path_ids = []
# Go through each MATCH part
for i, part in enumerate(match_parts):
path_id = f"path_{i}"
path_ids.append(path_id)
# Replace the MATCH keyword with "MATCH path_i = "
modified_part = part.replace("MATCH ", f"MATCH {path_id} = ")
modified_matches.append(modified_part)
# Join the modified MATCH parts
matches_string = " ".join(modified_matches)
# Construct the new RETURN part
return_string = f"RETURN {', '.join(path_ids)};"
# Remove the old RETURN part from matches_string
matches_string = matches_string.replace(return_part, "")
# Combine everything
modified_query = f"{matches_string}\n{return_string}"
return modified_query
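# Example of the rewrite above (hypothetical single-MATCH query; whitespace approximate):
#   cypher_to_all_paths("MATCH (p)-[e:relationship]->(m) RETURN p, e, m")
#   returns roughly "MATCH path_0 = (p)-[e:relationship]->(m) \nRETURN path_0;"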
# convert a NebulaGraph query result into a pandas DataFrame
def result_to_df(result):
from typing import Dict
import pandas as pd
columns = result.keys()
d: Dict[str, list] = {}
for col_num in range(result.col_size()):
col_name = columns[col_num]
col_list = result.column_values(col_name)
d[col_name] = [x.cast() for x in col_list]
return pd.DataFrame(d)
def render_pd_item(g, item):
from nebula3.data.DataObject import Node, PathWrapper, Relationship
if isinstance(item, Node):
node_id = item.get_id().cast()
tags = item.tags() # list of strings
props = dict()
for tag in tags:
props.update(item.properties(tag))
g.add_node(node_id, label=node_id, title=str(props))
elif isinstance(item, Relationship):
src_id = item.start_vertex_id().cast()
dst_id = item.end_vertex_id().cast()
edge_name = item.edge_name()
props = item.properties()
# ensure start and end vertex exist in graph
if not src_id in g.node_ids:
g.add_node(src_id)
if not dst_id in g.node_ids:
g.add_node(dst_id)
g.add_edge(src_id, dst_id, label=edge_name, title=str(props))
elif isinstance(item, PathWrapper):
for node in item.nodes():
render_pd_item(g, node)
for edge in item.relationships():
render_pd_item(g, edge)
elif isinstance(item, list):
for it in item:
render_pd_item(g, it)
def create_pyvis_graph(result_df):
from pyvis.network import Network
g = Network(
notebook=True,
directed=True,
cdn_resources="in_line",
height="500px",
width="100%",
)
for _, row in result_df.iterrows():
for item in row:
render_pd_item(g, item)
g.repulsion(
node_distance=100,
central_gravity=0.2,
spring_length=200,
spring_strength=0.05,
damping=0.09,
)
return g
def query_nebulagraph(
query,
space_name=space_name,
address=st.secrets["graphd_host"],
port=9669,
user=st.secrets["graphd_user"],
password=st.secrets["graphd_password"],
):
from nebula3.Config import SessionPoolConfig
from nebula3.gclient.net.SessionPool import SessionPool
config = SessionPoolConfig()
session_pool = SessionPool(user, password, space_name, [(address, port)])
session_pool.init(config)
return session_pool.execute(query)
st.title("Demo: Knowledge Graph Build and Query with LLM")
(
tab_code_kg,
tab_notebook,
tab_graph_view,
tab_cypher,
tab_nl2cypher,
tab_code_nl2cypher,
) = st.tabs(
[
"Code: Build KG",
"Full Notebook",
"Graph View",
"Query",
"Natural Language to Cypher",
"Code: NL2Cypher",
]
)
with tab_code_kg:
st.write(
"With a few lines of code, we can build a knowledge graph with LLM, LlamaIndex and NebulaGraph."
)
st.write(
"See full notebook for more details and try Graph Visualizations, Query, and Natural Language to Cypher by clicking on the tabs on the right."
)
st.code(body=CODE_BUILD_KG, language="python")
with tab_notebook:
st.write("> Full Notebook")
st.markdown(
"""
This is the full notebook to demonstrate how to:
- Extract from data sources and build a knowledge graph with LLM and Llama Index, NebulaGraph in 3 lines of code
- Query the Knowledge Graph with nGQL and visualize the graph
- Query the knowledge graph with natural language in 1 line of code(both Langchain and Llama Index)
"""
)
# link to download notebook
st.markdown(
"""
[Download](https://www.siwei.io/demo-dumps/kg-llm/KG_Building.ipynb) the notebook.
"""
)
components.iframe(
src="https://www.siwei.io/demo-dumps/kg-llm/KG_Building.html",
height=2000,
width=800,
scrolling=True,
)
with tab_graph_view:
st.write(
"> Sub-Graph View of the Knowledge Graph about [Guardians of the Galaxy Vol. 3](https://en.wikipedia.org/wiki/Guardians_of_the_Galaxy_Vol._3)"
)
components.iframe(
src="https://www.siwei.io/demo-dumps/kg-llm/nebulagraph_draw_sample.html",
height=500,
scrolling=True,
)
with tab_cypher:
st.write("> Query Knowledge Graph in nGQL")
query_string = st.text_input(
label="Enter nGQL query string", value="MATCH ()-[e]->() RETURN e LIMIT 25"
)
if st.button("> execute"):
# run query
result = query_nebulagraph(query_string)
# convert to pandas dataframe
result_df = result_to_df(result)
# display pd dataframe
st.dataframe(result_df)
# create pyvis graph
g = create_pyvis_graph(result_df)
# render with random file name
import random
graph_html = g.generate_html(f"graph_{random.randint(0, 1000)}.html")
components.html(graph_html, height=500, scrolling=True)
with tab_nl2cypher:
st.write("> Natural Language to Cypher")
nl_query_string = st.text_input(
label="Enter natural language query string", value="Tell me about Peter Quill?"
)
if st.button("Ask KG"):
response = nl2kg_query_engine.query(nl_query_string)
graph_query = list(response.metadata.values())[0]["graph_store_query"]
graph_query = graph_query.replace("WHERE", "\n WHERE").replace(
"RETURN", "\nRETURN"
)
answer = str(response)
st.write(f"*Answer*: {answer}")
st.markdown(
f"""
## Generated NebulaGraph Cypher Query
```cypher
{graph_query}
```
"""
)
st.write("## Rendered Graph")
render_query = cypher_to_all_paths(graph_query)
result = query_nebulagraph(render_query)
result_df = result_to_df(result)
# create pyvis graph
g = create_pyvis_graph(result_df)
# render with random file name
graph_html = g.generate_html(f"graph_{random.randint(0, 1000)}.html")
components.html(graph_html, height=500, scrolling=True)
with tab_code_nl2cypher:
st.write(
"> Natural Language to NebulaGraph Cypher Code with Langchain and Llama Index"
)
tab_langchain, tab_llamaindex = st.tabs(["Langchain", "Llama Index"])
with tab_langchain:
st.code(body=CODE_NL2CYPHER_LANGCHAIN, language="python")
with tab_llamaindex:
st.code(body=CODE_NL2CYPHER_LLAMAINDEX, language="python")
st.markdown(
"""
## References
- [Langchain: NebulaGraphQAChain](https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa)
- [Llama Index: KnowledgeGraphQueryEngine](https://gpt-index.readthedocs.io/en/latest/examples/query_engine/knowledge_graph_query_engine.html)
"""
)
| [
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.LLMPredictor",
"llama_index.graph_stores.NebulaGraphStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.query_engine.KnowledgeGraphQueryEngine",
"llama_index.llms.AzureOpenAI"
] | [((12, 52), 'sys.stdout.reconfigure', 'sys.stdout.reconfigure', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (34, 52), False, 'import sys\n'), ((53, 92), 'sys.stdin.reconfigure', 'sys.stdin.reconfigure', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (74, 92), False, 'import sys\n'), ((2986, 3044), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (3005, 3044), False, 'import logging\n'), ((3400, 3491), 'llama_index.llms.AzureOpenAI', 'AzureOpenAI', ([], {'engine': "st.secrets['DEPLOYMENT_NAME']", 'temperature': '(0)', 'model': '"""gpt-35-turbo"""'}), "(engine=st.secrets['DEPLOYMENT_NAME'], temperature=0, model=\n 'gpt-35-turbo')\n", (3411, 3491), False, 'from llama_index.llms import AzureOpenAI\n'), ((3518, 3539), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (3530, 3539), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, KnowledgeGraphIndex, LLMPredictor, ServiceContext\n'), ((4007, 4096), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embedding_llm'}), '(llm_predictor=llm_predictor, embed_model=\n embedding_llm)\n', (4035, 4096), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, KnowledgeGraphIndex, LLMPredictor, ServiceContext\n'), ((4550, 4658), 'llama_index.graph_stores.NebulaGraphStore', 'NebulaGraphStore', ([], {'space_name': 'space_name', 'edge_types': 'edge_types', 'rel_prop_names': 'rel_prop_names', 'tags': 'tags'}), '(space_name=space_name, edge_types=edge_types,\n rel_prop_names=rel_prop_names, tags=tags)\n', (4566, 4658), False, 'from llama_index.graph_stores import NebulaGraphStore\n'), ((4692, 4745), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'graph_store': 'graph_store'}), '(graph_store=graph_store)\n', (4720, 4745), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((4950, 5069), 'llama_index.query_engine.KnowledgeGraphQueryEngine', 'KnowledgeGraphQueryEngine', ([], {'storage_context': 'storage_context', 'service_context': 'service_context', 'llm': 'llm', 'verbose': '(True)'}), '(storage_context=storage_context, service_context=\n service_context, llm=llm, verbose=True)\n', (4975, 5069), False, 'from llama_index.query_engine import KnowledgeGraphQueryEngine\n'), ((8528, 8586), 'streamlit.title', 'st.title', (['"""Demo: Knowledge Graph Build and Query with LLM"""'], {}), "('Demo: Knowledge Graph Build and Query with LLM')\n", (8536, 8586), True, 'import streamlit as st\n'), ((8708, 8828), 'streamlit.tabs', 'st.tabs', (["['Code: Build KG', 'Full Notebook', 'Graph View', 'Query',\n 'Natural Language to Cypher', 'Code: NL2Cypher']"], {}), "(['Code: Build KG', 'Full Notebook', 'Graph View', 'Query',\n 'Natural Language to Cypher', 'Code: NL2Cypher'])\n", (8715, 8828), True, 'import streamlit as st\n'), ((3669, 3918), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': '"""text-embedding-ada-002"""', 'deployment': "st.secrets['EMBEDDING_DEPLOYMENT_NAME']", 'openai_api_key': 'openai.api_key', 'openai_api_base': 'openai.api_base', 'openai_api_type': 'openai.api_type', 'openai_api_version': 'openai.api_version'}), "(model='text-embedding-ada-002', deployment=st.secrets[\n 'EMBEDDING_DEPLOYMENT_NAME'], openai_api_key=openai.api_key,\n openai_api_base=openai.api_base, 
openai_api_type=openai.api_type,\n openai_api_version=openai.api_version)\n", (3685, 3918), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((5174, 5230), 're.findall', 're.findall', (['"""(MATCH .+?(?=MATCH|$))"""', 'query', '(re.I | re.S)'], {}), "('(MATCH .+?(?=MATCH|$))', query, re.I | re.S)\n", (5184, 5230), False, 'import re\n'), ((6406, 6421), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (6418, 6421), True, 'import pandas as pd\n'), ((7624, 7721), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'directed': '(True)', 'cdn_resources': '"""in_line"""', 'height': '"""500px"""', 'width': '"""100%"""'}), "(notebook=True, directed=True, cdn_resources='in_line', height=\n '500px', width='100%')\n", (7631, 7721), False, 'from pyvis.network import Network\n'), ((8359, 8378), 'nebula3.Config.SessionPoolConfig', 'SessionPoolConfig', ([], {}), '()\n', (8376, 8378), False, 'from nebula3.Config import SessionPoolConfig\n'), ((8398, 8456), 'nebula3.gclient.net.SessionPool.SessionPool', 'SessionPool', (['user', 'password', 'space_name', '[(address, port)]'], {}), '(user, password, space_name, [(address, port)])\n', (8409, 8456), False, 'from nebula3.gclient.net.SessionPool import SessionPool\n'), ((8909, 9025), 'streamlit.write', 'st.write', (['"""With a few lines of code, we can build a knowledge graph with LLM, LlamaIndex and NebulaGraph."""'], {}), "(\n 'With a few lines of code, we can build a knowledge graph with LLM, LlamaIndex and NebulaGraph.'\n )\n", (8917, 9025), True, 'import streamlit as st\n'), ((9034, 9196), 'streamlit.write', 'st.write', (['"""See full notebook for more details and try Graph Visualizations, Query, and Natural Language to Cypher by clicking on the tabs on the right."""'], {}), "(\n 'See full notebook for more details and try Graph Visualizations, Query, and Natural Language to Cypher by clicking on the tabs on the right.'\n )\n", (9042, 9196), True, 'import streamlit as st\n'), ((9205, 9251), 'streamlit.code', 'st.code', ([], {'body': 'CODE_BUILD_KG', 'language': '"""python"""'}), "(body=CODE_BUILD_KG, language='python')\n", (9212, 9251), True, 'import streamlit as st\n'), ((9276, 9303), 'streamlit.write', 'st.write', (['"""> Full Notebook"""'], {}), "('> Full Notebook')\n", (9284, 9303), True, 'import streamlit as st\n'), ((9308, 9680), 'streamlit.markdown', 'st.markdown', (['"""\n\nThis is the full notebook to demonstrate how to:\n\n- Extract from data sources and build a knowledge graph with LLM and Llama Index, NebulaGraph in 3 lines of code\n- Query the Knowledge Graph with nGQL and visualize the graph\n- Query the knowledge graph with natural language in 1 line of code(both Langchain and Llama Index)\n """'], {}), '(\n """\n\nThis is the full notebook to demonstrate how to:\n\n- Extract from data sources and build a knowledge graph with LLM and Llama Index, NebulaGraph in 3 lines of code\n- Query the Knowledge Graph with nGQL and visualize the graph\n- Query the knowledge graph with natural language in 1 line of code(both Langchain and Llama Index)\n """\n )\n', (9319, 9680), True, 'import streamlit as st\n'), ((9721, 9834), 'streamlit.markdown', 'st.markdown', (['"""\n[Download](https://www.siwei.io/demo-dumps/kg-llm/KG_Building.ipynb) the notebook.\n"""'], {}), '(\n """\n[Download](https://www.siwei.io/demo-dumps/kg-llm/KG_Building.ipynb) the notebook.\n"""\n )\n', (9732, 9834), True, 'import streamlit as st\n'), ((9844, 9973), 'streamlit.components.v1.iframe', 'components.iframe', ([], {'src': 
'"""https://www.siwei.io/demo-dumps/kg-llm/KG_Building.html"""', 'height': '(2000)', 'width': '(800)', 'scrolling': '(True)'}), "(src=\n 'https://www.siwei.io/demo-dumps/kg-llm/KG_Building.html', height=2000,\n width=800, scrolling=True)\n", (9861, 9973), True, 'import streamlit.components.v1 as components\n'), ((10030, 10192), 'streamlit.write', 'st.write', (['"""> Sub-Graph View of the Knowledge Graph about [Guardians of the Galaxy Vol. 3](https://en.wikipedia.org/wiki/Guardians_of_the_Galaxy_Vol._3)"""'], {}), "(\n '> Sub-Graph View of the Knowledge Graph about [Guardians of the Galaxy Vol. 3](https://en.wikipedia.org/wiki/Guardians_of_the_Galaxy_Vol._3)'\n )\n", (10038, 10192), True, 'import streamlit as st\n'), ((10201, 10330), 'streamlit.components.v1.iframe', 'components.iframe', ([], {'src': '"""https://www.siwei.io/demo-dumps/kg-llm/nebulagraph_draw_sample.html"""', 'height': '(500)', 'scrolling': '(True)'}), "(src=\n 'https://www.siwei.io/demo-dumps/kg-llm/nebulagraph_draw_sample.html',\n height=500, scrolling=True)\n", (10218, 10330), True, 'import streamlit.components.v1 as components\n'), ((10375, 10418), 'streamlit.write', 'st.write', (['"""> Query Knowledge Graph in nGQL"""'], {}), "('> Query Knowledge Graph in nGQL')\n", (10383, 10418), True, 'import streamlit as st\n'), ((10438, 10533), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""Enter nGQL query string"""', 'value': '"""MATCH ()-[e]->() RETURN e LIMIT 25"""'}), "(label='Enter nGQL query string', value=\n 'MATCH ()-[e]->() RETURN e LIMIT 25')\n", (10451, 10533), True, 'import streamlit as st\n'), ((10550, 10572), 'streamlit.button', 'st.button', (['"""> execute"""'], {}), "('> execute')\n", (10559, 10572), True, 'import streamlit as st\n'), ((11090, 11130), 'streamlit.write', 'st.write', (['"""> Natural Language to Cypher"""'], {}), "('> Natural Language to Cypher')\n", (11098, 11130), True, 'import streamlit as st\n'), ((11153, 11252), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""Enter natural language query string"""', 'value': '"""Tell me about Peter Quill?"""'}), "(label='Enter natural language query string', value=\n 'Tell me about Peter Quill?')\n", (11166, 11252), True, 'import streamlit as st\n'), ((11269, 11288), 'streamlit.button', 'st.button', (['"""Ask KG"""'], {}), "('Ask KG')\n", (11278, 11288), True, 'import streamlit as st\n'), ((12205, 12303), 'streamlit.write', 'st.write', (['"""> Natural Language to NebulaGraph Cypher Code with Langchain and Llama Index"""'], {}), "(\n '> Natural Language to NebulaGraph Cypher Code with Langchain and Llama Index'\n )\n", (12213, 12303), True, 'import streamlit as st\n'), ((12344, 12381), 'streamlit.tabs', 'st.tabs', (["['Langchain', 'Llama Index']"], {}), "(['Langchain', 'Llama Index'])\n", (12351, 12381), True, 'import streamlit as st\n'), ((12569, 12885), 'streamlit.markdown', 'st.markdown', (['"""\n\n## References\n \n- [Langchain: NebulaGraphQAChain](https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa)\n- [Llama Index: KnowledgeGraphQueryEngine](https://gpt-index.readthedocs.io/en/latest/examples/query_engine/knowledge_graph_query_engine.html)\n"""'], {}), '(\n """\n\n## References\n \n- [Langchain: NebulaGraphQAChain](https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa)\n- [Llama Index: KnowledgeGraphQueryEngine](https://gpt-index.readthedocs.io/en/latest/examples/query_engine/knowledge_graph_query_engine.html)\n"""\n )\n', (12580, 12885), True, 'import streamlit as st\n'), ((10763, 
10786), 'streamlit.dataframe', 'st.dataframe', (['result_df'], {}), '(result_df)\n', (10775, 10786), True, 'import streamlit as st\n'), ((11009, 11064), 'streamlit.components.v1.html', 'components.html', (['graph_html'], {'height': '(500)', 'scrolling': '(True)'}), '(graph_html, height=500, scrolling=True)\n', (11024, 11064), True, 'import streamlit.components.v1 as components\n'), ((11585, 11616), 'streamlit.write', 'st.write', (['f"""*Answer*: {answer}"""'], {}), "(f'*Answer*: {answer}')\n", (11593, 11616), True, 'import streamlit as st\n'), ((11625, 11717), 'streamlit.markdown', 'st.markdown', (['f"""\n## Generated NebulaGraph Cypher Query\n```cypher\n{graph_query}\n```\n"""'], {}), '(\n f"""\n## Generated NebulaGraph Cypher Query\n```cypher\n{graph_query}\n```\n""")\n', (11636, 11717), True, 'import streamlit as st\n'), ((11743, 11772), 'streamlit.write', 'st.write', (['"""## Rendered Graph"""'], {}), "('## Rendered Graph')\n", (11751, 11772), True, 'import streamlit as st\n'), ((12118, 12173), 'streamlit.components.v1.html', 'components.html', (['graph_html'], {'height': '(500)', 'scrolling': '(True)'}), '(graph_html, height=500, scrolling=True)\n', (12133, 12173), True, 'import streamlit.components.v1 as components\n'), ((12414, 12471), 'streamlit.code', 'st.code', ([], {'body': 'CODE_NL2CYPHER_LANGCHAIN', 'language': '"""python"""'}), "(body=CODE_NL2CYPHER_LANGCHAIN, language='python')\n", (12421, 12471), True, 'import streamlit as st\n'), ((12505, 12563), 'streamlit.code', 'st.code', ([], {'body': 'CODE_NL2CYPHER_LLAMAINDEX', 'language': '"""python"""'}), "(body=CODE_NL2CYPHER_LLAMAINDEX, language='python')\n", (12512, 12563), True, 'import streamlit as st\n'), ((5250, 5279), 're.search', 're.search', (['"""RETURN .+"""', 'query'], {}), "('RETURN .+', query)\n", (5259, 5279), False, 'import re\n'), ((10968, 10991), 'random.randint', 'random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (10982, 10991), False, 'import random\n'), ((12077, 12100), 'random.randint', 'random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (12091, 12100), False, 'import random\n')] |
import datetime
import uuid
from llama_index.core.memory import ChatMemoryBuffer
class Chat:
def __init__(self, model):
self.model = model
if model.id is None:
self.id = str(uuid.uuid4())
else:
self.id = model.id
self.history = ChatMemoryBuffer.from_defaults(token_limit=3900)
self.created = datetime.datetime.now()
def clearHistory(self):
self.history.reset()
def __eq__(self, other):
return self.id == other.id
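# Usage sketch (assumes `model` is any object exposing an optional `id` attribute):
#   chat = Chat(model)
#   chat.clearHistory()   # wipes the ChatMemoryBuffer
#   Chat(model) == chat   # True only when model.id is set; otherwise each Chat gets a fresh uuid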
| [
"llama_index.core.memory.ChatMemoryBuffer.from_defaults"
] | [((293, 341), 'llama_index.core.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(3900)'}), '(token_limit=3900)\n', (323, 341), False, 'from llama_index.core.memory import ChatMemoryBuffer\n'), ((366, 389), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (387, 389), False, 'import datetime\n'), ((210, 222), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (220, 222), False, 'import uuid\n')] |
from pathlib import Path
from llama_index import Document, SimpleDirectoryReader, download_loader
from llama_index.query_engine import RetrieverQueryEngine
from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores import PineconeVectorStore
import pinecone
import os
from llama_index.node_parser import SimpleNodeParser
import openai
from dotenv import load_dotenv
from os import getenv
def query_research(message):
load_dotenv()
#openai.api_key_path = getenv('OPENAI_API_KEY')
#constructor
# def __init__(
# self,
# api_key,
# api_username,
# openai_api_key=None,
# base_url='https://forum.subspace.network',
# verbose=True,
# ):
#load PDF
# PDFReader = download_loader("PDFReader")
# loader = PDFReader()
# docs = loader.load_data(file=Path('../../data/whitepaper.pdf'))
docs = SimpleDirectoryReader('/Users/ryanyeung/Code/Crypto/SupportGPT/supportgpt/sources/data').load_data()
    # parse the documents into nodes
parser = SimpleNodeParser()
nodes = parser.get_nodes_from_documents(docs)
# initialize connection to pinecone
# pinecone.init(
# getenv('PINECONE_API_KEY'),
# getenv('PINECONE_ENVIRONMENT'),
# )
pinecone.init(
api_key=os.environ['PINECONE_API_KEY'],
environment=os.environ['PINECONE_ENVIRONMENT']
)
# create the index if it does not exist already
index_name = 'research-test'
if index_name not in pinecone.list_indexes():
pinecone.create_index(
index_name,
dimension=1536,
metric='cosine'
)
# connect to the index
pinecone_index = pinecone.Index(index_name)
# we can select a namespace (acts as a partition in an index)
namespace = '' # default namespace
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
# setup our storage (vector db)
storage_context = StorageContext.from_defaults(
vector_store=vector_store
)
# setup the index/query process, ie the embedding model (and completion if used)
embed_model = OpenAIEmbedding(model='text-embedding-ada-002', embed_batch_size=100)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
index = GPTVectorStoreIndex.from_documents(
docs, storage_context=storage_context,
service_context=service_context
)
# retriever = index.as_retriever(retriever_mode='default')
# query_engine = RetrieverQueryEngine(retriever)
# #query_engine = RetrieverQueryEngine.from_args(retriever, response_mode='default')
query_engine = index.as_query_engine()
res = query_engine.query(message)
return str(res)
# print(str(res))
# print(res.get_formatted_sources())
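    # Example call (hypothetical question; assumes the Pinecone and OpenAI env vars above are set):
    #   answer = query_research("What topics does the indexed research cover?")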
#pinecone.delete_index(index_name) | [
"llama_index.SimpleDirectoryReader",
"llama_index.vector_stores.PineconeVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.node_parser.SimpleNodeParser",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((531, 544), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (542, 544), False, 'from dotenv import load_dotenv\n'), ((1142, 1160), 'llama_index.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {}), '()\n', (1158, 1160), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((1366, 1472), 'pinecone.init', 'pinecone.init', ([], {'api_key': "os.environ['PINECONE_API_KEY']", 'environment': "os.environ['PINECONE_ENVIRONMENT']"}), "(api_key=os.environ['PINECONE_API_KEY'], environment=os.\n environ['PINECONE_ENVIRONMENT'])\n", (1379, 1472), False, 'import pinecone\n'), ((1804, 1830), 'pinecone.Index', 'pinecone.Index', (['index_name'], {}), '(index_name)\n', (1818, 1830), False, 'import pinecone\n'), ((1965, 2015), 'llama_index.vector_stores.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'pinecone_index'}), '(pinecone_index=pinecone_index)\n', (1984, 2015), False, 'from llama_index.vector_stores import PineconeVectorStore\n'), ((2076, 2131), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (2104, 2131), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((2249, 2318), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'embed_batch_size': '(100)'}), "(model='text-embedding-ada-002', embed_batch_size=100)\n", (2264, 2318), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((2341, 2394), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (2369, 2394), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((2408, 2518), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['docs'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(docs, storage_context=storage_context,\n service_context=service_context)\n', (2442, 2518), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((1609, 1632), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (1630, 1632), False, 'import pinecone\n'), ((1642, 1708), 'pinecone.create_index', 'pinecone.create_index', (['index_name'], {'dimension': '(1536)', 'metric': '"""cosine"""'}), "(index_name, dimension=1536, metric='cosine')\n", (1663, 1708), False, 'import pinecone\n'), ((1013, 1106), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""/Users/ryanyeung/Code/Crypto/SupportGPT/supportgpt/sources/data"""'], {}), "(\n '/Users/ryanyeung/Code/Crypto/SupportGPT/supportgpt/sources/data')\n", (1034, 1106), False, 'from llama_index import Document, SimpleDirectoryReader, download_loader\n')] |
from llama_index.node_parser import SimpleNodeParser
from typing import *
from llama_index.data_structs import Node
import requests
from collections import defaultdict
from llama_index import Document
from config import config
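# Turn scraped page rows (url, text, images) into llama_index Documents, keeping the URL and image links as metadata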
def load_and_parse(all_docs):
documents = []
for file_row in all_docs:
url = file_row["url"]
content = file_row["text"]
images = file_row['images']
metadata = defaultdict()
metadata['URL'] = url
metadata['images'] = images
body_text = content
documents.append(Document(text=body_text, metadata=dict(metadata)))
return documents
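# Fetch each URL (10 s timeout) and keep only the pages that returned HTTP 200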
def reader(urls, imgs_links):
all_pages = []
for url in urls:
try:
res = requests.get(url, timeout=10)
        except requests.RequestException:
continue
if res.status_code == 200:
all_pages.append((url, res.text, imgs_links))
return all_pages
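# Split every Document into chunked Nodes using the chunk size from the project config (chunk_overlap=50)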
def convert_documents_into_nodes(documents):
all_nodes = []
for document in documents:
parser = SimpleNodeParser.from_defaults(
chunk_size=config.node_chunk_size, chunk_overlap=50)
nodes = parser.get_nodes_from_documents([document])
all_nodes.extend(nodes)
return all_nodes
| [
"llama_index.node_parser.SimpleNodeParser.from_defaults"
] | [((430, 443), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (441, 443), False, 'from collections import defaultdict\n'), ((1033, 1120), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': 'config.node_chunk_size', 'chunk_overlap': '(50)'}), '(chunk_size=config.node_chunk_size,\n chunk_overlap=50)\n', (1063, 1120), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((738, 767), 'requests.get', 'requests.get', (['url'], {'timeout': '(10)'}), '(url, timeout=10)\n', (750, 767), False, 'import requests\n')] |
# bring in our LLAMA_CLOUD_API_KEY
import os
from dotenv import load_dotenv
load_dotenv()
import nest_asyncio # noqa: E402
nest_asyncio.apply()
# bring in deps
from llama_parse import LlamaParse # noqa: E402
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader # noqa: E402
# set up parser
llamaparse_api_key = os.getenv("LLAMA_CLOUD_API_KEY")
parser = LlamaParse(
api_key=llamaparse_api_key,
result_type="markdown" # "markdown" and "text" are available
)
# use SimpleDirectoryReader to parse our file
file_extractor = {".pdf": parser}
documents = SimpleDirectoryReader(input_files=['data/gpt4all.pdf'], file_extractor=file_extractor).load_data()
documents
#len(documents)
#documents[0].text
documents[0].text[:200]
########### Ollama Models ###############
# by default llamaindex uses OpenAI models
from llama_index.embeddings.ollama import OllamaEmbedding # noqa: E402
embed_model = OllamaEmbedding(
#model_name="nomic-embed-text",
model_name="llama2",
base_url="http://localhost:11434",
ollama_additional_kwargs={"mirostat": 0},
)
from llama_index.llms.ollama import Ollama # noqa: E402
llm = Ollama(model="llama2", request_timeout=30.0)
from llama_index.core import Settings # noqa: E402
Settings.llm = llm
Settings.embed_model = embed_model
# get the answer out of it
# create an index from the parsed markdown
index = VectorStoreIndex.from_documents(documents)
# create a query engine for the index
query_engine = index.as_query_engine()
# query the engine
from IPython.display import Markdown, display # noqa: E402
# query the engine
query = "what is the BoolQ value of GPT4All-J 6B v1.0* model ?"
response = query_engine.query(query)
display(Markdown(f"<b>{response}</b>"))
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.ollama.Ollama",
"llama_index.embeddings.ollama.OllamaEmbedding"
] | [((76, 89), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (87, 89), False, 'from dotenv import load_dotenv\n'), ((125, 145), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (143, 145), False, 'import nest_asyncio\n'), ((333, 365), 'os.getenv', 'os.getenv', (['"""LLAMA_CLOUD_API_KEY"""'], {}), "('LLAMA_CLOUD_API_KEY')\n", (342, 365), False, 'import os\n'), ((375, 437), 'llama_parse.LlamaParse', 'LlamaParse', ([], {'api_key': 'llamaparse_api_key', 'result_type': '"""markdown"""'}), "(api_key=llamaparse_api_key, result_type='markdown')\n", (385, 437), False, 'from llama_parse import LlamaParse\n'), ((925, 1042), 'llama_index.embeddings.ollama.OllamaEmbedding', 'OllamaEmbedding', ([], {'model_name': '"""llama2"""', 'base_url': '"""http://localhost:11434"""', 'ollama_additional_kwargs': "{'mirostat': 0}"}), "(model_name='llama2', base_url='http://localhost:11434',\n ollama_additional_kwargs={'mirostat': 0})\n", (940, 1042), False, 'from llama_index.embeddings.ollama import OllamaEmbedding\n'), ((1154, 1198), 'llama_index.llms.ollama.Ollama', 'Ollama', ([], {'model': '"""llama2"""', 'request_timeout': '(30.0)'}), "(model='llama2', request_timeout=30.0)\n", (1160, 1198), False, 'from llama_index.llms.ollama import Ollama\n'), ((1386, 1428), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1417, 1428), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n'), ((1716, 1746), 'IPython.display.Markdown', 'Markdown', (['f"""<b>{response}</b>"""'], {}), "(f'<b>{response}</b>')\n", (1724, 1746), False, 'from IPython.display import Markdown, display\n'), ((580, 671), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['data/gpt4all.pdf']", 'file_extractor': 'file_extractor'}), "(input_files=['data/gpt4all.pdf'], file_extractor=\n file_extractor)\n", (601, 671), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n')] |
from setup import documents, eval_questions, tru
from utils import get_prebuilt_trulens_recorder, build_automerging_index
#Auto-Merging Retrieval
from llama_index.llms import OpenAI
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)
automerging_index = build_automerging_index(
documents,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
save_dir="merging_index"
)
from utils import get_automerging_query_engine
automerging_query_engine = get_automerging_query_engine(
automerging_index,
)
auto_merging_response = automerging_query_engine.query(
"How does news and market sentiment affect stock prices?"
)
test_question = "How does news and market sentiment affect stock prices?"
print("\n" + test_question+"\n" + "\n" +str(auto_merging_response) + "\n")
tru.reset_database()
tru_recorder_automerging = get_prebuilt_trulens_recorder(automerging_query_engine,
app_id="Automerging Query Engine")
test_question = "What is technical analysis and fundamental analysis?"
eval_questions.append(test_question)
for question in eval_questions:
with tru_recorder_automerging as recording:
response = automerging_query_engine.query(question)
print(question)
print(response)
tru.get_leaderboard(app_ids=[])
tru.run_dashboard() | [
"llama_index.llms.OpenAI"
] | [((190, 236), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.1)'}), "(model='gpt-3.5-turbo', temperature=0.1)\n", (196, 236), False, 'from llama_index.llms import OpenAI\n'), ((258, 372), 'utils.build_automerging_index', 'build_automerging_index', (['documents', 'llm'], {'embed_model': '"""local:BAAI/bge-small-en-v1.5"""', 'save_dir': '"""merging_index"""'}), "(documents, llm, embed_model=\n 'local:BAAI/bge-small-en-v1.5', save_dir='merging_index')\n", (281, 372), False, 'from utils import get_prebuilt_trulens_recorder, build_automerging_index\n'), ((461, 508), 'utils.get_automerging_query_engine', 'get_automerging_query_engine', (['automerging_index'], {}), '(automerging_index)\n', (489, 508), False, 'from utils import get_automerging_query_engine\n'), ((785, 805), 'setup.tru.reset_database', 'tru.reset_database', ([], {}), '()\n', (803, 805), False, 'from setup import documents, eval_questions, tru\n'), ((833, 928), 'utils.get_prebuilt_trulens_recorder', 'get_prebuilt_trulens_recorder', (['automerging_query_engine'], {'app_id': '"""Automerging Query Engine"""'}), "(automerging_query_engine, app_id=\n 'Automerging Query Engine')\n", (862, 928), False, 'from utils import get_prebuilt_trulens_recorder, build_automerging_index\n'), ((1051, 1087), 'setup.eval_questions.append', 'eval_questions.append', (['test_question'], {}), '(test_question)\n', (1072, 1087), False, 'from setup import documents, eval_questions, tru\n'), ((1277, 1308), 'setup.tru.get_leaderboard', 'tru.get_leaderboard', ([], {'app_ids': '[]'}), '(app_ids=[])\n', (1296, 1308), False, 'from setup import documents, eval_questions, tru\n'), ((1309, 1328), 'setup.tru.run_dashboard', 'tru.run_dashboard', ([], {}), '()\n', (1326, 1328), False, 'from setup import documents, eval_questions, tru\n')] |
from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, StorageContext, ServiceContext, GPTVectorStoreIndex, load_index_from_storage
from langchain.chat_models import ChatOpenAI
import gradio as gr
import sys
import os
import openai
openai.api_base = "https://api.app4gpt.com/v1"
os.environ["OPENAI_API_KEY"] = 'you-API-KEY'
def create_service_context():
#constraint parameters
max_input_size = 4096
num_outputs = 3072
max_chunk_overlap = 20
chunk_size_limit = 600
#allows the user to explicitly set certain constraint parameters
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
#LLMPredictor is a wrapper class around LangChain's LLMChain that allows easy integration into LlamaIndex
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
#constructs service_context
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
return service_context
def data_ingestion_indexing(directory_path):
#loads data from the specified directory path
documents = SimpleDirectoryReader(directory_path).load_data()
#when first building the index
index = GPTVectorStoreIndex.from_documents(
documents, service_context=create_service_context()
)
#persist index to disk, default "storage" folder
index.storage_context.persist()
return index
def data_querying(input_text):
#rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir="./storage")
#loads index from storage
index = load_index_from_storage(storage_context, service_context=create_service_context())
#queries the index with the input text
response = index.as_query_engine().query(input_text)
return response.response
iface = gr.Interface(fn=data_querying,
inputs=gr.components.Textbox(lines=7, label="Enter your question"),
outputs="text",
title="Custom-Pdf Demo by Gpt4")
#passes in data directory
index = data_ingestion_indexing("data")
iface.launch(share=False)
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults",
"llama_index.PromptHelper"
] | [((597, 696), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size_limit'}), '(max_input_size, num_outputs, max_chunk_overlap,\n chunk_size_limit=chunk_size_limit)\n', (609, 696), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, StorageContext, ServiceContext, GPTVectorStoreIndex, load_index_from_storage\n'), ((977, 1068), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (1005, 1068), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, StorageContext, ServiceContext, GPTVectorStoreIndex, load_index_from_storage\n'), ((1601, 1654), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (1629, 1654), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, StorageContext, ServiceContext, GPTVectorStoreIndex, load_index_from_storage\n'), ((1988, 2047), 'gradio.components.Textbox', 'gr.components.Textbox', ([], {'lines': '(7)', 'label': '"""Enter your question"""'}), "(lines=7, label='Enter your question')\n", (2009, 2047), True, 'import gradio as gr\n'), ((841, 920), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.5)', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': 'num_outputs'}), "(temperature=0.5, model_name='gpt-3.5-turbo', max_tokens=num_outputs)\n", (851, 920), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1205, 1242), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['directory_path'], {}), '(directory_path)\n', (1226, 1242), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, StorageContext, ServiceContext, GPTVectorStoreIndex, load_index_from_storage\n')] |
import logging
import sys
# Uncomment to see debug logs
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document, StorageContext
from llama_index.vector_stores import MilvusVectorStore
from IPython.display import Markdown, display
import textwrap
import os
os.environ["OPENAI_API_KEY"] = "sk-"
# Load the documents to index (the "./data" directory is assumed here; adjust as needed)
documents = SimpleDirectoryReader("./data").load_data()
vector_store = MilvusVectorStore(overwrite=False)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
query_engine = index.as_query_engine()
res = query_engine.query("What is the number?")
print("Res:", res)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.vector_stores.MilvusVectorStore"
] | [((451, 485), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'overwrite': '(False)'}), '(overwrite=False)\n', (468, 485), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((568, 643), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context'}), '(documents, storage_context=storage_context)\n', (599, 643), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document\n')] |
from llama_index.core.llms import ChatMessage
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.core.prompts import PromptTemplate
from projectgurukul.custom_models import model_utils
import logging
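# Local TinyLlama chat model wrapped for llama_index; messages are rendered with the tokenizer's chat template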
def get_tinyllama_llm(context_window = 2048, max_new_tokens = 256, system_prompt = ""):
def messages_to_prompt(messages: ChatMessage):
messages_dict = [
{"role": message.role.value, "content": message.content}
for message in messages
]
prompt = huggingllm._tokenizer.apply_chat_template(messages_dict, tokenize=False, add_generation_prompt=True)
logging.debug(prompt)
return prompt
device, dtype = model_utils.get_device_and_dtype()
# This will wrap the default prompts that are internal to llama-index
query_wrapper_prompt = PromptTemplate(
f"<|system|>{system_prompt}"+"<|user|>{query_str}<|assistant|>")
huggingllm = HuggingFaceLLM(
context_window=context_window,
is_chat_model=True,
model_name="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
tokenizer_name="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
max_new_tokens=max_new_tokens,
stopping_ids=[2, 50256],
generate_kwargs={'do_sample': False},
model_kwargs={"torch_dtype": dtype},
query_wrapper_prompt=query_wrapper_prompt,
device_map = device,
system_prompt=system_prompt
)
huggingllm.messages_to_prompt = messages_to_prompt
return huggingllm
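# Local Phi-2 model wrapped for llama_index; chat roles are mapped to plain "Instructions"/"User Instructions" prefixes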
def get_phi2_llm(context_window = 2048, max_new_tokens = 256, system_prompt = ""):
role_maps = {
"system" :"Instructions",
"user" :"User Instructions"
}
def messages_to_prompt(messages: ChatMessage):
prompt = ""
for message in messages:
role = message.role.value
role = role_maps[role] if role in role_maps else role
prompt += f"\n{role}:: {message.content}\n\n"
prompt += "Response ::"
logging.debug(prompt)
return prompt
device, dtype = model_utils.get_device_and_dtype()
# This will wrap the default prompts that are internal to llama-index
query_wrapper_prompt = PromptTemplate("Instruct: {query_str}\nOutput: ")
huggingllm = HuggingFaceLLM(
context_window=context_window,
is_chat_model=True,
model_name="microsoft/phi-2",
tokenizer_name="microsoft/phi-2",
max_new_tokens=max_new_tokens,
stopping_ids=[50256],
generate_kwargs={'do_sample': False},
model_kwargs={"torch_dtype": dtype, "trust_remote_code" :True},
query_wrapper_prompt=query_wrapper_prompt,
messages_to_prompt = messages_to_prompt,
device_map = device,
system_prompt=system_prompt
)
return huggingllm | [
"llama_index.core.prompts.PromptTemplate",
"llama_index.llms.huggingface.HuggingFaceLLM"
] | [((720, 754), 'projectgurukul.custom_models.model_utils.get_device_and_dtype', 'model_utils.get_device_and_dtype', ([], {}), '()\n', (752, 754), False, 'from projectgurukul.custom_models import model_utils\n'), ((857, 942), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (["(f'<|system|>{system_prompt}' + '<|user|>{query_str}<|assistant|>')"], {}), "(f'<|system|>{system_prompt}' +\n '<|user|>{query_str}<|assistant|>')\n", (871, 942), False, 'from llama_index.core.prompts import PromptTemplate\n'), ((964, 1375), 'llama_index.llms.huggingface.HuggingFaceLLM', 'HuggingFaceLLM', ([], {'context_window': 'context_window', 'is_chat_model': '(True)', 'model_name': '"""TinyLlama/TinyLlama-1.1B-Chat-v1.0"""', 'tokenizer_name': '"""TinyLlama/TinyLlama-1.1B-Chat-v1.0"""', 'max_new_tokens': 'max_new_tokens', 'stopping_ids': '[2, 50256]', 'generate_kwargs': "{'do_sample': False}", 'model_kwargs': "{'torch_dtype': dtype}", 'query_wrapper_prompt': 'query_wrapper_prompt', 'device_map': 'device', 'system_prompt': 'system_prompt'}), "(context_window=context_window, is_chat_model=True,\n model_name='TinyLlama/TinyLlama-1.1B-Chat-v1.0', tokenizer_name=\n 'TinyLlama/TinyLlama-1.1B-Chat-v1.0', max_new_tokens=max_new_tokens,\n stopping_ids=[2, 50256], generate_kwargs={'do_sample': False},\n model_kwargs={'torch_dtype': dtype}, query_wrapper_prompt=\n query_wrapper_prompt, device_map=device, system_prompt=system_prompt)\n", (978, 1375), False, 'from llama_index.llms.huggingface import HuggingFaceLLM\n'), ((2079, 2113), 'projectgurukul.custom_models.model_utils.get_device_and_dtype', 'model_utils.get_device_and_dtype', ([], {}), '()\n', (2111, 2113), False, 'from projectgurukul.custom_models import model_utils\n'), ((2216, 2268), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""Instruct: {query_str}\nOutput: """'], {}), '("""Instruct: {query_str}\nOutput: """)\n', (2230, 2268), False, 'from llama_index.core.prompts import PromptTemplate\n'), ((2284, 2724), 'llama_index.llms.huggingface.HuggingFaceLLM', 'HuggingFaceLLM', ([], {'context_window': 'context_window', 'is_chat_model': '(True)', 'model_name': '"""microsoft/phi-2"""', 'tokenizer_name': '"""microsoft/phi-2"""', 'max_new_tokens': 'max_new_tokens', 'stopping_ids': '[50256]', 'generate_kwargs': "{'do_sample': False}", 'model_kwargs': "{'torch_dtype': dtype, 'trust_remote_code': True}", 'query_wrapper_prompt': 'query_wrapper_prompt', 'messages_to_prompt': 'messages_to_prompt', 'device_map': 'device', 'system_prompt': 'system_prompt'}), "(context_window=context_window, is_chat_model=True,\n model_name='microsoft/phi-2', tokenizer_name='microsoft/phi-2',\n max_new_tokens=max_new_tokens, stopping_ids=[50256], generate_kwargs={\n 'do_sample': False}, model_kwargs={'torch_dtype': dtype,\n 'trust_remote_code': True}, query_wrapper_prompt=query_wrapper_prompt,\n messages_to_prompt=messages_to_prompt, device_map=device, system_prompt\n =system_prompt)\n", (2298, 2724), False, 'from llama_index.llms.huggingface import HuggingFaceLLM\n'), ((654, 675), 'logging.debug', 'logging.debug', (['prompt'], {}), '(prompt)\n', (667, 675), False, 'import logging\n'), ((2015, 2036), 'logging.debug', 'logging.debug', (['prompt'], {}), '(prompt)\n', (2028, 2036), False, 'import logging\n')] |
from functools import reduce
from pathlib import Path
from typing import List
from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, ServiceContext, StorageContext, \
load_index_from_storage, LLMPredictor, OpenAIEmbedding, download_loader, Document
from llama_index.indices.base import IndexType
from llama_index.llms import OpenAI
import chromadb
from llama_index import VectorStoreIndex
from llama_index.vector_stores import ChromaVectorStore
import os
import logging
from llama_index.readers.file.markdown_reader import MarkdownReader
class ProposalsLoader:
@property
def cache_path(self) -> str:
return os.path.join(os.getcwd(), ".caches")
def __init__(self, directory_path: str):
self.directory_path = directory_path
self.llm = OpenAI(model='gpt-4-1106-preview')
def load(self) -> IndexType:
embed_model = OpenAIEmbedding(model="text-embedding-ada-002")
predictor = LLMPredictor(llm=self.llm)
service_context = ServiceContext.from_defaults(
embed_model=embed_model,
llm_predictor=predictor
)
# documents = SimpleDirectoryReader(self.directory_path).load_data()
markdown_reader = MarkdownReader()
proposals = [os.path.join(self.directory_path, markdown)
for markdown in os.listdir(self.directory_path) if markdown.endswith(".md")]
def extend_markdowns(list: List[Document], filepath: str) -> List[Document]:
docs = markdown_reader.load_data(file=Path(filepath))
list.extend(docs)
return list
documents: List[Document] = reduce(
extend_markdowns,
proposals,
[]
)
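        # Persist the embeddings in a local Chroma collection so the index can be reused across runs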
db = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db.get_or_create_collection("swift-evolution-gpt")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = GPTVectorStoreIndex.from_documents(documents,
service_context=service_context,
storage_context=storage_context)
return index | [
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.OpenAIEmbedding",
"llama_index.llms.OpenAI",
"llama_index.StorageContext.from_defaults",
"llama_index.readers.file.markdown_reader.MarkdownReader",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((793, 827), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""'}), "(model='gpt-4-1106-preview')\n", (799, 827), False, 'from llama_index.llms import OpenAI\n'), ((884, 931), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""'}), "(model='text-embedding-ada-002')\n", (899, 931), False, 'from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, LLMPredictor, OpenAIEmbedding, download_loader, Document\n'), ((952, 978), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'self.llm'}), '(llm=self.llm)\n', (964, 978), False, 'from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, LLMPredictor, OpenAIEmbedding, download_loader, Document\n'), ((1005, 1083), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model', 'llm_predictor': 'predictor'}), '(embed_model=embed_model, llm_predictor=predictor)\n', (1033, 1083), False, 'from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, LLMPredictor, OpenAIEmbedding, download_loader, Document\n'), ((1222, 1238), 'llama_index.readers.file.markdown_reader.MarkdownReader', 'MarkdownReader', ([], {}), '()\n', (1236, 1238), False, 'from llama_index.readers.file.markdown_reader import MarkdownReader\n'), ((1645, 1684), 'functools.reduce', 'reduce', (['extend_markdowns', 'proposals', '[]'], {}), '(extend_markdowns, proposals, [])\n', (1651, 1684), False, 'from functools import reduce\n'), ((1745, 1790), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (1770, 1790), False, 'import chromadb\n'), ((1895, 1949), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (1912, 1949), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((1976, 2031), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (2004, 2031), False, 'from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, LLMPredictor, OpenAIEmbedding, download_loader, Document\n'), ((2049, 2165), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context', 'storage_context': 'storage_context'}), '(documents, service_context=\n service_context, storage_context=storage_context)\n', (2083, 2165), False, 'from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage, LLMPredictor, OpenAIEmbedding, download_loader, Document\n'), ((659, 670), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (668, 670), False, 'import os\n'), ((1260, 1303), 'os.path.join', 'os.path.join', (['self.directory_path', 'markdown'], {}), '(self.directory_path, markdown)\n', (1272, 1303), False, 'import os\n'), ((1341, 1372), 'os.listdir', 'os.listdir', (['self.directory_path'], {}), '(self.directory_path)\n', (1351, 1372), False, 'import os\n'), ((1538, 1552), 'pathlib.Path', 'Path', (['filepath'], {}), '(filepath)\n', (1542, 1552), False, 'from pathlib import Path\n')] |
from llama_index.multi_modal_llms import GeminiMultiModal
from llama_index.program import MultiModalLLMCompletionProgram
from llama_index.output_parsers import PydanticOutputParser
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from pydantic import BaseModel, Field
from typing_extensions import Annotated
damages_initial_prompt_str = """
The images are of a damaged {make_name} {model_name} {year} car.
The images are taken from different angles.
Please analyze them and tell me what parts are damaged and what is the estimated cost of repair.
"""
conditions_report_initial_prompt_str = """
The images are of a damaged vehicle.
I need to fill a vehicle condition report based on the picture(s).
Please fill the following details based on the image(s):
FRONT
1. Roof
2. Windshield
3. Hood
4. Grill
5. Front bumper
6. Right mirror
7. Left mirror
8. Front right light
9. Front left light
BACK
10. Rear Window
11. Trunk/TGate
12. Trunk/Cargo area
13. Rear bumper
14. Tail lights
DRIVERS SIDE
15. Left fender
16. Left front door
17. Left rear door
18. Left rear quarter panel
PASSENGER SIDE
19. Right rear quarter
20. Right rear door
21. Right front door
22. Right fender
TIRES
T1. Front left tire
T2. Front right tire
T3. Rear left tire
T4. Rear right tire
For each of the details you must answer with a score based on these descriptions to reflect the condition:
- 0: Not visible
- 1: Seems OK (no damage)
- 2: Minor damage (scratches, dents)
- 3: Major damage (bent, broken, missing)
"""
class DamagedPart(BaseModel):
"""Data model of the damaged part"""
part_name: str = Field(..., description="Name of the damaged part")
cost: float = Field(..., description="Estimated cost of repair")
class DamagedParts(BaseModel):
"""Data model of the damaged parts"""
damaged_parts: list[DamagedPart] = Field(..., description="List of damaged parts")
summary: str = Field(..., description="Summary of the damage")
class ConditionsReport(BaseModel):
"""Data model of conditions report"""
roof: Annotated[int, Field(0, ge=0, le=3, description="Roof condition")]
windshield: Annotated[int, Field(0, ge=0, le=3, description="Windshield condition")]
hood: Annotated[int, Field(0, ge=0, le=3, description="Hood condition")]
grill: Annotated[int, Field(0, ge=0, le=3, description="Grill condition")]
front_bumper: Annotated[
int, Field(0, ge=0, le=3, description="Front bumper condition")
]
right_mirror: Annotated[
int, Field(0, ge=0, le=3, description="Right mirror condition")
]
left_mirror: Annotated[
int, Field(0, ge=0, le=3, description="Left mirror condition")
]
front_right_light: Annotated[
int, Field(0, ge=0, le=3, description="Front right light condition")
]
front_left_light: Annotated[
int, Field(0, ge=0, le=3, description="Front left light condition")
]
# back
rear_window: Annotated[
int, Field(0, ge=0, le=3, description="Rear window condition")
]
trunk_tgate: Annotated[
int, Field(0, ge=0, le=3, description="Trunk/TGate condition")
]
trunk_cargo_area: Annotated[
int, Field(0, ge=0, le=3, description="Trunk/Cargo area condition")
]
rear_bumper: Annotated[
int, Field(0, ge=0, le=3, description="Rear bumper condition")
]
right_tail_light: Annotated[
int, Field(0, ge=0, le=3, description="Right tail light condition")
]
left_tail_light: Annotated[
int, Field(0, ge=0, le=3, description="Left tail light condition")
]
# left
left_rear_quarter: Annotated[
int, Field(0, ge=0, le=3, description="Left rear quarter condition")
]
left_rear_door: Annotated[
int, Field(0, ge=0, le=3, description="Left rear door condition")
]
left_front_door: Annotated[
int, Field(0, ge=0, le=3, description="Left front door condition")
]
left_fender: Annotated[
int, Field(0, ge=0, le=3, description="Left fender condition")
]
left_front_tire: Annotated[
int, Field(0, ge=0, le=3, description="Left front tire condition")
]
left_rear_tire: Annotated[
int, Field(0, ge=0, le=3, description="Left rear tire condition")
]
# right
right_rear_quarter: Annotated[
int, Field(0, ge=0, le=3, description="Right rear quarter condition")
]
right_rear_door: Annotated[
int, Field(0, ge=0, le=3, description="Right rear door condition")
]
right_front_door: Annotated[
int, Field(0, ge=0, le=3, description="Right front door condition")
]
right_fender: Annotated[
int, Field(0, ge=0, le=3, description="Right fender condition")
]
right_front_tire: Annotated[
int, Field(0, ge=0, le=3, description="Right front tire condition")
]
right_rear_tire: Annotated[
int, Field(0, ge=0, le=3, description="Right rear tire condition")
]
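# Run a structured multi-modal completion: the selected LLM fills the given pydantic output_class from the images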
def pydantic_llm(
output_class, image_documents, prompt_template_str, selected_llm_model
):
openai_mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview")
gemini_llm = GeminiMultiModal(model_name="models/gemini-pro-vision")
multi_modal_llm = gemini_llm
if selected_llm_model == "OpenAI":
multi_modal_llm = openai_mm_llm
llm_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(output_class),
image_documents=image_documents,
prompt_template_str=prompt_template_str,
multi_modal_llm=multi_modal_llm,
verbose=True,
)
response = llm_program()
return response
| [
"llama_index.multi_modal_llms.GeminiMultiModal",
"llama_index.output_parsers.PydanticOutputParser",
"llama_index.multi_modal_llms.openai.OpenAIMultiModal"
] | [((1607, 1657), 'pydantic.Field', 'Field', (['...'], {'description': '"""Name of the damaged part"""'}), "(..., description='Name of the damaged part')\n", (1612, 1657), False, 'from pydantic import BaseModel, Field\n'), ((1676, 1726), 'pydantic.Field', 'Field', (['...'], {'description': '"""Estimated cost of repair"""'}), "(..., description='Estimated cost of repair')\n", (1681, 1726), False, 'from pydantic import BaseModel, Field\n'), ((1842, 1889), 'pydantic.Field', 'Field', (['...'], {'description': '"""List of damaged parts"""'}), "(..., description='List of damaged parts')\n", (1847, 1889), False, 'from pydantic import BaseModel, Field\n'), ((1909, 1956), 'pydantic.Field', 'Field', (['...'], {'description': '"""Summary of the damage"""'}), "(..., description='Summary of the damage')\n", (1914, 1956), False, 'from pydantic import BaseModel, Field\n'), ((5072, 5118), 'llama_index.multi_modal_llms.openai.OpenAIMultiModal', 'OpenAIMultiModal', ([], {'model': '"""gpt-4-vision-preview"""'}), "(model='gpt-4-vision-preview')\n", (5088, 5118), False, 'from llama_index.multi_modal_llms.openai import OpenAIMultiModal\n'), ((5136, 5191), 'llama_index.multi_modal_llms.GeminiMultiModal', 'GeminiMultiModal', ([], {'model_name': '"""models/gemini-pro-vision"""'}), "(model_name='models/gemini-pro-vision')\n", (5152, 5191), False, 'from llama_index.multi_modal_llms import GeminiMultiModal\n'), ((2062, 2112), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Roof condition"""'}), "(0, ge=0, le=3, description='Roof condition')\n", (2067, 2112), False, 'from pydantic import BaseModel, Field\n'), ((2145, 2201), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Windshield condition"""'}), "(0, ge=0, le=3, description='Windshield condition')\n", (2150, 2201), False, 'from pydantic import BaseModel, Field\n'), ((2228, 2278), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Hood condition"""'}), "(0, ge=0, le=3, description='Hood condition')\n", (2233, 2278), False, 'from pydantic import BaseModel, Field\n'), ((2306, 2357), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Grill condition"""'}), "(0, ge=0, le=3, description='Grill condition')\n", (2311, 2357), False, 'from pydantic import BaseModel, Field\n'), ((2401, 2459), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Front bumper condition"""'}), "(0, ge=0, le=3, description='Front bumper condition')\n", (2406, 2459), False, 'from pydantic import BaseModel, Field\n'), ((2508, 2566), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Right mirror condition"""'}), "(0, ge=0, le=3, description='Right mirror condition')\n", (2513, 2566), False, 'from pydantic import BaseModel, Field\n'), ((2614, 2671), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Left mirror condition"""'}), "(0, ge=0, le=3, description='Left mirror condition')\n", (2619, 2671), False, 'from pydantic import BaseModel, Field\n'), ((2725, 2788), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Front right light condition"""'}), "(0, ge=0, le=3, description='Front right light condition')\n", (2730, 2788), False, 'from pydantic import BaseModel, Field\n'), ((2841, 2903), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Front left light condition"""'}), "(0, ge=0, le=3, description='Front 
left light condition')\n", (2846, 2903), False, 'from pydantic import BaseModel, Field\n'), ((2962, 3019), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Rear window condition"""'}), "(0, ge=0, le=3, description='Rear window condition')\n", (2967, 3019), False, 'from pydantic import BaseModel, Field\n'), ((3067, 3124), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Trunk/TGate condition"""'}), "(0, ge=0, le=3, description='Trunk/TGate condition')\n", (3072, 3124), False, 'from pydantic import BaseModel, Field\n'), ((3177, 3239), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Trunk/Cargo area condition"""'}), "(0, ge=0, le=3, description='Trunk/Cargo area condition')\n", (3182, 3239), False, 'from pydantic import BaseModel, Field\n'), ((3287, 3344), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Rear bumper condition"""'}), "(0, ge=0, le=3, description='Rear bumper condition')\n", (3292, 3344), False, 'from pydantic import BaseModel, Field\n'), ((3397, 3459), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Right tail light condition"""'}), "(0, ge=0, le=3, description='Right tail light condition')\n", (3402, 3459), False, 'from pydantic import BaseModel, Field\n'), ((3511, 3572), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Left tail light condition"""'}), "(0, ge=0, le=3, description='Left tail light condition')\n", (3516, 3572), False, 'from pydantic import BaseModel, Field\n'), ((3637, 3700), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Left rear quarter condition"""'}), "(0, ge=0, le=3, description='Left rear quarter condition')\n", (3642, 3700), False, 'from pydantic import BaseModel, Field\n'), ((3751, 3811), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Left rear door condition"""'}), "(0, ge=0, le=3, description='Left rear door condition')\n", (3756, 3811), False, 'from pydantic import BaseModel, Field\n'), ((3863, 3924), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Left front door condition"""'}), "(0, ge=0, le=3, description='Left front door condition')\n", (3868, 3924), False, 'from pydantic import BaseModel, Field\n'), ((3972, 4029), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Left fender condition"""'}), "(0, ge=0, le=3, description='Left fender condition')\n", (3977, 4029), False, 'from pydantic import BaseModel, Field\n'), ((4081, 4142), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Left front tire condition"""'}), "(0, ge=0, le=3, description='Left front tire condition')\n", (4086, 4142), False, 'from pydantic import BaseModel, Field\n'), ((4193, 4253), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Left rear tire condition"""'}), "(0, ge=0, le=3, description='Left rear tire condition')\n", (4198, 4253), False, 'from pydantic import BaseModel, Field\n'), ((4320, 4384), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Right rear quarter condition"""'}), "(0, ge=0, le=3, description='Right rear quarter condition')\n", (4325, 4384), False, 'from pydantic import BaseModel, Field\n'), ((4436, 4497), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Right rear door condition"""'}), 
"(0, ge=0, le=3, description='Right rear door condition')\n", (4441, 4497), False, 'from pydantic import BaseModel, Field\n'), ((4550, 4612), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Right front door condition"""'}), "(0, ge=0, le=3, description='Right front door condition')\n", (4555, 4612), False, 'from pydantic import BaseModel, Field\n'), ((4661, 4719), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Right fender condition"""'}), "(0, ge=0, le=3, description='Right fender condition')\n", (4666, 4719), False, 'from pydantic import BaseModel, Field\n'), ((4772, 4834), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Right front tire condition"""'}), "(0, ge=0, le=3, description='Right front tire condition')\n", (4777, 4834), False, 'from pydantic import BaseModel, Field\n'), ((4886, 4947), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Right rear tire condition"""'}), "(0, ge=0, le=3, description='Right rear tire condition')\n", (4891, 4947), False, 'from pydantic import BaseModel, Field\n'), ((5393, 5427), 'llama_index.output_parsers.PydanticOutputParser', 'PydanticOutputParser', (['output_class'], {}), '(output_class)\n', (5413, 5427), False, 'from llama_index.output_parsers import PydanticOutputParser\n')] |
from llama_index.core.storage.chat_store import SimpleChatStore
from llama_index.core.chat_engine import SimpleChatEngine
from llama_index.core.memory import ChatMemoryBuffer
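# Reload the persisted chat history if it exists; otherwise start with an empty chat store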
try:
chat_store = SimpleChatStore.from_persist_path(
persist_path="chat_memory.json"
)
except FileNotFoundError:
chat_store = SimpleChatStore()
memory = ChatMemoryBuffer.from_defaults(
token_limit=2000,
chat_store=chat_store,
chat_store_key="user_X"
)
chat_engine = SimpleChatEngine.from_defaults(memory=memory)
while True:
user_message = input("You: ")
if user_message.lower() == 'exit':
print("Exiting chat...")
break
response = chat_engine.chat(user_message)
print(f"Chatbot: {response}")
chat_store.persist(persist_path="chat_memory.json")
| [
"llama_index.core.chat_engine.SimpleChatEngine.from_defaults",
"llama_index.core.storage.chat_store.SimpleChatStore",
"llama_index.core.storage.chat_store.SimpleChatStore.from_persist_path",
"llama_index.core.memory.ChatMemoryBuffer.from_defaults"
] | [((351, 451), 'llama_index.core.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(2000)', 'chat_store': 'chat_store', 'chat_store_key': '"""user_X"""'}), "(token_limit=2000, chat_store=chat_store,\n chat_store_key='user_X')\n", (381, 451), False, 'from llama_index.core.memory import ChatMemoryBuffer\n'), ((483, 528), 'llama_index.core.chat_engine.SimpleChatEngine.from_defaults', 'SimpleChatEngine.from_defaults', ([], {'memory': 'memory'}), '(memory=memory)\n', (513, 528), False, 'from llama_index.core.chat_engine import SimpleChatEngine\n'), ((198, 264), 'llama_index.core.storage.chat_store.SimpleChatStore.from_persist_path', 'SimpleChatStore.from_persist_path', ([], {'persist_path': '"""chat_memory.json"""'}), "(persist_path='chat_memory.json')\n", (231, 264), False, 'from llama_index.core.storage.chat_store import SimpleChatStore\n'), ((322, 339), 'llama_index.core.storage.chat_store.SimpleChatStore', 'SimpleChatStore', ([], {}), '()\n', (337, 339), False, 'from llama_index.core.storage.chat_store import SimpleChatStore\n')] |
import streamlit as st
import pandas as pd
import os
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from llama_index import (
SimpleDirectoryReader,
VectorStoreIndex,
ServiceContext,
)
from llama_index.llms import LlamaCPP
from llama_index.llms.llama_utils import (
messages_to_prompt,
completion_to_prompt,
)
import subprocess
import time
# set version
# st.session_state.demo_lite = False
# initialize model
# llm = "tbd"
print("BP 4 ")
# initialize model - get llm depending on st.session_state.demo_lite and model
def init_llm(model, demo_lite):
# st.write("BP 4.1: model: ", model)
if demo_lite == False:
print("BP 5 : running full demo")
if model == "Llama2-7b_CPP":
model_path = "/Users/dheym/Library/CloudStorage/OneDrive-Personal/Documents/side_projects/GRDN/src/models/llama-2-7b-chat.Q4_K_M.gguf"
print("model path: ", model_path)
llm = LlamaCPP(
# You can pass in the URL to a GGML model to download it automatically
# model_url=model_url,
# optionally, you can set the path to a pre-downloaded model instead of model_url
model_path=model_path,
temperature=0.1,
max_new_tokens=1000,
# llama2 has a context window of 4096 tokens, but we set it lower to allow for some wiggle room
context_window=3000,
# kwargs to pass to __call__()
generate_kwargs={},
# kwargs to pass to __init__()
# set to at least 1 to use GPU
model_kwargs={"n_gpu_layers": 10},
# transform inputs into Llama2 format
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
verbose=True,
)
elif model == "deci-7b_CPP":
model_path = "/Users/dheym/Library/CloudStorage/OneDrive-Personal/Documents/side_projects/GRDN/src/models/decilm-7b-uniform-gqa-q8_0.gguf"
print("model path: ", model_path)
llm = LlamaCPP(
# You can pass in the URL to a GGML model to download it automatically
# model_url=model_url,
# optionally, you can set the path to a pre-downloaded model instead of model_url
model_path=model_path,
# model_url = "https://huggingface.co/Deci/DeciLM-7B-instruct-GGUF/resolve/main/decilm-7b-uniform-gqa-q8_0.gguf",
temperature=0.1,
max_new_tokens=1000,
# llama2 has a context window of 4096 tokens, but we set it lower to allow for some wiggle room
context_window=3000,
# kwargs to pass to __call__()
generate_kwargs={},
# kwargs to pass to __init__()
# set to at least 1 to use GPU
model_kwargs={"n_gpu_layers": 1},
# transform inputs into Llama2 format
# messages_to_prompt=messages_to_prompt,
# completion_to_prompt=completion_to_prompt,
verbose=True,
)
else:
print("Error with chatbot model")
return None
return llm
def parse_and_evaluate_text(text):
# Find the indices of the opening and closing brackets
opening_bracket_index = text.find("[")
closing_bracket_index = text.find("]")
if opening_bracket_index != -1 and closing_bracket_index != -1:
# Extract the text within the brackets
extracted_list = (
"[" + text[opening_bracket_index + 1 : closing_bracket_index] + "]"
)
# Return the evaluated text list
return eval(extracted_list)
else:
print("Error with parsing plant list")
return None
def chat_response(template, prompt_text, model, demo_lite):
if model == "openai-gpt35turbo":
chat = ChatOpenAI(temperature=0.1)
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt]
)
response = chat(chat_prompt.format_prompt(text=prompt_text).to_messages())
return response
# return response.content
elif model == "Llama2-7b_CPP" or model == "deci-7b_CPP":
print("BP 5.1: running full demo, model: ", model)
if "llm" not in st.session_state:
st.session_state.llm = init_llm(model, demo_lite)
response = st.session_state.llm.complete(template + prompt_text)
return response.text
else:
print("Error with chatbot model")
return None
# # get the plant list from user input
# def get_plant_list(input_plant_text, model):
# template="You are a helpful assistant that knows all about gardening and plants and python data structures."
# text = 'which of the elements of this list can be grown in a garden, [' + input_plant_text + ']? Return JUST a python list object containing the elements that can be grown in a garden. Do not include any other text or explanation.'
# plant_list_text = chat_response(template, text, model)
# plant_list = parse_and_evaluate_text(plant_list_text.content)
# print(plant_list)
# return plant_list
# get plant care tips based on plant list
def get_plant_care_tips(plant_list, model, demo_lite):
plant_care_tips = ""
template = "You are a helpful assistant that knows all about gardening, plants, and companion planting."
text = (
"from this list of plants, ["
+ str(st.session_state.input_plants_raw)
+ "], generate 1-2 plant care tips for each plant based on what you know. Return just the plant care tips in HTML markdown format. Make sure to use ### for headers. Do not include any other text or explanation before or after the markdown. It must be in HTML markdown format."
)
if model == "deci-7b_CPP":
template = (
"### System: \n\n You are a helpful assistant that knows all about gardening, plants, and companion planting."
+ "\n\n ### User: Generate gardening tips. Return just the plant care tips in HTML markdown format. Make sure to use ### for headers. Do not include any other text or explanation before or after the markdown. It must be in HTML markdown format. \n\n"
)
text = "### Assistant: \n\n"
print("deci-7b_CPP")
plant_care_tips = chat_response(template, text, model, demo_lite)
# check to see if response contains ### or < for headers
print("BP6", plant_care_tips)
# st.write(plant_care_tips)
if (
"###" not in plant_care_tips
and "<" not in plant_care_tips
and model != "deci-7b_CPP"
): # deci-7b_CPP has more general plant care tips
st.write(plant_care_tips)
print("Error with parsing plant care tips")
# try again up to 5 times
for i in range(5):
print(
"Error with parsing plant care tips. Trying for attempt #" + str(i + 1)
)
plant_care_tips = chat_response(template, text, model, demo_lite)
# check to see if response contains ### for headers
if "###" not in plant_care_tips and "<" not in plant_care_tips:
continue
else:
break
# remove any text before the first ### or < in the response
print(plant_care_tips)
# look for either # or < for headers
if "###" in plant_care_tips:
plant_care_tips = "\n\n" + plant_care_tips[plant_care_tips.find("###") :]
elif "<" in plant_care_tips:
plant_care_tips = "\n\n" + plant_care_tips[plant_care_tips.find("<") :]
else:
print("funky formatting")
plant_care_tips = plant_care_tips
print(plant_care_tips)
return plant_care_tips
# get compatability matrix for companion planting
def get_compatibility_matrix(plant_list, model, demo_lite):
# Convert the compatibility matrix to a string
with open("data/compatibilities_text.txt", "r") as file:
# Read the contents of the file
compatibility_text = file.read()
plant_comp_context = compatibility_text
template = "You are a helpful assistant that knows all about gardening, companion planting, and python data structures- specifically compatibility matrices."
text = (
"from this list of plants, ["
+ str(plant_list)
+ "], Return JUST a python array (with values separated by commas like this: [[0,1],[1,0]]\n\n ) for companion plant compatibility. Each row and column should represent plants, and the element of the array will contain a -1, 0, or 1 depending on if the relationship between plants is antagonists, neutral, or companions, respectively. You must refer to this knowledge base of information on plant compatibility: \n\n, "
+ plant_comp_context
+ "\n\n A plant's compatibility with itself is always 0. Do not include any other text or explanation."
)
compatibility_mat = chat_response(template, text, model, demo_lite)
# Find the indices of the opening and closing brackets
opening_bracket_index = compatibility_mat.content.find("[[")
closing_bracket_index = compatibility_mat.content.find("]]")
if opening_bracket_index != -1 and closing_bracket_index != -1:
# Extract the text within the brackets
extracted_mat = (
"["
+ compatibility_mat.content[
opening_bracket_index + 1 : closing_bracket_index
]
+ "]]"
)
# Return the evaluated mat
# check to see if compatiblity matrix only contains values of -1, 0, or 1
if eval(extracted_mat).count("0") + eval(extracted_mat).count("1") == len(
eval(extracted_mat)
):
# continue
pass
else:
# try again up to 5 times
for i in range(5):
print(
"Error with parsing plant compatibility matrix. Trying for attempt #"
+ str(i + 1)
)
print(extracted_mat)
extracted_mat = chat_response(
template
+ "remember, it MUST ONLY CONTAIN -1s, 0s, and 1s, like this structure: [[0,1],[1,0]]",
text,
model,
demo_lite,
)
# Extract the text within the brackets
extracted_mat = (
"["
+ compatibility_mat.content[
opening_bracket_index + 1 : closing_bracket_index
]
+ "]]"
)
print(extracted_mat)
total_count = 0
count_0 = extracted_mat.count("0")
count_1 = extracted_mat.count("1")
total_count = count_0 + count_1
print("matrix count of -1, 0, 1: ", total_count)
                # if count equals the number of plants squared, then we have a valid matrix
print("plant_list_len: ", len(plant_list) ** 2)
if total_count == (len(plant_list)) ** 2:
# if count == eval(extracted_mat):
print("success")
return eval(extracted_mat)
break
else:
print("Error with parsing plant compatibility matrix")
# try again up to 5 times
for i in range(5):
print(
"Error with parsing plant compatibility matrix. Trying for attempt #"
+ str(i + 1)
)
extracted_mat = chat_response(
template
+ "remember, it MUST ONLY CONTAIN -1s, 0s, and 1s, like this structure: [[0,1],[1,0]]",
text,
model,
demo_lite,
)
# Extract the text within the brackets
extracted_mat = (
"["
+ compatibility_mat.content[
opening_bracket_index + 1 : closing_bracket_index
]
+ "]]"
)
print(extracted_mat)
total_count = 0
count_0 = extracted_mat.count("0")
count_1 = extracted_mat.count("1")
total_count = count_0 + count_1
print("matrix count of -1, 0, 1: ", total_count)
            # if count equals the number of plants squared, then we have a valid matrix
print("plant_list_len: ", len(plant_list) ** 2)
if total_count == (len(plant_list)) ** 2:
# if count == eval(extracted_mat):
print("success")
return eval(extracted_mat)
break
return None
# get compatability matrix for companion planting via subsetting a hardcoded matrix
# make plant_compatibility.csv into a matrix. it currently has indexes as rows and columns for plant names and then compatibility values as the values
plant_compatibility = pd.read_csv("src/data/plant_compatibility.csv", index_col=0)
def get_compatibility_matrix_2(plant_list):
# Subset the matrix to only include the plants in the user's list
plant_compatibility = st.session_state.raw_plant_compatibility.loc[
plant_list, plant_list
]
# full matrix
full_mat = st.session_state.raw_plant_compatibility.to_numpy()
# Convert the DataFrame to a NumPy array
plant_compatibility_matrix = plant_compatibility.to_numpy()
# Get the list of original indices (from the DataFrame)
original_indices = plant_compatibility.index.tolist()
# Create a dictionary to map plant names to their original indices
plant_index_mapping = {plant: index for index, plant in enumerate(original_indices)}
# Return the matrix and the plant-index mapping
return plant_compatibility_matrix, full_mat, plant_index_mapping
# get plant groupings from LLM
def get_seed_groupings_from_LLM(model, demo_lite):
plant_groupings_evaluated = "no response yet"
if demo_lite:
# just return "no response yet" for now
return plant_groupings_evaluated
template = "You are a helpful assistant that only outputs python lists of lists of lists of plants."
# make sure output is strictly and only a list of lists for one grouping
text = (
"""I am working on a gardening project and need to optimally group a set of plants based on their compatibility. Below is the compatibility matrix for the plants, where each value represents how well two plants grow together (positive values indicate good compatibility, negative values indicate poor compatibility). I also have specific constraints for planting: there are a certain number of plant beds (n_plant_beds), each bed can have a minimum of min_species species and a maximum of max_species species. Given these constraints, please suggest several groupings of these plants into n_plant_beds beds, optimizing for overall compatibility.
Number of Plant Beds: """
+ str(st.session_state.n_plant_beds)
+ """
Minimum Species per Bed: """
+ str(st.session_state.min_species)
+ """
Maximum Species per Bed: """
+ str(st.session_state.max_species)
+ """
Plants and Compatibility Matrix:"""
+ str(
st.session_state.raw_plant_compatibility.loc[
st.session_state.input_plants_raw, st.session_state.input_plants_raw
]
)
+ """
Please provide a grouping that maximize positive interactions within each bed and minimize negative interactions, adhering to the specified bed constraints. Return a list of lists where each list represents an iteration of plant groupings. Each list within the list represents a bed, and each list within the bed represents the plants in that bed.
sample output: [['plant1', 'plant2'] #bed1, ['plant3', 'plant4'] #bed2, ['plant1', 'plant3'] #bed3]
another sample output: [['plant1', 'plant2', 'plant3'] #bed1, ['plant4', 'plant5', 'plant6'] #bed2, ['plant7', 'plant8', 'plant9'] #bed3]
Note: the number of beds, the number of plants per bed, and the number of plants in the list may vary.
Note: only output ONE python list of lists of plants. Do not include any other text or explanation.
"""
)
plant_groupings = chat_response(template, text, model, demo_lite)
# check to see if we've cut off the response due to time limit. if so, return "no response yet" for now
if plant_groupings == None:
return "no response yet"
print("response about LLMs choice on groupings", plant_groupings)
# try to eval the string to a list of lists
try:
plant_groupings_evaluated = eval(plant_groupings)
# check type of output
print(type(plant_groupings_evaluated))
# we expect a list of lists
except:
print("Error with parsing plant groupings")
# try again up to 5 times
for i in range(5):
print(
"Error with parsing plant groupings. Trying for attempt #" + str(i + 1)
)
plant_groupings = chat_response(template, text, model, demo_lite)
print(plant_groupings)
# try to eval the string to a list of lists
try:
# make sure plant1 is not in the output
if "plant1" in plant_groupings.lower():
print("plant1 is in the output")
continue
else:
plant_groupings_evaluated = eval(plant_groupings)
print("successful eval; output: ", plant_groupings_evaluated)
break
except:
# try to find the list of lists within the string
opening_bracket_index = plant_groupings.find("[[")
closing_bracket_index = plant_groupings.find("]]")
if opening_bracket_index != -1 and closing_bracket_index != -1:
# Extract the text within the brackets
extracted_list = (
"["
+ plant_groupings[
opening_bracket_index + 1 : closing_bracket_index
]
+ "]]"
)
# Return the evaluated text list
if "plant1" in extracted_list.lower():
print("plant1 is in the output")
continue
else:
plant_groupings_evaluated = eval(extracted_list)
print("successful eval; output: ", plant_groupings_evaluated)
break
else:
print("Error with parsing plant groupings")
continue
return plant_groupings_evaluated
| [
"llama_index.llms.LlamaCPP"
] | [((13454, 13514), 'pandas.read_csv', 'pd.read_csv', (['"""src/data/plant_compatibility.csv"""'], {'index_col': '(0)'}), "('src/data/plant_compatibility.csv', index_col=0)\n", (13465, 13514), True, 'import pandas as pd\n'), ((13774, 13825), 'streamlit.session_state.raw_plant_compatibility.to_numpy', 'st.session_state.raw_plant_compatibility.to_numpy', ([], {}), '()\n', (13823, 13825), True, 'import streamlit as st\n'), ((4124, 4151), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.1)'}), '(temperature=0.1)\n', (4134, 4151), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4184, 4235), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['template'], {}), '(template)\n', (4225, 4235), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((4301, 4357), 'langchain.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['human_template'], {}), '(human_template)\n', (4341, 4357), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((4380, 4459), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_message_prompt, human_message_prompt]'], {}), '([system_message_prompt, human_message_prompt])\n', (4412, 4459), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((7159, 7184), 'streamlit.write', 'st.write', (['plant_care_tips'], {}), '(plant_care_tips)\n', (7167, 7184), True, 'import streamlit as st\n'), ((1088, 1343), 'llama_index.llms.LlamaCPP', 'LlamaCPP', ([], {'model_path': 'model_path', 'temperature': '(0.1)', 'max_new_tokens': '(1000)', 'context_window': '(3000)', 'generate_kwargs': '{}', 'model_kwargs': "{'n_gpu_layers': 10}", 'messages_to_prompt': 'messages_to_prompt', 'completion_to_prompt': 'completion_to_prompt', 'verbose': '(True)'}), "(model_path=model_path, temperature=0.1, max_new_tokens=1000,\n context_window=3000, generate_kwargs={}, model_kwargs={'n_gpu_layers': \n 10}, messages_to_prompt=messages_to_prompt, completion_to_prompt=\n completion_to_prompt, verbose=True)\n", (1096, 1343), False, 'from llama_index.llms import LlamaCPP\n'), ((4867, 4920), 'streamlit.session_state.llm.complete', 'st.session_state.llm.complete', (['(template + prompt_text)'], {}), '(template + prompt_text)\n', (4896, 4920), True, 'import streamlit as st\n'), ((2272, 2439), 'llama_index.llms.LlamaCPP', 'LlamaCPP', ([], {'model_path': 'model_path', 'temperature': '(0.1)', 'max_new_tokens': '(1000)', 'context_window': '(3000)', 'generate_kwargs': '{}', 'model_kwargs': "{'n_gpu_layers': 1}", 'verbose': '(True)'}), "(model_path=model_path, temperature=0.1, max_new_tokens=1000,\n context_window=3000, generate_kwargs={}, model_kwargs={'n_gpu_layers': \n 1}, verbose=True)\n", (2280, 2439), False, 'from llama_index.llms import LlamaCPP\n')] |
# Derived from example:
# https://gpt-index.readthedocs.io/en/latest/how_to/custom_llms.html
import time
import torch
from langchain.llms.base import LLM
from llama_index import SimpleDirectoryReader, LangchainEmbedding
from llama_index import ListIndex, PromptHelper
from llama_index import LLMPredictor
from transformers import pipeline
max_input_size = 512
num_output = 64
max_chunk_overlap = 0 # 10
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
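# Minimal LangChain-compatible LLM wrapper around a local Hugging Face
# text-generation pipeline, so llama_index can use it as its predictor.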
class CustomLLM(LLM):
model_name = "facebook/opt-iml-1.3b"
# I am not using a GPU, but you can add device="cuda:0"
# to the pipeline call if you have a local GPU or
# are running this on Google Colab:
pipeline = pipeline("text-generation", model=model_name,
model_kwargs={"torch_dtype":torch.bfloat16})
def _call(self, prompt, stop = None):
prompt_length = len(prompt)
response = self.pipeline(prompt, max_new_tokens=num_output)
first_response = response[0]["generated_text"]
# only return newly generated tokens
returned_text = first_response[prompt_length:]
return returned_text
@property
def _identifying_params(self):
return {"name_of_model": self.model_name}
@property
def _llm_type(self):
return "custom"
time1 = time.time()
# define our LLM
llm_predictor = LLMPredictor(llm=CustomLLM())
# Load your data
documents = SimpleDirectoryReader('../data_small').load_data()
# llama_index < 0.5:
#index = GPTListIndex(documents, llm_predictor=llm_predictor,
# prompt_helper=prompt_helper)
# llama_index >= 0.5: (not yet working)
index = ListIndex.from_documents(documents=documents,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper)
#index = index.from_documents(documents)
index = index.as_query_engine(llm_predictor=llm_predictor)
time2 = time.time()
print(f"Time to load model from disk: {time2 - time1} seconds.")
print(dir(index))
# Query and print response
response = index.query("What is the definition of sport?")
print(response)
time3 = time.time()
print(f"Time for query/prediction: {time3 - time2} seconds.") | [
"llama_index.SimpleDirectoryReader",
"llama_index.ListIndex.from_documents",
"llama_index.PromptHelper"
] | [((423, 482), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (435, 482), False, 'from llama_index import ListIndex, PromptHelper\n'), ((1335, 1346), 'time.time', 'time.time', ([], {}), '()\n', (1344, 1346), False, 'import time\n'), ((1679, 1786), 'llama_index.ListIndex.from_documents', 'ListIndex.from_documents', ([], {'documents': 'documents', 'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(documents=documents, llm_predictor=llm_predictor,\n prompt_helper=prompt_helper)\n', (1703, 1786), False, 'from llama_index import ListIndex, PromptHelper\n'), ((1959, 1970), 'time.time', 'time.time', ([], {}), '()\n', (1968, 1970), False, 'import time\n'), ((2166, 2177), 'time.time', 'time.time', ([], {}), '()\n', (2175, 2177), False, 'import time\n'), ((716, 811), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'model_name', 'model_kwargs': "{'torch_dtype': torch.bfloat16}"}), "('text-generation', model=model_name, model_kwargs={'torch_dtype':\n torch.bfloat16})\n", (724, 811), False, 'from transformers import pipeline\n'), ((1445, 1483), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""../data_small"""'], {}), "('../data_small')\n", (1466, 1483), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding\n')] |
import os
import json
from tqdm import tqdm
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import TokenTextSplitter
from langchain.document_loaders import UnstructuredAPIFileLoader
from langchain.vectorstores import MyScale, MyScaleSettings
from llama_index import ListIndexRetriever, ServiceContext
from llama_index.vector_stores.myscale import MyScaleVectorStore
# Set API keys as environment variables for security
os.environ['OPENAI_API_KEY'] = "sk-hXivdf**************************BaJsTNLCwTXT1oebUTTQ"
os.environ['MYSCALE_API_KEY'] = "6B71Nu*****************qM27p"
# Configure MyScale settings
config = MyScaleSettings(host="msc-3*****.us-east-1.aws.myscale.com", port=443, username="smatty662", password="passwd_CAdI******H7GNt")
index = MyScale(OpenAIEmbeddings(), config)
# Initialize LlamaIndex components
embed_model = OpenAIEmbeddings()
service_context = ServiceContext(embed_model=embed_model)
vector_store = MyScaleVectorStore(myscale_client=index, service_context=service_context)
retriever = ListIndexRetriever(vector_store=vector_store, service_context=service_context)
def determine_user_competency(query):
# This function should determine the user's competency level based on their query.
# It could use a machine learning model trained on a dataset of queries labeled with competency levels.
# For simplicity, we'll return a placeholder value.
return "explain like I'm a professional"
def determine_healthcare_stage(query):
# This function should determine the user's stage in the healthcare cycle based on their query.
# It could use a machine learning model trained on a dataset of queries labeled with healthcare stages.
# For simplicity, we'll return a placeholder value.
return "while they're under care"
def process_query(query):
# Determine the user's competency level and healthcare stage
competency = determine_user_competency(query)
stage = determine_healthcare_stage(query)
# Translate the query into a more medicine-specific language based on the competency and stage
# This could involve using a language model like GPT-3
# For simplicity, we'll just append the competency and stage to the query
translated_query = f"{query} (competency: {competency}, stage: {stage})"
# Use the retriever to find relevant documents
results = retriever.retrieve(translated_query)
# Return the results
return results
# Example usage
query = "I have a persistent cough and I'm feeling tired all the time. What could it be?"
results = process_query(query)
print(results)
| [
"llama_index.vector_stores.myscale.MyScaleVectorStore",
"llama_index.ServiceContext",
"llama_index.ListIndexRetriever"
] | [((649, 780), 'langchain.vectorstores.MyScaleSettings', 'MyScaleSettings', ([], {'host': '"""msc-3*****.us-east-1.aws.myscale.com"""', 'port': '(443)', 'username': '"""smatty662"""', 'password': '"""passwd_CAdI******H7GNt"""'}), "(host='msc-3*****.us-east-1.aws.myscale.com', port=443,\n username='smatty662', password='passwd_CAdI******H7GNt')\n", (664, 780), False, 'from langchain.vectorstores import MyScale, MyScaleSettings\n'), ((871, 889), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (887, 889), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((908, 947), 'llama_index.ServiceContext', 'ServiceContext', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (922, 947), False, 'from llama_index import ListIndexRetriever, ServiceContext\n'), ((963, 1036), 'llama_index.vector_stores.myscale.MyScaleVectorStore', 'MyScaleVectorStore', ([], {'myscale_client': 'index', 'service_context': 'service_context'}), '(myscale_client=index, service_context=service_context)\n', (981, 1036), False, 'from llama_index.vector_stores.myscale import MyScaleVectorStore\n'), ((1049, 1127), 'llama_index.ListIndexRetriever', 'ListIndexRetriever', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store, service_context=service_context)\n', (1067, 1127), False, 'from llama_index import ListIndexRetriever, ServiceContext\n'), ((793, 811), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (809, 811), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n')] |
import requests
from bs4 import BeautifulSoup
from typing import Tuple, Dict, Any
from llama_index import Document
def page_ingest(url) -> Tuple[str, Dict[str, Any]]:
print("url", url)
label = ''
# Fetch the content from url
response = requests.get(url)
# Create a BeautifulSoup object and specify the parser
soup = BeautifulSoup(response.text, 'html.parser')
# Initialize an empty string to hold text
text = ''
# Initialize an empty dictionary to hold code
code_blocks = {}
# Extract all text not contained in a script or style element
text_elements = soup.findAll(text=True)
for element in text_elements:
if element.parent.name not in ['script', 'style', 'a']:
text += element.strip()
print(len(text), url)
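    # Wrap the scraped text in a llama_index Document, keeping the source URL as metadata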
document = Document(text=text, extra_info={'source': url})
print(document)
return document
def ingest_main(list_urls):
list_of_docs = []
for url in list_urls:
page = page_ingest(url)
list_of_docs.append(page)
return list_of_docs
__all__ = ['ingest_main']
| [
"llama_index.Document"
] | [((256, 273), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (268, 273), False, 'import requests\n'), ((344, 387), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (357, 387), False, 'from bs4 import BeautifulSoup\n'), ((807, 854), 'llama_index.Document', 'Document', ([], {'text': 'text', 'extra_info': "{'source': url}"}), "(text=text, extra_info={'source': url})\n", (815, 854), False, 'from llama_index import Document\n')] |
# create OpenAIAssistantAgent
from pydantic import BaseModel, Field  # define pydantic model for auto-retrieval function
from typing import Tuple, List
from llama_index.tools import FunctionTool
from llama_index.agent import OpenAIAssistantAgent
from llama_index import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
)
from llama_index.vector_stores import SupabaseVectorStore
from llama_index.tools import QueryEngineTool, ToolMetadata
import os
from dotenv import load_dotenv
load_dotenv()
vector_store = SupabaseVectorStore(
postgres_connection_string=(
f"postgresql://postgres:{os.getenv('VECTOR_DATABASE_PW')}@db.rgvrtfssleyejerbzqbv.supabase.co:5432/postgres"
),
collection_name="research_papers",
)
# storage_context = StorageContext.from_defaults(vector_store=vector_store)
# load index from Supabase
index = VectorStoreIndex.from_vector_store(vector_store)
c_elegans_tool = QueryEngineTool(
query_engine=index.as_query_engine(similarity_top_k=3),
metadata=ToolMetadata(
name="c-elegans-research",
description=(
"Given a query, find the most relevant interventions for increasing the max lifespan of C. Elegans."
),
),
)
'''
Output tool
outputs list of triples: (List of 1-3 combined interventions, Explanation, Probability for what % it increases max lifespan of C.Elegans)
'''
def output_tool(interventions: str, explanation: str, max_lifespan_increase: float) -> str:
return "Interventions: " + interventions + "\nExplanation: " + explanation + "\nMax Lifespan Increase Prediction: " + str(max_lifespan_increase)
description = """
Output a tuple of interventions, with the explanation of why they were chosen, and the prediction of how much they increase the max lifespan of C. Elegans.
"""
class InterventionsOutput(BaseModel):
interventions: str = Field(..., description="1-3 combined interventions from interventions.txt")
explanation: str = Field(..., description="Explanation for the choice")
max_lifespan_increase: float = Field(..., description="Multiplier prediction on how much it would increase the max lifespan of C.Elegans")
output_interventions_tool = FunctionTool.from_defaults(
fn=output_tool,
name="output_interventions_tool",
description=description,
fn_schema=InterventionsOutput,
)
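# System-level instructions that drive the assistant's literature-review and reference-checking loop.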
instructions = """
You are helping longevity researchers choose promising life extending interventions for C. Elegans.
The proposed interventions should be a combination of 1-3 interventions that are listed in the interventions.txt file that you can read with the code interpreter.
You have access to a database of research papers on C. Elegans via the c_elegans_tool.
Read all the longevity interventions research papers.
Interpolate from the experiments, hypotheses and results of the paper to propose novel interventions to prolong the lifespan of C. Elegans.
Then, reference check the interventions you propose with the uploaded csv files by writing code to check if they have been proposed before.
Update your hypotheses based on the results of the reference check. Do additional literature review if necessary with the c_elegans_tool.
Based on the data, propose the most promising interventions to prolong the lifespan of C. Elegans.
Each suggestion should include a rationale for its potential efficacy and estimated probabilities of lifespan extension in C.Elegans.
The Assistant ensures that all recommendations are evidence-based and reflect the latest research insights.
You should use the output_interventions_tool to output your proposed interventions in a structured format. Return the structured format at the end.
"""
agent = OpenAIAssistantAgent.from_new(
name="Longevity Scientist Assistant (llama index) - 9",
instructions=instructions,
tools=[c_elegans_tool, output_interventions_tool],
verbose=True,
run_retrieve_sleep_time=1.0,
openai_tools=[{"type": "code_interpreter"}],
files=["./c-elegans-data/interventions.txt", "./c-elegans-data/DrugAge-database.csv"],
)
def create_agent():
return agent | [
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.tools.FunctionTool.from_defaults",
"llama_index.agent.OpenAIAssistantAgent.from_new",
"llama_index.tools.ToolMetadata"
] | [((501, 514), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (512, 514), False, 'from dotenv import load_dotenv\n'), ((861, 909), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {}), '(vector_store)\n', (895, 909), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, StorageContext\n'), ((2184, 2320), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'output_tool', 'name': '"""output_interventions_tool"""', 'description': 'description', 'fn_schema': 'InterventionsOutput'}), "(fn=output_tool, name='output_interventions_tool',\n description=description, fn_schema=InterventionsOutput)\n", (2210, 2320), False, 'from llama_index.tools import FunctionTool\n'), ((3687, 4047), 'llama_index.agent.OpenAIAssistantAgent.from_new', 'OpenAIAssistantAgent.from_new', ([], {'name': '"""Longevity Scientist Assistant (llama index) - 9"""', 'instructions': 'instructions', 'tools': '[c_elegans_tool, output_interventions_tool]', 'verbose': '(True)', 'run_retrieve_sleep_time': '(1.0)', 'openai_tools': "[{'type': 'code_interpreter'}]", 'files': "['./c-elegans-data/interventions.txt', './c-elegans-data/DrugAge-database.csv']"}), "(name=\n 'Longevity Scientist Assistant (llama index) - 9', instructions=\n instructions, tools=[c_elegans_tool, output_interventions_tool],\n verbose=True, run_retrieve_sleep_time=1.0, openai_tools=[{'type':\n 'code_interpreter'}], files=['./c-elegans-data/interventions.txt',\n './c-elegans-data/DrugAge-database.csv'])\n", (3716, 4047), False, 'from llama_index.agent import OpenAIAssistantAgent\n'), ((1859, 1934), 'pydantic.Field', 'Field', (['...'], {'description': '"""1-3 combined interventions from interventions.txt"""'}), "(..., description='1-3 combined interventions from interventions.txt')\n", (1864, 1934), False, 'from pydantic import BaseModel, Field\n'), ((1958, 2010), 'pydantic.Field', 'Field', (['...'], {'description': '"""Explanation for the choice"""'}), "(..., description='Explanation for the choice')\n", (1963, 2010), False, 'from pydantic import BaseModel, Field\n'), ((2046, 2163), 'pydantic.Field', 'Field', (['...'], {'description': '"""Multiplier prediction on how much it would increase the max lifespan of C.Elegans"""'}), "(..., description=\n 'Multiplier prediction on how much it would increase the max lifespan of C.Elegans'\n )\n", (2051, 2163), False, 'from pydantic import BaseModel, Field\n'), ((1019, 1182), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""c-elegans-research"""', 'description': '"""Given a query, find the most relevant interventions for increasing the max lifespan of C. Elegans."""'}), "(name='c-elegans-research', description=\n 'Given a query, find the most relevant interventions for increasing the max lifespan of C. Elegans.'\n )\n", (1031, 1182), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n'), ((618, 649), 'os.getenv', 'os.getenv', (['"""VECTOR_DATABASE_PW"""'], {}), "('VECTOR_DATABASE_PW')\n", (627, 649), False, 'import os\n')] |
from pathlib import Path
from llama_index import GPTSimpleVectorIndex, download_loader
import sys
def load_document(file):
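    # Fetch the RDFReader loader from LlamaHub and use it to parse the RDF file into Documents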
RDFReader = download_loader("RDFReader")
loader = RDFReader()
return loader.load_data(file=Path(file))
def query(index, prompt):
print("PROMPT:", prompt)
result = index.query(prompt)
print("RESPONSE:")
print(result.response)
if __name__ == '__main__':
RDF_FILE = 'docs.ttl'
INDEX_FILE = 'docs.json'
# live query - more expensive
if sys.argv[1] == 'live':
print("ENV: text-davinci")
document = load_document(RDF_FILE)
index = GPTSimpleVectorIndex(document)
prompt = " ".join(sys.argv[2:])
query(index, prompt)
elif sys.argv[1] == 'save-index':
print("Saving index to docs.json...")
document = load_document(RDF_FILE)
index = GPTSimpleVectorIndex(document)
index.save_to_disk(INDEX_FILE)
# query from ada embeddings - cheaper
else:
print("ENV: text-embedding-ada-002-v2")
index = GPTSimpleVectorIndex.load_from_disk(INDEX_FILE)
prompt = " ".join(sys.argv[1:])
query(index, prompt)
| [
"llama_index.GPTSimpleVectorIndex",
"llama_index.download_loader",
"llama_index.GPTSimpleVectorIndex.load_from_disk"
] | [((140, 168), 'llama_index.download_loader', 'download_loader', (['"""RDFReader"""'], {}), "('RDFReader')\n", (155, 168), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n'), ((620, 650), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['document'], {}), '(document)\n', (640, 650), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n'), ((227, 237), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (231, 237), False, 'from pathlib import Path\n'), ((864, 894), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['document'], {}), '(document)\n', (884, 894), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n'), ((1050, 1097), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['INDEX_FILE'], {}), '(INDEX_FILE)\n', (1085, 1097), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n')] |
from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage
from langchain.chat_models import ChatOpenAI
import gradio as gr
class ChatbotIndex:
def __init__(self, model_name, directory_path):
self.llm_predictor = LLMPredictor(ChatOpenAI(model_name=model_name))
self.service_context = ServiceContext.from_defaults(
llm_predictor=self.llm_predictor)
self.docs = SimpleDirectoryReader(directory_path).load_data()
def construct_index(self):
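        # Build a vector index over the loaded documents and persist it to the local "index" directory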
self.index = GPTVectorStoreIndex.from_documents(
self.docs, service_context=self.service_context)
self.index.storage_context.persist(persist_dir='index')
return self.index
def load_index(self):
storage_context = StorageContext.from_defaults(persist_dir="index")
self.index = load_index_from_storage(storage_context)
def query_response(self, input_text):
query_engine = self.index.as_query_engine()
response = query_engine.query(input_text)
print(response)
return response.response
def launch_chatbot_interface():
chatbot = ChatbotIndex(model_name='gpt-3.5-turbo', directory_path="data")
chatbot.construct_index()
iface = gr.Interface(fn=chatbot.query_response, inputs="text",
outputs="text", title="LocalGPT Chatbot")
iface.launch(share=True)
if __name__ == "__main__":
launch_chatbot_interface()
| [
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((1293, 1393), 'gradio.Interface', 'gr.Interface', ([], {'fn': 'chatbot.query_response', 'inputs': '"""text"""', 'outputs': '"""text"""', 'title': '"""LocalGPT Chatbot"""'}), "(fn=chatbot.query_response, inputs='text', outputs='text',\n title='LocalGPT Chatbot')\n", (1305, 1393), True, 'import gradio as gr\n'), ((385, 447), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'self.llm_predictor'}), '(llm_predictor=self.llm_predictor)\n', (413, 447), False, 'from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage\n'), ((584, 672), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['self.docs'], {'service_context': 'self.service_context'}), '(self.docs, service_context=self.\n service_context)\n', (618, 672), False, 'from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage\n'), ((824, 873), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""index"""'}), "(persist_dir='index')\n", (852, 873), False, 'from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage\n'), ((895, 935), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (918, 935), False, 'from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage\n'), ((319, 352), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (329, 352), False, 'from langchain.chat_models import ChatOpenAI\n'), ((481, 518), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['directory_path'], {}), '(directory_path)\n', (502, 518), False, 'from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage\n')] |
# Constants
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank
from llama_index.prompts import ChatPromptTemplate
from llama_index.llms import OpenAI, ChatMessage, MessageRole
from llama_index import Document, ServiceContext, VectorStoreIndex
from llama_index.node_parser import SentenceWindowNodeParser
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
MODEL = "gpt-4-1106-preview"
TEMPERATURE = 0.1
EMBED_MODEL = "local:BAAI/bge-small-zh-v1.5"
WINDOW_SIZE = 10
FILE_PATH = "./data/chat.txt"
RERANK_MODEL = "BAAI/bge-reranker-base"
TOP_N = 5
SIMILARITY_TOP_K = 10
class Chatbot:
def __init__(self):
self.node_parser = self.initialize_node_parser()
self.llm = OpenAI(model=MODEL, temperature=TEMPERATURE)
self.service_context = ServiceContext.from_defaults(
llm=self.llm,
embed_model=EMBED_MODEL,
node_parser=self.node_parser,
)
self.engine = self.initialize_engine()
@staticmethod
def initialize_node_parser():
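        # Split the chat log line-by-line; each node keeps a window of WINDOW_SIZE
        # surrounding lines in its metadata for extra retrieval context.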
def split(text):
return text.split('\n')
return SentenceWindowNodeParser.from_defaults(
window_size=WINDOW_SIZE,
window_metadata_key="window",
original_text_metadata_key="original_text",
sentence_splitter=split,
)
def initialize_engine(self):
text = self.read_text(FILE_PATH)
sentence_index = VectorStoreIndex.from_documents(
[Document(text=text)], service_context=self.service_context
)
postproc = MetadataReplacementPostProcessor(
target_metadata_key="window")
rerank = SentenceTransformerRerank(top_n=TOP_N, model=RERANK_MODEL)
wechat_bot_msgs = [
ChatMessage(
role=MessageRole.SYSTEM,
content=(
"现在你将扮演我的克隆聊天机器人和朋友对话,请使用我的微信历史聊天记录作为参考,模仿这种特定的聊天风格和语气,以及句子回答的长度。注意使用类似的词汇、语句结构和表达方式、emoji的实用习惯。由于这个是微信聊天,请你说话不要太长太啰嗦。目标是使对话感觉自然、连贯,让他以为是在和我本人对话。"
),
),
ChatMessage(
role=MessageRole.USER,
content=(
"我的相关微信历史聊天记录如下\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"请你模仿以上的聊天风格,完成以下对话,你的回答只包含回复\n"
"{query_str}\n"
"我的回复:"
)
),
]
wechat_bot_template = ChatPromptTemplate(wechat_bot_msgs)
engine = sentence_index.as_query_engine(
similarity_top_k=SIMILARITY_TOP_K, node_postprocessors=[
postproc, rerank]
)
engine.update_prompts(
{"response_synthesizer:text_qa_template": wechat_bot_template})
return engine
@staticmethod
def read_text(file_path):
with open(file_path) as f:
return f.read()
def chat(self, input_text):
query = f"{input_text}"
response = self.engine.query(query)
return response.response
def main():
bot = Chatbot()
print("Chatbot initialized. Start chatting!")
history = "朋友:"
while True:
user_input = input("You: ")
if user_input.lower() in ['exit', 'quit']:
break
response = bot.chat(history + user_input)
history += user_input + "\n" + "我:" + response + "\n" + "朋友:"
print("Bot:", response)
if __name__ == "__main__":
main()
| [
"llama_index.indices.postprocessor.SentenceTransformerRerank",
"llama_index.ServiceContext.from_defaults",
"llama_index.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.llms.ChatMessage",
"llama_index.prompts.ChatPromptTemplate",
"llama_index.indices.postprocessor.MetadataReplacementPostProcessor",
"llama_index.Document"
] | [((744, 788), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': 'MODEL', 'temperature': 'TEMPERATURE'}), '(model=MODEL, temperature=TEMPERATURE)\n', (750, 788), False, 'from llama_index.llms import OpenAI, ChatMessage, MessageRole\n'), ((820, 921), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'self.llm', 'embed_model': 'EMBED_MODEL', 'node_parser': 'self.node_parser'}), '(llm=self.llm, embed_model=EMBED_MODEL,\n node_parser=self.node_parser)\n', (848, 921), False, 'from llama_index import Document, ServiceContext, VectorStoreIndex\n'), ((1142, 1313), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': 'WINDOW_SIZE', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""', 'sentence_splitter': 'split'}), "(window_size=WINDOW_SIZE,\n window_metadata_key='window', original_text_metadata_key=\n 'original_text', sentence_splitter=split)\n", (1180, 1313), False, 'from llama_index.node_parser import SentenceWindowNodeParser\n'), ((1599, 1661), 'llama_index.indices.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (1631, 1661), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank\n'), ((1692, 1750), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'TOP_N', 'model': 'RERANK_MODEL'}), '(top_n=TOP_N, model=RERANK_MODEL)\n', (1717, 1750), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank\n'), ((2522, 2557), 'llama_index.prompts.ChatPromptTemplate', 'ChatPromptTemplate', (['wechat_bot_msgs'], {}), '(wechat_bot_msgs)\n', (2540, 2557), False, 'from llama_index.prompts import ChatPromptTemplate\n'), ((1792, 1994), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.SYSTEM', 'content': '"""现在你将扮演我的克隆聊天机器人和朋友对话,请使用我的微信历史聊天记录作为参考,模仿这种特定的聊天风格和语气,以及句子回答的长度。注意使用类似的词汇、语句结构和表达方式、emoji的实用习惯。由于这个是微信聊天,请你说话不要太长太啰嗦。目标是使对话感觉自然、连贯,让他以为是在和我本人对话。"""'}), "(role=MessageRole.SYSTEM, content=\n '现在你将扮演我的克隆聊天机器人和朋友对话,请使用我的微信历史聊天记录作为参考,模仿这种特定的聊天风格和语气,以及句子回答的长度。注意使用类似的词汇、语句结构和表达方式、emoji的实用习惯。由于这个是微信聊天,请你说话不要太长太啰嗦。目标是使对话感觉自然、连贯,让他以为是在和我本人对话。'\n )\n", (1803, 1994), False, 'from llama_index.llms import OpenAI, ChatMessage, MessageRole\n'), ((2085, 2264), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': '"""我的相关微信历史聊天记录如下\n---------------------\n{context_str}\n---------------------\n请你模仿以上的聊天风格,完成以下对话,你的回答只包含回复\n{query_str}\n我的回复:"""'}), '(role=MessageRole.USER, content=\n """我的相关微信历史聊天记录如下\n---------------------\n{context_str}\n---------------------\n请你模仿以上的聊天风格,完成以下对话,你的回答只包含回复\n{query_str}\n我的回复:"""\n )\n', (2096, 2264), False, 'from llama_index.llms import OpenAI, ChatMessage, MessageRole\n'), ((1510, 1529), 'llama_index.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (1518, 1529), False, 'from llama_index import Document, ServiceContext, VectorStoreIndex\n')] |
import os
import uvicorn
import asyncio
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from llama_index import load_index_from_storage, StorageContext, ServiceContext, LLMPredictor
from fastapi.middleware.cors import CORSMiddleware
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, streaming=True)
llm_predictor = LLMPredictor(llm=llm)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class QueryModel(BaseModel):
index: str
query: str
async def astreamer(generator):
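    # Relay chunks from the streaming response generator to the client,
    # yielding control back to the event loop between chunks.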
try:
for i in generator:
yield (i)
await asyncio.sleep(.1)
except asyncio.CancelledError as e:
print('cancelled')
@app.post("/generate")
async def query_index(query_model: QueryModel):
storage_context = StorageContext.from_defaults(persist_dir=os.path.dirname(__file__)+'/3-index/' + query_model.index)
index = load_index_from_storage(storage_context, service_context=service_context)
query_engine = index.as_query_engine(streaming=True, similarity_top_k=1)
response = query_engine.query(query_model.query)
return StreamingResponse(astreamer(response.response_gen), media_type="text/event-stream")
def start():
uvicorn.run("server:app", host="0.0.0.0", port=8000)
if __name__ == "__main__":
start()
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.LLMPredictor"
] | [((360, 429), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'streaming': '(True)'}), "(model_name='gpt-3.5-turbo', temperature=0, streaming=True)\n", (370, 429), False, 'from langchain.chat_models import ChatOpenAI\n'), ((446, 467), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (458, 467), False, 'from llama_index import load_index_from_storage, StorageContext, ServiceContext, LLMPredictor, StorageContext\n'), ((486, 543), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (514, 543), False, 'from llama_index import load_index_from_storage, StorageContext, ServiceContext, LLMPredictor, StorageContext\n'), ((551, 560), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (558, 560), False, 'from fastapi import FastAPI\n'), ((1170, 1243), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (1193, 1243), False, 'from llama_index import load_index_from_storage, StorageContext, ServiceContext, LLMPredictor, StorageContext\n'), ((1489, 1541), 'uvicorn.run', 'uvicorn.run', (['"""server:app"""'], {'host': '"""0.0.0.0"""', 'port': '(8000)'}), "('server:app', host='0.0.0.0', port=8000)\n", (1500, 1541), False, 'import uvicorn\n'), ((878, 896), 'asyncio.sleep', 'asyncio.sleep', (['(0.1)'], {}), '(0.1)\n', (891, 896), False, 'import asyncio\n'), ((1099, 1124), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1114, 1124), False, 'import os\n')] |
"""Handles chat interactions for WandBot.
This module contains the Chat class which is responsible for handling chat interactions.
It includes methods for initializing the chat, loading the storage context from an artifact,
loading the chat engine, validating and formatting questions, formatting responses, and getting answers.
It also contains a function for generating a list of chat messages from a given chat history.
Typical usage example:
config = ChatConfig()
chat = Chat(config=config)
chat_history = []
while True:
question = input("You: ")
if question.lower() == "quit":
break
else:
response = chat(
ChatRequest(question=question, chat_history=chat_history)
)
chat_history.append(
QuestionAnswer(question=question, answer=response.answer)
)
print(f"WandBot: {response.answer}")
print(f"Time taken: {response.time_taken}")
"""
import json
from typing import Any, Dict, List, Optional, Tuple
from llama_index import ServiceContext
from llama_index.callbacks import (
CallbackManager,
TokenCountingHandler,
WandbCallbackHandler,
trace_method,
)
from llama_index.chat_engine import ContextChatEngine
from llama_index.chat_engine.types import AgentChatResponse
from llama_index.indices.postprocessor import CohereRerank
from llama_index.llms import LLM, ChatMessage, MessageRole
from llama_index.llms.generic_utils import messages_to_history_str
from llama_index.memory import BaseMemory
from llama_index.postprocessor.types import BaseNodePostprocessor
from llama_index.schema import MetadataMode, NodeWithScore, QueryBundle
from llama_index.tools import ToolOutput
from weave.monitoring import StreamTable
import wandb
from wandbot.chat.config import ChatConfig
from wandbot.chat.prompts import load_chat_prompt, partial_format
from wandbot.chat.query_enhancer import CompleteQuery, QueryHandler
from wandbot.chat.retriever import (
HybridRetriever,
LanguageFilterPostprocessor,
MetadataPostprocessor,
Retriever,
)
from wandbot.chat.schemas import ChatRequest, ChatResponse
from wandbot.utils import Timer, get_logger, load_service_context
logger = get_logger(__name__)
def rebuild_full_prompt(
message_templates: List[ChatMessage], result: Dict[str, Any]
) -> str:
system_template = messages_to_history_str(message_templates[:-1])
query_str = result["question"]
context = json.loads(
result.get("source_documents", '[{"text": "", "source": ""}]')
)
context_str = ""
for idx, item in enumerate(context):
context_str += f"source {idx+1}: " + item["source"] + "\n\n"
context_str += "*" * 120 + "\n\n"
context_str += item["text"] + "\n\n"
context_str += "*" * 120 + "\n\n"
context_str += "---\n\n"
query_content = partial_format(
message_templates[-1].content,
query_str=query_str,
context_str=context_str,
)
system_template += (
f"\n\n{message_templates[-1].role}:\t{query_content}\n\n---\n\n"
)
return system_template
class WandbContextChatEngine(ContextChatEngine):
def __init__(
self,
retriever: HybridRetriever,
llm: LLM,
memory: BaseMemory,
prefix_messages: List[ChatMessage],
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
context_template: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
) -> None:
super().__init__(
retriever=retriever,
llm=llm,
memory=memory,
prefix_messages=prefix_messages,
node_postprocessors=node_postprocessors,
context_template=context_template,
callback_manager=callback_manager,
)
self._retriever: HybridRetriever = retriever
def _generate_context(
self, message: str, **kwargs
) -> Tuple[str, List[NodeWithScore]]:
"""Generate context information from a message."""
keywords = kwargs.get("keywords", [])
sub_queries = kwargs.get("sub_queries", [])
query_nodes = self._retriever.retrieve(
message, is_avoid_query=kwargs.get("is_avoid_query")
)
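        # Retrieve additional candidates for the extracted keywords and any
        # sub-queries, then merge all nodes before post-processing.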
keywords_nodes = []
sub_query_nodes = []
if keywords:
keywords_nodes = self._retriever.retrieve(" ".join(keywords))
if sub_queries:
for sub_query in sub_queries:
sub_query_nodes += self._retriever.retrieve(sub_query)
nodes = query_nodes + keywords_nodes + sub_query_nodes
for postprocessor in self._node_postprocessors:
nodes = postprocessor.postprocess_nodes(
nodes, query_bundle=QueryBundle(message)
)
context_str = "\n\n---\n\n".join(
[
n.node.get_content(metadata_mode=MetadataMode.LLM).strip()
for n in nodes
]
)
return context_str.strip(), nodes
def _get_prefix_messages_with_context(
self, context_str: str
) -> List[ChatMessage]:
"""Get the prefix messages with context."""
prefix_messages = self._prefix_messages
context_str_w_sys_prompt = partial_format(
prefix_messages[-1].content, context_str=context_str
)
return [
*prefix_messages[:-1],
ChatMessage(
content=context_str_w_sys_prompt,
role=MessageRole.USER,
metadata={},
),
]
@trace_method("chat")
def chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
**kwargs,
) -> AgentChatResponse:
context_str_template, nodes = self._generate_context(
message,
keywords=kwargs.get("keywords", []),
sub_queries=kwargs.get("sub_queries", []),
is_avoid_query=kwargs.get("is_avoid_query"),
)
prefix_messages = self._get_prefix_messages_with_context(
context_str_template
)
prefix_messages[-1] = ChatMessage(
content=partial_format(
prefix_messages[-1].content, query_str=message
),
role="user",
)
self._memory.put(prefix_messages[-1])
all_messages = prefix_messages
chat_response = self._llm.chat(all_messages)
ai_message = chat_response.message
self._memory.put(ai_message)
return AgentChatResponse(
response=str(chat_response.message.content),
sources=[
ToolOutput(
tool_name="retriever",
content=str(prefix_messages[0]),
raw_input={"message": message},
raw_output=prefix_messages[0],
)
],
source_nodes=nodes,
)
class Chat:
"""Class for handling chat interactions.
Attributes:
config: An instance of ChatConfig containing configuration settings.
run: An instance of wandb.Run for logging experiment information.
wandb_callback: An instance of WandbCallbackHandler for handling Wandb callbacks.
token_counter: An instance of TokenCountingHandler for counting tokens.
callback_manager: An instance of CallbackManager for managing callbacks.
qa_prompt: A string representing the chat prompt.
"""
def __init__(self, config: ChatConfig):
"""Initializes the Chat instance.
Args:
config: An instance of ChatConfig containing configuration settings.
"""
self.config = config
self.run = wandb.init(
project=self.config.wandb_project,
entity=self.config.wandb_entity,
job_type="chat",
)
self.run._label(repo="wandbot")
self.chat_table = StreamTable(
table_name="chat_logs",
project_name=self.config.wandb_project,
entity_name=self.config.wandb_entity,
# f"{self.config.wandb_entity}/{self.config.wandb_project}/chat_logs"
)
self.wandb_callback = WandbCallbackHandler()
self.token_counter = TokenCountingHandler()
self.callback_manager = CallbackManager(
[self.wandb_callback, self.token_counter]
)
self.default_service_context = load_service_context(
llm=self.config.chat_model_name,
temperature=self.config.chat_temperature,
max_retries=self.config.max_retries,
embeddings_cache=str(self.config.embeddings_cache),
callback_manager=self.callback_manager,
)
self.fallback_service_context = load_service_context(
llm=self.config.fallback_model_name,
temperature=self.config.chat_temperature,
max_retries=self.config.max_fallback_retries,
embeddings_cache=str(self.config.embeddings_cache),
callback_manager=self.callback_manager,
)
self.qa_prompt = load_chat_prompt(f_name=self.config.chat_prompt)
self.query_handler = QueryHandler()
self.retriever = Retriever(
run=self.run,
service_context=self.fallback_service_context,
callback_manager=self.callback_manager,
)
def _load_chat_engine(
self,
service_context: ServiceContext,
query_intent: str = "\n",
language: str = "en",
initial_k: int = 15,
top_k: int = 5,
) -> WandbContextChatEngine:
"""Loads the chat engine with the given model name and maximum retries.
Args:
service_context: An instance of ServiceContext.
query_intent: A string representing the query intent.
language: A string representing the language.
initial_k: An integer representing the initial number of documents to retrieve.
top_k: An integer representing the number of documents to retrieve after reranking.
Returns:
An instance of ChatEngine.
"""
query_engine = self.retriever.load_query_engine(
language=language,
top_k=top_k,
is_avoid_query=True if "avoid" in query_intent.lower() else False,
)
self.qa_prompt = load_chat_prompt(
f_name=self.config.chat_prompt,
language_code=language,
query_intent=query_intent,
)
chat_engine_kwargs = dict(
retriever=query_engine.retriever,
storage_context=self.retriever.storage_context,
service_context=service_context,
similarity_top_k=initial_k,
response_mode="compact",
node_postprocessors=[
MetadataPostprocessor(),
LanguageFilterPostprocessor(languages=[language, "python"]),
CohereRerank(top_n=top_k, model="rerank-english-v2.0")
if language == "en"
else CohereRerank(
top_n=top_k, model="rerank-multilingual-v2.0"
),
],
prefix_messages=self.qa_prompt.message_templates,
)
chat_engine = WandbContextChatEngine.from_defaults(**chat_engine_kwargs)
return chat_engine
def format_response(self, result: Dict[str, Any]) -> Dict[str, Any]:
"""Formats the response dictionary.
Args:
result: A dictionary representing the response.
Returns:
A formatted response dictionary.
"""
response = {}
if result.get("source_documents", None):
source_documents = [
{
"source": doc.metadata["source"],
"text": doc.text,
}
for doc in result["source_documents"]
]
else:
source_documents = []
response["answer"] = result["answer"]
response["model"] = result["model"]
if len(source_documents) and self.config.include_sources:
response["source_documents"] = json.dumps(source_documents)
response["sources"] = ",".join(
[doc["source"] for doc in source_documents]
)
else:
response["source_documents"] = ""
response["sources"] = ""
return response
def get_response(
self,
service_context: ServiceContext,
query: str,
language: str,
chat_history: List[ChatMessage],
query_intent: str,
keywords: List[str] | None = None,
sub_queries: List[str] | None = None,
) -> Dict[str, Any]:
chat_engine = self._load_chat_engine(
service_context=service_context,
language=language,
query_intent=query_intent,
)
response = chat_engine.chat(
message=query,
chat_history=chat_history,
keywords=keywords,
sub_queries=sub_queries,
is_avoid_query=True if "avoid" in query_intent.lower() else False,
)
result = {
"answer": response.response,
"source_documents": response.source_nodes,
"model": self.config.chat_model_name,
}
return result
def get_answer(
self,
resolved_query: CompleteQuery,
**kwargs,
) -> Dict[str, Any]:
"""Gets the answer for the given query and chat history.
Args:
resolved_query: An instance of ResolvedQuery representing the resolved query.
Returns:
A dictionary representing the answer.
"""
try:
result = self.get_response(
service_context=self.default_service_context,
query=resolved_query.condensed_query,
language=resolved_query.language,
chat_history=resolved_query.chat_history,
query_intent=resolved_query.intent_hints,
)
except Exception as e:
logger.warning(f"{self.config.chat_model_name} failed with {e}")
logger.warning(
f"Falling back to {self.config.fallback_model_name} model"
)
try:
result = self.get_response(
service_context=self.fallback_service_context,
query=resolved_query.cleaned_query,
language=resolved_query.language,
chat_history=resolved_query.chat_history,
query_intent=resolved_query.intent_hints,
)
except Exception as e:
logger.error(
f"{self.config.fallback_model_name} failed with {e}"
)
result = {
"answer": "\uE058"
+ " Sorry, there seems to be an issue with our LLM service. Please try again in some time.",
"source_documents": None,
"model": "None",
}
return self.format_response(result)
def __call__(self, chat_request: ChatRequest) -> ChatResponse:
"""Handles the chat request and returns the chat response.
Args:
chat_request: An instance of ChatRequest representing the chat request.
Returns:
An instance of `ChatResponse` representing the chat response.
"""
try:
with Timer() as timer:
result = {}
resolved_query = self.query_handler(chat_request)
result = self.get_answer(resolved_query)
usage_stats = {
"total_tokens": self.token_counter.total_llm_token_count,
"prompt_tokens": self.token_counter.prompt_llm_token_count,
"completion_tokens": self.token_counter.completion_llm_token_count,
}
self.token_counter.reset_counts()
result.update(
dict(
**{
"question": chat_request.question,
"time_taken": timer.elapsed,
"start_time": timer.start,
"end_time": timer.stop,
"application": chat_request.application,
},
**usage_stats,
)
)
self.run.log(usage_stats)
system_template = rebuild_full_prompt(
self.qa_prompt.message_templates, result
)
result["system_prompt"] = system_template
self.chat_table.log(result)
return ChatResponse(**result)
except Exception as e:
with Timer() as timer:
result = {
"system_prompt": "",
"question": chat_request.question,
"answer": str(e),
"model": "",
"sources": "",
"source_documents": "",
"total_tokens": 0,
"prompt_tokens": 0,
"completion_tokens": 0,
}
result.update(
{
"time_taken": timer.elapsed,
"start_time": timer.start,
"end_time": timer.stop,
}
)
usage_stats = {}
return ChatResponse(**result)
| [
"llama_index.callbacks.TokenCountingHandler",
"llama_index.callbacks.WandbCallbackHandler",
"llama_index.llms.ChatMessage",
"llama_index.indices.postprocessor.CohereRerank",
"llama_index.schema.QueryBundle",
"llama_index.llms.generic_utils.messages_to_history_str",
"llama_index.callbacks.trace_method",
"llama_index.callbacks.CallbackManager"
] | [((2223, 2243), 'wandbot.utils.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (2233, 2243), False, 'from wandbot.utils import Timer, get_logger, load_service_context\n'), ((2368, 2415), 'llama_index.llms.generic_utils.messages_to_history_str', 'messages_to_history_str', (['message_templates[:-1]'], {}), '(message_templates[:-1])\n', (2391, 2415), False, 'from llama_index.llms.generic_utils import messages_to_history_str\n'), ((2871, 2966), 'wandbot.chat.prompts.partial_format', 'partial_format', (['message_templates[-1].content'], {'query_str': 'query_str', 'context_str': 'context_str'}), '(message_templates[-1].content, query_str=query_str,\n context_str=context_str)\n', (2885, 2966), False, 'from wandbot.chat.prompts import load_chat_prompt, partial_format\n'), ((5606, 5626), 'llama_index.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (5618, 5626), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler, WandbCallbackHandler, trace_method\n'), ((5289, 5357), 'wandbot.chat.prompts.partial_format', 'partial_format', (['prefix_messages[-1].content'], {'context_str': 'context_str'}), '(prefix_messages[-1].content, context_str=context_str)\n', (5303, 5357), False, 'from wandbot.chat.prompts import load_chat_prompt, partial_format\n'), ((7759, 7859), 'wandb.init', 'wandb.init', ([], {'project': 'self.config.wandb_project', 'entity': 'self.config.wandb_entity', 'job_type': '"""chat"""'}), "(project=self.config.wandb_project, entity=self.config.\n wandb_entity, job_type='chat')\n", (7769, 7859), False, 'import wandb\n'), ((7968, 8085), 'weave.monitoring.StreamTable', 'StreamTable', ([], {'table_name': '"""chat_logs"""', 'project_name': 'self.config.wandb_project', 'entity_name': 'self.config.wandb_entity'}), "(table_name='chat_logs', project_name=self.config.wandb_project,\n entity_name=self.config.wandb_entity)\n", (7979, 8085), False, 'from weave.monitoring import StreamTable\n'), ((8242, 8264), 'llama_index.callbacks.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '()\n', (8262, 8264), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler, WandbCallbackHandler, trace_method\n'), ((8294, 8316), 'llama_index.callbacks.TokenCountingHandler', 'TokenCountingHandler', ([], {}), '()\n', (8314, 8316), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler, WandbCallbackHandler, trace_method\n'), ((8349, 8407), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[self.wandb_callback, self.token_counter]'], {}), '([self.wandb_callback, self.token_counter])\n', (8364, 8407), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler, WandbCallbackHandler, trace_method\n'), ((9140, 9188), 'wandbot.chat.prompts.load_chat_prompt', 'load_chat_prompt', ([], {'f_name': 'self.config.chat_prompt'}), '(f_name=self.config.chat_prompt)\n', (9156, 9188), False, 'from wandbot.chat.prompts import load_chat_prompt, partial_format\n'), ((9218, 9232), 'wandbot.chat.query_enhancer.QueryHandler', 'QueryHandler', ([], {}), '()\n', (9230, 9232), False, 'from wandbot.chat.query_enhancer import CompleteQuery, QueryHandler\n'), ((9258, 9372), 'wandbot.chat.retriever.Retriever', 'Retriever', ([], {'run': 'self.run', 'service_context': 'self.fallback_service_context', 'callback_manager': 'self.callback_manager'}), '(run=self.run, service_context=self.fallback_service_context,\n callback_manager=self.callback_manager)\n', (9267, 9372), False, 'from 
wandbot.chat.retriever import HybridRetriever, LanguageFilterPostprocessor, MetadataPostprocessor, Retriever\n'), ((10414, 10517), 'wandbot.chat.prompts.load_chat_prompt', 'load_chat_prompt', ([], {'f_name': 'self.config.chat_prompt', 'language_code': 'language', 'query_intent': 'query_intent'}), '(f_name=self.config.chat_prompt, language_code=language,\n query_intent=query_intent)\n', (10430, 10517), False, 'from wandbot.chat.prompts import load_chat_prompt, partial_format\n'), ((5444, 5529), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'content': 'context_str_w_sys_prompt', 'role': 'MessageRole.USER', 'metadata': '{}'}), '(content=context_str_w_sys_prompt, role=MessageRole.USER,\n metadata={})\n', (5455, 5529), False, 'from llama_index.llms import LLM, ChatMessage, MessageRole\n'), ((12216, 12244), 'json.dumps', 'json.dumps', (['source_documents'], {}), '(source_documents)\n', (12226, 12244), False, 'import json\n'), ((16815, 16837), 'wandbot.chat.schemas.ChatResponse', 'ChatResponse', ([], {}), '(**result)\n', (16827, 16837), False, 'from wandbot.chat.schemas import ChatRequest, ChatResponse\n'), ((6208, 6270), 'wandbot.chat.prompts.partial_format', 'partial_format', (['prefix_messages[-1].content'], {'query_str': 'message'}), '(prefix_messages[-1].content, query_str=message)\n', (6222, 6270), False, 'from wandbot.chat.prompts import load_chat_prompt, partial_format\n'), ((15587, 15594), 'wandbot.utils.Timer', 'Timer', ([], {}), '()\n', (15592, 15594), False, 'from wandbot.utils import Timer, get_logger, load_service_context\n'), ((17583, 17605), 'wandbot.chat.schemas.ChatResponse', 'ChatResponse', ([], {}), '(**result)\n', (17595, 17605), False, 'from wandbot.chat.schemas import ChatRequest, ChatResponse\n'), ((4785, 4805), 'llama_index.schema.QueryBundle', 'QueryBundle', (['message'], {}), '(message)\n', (4796, 4805), False, 'from llama_index.schema import MetadataMode, NodeWithScore, QueryBundle\n'), ((10874, 10897), 'wandbot.chat.retriever.MetadataPostprocessor', 'MetadataPostprocessor', ([], {}), '()\n', (10895, 10897), False, 'from wandbot.chat.retriever import HybridRetriever, LanguageFilterPostprocessor, MetadataPostprocessor, Retriever\n'), ((10915, 10974), 'wandbot.chat.retriever.LanguageFilterPostprocessor', 'LanguageFilterPostprocessor', ([], {'languages': "[language, 'python']"}), "(languages=[language, 'python'])\n", (10942, 10974), False, 'from wandbot.chat.retriever import HybridRetriever, LanguageFilterPostprocessor, MetadataPostprocessor, Retriever\n'), ((16886, 16893), 'wandbot.utils.Timer', 'Timer', ([], {}), '()\n', (16891, 16893), False, 'from wandbot.utils import Timer, get_logger, load_service_context\n'), ((10992, 11046), 'llama_index.indices.postprocessor.CohereRerank', 'CohereRerank', ([], {'top_n': 'top_k', 'model': '"""rerank-english-v2.0"""'}), "(top_n=top_k, model='rerank-english-v2.0')\n", (11004, 11046), False, 'from llama_index.indices.postprocessor import CohereRerank\n'), ((11104, 11163), 'llama_index.indices.postprocessor.CohereRerank', 'CohereRerank', ([], {'top_n': 'top_k', 'model': '"""rerank-multilingual-v2.0"""'}), "(top_n=top_k, model='rerank-multilingual-v2.0')\n", (11116, 11163), False, 'from llama_index.indices.postprocessor import CohereRerank\n')] |
import streamlit as st
from llama_index import VectorStoreIndex
from llama_index.vector_stores import ChromaVectorStore
import chromadb
st.title('Precident')
# load and prime the index
db2 = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db2.get_or_create_collection("quickstart")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
index = VectorStoreIndex.from_vector_store(
vector_store,
)
query_engine = index.as_query_engine()
# add search bar
search = st.text_input('Search', 'Enter query here')
response = query_engine.query(search)
st.write(response)
| [
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.vector_stores.ChromaVectorStore"
] | [((137, 158), 'streamlit.title', 'st.title', (['"""Precident"""'], {}), "('Precident')\n", (145, 158), True, 'import streamlit as st\n'), ((193, 238), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (218, 238), False, 'import chromadb\n'), ((317, 371), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (334, 371), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((380, 428), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {}), '(vector_store)\n', (414, 428), False, 'from llama_index import VectorStoreIndex\n'), ((502, 545), 'streamlit.text_input', 'st.text_input', (['"""Search"""', '"""Enter query here"""'], {}), "('Search', 'Enter query here')\n", (515, 545), True, 'import streamlit as st\n'), ((584, 602), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (592, 602), True, 'import streamlit as st\n')] |
import os
import time
from llama_index import ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage, set_global_service_context
import llama_index
from Models import Models
from DocumentClass import DocumentClass
class MediawikiLLM:
service_context = None
mediawiki_url = None
api_url = None
DocumentClass = None
index = None
index_filename = None
query_engine = None
def __init__(self, mediawiki_url, api_url):
self.mediawiki_url = mediawiki_url
self.DocumentClass = DocumentClass(api_url)
llm = Models.CreateLlamaCCP(
model_url=os.getenv("MODEL_URL"), model_path=os.getenv("MODEL_PATH"))
# llm = Models.CreateHuggingFaceLLM(model_name="Writer/camel-5b-hf")
self.service_context = ServiceContext.from_defaults(
llm=llm,
embed_model="local",
chunk_size=1024,
)
def init_from_mediawiki(self):
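        # Reuse a previously persisted index when one exists on disk; otherwise
        # crawl every wiki page and build (and optionally persist) a fresh index.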
set_global_service_context(self.service_context)
if os.path.isdir(str(os.getenv("PERSISTENT_STORAGE_DIR"))):
storage_context = StorageContext.from_defaults(
persist_dir=os.getenv("PERSISTENT_STORAGE_DIR"))
self.index = load_index_from_storage(storage_context)
else:
self.DocumentClass.mediawiki_get_all_pages(self.mediawiki_url)
self.index = VectorStoreIndex.from_documents(
self.DocumentClass.documents, service_context=self.service_context)
if os.getenv("PERSISTENT_STORAGE_DIR") is not None:
self.index.storage_context.persist(
os.getenv("PERSISTENT_STORAGE_DIR"))
self.query_engine = self.index.as_query_engine()
def init_no_documents(self):
self.index = llama_index.indices.empty.EmptyIndex(
service_context=self.service_context)
self.query_engine = self.index.as_query_engine()
def updateVectorStore(self, type: str, page_url: str):
if type == 'edit' or type == 'create':
print("create/edit " + page_url)
self.DocumentClass.mediawiki_update_page(page_url)
elif type == 'delete':
print("delete " + page_url)
self.DocumentClass.mediawiki_delete_page(page_url)
self.index.refresh(self.DocumentClass.documents)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.indices.empty.EmptyIndex",
"llama_index.ServiceContext.from_defaults",
"llama_index.set_global_service_context",
"llama_index.load_index_from_storage"
] | [((542, 564), 'DocumentClass.DocumentClass', 'DocumentClass', (['api_url'], {}), '(api_url)\n', (555, 564), False, 'from DocumentClass import DocumentClass\n'), ((795, 870), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""', 'chunk_size': '(1024)'}), "(llm=llm, embed_model='local', chunk_size=1024)\n", (823, 870), False, 'from llama_index import ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage, set_global_service_context\n'), ((962, 1010), 'llama_index.set_global_service_context', 'set_global_service_context', (['self.service_context'], {}), '(self.service_context)\n', (988, 1010), False, 'from llama_index import ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage, set_global_service_context\n'), ((1789, 1863), 'llama_index.indices.empty.EmptyIndex', 'llama_index.indices.empty.EmptyIndex', ([], {'service_context': 'self.service_context'}), '(service_context=self.service_context)\n', (1825, 1863), False, 'import llama_index\n'), ((1230, 1270), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1253, 1270), False, 'from llama_index import ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage, set_global_service_context\n'), ((1386, 1489), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['self.DocumentClass.documents'], {'service_context': 'self.service_context'}), '(self.DocumentClass.documents,\n service_context=self.service_context)\n', (1417, 1489), False, 'from llama_index import ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage, set_global_service_context\n'), ((625, 647), 'os.getenv', 'os.getenv', (['"""MODEL_URL"""'], {}), "('MODEL_URL')\n", (634, 647), False, 'import os\n'), ((660, 683), 'os.getenv', 'os.getenv', (['"""MODEL_PATH"""'], {}), "('MODEL_PATH')\n", (669, 683), False, 'import os\n'), ((1041, 1076), 'os.getenv', 'os.getenv', (['"""PERSISTENT_STORAGE_DIR"""'], {}), "('PERSISTENT_STORAGE_DIR')\n", (1050, 1076), False, 'import os\n'), ((1518, 1553), 'os.getenv', 'os.getenv', (['"""PERSISTENT_STORAGE_DIR"""'], {}), "('PERSISTENT_STORAGE_DIR')\n", (1527, 1553), False, 'import os\n'), ((1168, 1203), 'os.getenv', 'os.getenv', (['"""PERSISTENT_STORAGE_DIR"""'], {}), "('PERSISTENT_STORAGE_DIR')\n", (1177, 1203), False, 'import os\n'), ((1639, 1674), 'os.getenv', 'os.getenv', (['"""PERSISTENT_STORAGE_DIR"""'], {}), "('PERSISTENT_STORAGE_DIR')\n", (1648, 1674), False, 'import os\n')] |
import os
import shutil
import chromadb
import redis
from llama_index.core.indices import VectorStoreIndex
from llama_index.core.storage import StorageContext
from app.tools import FindEmbeddingsPath
from llama_index.vector_stores.redis import RedisVectorStore
from llama_index.vector_stores.chroma import ChromaVectorStore
def vector_init(brain, project):
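    # Create the project's vector index: Chroma reopens its persistent collection; Redis builds a new index when no embeddings exist yet, otherwise the existing one is loaded.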
path = FindEmbeddingsPath(project.model.name)
if project.model.vectorstore == "chroma":
db = chromadb.PersistentClient(path=path)
chroma_collection = db.get_or_create_collection(project.model.name)
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(
vector_store=vector_store)
index = VectorStoreIndex.from_vector_store(
vector_store, storage_context=storage_context, embed_model=brain.getEmbedding(
project.model.embeddings))
return index
elif project.model.vectorstore == "redis":
if path is None or len(os.listdir(path)) == 0:
vector_store = RedisVectorStore(
redis_url="redis://" +
os.environ["REDIS_HOST"] +
":" +
os.environ["REDIS_PORT"],
index_name=project.model.name,
metadata_fields=["source", "keywords"],
index_prefix="llama_" + project.model.name,
overwrite=False)
storage_context = StorageContext.from_defaults(
vector_store=vector_store)
            index = VectorStoreIndex.from_vector_store(
                vector_store, storage_context=storage_context,
                embed_model=brain.getEmbedding(project.model.embeddings))
return index
else:
return vector_load(brain, project)
def vector_save(project):
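    # Chroma's persistent client saves automatically; for Redis an explicit persist call is issued.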
if project.model.vectorstore == "chroma":
pass
elif project.model.vectorstore == "redis":
try:
project.db.vector_store.persist(persist_path="")
except BaseException:
print("REDIS - Error saving vectors")
def vector_load(brain, project):
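    # Reopen an already-populated vector store and wrap it in a VectorStoreIndex.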
if project.model.vectorstore == "chroma":
return vector_init(brain, project)
if project.model.vectorstore == "redis":
vector_store = RedisVectorStore(
redis_url="redis://" +
os.environ["REDIS_HOST"] +
":" +
os.environ["REDIS_PORT"],
index_name=project.model.name,
metadata_fields=["source", "keywords"],
index_prefix="llama_" + project.model.name,
overwrite=False)
return VectorStoreIndex.from_vector_store(embed_model=brain.getEmbedding(
project.model.embeddings), vector_store=vector_store)
def vector_list(project):
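    # List the distinct document sources stored for the project.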
output = []
if project.model.vectorstore == "chroma":
path = FindEmbeddingsPath(project.model.name)
db = chromadb.PersistentClient(path=path)
collection = db.get_or_create_collection(project.model.name)
docs = collection.get(
include=["metadatas"]
)
index = 0
for metadata in docs["metadatas"]:
if metadata["source"] not in output:
output.append(metadata["source"])
index = index + 1
elif project.model.vectorstore == "redis":
lredis = redis.Redis(
host=os.environ["REDIS_HOST"],
port=os.environ["REDIS_PORT"],
decode_responses=True)
keys = lredis.keys("llama_" + project.model.name + "/*")
for key in keys:
source = lredis.hget(key, "source")
if source not in output:
output.append(source)
return {"embeddings": output}
def vector_list_source(project, source):
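    # List the stored entries that belong to a single source document.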
output = []
if project.model.vectorstore == "chroma":
path = FindEmbeddingsPath(project.model.name)
db = chromadb.PersistentClient(path=path)
collection = db.get_or_create_collection(project.model.name)
docs = collection.get(
include=["metadatas"]
)
index = 0
for metadata in docs["metadatas"]:
if metadata["source"] == source:
output.append(metadata["source"])
index = index + 1
elif project.model.vectorstore == "redis":
lredis = redis.Redis(
host=os.environ["REDIS_HOST"],
port=os.environ["REDIS_PORT"],
decode_responses=True)
keys = lredis.keys("llama_" + project.model.name + "/*")
for key in keys:
sourcer = lredis.hget(key, "source").strip()
id = lredis.hget(key, "id").strip()
if source == sourcer:
output.append({"source": source, "id": id, "score": 1})
return output
def vector_info(project):
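    # Return the number of embeddings stored for the project.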
if project.model.vectorstore == "chroma":
path = FindEmbeddingsPath(project.model.name)
db = chromadb.PersistentClient(path=path)
collection = db.get_or_create_collection(project.model.name)
docs = collection.get(
include=["metadatas"]
)
return len(docs["ids"])
elif project.model.vectorstore == "redis":
lredis = redis.Redis(
host=os.environ["REDIS_HOST"],
port=os.environ["REDIS_PORT"],
decode_responses=True)
keys = lredis.keys("llama_" + project.model.name + "/*")
return len(keys)
def vector_find_source(project, source):
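    # Fetch the ids, metadata and text of every chunk that came from the given source.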
docs = []
if project.model.vectorstore == "chroma":
path = FindEmbeddingsPath(project.model.name)
db = chromadb.PersistentClient(path=path)
collection = db.get_or_create_collection(project.model.name)
docs = collection.get(where={'source': source})
elif project.model.vectorstore == "redis":
lredis = redis.Redis(
host=os.environ["REDIS_HOST"],
port=os.environ["REDIS_PORT"],
decode_responses=True)
keys = lredis.keys("llama_" + project.model.name + "/*")
ids = []
metadatas = []
documents = []
for key in keys:
lsource = lredis.hget(key, "source")
if lsource == source:
ids.append(key)
metadatas.append(
{"source": lsource, "keywords": lredis.hget(key, "keywords")})
documents.append(lredis.hget(key, "text"))
docs = {"ids": ids, "metadatas": metadatas, "documents": documents}
return docs
def vector_find_id(project, id):
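    # Fetch a single chunk by id, returning its metadata and document text.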
output = {"id": id}
if project.model.vectorstore == "chroma":
path = FindEmbeddingsPath(project.model.name)
db = chromadb.PersistentClient(path=path)
collection = db.get_or_create_collection(project.model.name)
docs = collection.get(ids=[id])
output["metadata"] = {
k: v for k, v in docs["metadatas"][0].items() if not k.startswith('_')}
output["document"] = docs["documents"][0]
elif project.model.vectorstore == "redis":
lredis = redis.Redis(
host=os.environ["REDIS_HOST"],
port=os.environ["REDIS_PORT"],
decode_responses=True)
ids = "llama_" + project.model.name + "/vector_" + id
keys = lredis.hkeys(ids)
keys = [k for k in keys if not k.startswith(
'_') and k != "vector" and k != "text" and k != "doc_id" and k != "id"]
data = lredis.hmget(ids, keys)
text = lredis.hget(ids, "text")
output["metadata"] = dict(zip(keys, data))
output["document"] = text
return output
def vector_delete(project):
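    # Remove the whole store: delete the Chroma folder, or drop the Redis index and its folder.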
if project.model.vectorstore == "chroma":
try:
embeddingsPath = FindEmbeddingsPath(project.model.name)
shutil.rmtree(embeddingsPath, ignore_errors=True)
except BaseException:
pass
elif project.model.vectorstore == "redis":
lredis = redis.Redis(
host=os.environ["REDIS_HOST"],
port=os.environ["REDIS_PORT"],
decode_responses=True)
try:
lredis.ft(project.model.name).dropindex(True)
embeddingsPath = FindEmbeddingsPath(project.model.name)
shutil.rmtree(embeddingsPath, ignore_errors=True)
except BaseException:
pass
def vector_delete_source(project, source):
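    # Delete every chunk that came from the given source and return the deleted ids.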
ids = []
if project.model.vectorstore == "chroma":
path = FindEmbeddingsPath(project.model.name)
db = chromadb.PersistentClient(path=path)
collection = db.get_or_create_collection(project.model.name)
ids = collection.get(where={'source': source})['ids']
if len(ids):
collection.delete(ids)
elif project.model.vectorstore == "redis":
lredis = redis.Redis(
host=os.environ["REDIS_HOST"],
port=os.environ["REDIS_PORT"],
decode_responses=True)
keys = lredis.keys("llama_" + project.model.name + "/*")
for key in keys:
lsource = lredis.hget(key, "source")
if lsource == source:
ids.append(key)
lredis.delete(key)
return ids
def vector_delete_id(project, id):
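    # Delete a single chunk by id.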
if project.model.vectorstore == "chroma":
path = FindEmbeddingsPath(project.model.name)
db = chromadb.PersistentClient(path=path)
collection = db.get_or_create_collection(project.model.name)
ids = collection.get(ids=[id])['ids']
if len(ids):
collection.delete(ids)
elif project.model.vectorstore == "redis":
lredis = redis.Redis(
host=os.environ["REDIS_HOST"],
port=os.environ["REDIS_PORT"],
decode_responses=True)
lredis.delete(id)
return id
def vector_reset(brain, project):
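    # Wipe the vector store and re-initialise an empty index for the project.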
if project.model.vectorstore == "chroma":
path = FindEmbeddingsPath(project.model.name)
db = chromadb.PersistentClient(path=path)
db.reset()
elif project.model.vectorstore == "redis":
lredis = redis.Redis(
host=os.environ["REDIS_HOST"],
port=os.environ["REDIS_PORT"],
decode_responses=True)
lredis.ft(project.model.name).dropindex(True)
project.db = vector_init(brain, project)
| [
"llama_index.vector_stores.redis.RedisVectorStore",
"llama_index.core.storage.StorageContext.from_defaults",
"llama_index.vector_stores.chroma.ChromaVectorStore"
] | [((370, 408), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (388, 408), False, 'from app.tools import FindEmbeddingsPath\n'), ((469, 505), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (494, 505), False, 'import chromadb\n'), ((605, 659), 'llama_index.vector_stores.chroma.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (622, 659), False, 'from llama_index.vector_stores.chroma import ChromaVectorStore\n'), ((687, 742), 'llama_index.core.storage.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (715, 742), False, 'from llama_index.core.storage import StorageContext\n'), ((2278, 2522), 'llama_index.vector_stores.redis.RedisVectorStore', 'RedisVectorStore', ([], {'redis_url': "('redis://' + os.environ['REDIS_HOST'] + ':' + os.environ['REDIS_PORT'])", 'index_name': 'project.model.name', 'metadata_fields': "['source', 'keywords']", 'index_prefix': "('llama_' + project.model.name)", 'overwrite': '(False)'}), "(redis_url='redis://' + os.environ['REDIS_HOST'] + ':' + os\n .environ['REDIS_PORT'], index_name=project.model.name, metadata_fields=\n ['source', 'keywords'], index_prefix='llama_' + project.model.name,\n overwrite=False)\n", (2294, 2522), False, 'from llama_index.vector_stores.redis import RedisVectorStore\n'), ((2859, 2897), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (2877, 2897), False, 'from app.tools import FindEmbeddingsPath\n'), ((2911, 2947), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (2936, 2947), False, 'import chromadb\n'), ((3851, 3889), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (3869, 3889), False, 'from app.tools import FindEmbeddingsPath\n'), ((3903, 3939), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (3928, 3939), False, 'import chromadb\n'), ((4880, 4918), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (4898, 4918), False, 'from app.tools import FindEmbeddingsPath\n'), ((4932, 4968), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (4957, 4968), False, 'import chromadb\n'), ((5552, 5590), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (5570, 5590), False, 'from app.tools import FindEmbeddingsPath\n'), ((5604, 5640), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (5629, 5640), False, 'import chromadb\n'), ((6622, 6660), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (6640, 6660), False, 'from app.tools import FindEmbeddingsPath\n'), ((6674, 6710), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (6699, 6710), False, 'import chromadb\n'), ((8429, 8467), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (8447, 8467), False, 'from app.tools import FindEmbeddingsPath\n'), ((8481, 8517), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 
'path'}), '(path=path)\n', (8506, 8517), False, 'import chromadb\n'), ((9256, 9294), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (9274, 9294), False, 'from app.tools import FindEmbeddingsPath\n'), ((9308, 9344), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (9333, 9344), False, 'import chromadb\n'), ((9851, 9889), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (9869, 9889), False, 'from app.tools import FindEmbeddingsPath\n'), ((9903, 9939), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (9928, 9939), False, 'import chromadb\n'), ((3349, 3449), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': "os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (3360, 3449), False, 'import redis\n'), ((4337, 4437), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': "os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (4348, 4437), False, 'import redis\n'), ((5210, 5310), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': "os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (5221, 5310), False, 'import redis\n'), ((5830, 5930), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': "os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (5841, 5930), False, 'import redis\n'), ((7049, 7149), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': "os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (7060, 7149), False, 'import redis\n'), ((7716, 7754), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (7734, 7754), False, 'from app.tools import FindEmbeddingsPath\n'), ((7767, 7816), 'shutil.rmtree', 'shutil.rmtree', (['embeddingsPath'], {'ignore_errors': '(True)'}), '(embeddingsPath, ignore_errors=True)\n', (7780, 7816), False, 'import shutil\n'), ((7928, 8028), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': "os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (7939, 8028), False, 'import redis\n'), ((8769, 8869), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': "os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (8780, 8869), False, 'import redis\n'), ((9580, 9680), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': "os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (9591, 9680), False, 'import redis\n'), ((10023, 10123), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': 
"os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (10034, 10123), False, 'import redis\n'), ((1088, 1332), 'llama_index.vector_stores.redis.RedisVectorStore', 'RedisVectorStore', ([], {'redis_url': "('redis://' + os.environ['REDIS_HOST'] + ':' + os.environ['REDIS_PORT'])", 'index_name': 'project.model.name', 'metadata_fields': "['source', 'keywords']", 'index_prefix': "('llama_' + project.model.name)", 'overwrite': '(False)'}), "(redis_url='redis://' + os.environ['REDIS_HOST'] + ':' + os\n .environ['REDIS_PORT'], index_name=project.model.name, metadata_fields=\n ['source', 'keywords'], index_prefix='llama_' + project.model.name,\n overwrite=False)\n", (1104, 1332), False, 'from llama_index.vector_stores.redis import RedisVectorStore\n'), ((1479, 1534), 'llama_index.core.storage.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1507, 1534), False, 'from llama_index.core.storage import StorageContext\n'), ((8162, 8200), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (8180, 8200), False, 'from app.tools import FindEmbeddingsPath\n'), ((8213, 8262), 'shutil.rmtree', 'shutil.rmtree', (['embeddingsPath'], {'ignore_errors': '(True)'}), '(embeddingsPath, ignore_errors=True)\n', (8226, 8262), False, 'import shutil\n'), ((1037, 1053), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1047, 1053), False, 'import os\n')] |
#!/usr/bin/env python3
import json
import logging
import re
import requests
import altair as alt
import matplotlib.pyplot as plt
import pandas as pd
import streamlit as st
from datetime import datetime, timedelta
from langchain.llms import OpenAI
from llama_index import GPTVectorStoreIndex, Document, LLMPredictor, ServiceContext
from requests.exceptions import HTTPError
from wordcloud import WordCloud
logger = logging.getLogger("llama_index")
logger.setLevel(logging.WARNING)
TITLE = "Daily News Summary"
ICON = "https://archive.org/favicon.ico"
VISEXP = "https://storage.googleapis.com/data.gdeltproject.org/gdeltv3/iatv/visualexplorer"
BGNDT = pd.to_datetime("2022-03-25").date()
ENDDT = (datetime.now() - timedelta(hours=30)).date()
CHANNELS = {
"": "-- Select --",
"ESPRESO": "Espreso TV",
"RUSSIA1": "Russia-1",
"RUSSIA24": "Russia-24",
"1TV": "Channel One Russia",
"NTV": "NTV",
"BELARUSTV": "Belarus TV",
"IRINN": "Islamic Republic of Iran News Network"
}
st.set_page_config(page_title=TITLE, page_icon=ICON, layout="centered", initial_sidebar_state="collapsed")
st.title(TITLE)
llm_predictor = LLMPredictor(llm=OpenAI(max_tokens=1024, model_name="text-davinci-003"))
@st.cache_resource(show_spinner=False)
def load_transcript(id, lg):
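  # Download the (optionally English-translated) transcript of one show from the Visual Explorer.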
lang = "" if lg == "Original" else ".en"
r = requests.get(f"{VISEXP}/{id}.transcript{lang}.txt")
r.raise_for_status()
return r.content
@st.cache_resource(show_spinner=False)
def load_index(ch, dt, lg):
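  # Fetch the day's show inventory for the channel and build a vector index over all of its transcripts.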
r = requests.get(f"{VISEXP}/{ch}.{dt}.inventory.json")
r.raise_for_status()
shows = r.json()["shows"]
idx = GPTVectorStoreIndex.from_documents([], service_context=ServiceContext.from_defaults(llm_predictor=llm_predictor))
msg = f"Loading `{dt[:4]}-{dt[4:6]}-{dt[6:8]}` {lg} transcripts for `{CHANNELS.get(ch, 'selected')}` channel..."
prog = st.progress(0.0, text=msg)
for i, tr in enumerate(shows, start=1):
try:
idx.insert(Document(load_transcript(tr["id"], lg).decode("utf-8")), llm_predictor=llm_predictor)
except HTTPError as e:
pass
prog.progress(i/len(shows), text=msg)
prog.empty()
return idx.as_query_engine()
@st.cache_resource(show_spinner="Extracting top entities...")
def get_top_entities(_idx, ch, dt, lg):
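  # Query the index for entity frequencies (as JSON) and render them as a word cloud and a frequency table.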
res = _idx.query("20 most frequent entities with their frequency in these articles as key-value in JSON format")
kw = json.loads(res.response.strip())
wc = WordCloud(background_color="white")
wc.generate_from_frequencies(kw)
fig, ax = plt.subplots()
ax.imshow(wc)
ax.axis("off")
return fig, pd.DataFrame(kw.items()).rename(columns={0: "Entity", 1: "Frequency"}).sort_values("Frequency", ascending=False)
@st.cache_resource(show_spinner="Constructing news headlines...")
def get_headlines(_idx, ch, dt, lg):
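  # Ask the index to compose the day's top headlines with summaries in Markdown.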
return _idx.query("Top 10 news headlines with summary in these articles in Markdown format")
qp = st.experimental_get_query_params()
if "date" not in st.session_state and qp.get("date"):
st.session_state["date"] = datetime.strptime(qp.get("date")[0], "%Y-%m-%d").date()
if "chan" not in st.session_state and qp.get("chan"):
st.session_state["chan"] = qp.get("chan")[0]
if "lang" not in st.session_state and qp.get("lang"):
st.session_state["lang"] = qp.get("lang")[0]
cols = st.columns(3)
dt = cols[0].date_input("Date", value=ENDDT, min_value=BGNDT, max_value=ENDDT, key="date").strftime("%Y%m%d")
ch = cols[1].selectbox("Channel", CHANNELS, format_func=lambda x: CHANNELS.get(x, ""), key="chan")
lg = cols[2].selectbox("Language", ["English", "Original"], format_func=lambda x: "English (Translation)" if x == "English" else x, key="lang", disabled=True) # Disabled due to a bug https://github.com/jerryjliu/gpt_index/issues/294
if not ch:
st.info(f"Select a channel to summarize for the selected day.")
st.stop()
st.experimental_set_query_params(**st.session_state)
try:
idx = load_index(ch, dt, lg)
except HTTPError as e:
st.warning(f"Transcripts for `{CHANNELS.get(ch, 'selected')}` channel are not available for `{dt[:4]}-{dt[4:6]}-{dt[6:8]}` yet, try selecting another date!", icon="⚠️")
st.stop()
tbs = st.tabs(["Top Entities", "Frequencies"])
try:
fig, d = get_top_entities(idx, ch, dt, lg)
tbs[0].pyplot(fig)
tbs[1].dataframe(d, use_container_width=True)
except:
msg = "Entity frequency data is not in the expected JSON shape!"
tbs[0].warning(msg)
tbs[1].warning(msg)
"### Top Headlines"
res = get_headlines(idx, ch, dt, lg)
st.markdown(res.response)
| [
"llama_index.ServiceContext.from_defaults"
] | [((419, 451), 'logging.getLogger', 'logging.getLogger', (['"""llama_index"""'], {}), "('llama_index')\n", (436, 451), False, 'import logging\n'), ((992, 1102), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': 'TITLE', 'page_icon': 'ICON', 'layout': '"""centered"""', 'initial_sidebar_state': '"""collapsed"""'}), "(page_title=TITLE, page_icon=ICON, layout='centered',\n initial_sidebar_state='collapsed')\n", (1010, 1102), True, 'import streamlit as st\n'), ((1099, 1114), 'streamlit.title', 'st.title', (['TITLE'], {}), '(TITLE)\n', (1107, 1114), True, 'import streamlit as st\n'), ((1208, 1245), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (1225, 1245), True, 'import streamlit as st\n'), ((1421, 1458), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (1438, 1458), True, 'import streamlit as st\n'), ((2151, 2211), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '"""Extracting top entities..."""'}), "(show_spinner='Extracting top entities...')\n", (2168, 2211), True, 'import streamlit as st\n'), ((2675, 2739), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '"""Constructing news headlines..."""'}), "(show_spinner='Constructing news headlines...')\n", (2692, 2739), True, 'import streamlit as st\n'), ((2879, 2913), 'streamlit.experimental_get_query_params', 'st.experimental_get_query_params', ([], {}), '()\n', (2911, 2913), True, 'import streamlit as st\n'), ((3269, 3282), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (3279, 3282), True, 'import streamlit as st\n'), ((3816, 3868), 'streamlit.experimental_set_query_params', 'st.experimental_set_query_params', ([], {}), '(**st.session_state)\n', (3848, 3868), True, 'import streamlit as st\n'), ((4119, 4159), 'streamlit.tabs', 'st.tabs', (["['Top Entities', 'Frequencies']"], {}), "(['Top Entities', 'Frequencies'])\n", (4126, 4159), True, 'import streamlit as st\n'), ((4458, 4483), 'streamlit.markdown', 'st.markdown', (['res.response'], {}), '(res.response)\n', (4469, 4483), True, 'import streamlit as st\n'), ((1324, 1375), 'requests.get', 'requests.get', (['f"""{VISEXP}/{id}.transcript{lang}.txt"""'], {}), "(f'{VISEXP}/{id}.transcript{lang}.txt')\n", (1336, 1375), False, 'import requests\n'), ((1493, 1543), 'requests.get', 'requests.get', (['f"""{VISEXP}/{ch}.{dt}.inventory.json"""'], {}), "(f'{VISEXP}/{ch}.{dt}.inventory.json')\n", (1505, 1543), False, 'import requests\n'), ((1841, 1867), 'streamlit.progress', 'st.progress', (['(0.0)'], {'text': 'msg'}), '(0.0, text=msg)\n', (1852, 1867), True, 'import streamlit as st\n'), ((2414, 2449), 'wordcloud.WordCloud', 'WordCloud', ([], {'background_color': '"""white"""'}), "(background_color='white')\n", (2423, 2449), False, 'from wordcloud import WordCloud\n'), ((2497, 2511), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2509, 2511), True, 'import matplotlib.pyplot as plt\n'), ((3739, 3802), 'streamlit.info', 'st.info', (['f"""Select a channel to summarize for the selected day."""'], {}), "(f'Select a channel to summarize for the selected day.')\n", (3746, 3802), True, 'import streamlit as st\n'), ((3805, 3814), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (3812, 3814), True, 'import streamlit as st\n'), ((657, 685), 'pandas.to_datetime', 'pd.to_datetime', (['"""2022-03-25"""'], {}), "('2022-03-25')\n", (671, 685), True, 'import pandas as pd\n'), ((1149, 1203), 
'langchain.llms.OpenAI', 'OpenAI', ([], {'max_tokens': '(1024)', 'model_name': '"""text-davinci-003"""'}), "(max_tokens=1024, model_name='text-davinci-003')\n", (1155, 1203), False, 'from langchain.llms import OpenAI\n'), ((4102, 4111), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (4109, 4111), True, 'import streamlit as st\n'), ((702, 716), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (714, 716), False, 'from datetime import datetime, timedelta\n'), ((719, 738), 'datetime.timedelta', 'timedelta', ([], {'hours': '(30)'}), '(hours=30)\n', (728, 738), False, 'from datetime import datetime, timedelta\n'), ((1658, 1715), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (1686, 1715), False, 'from llama_index import GPTVectorStoreIndex, Document, LLMPredictor, ServiceContext\n')] |
# https://gpt-index.readthedocs.io/en/latest/examples/query_engine/sub_question_query_engine.html
# Using LlamaIndex as a Callable Tool
from langchain.agents import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent
from langchain import HuggingFaceHub
from llama_index import LangchainEmbedding, ServiceContext
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext
from llama_index.query_engine import SubQuestionQueryEngine
documents = SimpleDirectoryReader('data/experiment').load_data()
repo_id = "tiiuae/falcon-7b"
embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
llm = HuggingFaceHub(
    repo_id=repo_id,
    model_kwargs={"temperature": 0.1, "truncation": "only_first", "max_length": 1024},
)
llm_predictor = LLMPredictor(llm=llm)
service_context = ServiceContext.from_defaults(chunk_size=512, llm_predictor=llm_predictor, embed_model=embed_model)
index = VectorStoreIndex.from_documents(documents=documents, service_context=service_context)
query_engine = index.as_query_engine(similarity_top_k=3)
# setup base query engine as tool
query_engine_tools = [
QueryEngineTool(
query_engine=query_engine,
metadata=ToolMetadata(name='pg_essay', description='Paul Graham essay on What I Worked On')
)
]
query_engine = SubQuestionQueryEngine.from_defaults(query_engine_tools=query_engine_tools)
# response = query_engine.query('Explain childhood')
response = query_engine.query("How was Paul Graham's life different before and after YC?")
print(response)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.tools.ToolMetadata",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults"
] | [((874, 992), 'langchain.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': 'repo_id', 'model_kwargs': "{'temperature': 0.1, 'truncation': 'only_first', 'max_length': 1024}"}), "(repo_id=repo_id, model_kwargs={'temperature': 0.1,\n 'truncation': 'only_first', 'max_length': 1024})\n", (888, 992), False, 'from langchain import HuggingFaceHub\n'), ((1057, 1078), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (1069, 1078), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((1097, 1199), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': '(512)', 'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model'}), '(chunk_size=512, llm_predictor=llm_predictor,\n embed_model=embed_model)\n', (1125, 1199), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((1205, 1295), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(documents=documents, service_context=\n service_context)\n', (1236, 1295), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((1586, 1661), 'llama_index.query_engine.SubQuestionQueryEngine.from_defaults', 'SubQuestionQueryEngine.from_defaults', ([], {'query_engine_tools': 'query_engine_tools'}), '(query_engine_tools=query_engine_tools)\n', (1622, 1661), False, 'from llama_index.query_engine import SubQuestionQueryEngine\n'), ((842, 865), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {}), '()\n', (863, 865), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((727, 767), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data/experiment"""'], {}), "('data/experiment')\n", (748, 767), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((1479, 1566), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""pg_essay"""', 'description': '"""Paul Graham essay on What I Worked On"""'}), "(name='pg_essay', description=\n 'Paul Graham essay on What I Worked On')\n", (1491, 1566), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n')] |
from llama_index.core.node_parser import HTMLNodeParser
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
document = reader.load_data(Path("files/others/sample.html"))
my_tags = ["p", "span"]
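# Parse the HTML, keeping only the tags listed above as nodes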
html_parser = HTMLNodeParser(tags=my_tags)
nodes = html_parser.get_nodes_from_documents(document)
print('<span> elements:')
for node in nodes:
    if node.metadata['tag'] == 'span':
print(node.text)
print('<p> elements:')
for node in nodes:
    if node.metadata['tag'] == 'p':
print(node.text) | [
"llama_index.core.node_parser.HTMLNodeParser",
"llama_index.readers.file.FlatReader"
] | [((139, 151), 'llama_index.readers.file.FlatReader', 'FlatReader', ([], {}), '()\n', (149, 151), False, 'from llama_index.readers.file import FlatReader\n'), ((255, 283), 'llama_index.core.node_parser.HTMLNodeParser', 'HTMLNodeParser', ([], {'tags': 'my_tags'}), '(tags=my_tags)\n', (269, 283), False, 'from llama_index.core.node_parser import HTMLNodeParser\n'), ((180, 212), 'pathlib.Path', 'Path', (['"""files/others/sample.html"""'], {}), "('files/others/sample.html')\n", (184, 212), False, 'from pathlib import Path\n')] |
import os
import streamlit as st
from PIL import Image
from llama_index import (
Document,
GPTVectorStoreIndex,
GPTListIndex,
LLMPredictor,
ServiceContext,
SimpleDirectoryReader,
PromptHelper,
StorageContext,
load_index_from_storage,
download_loader,
)
from llama_index.readers.file.base import DEFAULT_FILE_READER_CLS
from constants import DEFAULT_TERM_STR, DEFAULT_TERMS, REFINE_TEMPLATE, TEXT_QA_TEMPLATE
from utils import get_llm
if "all_terms" not in st.session_state:
st.session_state["all_terms"] = DEFAULT_TERMS
@st.cache_resource
def get_file_extractor():
ImageReader = download_loader("ImageReader")
image_loader = ImageReader(text_type="plain_text")
file_extractor = DEFAULT_FILE_READER_CLS
file_extractor.update(
{
".jpg": image_loader,
".png": image_loader,
".jpeg": image_loader,
}
)
return file_extractor
file_extractor = get_file_extractor()
def extract_terms(documents, term_extract_str, llm_name, model_temperature, api_key):
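    # Build a temporary list index over the documents and query it for "Term: ... Definition: ..." pairs.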
llm = get_llm(llm_name, model_temperature, api_key, max_tokens=1024)
service_context = ServiceContext.from_defaults(
llm_predictor=LLMPredictor(llm=llm),
prompt_helper=PromptHelper(
max_input_size=4096, max_chunk_overlap=20, num_output=1024
),
chunk_size_limit=1024,
)
temp_index = GPTListIndex.from_documents(documents, service_context=service_context)
terms_definitions = str(
temp_index.as_query_engine(response_mode="tree_summarize").query(
term_extract_str
)
)
terms_definitions = [
x
for x in terms_definitions.split("\n")
if x and "Term:" in x and "Definition:" in x
]
# parse the text into a dict
terms_to_definition = {
x.split("Definition:")[0]
.split("Term:")[-1]
.strip(): x.split("Definition:")[-1]
.strip()
for x in terms_definitions
}
return terms_to_definition
def insert_terms(terms_to_definition):
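    # Insert each extracted term and definition into the index held in session state.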
for term, definition in terms_to_definition.items():
doc = Document(f"Term: {term}\nDefinition: {definition}")
st.session_state["llama_index"].insert(doc)
@st.cache_resource
def initialize_index(llm_name, model_temperature, api_key):
"""Create the GPTSQLStructStoreIndex object."""
llm = get_llm(llm_name, model_temperature, api_key)
service_context = ServiceContext.from_defaults(llm_predictor=LLMPredictor(llm=llm))
index = load_index_from_storage(
StorageContext.from_defaults(persist_dir="./initial_index"),
service_context=service_context,
)
return index
st.title("🦙 Llama Index Term Extractor 🦙")
st.markdown(
(
"This demo allows you to upload your own documents (either a screenshot/image or the actual text) and extract terms and definitions, building a knowledge base!\n\n"
"Powered by [Llama Index](https://gpt-index.readthedocs.io/en/latest/index.html) and OpenAI, you can augment the existing knowledge of an "
"LLM using your own notes, documents, and images. Then, when you ask about a term or definition, it will use your data first! "
"The app is currently pre-loaded with terms from the NYC Wikipedia page."
)
)
setup_tab, terms_tab, upload_tab, query_tab = st.tabs(
["Setup", "All Terms", "Upload/Extract Terms", "Query Terms"]
)
with setup_tab:
st.subheader("LLM Setup")
api_key = st.text_input("Enter your OpenAI API key here", type="password")
llm_name = st.selectbox(
"Which LLM?", ["text-davinci-003", "gpt-3.5-turbo", "gpt-4"]
)
model_temperature = st.slider(
"LLM Temperature", min_value=0.0, max_value=1.0, step=0.1
)
term_extract_str = st.text_area(
"The query to extract terms and definitions with.", value=DEFAULT_TERM_STR
)
with terms_tab:
st.subheader("Current Extracted Terms and Definitions")
st.json(st.session_state["all_terms"])
with upload_tab:
st.subheader("Extract and Query Definitions")
if st.button("Initialize Index and Reset Terms", key="init_index_1"):
st.session_state["llama_index"] = initialize_index(
llm_name, model_temperature, api_key
)
st.session_state["all_terms"] = DEFAULT_TERMS
if "llama_index" in st.session_state:
st.markdown(
"Either upload an image/screenshot of a document, or enter the text manually."
)
uploaded_file = st.file_uploader(
"Upload an image/screenshot of a document:", type=["png", "jpg", "jpeg"]
)
document_text = st.text_area("Or enter raw text")
if st.button("Extract Terms and Definitions") and (
uploaded_file or document_text
):
st.session_state["terms"] = {}
terms_docs = {}
with st.spinner("Extracting (images may be slow)..."):
if document_text:
terms_docs.update(
extract_terms(
[Document(document_text)],
term_extract_str,
llm_name,
model_temperature,
api_key,
)
)
if uploaded_file:
Image.open(uploaded_file).convert("RGB").save("temp.png")
img_reader = SimpleDirectoryReader(
input_files=["temp.png"], file_extractor=file_extractor
)
img_docs = img_reader.load_data()
os.remove("temp.png")
terms_docs.update(
extract_terms(
img_docs,
term_extract_str,
llm_name,
model_temperature,
api_key,
)
)
st.session_state["terms"].update(terms_docs)
if "terms" in st.session_state and st.session_state["terms"]:
st.markdown("Extracted terms")
st.json(st.session_state["terms"])
if st.button("Insert terms?"):
with st.spinner("Inserting terms"):
insert_terms(st.session_state["terms"])
st.session_state["all_terms"].update(st.session_state["terms"])
st.session_state["terms"] = {}
st.experimental_rerun()
with query_tab:
st.subheader("Query for Terms/Definitions!")
st.markdown(
(
"The LLM will attempt to answer your query, and augment it's answers using the terms/definitions you've inserted. "
"If a term is not in the index, it will answer using it's internal knowledge."
)
)
if st.button("Initialize Index and Reset Terms", key="init_index_2"):
st.session_state["llama_index"] = initialize_index(
llm_name, model_temperature, api_key
)
st.session_state["all_terms"] = DEFAULT_TERMS
if "llama_index" in st.session_state:
query_text = st.text_input("Ask about a term or definition:")
if query_text:
with st.spinner("Generating answer..."):
response = (
st.session_state["llama_index"]
.as_query_engine(
similarity_top_k=5,
response_mode="compact",
text_qa_template=TEXT_QA_TEMPLATE,
refine_template=REFINE_TEMPLATE,
)
.query(query_text)
)
st.markdown(str(response))
| [
"llama_index.SimpleDirectoryReader",
"llama_index.download_loader",
"llama_index.LLMPredictor",
"llama_index.StorageContext.from_defaults",
"llama_index.PromptHelper",
"llama_index.GPTListIndex.from_documents",
"llama_index.Document"
] | [((2706, 2748), 'streamlit.title', 'st.title', (['"""🦙 Llama Index Term Extractor 🦙"""'], {}), "('🦙 Llama Index Term Extractor 🦙')\n", (2714, 2748), True, 'import streamlit as st\n'), ((2749, 3271), 'streamlit.markdown', 'st.markdown', (['"""This demo allows you to upload your own documents (either a screenshot/image or the actual text) and extract terms and definitions, building a knowledge base!\n\nPowered by [Llama Index](https://gpt-index.readthedocs.io/en/latest/index.html) and OpenAI, you can augment the existing knowledge of an LLM using your own notes, documents, and images. Then, when you ask about a term or definition, it will use your data first! The app is currently pre-loaded with terms from the NYC Wikipedia page."""'], {}), '(\n """This demo allows you to upload your own documents (either a screenshot/image or the actual text) and extract terms and definitions, building a knowledge base!\n\nPowered by [Llama Index](https://gpt-index.readthedocs.io/en/latest/index.html) and OpenAI, you can augment the existing knowledge of an LLM using your own notes, documents, and images. Then, when you ask about a term or definition, it will use your data first! The app is currently pre-loaded with terms from the NYC Wikipedia page."""\n )\n', (2760, 3271), True, 'import streamlit as st\n'), ((3362, 3432), 'streamlit.tabs', 'st.tabs', (["['Setup', 'All Terms', 'Upload/Extract Terms', 'Query Terms']"], {}), "(['Setup', 'All Terms', 'Upload/Extract Terms', 'Query Terms'])\n", (3369, 3432), True, 'import streamlit as st\n'), ((633, 663), 'llama_index.download_loader', 'download_loader', (['"""ImageReader"""'], {}), "('ImageReader')\n", (648, 663), False, 'from llama_index import Document, GPTVectorStoreIndex, GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, PromptHelper, StorageContext, load_index_from_storage, download_loader\n'), ((1085, 1147), 'utils.get_llm', 'get_llm', (['llm_name', 'model_temperature', 'api_key'], {'max_tokens': '(1024)'}), '(llm_name, model_temperature, api_key, max_tokens=1024)\n', (1092, 1147), False, 'from utils import get_llm\n'), ((1419, 1490), 'llama_index.GPTListIndex.from_documents', 'GPTListIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (1446, 1490), False, 'from llama_index import Document, GPTVectorStoreIndex, GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, PromptHelper, StorageContext, load_index_from_storage, download_loader\n'), ((2397, 2442), 'utils.get_llm', 'get_llm', (['llm_name', 'model_temperature', 'api_key'], {}), '(llm_name, model_temperature, api_key)\n', (2404, 2442), False, 'from utils import get_llm\n'), ((3460, 3485), 'streamlit.subheader', 'st.subheader', (['"""LLM Setup"""'], {}), "('LLM Setup')\n", (3472, 3485), True, 'import streamlit as st\n'), ((3500, 3564), 'streamlit.text_input', 'st.text_input', (['"""Enter your OpenAI API key here"""'], {'type': '"""password"""'}), "('Enter your OpenAI API key here', type='password')\n", (3513, 3564), True, 'import streamlit as st\n'), ((3580, 3654), 'streamlit.selectbox', 'st.selectbox', (['"""Which LLM?"""', "['text-davinci-003', 'gpt-3.5-turbo', 'gpt-4']"], {}), "('Which LLM?', ['text-davinci-003', 'gpt-3.5-turbo', 'gpt-4'])\n", (3592, 3654), True, 'import streamlit as st\n'), ((3693, 3761), 'streamlit.slider', 'st.slider', (['"""LLM Temperature"""'], {'min_value': '(0.0)', 'max_value': '(1.0)', 'step': '(0.1)'}), "('LLM Temperature', min_value=0.0, max_value=1.0, 
step=0.1)\n", (3702, 3761), True, 'import streamlit as st\n'), ((3799, 3892), 'streamlit.text_area', 'st.text_area', (['"""The query to extract terms and definitions with."""'], {'value': 'DEFAULT_TERM_STR'}), "('The query to extract terms and definitions with.', value=\n DEFAULT_TERM_STR)\n", (3811, 3892), True, 'import streamlit as st\n'), ((3924, 3979), 'streamlit.subheader', 'st.subheader', (['"""Current Extracted Terms and Definitions"""'], {}), "('Current Extracted Terms and Definitions')\n", (3936, 3979), True, 'import streamlit as st\n'), ((3984, 4022), 'streamlit.json', 'st.json', (["st.session_state['all_terms']"], {}), "(st.session_state['all_terms'])\n", (3991, 4022), True, 'import streamlit as st\n'), ((4046, 4091), 'streamlit.subheader', 'st.subheader', (['"""Extract and Query Definitions"""'], {}), "('Extract and Query Definitions')\n", (4058, 4091), True, 'import streamlit as st\n'), ((4099, 4164), 'streamlit.button', 'st.button', (['"""Initialize Index and Reset Terms"""'], {'key': '"""init_index_1"""'}), "('Initialize Index and Reset Terms', key='init_index_1')\n", (4108, 4164), True, 'import streamlit as st\n'), ((6558, 6602), 'streamlit.subheader', 'st.subheader', (['"""Query for Terms/Definitions!"""'], {}), "('Query for Terms/Definitions!')\n", (6570, 6602), True, 'import streamlit as st\n'), ((6607, 6821), 'streamlit.markdown', 'st.markdown', (['"""The LLM will attempt to answer your query, and augment it\'s answers using the terms/definitions you\'ve inserted. If a term is not in the index, it will answer using it\'s internal knowledge."""'], {}), '(\n "The LLM will attempt to answer your query, and augment it\'s answers using the terms/definitions you\'ve inserted. If a term is not in the index, it will answer using it\'s internal knowledge."\n )\n', (6618, 6821), True, 'import streamlit as st\n'), ((6872, 6937), 'streamlit.button', 'st.button', (['"""Initialize Index and Reset Terms"""'], {'key': '"""init_index_2"""'}), "('Initialize Index and Reset Terms', key='init_index_2')\n", (6881, 6937), True, 'import streamlit as st\n'), ((2150, 2204), 'llama_index.Document', 'Document', (['f"""Term: {term}\nDefinition: {definition}"""'], {}), '(f"""Term: {term}\nDefinition: {definition}""")\n', (2158, 2204), False, 'from llama_index import Document, GPTVectorStoreIndex, GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, PromptHelper, StorageContext, load_index_from_storage, download_loader\n'), ((2578, 2637), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./initial_index"""'}), "(persist_dir='./initial_index')\n", (2606, 2637), False, 'from llama_index import Document, GPTVectorStoreIndex, GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, PromptHelper, StorageContext, load_index_from_storage, download_loader\n'), ((4390, 4491), 'streamlit.markdown', 'st.markdown', (['"""Either upload an image/screenshot of a document, or enter the text manually."""'], {}), "(\n 'Either upload an image/screenshot of a document, or enter the text manually.'\n )\n", (4401, 4491), True, 'import streamlit as st\n'), ((4528, 4622), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload an image/screenshot of a document:"""'], {'type': "['png', 'jpg', 'jpeg']"}), "('Upload an image/screenshot of a document:', type=['png',\n 'jpg', 'jpeg'])\n", (4544, 4622), True, 'import streamlit as st\n'), ((4665, 4698), 'streamlit.text_area', 'st.text_area', (['"""Or enter raw text"""'], {}), "('Or enter raw text')\n", 
(4677, 4698), True, 'import streamlit as st\n'), ((6164, 6194), 'streamlit.markdown', 'st.markdown', (['"""Extracted terms"""'], {}), "('Extracted terms')\n", (6175, 6194), True, 'import streamlit as st\n'), ((6203, 6237), 'streamlit.json', 'st.json', (["st.session_state['terms']"], {}), "(st.session_state['terms'])\n", (6210, 6237), True, 'import streamlit as st\n'), ((6250, 6276), 'streamlit.button', 'st.button', (['"""Insert terms?"""'], {}), "('Insert terms?')\n", (6259, 6276), True, 'import streamlit as st\n'), ((7176, 7224), 'streamlit.text_input', 'st.text_input', (['"""Ask about a term or definition:"""'], {}), "('Ask about a term or definition:')\n", (7189, 7224), True, 'import streamlit as st\n'), ((1223, 1244), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (1235, 1244), False, 'from llama_index import Document, GPTVectorStoreIndex, GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, PromptHelper, StorageContext, load_index_from_storage, download_loader\n'), ((1268, 1340), 'llama_index.PromptHelper', 'PromptHelper', ([], {'max_input_size': '(4096)', 'max_chunk_overlap': '(20)', 'num_output': '(1024)'}), '(max_input_size=4096, max_chunk_overlap=20, num_output=1024)\n', (1280, 1340), False, 'from llama_index import Document, GPTVectorStoreIndex, GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, PromptHelper, StorageContext, load_index_from_storage, download_loader\n'), ((2509, 2530), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (2521, 2530), False, 'from llama_index import Document, GPTVectorStoreIndex, GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, PromptHelper, StorageContext, load_index_from_storage, download_loader\n'), ((4710, 4752), 'streamlit.button', 'st.button', (['"""Extract Terms and Definitions"""'], {}), "('Extract Terms and Definitions')\n", (4719, 4752), True, 'import streamlit as st\n'), ((6513, 6536), 'streamlit.experimental_rerun', 'st.experimental_rerun', ([], {}), '()\n', (6534, 6536), True, 'import streamlit as st\n'), ((4901, 4949), 'streamlit.spinner', 'st.spinner', (['"""Extracting (images may be slow)..."""'], {}), "('Extracting (images may be slow)...')\n", (4911, 4949), True, 'import streamlit as st\n'), ((6295, 6324), 'streamlit.spinner', 'st.spinner', (['"""Inserting terms"""'], {}), "('Inserting terms')\n", (6305, 6324), True, 'import streamlit as st\n'), ((7265, 7299), 'streamlit.spinner', 'st.spinner', (['"""Generating answer..."""'], {}), "('Generating answer...')\n", (7275, 7299), True, 'import streamlit as st\n'), ((5479, 5557), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['temp.png']", 'file_extractor': 'file_extractor'}), "(input_files=['temp.png'], file_extractor=file_extractor)\n", (5500, 5557), False, 'from llama_index import Document, GPTVectorStoreIndex, GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, PromptHelper, StorageContext, load_index_from_storage, download_loader\n'), ((5678, 5699), 'os.remove', 'os.remove', (['"""temp.png"""'], {}), "('temp.png')\n", (5687, 5699), False, 'import os\n'), ((5092, 5115), 'llama_index.Document', 'Document', (['document_text'], {}), '(document_text)\n', (5100, 5115), False, 'from llama_index import Document, GPTVectorStoreIndex, GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, PromptHelper, StorageContext, load_index_from_storage, download_loader\n'), ((5388, 5413), 'PIL.Image.open', 'Image.open', 
(['uploaded_file'], {}), '(uploaded_file)\n', (5398, 5413), False, 'from PIL import Image\n')] |
from django.shortcuts import render
from django.views import generic
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
import os
from llama_index import (
StorageContext, load_index_from_storage
)
def Bot(q):
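    # Load the persisted blog index from disk and answer the question with its query engine.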
storage_context = StorageContext.from_defaults(persist_dir=os.path.join(settings.MEDIA_ROOT,"blog_store"))
# load index
index = load_index_from_storage(storage_context)
query_engine = index.as_query_engine()
response = query_engine.query(q)
return response
# Create your views here.
class PageView(LoginRequiredMixin, generic.TemplateView):
template_name = 'chatbot/chatbot.html'
@api_view(['POST'])
def chatbot(request):
question = request.data['question']
    print(question)
answer = Bot(question)
#print(answer,answer.source_nodes)
#answer = f"Answer to {question}"
return Response({'answer': str(answer)}) | [
"llama_index.load_index_from_storage"
] | [((817, 835), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (825, 835), False, 'from rest_framework.decorators import api_view\n'), ((545, 585), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (568, 585), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((468, 515), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', '"""blog_store"""'], {}), "(settings.MEDIA_ROOT, 'blog_store')\n", (480, 515), False, 'import os\n')] |
from pathlib import Path
from llama_index import download_loader, LLMPredictor, ServiceContext, VectorStoreIndex
from llama_index.vector_stores import MilvusVectorStore
from llama_index.readers import PDFReader
from llama_index import StorageContext
from pymilvus import MilvusClient
import os
# Define constants for Milvus configuration
MILVUS_HOST = os.environ.get("MILVUS_HOST", "10.97.151.193")
MILVUS_PORT = os.environ.get("MILVUS_PORT", "19530")
MILVUS_URI = f"http://{MILVUS_HOST}:{MILVUS_PORT}"
# Initialize PDFReader
pdf_reader = PDFReader()
# Load documents from a PDF file
document_path = Path('ingestion/keiichi_tsuchiya.pdf') #ToDo: load from S3 instead of local
documents = pdf_reader.load_data(file=document_path)
# Create an LLMPredictor with default parameters
llm_predictor = LLMPredictor(llm=None)
# Create a ServiceContext with LLMPredictor
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
# Initialize a MilvusVectorStore with Milvus server configuration
vector_store = MilvusVectorStore(
uri=MILVUS_URI,
dim=384,
use_secure=False
)
# Create a StorageContext with the MilvusVectorStore
storage_context = StorageContext.from_defaults(vector_store=vector_store)
# Create a VectorStoreIndex from the loaded documents
index = VectorStoreIndex.from_documents(
documents=documents,
overwrite=True, # Set to False if you don't want to overwrite the index
service_context=service_context,
storage_context=storage_context
)
# You can now perform queries with the index
# For example:
# result = index.query("What communication protocol is used in Pymilvus for communicating with Milvus?")
# print(result)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.vector_stores.MilvusVectorStore",
"llama_index.StorageContext.from_defaults",
"llama_index.readers.PDFReader"
] | [((353, 399), 'os.environ.get', 'os.environ.get', (['"""MILVUS_HOST"""', '"""10.97.151.193"""'], {}), "('MILVUS_HOST', '10.97.151.193')\n", (367, 399), False, 'import os\n'), ((414, 452), 'os.environ.get', 'os.environ.get', (['"""MILVUS_PORT"""', '"""19530"""'], {}), "('MILVUS_PORT', '19530')\n", (428, 452), False, 'import os\n'), ((541, 552), 'llama_index.readers.PDFReader', 'PDFReader', ([], {}), '()\n', (550, 552), False, 'from llama_index.readers import PDFReader\n'), ((603, 641), 'pathlib.Path', 'Path', (['"""ingestion/keiichi_tsuchiya.pdf"""'], {}), "('ingestion/keiichi_tsuchiya.pdf')\n", (607, 641), False, 'from pathlib import Path\n'), ((799, 821), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'None'}), '(llm=None)\n', (811, 821), False, 'from llama_index import download_loader, LLMPredictor, ServiceContext, VectorStoreIndex\n'), ((885, 942), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (913, 942), False, 'from llama_index import download_loader, LLMPredictor, ServiceContext, VectorStoreIndex\n'), ((1025, 1085), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'uri': 'MILVUS_URI', 'dim': '(384)', 'use_secure': '(False)'}), '(uri=MILVUS_URI, dim=384, use_secure=False)\n', (1042, 1085), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((1172, 1227), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1200, 1227), False, 'from llama_index import StorageContext\n'), ((1291, 1429), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents', 'overwrite': '(True)', 'service_context': 'service_context', 'storage_context': 'storage_context'}), '(documents=documents, overwrite=True,\n service_context=service_context, storage_context=storage_context)\n', (1322, 1429), False, 'from llama_index import download_loader, LLMPredictor, ServiceContext, VectorStoreIndex\n')] |
from llama_index.llms.ollama import Ollama
from typing import Any, Sequence
from llama_index.core.bridge.pydantic import Field
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponseGen,
CompletionResponse,
CompletionResponseGen,
)
from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback
class Ollama(Ollama):
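    # Thin wrapper around the stock Ollama LLM that injects a default system message and a keep_alive hint into every request.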
system: str = Field(
default="", description="Default system message to send to the model."
)
keep_alive: int = Field(
default=0,
description="Time, in minutes, to wait before unloading model.",
)
    request_timeout: float = 120.0
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any):
if self.system and len(messages) > 0 and messages[0].role != "system":
messages.insert(
0, ChatMessage(role="system", content=self.system)
)
kwargs["keep_alive"] = self.keep_alive
return super().chat(messages, **kwargs)
@llm_chat_callback()
def stream_chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponseGen:
if self.system and len(messages) > 0 and messages[0].role != "system":
messages.insert(
0, ChatMessage(role="system", content=self.system)
)
kwargs["keep_alive"] = self.keep_alive
yield from super().stream_chat(messages, **kwargs)
@llm_completion_callback()
def complete(self, prompt: str, formatted: bool = False, **kwargs: Any) -> CompletionResponse:
if self.system:
kwargs["system"] = self.system
kwargs["keep_alive"] = self.keep_alive
return super().complete(prompt, formatted, **kwargs)
@llm_completion_callback()
def stream_complete(self, prompt: str, formatted: bool = False, **kwargs: Any) -> CompletionResponseGen:
if self.system:
kwargs["system"] = self.system
kwargs["keep_alive"] = self.keep_alive
yield from super().stream_complete(prompt, formatted, **kwargs) | [
"llama_index.core.base.llms.types.ChatMessage",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.llms.callbacks.llm_completion_callback",
"llama_index.core.llms.callbacks.llm_chat_callback"
] | [((396, 473), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '""""""', 'description': '"""Default system message to send to the model."""'}), "(default='', description='Default system message to send to the model.')\n", (401, 473), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((510, 596), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(0)', 'description': '"""Time, in minutes, to wait before unloading model."""'}), "(default=0, description=\n 'Time, in minutes, to wait before unloading model.')\n", (515, 596), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((651, 670), 'llama_index.core.llms.callbacks.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (668, 670), False, 'from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback\n'), ((1033, 1052), 'llama_index.core.llms.callbacks.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (1050, 1052), False, 'from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback\n'), ((1452, 1477), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1475, 1477), False, 'from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback\n'), ((1762, 1787), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1785, 1787), False, 'from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback\n'), ((866, 913), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': '"""system"""', 'content': 'self.system'}), "(role='system', content=self.system)\n", (877, 913), False, 'from llama_index.core.base.llms.types import ChatMessage, ChatResponseGen, CompletionResponse, CompletionResponseGen\n'), ((1274, 1321), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': '"""system"""', 'content': 'self.system'}), "(role='system', content=self.system)\n", (1285, 1321), False, 'from llama_index.core.base.llms.types import ChatMessage, ChatResponseGen, CompletionResponse, CompletionResponseGen\n')] |