# ncert-helper / app.py
import os

import gradio as gr
import pinecone
import requests
from langchain.chains import RetrievalQA
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.llms.base import LLM
from langchain.vectorstores import Pinecone

# from gradio_client import Client  # only needed for the commented-out retrieval client below
API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
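# Hosted text-generation endpoint for HuggingFaceH4/zephyr-7b-beta; requests are
# authenticated with the API_KEY environment variable (see call_llm_api below).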
# retrieval = Client("https://ishaan-mital-ncert-helper-vector-db.hf.space/--replicas/149bg26k5/")
embed_model_id = 'sentence-transformers/all-MiniLM-L6-v2'
embed_model = HuggingFaceEmbeddings(
    model_name=embed_model_id,
)
pinecone.init(
    api_key=os.environ.get('PINECONE_API_KEY'),
    environment=os.environ.get('PINECONE_ENVIRONMENT')
)
index_name = 'llama-rag'
index = pinecone.Index(index_name)
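# NOTE: the 'llama-rag' index is assumed to already exist in Pinecone with
# dimension 384 (the output size of all-MiniLM-L6-v2); pinecone.Index() only
# connects to it, it does not create it.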
text_field = 'text' # field in metadata that contains text content
vectorstore = Pinecone(
    index, embed_model.embed_query, text_field
)
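# Quick sanity check (hypothetical query, assuming the index is populated):
# print(vectorstore.similarity_search("What is photosynthesis?", k=3))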
def call_llm_api(input_text):
    """Send a prompt to the zephyr-7b-beta Inference API and return the completion."""
    headers = {"Authorization": f"Bearer {os.environ.get('API_KEY')}"}
    payload = {"inputs": input_text}  # the Inference API expects an "inputs" key
    response = requests.post(API_URL, headers=headers, json=payload)
    response.raise_for_status()
    # Text-generation endpoints respond with [{"generated_text": ...}];
    # adjust as needed if your endpoint's response format differs.
    return response.json()[0]["generated_text"]
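# e.g. call_llm_api("State Newton's first law.") -> a plain-string completion
# (assumes a valid API_KEY with access to the hosted model).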
# langchain.llms has no Runnable class; the supported way to plug a custom
# backend into a chain is to subclass LLM and implement _call / _llm_type.
class APILLM(LLM):
    """Minimal LangChain LLM wrapper that delegates generation to call_llm_api."""

    @property
    def _llm_type(self) -> str:
        return "hf_inference_api"

    def _call(self, prompt, stop=None, run_manager=None, **kwargs):
        return call_llm_api(prompt)

api_llm = APILLM()
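# api_llm now behaves like any other LangChain LLM, e.g.
# api_llm("What is an atom?") returns the model's completion as a string.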
rag_pipeline = RetrievalQA.from_chain_type(
    llm=api_llm, chain_type='stuff',
    retriever=vectorstore.as_retriever()
)
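# chain_type='stuff' concatenates every retrieved chunk into one prompt: simple
# and effective for a few short passages, but it can overflow the model's
# context window if the retriever returns many long documents.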
def main(question):
    # RetrievalQA returns {"query": ..., "result": ...}; show only the answer.
    return rag_pipeline(question)["result"]
demo = gr.Interface(main, inputs="text", outputs="text")
if __name__ == "__main__":
    demo.launch(share=True)