import os

import gradio as gr
import pinecone
import requests
from langchain.chains import RetrievalQA
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.llms.base import LLM
from langchain.vectorstores import Pinecone
# from gradio_client import Client  # only needed by the commented-out retrieval client below

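# Hosted Hugging Face Inference API endpoint for the Zephyr-7B-beta model.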
API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
# retrieval = Client("https://ishaan-mital-ncert-helper-vector-db.hf.space/--replicas/149bg26k5/")

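# Embedding model for queries; it should match the model that was used to
# embed the documents already stored in the Pinecone index.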
embed_model_id = 'sentence-transformers/all-MiniLM-L6-v2'
embed_model = HuggingFaceEmbeddings(
    model_name=embed_model_id,
)


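# Initialise Pinecone from environment variables (e.g. Space secrets).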
pinecone.init(
    api_key=os.environ.get('PINECONE_API_KEY'),
    environment=os.environ.get('PINECONE_ENVIRONMENT')
)

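# Pinecone index holding the pre-embedded documents.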
index_name = 'llama-rag'
index = pinecone.Index(index_name)
text_field = 'text'  # field in metadata that contains text content

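# LangChain vector-store wrapper: embeds the query with embed_query and
# searches the index, reading page content from the 'text' metadata field.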
vectorstore = Pinecone(
    index, embed_model.embed_query, text_field
)

def call_llm_api(input_text):
    headers = {"Authorization": f"Bearer {os.environ.get('API_KEY')}"}
    # The Inference API expects the prompt under the key "inputs", not "input".
    payload = {"inputs": input_text}
    response = requests.post(API_URL, headers=headers, json=payload)
    response.raise_for_status()
    data = response.json()
    # Text-generation endpoints usually return [{"generated_text": "..."}];
    # fall back to the raw payload if the shape differs.
    if isinstance(data, list) and data and "generated_text" in data[0]:
        return data[0]["generated_text"]
    return str(data)

class APILLM(LLM):
    """Custom LangChain LLM that delegates generation to the remote API.

    RetrievalQA needs a LangChain LLM (there is no `langchain.llms.Runnable`),
    so we subclass `LLM` and route `_call` through `call_llm_api`.
    """

    @property
    def _llm_type(self) -> str:
        return "hf-inference-api"

    def _call(self, prompt, stop=None, run_manager=None, **kwargs):
        return call_llm_api(prompt)

api_llm = APILLM()

# Assemble the RAG chain: retrieve relevant chunks, "stuff" them into the
# prompt, and let the remote LLM answer.
rag_pipeline = RetrievalQA.from_chain_type(
    llm=api_llm, chain_type='stuff',
    retriever=vectorstore.as_retriever()
)


def main(question):
    result = rag_pipeline(question)
    # RetrievalQA returns a dict like {"query": ..., "result": ...}.
    return result["result"]

demo = gr.Interface(main, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch(share=True)
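# Usage: run `python app.py`, then ask a question in the Gradio UI; the
# retriever pulls matching passages from the 'llama-rag' Pinecone index and
# the Zephyr model answers from that retrieved context.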