import os
import json
import gradio as gr
from huggingface_hub import HfApi, login
from dotenv import load_dotenv

from llm import get_groq_llm
from vectorstore import get_chroma_vectorstore
from embeddings import get_SFR_Code_embedding_model
from kadiApy_ragchain import KadiApyRagchain

# Load environment variables from .env file
load_dotenv()
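# A minimal .env for local runs (variable names must match the os.environ lookups below):
#   GROQ_API_KEY=<your Groq API key>
#   HF_Token=<your Hugging Face access token>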

vectorstore_path = "data/vectorstore"

# Required secrets; os.environ raises KeyError if either is missing
GROQ_API_KEY = os.environ["GROQ_API_KEY"]
HF_TOKEN = os.environ["HF_Token"]  # note: the environment variable uses this exact mixed casing

with open("config.json", "r") as file:
    config = json.load(file)
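
# Illustrative config.json (keys match the lookups below; values are placeholders):
# {
#     "llm_model_name": "<groq-model-id>",
#     "llm_model_temperature": "0.1"
# }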

login(HF_TOKEN)
hf_api = HfApi()  # not used elsewhere in this file

# Model settings from config.json
LLM_MODEL_NAME = config["llm_model_name"]
LLM_MODEL_TEMPERATURE = float(config["llm_model_temperature"])

def initialize():
    """Build the global RAG chain: code embeddings -> Chroma vectorstore -> Groq LLM."""
    global kadiAPY_ragchain

    vectorstore = get_chroma_vectorstore(get_SFR_Code_embedding_model(), vectorstore_path)
    llm = get_groq_llm(LLM_MODEL_NAME, LLM_MODEL_TEMPERATURE, GROQ_API_KEY)

    kadiAPY_ragchain = KadiApyRagchain(llm, vectorstore)

initialize()
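# Note: the chain is built once at import time, so demo.launch() serves an already-initialized assistant.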



def bot_kadi(history):
    user_query = history[-1][0]   
    response = kadiAPY_ragchain.process_query(user_query)
    history[-1] = (user_query, response)

    yield history  
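
# Direct (non-UI) smoke test of the chain, e.g.:
#   print(kadiAPY_ragchain.process_query("How do I upload a file to a record with kadi-apy?"))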


def add_text_to_chatbot(chat_history, user_input):
    """Append the user's message as a new, unanswered turn; bot_kadi fills in the answer."""
    if user_input:
        chat_history.append((user_input, None))
    return chat_history, ""

def main():
    with gr.Blocks() as demo:
        gr.Markdown("## KadiAPY - AI Coding-Assistant")
        gr.Markdown("AI assistant for KadiAPY based on RAG architecture powered by LLM")

        with gr.Tab("KadiAPY - AI Assistant"):
            with gr.Row():
                with gr.Column(scale=10):
                    chatbot = gr.Chatbot(
                        [],
                        elem_id="chatbot",
                        label="Kadi Bot",
                        bubble_full_width=False,
                        show_copy_button=True,
                        height=600,
                    )
                    user_txt = gr.Textbox(label="Question", placeholder="Type in your question and press Enter or click Submit")

                    with gr.Row():
                        with gr.Column(scale=1):
                            submit_btn = gr.Button("Submit", variant="primary")
                        with gr.Column(scale=1):
                            clear_btn = gr.Button("Clear", variant="stop")

                    gr.Examples(
                        examples=[
                            "Write me a Python script that can convert plain JSON to a Kadi4Mat-compatible extra metadata structure",
                            "I need a method to upload a file to a record. The id of the record is 3",
                        ],
                        inputs=user_txt,
                        outputs=[chatbot, user_txt],  # matches the two values returned by fn
                        fn=add_text_to_chatbot,
                        label="Try asking...",
                        cache_examples=False,
                        examples_per_page=3,
                    )

        # Pressing Enter in the textbox or clicking Submit runs the same two-step chain:
        # first record the user's turn, then let the RAG chain fill in the answer.
        user_txt.submit(add_text_to_chatbot, [chatbot, user_txt], [chatbot, user_txt]).then(
            bot_kadi, chatbot, chatbot
        )
        submit_btn.click(add_text_to_chatbot, [chatbot, user_txt], [chatbot, user_txt]).then(
            bot_kadi, chatbot, chatbot
        )
        clear_btn.click(lambda: ([], ""), None, [chatbot, user_txt])

    demo.launch()

if __name__ == "__main__":
    main()